author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-11-25 11:33:12 +0100
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-12-08 17:25:15 +0100
commit    c5c9bcacd16f087b3e595ec2ecab59790d5923f7 (patch)
tree      b321372b238bddd639373c7da496ed5d565ab13e
parent    4861153a4598b8c68e3c5baa33199ed540ace753 (diff)
validation: Performance
-rw-r--r--  testsuites/validation/tc-event-performance.c    | 411
-rw-r--r--  testsuites/validation/tc-message-performance.c  | 743
2 files changed, 1152 insertions(+), 2 deletions(-)
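
Both test sources below drive the runtime-measurement API of the RTEMS Test Framework (<rtems/test.h>): each measurement fills a T_measure_runtime_request with a name, an optional per-sample setup, a body whose execution time is sampled, and a teardown that may adjust the measured delta and accept or discard the sample, then hands the request to T_measure_runtime() together with a context created during fixture setup. The following is a minimal sketch of that pattern for reference only; ExampleBody, ExampleTeardown, and MeasureExample are illustrative names, the T_measure_runtime_context type name is assumed from the framework, and the field set and teardown signature mirror the wrappers in this patch rather than being part of the change.

#include <rtems/test.h>

/* The code whose execution time is sampled. */
static void ExampleBody( void *arg )
{
  (void) arg;
}

/*
 * Optionally overwrite *delta with a runtime measured via T_tick() and
 * decide whether to keep the sample: returning false discards it, so
 * tic != toc (a clock tick occurred during the sample) drops it here.
 */
static bool ExampleTeardown(
  void         *arg,
  T_ticks      *delta,
  uint32_t      tic,
  uint32_t      toc,
  unsigned int  retry
)
{
  (void) arg;
  (void) delta;
  (void) retry;

  return tic == toc;
}

static void MeasureExample( T_measure_runtime_context *context )
{
  T_measure_runtime_request request = {
    .name = "ExampleReqPerf",
    .setup = NULL,
    .body = ExampleBody,
    .teardown = ExampleTeardown
  };

  T_measure_runtime( context, &request );
}
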
diff --git a/testsuites/validation/tc-event-performance.c b/testsuites/validation/tc-event-performance.c
index 3c16211057..d73ec73f6e 100644
--- a/testsuites/validation/tc-event-performance.c
+++ b/testsuites/validation/tc-event-performance.c
@@ -54,6 +54,8 @@
#include <rtems.h>
+#include "tx-support.h"
+
#include <rtems/test.h>
/**
@@ -72,6 +74,11 @@
*/
typedef struct {
/**
+ * @brief This member provides a worker identifier.
+ */
+ rtems_id worker_id;
+
+ /**
* @brief This member provides a status code.
*/
rtems_status_code status;
@@ -100,6 +107,53 @@ typedef struct {
static RtemsEventValPerf_Context
RtemsEventValPerf_Instance;
+typedef RtemsEventValPerf_Context Context;
+
+typedef enum {
+ EVENT_END = RTEMS_EVENT_0,
+ EVENT_OTHER = RTEMS_EVENT_1
+} Event;
+
+static void Send( const Context *ctx, rtems_event_set events )
+{
+ SendEvents( ctx->worker_id, events );
+}
+
+static void Satisfy( void *arg )
+{
+ Context *ctx;
+
+ ctx = arg;
+ ctx->begin = T_tick();
+ ctx->status = rtems_event_send( ctx->worker_id, EVENT_END | EVENT_OTHER );
+}
+
+static void Worker( rtems_task_argument arg )
+{
+ Context *ctx;
+
+ ctx = (Context *) arg;
+
+ while ( true ) {
+ rtems_event_set events;
+ rtems_status_code sc;
+ T_ticks ticks;
+
+ sc = rtems_event_receive(
+ EVENT_END | EVENT_OTHER,
+ RTEMS_EVENT_ALL | RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT,
+ &events
+ );
+ ticks = T_tick();
+ T_quiet_rsc_success( sc );
+
+ if ( ( events & EVENT_END ) != 0 ) {
+ ctx->end = ticks;
+ }
+ }
+}
+
static void RtemsEventValPerf_Setup_Context( RtemsEventValPerf_Context *ctx )
{
T_measure_runtime_config config;
@@ -112,23 +166,344 @@ static void RtemsEventValPerf_Setup_Context( RtemsEventValPerf_Context *ctx )
T_assert_not_null( ctx->context );
}
+/**
+ * @brief Create a worker task.
+ */
+static void RtemsEventValPerf_Setup( RtemsEventValPerf_Context *ctx )
+{
+ SetSelfPriority( PRIO_NORMAL );
+ ctx->worker_id = CreateTask( "WORK", PRIO_HIGH );
+ StartTask( ctx->worker_id, Worker, ctx );
+}
+
static void RtemsEventValPerf_Setup_Wrap( void *arg )
{
RtemsEventValPerf_Context *ctx;
ctx = arg;
RtemsEventValPerf_Setup_Context( ctx );
+ RtemsEventValPerf_Setup( ctx );
+}
+
+/**
+ * @brief Delete the worker task and restore the runner priority.
+ */
+static void RtemsEventValPerf_Teardown( RtemsEventValPerf_Context *ctx )
+{
+ DeleteTask( ctx->worker_id );
+ RestoreRunnerPriority();
+}
+
+static void RtemsEventValPerf_Teardown_Wrap( void *arg )
+{
+ RtemsEventValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsEventValPerf_Teardown( ctx );
}
static T_fixture RtemsEventValPerf_Fixture = {
.setup = RtemsEventValPerf_Setup_Wrap,
.stop = NULL,
- .teardown = NULL,
+ .teardown = RtemsEventValPerf_Teardown_Wrap,
.scope = NULL,
.initial_context = &RtemsEventValPerf_Instance
};
/**
+ * @brief Send two events from within interrupt context. Satisfy the event
+ * condition.
+ */
+static void RtemsEventReqPerfIsrPreempt_Body( RtemsEventValPerf_Context *ctx )
+{
+ CallWithinISR( Satisfy, ctx );
+}
+
+static void RtemsEventReqPerfIsrPreempt_Body_Wrap( void *arg )
+{
+ RtemsEventValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsEventReqPerfIsrPreempt_Body( ctx );
+}
+
+/**
+ * @brief Set the measured runtime. Discard samples interrupted by a clock
+ * tick.
+ */
+static bool RtemsEventReqPerfIsrPreempt_Teardown(
+ RtemsEventValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc_success( ctx->status );
+
+ *delta = ctx->end - ctx->begin;
+
+ return tic == toc;
+}
+
+static bool RtemsEventReqPerfIsrPreempt_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsEventValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsEventReqPerfIsrPreempt_Teardown( ctx, delta, tic, toc, retry );
+}
+
+/**
+ * @brief Lower the worker priority.
+ */
+static void RtemsEventReqPerfOther_Setup( RtemsEventValPerf_Context *ctx )
+{
+ SetPriority( ctx->worker_id, PRIO_LOW );
+}
+
+static void RtemsEventReqPerfOther_Setup_Wrap( void *arg )
+{
+ RtemsEventValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsEventReqPerfOther_Setup( ctx );
+}
+
+/**
+ * @brief Send two events. Satisfy the event condition.
+ */
+static void RtemsEventReqPerfOther_Body( RtemsEventValPerf_Context *ctx )
+{
+ ctx->status = rtems_event_send( ctx->worker_id, EVENT_END | EVENT_OTHER );
+}
+
+static void RtemsEventReqPerfOther_Body_Wrap( void *arg )
+{
+ RtemsEventValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsEventReqPerfOther_Body( ctx );
+}
+
+/**
+ * @brief Restore the worker priority. Discard samples interrupted by a clock
+ * tick.
+ */
+static bool RtemsEventReqPerfOther_Teardown(
+ RtemsEventValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc_success( ctx->status );
+
+ SetPriority( ctx->worker_id, PRIO_HIGH );
+
+ return tic == toc;
+}
+
+static bool RtemsEventReqPerfOther_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsEventValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsEventReqPerfOther_Teardown( ctx, delta, tic, toc, retry );
+}
+
+#if defined(RTEMS_SMP)
+/**
+ * @brief Move worker to scheduler B.
+ */
+static void RtemsEventReqPerfOtherCpu_Prepare( RtemsEventValPerf_Context *ctx )
+{
+ SetScheduler( ctx->worker_id, SCHEDULER_B_ID, PRIO_NORMAL );
+}
+
+/**
+ * @brief Send two events. Satisfy the event condition.
+ */
+static void RtemsEventReqPerfOtherCpu_Body( RtemsEventValPerf_Context *ctx )
+{
+ ctx->begin = T_tick();
+ ctx->status = rtems_event_send( ctx->worker_id, EVENT_END | EVENT_OTHER );
+}
+
+static void RtemsEventReqPerfOtherCpu_Body_Wrap( void *arg )
+{
+ RtemsEventValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsEventReqPerfOtherCpu_Body( ctx );
+}
+
+/**
+ * @brief Set the measured runtime. Make sure the worker waits for the next
+ * event. Discard samples interrupted by a clock tick.
+ */
+static bool RtemsEventReqPerfOtherCpu_Teardown(
+ RtemsEventValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc_success( ctx->status );
+
+ *delta = ctx->end - ctx->begin;
+ WaitForNextTask( 1, ctx->worker_id );
+
+ return tic == toc;
+}
+
+static bool RtemsEventReqPerfOtherCpu_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsEventValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsEventReqPerfOtherCpu_Teardown( ctx, delta, tic, toc, retry );
+}
+
+/**
+ * @brief Move worker to scheduler A.
+ */
+static void RtemsEventReqPerfOtherCpu_Cleanup( RtemsEventValPerf_Context *ctx )
+{
+ SetScheduler( ctx->worker_id, SCHEDULER_A_ID, PRIO_HIGH );
+}
+#endif
+
+/**
+ * @brief Send an event. Do not satisfy the event condition.
+ */
+static void RtemsEventReqPerfOtherNotSatisfied_Body(
+ RtemsEventValPerf_Context *ctx
+)
+{
+ ctx->status = rtems_event_send( ctx->worker_id, EVENT_OTHER );
+}
+
+static void RtemsEventReqPerfOtherNotSatisfied_Body_Wrap( void *arg )
+{
+ RtemsEventValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsEventReqPerfOtherNotSatisfied_Body( ctx );
+}
+
+/**
+ * @brief Let the worker wait for the next set of events. Discard samples
+ * interrupted by a clock tick.
+ */
+static bool RtemsEventReqPerfOtherNotSatisfied_Teardown(
+ RtemsEventValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc_success( ctx->status );
+
+ Send( ctx, EVENT_END );
+
+ return tic == toc;
+}
+
+static bool RtemsEventReqPerfOtherNotSatisfied_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsEventValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsEventReqPerfOtherNotSatisfied_Teardown(
+ ctx,
+ delta,
+ tic,
+ toc,
+ retry
+ );
+}
+
+/**
+ * @brief Send two events. Satisfy the event condition.
+ */
+static void RtemsEventReqPerfOtherPreempt_Body(
+ RtemsEventValPerf_Context *ctx
+)
+{
+ ctx->begin = T_tick();
+ ctx->status = rtems_event_send( ctx->worker_id, EVENT_END | EVENT_OTHER );
+}
+
+static void RtemsEventReqPerfOtherPreempt_Body_Wrap( void *arg )
+{
+ RtemsEventValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsEventReqPerfOtherPreempt_Body( ctx );
+}
+
+/**
+ * @brief Set the measured runtime. Discard samples interrupted by a clock
+ * tick.
+ */
+static bool RtemsEventReqPerfOtherPreempt_Teardown(
+ RtemsEventValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc_success( ctx->status );
+
+ *delta = ctx->end - ctx->begin;
+
+ return tic == toc;
+}
+
+static bool RtemsEventReqPerfOtherPreempt_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsEventValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsEventReqPerfOtherPreempt_Teardown( ctx, delta, tic, toc, retry );
+}
+
+/**
* @fn void T_case_body_RtemsEventValPerf( void )
*/
T_TEST_CASE_FIXTURE( RtemsEventValPerf, &RtemsEventValPerf_Fixture )
@@ -136,6 +511,40 @@ T_TEST_CASE_FIXTURE( RtemsEventValPerf, &RtemsEventValPerf_Fixture )
RtemsEventValPerf_Context *ctx;
ctx = T_fixture_context();
+
+ ctx->request.name = "RtemsEventReqPerfIsrPreempt";
+ ctx->request.setup = NULL;
+ ctx->request.body = RtemsEventReqPerfIsrPreempt_Body_Wrap;
+ ctx->request.teardown = RtemsEventReqPerfIsrPreempt_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+
+ ctx->request.name = "RtemsEventReqPerfOther";
+ ctx->request.setup = RtemsEventReqPerfOther_Setup_Wrap;
+ ctx->request.body = RtemsEventReqPerfOther_Body_Wrap;
+ ctx->request.teardown = RtemsEventReqPerfOther_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+
+ #if defined(RTEMS_SMP)
+ RtemsEventReqPerfOtherCpu_Prepare( ctx );
+ ctx->request.name = "RtemsEventReqPerfOtherCpu";
+ ctx->request.setup = NULL;
+ ctx->request.body = RtemsEventReqPerfOtherCpu_Body_Wrap;
+ ctx->request.teardown = RtemsEventReqPerfOtherCpu_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+ RtemsEventReqPerfOtherCpu_Cleanup( ctx );
+ #endif
+
+ ctx->request.name = "RtemsEventReqPerfOtherNotSatisfied";
+ ctx->request.setup = NULL;
+ ctx->request.body = RtemsEventReqPerfOtherNotSatisfied_Body_Wrap;
+ ctx->request.teardown = RtemsEventReqPerfOtherNotSatisfied_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+
+ ctx->request.name = "RtemsEventReqPerfOtherPreempt";
+ ctx->request.setup = NULL;
+ ctx->request.body = RtemsEventReqPerfOtherPreempt_Body_Wrap;
+ ctx->request.teardown = RtemsEventReqPerfOtherPreempt_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
}
/** @} */
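
The second file, tc-message-performance.c, constructs a statically allocated message queue with rtems_message_queue_construct() and the RTEMS_MESSAGE_QUEUE_BUFFER() storage helper. A self-contained sketch of that construction plus a simple send/receive round trip follows for reference; the queue name, the uint64_t payload, and the absence of status checks are illustrative only and not part of the patch.

#include <rtems.h>

#define EXAMPLE_MAX_PENDING 1
#define EXAMPLE_MAX_SIZE    8

/* Statically allocated storage for the pending messages. */
static RTEMS_MESSAGE_QUEUE_BUFFER( EXAMPLE_MAX_SIZE )
  example_storage[ EXAMPLE_MAX_PENDING ];

static const rtems_message_queue_config example_config = {
  .name = rtems_build_name( 'M', 'S', 'G', 'Q' ),
  .maximum_pending_messages = EXAMPLE_MAX_PENDING,
  .maximum_message_size = EXAMPLE_MAX_SIZE,
  .storage_area = example_storage,
  .storage_size = sizeof( example_storage )
};

static void ExampleRoundTrip( void )
{
  rtems_status_code sc;
  rtems_id          queue_id;
  uint64_t          out;
  uint64_t          in;
  size_t            size;

  /* Construct the queue from the static configuration. */
  sc = rtems_message_queue_construct( &example_config, &queue_id );

  /* Send one message and receive it back without blocking. */
  out = 123;
  sc = rtems_message_queue_send( queue_id, &out, sizeof( out ) );
  sc = rtems_message_queue_receive(
    queue_id,
    &in,
    &size,
    RTEMS_NO_WAIT,
    0
  );

  /* Destroy the queue again. */
  sc = rtems_message_queue_delete( queue_id );
  (void) sc;
}
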
diff --git a/testsuites/validation/tc-message-performance.c b/testsuites/validation/tc-message-performance.c
index f540fac2ab..cc76217ff9 100644
--- a/testsuites/validation/tc-message-performance.c
+++ b/testsuites/validation/tc-message-performance.c
@@ -54,6 +54,8 @@
#include <rtems.h>
+#include "tx-support.h"
+
#include <rtems/test.h>
/**
@@ -72,6 +74,16 @@
*/
typedef struct {
/**
+ * @brief This member provides a message queue identifier.
+ */
+ rtems_id queue_id;
+
+ /**
+ * @brief This member provides a worker identifier.
+ */
+ rtems_id worker_id;
+
+ /**
* @brief This member provides a status code.
*/
rtems_status_code status;
@@ -100,6 +112,96 @@ typedef struct {
static RtemsMessageValPerf_Context
RtemsMessageValPerf_Instance;
+#define MAXIMUM_PENDING_MESSAGES 1
+
+#define MAXIMUM_MESSAGE_SIZE 8
+
+typedef RtemsMessageValPerf_Context Context;
+
+typedef enum {
+ EVENT_END = RTEMS_EVENT_0,
+ EVENT_SEND = RTEMS_EVENT_1,
+ EVENT_SEND_END = RTEMS_EVENT_2,
+ EVENT_RECEIVE = RTEMS_EVENT_3,
+ EVENT_RECEIVE_END = RTEMS_EVENT_4
+} Event;
+
+static RTEMS_MESSAGE_QUEUE_BUFFER( MAXIMUM_MESSAGE_SIZE )
+ storage_area[ MAXIMUM_PENDING_MESSAGES ];
+
+static const rtems_message_queue_config config = {
+ .name = OBJECT_NAME,
+ .maximum_pending_messages = MAXIMUM_PENDING_MESSAGES,
+ .maximum_message_size = MAXIMUM_MESSAGE_SIZE,
+ .storage_area = storage_area,
+ .storage_size = sizeof( storage_area )
+};
+
+static void Send( const Context *ctx, rtems_event_set events )
+{
+ SendEvents( ctx->worker_id, events );
+}
+
+static void Worker( rtems_task_argument arg )
+{
+ Context *ctx;
+
+ ctx = (Context *) arg;
+
+ while ( true ) {
+ rtems_event_set events;
+ rtems_status_code sc;
+ T_ticks ticks;
+ uint64_t message;
+
+ sc = rtems_event_receive(
+ RTEMS_ALL_EVENTS,
+ RTEMS_EVENT_ANY | RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT,
+ &events
+ );
+ ticks = T_tick();
+ T_quiet_rsc_success( sc );
+
+ if ( ( events & EVENT_END ) != 0 ) {
+ ctx->end = ticks;
+ }
+
+ if ( ( events & EVENT_SEND ) != 0 ) {
+ message = 0;
+ sc = rtems_message_queue_send(
+ ctx->queue_id,
+ &message,
+ sizeof( message )
+ );
+ ticks = T_tick();
+ T_quiet_rsc_success( sc );
+
+ if ( ( events & EVENT_SEND_END ) != 0 ) {
+ ctx->end = ticks;
+ }
+ }
+
+ if ( ( events & EVENT_RECEIVE ) != 0 ) {
+ size_t size;
+
+ sc = rtems_message_queue_receive(
+ ctx->queue_id,
+ &message,
+ &size,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ ticks = T_tick();
+ T_quiet_rsc_success( sc );
+
+ if ( ( events & EVENT_RECEIVE_END ) != 0 ) {
+ ctx->end = ticks;
+ }
+ }
+ }
+}
+
static void RtemsMessageValPerf_Setup_Context(
RtemsMessageValPerf_Context *ctx
)
@@ -114,23 +216,616 @@ static void RtemsMessageValPerf_Setup_Context(
T_assert_not_null( ctx->context );
}
+/**
+ * @brief Create a message queue and a worker task.
+ */
+static void RtemsMessageValPerf_Setup( RtemsMessageValPerf_Context *ctx )
+{
+ rtems_status_code sc;
+
+ SetSelfPriority( PRIO_NORMAL );
+
+ sc = rtems_message_queue_construct( &config, &ctx->queue_id );
+ T_rsc_success( sc );
+
+ ctx->worker_id = CreateTask( "WORK", PRIO_HIGH );
+ StartTask( ctx->worker_id, Worker, ctx );
+}
+
static void RtemsMessageValPerf_Setup_Wrap( void *arg )
{
RtemsMessageValPerf_Context *ctx;
ctx = arg;
RtemsMessageValPerf_Setup_Context( ctx );
+ RtemsMessageValPerf_Setup( ctx );
+}
+
+/**
+ * @brief Delete the worker task and the message queue.
+ */
+static void RtemsMessageValPerf_Teardown( RtemsMessageValPerf_Context *ctx )
+{
+ rtems_status_code sc;
+
+ DeleteTask( ctx->worker_id );
+
+ sc = rtems_message_queue_delete( ctx->queue_id );
+ T_rsc_success( sc );
+
+ RestoreRunnerPriority();
+}
+
+static void RtemsMessageValPerf_Teardown_Wrap( void *arg )
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsMessageValPerf_Teardown( ctx );
}
static T_fixture RtemsMessageValPerf_Fixture = {
.setup = RtemsMessageValPerf_Setup_Wrap,
.stop = NULL,
- .teardown = NULL,
+ .teardown = RtemsMessageValPerf_Teardown_Wrap,
.scope = NULL,
.initial_context = &RtemsMessageValPerf_Instance
};
/**
+ * @brief Try to receive a message.
+ */
+static void RtemsMessageReqPerfReceiveTry_Body(
+ RtemsMessageValPerf_Context *ctx
+)
+{
+ uint64_t message;
+ size_t size;
+
+ ctx->status = rtems_message_queue_receive(
+ ctx->queue_id,
+ &message,
+ &size,
+ RTEMS_NO_WAIT,
+ 0
+ );
+}
+
+static void RtemsMessageReqPerfReceiveTry_Body_Wrap( void *arg )
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsMessageReqPerfReceiveTry_Body( ctx );
+}
+
+/**
+ * @brief Discard samples interrupted by a clock tick.
+ */
+static bool RtemsMessageReqPerfReceiveTry_Teardown(
+ RtemsMessageValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc( ctx->status, RTEMS_UNSATISFIED );
+
+ return tic == toc;
+}
+
+static bool RtemsMessageReqPerfReceiveTry_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsMessageReqPerfReceiveTry_Teardown( ctx, delta, tic, toc, retry );
+}
+
+/**
+ * @brief Schedule a message send.
+ */
+static void RtemsMessageReqPerfReceiveWaitForever_Setup(
+ RtemsMessageValPerf_Context *ctx
+)
+{
+ SetPriority( ctx->worker_id, PRIO_LOW );
+ Send( ctx, EVENT_END | EVENT_SEND );
+}
+
+static void RtemsMessageReqPerfReceiveWaitForever_Setup_Wrap( void *arg )
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsMessageReqPerfReceiveWaitForever_Setup( ctx );
+}
+
+/**
+ * @brief Receive a message. Wait forever.
+ */
+static void RtemsMessageReqPerfReceiveWaitForever_Body(
+ RtemsMessageValPerf_Context *ctx
+)
+{
+ uint64_t message;
+ size_t size;
+
+ ctx->status = rtems_message_queue_receive(
+ ctx->queue_id,
+ &message,
+ &size,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+}
+
+static void RtemsMessageReqPerfReceiveWaitForever_Body_Wrap( void *arg )
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsMessageReqPerfReceiveWaitForever_Body( ctx );
+}
+
+/**
+ * @brief Set the measured runtime. Restore the worker priority. Discard
+ * samples interrupted by a clock tick.
+ */
+static bool RtemsMessageReqPerfReceiveWaitForever_Teardown(
+ RtemsMessageValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc_success( ctx->status );
+
+ *delta = ctx->end - ctx->begin;
+ SetPriority( ctx->worker_id, PRIO_HIGH );
+
+ return tic == toc;
+}
+
+static bool RtemsMessageReqPerfReceiveWaitForever_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsMessageReqPerfReceiveWaitForever_Teardown(
+ ctx,
+ delta,
+ tic,
+ toc,
+ retry
+ );
+}
+
+/**
+ * @brief Schedule a message send.
+ */
+static void RtemsMessageReqPerfReceiveWaitTimed_Setup(
+ RtemsMessageValPerf_Context *ctx
+)
+{
+ SetPriority( ctx->worker_id, PRIO_LOW );
+ Send( ctx, EVENT_END | EVENT_SEND );
+}
+
+static void RtemsMessageReqPerfReceiveWaitTimed_Setup_Wrap( void *arg )
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsMessageReqPerfReceiveWaitTimed_Setup( ctx );
+}
+
+/**
+ * @brief Receive a message. Wait with a timeout.
+ */
+static void RtemsMessageReqPerfReceiveWaitTimed_Body(
+ RtemsMessageValPerf_Context *ctx
+)
+{
+ uint64_t message;
+ size_t size;
+
+ ctx->status = rtems_message_queue_receive(
+ ctx->queue_id,
+ &message,
+ &size,
+ RTEMS_WAIT,
+ UINT32_MAX
+ );
+}
+
+static void RtemsMessageReqPerfReceiveWaitTimed_Body_Wrap( void *arg )
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsMessageReqPerfReceiveWaitTimed_Body( ctx );
+}
+
+/**
+ * @brief Set the measured runtime. Restore the worker priority. Discard
+ * samples interrupted by a clock tick.
+ */
+static bool RtemsMessageReqPerfReceiveWaitTimed_Teardown(
+ RtemsMessageValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc_success( ctx->status );
+
+ *delta = ctx->end - ctx->begin;
+ SetPriority( ctx->worker_id, PRIO_HIGH );
+
+ return tic == toc;
+}
+
+static bool RtemsMessageReqPerfReceiveWaitTimed_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsMessageReqPerfReceiveWaitTimed_Teardown(
+ ctx,
+ delta,
+ tic,
+ toc,
+ retry
+ );
+}
+
+/**
+ * @brief Send a message.
+ */
+static void RtemsMessageReqPerfSend_Body( RtemsMessageValPerf_Context *ctx )
+{
+ uint64_t message;
+
+ ctx->status = rtems_message_queue_send(
+ ctx->queue_id,
+ &message,
+ sizeof( message )
+ );
+}
+
+static void RtemsMessageReqPerfSend_Body_Wrap( void *arg )
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsMessageReqPerfSend_Body( ctx );
+}
+
+/**
+ * @brief Flush the message queue. Discard samples interrupted by a clock
+ * tick.
+ */
+static bool RtemsMessageReqPerfSend_Teardown(
+ RtemsMessageValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ rtems_status_code sc;
+ uint32_t count;
+
+ T_quiet_rsc_success( ctx->status );
+
+ sc = rtems_message_queue_flush( ctx->queue_id, &count );
+ T_quiet_rsc_success( sc );
+ T_quiet_eq_u32( count, 1 );
+
+ return tic == toc;
+}
+
+static bool RtemsMessageReqPerfSend_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsMessageReqPerfSend_Teardown( ctx, delta, tic, toc, retry );
+}
+
+/**
+ * @brief Let the worker wait on the message queue.
+ */
+static void RtemsMessageReqPerfSendOther_Setup(
+ RtemsMessageValPerf_Context *ctx
+)
+{
+ Send( ctx, EVENT_RECEIVE );
+ SetPriority( ctx->worker_id, PRIO_LOW );
+}
+
+static void RtemsMessageReqPerfSendOther_Setup_Wrap( void *arg )
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsMessageReqPerfSendOther_Setup( ctx );
+}
+
+/**
+ * @brief Send a message.
+ */
+static void RtemsMessageReqPerfSendOther_Body(
+ RtemsMessageValPerf_Context *ctx
+)
+{
+ uint64_t message;
+
+ ctx->status = rtems_message_queue_send(
+ ctx->queue_id,
+ &message,
+ sizeof( message )
+ );
+}
+
+static void RtemsMessageReqPerfSendOther_Body_Wrap( void *arg )
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsMessageReqPerfSendOther_Body( ctx );
+}
+
+/**
+ * @brief Restore the worker priority. Discard samples interrupted by a clock
+ * tick.
+ */
+static bool RtemsMessageReqPerfSendOther_Teardown(
+ RtemsMessageValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc_success( ctx->status );
+
+ SetPriority( ctx->worker_id, PRIO_HIGH );
+
+ return tic == toc;
+}
+
+static bool RtemsMessageReqPerfSendOther_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsMessageReqPerfSendOther_Teardown( ctx, delta, tic, toc, retry );
+}
+
+#if defined(RTEMS_SMP)
+/**
+ * @brief Move worker to scheduler B.
+ */
+static void RtemsMessageReqPerfSendOtherCpu_Prepare(
+ RtemsMessageValPerf_Context *ctx
+)
+{
+ SetScheduler( ctx->worker_id, SCHEDULER_B_ID, PRIO_NORMAL );
+}
+
+/**
+ * @brief Let the worker wait on the message queue.
+ */
+static void RtemsMessageReqPerfSendOtherCpu_Setup(
+ RtemsMessageValPerf_Context *ctx
+)
+{
+ Send( ctx, EVENT_RECEIVE | EVENT_RECEIVE_END );
+ WaitForNextTask( 1, ctx->worker_id );
+}
+
+static void RtemsMessageReqPerfSendOtherCpu_Setup_Wrap( void *arg )
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsMessageReqPerfSendOtherCpu_Setup( ctx );
+}
+
+/**
+ * @brief Send a message.
+ */
+static void RtemsMessageReqPerfSendOtherCpu_Body(
+ RtemsMessageValPerf_Context *ctx
+)
+{
+ uint64_t message;
+
+ ctx->begin = T_tick();
+ ctx->status = rtems_message_queue_send(
+ ctx->queue_id,
+ &message,
+ sizeof( message )
+ );
+}
+
+static void RtemsMessageReqPerfSendOtherCpu_Body_Wrap( void *arg )
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsMessageReqPerfSendOtherCpu_Body( ctx );
+}
+
+/**
+ * @brief Set the measured runtime. Make sure the worker waits for the next
+ * event. Discard samples interrupted by a clock tick.
+ */
+static bool RtemsMessageReqPerfSendOtherCpu_Teardown(
+ RtemsMessageValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc_success( ctx->status );
+
+ *delta = ctx->end - ctx->begin;
+ WaitForNextTask( 1, ctx->worker_id );
+
+ return tic == toc;
+}
+
+static bool RtemsMessageReqPerfSendOtherCpu_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsMessageReqPerfSendOtherCpu_Teardown(
+ ctx,
+ delta,
+ tic,
+ toc,
+ retry
+ );
+}
+
+/**
+ * @brief Move worker to scheduler A.
+ */
+static void RtemsMessageReqPerfSendOtherCpu_Cleanup(
+ RtemsMessageValPerf_Context *ctx
+)
+{
+ SetScheduler( ctx->worker_id, SCHEDULER_A_ID, PRIO_HIGH );
+}
+#endif
+
+/**
+ * @brief Let the worker wait on the message queue.
+ */
+static void RtemsMessageReqPerfSendPreempt_Setup(
+ RtemsMessageValPerf_Context *ctx
+)
+{
+ Send( ctx, EVENT_RECEIVE | EVENT_RECEIVE_END );
+}
+
+static void RtemsMessageReqPerfSendPreempt_Setup_Wrap( void *arg )
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsMessageReqPerfSendPreempt_Setup( ctx );
+}
+
+/**
+ * @brief Send a message.
+ */
+static void RtemsMessageReqPerfSendPreempt_Body(
+ RtemsMessageValPerf_Context *ctx
+)
+{
+ uint64_t message;
+
+ ctx->begin = T_tick();
+ ctx->status = rtems_message_queue_send(
+ ctx->queue_id,
+ &message,
+ sizeof( message )
+ );
+}
+
+static void RtemsMessageReqPerfSendPreempt_Body_Wrap( void *arg )
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsMessageReqPerfSendPreempt_Body( ctx );
+}
+
+/**
+ * @brief Set the measured runtime. Discard samples interrupted by a clock
+ * tick.
+ */
+static bool RtemsMessageReqPerfSendPreempt_Teardown(
+ RtemsMessageValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc_success( ctx->status );
+
+ *delta = ctx->end - ctx->begin;
+
+ return tic == toc;
+}
+
+static bool RtemsMessageReqPerfSendPreempt_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsMessageValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsMessageReqPerfSendPreempt_Teardown(
+ ctx,
+ delta,
+ tic,
+ toc,
+ retry
+ );
+}
+
+/**
* @fn void T_case_body_RtemsMessageValPerf( void )
*/
T_TEST_CASE_FIXTURE( RtemsMessageValPerf, &RtemsMessageValPerf_Fixture )
@@ -138,6 +833,52 @@ T_TEST_CASE_FIXTURE( RtemsMessageValPerf, &RtemsMessageValPerf_Fixture )
RtemsMessageValPerf_Context *ctx;
ctx = T_fixture_context();
+
+ ctx->request.name = "RtemsMessageReqPerfReceiveTry";
+ ctx->request.setup = NULL;
+ ctx->request.body = RtemsMessageReqPerfReceiveTry_Body_Wrap;
+ ctx->request.teardown = RtemsMessageReqPerfReceiveTry_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+
+ ctx->request.name = "RtemsMessageReqPerfReceiveWaitForever";
+ ctx->request.setup = RtemsMessageReqPerfReceiveWaitForever_Setup_Wrap;
+ ctx->request.body = RtemsMessageReqPerfReceiveWaitForever_Body_Wrap;
+ ctx->request.teardown = RtemsMessageReqPerfReceiveWaitForever_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+
+ ctx->request.name = "RtemsMessageReqPerfReceiveWaitTimed";
+ ctx->request.setup = RtemsMessageReqPerfReceiveWaitTimed_Setup_Wrap;
+ ctx->request.body = RtemsMessageReqPerfReceiveWaitTimed_Body_Wrap;
+ ctx->request.teardown = RtemsMessageReqPerfReceiveWaitTimed_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+
+ ctx->request.name = "RtemsMessageReqPerfSend";
+ ctx->request.setup = NULL;
+ ctx->request.body = RtemsMessageReqPerfSend_Body_Wrap;
+ ctx->request.teardown = RtemsMessageReqPerfSend_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+
+ ctx->request.name = "RtemsMessageReqPerfSendOther";
+ ctx->request.setup = RtemsMessageReqPerfSendOther_Setup_Wrap;
+ ctx->request.body = RtemsMessageReqPerfSendOther_Body_Wrap;
+ ctx->request.teardown = RtemsMessageReqPerfSendOther_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+
+ #if defined(RTEMS_SMP)
+ RtemsMessageReqPerfSendOtherCpu_Prepare( ctx );
+ ctx->request.name = "RtemsMessageReqPerfSendOtherCpu";
+ ctx->request.setup = RtemsMessageReqPerfSendOtherCpu_Setup_Wrap;
+ ctx->request.body = RtemsMessageReqPerfSendOtherCpu_Body_Wrap;
+ ctx->request.teardown = RtemsMessageReqPerfSendOtherCpu_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+ RtemsMessageReqPerfSendOtherCpu_Cleanup( ctx );
+ #endif
+
+ ctx->request.name = "RtemsMessageReqPerfSendPreempt";
+ ctx->request.setup = RtemsMessageReqPerfSendPreempt_Setup_Wrap;
+ ctx->request.body = RtemsMessageReqPerfSendPreempt_Body_Wrap;
+ ctx->request.teardown = RtemsMessageReqPerfSendPreempt_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
}
/** @} */