-rw-r--r--  cpukit/include/rtems/recordclient.h         69
-rw-r--r--  cpukit/libtrace/record/record-client.c     207
-rw-r--r--  testsuites/libtests/record02/init.c          1
-rw-r--r--  testsuites/libtests/record02/record02.scn  146
4 files changed, 341 insertions(+), 82 deletions(-)
diff --git a/cpukit/include/rtems/recordclient.h b/cpukit/include/rtems/recordclient.h
index 01828e4f6b..d6fe5b8056 100644
--- a/cpukit/include/rtems/recordclient.h
+++ b/cpukit/include/rtems/recordclient.h
@@ -35,6 +35,7 @@
#include "recorddata.h"
+#include <stdbool.h>
#include <stddef.h>
#ifdef __cplusplus
@@ -54,7 +55,13 @@ typedef enum {
RTEMS_RECORD_CLIENT_ERROR_INVALID_MAGIC,
RTEMS_RECORD_CLIENT_ERROR_UNKNOWN_FORMAT,
RTEMS_RECORD_CLIENT_ERROR_UNSUPPORTED_VERSION,
- RTEMS_RECORD_CLIENT_ERROR_UNSUPPORTED_CPU
+ RTEMS_RECORD_CLIENT_ERROR_UNSUPPORTED_CPU,
+ RTEMS_RECORD_CLIENT_ERROR_UNSUPPORTED_CPU_MAX,
+ RTEMS_RECORD_CLIENT_ERROR_DOUBLE_CPU_MAX,
+ RTEMS_RECORD_CLIENT_ERROR_DOUBLE_PER_CPU_COUNT,
+ RTEMS_RECORD_CLIENT_ERROR_NO_CPU_MAX,
+ RTEMS_RECORD_CLIENT_ERROR_NO_MEMORY,
+ RTEMS_RECORD_CLIENT_ERROR_PER_CPU_ITEMS_OVERFLOW
} rtems_record_client_status;
typedef rtems_record_client_status ( *rtems_record_client_handler )(
@@ -67,21 +74,69 @@ typedef rtems_record_client_status ( *rtems_record_client_handler )(
);
typedef struct {
+ /**
+ * @brief Event time to uptime maintenance.
+ */
struct {
uint64_t bt;
uint32_t time_at_bt;
uint32_t time_last;
uint32_t time_accumulated;
} uptime;
+
+ /**
+ * @brief The current or previous ring buffer tail.
+ *
+ * Indexed by the tail_head_index member.
+ */
uint32_t tail[ 2 ];
+
+ /**
+ * @brief The current or previous ring buffer head.
+ *
+ * Indexed by the tail_head_index member.
+ */
uint32_t head[ 2 ];
+
+ /**
+ * @brief The index of the tail and head members.
+ *
+ * This index is used to maintain the current and previous tail/head
+ * positions to detect ring buffer overflows.
+ */
size_t tail_head_index;
+
+ /**
+ * @brief If true, then items are held back for overflow or initial ramp up
+ * processing.
+ */
+ bool hold_back;
+
+ /**
+ * @brief Storage for hold back items.
+ *
+ * In case of a ring buffer overflow, rtems_record_drain() will push the
+ * complete ring buffer content to the client. While the items are processed
+ * by the client, new items may overwrite some items being processed. The
+ * overwritten items can be detected in the following iteration once the next
+ * tail/head information is pushed to the client.
+ *
+ * In case of the initial ramp up, the items are stored in the hold back
+ * buffer to determine the uptime of the first event.
+ */
+ rtems_record_item_64 *items;
+
+ /**
+ * @brief The index for the next hold back item.
+ */
+ size_t item_index;
} rtems_record_client_per_cpu;
typedef struct rtems_record_client_context {
uint64_t to_bt_scaler;
rtems_record_client_per_cpu per_cpu[ RTEMS_RECORD_CLIENT_MAXIMUM_CPU_COUNT ];
uint32_t cpu;
+ uint32_t cpu_count;
uint32_t count;
union {
rtems_record_item_32 format_32;
@@ -128,6 +183,18 @@ rtems_record_client_status rtems_record_client_run(
size_t n
);
+/**
+ * @brief Drains all internal buffers and frees the allocated resources.
+ *
+ * The client context must not be used afterwards. It can be re-initialized
+ * via rtems_record_client_init().
+ *
+ * @param ctx The record client context.
+ */
+void rtems_record_client_destroy(
+ rtems_record_client_context *ctx
+);
+
/** @} */
#ifdef __cplusplus
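The header above now covers the full client life cycle: rtems_record_client_init(), rtems_record_client_run(), and the new rtems_record_client_destroy(). A minimal consumer sketch follows; the handler parameter list (bt, cpu, event, data, arg) and the exact rtems_record_client_init() prototype are assumptions, since neither is fully visible in this hunk.

#include <rtems/recordclient.h>

#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>

/* Handler invoked by the client for each decoded event; the parameter
   list is an assumption based on the handler typedef above */
static rtems_record_client_status print_event(
  uint64_t            bt,
  uint32_t            cpu,
  rtems_record_event  event,
  uint64_t            data,
  void               *arg
)
{
  (void) arg;
  printf(
    "%" PRIu64 ":%" PRIu32 ":%u:%" PRIx64 "\n",
    bt, cpu, (unsigned) event, data
  );
  return RTEMS_RECORD_CLIENT_SUCCESS;
}

static void consume( const void *buf, size_t n )
{
  rtems_record_client_context ctx;

  rtems_record_client_init( &ctx, print_event, NULL );
  (void) rtems_record_client_run( &ctx, buf, n );

  /* Flushes held back items and frees the per-CPU buffers; the context
     must be re-initialized via rtems_record_client_init() before reuse */
  rtems_record_client_destroy( &ctx );
}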
diff --git a/cpukit/libtrace/record/record-client.c b/cpukit/libtrace/record/record-client.c
index db79410acf..d97fe51b03 100644
--- a/cpukit/libtrace/record/record-client.c
+++ b/cpukit/libtrace/record/record-client.c
@@ -36,8 +36,17 @@
#include <rtems/recordclient.h>
+#include <stdlib.h>
#include <string.h>
+#define TIME_MASK ( ( UINT32_C( 1 ) << RTEMS_RECORD_TIME_BITS ) - 1 )
+
+static rtems_record_client_status visit(
+ rtems_record_client_context *ctx,
+ uint32_t time_event,
+ uint64_t data
+);
+
static void set_to_bt_scaler(
rtems_record_client_context *ctx,
uint32_t frequency
@@ -74,16 +83,84 @@ static rtems_record_client_status call_handler(
static void signal_overflow(
const rtems_record_client_context *ctx,
- const rtems_record_client_per_cpu *per_cpu,
+ rtems_record_client_per_cpu *per_cpu,
uint32_t data
)
{
- uint64_t bt;
+ per_cpu->hold_back = true;
+ per_cpu->item_index = 0;
+ call_handler( ctx, 0, RTEMS_RECORD_PER_CPU_OVERFLOW, data );
+}
+
+static void resolve_hold_back(
+ rtems_record_client_context *ctx,
+ rtems_record_client_per_cpu *per_cpu
+)
+{
+ if ( per_cpu->hold_back ) {
+ uint32_t last_head;
+ uint32_t new_head;
+ uint32_t overwritten;
+ uint32_t index;
+ uint32_t first;
+ uint32_t last;
+ uint32_t delta;
+ uint64_t uptime;
+
+ per_cpu->hold_back = false;
+
+ last_head = per_cpu->head[ per_cpu->tail_head_index ];
+ new_head = per_cpu->head[ per_cpu->tail_head_index ^ 1 ];
+ overwritten = new_head - last_head;
+
+ if ( overwritten >= per_cpu->item_index ) {
+ return;
+ }
+
+ if ( overwritten > 0 ) {
+ call_handler( ctx, 0, RTEMS_RECORD_PER_CPU_OVERFLOW, overwritten );
+ }
+
+ first = RTEMS_RECORD_GET_TIME( per_cpu->items[ overwritten ].event );
+ last = first;
+ delta = 0;
+ uptime = 0;
+
+ for ( index = overwritten; index < per_cpu->item_index; ++index ) {
+ const rtems_record_item_64 *item;
+ rtems_record_event event;
+ uint32_t time;
+
+ item = &per_cpu->items[ index ];
+ event = RTEMS_RECORD_GET_EVENT( item->event );
+ time = RTEMS_RECORD_GET_TIME( item->event );
+ delta += ( time - last ) & TIME_MASK;
+ last = time;
+
+ if (
+ event == RTEMS_RECORD_UPTIME_LOW
+ && index + 1 < per_cpu->item_index
+ && RTEMS_RECORD_GET_EVENT( ( item + 1 )->event )
+ == RTEMS_RECORD_UPTIME_HIGH
+ ) {
+ uptime = (uint32_t) item->data;
+ uptime += ( item + 1 )->data << 32;
+ break;
+ }
+ }
- bt = ( per_cpu->uptime.time_accumulated * ctx->to_bt_scaler ) >> 31;
- bt += per_cpu->uptime.bt;
+ per_cpu->uptime.bt = uptime - ( ( delta * ctx->to_bt_scaler ) >> 31 );
+ per_cpu->uptime.time_at_bt = first;
+ per_cpu->uptime.time_last = first;
+ per_cpu->uptime.time_accumulated = 0;
- call_handler( ctx, bt, RTEMS_RECORD_PER_CPU_OVERFLOW, data );
+ for ( index = overwritten; index < per_cpu->item_index; ++index ) {
+ const rtems_record_item_64 *item;
+
+ item = &per_cpu->items[ index ];
+ visit( ctx, item->event, item->data );
+ }
+ }
}
static void process_per_cpu_head(
@@ -113,6 +190,12 @@ static void process_per_cpu_head(
last_head = per_cpu->head[ per_cpu->tail_head_index ];
if ( last_tail == last_head ) {
+ if ( per_cpu->uptime.bt == 0 ) {
+ per_cpu->hold_back = true;
+ } else {
+ resolve_hold_back( ctx, per_cpu );
+ }
+
return;
}
@@ -120,12 +203,79 @@ static void process_per_cpu_head(
new_content = new_head - last_head;
if ( new_content <= capacity ) {
+ resolve_hold_back( ctx, per_cpu );
return;
}
signal_overflow( ctx, per_cpu, new_content - capacity );
}
+static rtems_record_client_status process_per_cpu_count(
+ rtems_record_client_context *ctx,
+ uint64_t data
+)
+{
+ size_t per_cpu_items;
+ rtems_record_item_64 *items;
+ uint32_t cpu;
+
+ if ( ctx->count != 0 ) {
+ return RTEMS_RECORD_CLIENT_ERROR_DOUBLE_PER_CPU_COUNT;
+ }
+
+ if ( ctx->cpu_count == 0 ) {
+ return RTEMS_RECORD_CLIENT_ERROR_NO_CPU_MAX;
+ }
+
+ ctx->count = (uint32_t) data;
+
+ /*
+ * The ring buffer capacity plus two items for RTEMS_RECORD_PROCESSOR and
+ * RTEMS_RECORD_PER_CPU_TAIL.
+ */
+ per_cpu_items = ctx->count + 1;
+
+ items = malloc( per_cpu_items * ctx->cpu_count * sizeof( *items ) );
+
+ if ( items == NULL ) {
+ return RTEMS_RECORD_CLIENT_ERROR_NO_MEMORY;
+ }
+
+ for ( cpu = 0; cpu < ctx->cpu_count; ++cpu ) {
+ ctx->per_cpu[ cpu ].items = items;
+ items += per_cpu_items;
+ }
+
+ return RTEMS_RECORD_CLIENT_SUCCESS;
+}
+
+static rtems_record_client_status hold_back(
+ rtems_record_client_context *ctx,
+ rtems_record_client_per_cpu *per_cpu,
+ uint32_t time_event,
+ rtems_record_event event,
+ uint64_t data
+)
+{
+ if ( event != RTEMS_RECORD_PER_CPU_HEAD ) {
+ uint32_t item_index;
+
+ item_index = per_cpu->item_index;
+
+ if ( item_index <= ctx->count ) {
+ per_cpu->items[ item_index ].event = time_event;
+ per_cpu->items[ item_index ].data = data;
+ per_cpu->item_index = item_index + 1;
+ } else {
+ return RTEMS_RECORD_CLIENT_ERROR_PER_CPU_ITEMS_OVERFLOW;
+ }
+ } else {
+ return call_handler( ctx, 0, RTEMS_RECORD_GET_EVENT( time_event ), data );
+ }
+
+ return RTEMS_RECORD_CLIENT_SUCCESS;
+}
+
static rtems_record_client_status visit(
rtems_record_client_context *ctx,
uint32_t time_event,
@@ -136,6 +286,7 @@ static rtems_record_client_status visit(
uint32_t time;
rtems_record_event event;
uint64_t bt;
+ rtems_record_client_status status;
per_cpu = &ctx->per_cpu[ ctx->cpu ];
time = RTEMS_RECORD_GET_TIME( time_event );
@@ -143,7 +294,7 @@ static rtems_record_client_status visit(
switch ( event ) {
case RTEMS_RECORD_PROCESSOR:
- if ( data >= RTEMS_RECORD_CLIENT_MAXIMUM_CPU_COUNT ) {
+ if ( data >= ctx->cpu_count ) {
return RTEMS_RECORD_CLIENT_ERROR_UNSUPPORTED_CPU;
}
@@ -167,8 +318,24 @@ static rtems_record_client_status visit(
case RTEMS_RECORD_PER_CPU_HEAD:
process_per_cpu_head( ctx, per_cpu, data );
break;
+ case RTEMS_RECORD_PROCESSOR_MAXIMUM:
+ if ( data >= RTEMS_RECORD_CLIENT_MAXIMUM_CPU_COUNT ) {
+ return RTEMS_RECORD_CLIENT_ERROR_UNSUPPORTED_CPU_MAX;
+ }
+
+ if ( ctx->cpu_count != 0 ) {
+ return RTEMS_RECORD_CLIENT_ERROR_DOUBLE_CPU_MAX;
+ }
+
+ ctx->cpu_count = (uint32_t) data + 1;
+ break;
case RTEMS_RECORD_PER_CPU_COUNT:
- ctx->count = (uint32_t) data;
+ status = process_per_cpu_count( ctx, data );
+
+ if ( status != RTEMS_RECORD_CLIENT_SUCCESS ) {
+ return status;
+ }
+
break;
case RTEMS_RECORD_FREQUENCY:
set_to_bt_scaler( ctx, (uint32_t) data );
@@ -183,11 +350,14 @@ static rtems_record_client_status visit(
break;
}
+ if ( per_cpu->hold_back ) {
+ return hold_back( ctx, per_cpu, time_event, event, data );
+ }
+
if ( time != 0 ) {
uint32_t delta;
- delta = ( time - per_cpu->uptime.time_last )
- & ( ( UINT32_C( 1 ) << RTEMS_RECORD_TIME_BITS ) - 1 );
+ delta = ( time - per_cpu->uptime.time_last ) & TIME_MASK;
per_cpu->uptime.time_last = time;
per_cpu->uptime.time_accumulated += delta;
bt = ( per_cpu->uptime.time_accumulated * ctx->to_bt_scaler ) >> 31;
@@ -471,3 +641,22 @@ rtems_record_client_status rtems_record_client_run(
{
return ( *ctx->consume )( ctx, buf, n );
}
+
+void rtems_record_client_destroy(
+ rtems_record_client_context *ctx
+)
+{
+ uint32_t cpu;
+
+ for ( cpu = 0; cpu < ctx->cpu_count; ++cpu ) {
+ rtems_record_client_per_cpu *per_cpu;
+
+ ctx->cpu = cpu;
+ per_cpu = &ctx->per_cpu[ cpu ];
+ per_cpu->head[ per_cpu->tail_head_index ^ 1 ] =
+ per_cpu->head[ per_cpu->tail_head_index ];
+ resolve_hold_back( ctx, per_cpu );
+ }
+
+ free( ctx->per_cpu[ 0 ].items );
+}
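resolve_hold_back() reconstructs 64-bit uptimes from the truncated per-item timestamps: deltas are accumulated with a wrap-around-safe mask and then scaled via to_bt_scaler. A standalone sketch of the masked delta step; the 24-bit width below is an arbitrary stand-in, the real width is RTEMS_RECORD_TIME_BITS from <rtems/recorddata.h>.

#include <assert.h>
#include <stdint.h>

#define TIME_BITS 24 /* illustrative stand-in for RTEMS_RECORD_TIME_BITS */
#define TIME_MASK ( ( UINT32_C( 1 ) << TIME_BITS ) - 1 )

static uint32_t time_delta( uint32_t now, uint32_t last )
{
  /* Unsigned subtraction plus masking handles counter wrap-around */
  return ( now - last ) & TIME_MASK;
}

int main( void )
{
  /* No wrap */
  assert( time_delta( 100, 40 ) == 60 );

  /* Wrap: last was near the top of the 24-bit range */
  assert( time_delta( 5, TIME_MASK - 9 ) == 15 );

  return 0;
}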
diff --git a/testsuites/libtests/record02/init.c b/testsuites/libtests/record02/init.c
index 5aeeb18cd9..f1c93f7df8 100644
--- a/testsuites/libtests/record02/init.c
+++ b/testsuites/libtests/record02/init.c
@@ -105,6 +105,7 @@ static void Init(rtems_task_argument arg)
cs = rtems_record_client_run(&ctx->client, &header, sizeof(header));
rtems_test_assert(cs == RTEMS_RECORD_CLIENT_SUCCESS);
rtems_record_drain(drain_visitor, ctx);
+ rtems_record_client_destroy(&ctx->client);
TEST_END();
rtems_test_exit(0);
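For context, drain_visitor (not part of this hunk) presumably forwards the drained ring buffer items to the record client. A sketch, assuming the rtems_record_drain() visitor signature from <rtems/record.h> and relying on the test's own test_context type and declarations:

static void drain_visitor(
  const rtems_record_item *items,
  size_t                   count,
  void                    *arg
)
{
  test_context *ctx = arg;
  rtems_record_client_status cs;

  /* Feed the raw items to the client, which decodes and dispatches them */
  cs = rtems_record_client_run( &ctx->client, items, count * sizeof( *items ) );
  rtems_test_assert( cs == RTEMS_RECORD_CLIENT_SUCCESS );
}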
diff --git a/testsuites/libtests/record02/record02.scn b/testsuites/libtests/record02/record02.scn
index 487c601caf..4535cbbdca 100644
--- a/testsuites/libtests/record02/record02.scn
+++ b/testsuites/libtests/record02/record02.scn
@@ -1,82 +1,84 @@
*** BEGIN OF TEST RECORD 2 ***
-*** TEST VERSION: 5.0.0.f0ae613ba72bc4b95797e2482c4bd0fa5546331d
+*** TEST VERSION: 5.0.0.b2c060a507712f6043d7c7f0cf7b50661f25d8c3
*** TEST STATE: EXPECTED-PASS
-*** TEST BUILD: RTEMS_NETWORKING
-*** TEST TOOLS: 7.4.0 20181206 (RTEMS 5, RSB 376edee1c734fb412b731a7d9e57757dd4cc5f07, Newlib dc6e94551f09d3a983afd571478d63a09d6f66fa)
-*:0:VERSION:1
+*** TEST BUILD: RTEMS_NETWORKING RTEMS_POSIX_API
+*** TEST TOOLS: 7.4.1 20190514 (RTEMS 5, RSB 639eed3c77c830a2ac9f36baca21919f8b7bf1be, Newlib 5c2a3661c)
+*:0:VERSION:5
*:0:PROCESSOR_MAXIMUM:0
-*:0:COUNT:80
+*:0:PER_CPU_COUNT:80
*:0:FREQUENCY:f4240
*:0:PROCESSOR:0
-*:0:TAIL:0
-*:0:HEAD:44
+*:0:PER_CPU_TAIL:0
+*:0:PER_CPU_HEAD:46
*:0:THREAD_CREATE:9010001
+*:0:THREAD_NAME:49494949
*:0:THREAD_START:9010001
-*:0:UPTIME_LOW:9a240
+*:0:UPTIME_LOW:852b4
*:0:UPTIME_HIGH:1
-1.000537999:0:THREAD_CREATE:a010001
-1.000843999:0:THREAD_START:a010001
-1.003692999:0:THREAD_BEGIN:a010001
-1.005548999:0:THREAD_SWITCH_OUT:a010001
-1.005548999:0:THREAD_STACK_CURRENT:d48
-1.005548999:0:THREAD_SWITCH_IN:9010001
-1.005645999:0:THREAD_BEGIN:9010001
-1.013832999:0:THREAD_SWITCH_OUT:9010001
-1.013832999:0:THREAD_STACK_CURRENT:d38
-1.013832999:0:THREAD_SWITCH_IN:a010001
-1.014077999:0:THREAD_SWITCH_OUT:a010001
-1.014077999:0:THREAD_STACK_CURRENT:d48
-1.014077999:0:THREAD_SWITCH_IN:9010001
-1.023821999:0:THREAD_SWITCH_OUT:9010001
-1.023821999:0:THREAD_STACK_CURRENT:d38
-1.023821999:0:THREAD_SWITCH_IN:a010001
-1.024065999:0:THREAD_SWITCH_OUT:a010001
-1.024065999:0:THREAD_STACK_CURRENT:d48
-1.024065999:0:THREAD_SWITCH_IN:9010001
-1.033821999:0:THREAD_SWITCH_OUT:9010001
-1.033821999:0:THREAD_STACK_CURRENT:d38
-1.033821999:0:THREAD_SWITCH_IN:a010001
-1.034065999:0:THREAD_SWITCH_OUT:a010001
-1.034065999:0:THREAD_STACK_CURRENT:d48
-1.034065999:0:THREAD_SWITCH_IN:9010001
-1.043821999:0:THREAD_SWITCH_OUT:9010001
-1.043821999:0:THREAD_STACK_CURRENT:d38
-1.043821999:0:THREAD_SWITCH_IN:a010001
-1.044065999:0:THREAD_SWITCH_OUT:a010001
-1.044065999:0:THREAD_STACK_CURRENT:d48
-1.044065999:0:THREAD_SWITCH_IN:9010001
-1.053821999:0:THREAD_SWITCH_OUT:9010001
-1.053821999:0:THREAD_STACK_CURRENT:d38
-1.053821999:0:THREAD_SWITCH_IN:a010001
-1.054065999:0:THREAD_SWITCH_OUT:a010001
-1.054065999:0:THREAD_STACK_CURRENT:d48
-1.054065999:0:THREAD_SWITCH_IN:9010001
-1.063821999:0:THREAD_SWITCH_OUT:9010001
-1.063821999:0:THREAD_STACK_CURRENT:d38
-1.063821999:0:THREAD_SWITCH_IN:a010001
-1.064065999:0:THREAD_SWITCH_OUT:a010001
-1.064065999:0:THREAD_STACK_CURRENT:d48
-1.064065999:0:THREAD_SWITCH_IN:9010001
-1.073821999:0:THREAD_SWITCH_OUT:9010001
-1.073821999:0:THREAD_STACK_CURRENT:d38
-1.073821999:0:THREAD_SWITCH_IN:a010001
-1.074065999:0:THREAD_SWITCH_OUT:a010001
-1.074065999:0:THREAD_STACK_CURRENT:d48
-1.074065999:0:THREAD_SWITCH_IN:9010001
-1.083821999:0:THREAD_SWITCH_OUT:9010001
-1.083821999:0:THREAD_STACK_CURRENT:d38
-1.083821999:0:THREAD_SWITCH_IN:a010001
-1.084065999:0:THREAD_SWITCH_OUT:a010001
-1.084065999:0:THREAD_STACK_CURRENT:d48
-1.084065999:0:THREAD_SWITCH_IN:9010001
-1.093821999:0:THREAD_SWITCH_OUT:9010001
-1.093821999:0:THREAD_STACK_CURRENT:d38
-1.093821999:0:THREAD_SWITCH_IN:a010001
-1.094065999:0:THREAD_SWITCH_OUT:a010001
-1.094065999:0:THREAD_STACK_CURRENT:d48
-1.094065999:0:THREAD_SWITCH_IN:9010001
-1.103821999:0:THREAD_SWITCH_OUT:9010001
-1.103821999:0:THREAD_STACK_CURRENT:d38
-1.103821999:0:THREAD_SWITCH_IN:a010001
+1.000436999:0:THREAD_CREATE:a010001
+1.000436999:0:THREAD_NAME:55555555
+1.000627999:0:THREAD_START:a010001
+1.001891999:0:THREAD_BEGIN:a010001
+1.003466999:0:THREAD_SWITCH_OUT:a010001
+1.003466999:0:THREAD_STACK_CURRENT:d78
+1.003466999:0:THREAD_SWITCH_IN:9010001
+1.003533999:0:THREAD_BEGIN:9010001
+1.005078999:0:THREAD_SWITCH_OUT:9010001
+1.005078999:0:THREAD_STACK_CURRENT:d68
+1.005078999:0:THREAD_SWITCH_IN:a010001
+1.005218999:0:THREAD_SWITCH_OUT:a010001
+1.005218999:0:THREAD_STACK_CURRENT:d78
+1.005218999:0:THREAD_SWITCH_IN:9010001
+1.015067999:0:THREAD_SWITCH_OUT:9010001
+1.015067999:0:THREAD_STACK_CURRENT:d68
+1.015067999:0:THREAD_SWITCH_IN:a010001
+1.015207999:0:THREAD_SWITCH_OUT:a010001
+1.015207999:0:THREAD_STACK_CURRENT:d78
+1.015207999:0:THREAD_SWITCH_IN:9010001
+1.025067999:0:THREAD_SWITCH_OUT:9010001
+1.025067999:0:THREAD_STACK_CURRENT:d68
+1.025067999:0:THREAD_SWITCH_IN:a010001
+1.025207999:0:THREAD_SWITCH_OUT:a010001
+1.025207999:0:THREAD_STACK_CURRENT:d78
+1.025207999:0:THREAD_SWITCH_IN:9010001
+1.035067999:0:THREAD_SWITCH_OUT:9010001
+1.035067999:0:THREAD_STACK_CURRENT:d68
+1.035067999:0:THREAD_SWITCH_IN:a010001
+1.035207999:0:THREAD_SWITCH_OUT:a010001
+1.035207999:0:THREAD_STACK_CURRENT:d78
+1.035207999:0:THREAD_SWITCH_IN:9010001
+1.045067999:0:THREAD_SWITCH_OUT:9010001
+1.045067999:0:THREAD_STACK_CURRENT:d68
+1.045067999:0:THREAD_SWITCH_IN:a010001
+1.045207999:0:THREAD_SWITCH_OUT:a010001
+1.045207999:0:THREAD_STACK_CURRENT:d78
+1.045207999:0:THREAD_SWITCH_IN:9010001
+1.055067999:0:THREAD_SWITCH_OUT:9010001
+1.055067999:0:THREAD_STACK_CURRENT:d68
+1.055067999:0:THREAD_SWITCH_IN:a010001
+1.055207999:0:THREAD_SWITCH_OUT:a010001
+1.055207999:0:THREAD_STACK_CURRENT:d78
+1.055207999:0:THREAD_SWITCH_IN:9010001
+1.065067999:0:THREAD_SWITCH_OUT:9010001
+1.065067999:0:THREAD_STACK_CURRENT:d68
+1.065067999:0:THREAD_SWITCH_IN:a010001
+1.065207999:0:THREAD_SWITCH_OUT:a010001
+1.065207999:0:THREAD_STACK_CURRENT:d78
+1.065207999:0:THREAD_SWITCH_IN:9010001
+1.075067999:0:THREAD_SWITCH_OUT:9010001
+1.075067999:0:THREAD_STACK_CURRENT:d68
+1.075067999:0:THREAD_SWITCH_IN:a010001
+1.075207999:0:THREAD_SWITCH_OUT:a010001
+1.075207999:0:THREAD_STACK_CURRENT:d78
+1.075207999:0:THREAD_SWITCH_IN:9010001
+1.085067999:0:THREAD_SWITCH_OUT:9010001
+1.085067999:0:THREAD_STACK_CURRENT:d68
+1.085067999:0:THREAD_SWITCH_IN:a010001
+1.085207999:0:THREAD_SWITCH_OUT:a010001
+1.085207999:0:THREAD_STACK_CURRENT:d78
+1.085207999:0:THREAD_SWITCH_IN:9010001
+1.095067999:0:THREAD_SWITCH_OUT:9010001
+1.095067999:0:THREAD_STACK_CURRENT:d68
+1.095067999:0:THREAD_SWITCH_IN:a010001
*** END OF TEST RECORD 2 ***
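The timestamps in the expected output are seconds and nanoseconds derived from bt. A conversion sketch, assuming bt is a 32.32 fixed-point seconds value (an assumption; only the >> 31 scaler arithmetic is visible in this commit). A truncating conversion of this kind would also explain why the fractional parts above end in ...999.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Split an assumed 32.32 fixed-point binary time into seconds and
   nanoseconds */
static void print_bt( uint64_t bt )
{
  uint32_t seconds  = (uint32_t) ( bt >> 32 );
  uint32_t fraction = (uint32_t) bt;
  /* Truncating fraction-to-nanoseconds conversion */
  uint32_t ns = (uint32_t)
    ( ( (uint64_t) fraction * UINT64_C( 1000000000 ) ) >> 32 );

  printf( "%" PRIu32 ".%09" PRIu32 "\n", seconds, ns );
}

int main( void )
{
  print_bt( UINT64_C( 0x180000000 ) ); /* prints 1.500000000 */
  return 0;
}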