From 32f1f747cc7789d105f16227cafbe96b0371ae4d Mon Sep 17 00:00:00 2001
From: Sebastian Huber <sebastian.huber@embedded-brains.de>
Date: Thu, 6 Aug 2020 19:12:55 +0200
Subject: libtest: Fix T_interrupt_test() in SMP configs

Update #3199.
---
 cpukit/libtest/t-test-interrupt.c | 45 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

(limited to 'cpukit/libtest')

diff --git a/cpukit/libtest/t-test-interrupt.c b/cpukit/libtest/t-test-interrupt.c
index 7012319dd9..530fc74188 100644
--- a/cpukit/libtest/t-test-interrupt.c
+++ b/cpukit/libtest/t-test-interrupt.c
@@ -47,6 +47,10 @@
 #include <rtems/score/threadimpl.h>
 #include <rtems/score/watchdogimpl.h>
 
+#ifdef RTEMS_SMP
+#include <rtems/score/smpimpl.h>
+#endif
+
 typedef T_interrupt_test_state (*T_interrupt_test_handler)(void *);
 
 #define T_INTERRUPT_SAMPLE_COUNT 8
@@ -61,6 +65,10 @@ typedef struct {
 	T_interrupt_test_state (*interrupt)(void *);
 	void (*blocked)(void *);
 	void *arg;
+#ifdef RTEMS_SMP
+	Per_CPU_Job job;
+	Per_CPU_Job_context job_context;
+#endif
 	Watchdog_Control wdg;
 	User_extensions_Control ext;
 	T_fixture_node node;
@@ -199,11 +207,31 @@ T_interrupt_do_nothing(void *arg)
 	(void)arg;
 }
 
+#ifdef RTEMS_SMP
+static void
+T_interrupt_blocked(void *arg)
+{
+	T_interrupt_context *ctx;
+
+	ctx = arg;
+	(*ctx->blocked)(ctx->arg);
+}
+#endif
+
 static void T_interrupt_thread_switch(Thread_Control *, Thread_Control *);
 
 static T_interrupt_context T_interrupt_instance = {
 	.interrupt = T_interrupt_continue,
 	.blocked = T_interrupt_do_nothing,
+#ifdef RTEMS_SMP
+	.job = {
+		.context = &T_interrupt_instance.job_context
+	},
+	.job_context = {
+		.handler = T_interrupt_blocked,
+		.arg = &T_interrupt_instance
+	},
+#endif
 	.wdg = WATCHDOG_INITIALIZER(T_interrupt_watchdog),
 	.ext = {
 		.Callouts = {
@@ -263,7 +291,24 @@ T_interrupt_thread_switch(Thread_Control *executing, Thread_Control *heir)
 		state = _Atomic_Load_uint(&ctx->state, ATOMIC_ORDER_RELAXED);
 
 		if (state != T_INTERRUPT_TEST_INITIAL) {
+#ifdef RTEMS_SMP
+			Per_CPU_Control *cpu_self;
+
+			/*
+			 * In SMP configurations, the thread switch extension
+			 * runs in a very restricted environment.  Interrupts
+			 * are disabled and the caller owns the per-CPU lock.
+			 * In order to avoid deadlocks at SMP lock level, we
+			 * have to use an SMP job which runs later in the
+			 * context of the inter-processor interrupt.
+			 */
+			cpu_self = _Per_CPU_Get();
+			_Per_CPU_Add_job(cpu_self, &ctx->job);
+			_SMP_Send_message(_Per_CPU_Get_index(cpu_self),
+			    SMP_MESSAGE_PERFORM_JOBS);
+#else
 			(*ctx->blocked)(ctx->arg);
+#endif
 		}
 	}
 }
-- 
cgit v1.2.3