path: root/cpukit/score/src/percpujobs.c
author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-07-28 13:11:41 +0200
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-07-28 19:32:24 +0200
commit    f799b4522ff966b4512443662134a5154a6be341 (patch)
tree      8d90db7124b7f65cd11ba52d91327d9a702715cc /cpukit/score/src/percpujobs.c
parent    score: Remove SMP message multicast/broadcast (diff)
download  rtems-f799b4522ff966b4512443662134a5154a6be341.tar.bz2
score: Move per-CPU jobs support
Add percpujobs.c to contain the per-CPU jobs implementation.
Diffstat (limited to 'cpukit/score/src/percpujobs.c')
-rw-r--r--  cpukit/score/src/percpujobs.c  124
1 file changed, 124 insertions, 0 deletions
diff --git a/cpukit/score/src/percpujobs.c b/cpukit/score/src/percpujobs.c
new file mode 100644
index 0000000000..4ce96dc738
--- /dev/null
+++ b/cpukit/score/src/percpujobs.c
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScorePerCPU
+ *
+ * @brief This source file contains the implementation of _Per_CPU_Add_job(),
+ * _Per_CPU_Perform_jobs(), and _Per_CPU_Wait_for_job().
+ */
+
+/*
+ * Copyright (C) 2019 embedded brains GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/smpimpl.h>
+#include <rtems/score/assert.h>
+
+#define _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, lock_context ) \
+ _ISR_lock_ISR_disable_and_acquire( &( cpu )->Jobs.Lock, lock_context )
+
+#define _Per_CPU_Jobs_release_and_ISR_enable( cpu, lock_context ) \
+ _ISR_lock_Release_and_ISR_enable( &( cpu )->Jobs.Lock, lock_context )
+
+void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu )
+{
+ ISR_lock_Context lock_context;
+ Per_CPU_Job *job;
+
+ _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, &lock_context );
+ job = cpu->Jobs.head;
+ cpu->Jobs.head = NULL;
+ _Per_CPU_Jobs_release_and_ISR_enable( cpu, &lock_context );
+
+ while ( job != NULL ) {
+ const Per_CPU_Job_context *context;
+ Per_CPU_Job *next;
+
+ context = job->context;
+ next = job->next;
+ ( *context->handler )( context->arg );
+ _Atomic_Store_ulong( &job->done, PER_CPU_JOB_DONE, ATOMIC_ORDER_RELEASE );
+
+ job = next;
+ }
+}
+
+void _Per_CPU_Add_job( Per_CPU_Control *cpu, Per_CPU_Job *job )
+{
+ ISR_lock_Context lock_context;
+
+ _Atomic_Store_ulong( &job->done, 0, ATOMIC_ORDER_RELAXED );
+ _Assert( job->next == NULL );
+
+ _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, &lock_context );
+
+ if ( cpu->Jobs.head == NULL ) {
+ cpu->Jobs.head = job;
+ } else {
+ *cpu->Jobs.tail = job;
+ }
+
+ cpu->Jobs.tail = &job->next;
+
+ _Per_CPU_Jobs_release_and_ISR_enable( cpu, &lock_context );
+}
+
+void _Per_CPU_Wait_for_job(
+ const Per_CPU_Control *cpu,
+ const Per_CPU_Job *job
+)
+{
+ while (
+ _Atomic_Load_ulong( &job->done, ATOMIC_ORDER_ACQUIRE )
+ != PER_CPU_JOB_DONE
+ ) {
+ Per_CPU_Control *cpu_self;
+
+ switch ( _Per_CPU_Get_state( cpu ) ) {
+ case PER_CPU_STATE_INITIAL:
+ case PER_CPU_STATE_READY_TO_START_MULTITASKING:
+ case PER_CPU_STATE_UP:
+ /*
+ * Calling this function with the current processor is intentional. We
+ * have to perform our own jobs here in case inter-processor interrupts
+ * are not working.
+ */
+ cpu_self = _Per_CPU_Get();
+ _SMP_Try_to_process_message(
+ cpu_self,
+ _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED )
+ );
+ break;
+ default:
+ _SMP_Fatal( SMP_FATAL_WRONG_CPU_STATE_TO_PERFORM_JOBS );
+ break;
+ }
+ }
+}
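
A minimal usage sketch of the interface added above. The field names (context,
handler, arg, next, done) follow the diff; the handler, the stack-allocated job,
and the assumption that something on the target processor eventually calls
_Per_CPU_Perform_jobs() (normally driven by an SMP "perform jobs" inter-processor
message) are illustrative and not part of this commit.

    #include <rtems/score/percpu.h>

    /* Hypothetical handler: runs on the processor that performs the job. */
    static void do_remote_work( void *arg )
    {
      (void) arg;
      /* ... work that must execute on the target processor ... */
    }

    static void run_job_on( Per_CPU_Control *cpu )
    {
      /* Initializers are an assumption based on the members used in the diff. */
      Per_CPU_Job_context context = { .handler = do_remote_work, .arg = NULL };
      Per_CPU_Job         job = { .context = &context };

      /* Append the job to the target processor's job FIFO. */
      _Per_CPU_Add_job( cpu, &job );

      /*
       * The target processor must eventually run _Per_CPU_Perform_jobs();
       * in-tree callers trigger this via an SMP message/IPI, which this
       * sketch omits.  Busy-wait until the handler marked the job done.
       */
      _Per_CPU_Wait_for_job( cpu, &job );
    }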