author     Jennifer Averett <jennifer.averett@oarcorp.com>  2014-05-09 08:35:58 -0500
committer  Jennifer Averett <jennifer.averett@oarcorp.com>  2014-05-09 08:37:18 -0500
commit     2d51251192aac8527e773c98047d8597a7f02b8a (patch)
tree       f0098e58a37b434d4e8fcecc550e3b810da41379
parent     schedsim_priority: Remove scenario output files. (diff)
download   rtems-schedsim-2d51251192aac8527e773c98047d8597a7f02b8a.tar.bz2
schedsim: Add smp support.
-rw-r--r--  schedsim/configure.ac                                        5
-rw-r--r--  schedsim/rtems/Makefile.am                                  22
-rw-r--r--  schedsim/rtems/rtems_init.c                                 35
-rw-r--r--  schedsim/rtems/sched_cpu/cpu.c                              41
-rw-r--r--  schedsim/rtems/sched_cpu/machine/_default_types.h          184
-rw-r--r--  schedsim/rtems/sched_cpu/machine/_types.h                    8
-rw-r--r--  schedsim/rtems/sched_cpu/rtems/score/asm.h                 120
-rw-r--r--  schedsim/rtems/sched_cpu/rtems/score/cpu.h                  34
-rw-r--r--  schedsim/rtems/sched_cpu/rtems/score/cpuatomic.h            14
-rw-r--r--  schedsim/rtems/sched_cpu/stdatomic.h                       422
-rw-r--r--  schedsim/rtems/sched_cpu/sys/_types.h                       54
-rw-r--r--  schedsim/rtems/sched_cpu/sys/cpuset.h                        9
-rw-r--r--  schedsim/rtems/sched_cpu/sys/features.h                     16
-rw-r--r--  schedsim/rtems/sched_cpu/sys/lock.h                         35
-rw-r--r--  schedsim/shell/schedsim_priority/smp_stub.c                 48
-rw-r--r--  schedsim/shell/schedsim_priority/wrap_thread_dispatch.c      4
-rw-r--r--  schedsim/shell/schedsim_smpsimple/Makefile.am                9
-rw-r--r--  schedsim/shell/schedsim_smpsimple/main_current_cpu.c         1
-rw-r--r--  schedsim/shell/schedsim_smpsimple/main_dispatch.c            1
-rw-r--r--  schedsim/shell/schedsim_smpsimple/main_dump_ready_tasks.c   15
-rw-r--r--  schedsim/shell/schedsim_smpsimple/smp_stub.c                62
-rw-r--r--  schedsim/shell/schedsim_smpsimple/wrap_thread_dispatch.c    36
22 files changed, 1034 insertions(+), 141 deletions(-)
diff --git a/schedsim/configure.ac b/schedsim/configure.ac
index f5fc1ef..8012b2f 100644
--- a/schedsim/configure.ac
+++ b/schedsim/configure.ac
@@ -77,6 +77,11 @@ RTEMS_CPUOPT([RTEMS_SMP],
[1],
[if SMP is enabled])
+RTEMS_CPUOPT([__RTEMS_HAVE_SYS_CPUSET_H__],
+ [true],
+ [1],
+ [<sys/cpuset.h> is provided])
+
RTEMS_CPUOPT([RTEMS_NETWORKING],
[test x"$rtems_cv_HAS_NETWORKING" = xyes],
[1],
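
For context: each RTEMS_CPUOPT entry becomes a #define in the generated rtems/score/cpuopts.h, so the new option lets cpukit code include the affinity header conditionally. A minimal sketch of the resulting guard (illustrative only, not part of this patch):

    #include <rtems/score/cpuopts.h>   /* now carries: #define __RTEMS_HAVE_SYS_CPUSET_H__ 1 */

    #if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
      #include <sys/cpuset.h>          /* the override header added later in this patch */
    #endif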
diff --git a/schedsim/rtems/Makefile.am b/schedsim/rtems/Makefile.am
index f69f11c..f7cf8b5 100644
--- a/schedsim/rtems/Makefile.am
+++ b/schedsim/rtems/Makefile.am
@@ -3,7 +3,9 @@ ACLOCAL_AMFLAGS = -I ../../aclocal
lib_LIBRARIES = librtems.a
cpukitdir=@rtems_srcdir@/cpukit
+rtemscdir=@rtems_srcdir@/c
librtems_a_CPPFLAGS = -D__RTEMS_VIOLATE_KERNEL_VISIBILITY__
+librtems_a_CPPFLAGS += -D_GNU_SOURCE
librtems_a_CPPFLAGS += -I$(top_builddir)/score/include
librtems_a_CPPFLAGS += -I$(srcdir)/sched_cpu
librtems_a_CPPFLAGS += -I$(cpukitdir)/include
@@ -245,7 +247,10 @@ librtems_a_SOURCES += $(cpukitdir)/score/src/threadenabledispatch.c
librtems_a_SOURCES += $(cpukitdir)/score/src/threaddispatchdisablelevel.c
librtems_a_SOURCES += $(cpukitdir)/score/src/schedulerprioritysmp.c
librtems_a_SOURCES += $(cpukitdir)/score/src/schedulersimplesmp.c
-librtems_a_SOURCES += $(cpukitdir)/score/src/schedulersmpstartidle.c
+librtems_a_SOURCES += $(cpukitdir)/score/src/debugisthreaddispatchingallowed.c
+librtems_a_SOURCES += $(cpukitdir)/score/src/schedulerdefaultgetaffinity.c
+librtems_a_SOURCES += $(cpukitdir)/score/src/schedulerdefaultsetaffinity.c
+librtems_a_SOURCES += $(cpukitdir)/score/src/cpuset.c
endif
schedsim_includedir = $(includedir)/schedsim
@@ -254,7 +259,14 @@ schedsim_rtems_score_includedir = $(includedir)/schedsim/rtems/score
schedsim_rtems_rtems_includedir = $(includedir)/schedsim/rtems/rtems
schedsim_include_HEADERS = \
- ${cpukitdir}/rtems/include/rtems.h
+ ${cpukitdir}/rtems/include/rtems.h \
+ sched_cpu/stdatomic.h \
+ sched_cpu/sys/_types.h \
+ sched_cpu/machine/_types.h \
+ sched_cpu/machine/_default_types.h \
+ sched_cpu/sys/features.h \
+ sched_cpu/sys/lock.h \
+ sched_cpu/sys/cpuset.h
schedsim_rtems_include_HEADERS = \
${cpukitdir}/score/include//rtems/seterr.h \
@@ -281,7 +293,7 @@ schedsim_rtems_include_HEADERS = \
$(cpukitdir)/libcsupport/include/rtems/termiostypes.h \
$(cpukitdir)/libcsupport/include/rtems/malloc.h \
sched_cpu/rtems/stringto.h \
- sched_cpu/rtems/asm.h
+ sched_cpu/rtems/asm.h
if HAS_PTHREADS
schedsim_rtems_include_HEADERS += \
@@ -292,6 +304,9 @@ schedsim_rtems_score_include_HEADERS = \
${cpukitdir}/score/include/rtems/score/address.h \
${cpukitdir}/score/include/rtems/score/coremutex.h \
${cpukitdir}/score/include/rtems/score/corerwlock.h \
+ ${cpukitdir}/score/include/rtems/score/cpuset.h \
+ ${cpukitdir}/score/include/rtems/score/cpusetimpl.h \
+ ${cpukitdir}/score/include/rtems/score/cpustdatomic.h \
${cpukitdir}/score/include/rtems/score/threadsync.h \
${cpukitdir}/score/include/rtems/score/priority.h \
${cpukitdir}/score/include/rtems/score/sysstate.h \
@@ -334,6 +349,7 @@ schedsim_rtems_score_include_HEADERS = \
sched_cpu/rtems/score/cpu_asm.h \
$(top_builddir)/score/include/rtems/score/cpuopts.h \
sched_cpu/rtems/score/cpu.h \
+ sched_cpu/rtems/score/cpuatomic.h \
sched_cpu/rtems/score/types.h \
sched_cpu/rtems/score/no_cpu.h
diff --git a/schedsim/rtems/rtems_init.c b/schedsim/rtems/rtems_init.c
index bcf068f..8df4681 100644
--- a/schedsim/rtems/rtems_init.c
+++ b/schedsim/rtems/rtems_init.c
@@ -46,6 +46,8 @@
#include <rtems/posix/keyimpl.h>
+void Init__wrap__Thread_Dispatch();
+
/*
* Declare Object Information tables directly here instead of API
* specific initialization files as in cpukit/sapi/src.
@@ -135,15 +137,44 @@ void rtems_initialize_data_structures(void)
_System_state_Set( SYSTEM_STATE_UP );
- _SMP_Request_start_multitasking();
+ _SMP_Request_start_multitasking();
_Thread_Start_multitasking();
+ /* Add Initialization of the Thread_Dispatch wrapper */
+ Init__wrap__Thread_Dispatch();
+
/*
* Now we are back in a non-dispatching critical section
*/
#if defined(RTEMS_SMP)
- #error "NOT IMPLEMENTED"
+ {
+ ISR_Level level;
+
+ /*
+ * On SMP we enter _Thread_Handler() with interrupts disabled and
+ * _Thread_Dispatch() obtained the per-CPU lock for us. We have to
+ * release it here and set the desired interrupt level of the thread.
+ */
+ Per_CPU_Control *cpu_self = _Per_CPU_Get();
+
+ _Assert( cpu_self->thread_dispatch_disable_level == 1 );
+ _Assert( _ISR_Get_level() != 0 );
+
+ cpu_self->thread_dispatch_disable_level = 0;
+ _Profiling_Thread_dispatch_enable( cpu_self, 0 );
+
+ _Per_CPU_Release( cpu_self );
+
+ level = _Thread_Executing->Start.isr_level;
+ _ISR_Set_level( level);
+
+ /*
+ * The thread dispatch level changed from one to zero. Make sure we lose
+ * no thread dispatch necessary update.
+ */
+ _Thread_Dispatch();
+ }
#else
_Thread_Enable_dispatch();
#endif
diff --git a/schedsim/rtems/sched_cpu/cpu.c b/schedsim/rtems/sched_cpu/cpu.c
index 3d0c095..8ac3452 100644
--- a/schedsim/rtems/sched_cpu/cpu.c
+++ b/schedsim/rtems/sched_cpu/cpu.c
@@ -19,6 +19,8 @@
#include <rtems/score/isr.h>
#include <rtems/score/wkspace.h>
+int _CPU_ISR_level_on_sched_cpu;
+
/* _CPU_Initialize
*
* This routine performs processor dependent initialization.
@@ -32,36 +34,10 @@
void _CPU_Initialize(void)
{
- /*
- * If there is not an easy way to initialize the FP context
- * during Context_Initialize, then it is usually easier to
- * save an "uninitialized" FP context here and copy it to
- * the task's during Context_Initialize.
- */
-
- /* FP context initialization support goes here */
+ _CPU_ISR_level_on_sched_cpu = 1;
}
-/*PAGE
- *
- * _CPU_ISR_Get_level
- *
- * NO_CPU Specific Information:
- *
- * XXX document implementation including references if appropriate
- */
-
-uint32_t _CPU_ISR_Get_level( void )
-{
- /*
- * This routine returns the current interrupt level.
- */
-
- return 0;
-}
-
-/*PAGE
- *
+/*
* _CPU_ISR_install_raw_handler
*
* NO_CPU Specific Information:
@@ -81,8 +57,7 @@ void _CPU_ISR_install_raw_handler(
*/
}
-/*PAGE
- *
+/*
* _CPU_ISR_install_vector
*
* This kernel routine installs the RTEMS handler for the
@@ -125,8 +100,7 @@ void _CPU_ISR_install_vector(
_ISR_Vector_table[ vector ] = new_handler;
}
-/*PAGE
- *
+/*
* _CPU_Install_interrupt_stack
*
* NO_CPU Specific Information:
@@ -138,8 +112,7 @@ void _CPU_Install_interrupt_stack( void )
{
}
-/*PAGE
- *
+/*
* _CPU_Thread_Idle_body
*
* NOTES:
diff --git a/schedsim/rtems/sched_cpu/machine/_default_types.h b/schedsim/rtems/sched_cpu/machine/_default_types.h
new file mode 100644
index 0000000..47986d2
--- /dev/null
+++ b/schedsim/rtems/sched_cpu/machine/_default_types.h
@@ -0,0 +1,184 @@
+/*
+ * $Id: _default_types.h,v 1.6 2013/12/03 16:04:41 corinna Exp $
+ */
+
+#ifndef _MACHINE__DEFAULT_TYPES_H
+#define _MACHINE__DEFAULT_TYPES_H
+
+#include <sys/features.h>
+
+/*
+ * Guess on types by examining *_MIN / *_MAX defines.
+ */
+#if __GNUC_PREREQ (3, 3)
+/* GCC >= 3.3.0 has __<val>__ implicitly defined. */
+#define __EXP(x) __##x##__
+#else
+/* Fall back to POSIX versions from <limits.h> */
+#define __EXP(x) x
+#include <limits.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __INT8_TYPE__
+typedef __INT8_TYPE__ __int8_t;
+typedef __UINT8_TYPE__ __uint8_t;
+#define ___int8_t_defined 1
+#elif __EXP(SCHAR_MAX) == 0x7f
+typedef signed char __int8_t ;
+typedef unsigned char __uint8_t ;
+#define ___int8_t_defined 1
+#endif
+
+#ifdef __INT16_TYPE__
+typedef __INT16_TYPE__ __int16_t;
+typedef __UINT16_TYPE__ __uint16_t;
+#define ___int16_t_defined 1
+#elif __EXP(INT_MAX) == 0x7fff
+typedef signed int __int16_t;
+typedef unsigned int __uint16_t;
+#define ___int16_t_defined 1
+#elif __EXP(SHRT_MAX) == 0x7fff
+typedef signed short __int16_t;
+typedef unsigned short __uint16_t;
+#define ___int16_t_defined 1
+#elif __EXP(SCHAR_MAX) == 0x7fff
+typedef signed char __int16_t;
+typedef unsigned char __uint16_t;
+#define ___int16_t_defined 1
+#endif
+
+#ifdef __INT32_TYPE__
+typedef __INT32_TYPE__ __int32_t;
+typedef __UINT32_TYPE__ __uint32_t;
+#define ___int32_t_defined 1
+#elif __EXP(INT_MAX) == 0x7fffffffL
+typedef signed int __int32_t;
+typedef unsigned int __uint32_t;
+#define ___int32_t_defined 1
+#elif __EXP(LONG_MAX) == 0x7fffffffL
+typedef signed long __int32_t;
+typedef unsigned long __uint32_t;
+#define ___int32_t_defined 1
+#elif __EXP(SHRT_MAX) == 0x7fffffffL
+typedef signed short __int32_t;
+typedef unsigned short __uint32_t;
+#define ___int32_t_defined 1
+#elif __EXP(SCHAR_MAX) == 0x7fffffffL
+typedef signed char __int32_t;
+typedef unsigned char __uint32_t;
+#define ___int32_t_defined 1
+#endif
+
+#ifdef __INT64_TYPE__
+typedef __INT64_TYPE__ __int64_t;
+typedef __UINT64_TYPE__ __uint64_t;
+#define ___int64_t_defined 1
+#elif __EXP(LONG_MAX) > 0x7fffffff
+typedef signed long __int64_t;
+typedef unsigned long __uint64_t;
+#define ___int64_t_defined 1
+
+/* GCC has __LONG_LONG_MAX__ */
+#elif defined(__LONG_LONG_MAX__) && (__LONG_LONG_MAX__ > 0x7fffffff)
+typedef signed long long __int64_t;
+typedef unsigned long long __uint64_t;
+#define ___int64_t_defined 1
+
+/* POSIX mandates LLONG_MAX in <limits.h> */
+#elif defined(LLONG_MAX) && (LLONG_MAX > 0x7fffffff)
+typedef signed long long __int64_t;
+typedef unsigned long long __uint64_t;
+#define ___int64_t_defined 1
+
+#elif __EXP(INT_MAX) > 0x7fffffff
+typedef signed int __int64_t;
+typedef unsigned int __uint64_t;
+#define ___int64_t_defined 1
+#endif
+
+#ifdef __INT_LEAST8_TYPE__
+typedef __INT_LEAST8_TYPE__ __int_least8_t;
+typedef __UINT_LEAST8_TYPE__ __uint_least8_t;
+#define ___int_least8_t_defined 1
+#elif defined(___int8_t_defined)
+typedef __int8_t __int_least8_t;
+typedef __uint8_t __uint_least8_t;
+#define ___int_least8_t_defined 1
+#elif defined(___int16_t_defined)
+typedef __int16_t __int_least8_t;
+typedef __uint16_t __uint_least8_t;
+#define ___int_least8_t_defined 1
+#elif defined(___int32_t_defined)
+typedef __int32_t __int_least8_t;
+typedef __uint32_t __uint_least8_t;
+#define ___int_least8_t_defined 1
+#elif defined(___int64_t_defined)
+typedef __int64_t __int_least8_t;
+typedef __uint64_t __uint_least8_t;
+#define ___int_least8_t_defined 1
+#endif
+
+#ifdef __INT_LEAST16_TYPE__
+typedef __INT_LEAST16_TYPE__ __int_least16_t;
+typedef __UINT_LEAST16_TYPE__ __uint_least16_t;
+#define ___int_least16_t_defined 1
+#elif defined(___int16_t_defined)
+typedef __int16_t __int_least16_t;
+typedef __uint16_t __uint_least16_t;
+#define ___int_least16_t_defined 1
+#elif defined(___int32_t_defined)
+typedef __int32_t __int_least16_t;
+typedef __uint32_t __uint_least16_t;
+#define ___int_least16_t_defined 1
+#elif defined(___int64_t_defined)
+typedef __int64_t __int_least16_t;
+typedef __uint64_t __uint_least16_t;
+#define ___int_least16_t_defined 1
+#endif
+
+#ifdef __INT_LEAST32_TYPE__
+typedef __INT_LEAST32_TYPE__ __int_least32_t;
+typedef __UINT_LEAST32_TYPE__ __uint_least32_t;
+#define ___int_least32_t_defined 1
+#elif defined(___int32_t_defined)
+typedef __int32_t __int_least32_t;
+typedef __uint32_t __uint_least32_t;
+#define ___int_least32_t_defined 1
+#elif defined(___int64_t_defined)
+typedef __int64_t __int_least32_t;
+typedef __uint64_t __uint_least32_t;
+#define ___int_least32_t_defined 1
+#endif
+
+#ifdef __INT_LEAST64_TYPE__
+typedef __INT_LEAST64_TYPE__ __int_least64_t;
+typedef __UINT_LEAST64_TYPE__ __uint_least64_t;
+#define ___int_least64_t_defined 1
+#elif defined(___int64_t_defined)
+typedef __int64_t __int_least64_t;
+typedef __uint64_t __uint_least64_t;
+#define ___int_least64_t_defined 1
+#endif
+
+#ifdef __INTPTR_TYPE__
+typedef __INTPTR_TYPE__ __intptr_t;
+typedef __UINTPTR_TYPE__ __uintptr_t;
+#elif defined(__PTRDIFF_TYPE__)
+typedef __PTRDIFF_TYPE__ __intptr_t;
+typedef unsigned __PTRDIFF_TYPE__ __uintptr_t;
+#else
+typedef long __intptr_t;
+typedef unsigned long __uintptr_t;
+#endif
+
+#undef __EXP
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MACHINE__DEFAULT_TYPES_H */
diff --git a/schedsim/rtems/sched_cpu/machine/_types.h b/schedsim/rtems/sched_cpu/machine/_types.h
new file mode 100644
index 0000000..18f96d5
--- /dev/null
+++ b/schedsim/rtems/sched_cpu/machine/_types.h
@@ -0,0 +1,8 @@
+/*
+ * $Id: _types.h,v 1.3 2007/09/07 21:16:25 jjohnstn Exp $
+ */
+
+#ifndef _MACHINE__TYPES_H
+#define _MACHINE__TYPES_H
+#include <machine/_default_types.h>
+#endif
diff --git a/schedsim/rtems/sched_cpu/rtems/score/asm.h b/schedsim/rtems/sched_cpu/rtems/score/asm.h
new file mode 100644
index 0000000..a2b11f6
--- /dev/null
+++ b/schedsim/rtems/sched_cpu/rtems/score/asm.h
@@ -0,0 +1,120 @@
+/**
+ * @file
+ *
+ * @brief Address the Problems Caused by Incompatible Flavor of
+ * Assemblers and Toolsets
+ *
+ * This include file attempts to address the problems
+ * caused by incompatible flavors of assemblers and
+ * toolsets. It primarily addresses variations in the
+ * use of leading underscores on symbols and the requirement
+ * that register names be preceded by a %.
+ *
+ * NOTE: The spacing in the use of these macros
+ * is critical to them working as advertised.
+ */
+
+/*
+ * COPYRIGHT:
+ *
+ * This file is based on similar code found in newlib available
+ * from ftp.cygnus.com. The file which was used had no copyright
+ * notice. This file is freely distributable as long as the source
+ * of the file is noted.
+ */
+
+#ifndef _RTEMS_ASM_H
+#define _RTEMS_ASM_H
+
+/*
+ * Indicate we are in an assembly file and get the basic CPU definitions.
+ */
+
+#ifndef ASM
+#define ASM
+#endif
+
+#include <rtems/score/cpuopts.h>
+#include <rtems/score/cpu.h>
+
+/*
+ * Recent versions of GNU cpp define variables which indicate the
+ * need for underscores and percents. If not using GNU cpp or
+ * the version does not support this, then you will obviously
+ * have to define these as appropriate.
+ */
+
+/* XXX __USER_LABEL_PREFIX__ and __REGISTER_PREFIX__ do not work on gcc 2.7.0 */
+/* XXX The following ifdef magic fixes the problem but results in a warning */
+/* XXX when compiling assembly code. */
+
+#ifndef __USER_LABEL_PREFIX__
+#define __USER_LABEL_PREFIX__ _
+#endif
+
+#ifndef __REGISTER_PREFIX__
+#define __REGISTER_PREFIX__
+#endif
+
+#include <rtems/concat.h>
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+/* Use the right prefix for registers. */
+
+#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
+
+/*
+ * define macros for all of the registers on this CPU
+ *
+ * EXAMPLE: #define d0 REG (d0)
+ */
+
+/*
+ * Define macros to handle section beginning and ends.
+ */
+
+
+#define BEGIN_CODE_DCL .text
+#define END_CODE_DCL
+#define BEGIN_DATA_DCL .data
+#define END_DATA_DCL
+#define BEGIN_CODE .text
+#define END_CODE
+#define BEGIN_DATA
+#define END_DATA
+#define BEGIN_BSS
+#define END_BSS
+#define END
+
+/*
+ * The following must be tailored for a particular flavor of the C compiler.
+ * They may need to put underscores in front of the symbols.
+ */
+
+#define PUBLIC(sym) .globl SYM (sym)
+#define EXTERN(sym) .globl SYM (sym)
+
+/*
+ * Entry for traps which jump to a programmer-specified trap handler.
+ */
+
+#define TRAP(_vector, _handler) \
+ mov %psr, %l0 ; \
+ sethi %hi(_handler), %l4 ; \
+ jmp %l4+%lo(_handler); \
+ mov _vector, %l3
+
+/*
+ * Used for the reset trap to avoid a supervisor instruction
+ */
+
+#define RTRAP(_vector, _handler) \
+ mov %g0, %l0 ; \
+ sethi %hi(_handler), %l4 ; \
+ jmp %l4+%lo(_handler); \
+ mov _vector, %l3
+
+#endif
diff --git a/schedsim/rtems/sched_cpu/rtems/score/cpu.h b/schedsim/rtems/sched_cpu/rtems/score/cpu.h
index 219a347..03817fc 100644
--- a/schedsim/rtems/sched_cpu/rtems/score/cpu.h
+++ b/schedsim/rtems/sched_cpu/rtems/score/cpu.h
@@ -720,6 +720,11 @@ SCORE_EXTERN void *_CPU_Interrupt_stack_high;
#define _CPU_Initialize_vectors()
/**
+ * XXX fake cpu isr level variable
+ */
+extern int _CPU_ISR_level_on_sched_cpu;
+
+/**
* @ingroup CPUInterrupt
* Disable all interrupts for an RTEMS critical section. The previous
* level is returned in @a _isr_cookie.
@@ -732,7 +737,8 @@ SCORE_EXTERN void *_CPU_Interrupt_stack_high;
*/
#define _CPU_ISR_Disable( _isr_cookie ) \
{ \
- (_isr_cookie) = 0; /* do something to prevent warnings */ \
+ (_isr_cookie) = _CPU_ISR_level_on_sched_cpu; \
+ _CPU_ISR_level_on_sched_cpu = 1; \
}
/**
@@ -749,6 +755,7 @@ SCORE_EXTERN void *_CPU_Interrupt_stack_high;
*/
#define _CPU_ISR_Enable( _isr_cookie ) \
{ \
+ _CPU_ISR_level_on_sched_cpu = (_isr_cookie); \
}
/**
@@ -787,6 +794,7 @@ SCORE_EXTERN void *_CPU_Interrupt_stack_high;
*/
#define _CPU_ISR_Set_level( new_level ) \
{ \
+ _CPU_ISR_level_on_sched_cpu = (new_level); \
}
/**
@@ -800,7 +808,7 @@ SCORE_EXTERN void *_CPU_Interrupt_stack_high;
*
* XXX document implementation including references if appropriate
*/
-uint32_t _CPU_ISR_Get_level( void );
+#define _CPU_ISR_Get_level() (uint32_t) _CPU_ISR_level_on_sched_cpu
/* end of ISR handler macros */
@@ -1220,21 +1228,13 @@ void _CPU_Context_restore_fp(
#ifdef RTEMS_SMP
#define _CPU_Context_switch_to_first_task_smp(_context )
- RTEMS_COMPILER_PURE_ATTRIBUTE static inline uint32_t
- _CPU_SMP_Get_current_processor( void )
- {
- return 0;
- }
-
- #define _CPU_SMP_Send_interrupt( dest);
-
- static inline void _CPU_SMP_Processor_event_broadcast( void )
- {
- }
-
- static inline void _CPU_SMP_Processor_event_receive( void )
- {
- }
+ uint32_t _CPU_SMP_Get_current_processor( void );
+ uint32_t _CPU_SMP_Initialize( void );
+ bool _CPU_SMP_Start_processor( uint32_t cpu_index );
+ void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );
+ void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
+ void _CPU_SMP_Processor_event_broadcast( void );
+ void _CPU_SMP_Processor_event_receive( void );
#endif
typedef struct {
uint32_t trap;
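
With the fake level variable, the ISR macros above act as a host-side simulation of interrupt masking: _CPU_ISR_Disable() saves the current value into the cookie and forces the level to 1, _CPU_ISR_Enable() restores the saved value, and _CPU_ISR_Get_level() simply reads the variable. A minimal sketch of a hypothetical caller, assuming the macros shown above:

    #include <rtems/score/cpu.h>

    void simulated_critical_section( void )
    {
      uint32_t level;

      _CPU_ISR_Disable( level );   /* level := _CPU_ISR_level_on_sched_cpu; variable forced to 1 */
      /* ... work that must appear uninterruptible to the simulator ... */
      _CPU_ISR_Enable( level );    /* _CPU_ISR_level_on_sched_cpu restored to the saved value */
    }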
diff --git a/schedsim/rtems/sched_cpu/rtems/score/cpuatomic.h b/schedsim/rtems/sched_cpu/rtems/score/cpuatomic.h
new file mode 100644
index 0000000..598ee76
--- /dev/null
+++ b/schedsim/rtems/sched_cpu/rtems/score/cpuatomic.h
@@ -0,0 +1,14 @@
+/*
+ * COPYRIGHT (c) 2012-2013 Deng Hengyi.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_ATOMIC_CPU_H
+#define _RTEMS_SCORE_ATOMIC_CPU_H
+
+#include <rtems/score/cpustdatomic.h>
+
+#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */
diff --git a/schedsim/rtems/sched_cpu/stdatomic.h b/schedsim/rtems/sched_cpu/stdatomic.h
new file mode 100644
index 0000000..f331bc6
--- /dev/null
+++ b/schedsim/rtems/sched_cpu/stdatomic.h
@@ -0,0 +1,422 @@
+/*-
+ * Copyright (c) 2011 Ed Schouten <ed@FreeBSD.org>
+ * David Chisnall <theraven@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _STDATOMIC_H_
+#define _STDATOMIC_H_
+
+#include <sys/cdefs.h>
+#include <sys/_types.h>
+
+#define __GNUC_ATOMICS /* --jla */
+
+/* --jla
+#if __has_extension(c_atomic) || __has_extension(cxx_atomic)
+#define __CLANG_ATOMICS
+#elif __GNUC_PREREQ__(4, 7)
+#define __GNUC_ATOMICS
+#elif defined(__GNUC__)
+#define __SYNC_ATOMICS
+#else
+#error "stdatomic.h does not support your compiler"
+#endif
+*/
+
+#define __unused /* --jla */
+#define _Atomic(T) struct { T volatile __val; } /* --jla */
+
+/*
+ * 7.17.1 Atomic lock-free macros.
+ */
+
+#ifdef __GCC_ATOMIC_BOOL_LOCK_FREE
+#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_CHAR_LOCK_FREE
+#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_SHORT_LOCK_FREE
+#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_INT_LOCK_FREE
+#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_LONG_LOCK_FREE
+#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_LLONG_LOCK_FREE
+#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_POINTER_LOCK_FREE
+#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
+#endif
+
+/*
+ * 7.17.2 Initialization.
+ */
+
+#if defined(__CLANG_ATOMICS)
+#define ATOMIC_VAR_INIT(value) (value)
+#define atomic_init(obj, value) __c11_atomic_init(obj, value)
+#else
+#define ATOMIC_VAR_INIT(value) { .__val = (value) }
+#define atomic_init(obj, value) ((void)((obj)->__val = (value)))
+#endif
+
+/*
+ * Clang and recent GCC both provide predefined macros for the memory
+ * orderings. If we are using a compiler that doesn't define them, use the
+ * clang values - these will be ignored in the fallback path.
+ */
+
+#ifndef __ATOMIC_RELAXED
+#define __ATOMIC_RELAXED 0
+#endif
+#ifndef __ATOMIC_CONSUME
+#define __ATOMIC_CONSUME 1
+#endif
+#ifndef __ATOMIC_ACQUIRE
+#define __ATOMIC_ACQUIRE 2
+#endif
+#ifndef __ATOMIC_RELEASE
+#define __ATOMIC_RELEASE 3
+#endif
+#ifndef __ATOMIC_ACQ_REL
+#define __ATOMIC_ACQ_REL 4
+#endif
+#ifndef __ATOMIC_SEQ_CST
+#define __ATOMIC_SEQ_CST 5
+#endif
+
+/*
+ * 7.17.3 Order and consistency.
+ *
+ * The memory_order_* constants that denote the barrier behaviour of the
+ * atomic operations.
+ */
+
+typedef enum {
+ memory_order_relaxed = __ATOMIC_RELAXED,
+ memory_order_consume = __ATOMIC_CONSUME,
+ memory_order_acquire = __ATOMIC_ACQUIRE,
+ memory_order_release = __ATOMIC_RELEASE,
+ memory_order_acq_rel = __ATOMIC_ACQ_REL,
+ memory_order_seq_cst = __ATOMIC_SEQ_CST
+} memory_order;
+
+/*
+ * 7.17.4 Fences.
+ */
+
+static __inline void
+atomic_thread_fence(memory_order __order __unused)
+{
+
+#ifdef __CLANG_ATOMICS
+ __c11_atomic_thread_fence(__order);
+#elif defined(__GNUC_ATOMICS)
+ __atomic_thread_fence(__order);
+#else
+ __sync_synchronize();
+#endif
+}
+
+static __inline void
+atomic_signal_fence(memory_order __order __unused)
+{
+
+#ifdef __CLANG_ATOMICS
+ __c11_atomic_signal_fence(__order);
+#elif defined(__GNUC_ATOMICS)
+ __atomic_signal_fence(__order);
+#else
+ __asm volatile ("" ::: "memory");
+#endif
+}
+
+/*
+ * 7.17.5 Lock-free property.
+ */
+
+#if defined(_KERNEL)
+/* Atomics in kernelspace are always lock-free. */
+#define atomic_is_lock_free(obj) \
+ ((void)(obj), (_Bool)1)
+#elif defined(__CLANG_ATOMICS)
+#define atomic_is_lock_free(obj) \
+ __atomic_is_lock_free(sizeof(*(obj)), obj)
+#elif defined(__GNUC_ATOMICS)
+#define atomic_is_lock_free(obj) \
+ __atomic_is_lock_free(sizeof((obj)->__val), &(obj)->__val)
+#else
+#define atomic_is_lock_free(obj) \
+ ((void)(obj), sizeof((obj)->__val) <= sizeof(void *))
+#endif
+
+/*
+ * 7.17.6 Atomic integer types.
+ */
+
+typedef _Atomic(_Bool) atomic_bool;
+typedef _Atomic(char) atomic_char;
+typedef _Atomic(signed char) atomic_schar;
+typedef _Atomic(unsigned char) atomic_uchar;
+typedef _Atomic(short) atomic_short;
+typedef _Atomic(unsigned short) atomic_ushort;
+typedef _Atomic(int) atomic_int;
+typedef _Atomic(unsigned int) atomic_uint;
+typedef _Atomic(long) atomic_long;
+typedef _Atomic(unsigned long) atomic_ulong;
+typedef _Atomic(long long) atomic_llong;
+typedef _Atomic(unsigned long long) atomic_ullong;
+#if 0
+typedef _Atomic(__char16_t) atomic_char16_t;
+typedef _Atomic(__char32_t) atomic_char32_t;
+#endif
+typedef _Atomic(wchar_t) atomic_wchar_t;
+typedef _Atomic(int_least8_t) atomic_int_least8_t;
+typedef _Atomic(uint_least8_t) atomic_uint_least8_t;
+typedef _Atomic(int_least16_t) atomic_int_least16_t;
+typedef _Atomic(uint_least16_t) atomic_uint_least16_t;
+typedef _Atomic(int_least32_t) atomic_int_least32_t;
+typedef _Atomic(uint_least32_t) atomic_uint_least32_t;
+typedef _Atomic(int_least64_t) atomic_int_least64_t;
+typedef _Atomic(uint_least64_t) atomic_uint_least64_t;
+typedef _Atomic(int_fast8_t) atomic_int_fast8_t;
+typedef _Atomic(uint_fast8_t) atomic_uint_fast8_t;
+typedef _Atomic(int_fast16_t) atomic_int_fast16_t;
+typedef _Atomic(uint_fast16_t) atomic_uint_fast16_t;
+typedef _Atomic(int_fast32_t) atomic_int_fast32_t;
+typedef _Atomic(uint_fast32_t) atomic_uint_fast32_t;
+typedef _Atomic(int_fast64_t) atomic_int_fast64_t;
+typedef _Atomic(uint_fast64_t) atomic_uint_fast64_t;
+typedef _Atomic(intptr_t) atomic_intptr_t;
+typedef _Atomic(uintptr_t) atomic_uintptr_t;
+typedef _Atomic(size_t) atomic_size_t;
+typedef _Atomic(ptrdiff_t) atomic_ptrdiff_t;
+typedef _Atomic(intmax_t) atomic_intmax_t;
+typedef _Atomic(uintmax_t) atomic_uintmax_t;
+
+/*
+ * 7.17.7 Operations on atomic types.
+ */
+
+/*
+ * Compiler-specific operations.
+ */
+
+#if defined(__CLANG_ATOMICS)
+#define atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, success, failure) \
+ __c11_atomic_compare_exchange_strong(object, expected, desired, \
+ success, failure)
+#define atomic_compare_exchange_weak_explicit(object, expected, \
+ desired, success, failure) \
+ __c11_atomic_compare_exchange_weak(object, expected, desired, \
+ success, failure)
+#define atomic_exchange_explicit(object, desired, order) \
+ __c11_atomic_exchange(object, desired, order)
+#define atomic_fetch_add_explicit(object, operand, order) \
+ __c11_atomic_fetch_add(object, operand, order)
+#define atomic_fetch_and_explicit(object, operand, order) \
+ __c11_atomic_fetch_and(object, operand, order)
+#define atomic_fetch_or_explicit(object, operand, order) \
+ __c11_atomic_fetch_or(object, operand, order)
+#define atomic_fetch_sub_explicit(object, operand, order) \
+ __c11_atomic_fetch_sub(object, operand, order)
+#define atomic_fetch_xor_explicit(object, operand, order) \
+ __c11_atomic_fetch_xor(object, operand, order)
+#define atomic_load_explicit(object, order) \
+ __c11_atomic_load(object, order)
+#define atomic_store_explicit(object, desired, order) \
+ __c11_atomic_store(object, desired, order)
+#elif defined(__GNUC_ATOMICS)
+#define atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, success, failure) \
+ __atomic_compare_exchange_n(&(object)->__val, expected, \
+ desired, 0, success, failure)
+#define atomic_compare_exchange_weak_explicit(object, expected, \
+ desired, success, failure) \
+ __atomic_compare_exchange_n(&(object)->__val, expected, \
+ desired, 1, success, failure)
+#define atomic_exchange_explicit(object, desired, order) \
+ __atomic_exchange_n(&(object)->__val, desired, order)
+#define atomic_fetch_add_explicit(object, operand, order) \
+ __atomic_fetch_add(&(object)->__val, operand, order)
+#define atomic_fetch_and_explicit(object, operand, order) \
+ __atomic_fetch_and(&(object)->__val, operand, order)
+#define atomic_fetch_or_explicit(object, operand, order) \
+ __atomic_fetch_or(&(object)->__val, operand, order)
+#define atomic_fetch_sub_explicit(object, operand, order) \
+ __atomic_fetch_sub(&(object)->__val, operand, order)
+#define atomic_fetch_xor_explicit(object, operand, order) \
+ __atomic_fetch_xor(&(object)->__val, operand, order)
+#define atomic_load_explicit(object, order) \
+ __atomic_load_n(&(object)->__val, order)
+#define atomic_store_explicit(object, desired, order) \
+ __atomic_store_n(&(object)->__val, desired, order)
+#else
+#define __atomic_apply_stride(object, operand) \
+ (((__typeof__((object)->__val))0) + (operand))
+#define atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, success, failure) __extension__ ({ \
+ __typeof__(expected) __ep = (expected); \
+ __typeof__(*__ep) __e = *__ep; \
+ (void)(success); (void)(failure); \
+ (_Bool)((*__ep = __sync_val_compare_and_swap(&(object)->__val, \
+ __e, desired)) == __e); \
+})
+#define atomic_compare_exchange_weak_explicit(object, expected, \
+ desired, success, failure) \
+ atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, success, failure)
+#if 0 /* --jla comment out. */
+#if __has_builtin(__sync_swap)
+/* Clang provides a full-barrier atomic exchange - use it if available. */
+#define atomic_exchange_explicit(object, desired, order) \
+ ((void)(order), __sync_swap(&(object)->__val, desired))
+#endif /* --jla */
+#else
+/*
+ * __sync_lock_test_and_set() is only an acquire barrier in theory (although in
+ * practice it is usually a full barrier) so we need an explicit barrier before
+ * it.
+ */
+#define atomic_exchange_explicit(object, desired, order) \
+__extension__ ({ \
+ __typeof__(object) __o = (object); \
+ __typeof__(desired) __d = (desired); \
+ (void)(order); \
+ __sync_synchronize(); \
+ __sync_lock_test_and_set(&(__o)->__val, __d); \
+})
+#endif
+#define atomic_fetch_add_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_add(&(object)->__val, \
+ __atomic_apply_stride(object, operand)))
+#define atomic_fetch_and_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_and(&(object)->__val, operand))
+#define atomic_fetch_or_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_or(&(object)->__val, operand))
+#define atomic_fetch_sub_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_sub(&(object)->__val, \
+ __atomic_apply_stride(object, operand)))
+#define atomic_fetch_xor_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_xor(&(object)->__val, operand))
+#define atomic_load_explicit(object, order) \
+ ((void)(order), __sync_fetch_and_add(&(object)->__val, 0))
+#define atomic_store_explicit(object, desired, order) \
+ ((void)atomic_exchange_explicit(object, desired, order))
+#endif
+
+/*
+ * Convenience functions.
+ *
+ * Don't provide these in kernel space. In kernel space, we should be
+ * disciplined enough to always provide explicit barriers.
+ */
+
+#ifndef _KERNEL
+#define atomic_compare_exchange_strong(object, expected, desired) \
+ atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, memory_order_seq_cst, memory_order_seq_cst)
+#define atomic_compare_exchange_weak(object, expected, desired) \
+ atomic_compare_exchange_weak_explicit(object, expected, \
+ desired, memory_order_seq_cst, memory_order_seq_cst)
+#define atomic_exchange(object, desired) \
+ atomic_exchange_explicit(object, desired, memory_order_seq_cst)
+#define atomic_fetch_add(object, operand) \
+ atomic_fetch_add_explicit(object, operand, memory_order_seq_cst)
+#define atomic_fetch_and(object, operand) \
+ atomic_fetch_and_explicit(object, operand, memory_order_seq_cst)
+#define atomic_fetch_or(object, operand) \
+ atomic_fetch_or_explicit(object, operand, memory_order_seq_cst)
+#define atomic_fetch_sub(object, operand) \
+ atomic_fetch_sub_explicit(object, operand, memory_order_seq_cst)
+#define atomic_fetch_xor(object, operand) \
+ atomic_fetch_xor_explicit(object, operand, memory_order_seq_cst)
+#define atomic_load(object) \
+ atomic_load_explicit(object, memory_order_seq_cst)
+#define atomic_store(object, desired) \
+ atomic_store_explicit(object, desired, memory_order_seq_cst)
+#endif /* !_KERNEL */
+
+/*
+ * 7.17.8 Atomic flag type and operations.
+ *
+ * XXX: Assume atomic_bool can be used as an atomic_flag. Is there some
+ * kind of compiler built-in type we could use?
+ */
+
+typedef struct {
+ atomic_bool __flag;
+} atomic_flag;
+
+#define ATOMIC_FLAG_INIT { ATOMIC_VAR_INIT(0) }
+
+static __inline _Bool
+atomic_flag_test_and_set_explicit(volatile atomic_flag *__object,
+ memory_order __order)
+{
+ return (atomic_exchange_explicit(&__object->__flag, 1, __order));
+}
+
+static __inline void
+atomic_flag_clear_explicit(volatile atomic_flag *__object, memory_order __order)
+{
+
+ atomic_store_explicit(&__object->__flag, 0, __order);
+}
+
+#ifndef _KERNEL
+static __inline _Bool
+atomic_flag_test_and_set(volatile atomic_flag *__object)
+{
+
+ return (atomic_flag_test_and_set_explicit(__object,
+ memory_order_seq_cst));
+}
+
+static __inline void
+atomic_flag_clear(volatile atomic_flag *__object)
+{
+
+ atomic_flag_clear_explicit(__object, memory_order_seq_cst);
+}
+#endif /* !_KERNEL */
+
+#endif /* !_STDATOMIC_H_ */
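
Since __GNUC_ATOMICS is forced on above, this shim maps the C11-style operations onto GCC's __atomic builtins applied to the wrapper struct's __val member. A minimal usage sketch (illustrative only; the names are hypothetical):

    #include <stdatomic.h>

    static atomic_uint ready_count = ATOMIC_VAR_INIT( 0 );

    void mark_cpu_ready( void )
    {
      /* expands to __atomic_fetch_add( &ready_count.__val, 1, memory_order_release ) */
      atomic_fetch_add_explicit( &ready_count, 1, memory_order_release );
    }

    unsigned int cpus_ready( void )
    {
      return atomic_load_explicit( &ready_count, memory_order_acquire );
    }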
diff --git a/schedsim/rtems/sched_cpu/sys/_types.h b/schedsim/rtems/sched_cpu/sys/_types.h
new file mode 100644
index 0000000..1ad429d
--- /dev/null
+++ b/schedsim/rtems/sched_cpu/sys/_types.h
@@ -0,0 +1,54 @@
+/* ANSI C namespace clean utility typedefs */
+
+/* This file defines various typedefs needed by the system calls that support
+ the C library. Basically, they're just the POSIX versions with an '_'
+ prepended. This file lives in the `sys' directory so targets can provide
+ their own if desired (or they can put target dependant conditionals here).
+*/
+
+#ifndef _SYS__TYPES_H
+#define _SYS__TYPES_H
+
+typedef long _off_t;
+__extension__ typedef long long _off64_t;
+
+typedef long _fpos_t;
+__extension__ typedef long long _fpos64_t;
+
+#if defined(__INT_MAX__) && __INT_MAX__ == 2147483647
+typedef int _ssize_t;
+#else
+typedef long _ssize_t;
+#endif
+
+#define __need_wint_t
+#include <stddef.h>
+
+/* Conversion state information. */
+typedef struct
+{
+ int __count;
+ union
+ {
+ wint_t __wch;
+ unsigned char __wchb[4];
+ } __value; /* Value so far. */
+} _mbstate_t;
+
+struct __flock_mutex_t_tmp;
+typedef struct
+{
+ int __a;
+ int __b;
+ struct
+ {
+ long int __c1;
+ int __c2;
+ } __c;
+ int __d;
+ struct __flock_mutex_t_tmp * __e;
+} __flock_mutex_t;
+
+typedef struct { __flock_mutex_t mutex; } _flock_t;
+
+#endif /* _SYS__TYPES_H */
diff --git a/schedsim/rtems/sched_cpu/sys/cpuset.h b/schedsim/rtems/sched_cpu/sys/cpuset.h
new file mode 100644
index 0000000..d89bb68
--- /dev/null
+++ b/schedsim/rtems/sched_cpu/sys/cpuset.h
@@ -0,0 +1,9 @@
+/*
+ * On Linux, cpu_set_t is defined in <sched.h>, protected by _GNU_SOURCE,
+ * which is defined in Makefile.am.
+ */
+#include <sched.h>
+#ifndef __CPU_ZERO_S
+#error "__CPU_ZERO_S not defined - check configuration"
+#endif
+
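
This override header simply forwards to the host's <sched.h>: with -D_GNU_SOURCE (added to Makefile.am above), glibc exposes cpu_set_t and the CPU_* macros, and the #error catches a build where that did not happen. A small host-side sketch of what the simulator then has available (illustrative only):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main( void )
    {
      cpu_set_t set;

      CPU_ZERO( &set );            /* only visible under _GNU_SOURCE */
      CPU_SET( 1, &set );
      printf( "cpu 1 set: %d\n", CPU_ISSET( 1, &set ) );
      return 0;
    }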
diff --git a/schedsim/rtems/sched_cpu/sys/features.h b/schedsim/rtems/sched_cpu/sys/features.h
new file mode 100644
index 0000000..34cd0eb
--- /dev/null
+++ b/schedsim/rtems/sched_cpu/sys/features.h
@@ -0,0 +1,16 @@
+#ifndef _SYS_FEATURES_H
+#define _SYS_FEATURES_H
+
+#include <bits/posix_opt.h>
+
+/* We do not support asynchronous I/O. */
+#undef _POSIX_ASYNCHRONOUS_IO
+#undef _POSIX_ASYNC_IO
+#undef _LFS_ASYNCHRONOUS_IO
+#undef _LFS64_ASYNCHRONOUS_IO
+
+/* POSIX message queues are supported. */
+#undef _POSIX_MESSAGE_PASSING
+#define _POSIX_MESSAGE_PASSING 1
+
+#endif /* _SYS_FEATURES_H */
diff --git a/schedsim/rtems/sched_cpu/sys/lock.h b/schedsim/rtems/sched_cpu/sys/lock.h
new file mode 100644
index 0000000..d934031
--- /dev/null
+++ b/schedsim/rtems/sched_cpu/sys/lock.h
@@ -0,0 +1,35 @@
+#ifndef __SYS_LOCK_H__
+#define __SYS_LOCK_H__
+
+#include <features.h>
+
+#define _LIBC 1
+#define NOT_IN_libc 1
+
+#ifndef __USE_GNU
+#define __USE_GNU 1
+#endif
+
+#include <bits/libc-lock.h>
+
+typedef __libc_lock_t _LOCK_T;
+typedef __libc_lock_recursive_t _LOCK_RECURSIVE_T;
+
+#define __LOCK_INIT(class,lock) \
+ __libc_lock_define_initialized(class, lock)
+#define __LOCK_INIT_RECURSIVE(class, lock) \
+ __libc_lock_define_initialized_recursive(class, lock)
+
+#define __lock_init(__lock) __libc_lock_init(__lock)
+#define __lock_init_recursive(__lock) __libc_lock_init_recursive(__lock)
+#define __lock_acquire(__lock) __libc_lock_lock(__lock)
+#define __lock_acquire_recursive(__lock) __libc_lock_lock_recursive(__lock)
+#define __lock_release(__lock) __libc_lock_unlock(__lock)
+#define __lock_release_recursive(__lock) __libc_lock_unlock_recursive(__lock)
+#define __lock_try_acquire(__lock) __libc_lock_trylock(__lock)
+#define __lock_try_acquire_recursive(__lock) \
+ __libc_lock_trylock_recursive(__lock)
+#define __lock_close(__lock) __libc_lock_fini(__lock)
+#define __lock_close_recursive(__lock) __libc_lock_fini_recursive(__lock)
+
+#endif /* __SYS_LOCK_H__ */
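
This header maps newlib's __lock_* API onto glibc's internal <bits/libc-lock.h> primitives so the cpukit sources can build on a Linux host; whether that internal header is installed depends on the glibc version. A hedged sketch of how newlib-style code would use the remapped API, assuming the mapping above works on the host:

    #include <sys/lock.h>

    __LOCK_INIT( static, sim_lock );          /* __libc_lock_define_initialized */

    void guarded_increment( int *counter )
    {
      __lock_acquire( sim_lock );             /* __libc_lock_lock */
      (*counter)++;
      __lock_release( sim_lock );             /* __libc_lock_unlock */
    }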
diff --git a/schedsim/shell/schedsim_priority/smp_stub.c b/schedsim/shell/schedsim_priority/smp_stub.c
index c2f1308..c328e1a 100644
--- a/schedsim/shell/schedsim_priority/smp_stub.c
+++ b/schedsim/shell/schedsim_priority/smp_stub.c
@@ -11,60 +11,36 @@
#include <rtems.h>
#include <rtems/bspIo.h>
-#include <rtems/bspsmp.h>
#include <stdlib.h>
-
-void bsp_smp_secondary_cpu_initialize(int cpu)
+uint32_t _CPU_SMP_Initialize( void )
{
+ /* return the number of CPUs */
+ return 1; /* XXX */
}
-int bsp_smp_processor_id(void)
+bool _CPU_SMP_Start_processor( uint32_t cpu_index )
{
- return 0;
+ return true;
}
-uint32_t bsp_smp_initialize(
- uint32_t configured_cpu_count
-)
+void _CPU_SMP_Finalize_initialization( uint32_t cpu_count )
{
- /* return the number of CPUs */
- return configured_cpu_count;
}
-void bsp_smp_broadcast_interrupt(void)
+void _CPU_SMP_Send_interrupt( uint32_t target_processor_index )
{
-}
+}
-void bsp_smp_broadcast_message(
- uint32_t message
-)
+void _CPU_SMP_Processor_event_broadcast( void )
{
}
-void bsp_smp_interrupt_cpu(
- int cpu
-)
+void _CPU_SMP_Processor_event_receive( void )
{
}
-void bsp_smp_delay( int max )
+uint32_t _CPU_SMP_Get_current_processor( void )
{
+ return 0;
}
-
-void bsp_smp_wait_for(
- volatile unsigned int *address,
- unsigned int desired,
- int maximum_usecs
-)
-{
- int iterations;
- volatile int i;
- volatile unsigned int *p = address;
-
- for (iterations=0 ; iterations < maximum_usecs ; iterations++ ) {
- *p = desired;
- /* XXX hack to make simulator happy */
- }
-}
-
diff --git a/schedsim/shell/schedsim_priority/wrap_thread_dispatch.c b/schedsim/shell/schedsim_priority/wrap_thread_dispatch.c
index a66d37d..5a72bcb 100644
--- a/schedsim/shell/schedsim_priority/wrap_thread_dispatch.c
+++ b/schedsim/shell/schedsim_priority/wrap_thread_dispatch.c
@@ -24,6 +24,10 @@ Thread_Control *last_executing = NULL;
extern void __real__Thread_Dispatch(void);
+void Init__wrap__Thread_Dispatch()
+{
+}
+
void check_heir_and_executing(void)
{
if ( last_heir != _Thread_Heir )
diff --git a/schedsim/shell/schedsim_smpsimple/Makefile.am b/schedsim/shell/schedsim_smpsimple/Makefile.am
index 19cf617..f4f0404 100644
--- a/schedsim/shell/schedsim_smpsimple/Makefile.am
+++ b/schedsim/shell/schedsim_smpsimple/Makefile.am
@@ -24,8 +24,15 @@ if HAS_PTHREADS
schedsim_smpsimple_CPPFLAGS += -I$(cpukitdir)/posix/include
schedsim_smpsimple_CPPFLAGS += -I$(cpukitdir)/posix/inline
endif
+## Ensure all linker provided symbols are available
+schedsim_smpsimple_LDFLAGS =
+schedsim_smpsimple_LDFLAGS += -Wl,--defsym=_TLS_Data_begin=0
+schedsim_smpsimple_LDFLAGS += -Wl,--defsym=_TLS_BSS_end=0
+schedsim_smpsimple_LDFLAGS += -Wl,--defsym=_TLS_Alignment=4
+
+## Wrap _Thread_Dispatch so we can see context switches
+schedsim_smpsimple_LDFLAGS +=-Wl,--wrap=_Thread_Dispatch
-schedsim_smpsimple_LDFLAGS =-Wl,--wrap=_Thread_Dispatch
## schedsim_smpsimple_LDADD +=-Wl,--start-group
schedsim_smpsimple_LDADD = ../shared/libschedsim.a
schedsim_smpsimple_LDADD += ../../rtems/librtems.a
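
For reference, GNU ld's --wrap=_Thread_Dispatch redirects every undefined reference to _Thread_Dispatch to __wrap__Thread_Dispatch, while __real__Thread_Dispatch still resolves to the original routine; that is what lets the simulator print heir/executing changes around each dispatch. A minimal standalone sketch of the pattern (hypothetical hooks, not the simulator's actual code):

    /* link with: -Wl,--wrap=_Thread_Dispatch */
    #include <stdio.h>

    void __real__Thread_Dispatch( void );     /* the original implementation */

    void __wrap__Thread_Dispatch( void )
    {
      printf( "before dispatch\n" );          /* instrumentation hook */
      __real__Thread_Dispatch();
      printf( "after dispatch\n" );           /* instrumentation hook */
    }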
diff --git a/schedsim/shell/schedsim_smpsimple/main_current_cpu.c b/schedsim/shell/schedsim_smpsimple/main_current_cpu.c
index 73edd3e..5a93a5f 100644
--- a/schedsim/shell/schedsim_smpsimple/main_current_cpu.c
+++ b/schedsim/shell/schedsim_smpsimple/main_current_cpu.c
@@ -17,7 +17,6 @@
#include "rtems_sched.h"
#include <rtems.h>
-#include <rtems/bspsmp.h>
#include <rtems/score/percpu.h>
#include <rtems/score/schedulerpriority.h>
diff --git a/schedsim/shell/schedsim_smpsimple/main_dispatch.c b/schedsim/shell/schedsim_smpsimple/main_dispatch.c
index 4c1461f..f742a75 100644
--- a/schedsim/shell/schedsim_smpsimple/main_dispatch.c
+++ b/schedsim/shell/schedsim_smpsimple/main_dispatch.c
@@ -17,7 +17,6 @@
#include "rtems_sched.h"
#include <rtems.h>
-#include <rtems/bspsmp.h>
#include <rtems/score/percpu.h>
#include <rtems/score/smp.h>
#include <rtems/score/schedulersimplesmp.h>
diff --git a/schedsim/shell/schedsim_smpsimple/main_dump_ready_tasks.c b/schedsim/shell/schedsim_smpsimple/main_dump_ready_tasks.c
index ba9a9cd..f9edf80 100644
--- a/schedsim/shell/schedsim_smpsimple/main_dump_ready_tasks.c
+++ b/schedsim/shell/schedsim_smpsimple/main_dump_ready_tasks.c
@@ -17,8 +17,13 @@
#include "rtems_sched.h"
#include <rtems/score/chainimpl.h>
#include <rtems/score/thread.h>
+#include <rtems/score/assert.h>
-#include <rtems/score/schedulerpriority.h>
+/*
+ * Note: this file depends upon which scheduler is being tested.
+ */
+#include <rtems/score/schedulersimplesmp.h>
int main_dump_ready_tasks(int argc, char **argv)
{
@@ -26,8 +31,14 @@ int main_dump_ready_tasks(int argc, char **argv)
Chain_Node *n;
Thread_Control *t;
+ Scheduler_simple_SMP_Context * self =
+ (Scheduler_simple_SMP_Context *) _Scheduler_Table[0].context;
+
+ /* We don't support this yet */
+ _Assert( _Scheduler_Count != 1 );
+
printf( "=== Ready Set of Threads\n" );
- chain = (Chain_Control *)_Scheduler.information;
+ chain = &self->Ready;
for (n = _Chain_First( chain ); !_Chain_Is_tail(chain, n); n = n->next) {
t = (Thread_Control *)n;
printf(
diff --git a/schedsim/shell/schedsim_smpsimple/smp_stub.c b/schedsim/shell/schedsim_smpsimple/smp_stub.c
index fc9c1dd..7f8f2d4 100644
--- a/schedsim/shell/schedsim_smpsimple/smp_stub.c
+++ b/schedsim/shell/schedsim_smpsimple/smp_stub.c
@@ -11,25 +11,12 @@
#include <rtems.h>
#include <rtems/bspIo.h>
-#include <rtems/bspsmp.h>
#include <stdlib.h>
uint32_t Schedsim_Current_cpu;
extern uint32_t Schedsim_Maximum_CPUs_From_Command_Line;
-void bsp_smp_secondary_cpu_initialize(int cpu)
-{
- Schedsim_Current_cpu = 0;
-}
-
-int bsp_smp_processor_id(void)
-{
- return Schedsim_Current_cpu;
-}
-
-uint32_t bsp_smp_initialize(
- uint32_t configured_cpu_count
-)
+uint32_t _CPU_SMP_Initialize( void )
{
if ( configured_cpu_count < Schedsim_Maximum_CPUs_From_Command_Line ) {
printf(
@@ -43,39 +30,42 @@ uint32_t bsp_smp_initialize(
return Schedsim_Maximum_CPUs_From_Command_Line;
}
-void bsp_smp_broadcast_interrupt(void)
+bool _CPU_SMP_Start_processor( uint32_t cpu_index )
{
+ return true;
}
-void bsp_smp_broadcast_message(
- uint32_t message
-)
+void _CPU_SMP_Finalize_initialization( uint32_t cpu_count )
{
}
-void bsp_smp_interrupt_cpu(
- int cpu
-)
+void _CPU_SMP_Send_interrupt( uint32_t target_processor_index )
{
-}
+}
-void bsp_smp_delay( int max )
+void _CPU_SMP_Processor_event_broadcast( void )
{
+ Per_CPU_Control *cpu = _Per_CPU_Get();
+ uint32_t cpu_count = _SMP_Get_processor_count();
+ uint32_t cpu_index;
+ Per_CPU_State state = cpu->state;
+
+ if (state == PER_CPU_STATE_REQUEST_START_MULTITASKING) {
+ for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+ state = cpu->state;
+ if (state == PER_CPU_STATE_INITIAL )
+ cpu->state = PER_CPU_STATE_READY_TO_START_MULTITASKING;
+ }
+ }
}
-void bsp_smp_wait_for(
- volatile unsigned int *address,
- unsigned int desired,
- int maximum_usecs
-)
-{
- int iterations;
- volatile int i;
- volatile unsigned int *p = address;
- for (iterations=0 ; iterations < maximum_usecs ; iterations++ ) {
- *p = desired;
- /* XXX hack to make simulator happy */
- }
+void _CPU_SMP_Processor_event_receive( void )
+{
}
+uint32_t _CPU_SMP_Get_current_processor( void )
+{
+ return Schedsim_Current_cpu;
+}
diff --git a/schedsim/shell/schedsim_smpsimple/wrap_thread_dispatch.c b/schedsim/shell/schedsim_smpsimple/wrap_thread_dispatch.c
index f9f29ee..0edefd1 100644
--- a/schedsim/shell/schedsim_smpsimple/wrap_thread_dispatch.c
+++ b/schedsim/shell/schedsim_smpsimple/wrap_thread_dispatch.c
@@ -12,29 +12,49 @@
#include "shell.h"
#include <schedsim_shell.h>
+#include <stdlib.h>
#include <stdio.h>
#include <rtems.h>
-Thread_Control *last_heir = NULL;
-Thread_Control *last_executing = NULL;
+typedef Thread_Control * Thread_Control_ptr;
+extern uint32_t Schedsim_Current_cpu;
+
+Thread_Control_ptr *last_heir = NULL;
+Thread_Control_ptr *last_executing = NULL;
extern void __real__Thread_Dispatch(void);
+void Init__wrap__Thread_Dispatch()
+{
+ last_heir = (Thread_Control_ptr *) calloc( sizeof( Thread_Control_ptr ), _SMP_Processor_count );
+ last_executing = (Thread_Control_ptr *) calloc( sizeof( Thread_Control_ptr ), _SMP_Processor_count );
+}
+
void check_heir_and_executing(void)
{
- if ( last_heir != _Thread_Heir )
+ if ( last_heir[Schedsim_Current_cpu] != _Thread_Heir )
PRINT_HEIR();
- if ( last_executing != _Thread_Executing )
+ if ( last_executing[Schedsim_Current_cpu] != _Thread_Executing )
PRINT_EXECUTING();
- last_heir = _Thread_Heir;
- last_executing = _Thread_Executing;
+ last_heir[Schedsim_Current_cpu] = _Thread_Heir;
+ last_executing[Schedsim_Current_cpu] = _Thread_Executing;
}
void __wrap__Thread_Dispatch(void)
{
- check_heir_and_executing();
+ uint32_t cpu;
+ uint32_t current_cpu;
+
+ current_cpu = Schedsim_Current_cpu;
+ for ( cpu=0 ; cpu < _SMP_Processor_count ; cpu++ ) {
+ Schedsim_Current_cpu = cpu;
+ check_heir_and_executing();
__real__Thread_Dispatch();
- check_heir_and_executing();
+ check_heir_and_executing();
+ }
+
+ Schedsim_Current_cpu = current_cpu;
}
+