Diffstat (limited to 'cpukit/score')
-rw-r--r--  cpukit/score/cpu/i386/rtems/score/i386.h     136
-rw-r--r--  cpukit/score/cpu/m68k/rtems/score/m68k.h     239
-rw-r--r--  cpukit/score/cpu/powerpc/asm.h                10
-rw-r--r--  cpukit/score/cpu/powerpc/rtems/asm.h          10
-rw-r--r--  cpukit/score/cpu/powerpc/rtems/score/ppc.h   157
-rw-r--r--  cpukit/score/include/rtems/score/object.h     34
-rw-r--r--  cpukit/score/include/rtems/score/thread.h     17
-rw-r--r--  cpukit/score/src/Makefile.am                  19
8 files changed, 603 insertions, 19 deletions
diff --git a/cpukit/score/cpu/i386/rtems/score/i386.h b/cpukit/score/cpu/i386/rtems/score/i386.h
index f113ebee4e..ca1af66fff 100644
--- a/cpukit/score/cpu/i386/rtems/score/i386.h
+++ b/cpukit/score/cpu/i386/rtems/score/i386.h
@@ -144,6 +144,142 @@ static inline unsigned int i386_swap_U16(
}
+/*
+ * Added for paging management
+ */
+
+static inline unsigned int i386_get_cr0()
+{
+ register unsigned int segment = 0;
+
+ asm volatile ( "movl %%cr0,%0" : "=r" (segment) : "0" (segment) );
+
+ return segment;
+}
+
+static inline void i386_set_cr0(unsigned int segment)
+{
+ asm volatile ( "movl %0,%%cr0" : "=r" (segment) : "0" (segment) );
+}
+
+static inline unsigned int i386_get_cr2()
+{
+ register unsigned int segment = 0;
+
+ asm volatile ( "movl %%cr2,%0" : "=r" (segment) : "0" (segment) );
+
+ return segment;
+}
+
+static inline unsigned int i386_get_cr3()
+{
+ register unsigned int segment = 0;
+
+ asm volatile ( "movl %%cr3,%0" : "=r" (segment) : "0" (segment) );
+
+ return segment;
+}
+
+static inline void i386_set_cr3(unsigned int segment)
+{
+ asm volatile ( "movl %0,%%cr3" : "=r" (segment) : "0" (segment) );
+}
+
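These accessors support paging setup: CR3 holds the page-directory base and CR0 bit 31 (PG) turns translation on. A minimal sketch of how they would be combined; the mask and helper name below are illustrative and not part of this change:

    #define I386_CR0_PAGING 0x80000000               /* PG bit of CR0 (assumed mask) */

    static inline void i386_enable_paging( unsigned int page_directory_base )
    {
      i386_set_cr3( page_directory_base );           /* point CR3 at the page directory   */
      i386_set_cr0( i386_get_cr0() | I386_CR0_PAGING ); /* set PG to enable translation   */
    }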
+/*
+ * Disable the entire cache
+ */
+static inline void _CPU_disable_cache() {
+ cr0 regCr0;
+
+ regCr0.i = i386_get_cr0();
+ regCr0.cr0.page_level_cache_disable = 1;
+ regCr0.cr0.no_write_through = 1;
+ i386_set_cr0( regCr0.i );
+ rtems_flush_entire_data_cache();
+}
+
+/*
+ * Enable the entire cache
+ */
+static inline void _CPU_enable_cache() {
+ cr0 regCr0;
+
+ regCr0.i = i386_get_cr0();
+ regCr0.cr0.page_level_cache_disable = 0;
+ regCr0.cr0.no_write_through = 0;
+ i386_set_cr0( regCr0.i );
+ /*rtems_flush_entire_data_cache();*/
+}
+
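The cr0 type used by the two routines above is not defined in this diff; it is expected to come from the i386 registers header. A sketch of the layout they assume follows: the two field names match the code above, while the remaining bit assignments are taken from the Intel CR0 definition and are illustrative only.

    typedef union {
      unsigned int i;                              /* raw 32-bit register value */
      struct {
        unsigned int protection_enable        : 1; /* PE, bit 0  */
        unsigned int monitor_coprocessor      : 1; /* MP, bit 1  */
        unsigned int coprocessor_emulation    : 1; /* EM, bit 2  */
        unsigned int task_switched            : 1; /* TS, bit 3  */
        unsigned int extension_type           : 1; /* ET, bit 4  */
        unsigned int numeric_error            : 1; /* NE, bit 5  */
        unsigned int reserved1                : 10;
        unsigned int write_protect            : 1; /* WP, bit 16 */
        unsigned int reserved2                : 1;
        unsigned int alignment_mask           : 1; /* AM, bit 18 */
        unsigned int reserved3                : 10;
        unsigned int no_write_through         : 1; /* NW, bit 29 */
        unsigned int page_level_cache_disable : 1; /* CD, bit 30 */
        unsigned int paging                   : 1; /* PG, bit 31 */
      } cr0;                                       /* accessed as regCr0.cr0.<field> */
    } cr0;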
+/*
+ * CACHE MANAGER: The following functions are CPU-specific.
+ * They provide the basic implementation for the rtems_* cache
+ * management routines. If a given function has no meaning for the CPU,
+ * it does nothing by default.
+ *
+ * FIXME: Definitions for I386_CACHE_ALIGNMENT are missing above for
+ * each CPU. The routines below should be implemented per CPU,
+ * to accommodate the capabilities of each.
+ */
+
+/* FIXME: I don't belong here. */
+#define I386_CACHE_ALIGNMENT 16
+
+#if defined(I386_CACHE_ALIGNMENT)
+#define _CPU_DATA_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
+#define _CPU_INST_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
+
+static inline void _CPU_flush_1_data_cache_line (const void * d_addr) {}
+static inline void _CPU_invalidate_1_data_cache_line (const void * d_addr) {}
+static inline void _CPU_freeze_data_cache (void) {}
+static inline void _CPU_unfreeze_data_cache (void) {}
+static inline void _CPU_invalidate_1_inst_cache_line (const void * d_addr) {}
+static inline void _CPU_freeze_inst_cache (void) {}
+static inline void _CPU_unfreeze_inst_cache (void) {}
+
+static inline void _CPU_flush_entire_data_cache (
+ const void * d_addr )
+{
+ asm ("wbinvd");
+}
+static inline void _CPU_invalidate_entire_data_cache (
+ const void * d_addr )
+{
+ asm ("invd");
+}
+
+static inline void _CPU_enable_data_cache (
+ void )
+{
+ _CPU_enable_cache();
+}
+
+static inline void _CPU_disable_data_cache (
+ void )
+{
+ _CPU_disable_cache();
+}
+
+static inline void _CPU_invalidate_entire_inst_cache (
+ const void * i_addr )
+{
+ asm ("invd");
+}
+
+static inline void _CPU_enable_inst_cache (
+ void )
+{
+ _CPU_enable_cache();
+}
+
+static inline void _CPU_disable_inst_cache (
+ void )
+{
+ _CPU_disable_cache();
+}
+#endif
+
+
/* routines */
/*
diff --git a/cpukit/score/cpu/m68k/rtems/score/m68k.h b/cpukit/score/cpu/m68k/rtems/score/m68k.h
index 64639220dd..c38a9a13ed 100644
--- a/cpukit/score/cpu/m68k/rtems/score/m68k.h
+++ b/cpukit/score/cpu/m68k/rtems/score/m68k.h
@@ -157,6 +157,7 @@ extern "C" {
# endif
#elif defined(__mc68302__)
+
#define CPU_MODEL_NAME "m68302"
#define M68K_HAS_VBR 0
#define M68K_HAS_SEPARATE_STACKS 0
@@ -350,16 +351,238 @@ static inline unsigned int m68k_swap_u16(
return( swapped );
}
-/* XXX this is only valid for some m68k family members and should be fixed */
+#define CPU_swap_u32( value ) m68k_swap_u32( value )
+#define CPU_swap_u16( value ) m68k_swap_u16( value )
+
+
+/*
+ * _CPU_virtual_to_physical
+ *
+ * DESCRIPTION:
+ *
+ * This function is used to map virtual addresses to physical
+ * addresses.
+ *
+ * FIXME: ASSUMES THAT VIRTUAL ADDRESSES ARE THE SAME AS THE
+ * PHYSICAL ADDRESSES
+ */
+static inline void * _CPU_virtual_to_physical (
+ const void * d_addr )
+{
+ return (void *) d_addr;
+}
+
+
+/*
+ * Since the cacr is common to all mc680x0, provide macros
+ * for masking values in that register.
+ */
-#define m68k_enable_caching() \
- { register unsigned32 _ctl=0x01; \
- asm volatile ( "movec %0,%%cacr" \
- : "=d" (_ctl) : "0" (_ctl) ); \
+/*
+ * Used to clear bits in the cacr.
+ */
+#define _CPU_CACR_AND(mask) \
+ { \
+ register unsigned long _value = mask; \
+ register unsigned long _ctl = 0; \
+ asm volatile ( "movec %%cacr, %0; /* read the cacr */ \
+ andl %2, %0; /* and with _val */ \
+ movec %1, %%cacr" /* write the cacr */ \
+ : "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" ); \
}
-#define CPU_swap_u32( value ) m68k_swap_u32( value )
-#define CPU_swap_u16( value ) m68k_swap_u16( value )
+
+/*
+ * Used to set bits in the cacr.
+ */
+#define _CPU_CACR_OR(mask) \
+ { \
+ register unsigned long _value = mask; \
+ register unsigned long _ctl = 0; \
+ asm volatile ( "movec %%cacr, %0; /* read the cacr */ \
+ orl %2, %0; /* or with _val */ \
+ movec %1, %%cacr" /* write the cacr */ \
+ : "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" ); \
+ }
+
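The hexadecimal constants passed to these macros in the 68020/68030 routines below each select a single CACR control bit. For reference, the bit assignments being assumed (per the 680x0 CACR layout; the symbolic names are illustrative and not defined by this change):

    #define M68K_CACR_EI   0x00000001   /* Enable Instruction cache           */
    #define M68K_CACR_FI   0x00000002   /* Freeze Instruction cache           */
    #define M68K_CACR_CEI  0x00000004   /* Clear Entry in Instruction cache   */
    #define M68K_CACR_CI   0x00000008   /* Clear Instruction cache            */
    #define M68K_CACR_ED   0x00000100   /* Enable Data cache (68030)          */
    #define M68K_CACR_FD   0x00000200   /* Freeze Data cache (68030)          */
    #define M68K_CACR_CED  0x00000400   /* Clear Entry in Data cache (68030)  */
    #define M68K_CACR_CD   0x00000800   /* Clear Data cache (68030)           */

    /* e.g. _CPU_enable_inst_cache() below is simply _CPU_CACR_OR( M68K_CACR_EI ) */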
+
+/*
+ * CACHE MANAGER: The following functions are CPU-specific.
+ * They provide the basic implementation for the rtems_* cache
+ * management routines. If a given function has no meaning for the CPU,
+ * it does nothing by default.
+ */
+#if ( defined(__mc68020__) || defined(__mc68030__) )
+#define M68K_INST_CACHE_ALIGNMENT 16
+
+#if defined(__mc68030__)
+#define M68K_DATA_CACHE_ALIGNMENT 16
+
+/* Only the mc68030 has a data cache; it is writethrough only. */
+
+static inline void _CPU_flush_1_data_cache_line ( const void * d_addr ) {}
+static inline void _CPU_flush_entire_data_cache ( const void * d_addr ) {}
+
+static inline void _CPU_invalidate_1_data_cache_line (
+ const void * d_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
+ asm volatile ( "movec %0, %%caar" :: "a" (p_address) ); /* write caar */
+ _CPU_CACR_OR(0x00000400);
+}
+
+static inline void _CPU_invalidate_entire_data_cache (
+ void )
+{
+ _CPU_CACR_OR( 0x00000800 );
+}
+
+static inline void _CPU_freeze_data_cache (
+ void )
+{
+ _CPU_CACR_OR( 0x00000200 );
+}
+
+static inline void _CPU_unfreeze_data_cache (
+ void )
+{
+ _CPU_CACR_AND( 0xFFFFFDFF );
+}
+
+static inline void _CPU_enable_data_cache ( void )
+{
+ _CPU_CACR_OR( 0x00000100 );
+}
+static inline void _CPU_disable_data_cache ( void )
+{
+ _CPU_CACR_AND( 0xFFFFFEFF );
+}
+#endif
+
+
+/* Both the 68020 and 68030 have instruction caches */
+
+static inline void _CPU_invalidate_1_inst_cache_line (
+ const void * d_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
+ asm volatile ( "movec %0, %%caar" :: "a" (p_address) ); /* write caar */
+ _CPU_CACR_OR( 0x00000004 );
+}
+
+static inline void _CPU_invalidate_entire_inst_cache (
+ void )
+{
+ _CPU_CACR_OR( 0x00000008 );
+}
+
+static inline void _CPU_freeze_inst_cache (
+ void )
+{
+ _CPU_CACR_OR( 0x00000002);
+}
+
+static inline void _CPU_unfreeze_inst_cache (
+ void )
+{
+ _CPU_CACR_AND( 0xFFFFFFFD );
+}
+
+static inline void _CPU_enable_inst_cache ( void )
+{
+ _CPU_CACR_OR( 0x00000001 );
+}
+
+static inline void _CPU_disable_inst_cache ( void )
+{
+ _CPU_CACR_AND( 0xFFFFFFFE );
+}
+
+
+#elif ( defined(__mc68040__) || defined (__mc68060__) )
+
+#define M68K_INST_CACHE_ALIGNMENT 16
+#define M68K_DATA_CACHE_ALIGNMENT 16
+
+/* Cannot be frozen */
+static inline void _CPU_freeze_data_cache ( void ) {}
+static inline void _CPU_unfreeze_data_cache ( void ) {}
+static inline void _CPU_freeze_inst_cache ( void ) {}
+static inline void _CPU_unfreeze_inst_cache ( void ) {}
+
+static inline void _CPU_flush_1_data_cache_line (
+ const void * d_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
+ asm volatile ( "cpushl %%dc,(%0)" :: "a" (p_address) );
+}
+
+static inline void _CPU_invalidate_1_data_cache_line (
+ const void * d_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
+ asm volatile ( "cinvl %%dc,(%0)" :: "a" (p_address) );
+}
+
+static inline void _CPU_flush_entire_data_cache (
+ void )
+{
+ asm volatile ( "cpusha %%dc" :: );
+}
+
+static inline void _CPU_invalidate_entire_data_cache (
+ void )
+{
+ asm volatile ( "cinva %%dc" :: );
+}
+
+static inline void _CPU_enable_data_cache (
+ void )
+{
+ _CPU_CACR_OR( 0x80000000 );
+}
+
+static inline void _CPU_disable_data_cache (
+ void )
+{
+ _CPU_CACR_AND( 0x7FFFFFFF );
+}
+
+static inline void _CPU_invalidate_1_inst_cache_line (
+ const void * i_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( i_addr );
+ asm volatile ( "cinvl %%ic,(%0)" :: "a" (p_address) );
+}
+
+static inline void _CPU_invalidate_entire_inst_cache (
+ void )
+{
+ asm volatile ( "cinva %%ic" :: );
+}
+
+static inline void _CPU_enable_inst_cache (
+ void )
+{
+ _CPU_CACR_OR( 0x00008000 );
+}
+
+static inline void _CPU_disable_inst_cache (
+ void )
+{
+ _CPU_CACR_AND( 0xFFFF7FFF );
+}
+#endif
+
+
+#if defined(M68K_DATA_CACHE_ALIGNMENT)
+#define _CPU_DATA_CACHE_ALIGNMENT M68K_DATA_CACHE_ALIGNMENT
+#endif
+
+#if defined(M68K_INST_CACHE_ALIGNMENT)
+#define _CPU_INST_CACHE_ALIGNMENT M68K_INST_CACHE_ALIGNMENT
+#endif
+
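The per-line primitives above are the building blocks for range operations in a generic cache manager. A minimal sketch of flushing an arbitrary data range with them, assuming a data cache is present; the helper name is hypothetical:

    static inline void _CPU_flush_data_range( const void *addr, unsigned long size )
    {
      const char *p   = (const char *) addr;
      const char *end = p + size;

      /* align down to the start of the first cache line touched by the range */
      p = (const char *) ((unsigned long) p & ~(_CPU_DATA_CACHE_ALIGNMENT - 1));

      while ( p < end ) {
        _CPU_flush_1_data_cache_line( p );
        p += _CPU_DATA_CACHE_ALIGNMENT;
      }
    }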
#endif /* !ASM */
@@ -367,5 +590,5 @@ static inline unsigned int m68k_swap_u16(
}
#endif
-#endif
+#endif /* __M68K_h */
/* end of include file */
diff --git a/cpukit/score/cpu/powerpc/asm.h b/cpukit/score/cpu/powerpc/asm.h
index af14c95665..3c2e28ad5c 100644
--- a/cpukit/score/cpu/powerpc/asm.h
+++ b/cpukit/score/cpu/powerpc/asm.h
@@ -164,14 +164,20 @@
*/
#define srr0 0x01a
#define srr1 0x01b
+#ifdef ppc403
#define srr2 0x3de /* IBM 400 series only */
#define srr3 0x3df /* IBM 400 series only */
+#endif /* ppc403 */
+
#define sprg0 0x110
#define sprg1 0x111
#define sprg2 0x112
#define sprg3 0x113
+#define dar 0x013 /* Data Address Register */
+#define dec 0x016 /* Decrementer Register */
+#if defined(ppc403)
/* the following SPR/DCR registers exist only in IBM 400 series */
#define dear 0x3d5
#define evpr 0x3d6 /* SPR: exception vector prefix register */
@@ -190,9 +196,13 @@
#define br7 0x087 /* DCR: memory bank register 7 */
/* end of IBM400 series register definitions */
+#elif defined(mpc860) || defined(mpc821)
/* The following registers are for the MPC8x0 */
#define der 0x095 /* Debug Enable Register */
+#define ictrl 0x09E /* Instruction Support Control Register */
+#define immr 0x27E /* Internal Memory Map Register */
/* end of MPC8x0 registers */
+#endif
/*
 * Following must be tailored for a particular flavor of the C compiler.
diff --git a/cpukit/score/cpu/powerpc/rtems/asm.h b/cpukit/score/cpu/powerpc/rtems/asm.h
index af14c95665..3c2e28ad5c 100644
--- a/cpukit/score/cpu/powerpc/rtems/asm.h
+++ b/cpukit/score/cpu/powerpc/rtems/asm.h
@@ -164,14 +164,20 @@
*/
#define srr0 0x01a
#define srr1 0x01b
+#ifdef ppc403
#define srr2 0x3de /* IBM 400 series only */
#define srr3 0x3df /* IBM 400 series only */
+#endif /* ppc403 */
+
#define sprg0 0x110
#define sprg1 0x111
#define sprg2 0x112
#define sprg3 0x113
+#define dar 0x013 /* Data Address Register */
+#define dec 0x016 /* Decrementer Register */
+#if defined(ppc403)
/* the following SPR/DCR registers exist only in IBM 400 series */
#define dear 0x3d5
#define evpr 0x3d6 /* SPR: exception vector prefix register */
@@ -190,9 +196,13 @@
#define br7 0x087 /* DCR: memory bank register 7 */
/* end of IBM400 series register definitions */
+#elif defined(mpc860) || defined(mpc821)
/* The following registers are for the MPC8x0 */
#define der 0x095 /* Debug Enable Register */
+#define ictrl 0x09E /* Instruction Support Control Register */
+#define immr 0x27E /* Internal Memory Map Register */
/* end of MPC8x0 registers */
+#endif
/*
 * Following must be tailored for a particular flavor of the C compiler.
diff --git a/cpukit/score/cpu/powerpc/rtems/score/ppc.h b/cpukit/score/cpu/powerpc/rtems/score/ppc.h
index 83a54bfa25..682675efde 100644
--- a/cpukit/score/cpu/powerpc/rtems/score/ppc.h
+++ b/cpukit/score/cpu/powerpc/rtems/score/ppc.h
@@ -44,6 +44,8 @@
extern "C" {
#endif
+#include <rtems/score/ppctypes.h>
+
/*
* Define the name of the CPU family.
*/
@@ -220,6 +222,7 @@ extern "C" {
#elif defined(mpc860)
/*
* Added by Jay Monkman (jmonkman@frasca.com) 6/28/98
+ * with some changes by Darlene Stewart (Darlene.Stewart@iit.nrc.ca)
*/
#define CPU_MODEL_NAME "PowerPC MPC860"
@@ -231,7 +234,6 @@ extern "C" {
#define PPC_HAS_FPU 0
#define PPC_HAS_DOUBLE 0
#define PPC_USE_MULTIPLE 1
-#define PPC_USE_SPRG 1
#define PPC_MSR_0 0x00009000
#define PPC_MSR_1 0x00001000
@@ -382,6 +384,157 @@ extern "C" {
#error "Undefined power of 2 for PPC_CACHE_ALIGNMENT"
#endif
+#ifndef ASM
+
+/*
+ * CACHE MANAGER: The following functions are CPU-specific.
+ * They provide the basic implementation for the rtems_* cache
+ * management routines. If a given function has no meaning for the CPU,
+ * it does nothing by default.
+ *
+ * FIXME: Some functions simply have not been implemented.
+ */
+
+#if defined(ppc603) /* And possibly others */
+#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+
+/* Helpful macros */
+#define PPC_Get_HID0( _value ) \
+ do { \
+ _value = 0; /* to avoid warnings */ \
+ asm volatile( \
+ "mfspr %0, 0x3f0;" /* get HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+#define PPC_Set_HID0( _value ) \
+ do { \
+ asm volatile( \
+ "isync;" \
+ "mtspr 0x3f0, %0;" /* load HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+static inline void _CPU_enable_data_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value |= 0x00004000; /* set DCE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_disable_data_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value &= 0xFFFFBFFF; /* clear DCE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_enable_inst_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value |= 0x00008000; /* Set ICE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_disable_inst_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value &= 0xFFFF7FFF; /* Clear ICE bit */
+ PPC_Set_HID0( value );
+}
+
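The FIXME above notes that some routines are unimplemented; on the 603 family an entire-cache invalidate is normally done by pulsing the flash-invalidate bits in HID0 (ICFI for the instruction cache, DCFI for the data cache). A hedged sketch using the macros above, with the bit masks assumed from the 603 documentation and the helper name purely illustrative:

    static inline void _CPU_invalidate_entire_inst_cache_sketch( void )
    {
      unsigned32 value;

      PPC_Get_HID0( value );
      value |= 0x00000800;      /* set ICFI: flash-invalidate the I-cache            */
      PPC_Set_HID0( value );
      value &= ~0x00000800;     /* clear ICFI again; not self-clearing on all parts  */
      PPC_Set_HID0( value );
    }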
+#elif ( defined(mpc860) || defined(mpc821) )
+
+#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+
+#define mtspr(_spr,_reg) __asm__ volatile ( "mtspr %0, %1\n" : : "i" ((_spr)), "r" ((_reg)) )
+#define isync __asm__ volatile ("isync\n"::)
+
+static inline void _CPU_flush_1_data_cache_line(
+ const void * _address )
+{
+ register const void *__address = _address;
+ asm volatile ( "dcbf 0,%0" :: "r" (__address) );
+}
+
+static inline void _CPU_invalidate_1_data_cache_line(
+ const void * _address )
+{
+ register const void *__address = _address;
+ asm volatile ( "dcbi 0,%0" :: "r" (__address) );
+}
+
+static inline void _CPU_flush_entire_data_cache ( void ) {}
+static inline void _CPU_invalidate_entire_data_cache ( void ) {}
+static inline void _CPU_freeze_data_cache ( void ) {}
+static inline void _CPU_unfreeze_data_cache ( void ) {}
+
+static inline void _CPU_enable_data_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x2<<24);
+ mtspr( 568, r1 );
+ isync;
+}
+
+static inline void _CPU_disable_data_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x4<<24);
+ mtspr( 568, r1 );
+ isync;
+}
+
+static inline void _CPU_invalidate_1_inst_cache_line(
+ const void * _address )
+{
+ register const void *__address = _address;
+ asm volatile ( "icbi 0,%0" :: "r" (__address) );
+}
+
+static inline void _CPU_invalidate_entire_inst_cache ( void ) {}
+static inline void _CPU_freeze_inst_cache ( void ) {}
+static inline void _CPU_unfreeze_inst_cache ( void ) {}
+
+static inline void _CPU_enable_inst_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x2<<24);
+ mtspr( 560, r1 );
+ isync;
+}
+
+static inline void _CPU_disable_inst_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x4<<24);
+ mtspr( 560, r1 );
+ isync;
+}
+#endif
+
+#endif /* !ASM */
+
/*
* Unless otherwise specified, assume the model has an IP/EP bit to
* set the exception address prefix.
@@ -550,7 +703,7 @@ extern "C" {
#define PPC_IRQ_LVL6 (PPC_STD_IRQ_LAST + 23)
#define PPC_IRQ_IRQ7 (PPC_STD_IRQ_LAST + 24)
#define PPC_IRQ_LVL7 (PPC_STD_IRQ_LAST + 25)
-#define PPC_IRQ_CPM_RESERVED_0 (PPC_STD_IRQ_LAST + 26)
+#define PPC_IRQ_CPM_ERROR (PPC_STD_IRQ_LAST + 26)
#define PPC_IRQ_CPM_PC4 (PPC_STD_IRQ_LAST + 27)
#define PPC_IRQ_CPM_PC5 (PPC_STD_IRQ_LAST + 28)
#define PPC_IRQ_CPM_SMC2 (PPC_STD_IRQ_LAST + 29)
diff --git a/cpukit/score/include/rtems/score/object.h b/cpukit/score/include/rtems/score/object.h
index 811e33a17e..7640dce3d3 100644
--- a/cpukit/score/include/rtems/score/object.h
+++ b/cpukit/score/include/rtems/score/object.h
@@ -444,6 +444,21 @@ Objects_Control *_Objects_Get (
);
/*
+ * _Objects_Get_by_index
+ *
+ * DESCRIPTION:
+ *
+ * This routine returns a pointer to the object control block at the
+ * given index in the given object information structure, and sets the
+ * location to reflect the outcome of the lookup.
+ */
+
+Objects_Control *_Objects_Get_by_index(
+ Objects_Information *information,
+ unsigned32 index,
+ Objects_Locations *location
+);
+
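As a usage illustration (the walking function below is hypothetical, and it assumes the routine follows the _Objects_Get convention of disabling thread dispatching on a successful local lookup):

    void example_walk_objects( Objects_Information *information )
    {
      unsigned32         index;
      Objects_Locations  location;
      Objects_Control   *the_object;

      for ( index = 1 ; index <= information->maximum ; index++ ) {
        the_object = _Objects_Get_by_index( information, index, &location );
        if ( location != OBJECTS_LOCAL )
          continue;                    /* empty slot, remote object, or error */

        /* ... examine the_object ... */

        _Thread_Enable_dispatch();     /* balance the dispatch disable from the lookup */
      }
    }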
+/*
* _Objects_Get_next
*
* DESCRIPTION:
@@ -460,6 +475,25 @@ Objects_Control *_Objects_Get_next(
);
/*
+ * _Objects_Local_iterate
+ *
+ * DESCRIPTION:
+ *
+ * This function invokes the callback function for each existing object
+ * of the type specified by the information block pointer. Iteration
+ * continues until either all objects have been processed, or, if
+ * break_on_error is TRUE, until an invocation of the callback returns
+ * something other than 0.
+ */
+
+unsigned32 _Objects_Local_iterate(
+ Objects_Information *information,
+ unsigned32 (*callback)(Objects_Control *object, void * arg),
+ void * arg,
+ boolean break_on_error
+);
+
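For illustration, a counting callback (both helper names are hypothetical); with break_on_error set to FALSE the walk always visits every existing object:

    static unsigned32 count_one_object( Objects_Control *object, void *arg )
    {
      unsigned32 *count = (unsigned32 *) arg;

      (void) object;          /* the object itself is not needed for counting */
      (*count)++;
      return 0;               /* 0 == continue; non-zero stops when break_on_error is TRUE */
    }

    static unsigned32 count_local_objects( Objects_Information *information )
    {
      unsigned32 count = 0;

      _Objects_Local_iterate( information, count_one_object, &count, FALSE );
      return count;
    }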
+/*
* Pieces of object.inl are promoted out to the user
*/
diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
index 4c8032f558..7739925e66 100644
--- a/cpukit/score/include/rtems/score/thread.h
+++ b/cpukit/score/include/rtems/score/thread.h
@@ -734,6 +734,23 @@ Thread_Control *_Thread_Get (
#endif
/*
+ * _Thread_Local_iterate
+ *
+ * DESCRIPTION:
+ *
+ * This function invokes the callback function for each existing thread.
+ * Iteration continues until either all threads have been processed, or,
+ * if break_on_error is TRUE, until an invocation of the callback returns
+ * an integer value other than 0.
+ */
+
+unsigned32 _Thread_Local_iterate(
+ unsigned32 (*callback)(Thread_Control *the_thread, void * arg),
+ void * arg,
+ boolean break_on_error
+);
+
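Similarly, a sketch of an early-exit scan using break_on_error (helper names hypothetical): the callback returns non-zero as soon as a thread at the wanted priority is seen, which ends the iteration.

    static unsigned32 is_at_priority( Thread_Control *the_thread, void *arg )
    {
      Priority_Control *wanted = (Priority_Control *) arg;

      return ( the_thread->current_priority == *wanted ) ? 1 : 0;
    }

    static void scan_for_priority( Priority_Control wanted )
    {
      _Thread_Local_iterate( is_at_priority, &wanted, TRUE );
    }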
+/*
* _Thread_Idle_body
*
* DESCRIPTION:
diff --git a/cpukit/score/src/Makefile.am b/cpukit/score/src/Makefile.am
index c348a606f1..97a2597cda 100644
--- a/cpukit/score/src/Makefile.am
+++ b/cpukit/score/src/Makefile.am
@@ -27,17 +27,18 @@ OBJECT_C_FILES = object.c objectallocate.c objectallocatebyindex.c \
objectclearname.c objectcomparenameraw.c objectcomparenamestring.c \
objectcopynameraw.c objectcopynamestring.c objectextendinformation.c \
objectfree.c objectget.c objectgetbyindex.c objectgetnext.c \
- objectinitializeinformation.c objectnametoid.c objectshrinkinformation.c
+ objectinitializeinformation.c objectlocaliterate.c objectnametoid.c \
+ objectshrinkinformation.c
THREAD_C_FILES = thread.c threadchangepriority.c threadclearstate.c \
threadclose.c threadcreateidle.c threaddelayended.c threaddispatch.c \
threadevaluatemode.c threadget.c threadhandler.c threadidlebody.c \
- threadinitialize.c threadloadenv.c threadready.c threadresettimeslice.c \
- threadreset.c threadrestart.c threadresume.c threadrotatequeue.c \
- threadsetpriority.c threadsetstate.c threadsettransient.c \
- threadstackallocate.c threadstackfree.c threadstart.c \
- threadstartmultitasking.c threadsuspend.c threadtickletimeslice.c \
- threadyieldprocessor.c
+ threadinitialize.c threadloadenv.c threadlocaliterate.c threadready.c \
+ threadresettimeslice.c threadreset.c threadrestart.c threadresume.c \
+ threadrotatequeue.c threadsetpriority.c threadsetstate.c \
+ threadsettransient.c threadstackallocate.c threadstackfree.c \
+ threadstart.c threadstartmultitasking.c threadsuspend.c \
+ threadtickletimeslice.c threadyieldprocessor.c
THREADQ_C_FILES = threadq.c threadqdequeue.c threadqdequeuefifo.c \
threadqdequeuepriority.c threadqenqueue.c threadqenqueuefifo.c \
@@ -53,8 +54,8 @@ WATCHDOG_C_FILES = watchdog.c watchdogadjust.c watchdoginsert.c \
watchdogremove.c watchdogtickle.c
STD_C_FILES = apiext.c chain.c $(CORE_MESSAGE_QUEUE_C_FILES) \
- $(CORE_MUTEX_C_FILES) $(CORE_SEMAPHORE_C_FILES) $(HEAP_C_FILES) interr.c \
- isr.c $(OBJECT_C_FILES) $(THREAD_C_FILES) $(THREADQ_C_FILES) \
+ $(CORE_MUTEX_C_FILES) $(CORE_SEMAPHORE_C_FILES) $(HEAP_C_FILES) \
+ interr.c isr.c $(OBJECT_C_FILES) $(THREAD_C_FILES) $(THREADQ_C_FILES) \
$(TOD_C_FILES) userext.c $(WATCHDOG_C_FILES) wkspace.c
if HAS_MP