summaryrefslogtreecommitdiffstats
path: root/c/src/exec
diff options
context:
space:
mode:
authorJoel Sherrill <joel.sherrill@OARcorp.com>2000-06-12 19:57:02 +0000
committerJoel Sherrill <joel.sherrill@OARcorp.com>2000-06-12 19:57:02 +0000
commit8ef38186faea3d9b5e6f0f1242f668cb7e7a3d52 (patch)
tree9253f150814c99167239f7c2cc850cdd9d92c003 /c/src/exec
parentf481c39c015a71a6747d8e0f4f923fabe1086ffc (diff)
downloadrtems-8ef38186faea3d9b5e6f0f1242f668cb7e7a3d52.tar.bz2
Patch from John Cotton <john.cotton@nrc.ca>, Charles-Antoine Gauthier
<charles.gauthier@iit.nrc.ca>, and Darlene A. Stewart <Darlene.Stewart@nrc.ca> to add support for a number of very significant things: + BSPs for many variations on the Motorola MBX8xx board series + Cache Manager including initial support for m68040 and PowerPC + Rework of mpc8xx libcpu code so all mpc8xx CPUs now use same code base. + Rework of eth_comm BSP to utilize above. John reports this works on the 821 and 860
Diffstat (limited to 'c/src/exec')
-rw-r--r--c/src/exec/libcsupport/include/sys/termios.h1
-rw-r--r--c/src/exec/libcsupport/src/malloc.c22
-rw-r--r--c/src/exec/libcsupport/src/termios.c8
-rw-r--r--c/src/exec/posix/src/waitpid.c2
-rw-r--r--c/src/exec/rtems/include/rtems/rtems/Makefile.am6
-rw-r--r--c/src/exec/rtems/include/rtems/rtems/cache.h140
-rw-r--r--c/src/exec/rtems/src/Makefile.am2
-rw-r--r--c/src/exec/rtems/src/cache.c252
-rw-r--r--c/src/exec/score/cpu/i386/rtems/score/i386.h136
-rw-r--r--c/src/exec/score/cpu/m68k/rtems/score/m68k.h239
-rw-r--r--c/src/exec/score/cpu/powerpc/asm.h10
-rw-r--r--c/src/exec/score/cpu/powerpc/old_exception_processing/Makefile.am2
-rw-r--r--c/src/exec/score/cpu/powerpc/old_exception_processing/cpu.c7
-rw-r--r--c/src/exec/score/cpu/powerpc/old_exception_processing/cpu.h9
-rw-r--r--c/src/exec/score/cpu/powerpc/rtems/score/ppc.h157
-rw-r--r--c/src/exec/score/cpu/powerpc/shared/asm.h10
-rw-r--r--c/src/exec/score/cpu/powerpc/shared/ppc.h157
-rw-r--r--c/src/exec/score/include/rtems/score/object.h34
-rw-r--r--c/src/exec/score/include/rtems/score/thread.h17
-rw-r--r--c/src/exec/score/src/Makefile.am19
-rw-r--r--c/src/exec/score/src/objectlocaliterate.c77
-rw-r--r--c/src/exec/score/src/threadlocaliterate.c77
22 files changed, 1355 insertions, 29 deletions
diff --git a/c/src/exec/libcsupport/include/sys/termios.h b/c/src/exec/libcsupport/include/sys/termios.h
index 7cc28d7d38..199bb2aef2 100644
--- a/c/src/exec/libcsupport/include/sys/termios.h
+++ b/c/src/exec/libcsupport/include/sys/termios.h
@@ -181,6 +181,7 @@ int tcflow(int, int);
int tcflush(int, int);
int tcgetattr(int, struct termios *);
int tcsetattr(int, int, struct termios *);
+int tcdrain(int);
pid_t tcgetprgrp(int);
int tcsetprgrp(int, pid_t);
int tcsendbreak(int, int);
diff --git a/c/src/exec/libcsupport/src/malloc.c b/c/src/exec/libcsupport/src/malloc.c
index 08660d75cc..dc6824891a 100644
--- a/c/src/exec/libcsupport/src/malloc.c
+++ b/c/src/exec/libcsupport/src/malloc.c
@@ -418,5 +418,27 @@ void _free_r(
{
free( ptr );
}
+
+
+/*
+ * rtems_cache_aligned_malloc
+ *
+ * DESCRIPTION:
+ *
+ * This function is used to allocate storage that spans an
+ * integral number of cache blocks.
+ */
+RTEMS_INLINE_ROUTINE void * rtems_cache_aligned_malloc (
+ size_t nbytes
+)
+{
+ /*
+ * Arrange to have the user storage start on the first cache
+ * block beyond the header.
+ */
+ return (void *) ((((unsigned long) malloc( nbytes + _CPU_DATA_CACHE_ALIGNMENT - 1 ))
+ + _CPU_DATA_CACHE_ALIGNMENT - 1 ) &(~(_CPU_DATA_CACHE_ALIGNMENT - 1)) );
+}
+
#endif
diff --git a/c/src/exec/libcsupport/src/termios.c b/c/src/exec/libcsupport/src/termios.c
index 7883e94b63..5052502e76 100644
--- a/c/src/exec/libcsupport/src/termios.c
+++ b/c/src/exec/libcsupport/src/termios.c
@@ -23,6 +23,7 @@
#include <stdlib.h>
#include <termios.h>
#include <unistd.h>
+#include <sys/filio.h>
/*
* FreeBSD does not support a full POSIX termios so we have to help it out
@@ -243,7 +244,7 @@ rtems_termios_open (
/*
* Set default parameters
*/
- tty->termios.c_iflag = BRKINT | ICRNL | IMAXBEL;
+ tty->termios.c_iflag = BRKINT | ICRNL | IXON | IMAXBEL;
tty->termios.c_oflag = OPOST | ONLCR | XTABS;
tty->termios.c_cflag = B9600 | CS8 | CREAD | CLOCAL;
tty->termios.c_lflag = ISIG | ICANON | IEXTEN | ECHO | ECHOK | ECHOE | ECHOCTL;
@@ -474,6 +475,11 @@ rtems_termios_ioctl (void *arg)
case RTEMS_IO_TCDRAIN:
drainOutput (tty);
break;
+
+ case FIONREAD:
+ /* Half guess that this is the right operation */
+ *(int *)args->buffer = tty->ccount - tty->cindex;
+ break;
}
rtems_semaphore_release (tty->osem);
args->ioctl_return = sc;
diff --git a/c/src/exec/posix/src/waitpid.c b/c/src/exec/posix/src/waitpid.c
index d52177961d..e367e87074 100644
--- a/c/src/exec/posix/src/waitpid.c
+++ b/c/src/exec/posix/src/waitpid.c
@@ -1,5 +1,5 @@
/*
- * wait() - POSIX 1003.1b 3.2.1
+ * waitpid() - POSIX 1003.1 3.2.1
*
* $Id$
*/
diff --git a/c/src/exec/rtems/include/rtems/rtems/Makefile.am b/c/src/exec/rtems/include/rtems/rtems/Makefile.am
index f9c8d82fef..af16908428 100644
--- a/c/src/exec/rtems/include/rtems/rtems/Makefile.am
+++ b/c/src/exec/rtems/include/rtems/rtems/Makefile.am
@@ -8,9 +8,9 @@ AUTOMAKE_OPTIONS = foreign 1.4
MP_H_FILES = eventmp.h mp.h msgmp.h partmp.h regionmp.h semmp.h signalmp.h \
taskmp.h
-STD_H_FILES = asr.h attr.h clock.h config.h dpmem.h event.h eventset.h \
- intr.h message.h modes.h options.h part.h ratemon.h region.h rtemsapi.h \
- sem.h signal.h status.h support.h tasks.h timer.h types.h
+STD_H_FILES = asr.h attr.h cache.h clock.h config.h dpmem.h event.h eventset.h \
+ intr.h message.h modes.h options.h part.h ratemon.h region.h rtemsapi.h sem.h \
+ signal.h status.h support.h tasks.h timer.h types.h
if HAS_MP
H_FILES = $(STD_H_FILES) $(MP_H_FILES)
diff --git a/c/src/exec/rtems/include/rtems/rtems/cache.h b/c/src/exec/rtems/include/rtems/rtems/cache.h
new file mode 100644
index 0000000000..1e71a9bf3a
--- /dev/null
+++ b/c/src/exec/rtems/include/rtems/rtems/cache.h
@@ -0,0 +1,140 @@
+/* cache.h
+ *
+ * Cache Manager
+ *
+ * COPYRIGHT (c) 1989-1999.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ *
+ * The functions in this file define the API to the RTEMS Cache Manager and
+ * are divided into data cache and instruction cache functions. Data cache
+ * functions are only declared if a data cache is supported. Instruction
+ * cache functions are only declared if an instruction cache is supported.
+ * Support for a particular cache exists only if _CPU_x_CACHE_ALIGNMENT is
+ * defined, where x E {DATA, INST}. These definitions are found in the CPU
+ * dependent source files in the supercore, often
+ *
+ * rtems/c/src/exec/score/cpu/CPU/rtems/score/CPU.h
+ *
+ * The functions below are implemented with CPU dependent inline routines
+ * also found in the above file. In the event that a CPU does not support a
+ * specific function, the CPU dependent routine does nothing (but does exist).
+ *
+ * At this point, the Cache Manager makes no considerations, and provides no
+ * support for BSP specific issues such as a secondary cache. In such a system,
+ * the CPU dependent routines would have to be modified, or a BSP layer added
+ * to this Manager.
+ */
+
+#ifndef __CACHE_h
+#define __CACHE_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rtems/system.h>
+#include <sys/types.h>
+
+
+/* THESE FUNCTIONS ONLY EXIST IF WE HAVE A DATA CACHE */
+#if defined(_CPU_DATA_CACHE_ALIGNMENT)
+
+/*
+ * This function is called to flush the data cache by performing cache
+ * copybacks. It must determine how many cache lines need to be copied
+ * back and then perform the copybacks.
+ */
+void rtems_flush_multiple_data_cache_lines( const void *, size_t );
+
+/*
+ * This function is responsible for performing a data cache invalidate.
+ * It must determine how many cache lines need to be invalidated and then
+ * perform the invalidations.
+ */
+void rtems_invalidate_multiple_data_cache_lines( const void *, size_t );
+
+/*
+ * This function is responsible for performing a data cache flush.
+ * It flushes the entire cache.
+ */
+void rtems_flush_entire_data_cache( void );
+
+/*
+ * This function is responsible for performing a data cache
+ * invalidate. It invalidates the entire cache.
+ */
+void rtems_invalidate_entire_data_cache( void );
+
+/*
+ * This function returns the data cache granularity.
+ */
+int rtems_get_data_cache_line_size( void );
+
+/*
+ * This function freezes the data cache.
+ */
+void rtems_freeze_data_cache( void );
+
+/*
+ * This function unfreezes the data cache.
+ */
+void rtems_unfreeze_data_cache( void );
+
+/*
+ * These functions enable/disable the data cache.
+ */
+void rtems_enable_data_cache( void );
+void rtems_disable_data_cache( void );
+#endif
+
+
+/* THESE FUNCTIONS ONLY EXIST IF WE HAVE AN INSTRUCTION CACHE */
+#if defined(_CPU_INST_CACHE_ALIGNMENT)
+
+/*
+ * This function is responsible for performing an instruction cache
+ * invalidate. It must determine how many cache lines need to be invalidated
+ * and then perform the invalidations.
+ */
+void rtems_invalidate_multiple_inst_cache_lines( const void *, size_t );
+
+/*
+ * This function is responsible for performing an instruction cache
+ * invalidate. It invalidates the entire cache.
+ */
+void rtems_invalidate_entire_inst_cache( void );
+
+/*
+ * This function returns the instruction cache granularity.
+ */
+int rtems_get_inst_cache_line_size( void );
+
+/*
+ * This function freezes the instruction cache.
+ */
+void rtems_freeze_inst_cache( void );
+
+/*
+ * This function unfreezes the instruction cache.
+ */
+void rtems_unfreeze_inst_cache( void );
+
+/*
+ * These functions enable/disable the instruction cache.
+ */
+void rtems_enable_inst_cache( void );
+void rtems_disable_inst_cache( void );
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/c/src/exec/rtems/src/Makefile.am b/c/src/exec/rtems/src/Makefile.am
index f597292be1..f2b13c2aa9 100644
--- a/c/src/exec/rtems/src/Makefile.am
+++ b/c/src/exec/rtems/src/Makefile.am
@@ -50,7 +50,7 @@ PARTITION_C_FILES = part.c partcreate.c partdelete.c partgetbuffer.c \
DPMEM_C_FILES = dpmem.c dpmemcreate.c dpmemdelete.c dpmemexternal2internal.c \
dpmemident.c dpmeminternal2external.c
-STD_C_FILES = attr.c $(TASK_C_FILES) $(RATEMON_C_FILES) $(INTR_C_FILES) \
+STD_C_FILES = attr.c cache.c $(TASK_C_FILES) $(RATEMON_C_FILES) $(INTR_C_FILES) \
$(CLOCK_C_FILES) $(TIMER_C_FILES) $(SEMAPHORE_C_FILES) \
$(MESSAGE_QUEUE_C_FILES) $(EVENT_C_FILES) $(SIGNAL_C_FILES) \
$(PARTITION_C_FILES) $(REGION_C_FILES) $(DPMEM_C_FILES)
diff --git a/c/src/exec/rtems/src/cache.c b/c/src/exec/rtems/src/cache.c
new file mode 100644
index 0000000000..82d6410a92
--- /dev/null
+++ b/c/src/exec/rtems/src/cache.c
@@ -0,0 +1,252 @@
+/* cache.c
+ *
+ * Cache Manager
+ *
+ * COPYRIGHT (c) 1989-1999.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ *
+ * The functions in this file define the API to the RTEMS Cache Manager and
+ * are divided into data cache and instruction cache functions. Data cache
+ * functions are only declared if a data cache is supported. Instruction
+ * cache functions are only declared if an instruction cache is supported.
+ * Support for a particular cache exists only if _CPU_x_CACHE_ALIGNMENT is
+ * defined, where x E {DATA, INST}. These definitions are found in the CPU
+ * dependent source files in the supercore, often
+ *
+ * rtems/c/src/exec/score/cpu/CPU/rtems/score/CPU.h
+ *
+ * The functions below are implemented with CPU dependent inline routines
+ * also found in the above file. In the event that a CPU does not support a
+ * specific function, the CPU dependent routine does nothing (but does exist).
+ *
+ * At this point, the Cache Manager makes no considerations, and provides no
+ * support for BSP specific issues such as a secondary cache. In such a system,
+ * the CPU dependent routines would have to be modified, or a BSP layer added
+ * to this Manager.
+ */
+
+#include <rtems/system.h>
+#include <sys/types.h>
+#include <rtems/rtems/cache.h>
+
+
+/*
+ * THESE FUNCTIONS ONLY EXIST IF WE HAVE A DATA CACHE
+ */
+#if defined(_CPU_DATA_CACHE_ALIGNMENT)
+
+/*
+ * This function is called to flush the data cache by performing cache
+ * copybacks. It must determine how many cache lines need to be copied
+ * back and then perform the copybacks.
+ */
+void
+rtems_flush_multiple_data_cache_lines( const void * d_addr, size_t n_bytes )
+{
+ const void * final_address;
+ /*
+ * Set d_addr to the beginning of the cache line; final_address indicates
+ * the last address_t which needs to be pushed. Increment d_addr and push
+ * the resulting line until final_address is passed.
+ */
+ final_address = (void *)((size_t)d_addr + n_bytes - 1);
+ d_addr = (void *)((size_t)d_addr & ~(_CPU_DATA_CACHE_ALIGNMENT - 1));
+ while( d_addr <= final_address ) {
+ _CPU_flush_1_data_cache_line( d_addr );
+ d_addr = (void *)((size_t)d_addr + _CPU_DATA_CACHE_ALIGNMENT);
+ }
+}
+
+
+/*
+ * This function is responsible for performing a data cache invalidate.
+ * It must determine how many cache lines need to be invalidated and then
+ * perform the invalidations.
+ */
+void
+rtems_invalidate_multiple_data_cache_lines( const void * d_addr, size_t n_bytes )
+{
+ const void * final_address;
+ /*
+ * Set d_addr to the beginning of the cache line; final_address indicates
+ * the last address_t which needs to be invalidated. Increment d_addr and
+ * invalidate the resulting line until final_address is passed.
+ */
+ final_address = (void *)((size_t)d_addr + n_bytes - 1);
+ d_addr = (void *)((size_t)d_addr & ~(_CPU_DATA_CACHE_ALIGNMENT - 1));
+ while( final_address >= d_addr ) {
+ _CPU_invalidate_1_data_cache_line( d_addr );
+ d_addr = (void *)((size_t)d_addr + _CPU_DATA_CACHE_ALIGNMENT);
+ }
+}
+
+
+/*
+ * This function is responsible for performing a data cache flush.
+ * It flushes the entire cache.
+ */
+void
+rtems_flush_entire_data_cache( void )
+{
+ /*
+ * Call the CPU-specific routine
+ */
+ _CPU_flush_entire_data_cache();
+
+}
+
+
+/*
+ * This function is responsible for performing a data cache
+ * invalidate. It invalidates the entire cache.
+ */
+void
+rtems_invalidate_entire_data_cache( void )
+{
+ /*
+ * Call the CPU-specific routine
+ */
+ _CPU_invalidate_entire_data_cache();
+}
+
+
+/*
+ * This function returns the data cache granularity.
+ */
+int
+rtems_get_data_cache_line_size( void )
+{
+ return _CPU_DATA_CACHE_ALIGNMENT;
+}
+
+
+/*
+ * This function freezes the data cache; cache lines
+ * are not replaced.
+ */
+void
+rtems_freeze_data_cache( void )
+{
+ _CPU_freeze_data_cache();
+}
+
+
+/*
+ * This function unfreezes the instruction cache.
+ */
+void rtems_unfreeze_data_cache( void )
+{
+ _CPU_unfreeze_data_cache();
+}
+
+
+/* Turn on the data cache. */
+void
+rtems_enable_data_cache( void )
+{
+ _CPU_enable_data_cache();
+}
+
+
+/* Turn off the data cache. */
+void
+rtems_disable_data_cache( void )
+{
+ _CPU_disable_data_cache();
+}
+#endif
+
+
+
+/*
+ * THESE FUNCTIONS ONLY EXIST IF WE HAVE AN INSTRUCTION CACHE
+ */
+#if defined(_CPU_INST_CACHE_ALIGNMENT)
+
+/*
+ * This function is responsible for performing an instruction cache
+ * invalidate. It must determine how many cache lines need to be invalidated
+ * and then perform the invalidations.
+ */
+void
+rtems_invalidate_multiple_inst_cache_lines( const void * i_addr, size_t n_bytes )
+{
+ const void * final_address;
+ /*
+ * Set i_addr to the beginning of the cache line; final_address indicates
+ * the last address_t which needs to be invalidated. Increment i_addr and
+ * invalidate the resulting line until final_address is passed.
+ */
+ final_address = (void *)((size_t)i_addr + n_bytes - 1);
+ i_addr = (void *)((size_t)i_addr & ~(_CPU_INST_CACHE_ALIGNMENT - 1));
+ while( final_address >= i_addr ) {
+ _CPU_invalidate_1_inst_cache_line( i_addr );
+ i_addr = (void *)((size_t)i_addr + _CPU_INST_CACHE_ALIGNMENT);
+ }
+}
+
+
+/*
+ * This function is responsible for performing an instruction cache
+ * invalidate. It invalidates the entire cache.
+ */
+void
+rtems_invalidate_entire_inst_cache( void )
+{
+ /*
+ * Call the CPU-specific routine
+ */
+ _CPU_invalidate_entire_inst_cache();
+}
+
+
+/*
+ * This function returns the instruction cache granularity.
+ */
+int
+rtems_get_inst_cache_line_size( void )
+{
+ return _CPU_INST_CACHE_ALIGNMENT;
+}
+
+
+/*
+ * This function freezes the instruction cache; cache lines
+ * are not replaced.
+ */
+void
+rtems_freeze_inst_cache( void )
+{
+ _CPU_freeze_inst_cache();
+}
+
+
+/*
+ * This function unfreezes the instruction cache.
+ */
+void rtems_unfreeze_inst_cache( void )
+{
+ _CPU_unfreeze_inst_cache();
+}
+
+
+/* Turn on the instruction cache. */
+void
+rtems_enable_inst_cache( void )
+{
+ _CPU_enable_inst_cache();
+}
+
+
+/* Turn off the instruction cache. */
+void
+rtems_disable_inst_cache( void )
+{
+ _CPU_disable_inst_cache();
+}
+#endif
diff --git a/c/src/exec/score/cpu/i386/rtems/score/i386.h b/c/src/exec/score/cpu/i386/rtems/score/i386.h
index f113ebee4e..ca1af66fff 100644
--- a/c/src/exec/score/cpu/i386/rtems/score/i386.h
+++ b/c/src/exec/score/cpu/i386/rtems/score/i386.h
@@ -144,6 +144,142 @@ static inline unsigned int i386_swap_U16(
}
+/*
+ * Added for pagination management
+ */
+
+static inline unsigned int i386_get_cr0()
+{
+ register unsigned int segment = 0;
+
+ asm volatile ( "movl %%cr0,%0" : "=r" (segment) : "0" (segment) );
+
+ return segment;
+}
+
+static inline void i386_set_cr0(unsigned int segment)
+{
+ asm volatile ( "movl %0,%%cr0" : "=r" (segment) : "0" (segment) );
+}
+
+static inline unsigned int i386_get_cr2()
+{
+ register unsigned int segment = 0;
+
+ asm volatile ( "movl %%cr2,%0" : "=r" (segment) : "0" (segment) );
+
+ return segment;
+}
+
+static inline unsigned int i386_get_cr3()
+{
+ register unsigned int segment = 0;
+
+ asm volatile ( "movl %%cr3,%0" : "=r" (segment) : "0" (segment) );
+
+ return segment;
+}
+
+static inline void i386_set_cr3(unsigned int segment)
+{
+ asm volatile ( "movl %0,%%cr3" : "=r" (segment) : "0" (segment) );
+}
+
+/*
+ * Disable the entire cache
+ */
+void _CPU_disable_cache() {
+ cr0 regCr0;
+
+ regCr0.i = i386_get_cr0();
+ regCr0.cr0.page_level_cache_disable = 1;
+ regCr0.cr0.no_write_through = 1;
+ i386_set_cr0( regCr0.i );
+ rtems_flush_entire_data_cache();
+}
+
+/*
+ * Enable the entire cache
+ */
+static inline void _CPU_enable_cache() {
+ cr0 regCr0;
+
+ regCr0.i = i386_get_cr0();
+ regCr0.cr0.page_level_cache_disable = 0;
+ regCr0.cr0.no_write_through = 0;
+ i386_set_cr0( regCr0.i );
+ /*rtems_flush_entire_data_cache();*/
+}
+
+/*
+ * CACHE MANAGER: The following functions are CPU-specific.
+ * They provide the basic implementation for the rtems_* cache
+ * management routines. If a given function has no meaning for the CPU,
+ * it does nothing by default.
+ *
+ * FIXME: Definitions for I386_CACHE_ALIGNMENT are missing above for
+ * each CPU. The routines below should be implemented per CPU,
+ * to accommodate the capabilities of each.
+ */
+
+/* FIXME: I don't belong here. */
+#define I386_CACHE_ALIGNMENT 16
+
+#if defined(I386_CACHE_ALIGNMENT)
+#define _CPU_DATA_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
+#define _CPU_INST_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
+
+static inline void _CPU_flush_1_data_cache_line (const void * d_addr) {}
+static inline void _CPU_invalidate_1_data_cache_line (const void * d_addr) {}
+static inline void _CPU_freeze_data_cache (void) {}
+static inline void _CPU_unfreeze_data_cache (void) {}
+static inline void _CPU_invalidate_1_inst_cache_line (const void * d_addr) {}
+static inline void _CPU_freeze_inst_cache (void) {}
+static inline void _CPU_unfreeze_inst_cache (void) {}
+
+static inline void _CPU_flush_entire_data_cache (
+ const void * d_addr )
+{
+ asm ("wbinvd");
+}
+static inline void _CPU_invalidate_entire_data_cache (
+ const void * d_addr )
+{
+ asm ("invd");
+}
+
+static inline void _CPU_enable_data_cache (
+ void )
+{
+ _CPU_enable_cache();
+}
+
+static inline void _CPU_disable_data_cache (
+ void )
+{
+ _CPU_disable_cache();
+}
+
+static inline void _CPU_invalidate_entire_inst_cache (
+ const void * i_addr )
+{
+ asm ("invd");
+}
+
+static inline void _CPU_enable_inst_cache (
+ void )
+{
+ _CPU_enable_cache();
+}
+
+static inline void _CPU_disable_inst_cache (
+ void )
+{
+ _CPU_disable_cache();
+}
+#endif
+
+
/* routines */
/*
diff --git a/c/src/exec/score/cpu/m68k/rtems/score/m68k.h b/c/src/exec/score/cpu/m68k/rtems/score/m68k.h
index 64639220dd..c38a9a13ed 100644
--- a/c/src/exec/score/cpu/m68k/rtems/score/m68k.h
+++ b/c/src/exec/score/cpu/m68k/rtems/score/m68k.h
@@ -157,6 +157,7 @@ extern "C" {
# endif
#elif defined(__mc68302__)
+
#define CPU_MODEL_NAME "m68302"
#define M68K_HAS_VBR 0
#define M68K_HAS_SEPARATE_STACKS 0
@@ -350,16 +351,238 @@ static inline unsigned int m68k_swap_u16(
return( swapped );
}
-/* XXX this is only valid for some m68k family members and should be fixed */
+#define CPU_swap_u32( value ) m68k_swap_u32( value )
+#define CPU_swap_u16( value ) m68k_swap_u16( value )
+
+
+/*
+ * _CPU_virtual_to_physical
+ *
+ * DESCRIPTION:
+ *
+ * This function is used to map virtual addresses to physical
+ * addresses.
+ *
+ * FIXME: ASSUMES THAT VIRTUAL ADDRESSES ARE THE SAME AS THE
+ * PHYSICAL ADDRESSES
+ */
+static inline void * _CPU_virtual_to_physical (
+ const void * d_addr )
+{
+ return (void *) d_addr;
+}
+
+
+/*
+ * Since the cacr is common to all mc680x0, provide macros
+ * for masking values in that register.
+ */
-#define m68k_enable_caching() \
- { register unsigned32 _ctl=0x01; \
- asm volatile ( "movec %0,%%cacr" \
- : "=d" (_ctl) : "0" (_ctl) ); \
+/*
+ * Used to clear bits in the cacr.
+ */
+#define _CPU_CACR_AND(mask) \
+ { \
+ register unsigned long _value = mask; \
+ register unsigned long _ctl = 0; \
+ asm volatile ( "movec %%cacr, %0; /* read the cacr */ \
+ andl %2, %0; /* and with _val */ \
+ movec %1, %%cacr" /* write the cacr */ \
+ : "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" ); \
}
-#define CPU_swap_u32( value ) m68k_swap_u32( value )
-#define CPU_swap_u16( value ) m68k_swap_u16( value )
+
+/*
+ * Used to set bits in the cacr.
+ */
+#define _CPU_CACR_OR(mask) \
+ { \
+ register unsigned long _value = mask; \
+ register unsigned long _ctl = 0; \
+ asm volatile ( "movec %%cacr, %0; /* read the cacr */ \
+ orl %2, %0; /* or with _val */ \
+ movec %1, %%cacr" /* write the cacr */ \
+ : "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" ); \
+ }
+
+
+/*
+ * CACHE MANAGER: The following functions are CPU-specific.
+ * They provide the basic implementation for the rtems_* cache
+ * management routines. If a given function has no meaning for the CPU,
+ * it does nothing by default.
+ */
+#if ( defined(__mc68020__) || defined(__mc68030__) )
+#define M68K_INST_CACHE_ALIGNMENT 16
+
+#if defined(__mc68030__)
+#define M68K_DATA_CACHE_ALIGNMENT 16
+
+/* Only the mc68030 has a data cache; it is writethrough only. */
+
+static inline void _CPU_flush_1_data_cache_line ( const void * d_addr ) {}
+static inline void _CPU_flush_entire_data_cache ( const void * d_addr ) {}
+
+static inline void _CPU_invalidate_1_data_cache_line (
+ const void * d_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
+ asm volatile ( "movec %0, %%caar" :: "a" (p_address) ); /* write caar */
+ _CPU_CACR_OR(0x00000400);
+}
+
+static inline void _CPU_invalidate_entire_data_cache (
+ void )
+{
+ _CPU_CACR_OR( 0x00000800 );
+}
+
+static inline void _CPU_freeze_data_cache (
+ void )
+{
+ _CPU_CACR_OR( 0x00000200 );
+}
+
+static inline void _CPU_unfreeze_data_cache (
+ void )
+{
+ _CPU_CACR_AND( 0xFFFFFDFF );
+}
+
+static inline void _CPU_enable_data_cache ( void )
+{
+ _CPU_CACR_OR( 0x00000100 );
+}
+static inline void _CPU_disable_data_cache ( void )
+{
+ _CPU_CACR_AND( 0xFFFFFEFF );
+}
+#endif
+
+
+/* Both the 68020 and 68030 have instruction caches */
+
+static inline void _CPU_invalidate_1_inst_cache_line (
+ const void * d_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
+ asm volatile ( "movec %0, %%caar" :: "a" (p_address) ); /* write caar */
+ _CPU_CACR_OR( 0x00000004 );
+}
+
+static inline void _CPU_invalidate_entire_inst_cache (
+ void )
+{
+ _CPU_CACR_OR( 0x00000008 );
+}
+
+static inline void _CPU_freeze_inst_cache (
+ void )
+{
+ _CPU_CACR_OR( 0x00000002);
+}
+
+static inline void _CPU_unfreeze_inst_cache (
+ void )
+{
+ _CPU_CACR_AND( 0xFFFFFFFD );
+}
+
+static inline void _CPU_enable_inst_cache ( void )
+{
+ _CPU_CACR_OR( 0x00000001 );
+}
+
+static inline void _CPU_disable_inst_cache ( void )
+{
+ _CPU_CACR_AND( 0xFFFFFFFE );
+}
+
+
+#elif ( defined(__mc68040__) || defined (__mc68060__) )
+
+#define M68K_INST_CACHE_ALIGNMENT 16
+#define M68K_DATA_CACHE_ALIGNMENT 16
+
+/* Cannot be frozen */
+static inline void _CPU_freeze_data_cache ( void ) {}
+static inline void _CPU_unfreeze_data_cache ( void ) {}
+static inline void _CPU_freeze_inst_cache ( void ) {}
+static inline void _CPU_unfreeze_inst_cache ( void ) {}
+
+static inline void _CPU_flush_1_data_cache_line (
+ const void * d_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
+ asm volatile ( "cpushl %%dc,(%0)" :: "a" (p_address) );
+}
+
+static inline void _CPU_invalidate_1_data_cache_line (
+ const void * d_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
+ asm volatile ( "cinvl %%dc,(%0)" :: "a" (p_address) );
+}
+
+static inline void _CPU_flush_entire_data_cache (
+ void )
+{
+ asm volatile ( "cpusha %%dc" :: );
+}
+
+static inline void _CPU_invalidate_entire_data_cache (
+ void )
+{
+ asm volatile ( "cinva %%dc" :: );
+}
+
+static inline void _CPU_enable_data_cache (
+ void )
+{
+ _CPU_CACR_OR( 0x80000000 );
+}
+
+static inline void _CPU_disable_data_cache (
+ void )
+{
+ _CPU_CACR_AND( 0x7FFFFFFF );
+}
+
+static inline void _CPU_invalidate_1_inst_cache_line (
+ const void * i_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( i_addr );
+ asm volatile ( "cinvl %%ic,(%0)" :: "a" (p_address) );
+}
+
+static inline void _CPU_invalidate_entire_inst_cache (
+ void )
+{
+ asm volatile ( "cinva %%ic" :: );
+}
+
+static inline void _CPU_enable_inst_cache (
+ void )
+{
+ _CPU_CACR_OR( 0x00008000 );
+}
+
+static inline void _CPU_disable_inst_cache (
+ void )
+{
+ _CPU_CACR_AND( 0xFFFF7FFF );
+}
+#endif
+
+
+#if defined(M68K_DATA_CACHE_ALIGNMENT)
+#define _CPU_DATA_CACHE_ALIGNMENT M68K_DATA_CACHE_ALIGNMENT
+#endif
+
+#if defined(M68K_INST_CACHE_ALIGNMENT)
+#define _CPU_INST_CACHE_ALIGNMENT M68K_INST_CACHE_ALIGNMENT
+#endif
+
#endif /* !ASM */
@@ -367,5 +590,5 @@ static inline unsigned int m68k_swap_u16(
}
#endif
-#endif
+#endif /* __M68K_h */
/* end of include file */
diff --git a/c/src/exec/score/cpu/powerpc/asm.h b/c/src/exec/score/cpu/powerpc/asm.h
index af14c95665..3c2e28ad5c 100644
--- a/c/src/exec/score/cpu/powerpc/asm.h
+++ b/c/src/exec/score/cpu/powerpc/asm.h
@@ -164,14 +164,20 @@
*/
#define srr0 0x01a
#define srr1 0x01b
+#ifdef ppc403
#define srr2 0x3de /* IBM 400 series only */
#define srr3 0x3df /* IBM 400 series only */
+#endif /* ppc403 */
+
#define sprg0 0x110
#define sprg1 0x111
#define sprg2 0x112
#define sprg3 0x113
+#define dar 0x013 /* Data Address Register */
+#define dec 0x016 /* Decrementer Register */
+#if defined(ppc403)
/* the following SPR/DCR registers exist only in IBM 400 series */
#define dear 0x3d5
#define evpr 0x3d6 /* SPR: exception vector prefix register */
@@ -190,9 +196,13 @@
#define br7 0x087 /* DCR: memory bank register 7 */
/* end of IBM400 series register definitions */
+#elif defined(mpc860) || defined(mpc821)
/* The following registers are for the MPC8x0 */
#define der 0x095 /* Debug Enable Register */
+#define ictrl 0x09E /* Instruction Support Control Register */
+#define immr 0x27E /* Internal Memory Map Register */
/* end of MPC8x0 registers */
+#endif
/*
* Following must be tailor for a particular flavor of the C compiler.
diff --git a/c/src/exec/score/cpu/powerpc/old_exception_processing/Makefile.am b/c/src/exec/score/cpu/powerpc/old_exception_processing/Makefile.am
index 9629808140..92f3fee811 100644
--- a/c/src/exec/score/cpu/powerpc/old_exception_processing/Makefile.am
+++ b/c/src/exec/score/cpu/powerpc/old_exception_processing/Makefile.am
@@ -5,7 +5,7 @@
AUTOMAKE_OPTIONS = foreign 1.4
# C source names
-C_FILES = cpu.c ppccache.c
+C_FILES = cpu.c
C_O_FILES = $(C_FILES:%.c=${ARCH}/%.o)
ROOT_H_FILES =
diff --git a/c/src/exec/score/cpu/powerpc/old_exception_processing/cpu.c b/c/src/exec/score/cpu/powerpc/old_exception_processing/cpu.c
index 7d6824cb26..5a5fadfd97 100644
--- a/c/src/exec/score/cpu/powerpc/old_exception_processing/cpu.c
+++ b/c/src/exec/score/cpu/powerpc/old_exception_processing/cpu.c
@@ -50,6 +50,9 @@
static void ppc_spurious(int, CPU_Interrupt_frame *);
+int _CPU_spurious_count = 0;
+int _CPU_last_spurious = 0;
+
void _CPU_Initialize(
rtems_cpu_table *cpu_table,
void (*thread_dispatch) /* ignored on this CPU */
@@ -369,6 +372,8 @@ static void ppc_spurious(int v, CPU_Interrupt_frame *i)
"=&r" ((r)) : "0" ((r))); /* TSR */
}
#endif
+ ++_CPU_spurious_count;
+ _CPU_last_spurious = v;
}
void _CPU_Fatal_error(unsigned32 _error)
@@ -748,7 +753,7 @@ unsigned32 ppc_exception_vector_addr(
case PPC_IRQ_LVL7:
Offset = 0x23c0;
break;
- case PPC_IRQ_CPM_RESERVED_0:
+ case PPC_IRQ_CPM_ERROR:
Offset = 0x2400;
break;
case PPC_IRQ_CPM_PC4:
diff --git a/c/src/exec/score/cpu/powerpc/old_exception_processing/cpu.h b/c/src/exec/score/cpu/powerpc/old_exception_processing/cpu.h
index 2a502d0745..30dd6dc092 100644
--- a/c/src/exec/score/cpu/powerpc/old_exception_processing/cpu.h
+++ b/c/src/exec/score/cpu/powerpc/old_exception_processing/cpu.h
@@ -766,6 +766,15 @@ SCORE_EXTERN struct {
); \
} while (0)
+#define _CPU_Data_Cache_Block_Invalidate( _address ) \
+ do { register void *__address = (_address); \
+ register unsigned32 _zero = 0; \
+ asm volatile ( "dcbi %0,%1" : \
+ "=r" (_zero), "=r" (__address) : \
+ "0" (_zero), "1" (__address) \
+ ); \
+ } while (0)
+
/*
* Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
diff --git a/c/src/exec/score/cpu/powerpc/rtems/score/ppc.h b/c/src/exec/score/cpu/powerpc/rtems/score/ppc.h
index 83a54bfa25..682675efde 100644
--- a/c/src/exec/score/cpu/powerpc/rtems/score/ppc.h
+++ b/c/src/exec/score/cpu/powerpc/rtems/score/ppc.h
@@ -44,6 +44,8 @@
extern "C" {
#endif
+#include <rtems/score/ppctypes.h>
+
/*
* Define the name of the CPU family.
*/
@@ -220,6 +222,7 @@ extern "C" {
#elif defined(mpc860)
/*
* Added by Jay Monkman (jmonkman@frasca.com) 6/28/98
+ * with some changes by Darlene Stewart (Darlene.Stewart@iit.nrc.ca)
*/
#define CPU_MODEL_NAME "PowerPC MPC860"
@@ -231,7 +234,6 @@ extern "C" {
#define PPC_HAS_FPU 0
#define PPC_HAS_DOUBLE 0
#define PPC_USE_MULTIPLE 1
-#define PPC_USE_SPRG 1
#define PPC_MSR_0 0x00009000
#define PPC_MSR_1 0x00001000
@@ -382,6 +384,157 @@ extern "C" {
#error "Undefined power of 2 for PPC_CACHE_ALIGNMENT"
#endif
+#ifndef ASM
+
+/*
+ * CACHE MANAGER: The following functions are CPU-specific.
+ * They provide the basic implementation for the rtems_* cache
+ * management routines. If a given function has no meaning for the CPU,
+ * it does nothing by default.
+ *
+ * FIXME: Some functions simply have not been implemented.
+ */
+
+#if defined(ppc603) /* And possibly others */
+#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+
+/* Helpful macros */
+#define PPC_Get_HID0( _value ) \
+ do { \
+ _value = 0; /* to avoid warnings */ \
+ asm volatile( \
+ "mfspr %0, 0x3f0;" /* get HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+#define PPC_Set_HID0( _value ) \
+ do { \
+ asm volatile( \
+ "isync;" \
+ "mtspr 0x3f0, %0;" /* load HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+static inline void _CPU_enable_data_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value |= 0x00004000; /* set DCE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_disable_data_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value &= 0xFFFFBFFF; /* clear DCE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_enable_inst_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value |= 0x00008000; /* Set ICE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_disable_inst_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value &= 0xFFFF7FFF; /* Clear ICE bit */
+ PPC_Set_HID0( value );
+}
+
+#elif ( defined(mpc860) || defined(mpc821) )
+
+#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+
+#define mtspr(_spr,_reg) __asm__ volatile ( "mtspr %0, %1\n" : : "i" ((_spr)), "r" ((_reg)) )
+#define isync __asm__ volatile ("isync\n"::)
+
+static inline void _CPU_flush_1_data_cache_line(
+ const void * _address )
+{
+ register const void *__address = _address;
+ asm volatile ( "dcbf 0,%0" :: "r" (__address) );
+}
+
+static inline void _CPU_invalidate_1_data_cache_line(
+ const void * _address )
+{
+ register const void *__address = _address;
+ asm volatile ( "dcbi 0,%0" :: "r" (__address) );
+}
+
+static inline void _CPU_flush_entire_data_cache ( void ) {}
+static inline void _CPU_invalidate_entire_data_cache ( void ) {}
+static inline void _CPU_freeze_data_cache ( void ) {}
+static inline void _CPU_unfreeze_data_cache ( void ) {}
+
+static inline void _CPU_enable_data_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x2<<24);
+ mtspr( 568, r1 );
+ isync;
+}
+
+static inline void _CPU_disable_data_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x4<<24);
+ mtspr( 568, r1 );
+ isync;
+}
+
+static inline void _CPU_invalidate_1_inst_cache_line(
+ const void * _address )
+{
+ register const void *__address = _address;
+ asm volatile ( "icbi 0,%0" :: "r" (__address) );
+}
+
+static inline void _CPU_invalidate_entire_inst_cache ( void ) {}
+static inline void _CPU_freeze_inst_cache ( void ) {}
+static inline void _CPU_unfreeze_inst_cache ( void ) {}
+
+static inline void _CPU_enable_inst_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x2<<24);
+ mtspr( 560, r1 );
+ isync;
+}
+
+static inline void _CPU_disable_inst_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x4<<24);
+ mtspr( 560, r1 );
+ isync;
+}
+#endif
+
+#endif /* !ASM */
+
/*
* Unless otherwise specified, assume the model has an IP/EP bit to
* set the exception address prefix.
@@ -550,7 +703,7 @@ extern "C" {
#define PPC_IRQ_LVL6 (PPC_STD_IRQ_LAST + 23)
#define PPC_IRQ_IRQ7 (PPC_STD_IRQ_LAST + 24)
#define PPC_IRQ_LVL7 (PPC_STD_IRQ_LAST + 25)
-#define PPC_IRQ_CPM_RESERVED_0 (PPC_STD_IRQ_LAST + 26)
+#define PPC_IRQ_CPM_ERROR (PPC_STD_IRQ_LAST + 26)
#define PPC_IRQ_CPM_PC4 (PPC_STD_IRQ_LAST + 27)
#define PPC_IRQ_CPM_PC5 (PPC_STD_IRQ_LAST + 28)
#define PPC_IRQ_CPM_SMC2 (PPC_STD_IRQ_LAST + 29)
diff --git a/c/src/exec/score/cpu/powerpc/shared/asm.h b/c/src/exec/score/cpu/powerpc/shared/asm.h
index af14c95665..3c2e28ad5c 100644
--- a/c/src/exec/score/cpu/powerpc/shared/asm.h
+++ b/c/src/exec/score/cpu/powerpc/shared/asm.h
@@ -164,14 +164,20 @@
*/
#define srr0 0x01a
#define srr1 0x01b
+#ifdef ppc403
#define srr2 0x3de /* IBM 400 series only */
#define srr3 0x3df /* IBM 400 series only */
+#endif /* ppc403 */
+
#define sprg0 0x110
#define sprg1 0x111
#define sprg2 0x112
#define sprg3 0x113
+#define dar 0x013 /* Data Address Register */
+#define dec 0x016 /* Decrementer Register */
+#if defined(ppc403)
/* the following SPR/DCR registers exist only in IBM 400 series */
#define dear 0x3d5
#define evpr 0x3d6 /* SPR: exception vector prefix register */
@@ -190,9 +196,13 @@
#define br7 0x087 /* DCR: memory bank register 7 */
/* end of IBM400 series register definitions */
+#elif defined(mpc860) || defined(mpc821)
/* The following registers are for the MPC8x0 */
#define der 0x095 /* Debug Enable Register */
+#define ictrl 0x09E /* Instruction Support Control Register */
+#define immr 0x27E /* Internal Memory Map Register */
/* end of MPC8x0 registers */
+#endif
/*
* Following must be tailor for a particular flavor of the C compiler.
diff --git a/c/src/exec/score/cpu/powerpc/shared/ppc.h b/c/src/exec/score/cpu/powerpc/shared/ppc.h
index 83a54bfa25..682675efde 100644
--- a/c/src/exec/score/cpu/powerpc/shared/ppc.h
+++ b/c/src/exec/score/cpu/powerpc/shared/ppc.h
@@ -44,6 +44,8 @@
extern "C" {
#endif
+#include <rtems/score/ppctypes.h>
+
/*
* Define the name of the CPU family.
*/
@@ -220,6 +222,7 @@ extern "C" {
#elif defined(mpc860)
/*
* Added by Jay Monkman (jmonkman@frasca.com) 6/28/98
+ * with some changes by Darlene Stewart (Darlene.Stewart@iit.nrc.ca)
*/
#define CPU_MODEL_NAME "PowerPC MPC860"
@@ -231,7 +234,6 @@ extern "C" {
#define PPC_HAS_FPU 0
#define PPC_HAS_DOUBLE 0
#define PPC_USE_MULTIPLE 1
-#define PPC_USE_SPRG 1
#define PPC_MSR_0 0x00009000
#define PPC_MSR_1 0x00001000
@@ -382,6 +384,157 @@ extern "C" {
#error "Undefined power of 2 for PPC_CACHE_ALIGNMENT"
#endif
+#ifndef ASM
+
+/*
+ * CACHE MANAGER: The following functions are CPU-specific.
+ * They provide the basic implementation for the rtems_* cache
+ * management routines. If a given function has no meaning for the CPU,
+ * it does nothing by default.
+ *
+ * FIXME: Some functions simply have not been implemented.
+ */
+
+#if defined(ppc603) /* And possibly others */
+#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+
+/* Helpful macros */
+#define PPC_Get_HID0( _value ) \
+ do { \
+ _value = 0; /* to avoid warnings */ \
+ asm volatile( \
+ "mfspr %0, 0x3f0;" /* get HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+#define PPC_Set_HID0( _value ) \
+ do { \
+ asm volatile( \
+ "isync;" \
+ "mtspr 0x3f0, %0;" /* load HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+static inline void _CPU_enable_data_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value |= 0x00004000; /* set DCE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_disable_data_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value &= 0xFFFFBFFF; /* clear DCE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_enable_inst_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value |= 0x00008000; /* Set ICE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_disable_inst_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value &= 0xFFFF7FFF; /* Clear ICE bit */
+ PPC_Set_HID0( value );
+}
+
+#elif ( defined(mpc860) || defined(mpc821) )
+
+#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+
+#define mtspr(_spr,_reg) __asm__ volatile ( "mtspr %0, %1\n" : : "i" ((_spr)), "r" ((_reg)) )
+#define isync __asm__ volatile ("isync\n"::)
+
+static inline void _CPU_flush_1_data_cache_line(
+ const void * _address )
+{
+ register const void *__address = _address;
+ asm volatile ( "dcbf 0,%0" :: "r" (__address) );
+}
+
+static inline void _CPU_invalidate_1_data_cache_line(
+ const void * _address )
+{
+ register const void *__address = _address;
+ asm volatile ( "dcbi 0,%0" :: "r" (__address) );
+}
+
+static inline void _CPU_flush_entire_data_cache ( void ) {}
+static inline void _CPU_invalidate_entire_data_cache ( void ) {}
+static inline void _CPU_freeze_data_cache ( void ) {}
+static inline void _CPU_unfreeze_data_cache ( void ) {}
+
+static inline void _CPU_enable_data_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x2<<24);
+ mtspr( 568, r1 );
+ isync;
+}
+
+static inline void _CPU_disable_data_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x4<<24);
+ mtspr( 568, r1 );
+ isync;
+}
+
+static inline void _CPU_invalidate_1_inst_cache_line(
+ const void * _address )
+{
+ register const void *__address = _address;
+ asm volatile ( "icbi 0,%0" :: "r" (__address) );
+}
+
+static inline void _CPU_invalidate_entire_inst_cache ( void ) {}
+static inline void _CPU_freeze_inst_cache ( void ) {}
+static inline void _CPU_unfreeze_inst_cache ( void ) {}
+
+static inline void _CPU_enable_inst_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x2<<24);
+ mtspr( 560, r1 );
+ isync;
+}
+
+static inline void _CPU_disable_inst_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x4<<24);
+ mtspr( 560, r1 );
+ isync;
+}
+#endif
+
+#endif /* !ASM */
+
/*
* Unless otherwise specified, assume the model has an IP/EP bit to
* set the exception address prefix.
@@ -550,7 +703,7 @@ extern "C" {
#define PPC_IRQ_LVL6 (PPC_STD_IRQ_LAST + 23)
#define PPC_IRQ_IRQ7 (PPC_STD_IRQ_LAST + 24)
#define PPC_IRQ_LVL7 (PPC_STD_IRQ_LAST + 25)
-#define PPC_IRQ_CPM_RESERVED_0 (PPC_STD_IRQ_LAST + 26)
+#define PPC_IRQ_CPM_ERROR (PPC_STD_IRQ_LAST + 26)
#define PPC_IRQ_CPM_PC4 (PPC_STD_IRQ_LAST + 27)
#define PPC_IRQ_CPM_PC5 (PPC_STD_IRQ_LAST + 28)
#define PPC_IRQ_CPM_SMC2 (PPC_STD_IRQ_LAST + 29)
diff --git a/c/src/exec/score/include/rtems/score/object.h b/c/src/exec/score/include/rtems/score/object.h
index 811e33a17e..7640dce3d3 100644
--- a/c/src/exec/score/include/rtems/score/object.h
+++ b/c/src/exec/score/include/rtems/score/object.h
@@ -444,6 +444,21 @@ Objects_Control *_Objects_Get (
);
/*
+ * _Objects_Get_by_index
+ *
+ * DESCRIPTION:
+ *
+ * This routine sets the object pointer for the given
+ * object id based on the given object information structure.
+ */
+
+Objects_Control *_Objects_Get_by_index(
+ Objects_Information *information,
+ unsigned32 index,
+ Objects_Locations *location
+);
+
+/*
* _Objects_Get_next
*
* DESCRIPTION:
@@ -460,6 +475,25 @@ Objects_Control *_Objects_Get_next(
);
/*
+ * _Objects_Local_iterate
+ *
+ * DESCRIPTION:
+ *
+ * This function invokes the callback function for each existing object
+ * of the type specified by the information block pointer. Iteration
+ * continues until either all objects have been processed, or, if
+ * break_on_error is TRUE, until an invocation of the callback returns
+ * something other than 0.
+ */
+
+unsigned32 _Objects_Local_iterate(
+ Objects_Information *information,
+ unsigned32 (*callback)(Objects_Control *object, void * arg),
+ void * arg,
+ boolean break_on_error
+);
+
+/*
* Pieces of object.inl are promoted out to the user
*/
diff --git a/c/src/exec/score/include/rtems/score/thread.h b/c/src/exec/score/include/rtems/score/thread.h
index 4c8032f558..7739925e66 100644
--- a/c/src/exec/score/include/rtems/score/thread.h
+++ b/c/src/exec/score/include/rtems/score/thread.h
@@ -734,6 +734,23 @@ Thread_Control *_Thread_Get (
#endif
/*
+ * _Thread_Local_iterate
+ *
+ * DESCRIPTION:
+ *
+ * This function invokes the callback function for each existing thread.
+ * Iteration continues until either all threads have been processed, or,
+ * if break_on_error is TRUE, until an invocation of the callback returns
+ * an integer value other than 0.
+ */
+
+unsigned32 _Thread_Local_iterate(
+ unsigned32 (*callback)(Thread_Control *the_thread, void * arg),
+ void * arg,
+ boolean break_on_error
+);
+
+/*
* _Thread_Idle_body
*
* DESCRIPTION:
diff --git a/c/src/exec/score/src/Makefile.am b/c/src/exec/score/src/Makefile.am
index c348a606f1..97a2597cda 100644
--- a/c/src/exec/score/src/Makefile.am
+++ b/c/src/exec/score/src/Makefile.am
@@ -27,17 +27,18 @@ OBJECT_C_FILES = object.c objectallocate.c objectallocatebyindex.c \
objectclearname.c objectcomparenameraw.c objectcomparenamestring.c \
objectcopynameraw.c objectcopynamestring.c objectextendinformation.c \
objectfree.c objectget.c objectgetbyindex.c objectgetnext.c \
- objectinitializeinformation.c objectnametoid.c objectshrinkinformation.c
+ objectinitializeinformation.c objectlocaliterate.c objectnametoid.c \
+ objectshrinkinformation.c
THREAD_C_FILES = thread.c threadchangepriority.c threadclearstate.c \
threadclose.c threadcreateidle.c threaddelayended.c threaddispatch.c \
threadevaluatemode.c threadget.c threadhandler.c threadidlebody.c \
- threadinitialize.c threadloadenv.c threadready.c threadresettimeslice.c \
- threadreset.c threadrestart.c threadresume.c threadrotatequeue.c \
- threadsetpriority.c threadsetstate.c threadsettransient.c \
- threadstackallocate.c threadstackfree.c threadstart.c \
- threadstartmultitasking.c threadsuspend.c threadtickletimeslice.c \
- threadyieldprocessor.c
+ threadinitialize.c threadloadenv.c threadlocaliterate.c threadready.c \
+ threadresettimeslice.c threadreset.c threadrestart.c threadresume.c \
+ threadrotatequeue.c threadsetpriority.c threadsetstate.c \
+ threadsettransient.c threadstackallocate.c threadstackfree.c \
+ threadstart.c threadstartmultitasking.c threadsuspend.c \
+ threadtickletimeslice.c threadyieldprocessor.c
THREADQ_C_FILES = threadq.c threadqdequeue.c threadqdequeuefifo.c \
threadqdequeuepriority.c threadqenqueue.c threadqenqueuefifo.c \
@@ -53,8 +54,8 @@ WATCHDOG_C_FILES = watchdog.c watchdogadjust.c watchdoginsert.c \
watchdogremove.c watchdogtickle.c
STD_C_FILES = apiext.c chain.c $(CORE_MESSAGE_QUEUE_C_FILES) \
- $(CORE_MUTEX_C_FILES) $(CORE_SEMAPHORE_C_FILES) $(HEAP_C_FILES) interr.c \
- isr.c $(OBJECT_C_FILES) $(THREAD_C_FILES) $(THREADQ_C_FILES) \
+ $(CORE_MUTEX_C_FILES) $(CORE_SEMAPHORE_C_FILES) $(HEAP_C_FILES) \
+ interr.c isr.c $(OBJECT_C_FILES) $(THREAD_C_FILES) $(THREADQ_C_FILES) \
$(TOD_C_FILES) userext.c $(WATCHDOG_C_FILES) wkspace.c
if HAS_MP
diff --git a/c/src/exec/score/src/objectlocaliterate.c b/c/src/exec/score/src/objectlocaliterate.c
new file mode 100644
index 0000000000..e4b0f28de8
--- /dev/null
+++ b/c/src/exec/score/src/objectlocaliterate.c
@@ -0,0 +1,77 @@
+/*
+ * Object Iterator
+ *
+ *
+ * COPYRIGHT (c) 2000.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/object.h>
+
+/*PAGE
+ *
+ * _Objects_Local_iterate
+ *
+ * DESCRIPTION:
+ *
+ * This function invokes the callback function for each existing object
+ * of the type specified by the information block pointer. Iteration
+ * continues until either all objects have been processed, or, if
+ * break_on_error is TRUE, until an invocation of the callback returns
+ * something other than 0.
+ *
+ * Input parameters:
+ * information:
+ * A pointer to an Objects_Information block. Determines the type of
+ * object over which to iterate.
+ * callback:
+ * A pointer to a function with the indicated signature.
+ * arg:
+ * A pointer to some arbitrary entity. Passed on to the callback.
+ * break_on_error:
+ * If TRUE, stop iterating on error.
+ *
+ * Output parameters: NONE
+ * But callback may write into space pointed to by arg.
+ *
+ * Return value:
+ * 0 if successful
+ * Value returned by the callback otherwise.
+ */
+
+unsigned32 _Objects_Local_iterate(
+ Objects_Information *information,
+ unsigned32 (*callback)(Objects_Control *object, void * arg),
+ void *arg,
+ boolean break_on_error
+)
+{
+ unsigned32 result;
+ unsigned32 i;
+ Objects_Control *the_object;
+
+ if ( !information )
+ return 0;
+
+ if ( !callback )
+ return 0;
+
+ for( i = 1; i <= information->maximum; i++ ) {
+ the_object = (Objects_Control *)information->local_table[i];
+ if( the_object ) {
+ result = (*callback)( the_object, arg );
+ if ( result && break_on_error )
+ return result;
+ }
+ }
+
+ return 0;
+}
+
diff --git a/c/src/exec/score/src/threadlocaliterate.c b/c/src/exec/score/src/threadlocaliterate.c
new file mode 100644
index 0000000000..a2d94fe81a
--- /dev/null
+++ b/c/src/exec/score/src/threadlocaliterate.c
@@ -0,0 +1,77 @@
+/*
+ * Thread Iterator
+ *
+ *
+ * COPYRIGHT (c) 2000.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/object.h>
+#include <rtems/score/thread.h>
+
+
+/*PAGE
+ *
+ * _Thread_Local_iterate
+ *
+ * DESCRIPTION:
+ *
+ * This function invokes the callback function for each existing thread.
+ * Iteration continues until either all threads have been processed, or,
+ * if break_on_error is TRUE, until an invocation of the callback returns
+ * an integer value other than 0.
+ *
+ * Input parameters:
+ * callback:
+ * A pointer to a function with the indicated signature.
+ * arg:
+ * A pointer to some arbitrary entity. Passed on to the callback.
+ * break_on_error:
+ * If TRUE, stop iterating on error.
+ *
+ *
+ * Output parameters: NONE
+ * But callback may write into space pointed to by arg.
+ *
+ * Return value:
+ * 0 if successful
+ * Value returned by the callback otherwise.
+ */
+
+unsigned32 _Thread_Local_iterate(
+ unsigned32 (*callback)(Thread_Control *the_thread, void * arg),
+ void *arg,
+ boolean break_on_error
+)
+{
+ unsigned32 class_index;
+ unsigned32 result;
+ Objects_Information *information;
+
+ if( callback == NULL )
+ return 0;
+
+ for ( class_index = OBJECTS_CLASSES_FIRST ;
+ class_index <= OBJECTS_CLASSES_LAST ;
+ class_index++ ) {
+ information = _Objects_Information_table[ class_index ];
+ if ( information && information->is_thread ) {
+ result = _Objects_Local_iterate(
+ information,
+ (unsigned32 (*)(Objects_Control *, void *))callback,
+ arg,
+ break_on_error );
+ if( result && break_on_error )
+ return result;
+ }
+ }
+
+ return 0;
+}