summaryrefslogtreecommitdiffstats
path: root/cpukit
diff options
context:
space:
mode:
authorJoel Sherrill <joel.sherrill@OARcorp.com>1999-10-27 15:29:18 +0000
committerJoel Sherrill <joel.sherrill@OARcorp.com>1999-10-27 15:29:18 +0000
commit702c5f5b42e975c35a94f1ae3d39a77815f36f70 (patch)
tree46a9caa5230280f7c51530aeeff50355f893083c /cpukit
parentFirst attempt at including Eric Valette and Emmanuel Raguet. (diff)
downloadrtems-702c5f5b42e975c35a94f1ae3d39a77815f36f70.tar.bz2
The rxgen960 BSP and i960 RPM support was submitted by Mark Bronson
<mark@ramix.com> of RAMIX.
Diffstat (limited to 'cpukit')
-rw-r--r--cpukit/score/cpu/i960/cpu.c41
-rw-r--r--cpukit/score/cpu/i960/rtems/score/cpu.h6
-rw-r--r--cpukit/score/cpu/i960/rtems/score/i960.h241
3 files changed, 245 insertions, 43 deletions
diff --git a/cpukit/score/cpu/i960/cpu.c b/cpukit/score/cpu/i960/cpu.c
index 7dbbb5828f..07ca05f69b 100644
--- a/cpukit/score/cpu/i960/cpu.c
+++ b/cpukit/score/cpu/i960/cpu.c
@@ -12,10 +12,14 @@
*
* $Id$
*/
+/*
+ * 1999/04/26: added support for Intel i960RP
+ */
#if defined(__i960CA__) || defined(__i960_CA__) || defined(__i960CA)
+#elif defined(__i960RP__)
#else
-#warning "*** ENTIRE FILE IMPLEMENTED & TESTED FOR CA ONLY ***"
+#warning "*** ENTIRE FILE IMPLEMENTED & TESTED FOR CA & RP ONLY ***"
#warning "*** THIS FILE WILL NOT COMPILE ON ANOTHER FAMILY MEMBER ***"
#endif
@@ -61,9 +65,14 @@ unsigned32 _CPU_ISR_Get_level( void )
*
* _CPU_ISR_install_raw_handler
*/
-
+
+#if defined(__i960CA__) || defined(__i960_CA__) || defined(__i960CA)
#define _Is_vector_caching_enabled( _prcb ) \
((_prcb)->control_tbl->icon & 0x2000)
+#elif defined(__i960RP__)
+#define _Is_vector_caching_enabled( _prcb ) \
+ ((*((unsigned int *) ICON_ADDR)) & 0x2000)
+#endif
void _CPU_ISR_install_raw_handler(
unsigned32 vector,
@@ -71,7 +80,7 @@ void _CPU_ISR_install_raw_handler(
proc_ptr *old_handler
)
{
- i960ca_PRCB *prcb = _CPU_Table.Prcb;
+ i960_PRCB *prcb = _CPU_Table.Prcb;
proc_ptr *cached_intr_tbl = NULL;
/* The i80960CA does not support vectors 0-7. The first 9 entries
@@ -124,8 +133,9 @@ void _CPU_ISR_install_vector(
* _CPU_Install_interrupt_stack
*/
+#if defined(__i960CA__) || defined(__i960_CA__) || defined(__i960CA)
#define soft_reset( prcb ) \
- { register i960ca_PRCB *_prcb = (prcb); \
+ { register i960_PRCB *_prcb = (prcb); \
register unsigned32 *_next=0; \
register unsigned32 _cmd = 0x30000; \
asm volatile( "lda next,%1; \
@@ -134,11 +144,28 @@ void _CPU_ISR_install_vector(
: "=d" (_cmd), "=d" (_next), "=d" (_prcb) \
: "0" (_cmd), "1" (_next), "2" (_prcb) ); \
}
+#else
+#if defined(__i960RP__) || defined(__i960_RP__) || defined(__i960RP)
+#define soft_reset( prcb ) \
+ { register i960_PRCB *_prcb = (prcb); \
+ register unsigned32 *_next=0; \
+ register unsigned32 _cmd = 0x300; \
+ asm volatile( "lda next,%1; \
+ sysctl %0,%1,%2; \
+ next: mov g0,g0" \
+ : "=d" (_cmd), "=d" (_next), "=d" (_prcb) \
+ : "0" (_cmd), "1" (_next), "2" (_prcb) ); \
+ }
+#endif
+#endif
void _CPU_Install_interrupt_stack( void )
{
- i960ca_PRCB *prcb = _CPU_Table.Prcb;
+ i960_PRCB *prcb = _CPU_Table.Prcb;
unsigned32 level;
+#if defined(__i960RP__) || defined(__i960_RP__)
+ int *isp = (int *) ISP_ADDR;
+#endif
/*
* Set the Interrupt Stack in the PRCB and force a reload of it.
@@ -149,7 +176,11 @@ void _CPU_Install_interrupt_stack( void )
prcb->intr_stack = _CPU_Interrupt_stack_low;
+#if defined(__i960CA__) || defined(__i960_CA__) || defined(__i960CA)
soft_reset( prcb );
+#elif defined(__i960RP__) || defined(__i960_RP__) || defined(__i960RP)
+ *isp = prcb->intr_stack;
+#endif
_CPU_ISR_Enable( level );
}
diff --git a/cpukit/score/cpu/i960/rtems/score/cpu.h b/cpukit/score/cpu/i960/rtems/score/cpu.h
index 703abab583..14083d9860 100644
--- a/cpukit/score/cpu/i960/rtems/score/cpu.h
+++ b/cpukit/score/cpu/i960/rtems/score/cpu.h
@@ -215,10 +215,12 @@ SCORE_EXTERN void *_CPU_Interrupt_stack_high;
#define CPU_STACK_MINIMUM_SIZE 2048
/*
- * i960 is pretty tolerant of alignment. Just put things on 4 byte boundaries.
+ * i960 is pretty tolerant of alignment but some CPU models do
+ * better with different default alignments, so we use the value
+ * selected for the CPU model in rtems/score/i960.h.
*/
-#define CPU_ALIGNMENT 4
+#define CPU_ALIGNMENT I960_CPU_ALIGNMENT
#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
diff --git a/cpukit/score/cpu/i960/rtems/score/i960.h b/cpukit/score/cpu/i960/rtems/score/i960.h
index bb4884941a..3719d96496 100644
--- a/cpukit/score/cpu/i960/rtems/score/i960.h
+++ b/cpukit/score/cpu/i960/rtems/score/i960.h
@@ -38,13 +38,19 @@ extern "C" {
#define CPU_MODEL_NAME "i960ca"
#define __RTEMS_I960CA__
-#define I960_HAS_FPU 0
#elif defined(__i960HA__) || defined(__i960_HA__) || defined(__i960HA)
#define CPU_MODEL_NAME "i960ha"
#define __RTEMS_I960HA__
-#define I960_HAS_FPU 0
+
+#elif defined(__i960RP__)
+
+#include <i960RP.h>
+#define CPU_MODEL_NAME "i960rp"
+#define __RTEMS_I960RP__
+#define I960_CPU_ALIGNMENT 8
+#define I960_SOFT_RESET_COMMAND 0x300
#else
@@ -53,6 +59,22 @@ extern "C" {
#endif
/*
+ * Now default some CPU model variation parameters
+ */
+
+#ifndef I960_HAS_FPU
+#define I960_HAS_FPU 0
+#endif
+
+#ifndef I960_CPU_ALIGNMENT
+#define I960_CPU_ALIGNMENT 4
+#endif
+
+#ifndef I960_SOFT_RESET_COMMAND
+#define I960_SOFT_RESET_COMMAND 0x30000
+#endif
+
+/*
* Define the name of the CPU family.
*/
@@ -66,6 +88,18 @@ extern "C" {
*/
#if defined(__RTEMS_I960CA__)
+/*
+ * Now default some CPU model variation parameters
+ */
+
+#ifndef I960_HAS_FPU
+#define I960_HAS_FPU 0
+#endif
+
+#ifndef I960_CPU_ALIGNMENT
+#define I960_CPU_ALIGNMENT 4
+#endif
+
/* i960CA control structures */
@@ -195,9 +229,102 @@ typedef struct {
typedef i960ha_control_table i960_control_table;
typedef i960ha_PRCB i960_PRCB;
+#elif defined(__RTEMS_I960RP__)
+
+/* i960RP control structures */
+
+/* Intel i960RP Control Table */
+
+typedef struct {
+ /* Control Group 0 */
+ unsigned int rsvd00;
+ unsigned int rsvd01;
+ unsigned int rsvd02;
+ unsigned int rsvd03;
+ /* Control Group 1 */
+ unsigned int imap0; /* interrupt map 0 */
+ unsigned int imap1; /* interrupt map 1 */
+ unsigned int imap2; /* interrupt map 2 */
+ unsigned int icon; /* interrupt control */
+ /* Control Group 2 */
+ unsigned int pmcon0; /* memory region 0 configuration */
+ unsigned int rsvd1;
+ unsigned int pmcon2; /* memory region 2 configuration */
+ unsigned int rsvd2;
+ /* Control Group 3 */
+ unsigned int pmcon4; /* memory region 4 configuration */
+ unsigned int rsvd3;
+ unsigned int pmcon6; /* memory region 6 configuration */
+ unsigned int rsvd4;
+ /* Control Group 4 */
+ unsigned int pmcon8; /* memory region 8 configuration */
+ unsigned int rsvd5;
+ unsigned int pmcon10; /* memory region 10 configuration */
+ unsigned int rsvd6;
+ /* Control Group 5 */
+ unsigned int pmcon12; /* memory region 12 configuration */
+ unsigned int rsvd7;
+ unsigned int pmcon14; /* memory region 14 configuration */
+ unsigned int rsvd8;
+ /* Control Group 6 */
+ unsigned int rsvd9;
+ unsigned int rsvd10;
+ unsigned int tc; /* trace control */
+ unsigned int bcon; /* bus configuration control */
+} i960rp_control_table;
+
+/* Intel i960RP Processor Control Block */
+
+typedef struct {
+ unsigned int *fault_tbl; /* fault table base address */
+ i960rp_control_table
+ *control_tbl; /* control table base address */
+ unsigned int initial_ac; /* AC register initial value */
+ unsigned int fault_config; /* fault configuration word */
+ void **intr_tbl; /* interrupt table base address */
+ void *sys_proc_tbl; /* system procedure table
+ base address */
+ unsigned int reserved; /* reserved */
+ unsigned int *intr_stack; /* interrupt stack pointer */
+ unsigned int ins_cache_cfg; /* instruction cache
+ configuration word */
+ unsigned int reg_cache_cfg; /* register cache configuration word */
+} i960rp_PRCB;
+
+typedef i960rp_control_table i960_control_table;
+typedef i960rp_PRCB i960_PRCB;
+
+#else
+#error "invalid processor selection!"
#endif
/*
+ * Miscellaneous Support Routines
+ */
+
+#define i960_reload_ctl_group( group ) \
+ { register int _cmd = ((group)|0x400) ; \
+ asm volatile( "sysctl %0,%0,%0" : "=d" (_cmd) : "0" (_cmd) ); \
+ }
+
+#define i960_atomic_modify( mask, addr, prev ) \
+ { register unsigned int _mask = (mask); \
+ register unsigned int *_addr = (unsigned int *)(addr); \
+ asm volatile( "atmod %0,%1,%1" \
+ : "=d" (_addr), "=d" (_mask) \
+ : "0" (_addr), "1" (_mask) ); \
+ (prev) = _mask; \
+ }
+
+#define atomic_modify( _mask, _address, _previous ) \
+ i960_atomic_modify( _mask, _address, _previous )
+
+#define i960_enable_tracing() \
+ { register unsigned int _pc = 0x1; \
+ asm volatile( "modpc 0,%0,%0" : "=d" (_pc) : "0" (_pc) ); \
+ }
+
+/*
* Interrupt Level Routines
*/
@@ -231,23 +358,16 @@ typedef i960ha_PRCB i960_PRCB;
(_level) = ((_level) & 0x1f0000) >> 16; \
} while ( 0 )
-#define i960_atomic_modify( mask, addr, prev ) \
- { register unsigned int _mask = (mask); \
- register unsigned int *_addr = (unsigned int *)(addr); \
- asm volatile( "atmod %0,%1,%1" \
- : "=d" (_addr), "=d" (_mask) \
- : "0" (_addr), "1" (_mask) ); \
- (prev) = _mask; \
+#define i960_cause_intr( intr ) \
+ { register int _intr = (intr); \
+ asm volatile( "sysctl %0,%0,%0" : "=d" (_intr) : "0" (_intr) ); \
}
+/*
+ * Interrupt Masking Routines
+ */
-#define atomic_modify( _mask, _address, _previous ) \
- i960_atomic_modify( _mask, _address, _previous )
-
-#define i960_enable_tracing() \
- { register unsigned int _pc = 0x1; \
- asm volatile( "modpc 0,%0,%0" : "=d" (_pc) : "0" (_pc) ); \
- }
+#if defined(__RTEMS_I960CA__) || defined(__RTEMS_I960HA__)
#define i960_unmask_intr( xint ) \
{ register unsigned int _mask= (1<<(xint)); \
@@ -266,38 +386,66 @@ asm volatile( "loop_til_cleared: clrbit %0,sf0,sf0 ; \
: "=d" (_xint) : "0" (_xint) ); \
}
-#define i960_reload_ctl_group( group ) \
- { register int _cmd = ((group)|0x400) ; \
- asm volatile( "sysctl %0,%0,%0" : "=d" (_cmd) : "0" (_cmd) ); \
- }
+static inline unsigned int i960_pend_intrs()
+{ register unsigned int _intr=0;
+ asm volatile( "mov sf0,%0" : "=d" (_intr) : "0" (_intr) );
+ return ( _intr );
+}
-#define i960_cause_intr( intr ) \
- { register int _intr = (intr); \
- asm volatile( "sysctl %0,%0,%0" : "=d" (_intr) : "0" (_intr) ); \
+static inline unsigned int i960_mask_intrs()
+{ register unsigned int _intr=0;
+ asm volatile( "mov sf1,%0" : "=d" (_intr) : "0" (_intr) );
+ return( _intr );
+}
+
+#elif defined(__RTEMS_I960RP__)
+
+#define i960_unmask_intr( xint ) \
+ { register unsigned int _mask= (1<<(xint)); \
+ register unsigned int *_imsk = (int * ) IMSK_ADDR; \
+ register unsigned int _val= *_imsk; \
+ asm volatile( "or %0,%2,%0; \
+ st %0,(%1)" \
+ : "=d" (_val), "=d" (_imsk), "=d" (_mask) \
+ : "0" (_val), "1" (_imsk), "2" (_mask) ); \
}
-#define i960_soft_reset( prcb ) \
- { register i960ca_PRCB *_prcb = (prcb); \
- register unsigned int *_next=0; \
- register unsigned int _cmd = 0x30000; \
- asm volatile( "lda next,%1; \
- sysctl %0,%1,%2; \
- next: mov g0,g0" \
- : "=d" (_cmd), "=d" (_next), "=d" (_prcb) \
- : "0" (_cmd), "1" (_next), "2" (_prcb) ); \
+#define i960_mask_intr( xint ) \
+ { register unsigned int _mask= (1<<(xint)); \
+ register unsigned int *_imsk = (int * ) IMSK_ADDR; \
+ register unsigned int _val = *_imsk; \
+ asm volatile( "andnot %2,%0,%0; \
+ st %0,(%1)" \
+ : "=d" (_val), "=d" (_imsk), "=d" (_mask) \
+ : "0" (_val), "1" (_imsk), "2" (_mask) ); \
+ }
+#define i960_clear_intr( xint ) \
+ { register unsigned int _xint=xint; \
+ register unsigned int _mask=(1<<(xint)); \
+ register unsigned int *_ipnd = (int * ) IPND_ADDR; \
+ register unsigned int _rslt = 0; \
+asm volatile( "loop_til_cleared: mov 0, %0; \
+ atmod %1, %2, %0; \
+ bbs %3,%0, loop_til_cleared" \
+ : "=d" (_rslt), "=d" (_ipnd), "=d" (_mask), "=d" (_xint) \
+ : "0" (_rslt), "1" (_ipnd), "2" (_mask), "3" (_xint) ); \
}
static inline unsigned int i960_pend_intrs()
-{ register unsigned int _intr=0;
- asm volatile( "mov sf0,%0" : "=d" (_intr) : "0" (_intr) );
+{ register unsigned int _intr= *(unsigned int *) IPND_ADDR;
+ /*register unsigned int *_ipnd = (int * ) IPND_ADDR; \
+ asm volatile( "mov (%0),%1" \
+ : "=d" (_ipnd), "=d" (_mask) \
+ : "0" (_ipnd), "1" (_mask) ); \ */
return ( _intr );
}
static inline unsigned int i960_mask_intrs()
-{ register unsigned int _intr=0;
- asm volatile( "mov sf1,%0" : "=d" (_intr) : "0" (_intr) );
+{ register unsigned int _intr= *(unsigned int *) IMSK_ADDR;
+ /*asm volatile( "mov sf1,%0" : "=d" (_intr) : "0" (_intr) );*/
return( _intr );
}
+#endif
static inline unsigned int i960_get_fp()
{ register unsigned int _fp=0;
@@ -306,6 +454,27 @@ static inline unsigned int i960_get_fp()
}
/*
+ * Soft Reset
+ */
+
+#if defined(I960_SOFT_RESET_COMMAND)
+
+#define i960_soft_reset( prcb ) \
+ { register i960_PRCB *_prcb = (prcb); \
+ register unsigned int *_next=0; \
+ register unsigned int _cmd = I960_SOFT_RESET_COMMAND; \
+ asm volatile( "lda next,%1; \
+ sysctl %0,%1,%2; \
+ next: mov g0,g0" \
+ : "=d" (_cmd), "=d" (_next), "=d" (_prcb) \
+ : "0" (_cmd), "1" (_next), "2" (_prcb) ); \
+ }
+
+#else
+#warning "I960_SOFT_RESET_COMMAND is not defined"
+#endif
+
+/*
* The following routine swaps the endian format of an unsigned int.
* It must be static because it is referenced indirectly.
*