author      Joel Sherrill <joel.sherrill@OARcorp.com>  1999-12-02 14:31:19 +0000
committer   Joel Sherrill <joel.sherrill@OARcorp.com>  1999-12-02 14:31:19 +0000
commit      acc25eec35e186abc118b9ca4f097e22fc6b4846 (patch)
tree        7fa75871c51372e70cbd9cb50b0a2fab55cfa750 /c/src/lib
parent      Merged of mcp750 and mvme2307 BSP by Eric Valette <valette@crf.canon.fr>. (diff)
download    rtems-acc25eec35e186abc118b9ca4f097e22fc6b4846.tar.bz2
Merge of the mcp750 and mvme2307 BSPs by Eric Valette <valette@crf.canon.fr>.
As part of this effort, the mpc750 libcpu code is now shared with the ppc6xx.
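
The sharing relies on plain GNU Make VPATH resolution: each per-board Makefile.in lists only source basenames, and VPATH points make at the new libbsp/powerpc/shared/<subsystem> directories, so the mcp750 and motorola_powerpc BSPs compile the same files. Below is a minimal, hypothetical sketch of the pattern; it is illustrative only and not part of the patch, with the names taken from the console Makefile.in hunks further down:

    # Per-BSP console Makefile.in fragment: the sources live in the BSP-shared
    # tree, and VPATH lets make locate them there instead of the board directory.
    VPATH = @srcdir@:@srcdir@/../../shared/console

    C_PIECES  = polled_io uart console inch        # basenames only, no paths
    C_FILES   = $(C_PIECES:%=%.c)
    C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)

    # Headers are installed out of the shared directory as well.
    H_FILES = $(srcdir)/../../shared/console/consoleIo.h

    all: ${ARCH} $(C_FILES) $(C_O_FILES)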
Diffstat (limited to 'c/src/lib')
-rw-r--r--  c/src/lib/libbsp/powerpc/mcp750/Makefile.in | 2
-rw-r--r--  c/src/lib/libbsp/powerpc/mcp750/bootloader/Makefile.in | 4
-rw-r--r--  c/src/lib/libbsp/powerpc/mcp750/clock/Makefile.in | 9
-rw-r--r--  c/src/lib/libbsp/powerpc/mcp750/console/Makefile.in | 15
-rw-r--r--  c/src/lib/libbsp/powerpc/mcp750/console/console_reserve_resources.c | 61
-rw-r--r--  c/src/lib/libbsp/powerpc/mcp750/include/Makefile.in | 10
-rw-r--r--  c/src/lib/libbsp/powerpc/mcp750/irq/Makefile.in | 10
-rw-r--r--  c/src/lib/libbsp/powerpc/mcp750/openpic/Makefile.in | 11
-rw-r--r--  c/src/lib/libbsp/powerpc/mcp750/pci/Makefile.in | 15
-rw-r--r--  c/src/lib/libbsp/powerpc/mcp750/residual/Makefile.in | 11
-rw-r--r--  c/src/lib/libbsp/powerpc/mcp750/start/Makefile.in | 2
-rw-r--r--  c/src/lib/libbsp/powerpc/mcp750/startup/Makefile.in | 2
-rw-r--r--  c/src/lib/libbsp/powerpc/mcp750/vectors/Makefile.in | 6
-rw-r--r--  c/src/lib/libbsp/powerpc/mcp750/wrapup/Makefile.in | 2
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/Makefile.in | 2
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/bootloader/Makefile.in | 4
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/clock/Makefile.in | 9
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/console/Makefile.in | 15
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/console/console.c | 382
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/console/console_reserve_resources.c | 61
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/include/Makefile.in | 10
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/irq/Makefile.in | 10
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/openpic/Makefile.in | 11
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/pci/Makefile.in | 15
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/residual/Makefile.in | 11
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/start/Makefile.in | 2
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/startup/Makefile.in | 2
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/vectors/Makefile.in | 6
-rw-r--r--  c/src/lib/libbsp/powerpc/motorola_powerpc/wrapup/Makefile.in | 2
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/Makefile.in | 36
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/Makefile.in | 44
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/README | 41
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/bootldr.h | 258
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/em86.c | 580
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/em86real.S | 4561
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/exception.S | 473
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/head.S | 381
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/lib.c | 53
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/misc.c | 528
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/mm.c | 982
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/pci.c | 931
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/pci.h | 1159
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/ppcboot.lds | 94
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/zlib.c | 2143
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/zlib.h | 438
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/clock/Makefile.in | 38
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/clock/p_clock.c | 37
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/console/Makefile.in | 48
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/console/console.c (renamed from c/src/lib/libbsp/powerpc/mcp750/console/console.c) | 6
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/console/consoleIo.h | 45
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/console/inch.c | 318
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/console/keyboard.h | 433
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/console/polled_io.c | 1080
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/console/uart.c | 778
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/console/uart.h | 169
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/dec21140/Makefile.in | 32
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/dec21140/dec21140.c | 905
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/include/Makefile.in | 48
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/include/bsp.h | 57
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/include/nvram.h | 170
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/irq/Makefile.in | 41
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/irq/i8259.c | 152
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/irq/irq.c | 398
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/irq/irq.h | 319
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/irq/irq_asm.S | 322
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/irq/irq_init.c | 315
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/motorola/Makefile.in | 41
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/motorola/motorola.c | 120
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/motorola/motorola.h | 67
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/openpic/Makefile.in | 42
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/openpic/openpic.c | 509
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/openpic/openpic.h | 340
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/pci/Makefile.in | 42
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/pci/pci.c | 388
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/pci/pci.h | 1153
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/residual/Makefile.in | 50
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/residual/pnp.h | 647
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/residual/residual.c | 106
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/residual/residual.h | 346
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/start/Makefile.in | 29
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/start/start.S | 131
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/startup/Makefile.in | 35
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/startup/bspstart.c | 338
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/startup/linkcmds | 147
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/vectors/Makefile.in | 39
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/vectors/vectors.S | 154
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/vectors/vectors.h | 144
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/vectors/vectors_init.c | 122
-rw-r--r--  c/src/lib/libbsp/powerpc/support/new_exception_processing/c_isr.inl | 9
-rw-r--r--  c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.c | 116
-rw-r--r--  c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.h | 979
-rw-r--r--  c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu_asm.S | 396
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/README | 80
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/TODO | 8
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/c_isr.inl | 4
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.c | 853
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.h | 1200
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu_asm.S | 809
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/irq_stub.S | 268
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/ppccache.c | 61
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/rtems.S | 132
-rw-r--r--  c/src/lib/libcpu/powerpc/Makefile.in | 9
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/Makefile.in | 25
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/clock/Makefile.in | 68
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.c | 208
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.h | 42
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/exceptions/Makefile.in | 79
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/exceptions/asm_utils.S | 65
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.c | 195
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.h | 168
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/mmu/Makefile.in | 79
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c | 64
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.h | 40
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S | 224
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/wrapup/Makefile.in | 62
-rw-r--r--  c/src/lib/libcpu/powerpc/new-exceptions/cpu.c | 116
-rw-r--r--  c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S | 396
-rw-r--r--  c/src/lib/libcpu/powerpc/new_exception_processing/Makefile.in | 90
-rw-r--r--  c/src/lib/libcpu/powerpc/new_exception_processing/c_isr.inl | 9
-rw-r--r--  c/src/lib/libcpu/powerpc/new_exception_processing/cpu.c | 116
-rw-r--r--  c/src/lib/libcpu/powerpc/new_exception_processing/cpu.h | 979
-rw-r--r--  c/src/lib/libcpu/powerpc/new_exception_processing/cpu_asm.S | 396
-rw-r--r--  c/src/lib/libcpu/powerpc/old-exceptions/README | 80
-rw-r--r--  c/src/lib/libcpu/powerpc/old-exceptions/TODO | 8
-rw-r--r--  c/src/lib/libcpu/powerpc/old-exceptions/cpu.c | 853
-rw-r--r--  c/src/lib/libcpu/powerpc/old-exceptions/cpu_asm.S | 809
-rw-r--r--  c/src/lib/libcpu/powerpc/old-exceptions/irq_stub.S | 268
-rw-r--r--  c/src/lib/libcpu/powerpc/old-exceptions/ppccache.c | 61
-rw-r--r--  c/src/lib/libcpu/powerpc/old_exception_processing/Makefile.in | 90
-rw-r--r--  c/src/lib/libcpu/powerpc/old_exception_processing/README | 80
-rw-r--r--  c/src/lib/libcpu/powerpc/old_exception_processing/TODO | 8
-rw-r--r--  c/src/lib/libcpu/powerpc/old_exception_processing/c_isr.inl | 4
-rw-r--r--  c/src/lib/libcpu/powerpc/old_exception_processing/cpu.c | 853
-rw-r--r--  c/src/lib/libcpu/powerpc/old_exception_processing/cpu.h | 1200
-rw-r--r--  c/src/lib/libcpu/powerpc/old_exception_processing/cpu_asm.S | 809
-rw-r--r--  c/src/lib/libcpu/powerpc/old_exception_processing/irq_stub.S | 268
-rw-r--r--  c/src/lib/libcpu/powerpc/old_exception_processing/ppccache.c | 61
-rw-r--r--  c/src/lib/libcpu/powerpc/old_exception_processing/rtems.S | 132
-rw-r--r--  c/src/lib/libcpu/powerpc/shared/Makefile.in | 2
-rw-r--r--  c/src/lib/libcpu/powerpc/shared/cpu.h | 24
-rw-r--r--  c/src/lib/libcpu/powerpc/wrapup/Makefile.in | 2
141 files changed, 37427 insertions, 635 deletions
diff --git a/c/src/lib/libbsp/powerpc/mcp750/Makefile.in b/c/src/lib/libbsp/powerpc/mcp750/Makefile.in
index 584bb31e28..6813a48f92 100644
--- a/c/src/lib/libbsp/powerpc/mcp750/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/mcp750/Makefile.in
@@ -29,7 +29,7 @@ NETWORK = $(NETWORK_$(HAS_NETWORKING)_V)
# wrapup is the one that actually builds and installs the library
# from the individual .rel files built in other directories
SUBDIRS = clock console include pci residual openpic irq vectors start \
- startup bootloader $(NETWORK) wrapup
+ startup bootloader $(NETWORK) motorola wrapup
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
cd $(top_builddir) \
diff --git a/c/src/lib/libbsp/powerpc/mcp750/bootloader/Makefile.in b/c/src/lib/libbsp/powerpc/mcp750/bootloader/Makefile.in
index e0ee75873b..86ca29151d 100644
--- a/c/src/lib/libbsp/powerpc/mcp750/bootloader/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/mcp750/bootloader/Makefile.in
@@ -11,7 +11,7 @@ subdir = powerpc/mcp750/bootloader
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@:@srcdir@/../../../shared:@srcdir@/../console
+VPATH = @srcdir@:@srcdir@/../../../shared:@srcdir@/../../shared/console:@srcdir@/../../shared/bootloader
# C source names, if any, go here -- minus the .c
C_PIECES = misc pci zlib mm em86 polled_io lib
@@ -77,7 +77,7 @@ CLOBBER_ADDITIONS += $(IMAGES)
#
bootloader : ${OBJS} $(IMAGES) $(BINARY_LOADED) ppcboot.lds
$(LD) -o bootloader $(OBJS) --just-symbols=$(BINARY_LOADED) \
- -b binary $(IMAGES) -T @srcdir@/ppcboot.lds \
+ -b binary $(IMAGES) -T @srcdir@/../../shared/bootloader/ppcboot.lds \
-Map bootloader.map
check_unresolved : ${OBJS}
diff --git a/c/src/lib/libbsp/powerpc/mcp750/clock/Makefile.in b/c/src/lib/libbsp/powerpc/mcp750/clock/Makefile.in
index b1af70d442..4490d4b392 100644
--- a/c/src/lib/libbsp/powerpc/mcp750/clock/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/mcp750/clock/Makefile.in
@@ -11,9 +11,7 @@ subdir = powerpc/mcp750/clock
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@
-
-PGM = ${ARCH}/clock.rel
+VPATH = @srcdir@:@srcdir@/../../shared/clock
# C source names, if any, go here -- minus the .c
C_PIECES = p_clock
@@ -58,10 +56,7 @@ LDFLAGS +=
CLEAN_ADDITIONS +=
CLOBBER_ADDITIONS +=
-$(PGM): ${OBJS}
- $(make-rel)
-
-all: ${ARCH} $(SRCS) $(PGM)
+all: ${ARCH} $(SRCS) $(OBJS)
# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
install: all
diff --git a/c/src/lib/libbsp/powerpc/mcp750/console/Makefile.in b/c/src/lib/libbsp/powerpc/mcp750/console/Makefile.in
index 02639f189d..95b8c27b57 100644
--- a/c/src/lib/libbsp/powerpc/mcp750/console/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/mcp750/console/Makefile.in
@@ -1,5 +1,5 @@
#
-# $Id:
+# $Id$
#
@SET_MAKE@
@@ -11,14 +11,14 @@ subdir = powerpc/mcp750/console
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@:@srcdir@/../../../shared
+VPATH = @srcdir@:@srcdir@/../../shared/console:@srcdir@/../../../shared
# C source names, if any, go here -- minus the .c
-C_PIECES = polled_io uart console inch console_reserve_resources
+C_PIECES = polled_io uart console inch
C_FILES = $(C_PIECES:%=%.c)
C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
-H_FILES = $(srcdir)/consoleIo.h $(srcdir)/keyboard.h $(srcdir)/uart.h
+H_FILES = $(srcdir)/../../shared/console/consoleIo.h $(srcdir)/../../shared/console/keyboard.h $(srcdir)/../../shared/console/uart.h
# Assembly source names, if any, go here -- minus the .s
S_PIECES =
@@ -48,6 +48,7 @@ CC_O_FILES = $(CC_PIECES:%=${ARCH}/%.o)
#
CPPFLAGS += -DSTATIC_LOG_ALLOC
+CFLAGS +=
#
# Add your list of files to delete here. The config files
# already know how to delete some stuff, so you may want
@@ -56,11 +57,7 @@ CPPFLAGS += -DSTATIC_LOG_ALLOC
# 'make clobber' already includes 'make clean'
#
-preinstall:
- @$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
- @$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
-
-all: ${ARCH} $(SRCS) preinstall ${OBJS}
+all: ${ARCH} $(SRCS) ${OBJS}
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
cd $(top_builddir) \
diff --git a/c/src/lib/libbsp/powerpc/mcp750/console/console_reserve_resources.c b/c/src/lib/libbsp/powerpc/mcp750/console/console_reserve_resources.c
deleted file mode 100644
index 2ec22746dd..0000000000
--- a/c/src/lib/libbsp/powerpc/mcp750/console/console_reserve_resources.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * console.c -- console I/O package
- *
- * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
- *
- * This code is based on the pc386 BSP console.c so the following
- * copyright also applies :
- *
- * (C) Copyright 1997 -
- * - NavIST Group - Real-Time Distributed Systems and Industrial Automation
- *
- * http://pandora.ist.utl.pt
- *
- * Instituto Superior Tecnico * Lisboa * PORTUGAL
- * The license and distribution terms for this file may be
- * found in found in the file LICENSE in this distribution or at
- * http://www.OARcorp.com/rtems/license.html.
- *
- * $Id$
- */
-
-#include <stdlib.h>
-#include <assert.h>
-#include <stdlib.h>
-
-#undef __assert
-void __assert (const char *file, int line, const char *msg);
-extern int close(int fd);
-
-#include <bsp.h>
-#include <bsp/irq.h>
-#include <rtems/libio.h>
-#include <termios.h>
-#include <bsp/uart.h>
-#include <bsp/consoleIo.h>
-
-/* Definitions for BSPConsolePort */
-#define BSP_CONSOLE_PORT_CONSOLE (-1)
-#define BSP_CONSOLE_PORT_COM1 (BSP_UART_COM1)
-#define BSP_CONSOLE_PORT_COM2 (BSP_UART_COM2)
-/*
- * Possible value for console input/output :
- * BSP_CONSOLE_PORT_CONSOLE
- * BSP_UART_COM1
- * BSP_UART_COM2
- */
-
-int BSPConsolePort = BSP_UART_COM1;
-
-/* int BSPConsolePort = BSP_UART_COM2; */
-int BSPBaseBaud = 115200;
-
-void console_reserve_resources(rtems_configuration_table *conf)
-{
- if(BSPConsolePort != BSP_CONSOLE_PORT_CONSOLE)
- {
- rtems_termios_reserve_resources(conf, 1);
- }
-
- return;
-}
diff --git a/c/src/lib/libbsp/powerpc/mcp750/include/Makefile.in b/c/src/lib/libbsp/powerpc/mcp750/include/Makefile.in
index 26345294f6..cb3d87abd3 100644
--- a/c/src/lib/libbsp/powerpc/mcp750/include/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/mcp750/include/Makefile.in
@@ -13,7 +13,13 @@ PROJECT_ROOT = @PROJECT_ROOT@
VPATH = @srcdir@
-H_FILES = $(srcdir)/nvram.h $(srcdir)/bsp.h
+H_FILES = $(srcdir)/../../shared/include/nvram.h \
+ $(srcdir)/../../shared/include/bsp.h
+
+BSP_H_FILES = $(srcdir)/../../shared/console/consoleIo.h \
+ $(srcdir)/../../shared/console/uart.h \
+ $(srcdir)/../../shared/irq/irq.h \
+ $(srcdir)/../../shared/motorola/motorola.h
#
# Equate files are for including from assembly preprocessed by
@@ -43,7 +49,9 @@ CLOBBER_ADDITIONS +=
preinstall:
$(mkinstalldirs) $(PROJECT_INCLUDE)
+ $(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
@$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)
+ @$(INSTALL_CHANGE) -m 644 $(BSP_H_FILES) $(PROJECT_INCLUDE)/bsp
all: $(SRCS) preinstall
diff --git a/c/src/lib/libbsp/powerpc/mcp750/irq/Makefile.in b/c/src/lib/libbsp/powerpc/mcp750/irq/Makefile.in
index a44fecdfa9..2f9bb6ae04 100644
--- a/c/src/lib/libbsp/powerpc/mcp750/irq/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/mcp750/irq/Makefile.in
@@ -11,14 +11,14 @@ subdir = powerpc/mcp750/irq
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@
+VPATH = @srcdir@:@srcdir@/../../shared/irq
# C source names, if any, go here -- minus the .c
C_PIECES = irq_init i8259 irq
C_FILES = $(C_PIECES:%=%.c)
C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
-H_FILES = $(srcdir)/irq.h
+H_FILES = $(srcdir)/../../shared/irq/irq.h
# Assembly source names, if any, go here -- minus the .s
S_PIECES = irq_asm
@@ -61,11 +61,7 @@ LDFLAGS +=
CLEAN_ADDITIONS +=
CLOBBER_ADDITIONS +=
-preinstall:
- @$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
- @$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
-
-all: ${ARCH} $(SRCS) preinstall ${OBJS}
+all: ${ARCH} $(SRCS) ${OBJS}
install: all
diff --git a/c/src/lib/libbsp/powerpc/mcp750/openpic/Makefile.in b/c/src/lib/libbsp/powerpc/mcp750/openpic/Makefile.in
index 3b4fc9856b..2f3e2c2c23 100644
--- a/c/src/lib/libbsp/powerpc/mcp750/openpic/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/mcp750/openpic/Makefile.in
@@ -11,16 +11,14 @@ subdir = powerpc/mcp750/openpic
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@
-
-PGM = ${ARCH}/openpic.rel
+VPATH = @srcdir@:@srcdir@/../../shared/openpic
# C source names, if any, go here -- minus the .c
C_PIECES = $(OPENPIC_C_PIECES)
C_FILES = $(C_PIECES:%=%.c)
C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
-H_FILES = $(srcdir)/openpic.h
+H_FILES = $(srcdir)/../../shared/openpic/openpic.h
SRCS = $(C_FILES) $(H_FILES)
OBJS = $(C_O_FILES)
@@ -60,14 +58,11 @@ LDFLAGS +=
CLEAN_ADDITIONS +=
CLOBBER_ADDITIONS +=
-$(PGM): ${OBJS}
- $(make-rel)
-
preinstall:
@$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
@$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
-all: ${ARCH} $(SRCS) preinstall $(PGM)
+all: ${ARCH} $(SRCS) preinstall $(OBJS)
# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
install: all
diff --git a/c/src/lib/libbsp/powerpc/mcp750/pci/Makefile.in b/c/src/lib/libbsp/powerpc/mcp750/pci/Makefile.in
index b09f066481..b2192fb262 100644
--- a/c/src/lib/libbsp/powerpc/mcp750/pci/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/mcp750/pci/Makefile.in
@@ -11,16 +11,14 @@ subdir = powerpc/mcp750/pci
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@
-
-PGM = ${ARCH}/pci.rel
+VPATH = @srcdir@:@srcdir@/../../shared/pci
# C source names, if any, go here -- minus the .c
-C_PIECES = $(PCI_C_PIECES)
+C_PIECES = pci
C_FILES = $(C_PIECES:%=%.c)
C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
-H_FILES = $(srcdir)/pci.h
+H_FILES = $(srcdir)/../../shared/pci/pci.h
SRCS = $(C_FILES) $(H_FILES)
OBJS = $(C_O_FILES)
@@ -36,8 +34,6 @@ INSTALLDIRS = $(PROJECT_INCLUDE)/bsp
$(INSTALLDIRS):
@$(mkinstalldirs) $(INSTALLDIRS)
-PCI_C_PIECES = pci
-
#
# (OPTIONAL) Add local stuff here using +=
#
@@ -60,14 +56,11 @@ LDFLAGS +=
CLEAN_ADDITIONS +=
CLOBBER_ADDITIONS +=
-$(PGM): ${OBJS}
- $(make-rel)
-
preinstall:
@$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
@$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
-all: ${ARCH} $(SRCS) preinstall $(PGM)
+all: ${ARCH} $(SRCS) preinstall $(OBJS)
# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
install: all
diff --git a/c/src/lib/libbsp/powerpc/mcp750/residual/Makefile.in b/c/src/lib/libbsp/powerpc/mcp750/residual/Makefile.in
index d1b0ec1475..af74498505 100644
--- a/c/src/lib/libbsp/powerpc/mcp750/residual/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/mcp750/residual/Makefile.in
@@ -11,16 +11,14 @@ subdir = powerpc/mcp750/residual
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@
-
-PGM = ${ARCH}/residual.rel
+VPATH = @srcdir@:@srcdir@/../../shared/residual
# C source names, if any, go here -- minus the .c
C_PIECES = $(RESIDUAL_C_PIECES)
C_FILES = $(C_PIECES:%=%.c)
C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
-H_FILES = $(srcdir)/pnp.h $(srcdir)/residual.h
+H_FILES = $(srcdir)/../../shared/residual/pnp.h $(srcdir)/../../shared/residual/residual.h
SRCS = $(C_FILES) $(H_FILES)
OBJS = $(C_O_FILES)
@@ -64,10 +62,7 @@ preinstall:
@$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
@$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
-$(PGM): ${OBJS}
- $(make-rel)
-
-all: ${ARCH} $(SRCS) preinstall $(PGM)
+all: ${ARCH} $(SRCS) preinstall $(OBJS)
# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
install: all
diff --git a/c/src/lib/libbsp/powerpc/mcp750/start/Makefile.in b/c/src/lib/libbsp/powerpc/mcp750/start/Makefile.in
index 16b61c2e02..704bfeb8e8 100644
--- a/c/src/lib/libbsp/powerpc/mcp750/start/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/mcp750/start/Makefile.in
@@ -11,7 +11,7 @@ subdir = powerpc/mcp750/start
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@
+VPATH = @srcdir@:@srcdir@/../../shared/start
PGM = ${ARCH}/start.o
diff --git a/c/src/lib/libbsp/powerpc/mcp750/startup/Makefile.in b/c/src/lib/libbsp/powerpc/mcp750/startup/Makefile.in
index 0a246d35a2..faf523d8c4 100644
--- a/c/src/lib/libbsp/powerpc/mcp750/startup/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/mcp750/startup/Makefile.in
@@ -11,7 +11,7 @@ subdir = powerpc/mcp750/startup
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@:@srcdir@/../console:@srcdir@/../../../shared
+VPATH = @srcdir@:@srcdir@/../console:@srcdir@/../../../shared:@srcdir@/../../shared/startup
# C source names, if any, go here -- minus the .c
C_PIECES = bootcard main bspstart bsppost bsplibc sbrk bspclean \
diff --git a/c/src/lib/libbsp/powerpc/mcp750/vectors/Makefile.in b/c/src/lib/libbsp/powerpc/mcp750/vectors/Makefile.in
index e37ed7230c..33e1b3cefa 100644
--- a/c/src/lib/libbsp/powerpc/mcp750/vectors/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/mcp750/vectors/Makefile.in
@@ -11,16 +11,14 @@ subdir = powerpc/mcp750/vectors
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@:@srcdir@/../console:
-
-PGM = ${ARCH}/vectors.rel
+VPATH = @srcdir@:@srcdir@/../console:@srcdir@/../../shared/vectors
# C source names, if any, go here -- minus the .c
C_PIECES = vectors_init
C_FILES = $(C_PIECES:%=%.c)
C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
-H_FILES = $(srcdir)/vectors.h
+H_FILES = $(srcdir)/../../shared/vectors/vectors.h
# Assembly source names, if any, go here -- minus the .s
S_PIECES = vectors
diff --git a/c/src/lib/libbsp/powerpc/mcp750/wrapup/Makefile.in b/c/src/lib/libbsp/powerpc/mcp750/wrapup/Makefile.in
index 15c5e8e9f1..4380a59cbd 100644
--- a/c/src/lib/libbsp/powerpc/mcp750/wrapup/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/mcp750/wrapup/Makefile.in
@@ -18,7 +18,7 @@ NETWORK_yes_V = dec21140
NETWORK = $(NETWORK_$(HAS_NETWORKING)_V)
BSP_PIECES = clock console irq openpic pci residual startup $(NETWORK) \
- vectors
+ vectors motorola
GENERIC_PIECES =
# bummer; have to use $foreach since % pattern subst rules only replace 1x
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/Makefile.in b/c/src/lib/libbsp/powerpc/motorola_powerpc/Makefile.in
index 584bb31e28..6813a48f92 100644
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/motorola_powerpc/Makefile.in
@@ -29,7 +29,7 @@ NETWORK = $(NETWORK_$(HAS_NETWORKING)_V)
# wrapup is the one that actually builds and installs the library
# from the individual .rel files built in other directories
SUBDIRS = clock console include pci residual openpic irq vectors start \
- startup bootloader $(NETWORK) wrapup
+ startup bootloader $(NETWORK) motorola wrapup
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
cd $(top_builddir) \
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/bootloader/Makefile.in b/c/src/lib/libbsp/powerpc/motorola_powerpc/bootloader/Makefile.in
index e0ee75873b..86ca29151d 100644
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/bootloader/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/motorola_powerpc/bootloader/Makefile.in
@@ -11,7 +11,7 @@ subdir = powerpc/mcp750/bootloader
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@:@srcdir@/../../../shared:@srcdir@/../console
+VPATH = @srcdir@:@srcdir@/../../../shared:@srcdir@/../../shared/console:@srcdir@/../../shared/bootloader
# C source names, if any, go here -- minus the .c
C_PIECES = misc pci zlib mm em86 polled_io lib
@@ -77,7 +77,7 @@ CLOBBER_ADDITIONS += $(IMAGES)
#
bootloader : ${OBJS} $(IMAGES) $(BINARY_LOADED) ppcboot.lds
$(LD) -o bootloader $(OBJS) --just-symbols=$(BINARY_LOADED) \
- -b binary $(IMAGES) -T @srcdir@/ppcboot.lds \
+ -b binary $(IMAGES) -T @srcdir@/../../shared/bootloader/ppcboot.lds \
-Map bootloader.map
check_unresolved : ${OBJS}
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/clock/Makefile.in b/c/src/lib/libbsp/powerpc/motorola_powerpc/clock/Makefile.in
index b1af70d442..4490d4b392 100644
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/clock/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/motorola_powerpc/clock/Makefile.in
@@ -11,9 +11,7 @@ subdir = powerpc/mcp750/clock
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@
-
-PGM = ${ARCH}/clock.rel
+VPATH = @srcdir@:@srcdir@/../../shared/clock
# C source names, if any, go here -- minus the .c
C_PIECES = p_clock
@@ -58,10 +56,7 @@ LDFLAGS +=
CLEAN_ADDITIONS +=
CLOBBER_ADDITIONS +=
-$(PGM): ${OBJS}
- $(make-rel)
-
-all: ${ARCH} $(SRCS) $(PGM)
+all: ${ARCH} $(SRCS) $(OBJS)
# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
install: all
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/console/Makefile.in b/c/src/lib/libbsp/powerpc/motorola_powerpc/console/Makefile.in
index 02639f189d..95b8c27b57 100644
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/console/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/motorola_powerpc/console/Makefile.in
@@ -1,5 +1,5 @@
#
-# $Id:
+# $Id$
#
@SET_MAKE@
@@ -11,14 +11,14 @@ subdir = powerpc/mcp750/console
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@:@srcdir@/../../../shared
+VPATH = @srcdir@:@srcdir@/../../shared/console:@srcdir@/../../../shared
# C source names, if any, go here -- minus the .c
-C_PIECES = polled_io uart console inch console_reserve_resources
+C_PIECES = polled_io uart console inch
C_FILES = $(C_PIECES:%=%.c)
C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
-H_FILES = $(srcdir)/consoleIo.h $(srcdir)/keyboard.h $(srcdir)/uart.h
+H_FILES = $(srcdir)/../../shared/console/consoleIo.h $(srcdir)/../../shared/console/keyboard.h $(srcdir)/../../shared/console/uart.h
# Assembly source names, if any, go here -- minus the .s
S_PIECES =
@@ -48,6 +48,7 @@ CC_O_FILES = $(CC_PIECES:%=${ARCH}/%.o)
#
CPPFLAGS += -DSTATIC_LOG_ALLOC
+CFLAGS +=
#
# Add your list of files to delete here. The config files
# already know how to delete some stuff, so you may want
@@ -56,11 +57,7 @@ CPPFLAGS += -DSTATIC_LOG_ALLOC
# 'make clobber' already includes 'make clean'
#
-preinstall:
- @$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
- @$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
-
-all: ${ARCH} $(SRCS) preinstall ${OBJS}
+all: ${ARCH} $(SRCS) ${OBJS}
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
cd $(top_builddir) \
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/console/console.c b/c/src/lib/libbsp/powerpc/motorola_powerpc/console/console.c
deleted file mode 100644
index fc50656af1..0000000000
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/console/console.c
+++ /dev/null
@@ -1,382 +0,0 @@
-/*
- * console.c -- console I/O package
- *
- * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
- *
- * This code is based on the pc386 BSP console.c so the following
- * copyright also applies :
- *
- * (C) Copyright 1997 -
- * - NavIST Group - Real-Time Distributed Systems and Industrial Automation
- *
- * http://pandora.ist.utl.pt
- *
- * Instituto Superior Tecnico * Lisboa * PORTUGAL
- * The license and distribution terms for this file may be
- * found in found in the file LICENSE in this distribution or at
- * http://www.OARcorp.com/rtems/license.html.
- *
- * $Id$
- */
-
-#include <stdlib.h>
-#include <assert.h>
-#include <stdlib.h>
-
-#undef __assert
-void __assert (const char *file, int line, const char *msg);
-extern int close(int fd);
-
-#include <bsp.h>
-#include <bsp/irq.h>
-#include <rtems/libio.h>
-#include <termios.h>
-#include <bsp/uart.h>
-#include <bsp/consoleIo.h>
-
-/* Definitions for BSPConsolePort */
-#define BSP_CONSOLE_PORT_CONSOLE (-1)
-#define BSP_CONSOLE_PORT_COM1 (BSP_UART_COM1)
-#define BSP_CONSOLE_PORT_COM2 (BSP_UART_COM2)
-/*
- * Possible value for console input/output :
- * BSP_CONSOLE_PORT_CONSOLE
- * BSP_UART_COM1
- * BSP_UART_COM2
- */
-
-extern int BSPConsolePort;
-
-/* int BSPConsolePort = BSP_UART_COM2; */
-extern int BSPBaseBaud;
-
-/*-------------------------------------------------------------------------+
-| External Prototypes
-+--------------------------------------------------------------------------*/
-
-static int conSetAttr(int minor, const struct termios *);
-static void isr_on(const rtems_irq_connect_data *);
-static void isr_off(const rtems_irq_connect_data *);
-static int isr_is_on(const rtems_irq_connect_data *);
-
-
-static rtems_irq_connect_data console_isr_data = {BSP_ISA_UART_COM1_IRQ,
- BSP_uart_termios_isr_com1,
- isr_on,
- isr_off,
- isr_is_on};
-
-static void
-isr_on(const rtems_irq_connect_data *unused)
-{
- return;
-}
-
-static void
-isr_off(const rtems_irq_connect_data *unused)
-{
- return;
-}
-
-static int
-isr_is_on(const rtems_irq_connect_data *irq)
-{
- return BSP_irq_enabled_at_i8259s(irq->name);
-}
-
-/*
-void console_reserve_resources(rtems_configuration_table *conf)
-{
- if(BSPConsolePort != BSP_CONSOLE_PORT_CONSOLE)
- {
- rtems_termios_reserve_resources(conf, 1);
- }
-
- return;
-}
-*/
-
-void __assert (const char *file, int line, const char *msg)
-{
- static char exit_msg[] = "EXECUTIVE SHUTDOWN! Any key to reboot...";
- unsigned char ch;
-
- /*
- * Note we cannot call exit or printf from here,
- * assert can fail inside ISR too
- */
-
- /*
- * Close console
- */
- close(2);
- close(1);
- close(0);
-
- printk("\nassert failed: %s: ", file);
- printk("%d: ", line);
- printk("%s\n\n", msg);
- printk(exit_msg);
- ch = debug_getc();
- printk("\n\n");
- rtemsReboot();
-
-}
-
-
-/*-------------------------------------------------------------------------+
-| Console device driver INITIALIZE entry point.
-+--------------------------------------------------------------------------+
-| Initilizes the I/O console (keyboard + VGA display) driver.
-+--------------------------------------------------------------------------*/
-rtems_device_driver
-console_initialize(rtems_device_major_number major,
- rtems_device_minor_number minor,
- void *arg)
-{
- rtems_status_code status;
-
- /*
- * The video was initialized in the start.s code and does not need
- * to be reinitialized.
- */
-
-
- /*
- * Set up TERMIOS
- */
- rtems_termios_initialize ();
-
- /*
- * Do device-specific initialization
- */
-
- /* 9600-8-N-1 */
- BSP_uart_init(BSPConsolePort, 9600, 0);
-
-
- /* Set interrupt handler */
- if(BSPConsolePort == BSP_UART_COM1)
- {
- console_isr_data.name = BSP_ISA_UART_COM1_IRQ;
- console_isr_data.hdl = BSP_uart_termios_isr_com1;
-
- }
- else
- {
- assert(BSPConsolePort == BSP_UART_COM2);
- console_isr_data.name = BSP_ISA_UART_COM2_IRQ;
- console_isr_data.hdl = BSP_uart_termios_isr_com2;
- }
-
- status = BSP_install_rtems_irq_handler(&console_isr_data);
-
- if (!status){
- printk("Error installing serial console interrupt handler!\n");
- rtems_fatal_error_occurred(status);
- }
- /*
- * Register the device
- */
- status = rtems_io_register_name ("/dev/console", major, 0);
- if (status != RTEMS_SUCCESSFUL)
- {
- printk("Error registering console device!\n");
- rtems_fatal_error_occurred (status);
- }
-
- if(BSPConsolePort == BSP_UART_COM1)
- {
- printk("Initialized console on port COM1 9600-8-N-1\n\n");
- }
- else
- {
- printk("Initialized console on port COM2 9600-8-N-1\n\n");
- }
- return RTEMS_SUCCESSFUL;
-} /* console_initialize */
-
-
-static int console_last_close(int major, int minor, void *arg)
-{
- BSP_remove_rtems_irq_handler (&console_isr_data);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------+
-| Console device driver OPEN entry point
-+--------------------------------------------------------------------------*/
-rtems_device_driver
-console_open(rtems_device_major_number major,
- rtems_device_minor_number minor,
- void *arg)
-{
- rtems_status_code status;
- static rtems_termios_callbacks cb =
- {
- NULL, /* firstOpen */
- console_last_close, /* lastClose */
- NULL, /* pollRead */
- BSP_uart_termios_write_com1, /* write */
- conSetAttr, /* setAttributes */
- NULL, /* stopRemoteTx */
- NULL, /* startRemoteTx */
- 1 /* outputUsesInterrupts */
- };
-
- if(BSPConsolePort == BSP_UART_COM2)
- {
- cb.write = BSP_uart_termios_write_com2;
- }
-
- status = rtems_termios_open (major, minor, arg, &cb);
-
- if(status != RTEMS_SUCCESSFUL)
- {
- printk("Error openning console device\n");
- return status;
- }
-
- /*
- * Pass data area info down to driver
- */
- BSP_uart_termios_set(BSPConsolePort,
- ((rtems_libio_open_close_args_t *)arg)->iop->data1);
- /* Enable interrupts on channel */
- BSP_uart_intr_ctrl(BSPConsolePort, BSP_UART_INTR_CTRL_TERMIOS);
-
- return RTEMS_SUCCESSFUL;
-}
-
-/*-------------------------------------------------------------------------+
-| Console device driver CLOSE entry point
-+--------------------------------------------------------------------------*/
-rtems_device_driver
-console_close(rtems_device_major_number major,
- rtems_device_minor_number minor,
- void *arg)
-{
- rtems_device_driver res = RTEMS_SUCCESSFUL;
-
- res = rtems_termios_close (arg);
-
- return res;
-} /* console_close */
-
-
-/*-------------------------------------------------------------------------+
-| Console device driver READ entry point.
-+--------------------------------------------------------------------------+
-| Read characters from the I/O console. We only have stdin.
-+--------------------------------------------------------------------------*/
-rtems_device_driver
-console_read(rtems_device_major_number major,
- rtems_device_minor_number minor,
- void *arg)
-{
-
- return rtems_termios_read (arg);
-} /* console_read */
-
-
-/*-------------------------------------------------------------------------+
-| Console device driver WRITE entry point.
-+--------------------------------------------------------------------------+
-| Write characters to the I/O console. Stderr and stdout are the same.
-+--------------------------------------------------------------------------*/
-rtems_device_driver
-console_write(rtems_device_major_number major,
- rtems_device_minor_number minor,
- void * arg)
-{
-
- return rtems_termios_write (arg);
-
-} /* console_write */
-
-
-
-/*
- * Handle ioctl request.
- */
-rtems_device_driver
-console_control(rtems_device_major_number major,
- rtems_device_minor_number minor,
- void * arg
-)
-{
- return rtems_termios_ioctl (arg);
-}
-
-static int
-conSetAttr(int minor, const struct termios *t)
-{
- int baud;
-
- switch (t->c_cflag & CBAUD)
- {
- case B50:
- baud = 50;
- break;
- case B75:
- baud = 75;
- break;
- case B110:
- baud = 110;
- break;
- case B134:
- baud = 134;
- break;
- case B150:
- baud = 150;
- break;
- case B200:
- baud = 200;
- break;
- case B300:
- baud = 300;
- break;
- case B600:
- baud = 600;
- break;
- case B1200:
- baud = 1200;
- break;
- case B1800:
- baud = 1800;
- break;
- case B2400:
- baud = 2400;
- break;
- case B4800:
- baud = 4800;
- break;
- case B9600:
- baud = 9600;
- break;
- case B19200:
- baud = 19200;
- break;
- case B38400:
- baud = 38400;
- break;
- case B57600:
- baud = 57600;
- break;
- case B115200:
- baud = 115200;
- break;
- default:
- baud = 0;
- rtems_fatal_error_occurred (RTEMS_INTERNAL_ERROR);
- return 0;
- }
-
- BSP_uart_set_baud(BSPConsolePort, baud);
-
- return 0;
-}
-
-
-
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/console/console_reserve_resources.c b/c/src/lib/libbsp/powerpc/motorola_powerpc/console/console_reserve_resources.c
deleted file mode 100644
index 2ec22746dd..0000000000
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/console/console_reserve_resources.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * console.c -- console I/O package
- *
- * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
- *
- * This code is based on the pc386 BSP console.c so the following
- * copyright also applies :
- *
- * (C) Copyright 1997 -
- * - NavIST Group - Real-Time Distributed Systems and Industrial Automation
- *
- * http://pandora.ist.utl.pt
- *
- * Instituto Superior Tecnico * Lisboa * PORTUGAL
- * The license and distribution terms for this file may be
- * found in found in the file LICENSE in this distribution or at
- * http://www.OARcorp.com/rtems/license.html.
- *
- * $Id$
- */
-
-#include <stdlib.h>
-#include <assert.h>
-#include <stdlib.h>
-
-#undef __assert
-void __assert (const char *file, int line, const char *msg);
-extern int close(int fd);
-
-#include <bsp.h>
-#include <bsp/irq.h>
-#include <rtems/libio.h>
-#include <termios.h>
-#include <bsp/uart.h>
-#include <bsp/consoleIo.h>
-
-/* Definitions for BSPConsolePort */
-#define BSP_CONSOLE_PORT_CONSOLE (-1)
-#define BSP_CONSOLE_PORT_COM1 (BSP_UART_COM1)
-#define BSP_CONSOLE_PORT_COM2 (BSP_UART_COM2)
-/*
- * Possible value for console input/output :
- * BSP_CONSOLE_PORT_CONSOLE
- * BSP_UART_COM1
- * BSP_UART_COM2
- */
-
-int BSPConsolePort = BSP_UART_COM1;
-
-/* int BSPConsolePort = BSP_UART_COM2; */
-int BSPBaseBaud = 115200;
-
-void console_reserve_resources(rtems_configuration_table *conf)
-{
- if(BSPConsolePort != BSP_CONSOLE_PORT_CONSOLE)
- {
- rtems_termios_reserve_resources(conf, 1);
- }
-
- return;
-}
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/include/Makefile.in b/c/src/lib/libbsp/powerpc/motorola_powerpc/include/Makefile.in
index 26345294f6..cb3d87abd3 100644
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/include/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/motorola_powerpc/include/Makefile.in
@@ -13,7 +13,13 @@ PROJECT_ROOT = @PROJECT_ROOT@
VPATH = @srcdir@
-H_FILES = $(srcdir)/nvram.h $(srcdir)/bsp.h
+H_FILES = $(srcdir)/../../shared/include/nvram.h \
+ $(srcdir)/../../shared/include/bsp.h
+
+BSP_H_FILES = $(srcdir)/../../shared/console/consoleIo.h \
+ $(srcdir)/../../shared/console/uart.h \
+ $(srcdir)/../../shared/irq/irq.h \
+ $(srcdir)/../../shared/motorola/motorola.h
#
# Equate files are for including from assembly preprocessed by
@@ -43,7 +49,9 @@ CLOBBER_ADDITIONS +=
preinstall:
$(mkinstalldirs) $(PROJECT_INCLUDE)
+ $(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
@$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)
+ @$(INSTALL_CHANGE) -m 644 $(BSP_H_FILES) $(PROJECT_INCLUDE)/bsp
all: $(SRCS) preinstall
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/irq/Makefile.in b/c/src/lib/libbsp/powerpc/motorola_powerpc/irq/Makefile.in
index a44fecdfa9..2f9bb6ae04 100644
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/irq/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/motorola_powerpc/irq/Makefile.in
@@ -11,14 +11,14 @@ subdir = powerpc/mcp750/irq
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@
+VPATH = @srcdir@:@srcdir@/../../shared/irq
# C source names, if any, go here -- minus the .c
C_PIECES = irq_init i8259 irq
C_FILES = $(C_PIECES:%=%.c)
C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
-H_FILES = $(srcdir)/irq.h
+H_FILES = $(srcdir)/../../shared/irq/irq.h
# Assembly source names, if any, go here -- minus the .s
S_PIECES = irq_asm
@@ -61,11 +61,7 @@ LDFLAGS +=
CLEAN_ADDITIONS +=
CLOBBER_ADDITIONS +=
-preinstall:
- @$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
- @$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
-
-all: ${ARCH} $(SRCS) preinstall ${OBJS}
+all: ${ARCH} $(SRCS) ${OBJS}
install: all
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/openpic/Makefile.in b/c/src/lib/libbsp/powerpc/motorola_powerpc/openpic/Makefile.in
index 3b4fc9856b..2f3e2c2c23 100644
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/openpic/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/motorola_powerpc/openpic/Makefile.in
@@ -11,16 +11,14 @@ subdir = powerpc/mcp750/openpic
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@
-
-PGM = ${ARCH}/openpic.rel
+VPATH = @srcdir@:@srcdir@/../../shared/openpic
# C source names, if any, go here -- minus the .c
C_PIECES = $(OPENPIC_C_PIECES)
C_FILES = $(C_PIECES:%=%.c)
C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
-H_FILES = $(srcdir)/openpic.h
+H_FILES = $(srcdir)/../../shared/openpic/openpic.h
SRCS = $(C_FILES) $(H_FILES)
OBJS = $(C_O_FILES)
@@ -60,14 +58,11 @@ LDFLAGS +=
CLEAN_ADDITIONS +=
CLOBBER_ADDITIONS +=
-$(PGM): ${OBJS}
- $(make-rel)
-
preinstall:
@$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
@$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
-all: ${ARCH} $(SRCS) preinstall $(PGM)
+all: ${ARCH} $(SRCS) preinstall $(OBJS)
# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
install: all
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/pci/Makefile.in b/c/src/lib/libbsp/powerpc/motorola_powerpc/pci/Makefile.in
index b09f066481..b2192fb262 100644
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/pci/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/motorola_powerpc/pci/Makefile.in
@@ -11,16 +11,14 @@ subdir = powerpc/mcp750/pci
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@
-
-PGM = ${ARCH}/pci.rel
+VPATH = @srcdir@:@srcdir@/../../shared/pci
# C source names, if any, go here -- minus the .c
-C_PIECES = $(PCI_C_PIECES)
+C_PIECES = pci
C_FILES = $(C_PIECES:%=%.c)
C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
-H_FILES = $(srcdir)/pci.h
+H_FILES = $(srcdir)/../../shared/pci/pci.h
SRCS = $(C_FILES) $(H_FILES)
OBJS = $(C_O_FILES)
@@ -36,8 +34,6 @@ INSTALLDIRS = $(PROJECT_INCLUDE)/bsp
$(INSTALLDIRS):
@$(mkinstalldirs) $(INSTALLDIRS)
-PCI_C_PIECES = pci
-
#
# (OPTIONAL) Add local stuff here using +=
#
@@ -60,14 +56,11 @@ LDFLAGS +=
CLEAN_ADDITIONS +=
CLOBBER_ADDITIONS +=
-$(PGM): ${OBJS}
- $(make-rel)
-
preinstall:
@$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
@$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
-all: ${ARCH} $(SRCS) preinstall $(PGM)
+all: ${ARCH} $(SRCS) preinstall $(OBJS)
# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
install: all
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/residual/Makefile.in b/c/src/lib/libbsp/powerpc/motorola_powerpc/residual/Makefile.in
index d1b0ec1475..af74498505 100644
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/residual/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/motorola_powerpc/residual/Makefile.in
@@ -11,16 +11,14 @@ subdir = powerpc/mcp750/residual
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@
-
-PGM = ${ARCH}/residual.rel
+VPATH = @srcdir@:@srcdir@/../../shared/residual
# C source names, if any, go here -- minus the .c
C_PIECES = $(RESIDUAL_C_PIECES)
C_FILES = $(C_PIECES:%=%.c)
C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
-H_FILES = $(srcdir)/pnp.h $(srcdir)/residual.h
+H_FILES = $(srcdir)/../../shared/residual/pnp.h $(srcdir)/../../shared/residual/residual.h
SRCS = $(C_FILES) $(H_FILES)
OBJS = $(C_O_FILES)
@@ -64,10 +62,7 @@ preinstall:
@$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
@$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
-$(PGM): ${OBJS}
- $(make-rel)
-
-all: ${ARCH} $(SRCS) preinstall $(PGM)
+all: ${ARCH} $(SRCS) preinstall $(OBJS)
# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
install: all
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/start/Makefile.in b/c/src/lib/libbsp/powerpc/motorola_powerpc/start/Makefile.in
index 16b61c2e02..704bfeb8e8 100644
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/start/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/motorola_powerpc/start/Makefile.in
@@ -11,7 +11,7 @@ subdir = powerpc/mcp750/start
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@
+VPATH = @srcdir@:@srcdir@/../../shared/start
PGM = ${ARCH}/start.o
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/startup/Makefile.in b/c/src/lib/libbsp/powerpc/motorola_powerpc/startup/Makefile.in
index 0a246d35a2..faf523d8c4 100644
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/startup/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/motorola_powerpc/startup/Makefile.in
@@ -11,7 +11,7 @@ subdir = powerpc/mcp750/startup
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@:@srcdir@/../console:@srcdir@/../../../shared
+VPATH = @srcdir@:@srcdir@/../console:@srcdir@/../../../shared:@srcdir@/../../shared/startup
# C source names, if any, go here -- minus the .c
C_PIECES = bootcard main bspstart bsppost bsplibc sbrk bspclean \
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/vectors/Makefile.in b/c/src/lib/libbsp/powerpc/motorola_powerpc/vectors/Makefile.in
index e37ed7230c..33e1b3cefa 100644
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/vectors/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/motorola_powerpc/vectors/Makefile.in
@@ -11,16 +11,14 @@ subdir = powerpc/mcp750/vectors
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
-VPATH = @srcdir@:@srcdir@/../console:
-
-PGM = ${ARCH}/vectors.rel
+VPATH = @srcdir@:@srcdir@/../console:@srcdir@/../../shared/vectors
# C source names, if any, go here -- minus the .c
C_PIECES = vectors_init
C_FILES = $(C_PIECES:%=%.c)
C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
-H_FILES = $(srcdir)/vectors.h
+H_FILES = $(srcdir)/../../shared/vectors/vectors.h
# Assembly source names, if any, go here -- minus the .s
S_PIECES = vectors
diff --git a/c/src/lib/libbsp/powerpc/motorola_powerpc/wrapup/Makefile.in b/c/src/lib/libbsp/powerpc/motorola_powerpc/wrapup/Makefile.in
index 15c5e8e9f1..4380a59cbd 100644
--- a/c/src/lib/libbsp/powerpc/motorola_powerpc/wrapup/Makefile.in
+++ b/c/src/lib/libbsp/powerpc/motorola_powerpc/wrapup/Makefile.in
@@ -18,7 +18,7 @@ NETWORK_yes_V = dec21140
NETWORK = $(NETWORK_$(HAS_NETWORKING)_V)
BSP_PIECES = clock console irq openpic pci residual startup $(NETWORK) \
- vectors
+ vectors motorola
GENERIC_PIECES =
# bummer; have to use $foreach since % pattern subst rules only replace 1x
diff --git a/c/src/lib/libbsp/powerpc/shared/Makefile.in b/c/src/lib/libbsp/powerpc/shared/Makefile.in
new file mode 100644
index 0000000000..a9c04df2cb
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/Makefile.in
@@ -0,0 +1,36 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../..
+subdir = powerpc/shared
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/directory.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+
+SRCS =
+
+all: $(SRCS)
+
+# We only build the Network library if HAS_NETWORKING was defined
+NETWORK_yes_V = dec21140
+NETWORK = $(NETWORK_$(HAS_NETWORKING)_V)
+
+# wrapup is the one that actually builds and installs the library
+# from the individual .rel files built in other directories
+SUB_DIRS = clock console include pci residual openpic irq vectors \
+ start startup motorola bootloader $(NETWORK)
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/Makefile.in b/c/src/lib/libbsp/powerpc/shared/bootloader/Makefile.in
new file mode 100644
index 0000000000..4ab15dcc7e
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/Makefile.in
@@ -0,0 +1,44 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/shared/bootloader
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@:@srcdir@/../../../shared:@srcdir@/../console
+
+# C source names, if any, go here -- minus the .c
+C_PIECES =
+C_FILES = $(C_PIECES:%=%.c)
+C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
+
+H_FILES = @srcdir@/bootldr.h @srcdir@/zlib.h @srcdir@/pci.h
+
+# Assembly source names, if any, go here -- minus the .s
+S_PIECES =
+S_FILES = $(S_PIECES:%=%.S)
+S_O_FILES = $(S_FILES:%.S=${ARCH}/%.o)
+
+SRCS = $(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES)
+OBJS = $(S_O_FILES) $(C_O_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+
+CC_PIECES =
+CC_FILES = $(CC_PIECES:%=%.cc)
+CC_O_FILES = $(CC_PIECES:%=${ARCH}/%.o)
+
+all: ${ARCH} $(SRCS) ${OBJ}
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/README b/c/src/lib/libbsp/powerpc/shared/bootloader/README
new file mode 100644
index 0000000000..6d36a152ba
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/README
@@ -0,0 +1,41 @@
+#
+# $Id$
+#
+
+The code in this directory has been taken WITH PERMISSION from
+Gabriel Paubert, paubert@iram.es. The main reason for having
+a separate bootloader for PreP compliant firmware is that the
+initial code is relocated by firmware at an unknow address
+(actually 0x5000 on motorola MCP750) and that as Gabriel I
+think having a relocatable bootloder code is a must.
+
+So the way of building a binary executable that can be booted via
+hard disk or network boot goes like this :
+
+ - make a RTEMS executable,
+ - put is as data section in the bootloder binary,
+ - relink the loader (see make-exe macros for details),
+
+I would like to thank Gabriel for his support and his code.
+The original code can be found in form of a patch to official linux
+kernel at (I insist not vger ppc kernel or Imac ppc kernels!!) :
+
+<ftp://vlab1.iram.es/pub/linux-2.2/>
+
+After applying the patch, the code is located in a new directory
+called prepboot.
+
+(NB : note use ftp not netscape...)
+
+Note that the actual code differs a lot since Gabriel choose to use
+a CHRP compliant mapping instead of a Prep Mapping to save
+BATs. I had no time to upgrade the code to its new one allthough
+I agree it should be done...
+
+I have also splitted the original code to have a more modular
+design enabling to reuse code between the loader and RTEMS
+initialization (e.g printk, ...).
+
+Eric Valette (valette@crf.canon.fr)
+
+
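
The build flow described in the README just added corresponds to the bootloader link rule updated earlier in this patch: the relocatable loader objects are relinked against the RTEMS executable, which is pulled in both via --just-symbols and as a raw binary image, using the shared ppcboot.lds linker script. A minimal, hypothetical sketch of that relink step follows (illustrative only; BINARY_LOADED and IMAGES stand for the RTEMS executable and its binary image, as in the bootloader Makefile.in hunks above):

    # Hypothetical relink step mirroring the mcp750/motorola_powerpc
    # bootloader rule shown earlier in this patch.
    bootloader: $(OBJS) $(IMAGES) $(BINARY_LOADED) ppcboot.lds
    	$(LD) -o bootloader $(OBJS) --just-symbols=$(BINARY_LOADED) \
    	  -b binary $(IMAGES) -T @srcdir@/../../shared/bootloader/ppcboot.lds \
    	  -Map bootloader.map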
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/bootldr.h b/c/src/lib/libbsp/powerpc/shared/bootloader/bootldr.h
new file mode 100644
index 0000000000..e3e02b0908
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/bootldr.h
@@ -0,0 +1,258 @@
+/*
+ * bootldr.h -- Include file for bootloader.
+ *
+ * Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
+ *
+ * Modified to compile in RTEMS development environment
+ * by Eric Valette
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#ifndef _PPC_BOOTLDR_H
+#define _PPC_BOOTLDR_H
+
+#ifndef ASM
+#include <bsp/residual.h>
+#include <bsp/consoleIo.h>
+#include "pci.h"
+
+#define abs __builtin_abs
+
+#define PTE_REFD 0x100
+#define PTE_CHNG (0x80|PTE_REFD) /* Modified implies referenced */
+#define PTE_WTHR 0x040
+#define PTE_CINH 0x020
+#define PTE_COHER 0x010
+#define PTE_GUAR 0x008
+#define PTE_RO 0x003
+#define PTE_RW 0x002
+
+#define PTE_RAM (PTE_CHNG|PTE_COHER|PTE_RW)
+#define PTE_ROM (PTE_REFD|PTE_RO)
+#define PTE_IO (PTE_CHNG|PTE_CINH|PTE_GUAR|PTE_RW)
+
+typedef struct {}opaque;
+
+/* The context passed during MMU interrupts. */
+typedef struct _ctxt {
+ u_long lr, ctr;
+ u_int cr, xer;
+ u_long nip, msr;
+ u_long regs[32];
+} ctxt;
+
+/* The main structure which is pointed to permanently by r13. Things
+ * are not separated very well between parts because it would cause
+ * too much code bloat for such a simple program like the bootloader.
+ * The code is designed to be compiled with the -m relocatable option and
+ * tries to minimize the number of relocations/fixups and the number of
+ * functions who have to access the .got2 sections (this increases the
+ * size of the prologue in every function).
+ */
+typedef struct _boot_data {
+ RESIDUAL *residual;
+ void *load_address;
+ void *of_entry;
+ void *r6, *r7, *r8, *r9, *r10;
+ u_long cache_lsize;
+ void *image; /* Where to copy ourselves */
+ void *stack;
+ void *mover; /* where to copy codemove to avoid overlays */
+ u_long o_msr, o_hid0, o_r31;
+ opaque * mm_private;
+ const struct pci_config_access_functions * pci_functions;
+ opaque * pci_private;
+ struct pci_dev * pci_devices;
+ opaque * v86_private;
+ char cmd_line[256];
+} boot_data;
+
+register boot_data *bd __asm__("r13");
+
+extern inline int
+pcibios_read_config_byte(u_char bus, u_char dev_fn,
+ u_char where, u_char * val) {
+ return bd->pci_functions->read_config_byte(bus, dev_fn, where, val);
+}
+
+extern inline int
+pcibios_read_config_word(u_char bus, u_char dev_fn,
+ u_char where, u_short * val) {
+ return bd->pci_functions->read_config_word(bus, dev_fn, where, val);
+}
+
+extern inline int
+pcibios_read_config_dword(u_char bus, u_char dev_fn,
+ u_char where, u_int * val) {
+ return bd->pci_functions->read_config_dword(bus, dev_fn, where, val);
+}
+
+extern inline int
+pcibios_write_config_byte(u_char bus, u_char dev_fn,
+ u_char where, u_char val) {
+ return bd->pci_functions->write_config_byte(bus, dev_fn, where, val);
+}
+
+extern inline int
+pcibios_write_config_word(u_char bus, u_char dev_fn,
+ u_char where, u_short val) {
+ return bd->pci_functions->write_config_word(bus, dev_fn, where, val);
+}
+
+extern inline int
+pcibios_write_config_dword(u_char bus, u_char dev_fn,
+ u_char where, u_int val) {
+ return bd->pci_functions->write_config_dword(bus, dev_fn, where, val);
+}
+
+extern inline int
+pci_read_config_byte(struct pci_dev *dev, u_char where, u_char * val) {
+ return bd->pci_functions->read_config_byte(dev->bus->number,
+ dev->devfn,
+ where, val);
+}
+
+extern inline int
+pci_read_config_word(struct pci_dev *dev, u_char where, u_short * val) {
+ return bd->pci_functions->read_config_word(dev->bus->number,
+ dev->devfn,
+ where, val);
+}
+
+extern inline int
+pci_read_config_dword(struct pci_dev *dev, u_char where, u_int * val) {
+ return bd->pci_functions->read_config_dword(dev->bus->number,
+ dev->devfn,
+ where, val);
+}
+
+extern inline int
+pci_write_config_byte(struct pci_dev *dev, u_char where, u_char val) {
+ return bd->pci_functions->write_config_byte(dev->bus->number,
+ dev->devfn,
+ where, val);
+}
+
+extern inline int
+pci_write_config_word(struct pci_dev *dev, u_char where, u_short val) {
+ return bd->pci_functions->write_config_word(dev->bus->number,
+ dev->devfn,
+ where, val);
+}
+
+extern inline int
+pci_write_config_dword(struct pci_dev *dev, u_char where, u_int val) {
+ return bd->pci_functions->write_config_dword(dev->bus->number,
+ dev->devfn,
+ where, val);
+}
+
+/* codemove is like memmove, but it also gets the cache line size
+ * as 4th parameter to synchronize them. If this last parameter is
+ * zero, it performs more or less like memmove. No copy is performed if
+ * source and destination addresses are equal. However the caches
+ * are synchronized. Note that the size is always rounded up to the
+ * next mutiple of 4.
+ */
+extern void * codemove(void *, const void *, size_t, unsigned long);
+
+/* The physical memory allocator allows to align memory by
+ * powers of 2 given by the lower order bits of flags.
+ * By default it allocates from higher addresses towrds lower ones,
+ * setting PA_LOW reverses this behaviour.
+ */
+
+#define palloc(size) __palloc(size,0)
+
+#define isa_io_base (bd->io_base)
+
+
+void * __palloc(u_long, int);
+void pfree(void *);
+
+#define PA_LOW 0x100
+#define PA_PERM 0x200 /* Not freeable by pfree */
+#define PA_SUBALLOC 0x400 /* Allocate for suballocation by salloc */
+#define PA_ALIGN_MASK 0x1f
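+/* Illustrative example, assuming the bits under PA_ALIGN_MASK hold the
+ * log2 of the requested alignment (hypothetical values, not from the
+ * original code): __palloc(0x4000, PA_PERM | 12) would ask for a
+ * permanent 16kB block aligned on a 4kB boundary, while the palloc()
+ * macro above is the plain unaligned form.
+ */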
+
+void * valloc(u_long size);
+void vfree(void *);
+
+int vmap(void *, u_long, u_long);
+void vunmap(void *);
+
+void * salloc(u_long size);
+void sfree(void *);
+
+void pci_init(void);
+
+void * memset(void *p, int c, size_t n);
+
+void gunzip(void *, int, unsigned char *, int *);
+
+void print_all_maps(const char *);
+void print_hash_table(void);
+void MMUon(void);
+void MMUoff(void);
+void hang(const char *, u_long, ctxt *) __attribute__((noreturn));
+
+int init_v86(void);
+void cleanup_v86_mess(void);
+void em86_main(struct pci_dev *);
+int find_max_mem(struct pci_dev *);
+
+#endif
+
+#ifdef ASM
+/* These definitions simplify the ugly declarations necessary for
+ * GOT definitions.
+ */
+
+#define GOT_ENTRY(NAME) .L_ ## NAME = . - .LCTOC1 ; .long NAME
+#define GOT(NAME) .L_ ## NAME (r30)
+
+#define START_GOT \
+ .section ".got2","aw"; \
+.LCTOC1 = .+ 0x8000
+
+#define END_GOT \
+ .text
+
+#define GET_GOT \
+ bl 1f; \
+ .text 2; \
+0: .long .LCTOC1-1f; \
+ .text ; \
+1: mflr r30; \
+ lwz r0,0b-1b(r30); \
+ add r30,r0,r30
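+/* Typical use (as in em86real.S below): declare entries between START_GOT
+ * and END_GOT with GOT_ENTRY(name), emit GET_GOT once in the function
+ * prologue, then access each entry with "lwz rX,GOT(name)".
+ */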
+
+#define bd r13
+#define cache_lsize 32 /* Offset into bd area */
+#define image 36
+#define stack 40
+#define mover 44
+#define o_msr 48
+#define o_hid0 52
+#define o_r31 56
+/* Stack offsets for saved registers on exceptions */
+#define save_lr 8(r1)
+#define save_ctr 12(r1)
+#define save_cr 16(r1)
+#define save_xer 20(r1)
+#define save_nip 24(r1)
+#define save_msr 28(r1)
+#define save_r(n) 32+4*n(r1)
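+/* For example save_r(3) expands to 44(r1): the general purpose registers
+ * are saved starting at stack offset 32, 4 bytes apart.
+ */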
+#endif
+
+#endif
+
+
+
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/em86.c b/c/src/lib/libbsp/powerpc/shared/bootloader/em86.c
new file mode 100644
index 0000000000..7e30089f49
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/em86.c
@@ -0,0 +1,580 @@
+/*
+ * em86.c -- Video BIOS ROM interpreter support for the bootloader.
+ *
+ * Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
+ *
+ * Modified to compile in RTEMS development environment
+ * by Eric Valette
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+/*****************************************************************************
+*
+* Code to interpret Video BIOS ROM routines.
+*
+*
+******************************************************************************/
+
+/* These includes are for the development version only */
+#include <sys/types.h>
+#include "pci.h"
+#include <libcpu/byteorder.h>
+#ifdef __BOOT__
+#include "bootldr.h"
+#include <limits.h>
+#endif
+
+/* Code options, put them on the compiler command line */
+/* #define EIP_STATS */ /* EIP based profiling */
+/* #undef EIP_STATS */
+
+typedef union _reg_type1 {
+ unsigned e;
+ unsigned short x;
+ struct {
+ unsigned char l, h;
+ } lh;
+} reg_type1;
+
+typedef union _reg_type2 {
+ unsigned e;
+ unsigned short x;
+} reg_type2;
+
+typedef struct _x86 {
+ reg_type1
+ _eax, _ecx, _edx, _ebx;
+ reg_type2
+ _esp, _ebp, _esi, _edi;
+ unsigned
+ es, cs, ss, ds, fs, gs, eip, eflags;
+ unsigned char
+ *esbase, *csbase, *ssbase, *dsbase, *fsbase, *gsbase;
+ volatile unsigned char *iobase;
+ unsigned char *ioperm;
+ unsigned
+ reason, nexteip, parm1, parm2, opcode, base;
+	unsigned *optable, opreg;	/* no longer used! */
+ unsigned char* vbase;
+ unsigned instructions;
+#ifdef __BOOT__
+ u_char * ram;
+ u_char * rom;
+ struct pci_dev * dev;
+#else
+ unsigned filler[14]; /* Skip to next 64 byte boundary */
+ unsigned eipstats[32768][2];
+#endif
+} x86;
+
+x86 v86_private __attribute__((aligned(32)));
+
+
+/* Emulator is in another source file */
+extern
+void em86_enter(x86 * p);
+
+#define EAX (p->_eax.e)
+#define ECX (p->_ecx.e)
+#define EDX (p->_edx.e)
+#define EBX (p->_ebx.e)
+#define ESP (p->_esp.e)
+#define EBP (p->_ebp.e)
+#define ESI (p->_esi.e)
+#define EDI (p->_edi.e)
+#define AX (p->_eax.x)
+#define CX (p->_ecx.x)
+#define DX (p->_edx.x)
+#define BX (p->_ebx.x)
+#define SP (p->_esp.x)
+#define BP (p->_ebp.x)
+#define SI (p->_esi.x)
+#define DI (p->_edi.x)
+#define AL (p->_eax.lh.l)
+#define CL (p->_ecx.lh.l)
+#define DL (p->_edx.lh.l)
+#define BL (p->_ebx.lh.l)
+#define AH (p->_eax.lh.h)
+#define CH (p->_ecx.lh.h)
+#define DH (p->_edx.lh.h)
+#define BH (p->_ebx.lh.h)
+
+/* Function used to debug */
+#ifdef __BOOT__
+#define printf printk
+#endif
+#ifdef DEBUG
+static void dump86(x86 * p){
+ unsigned char *s = p->csbase + p->eip;
+ printf("cs:eip=%04x:%08x, eax=%08x, ecx=%08x, edx=%08x, ebx=%08x\n",
+ p->cs, p->eip, ld_le32(&EAX),
+ ld_le32(&ECX), ld_le32(&EDX), ld_le32(&EBX));
+ printf("ss:esp=%04x:%08x, ebp=%08x, esi=%08x, edi=%08x, efl=%08x\n",
+ p->ss, ld_le32(&ESP), ld_le32(&EBP),
+ ld_le32(&ESI), ld_le32(&EDI), p->eflags);
+ printf("nip=%08x, ds=%04x, es=%04x, fs=%04x, gs=%04x, total=%d\n",
+ p->nexteip, p->ds, p->es, p->fs, p->gs, p->instructions);
+ printf("code: %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x\n",
+ s[0], s[1], s[2], s[3], s[4], s[5],
+ s[6], s[7], s[8], s[9], s[10], s[11]);
+#ifndef __BOOT__
+ printf("op1=%08x, op2=%08x, result=%08x, flags=%08x\n",
+ p->filler[11], p->filler[12], p->filler[13], p->filler[14]);
+#endif
+}
+#else
+#define dump86(x)
+#endif
+
+int bios86pci(x86 * p) {
+ unsigned reg=ld_le16(&DI);
+ reg_type2 tmp;
+
+ if (AL>=8 && AL<=13 && reg>0xff) {
+ AH = PCIBIOS_BAD_REGISTER_NUMBER;
+ } else {
+ switch(AL) {
+ case 2: /* find_device */
+ /* Should be improved for BIOS able to handle
+ * multiple devices. We simply suppose the BIOS
+ * inits a single device, and return an error
+ * if it tries to find more...
+ */
+ if (SI) {
+ AH=PCIBIOS_DEVICE_NOT_FOUND;
+ } else {
+ BH = p->dev->bus->number;
+ BL = p->dev->devfn;
+ AH = 0;
+ }
+ break;
+ /*
+ case 3: find_class not implemented for now.
+ */
+ case 8: /* read_config_byte */
+ AH=pcibios_read_config_byte(BH, BL, reg, &CL);
+ break;
+ case 9: /* read_config_word */
+ AH=pcibios_read_config_word(BH, BL, reg, &tmp.x);
+ CX=ld_le16(&tmp.x);
+ break;
+ case 10: /* read_config_dword */
+ AH=pcibios_read_config_dword(BH, BL, reg, &tmp.e);
+ ECX=ld_le32(&tmp.e);
+ break;
+ case 11: /* write_config_byte */
+ AH=pcibios_write_config_byte(BH, BL, reg, CL);
+ break;
+ case 12: /* write_config_word */
+ AH=pcibios_write_config_word(BH, BL, reg, ld_le16(&CX));
+ break;
+ case 13: /* write_config_dword */
+ AH=pcibios_write_config_dword(BH, BL, reg, ld_le32(&ECX));
+ break;
+ default:
+ printf("Unimplemented or illegal PCI service call #%d!\n",
+ AL);
+ return 1;
+ }
+ }
+ p->eip = p->nexteip;
+ /* Set/clear carry according to result */
+ if (AH) p->eflags |= 1; else p->eflags &=~1;
+ return 0;
+}
+
+void push2(x86 *p, unsigned value) {
+ unsigned char * sbase= p->ssbase;
+ unsigned newsp = (ld_le16(&SP)-2)&0xffff;
+ st_le16(&SP,newsp);
+ st_le16((unsigned short *)(sbase+newsp), value);
+}
+
+unsigned pop2(x86 *p) {
+ unsigned char * sbase=p->ssbase;
+ unsigned oldsp = ld_le16(&SP);
+ st_le16(&SP,oldsp+2);
+ return ld_le16((unsigned short *)(sbase+oldsp));
+}
+
+int int10h(x86 * p) { /* Process BIOS video interrupt */
+ unsigned vector;
+ vector=ld_le32((unsigned *)p->vbase+0x10);
+ if (((vector&0xffff0000)>>16)==0xc000) {
+ push2(p, p->eflags);
+ push2(p, p->cs);
+ push2(p, p->nexteip);
+ p->cs=vector>>16;
+ p->csbase=p->vbase + (p->cs<<4);
+ p->eip=vector&0xffff;
+#if 1
+ p->eflags&=0xfcff; /* Clear AC/TF/IF */
+#else
+ p->eflags = (p->eflags&0xfcff)|0x100; /* Set TF for debugging */
+#endif
+ /* p->eflags|=0x100; uncomment to force a trap */
+ return(0);
+ } else {
+ switch(AH) {
+ case 0x12:
+ switch(BL){
+ case 0x32:
+ p->eip=p->nexteip;
+ return(0);
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+ printf("unhandled soft interrupt 0x10: vector=%x\n", vector);
+ return(1);
+ }
+}
+
+int process_softint(x86 * p) {
+#if 0
+ if (p->parm1!=0x10 || AH!=0x0e) {
+ printf("Soft interrupt\n");
+ dump86(p);
+ }
+#endif
+ switch(p->parm1) {
+ case 0x10: /* BIOS video interrupt */
+ return int10h(p);
+ case 0x1a:
+ if(AH==0xb1) return bios86pci(p);
+ break;
+ default:
+ break;
+ }
+ dump86(p);
+ printf("Unhandled soft interrupt number 0x%04x, AX=0x%04x\n",
+ p->parm1, ld_le16(&AX));
+ return(1);
+}
+
+/* The only function called back by the emulator is em86_trap; all
+   instructions that may change the code segment are trapped here.
+ p->reason is one of the following codes. */
+#define code_zerdiv 0
+#define code_trap 1
+#define code_int3 3
+#define code_into 4
+#define code_bound 5
+#define code_ud 6
+#define code_dna 7
+
+#define code_iretw 256
+#define code_iretl 257
+#define code_lcallw 258
+#define code_lcalll 259
+#define code_ljmpw 260
+#define code_ljmpl 261
+#define code_lretw 262
+#define code_lretl 263
+#define code_softint 264
+#define code_lock 265 /* Lock prefix */
+/* Codes 1024 to 2047 are used for I/O port access instructions:
+ - The three LSB define the port size (1, 2 or 4)
+ - bit of weight 512 means out if set, in if clear
+ - bit of weight 256 means ins/outs if set, in/out if clear
+ - bit of weight 128 means use esi/edi if set, si/di if clear
+ (only used for ins/outs instructions, always clear for in/out)
+ */
+#define code_inb 1024+1
+#define code_inw 1024+2
+#define code_inl 1024+4
+#define code_outb 1024+512+1
+#define code_outw 1024+512+2
+#define code_outl 1024+512+4
+#define code_insb_a16 1024+256+1
+#define code_insw_a16 1024+256+2
+#define code_insl_a16 1024+256+4
+#define code_outsb_a16 1024+512+256+1
+#define code_outsw_a16 1024+512+256+2
+#define code_outsl_a16 1024+512+256+4
+#define code_insb_a32 1024+256+128+1
+#define code_insw_a32 1024+256+128+2
+#define code_insl_a32 1024+256+128+4
+#define code_outsb_a32 1024+512+256+128+1
+#define code_outsw_a32 1024+512+256+128+2
+#define code_outsl_a32 1024+512+256+128+4
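+/* For example, code_outsw_a32 = 1024+512+256+128+2 decodes as: I/O access
+ * (1024), out (512), string instruction (256), esi/edi addressing (128),
+ * word sized port access (2).
+ */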
+
+int em86_trap(x86 *p) {
+#ifndef __BOOT__
+ int i;
+ unsigned char command[80];
+ unsigned char *verb, *t;
+ unsigned short *fp;
+ static unsigned char def=0;
+ static unsigned char * bptaddr=NULL; /* Breakpoint address */
+ static unsigned char bptopc; /* Replaced breakpoint opcode */
+ unsigned char cmd;
+ unsigned tmp;
+#endif
+ switch(p->reason) {
+ case code_int3:
+#ifndef __BOOT__
+ if(p->csbase+p->eip == bptaddr) {
+ *bptaddr=bptopc;
+ bptaddr=NULL;
+ }
+ else printf("Unexpected ");
+#endif
+ printf("Breakpoint Interrupt !\n");
+ /* Note that this fallthrough (no break;) is on purpose */
+#ifdef __BOOT__
+ return 0;
+#else
+ case code_trap:
+ dump86(p);
+ for(;;) {
+ printf("b(reakpoint, g(o, q(uit, s(tack, t(race ? [%c] ", def);
+ fgets(command,sizeof(command),stdin);
+ verb = strtok(command," \n");
+ if(verb) cmd=*verb; else cmd=def;
+ def=0;
+ switch(cmd) {
+ case 'b':
+ case 'B':
+ if(bptaddr) *bptaddr=bptopc;
+ t=strtok(0," \n");
+ i=sscanf(t,"%x",&tmp);
+ if(i==1) {
+ bptaddr=p->vbase + tmp;
+ bptopc=*bptaddr;
+ *bptaddr=0xcc;
+ } else bptaddr=NULL;
+ break;
+ case 'q':
+ case 'Q':
+ return 1;
+ break;
+
+ case 'g':
+ case 'G':
+ p->eflags &= ~0x100;
+ return 0;
+ break;
+
+ case 's':
+ case 'S': /* Print the 8 stack top words */
+ fp = (unsigned short *)(p->ssbase+ld_le16(&SP));
+ printf("Stack [%04x:%04x]: %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ p->ss, ld_le16(&SP),
+ ld_le16(fp+0), ld_le16(fp+1), ld_le16(fp+2), ld_le16(fp+3),
+ ld_le16(fp+4), ld_le16(fp+5), ld_le16(fp+6), ld_le16(fp+7));
+ break;
+ case 't':
+ case 'T':
+ p->eflags |= 0x10100; /* Set the resume and trap flags */
+ def='t';
+ return 0;
+ break;
+ /* Should add some code to edit registers */
+ }
+ }
+#endif
+ break;
+ case code_ud:
+	printf("Attempt to execute an unimplemented "
+	       "or undefined opcode!\n");
+ dump86(p);
+ return(1); /* exit interpreter */
+ break;
+ case code_dna:
+ printf("Attempt to execute a floating point instruction!\n");
+ dump86(p);
+ return(1);
+ break;
+ case code_softint:
+ return process_softint(p);
+ break;
+ case code_iretw:
+ p->eip=pop2(p);
+ p->cs=pop2(p);
+ p->csbase=p->vbase + (p->cs<<4);
+ p->eflags= (p->eflags&0xfffe0000)|pop2(p);
+ /* p->eflags|= 0x100; */ /* Uncomment to trap after iretws */
+ return(0);
+ break;
+#ifndef __BOOT__
+ case code_inb:
+ case code_inw:
+ case code_inl:
+ case code_insb_a16:
+ case code_insw_a16:
+ case code_insl_a16:
+ case code_insb_a32:
+ case code_insw_a32:
+ case code_insl_a32:
+ case code_outb:
+ case code_outw:
+ case code_outl:
+ case code_outsb_a16:
+ case code_outsw_a16:
+ case code_outsl_a16:
+ case code_outsb_a32:
+ case code_outsw_a32:
+ case code_outsl_a32:
+ /* For now we simply enable I/O to the ports and continue */
+ for(i=p->parm1; i<p->parm1+(p->reason&7); i++) {
+ p->ioperm[i/8] &= ~(1<<i%8);
+ }
+ printf("Access to ports %04x-%04x enabled.\n",
+ p->parm1, p->parm1+(p->reason&7)-1);
+ return(0);
+#endif
+ case code_lretw:
+ /* Check for the exit eyecatcher */
+ if ( *(u_int *)(p->ssbase+ld_le16(&SP)) == UINT_MAX) return 1;
+ /* No break on purpose */
+ default:
+ dump86(p);
+ printf("em86_trap called with unhandled reason code !\n");
+ return(1);
+
+ }
+}
+
+void cleanup_v86_mess(void) {
+ x86 *p = (x86 *) bd->v86_private;
+
+ /* This automatically removes the mappings ! */
+ vfree(p->vbase);
+ p->vbase = 0;
+ pfree(p->ram);
+ p->ram = 0;
+ sfree(p->ioperm);
+ p->ioperm=0;
+}
+
+
+int init_v86(void) {
+ x86 *p = (x86 *) bd->v86_private;
+
+ /* p->vbase is non null when the v86 is properly set-up */
+ if (p->vbase) return 0;
+
+ /* Set everything to 0 */
+ memset(p, 0, sizeof(*p));
+ p->ioperm = salloc(65536/8+1);
+ p->ram = palloc(0xa0000);
+ p->iobase = ptr_mem_map->io_base;
+
+ if (!p->ram || !p->ioperm) return 1;
+
+ /* The ioperm array must have an additional byte at the end ! */
+ p->ioperm[65536/8] = 0xff;
+
+ p->vbase = valloc(0x110000);
+ if (!p->vbase) return 1;
+
+ /* These calls should never fail. */
+ vmap(p->vbase, (u_long)p->ram|PTE_RAM, 0xa0000);
+ vmap(p->vbase+0x100000, (u_long)p->ram|PTE_RAM, 0x10000);
+ vmap(p->vbase+0xa0000,
+ ((u_long)ptr_mem_map->isa_mem_base+0xa0000)|PTE_IO, 0x20000);
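+	/* Resulting layout of p->vbase (derived from the vmap calls above):
+	 * 0x00000-0x9ffff -> the allocated RAM copy, 0xa0000-0xbffff -> the
+	 * real ISA memory (so VGA accesses reach the hardware), and
+	 * 0x100000-0x10ffff -> the start of the RAM copy again, apparently
+	 * to emulate the real mode 1MB wraparound.
+	 */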
+ return 0;
+}
+
+void em86_main(struct pci_dev *dev){
+ x86 *p = (x86 *) bd->v86_private;
+ u_short signature;
+ u_char length;
+ volatile u_int *src;
+ u_int *dst, left, saved_rom;
+#if defined(MONITOR_IO) && !defined(__BOOT__)
+#define IOMASK 0xff
+#else
+#define IOMASK 0
+#endif
+
+
+#ifndef __BOOT__
+ int i;
+ /* Allow or disable access to all ports */
+ for(i=0; i<65536/8; i++) p->ioperm[i]=IOMASK;
+ p->ioperm[i] = 0xff; /* Last unused byte must have this value */
+#endif
+ p->dev = dev;
+ memset(p->vbase, 0, 0xa0000);
+ /* Set up a few registers */
+ p->cs = 0xc000; p->csbase = p->vbase + 0xc0000;
+ p->ss = 0x1000; p->ssbase = p->vbase + 0x10000;
+ p->eflags=0x200;
+ st_le16(&SP,0xfffc); p->eip=3;
+
+ p->dsbase = p->esbase = p->fsbase = p->gsbase = p->vbase;
+
+ /* Follow the PCI BIOS specification */
+ AH=dev->bus->number;
+ AL=dev->devfn;
+
+ /* All other registers are irrelevant except ES:DI which
+ * should point to a PnP installation check block. This
+ * is not yet implemented due to lack of references. */
+
+ /* Store a return address of 0xffff:0xffff as eyecatcher */
+ *(u_int *)(p->ssbase+ld_le16(&SP)) = UINT_MAX;
+
+ /* Interrupt for BIOS EGA services is 0xf000:0xf065 (int 0x10) */
+ st_le32((u_int *)p->vbase + 0x10, 0xf000f065);
+
+ /* Enable the ROM, read it and disable it immediately */
+ pci_read_config_dword(dev, PCI_ROM_ADDRESS, &saved_rom);
+ pci_write_config_dword(dev, PCI_ROM_ADDRESS, 0x000c0001);
+
+ /* Check that there is an Intel ROM. Should we also check that
+ * the first instruction is a jump (0xe9 or 0xeb) ?
+ */
+ signature = *(u_short *)(ptr_mem_map->isa_mem_base+0xc0000);
+ if (signature!=0x55aa) {
+ printf("bad signature: %04x.\n", signature);
+ return;
+ }
+ /* Allocate memory and copy the video rom to vbase+0xc0000; */
+ length = ptr_mem_map->isa_mem_base[0xc0002];
+ p->rom = palloc(length*512);
+ if (!p->rom) return;
+
+
+ for(dst=(u_int *) p->rom,
+ src=(volatile u_int *)(ptr_mem_map->isa_mem_base+0xc0000),
+ left = length*512/sizeof(u_int);
+ left--;
+ *dst++=*src++);
+
+	/* Disable the ROM and map the copy in virtual address space. Note
+ * that the ROM has to be mapped as RAM since some BIOSes (at least
+ * Cirrus) perform write accesses to their own ROM. The reason seems
+ * to be that they check that they must execute from shadow RAM
+ * because accessing the ROM prevents accessing the video RAM
+ * according to comments in linux/arch/alpha/kernel/bios32.c.
+ */
+
+ pci_write_config_dword(dev, PCI_ROM_ADDRESS, saved_rom);
+ vmap(p->vbase+0xc0000, (u_long)p->rom|PTE_RAM, length*512);
+
+ /* Now actually emulate the ROM init routine */
+ em86_enter(p);
+
+ /* Free the acquired resources */
+ vunmap(p->vbase+0xc0000);
+ pfree(p->rom);
+}
+
+
+
+
+
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/em86real.S b/c/src/lib/libbsp/powerpc/shared/bootloader/em86real.S
new file mode 100644
index 0000000000..a462cf7bdb
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/em86real.S
@@ -0,0 +1,4561 @@
+/*
+ * em86real.S
+ *
+ * Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
+ *
+ * Modified to compile in RTEMS development environment
+ * by Eric Valette
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+/* If the symbol __BOOT__ is defined, a slightly different version is
+ * generated to be compiled with the -mrelocatable option.
+ */
+
+#ifdef __BOOT__
+#include "bootldr.h"
+/* It is impossible to gather statistics in the boot version */
+#undef EIP_STATS
+#endif
+
+/*
+ *
+ * Given the size of this code, it deserves a few comments on how it works,
+ * and why it was implemented the way it is.
+ *
+ * The goal is to have a real mode i486SX emulator to initialize hardware,
+ * mostly graphics boards, by interpreting ROM BIOSes. The choice of a 486SX
+ * is logical since this is the lowest processor that PCI ROM BIOSes must run
+ * on.
+ *
+ * The goal of this emulator is not performance, but a small enough memory
+ * footprint to include it in a bootloader.
+ *
+ * It is actually likely to be comparable to a 25MHz 386DX on a 200MHz 603e !
+ * This is not as serious as it seems since most of the BIOS code performs
+ * a lot of accesses to I/O and non-cacheable memory spaces. For such
+ * instructions, the execution time is often dominated by bus accesses.
+ * Statistics of the code also shows that it spends a large function of
+ * the time in loops waiting for vertical retrace or programs one of the
+ * timers and waits for the count to go down to zero. This type of loop
+ * runs emulated at the same speed as on 5 GHz Pentium IV++ ;)
+ *
+ */
+
+/*
+ * Known bugs or differences with a real 486SX (real mode):
+ * - segment limits are not enforced (too costly)
+ * - xchg instructions with memory are not locked
+ * - lock prefixes are not implemented at all
+ * - long divides implemented but perhaps still buggy
+ * - miscellaneous system instructions not implemented
+ * (some probably cannot be implemented)
+ * - neither control nor debug registers are implemented for the time being
+ * (debug registers are impossible to implement at a reasonable cost)
+ */
+
+/* Code options, put them on the compiler command line */
+/* #define EIP_STATS */ /* EIP based profiling */
+/* #undef EIP_STATS */
+
+/*
+ * Implementation notes:
+ *
+ * A) flags emulation.
+ *
+ * The most important decisions when it comes to obtain a reasonable speed
+ * are related to how the EFLAGS register is emulated.
+ *
+ * Note: the code to set up flags is complex, but it is only seldom
+ * executed since cmp and test instructions use much faster flag evaluation
+ * paths. For example the overflow flag is almost only needed for pushf and
+ * int. Comparison results only involve (SF^OF) or (SF^OF)+ZF and the
+ * implementation is fast in this case.
+ *
+ * Rarely used flags: AC, NT and IOPL are kept in a memory EFLAGS image.
+ * All other flags are either kept explicitly in PPC cr (DF, IF, and TF) or
+ * lazily evaluated from the state of 4 registers called flags, result, op1,
+ * op2, and sometimes the cr itself. The emulation has been designed for
+ * minimal overhead for the common case where the flags are never used. With
+ * few exceptions, all instructions that set flags leave the result of the
+ * computation in a register called result, and operands are taken from op1
+ * and op2 registers. However a few instructions like cmp, test and bit tests
+ * (bt/btc/btr/bts/bsf/bsr) explicitly set cr bits to short circuit
+ * condition code evaluation of conditional instructions.
+ *
+ * As a very brief summary:
+ *
+ * - the result of the last flag setting operation is often either in the
+ * result register or in op2 after increment or decrement instructions
+ * because result and op1 may be needed to compute the carry.
+ *
+ * - compare instructions leave the result of the unsigned comparison
+ * in cr4 and of signed comparison in cr6. This means that:
+ * - cr4[0]=CF (short circuit for jc/jnc)
+ * - cr4[1]=~(CF+ZF) (short circuit for ja/jna)
+ * - cr6[0]=(OF^SF) (short circuit for jl/jnl)
+ * - cr6[1]=~((SF^OF)+ZF) (short circuit for jg/jng)
+ * - cr6[2]=ZF (short circuit for jz/jnz)
+ *
+ * - test instructions set flags in cr6 and clear overflow. This means that:
+ * - cr6[0]=SF=(SF^OF) (short circuit for jl/jnl/js/jns)
+ * - cr6[1]=~((SF^OF)+ZF) (short circuit for jg/jng)
+ * - cr6[2]=ZF (short circuit for jz/jnz)
+ *
+ * All flags may be lazily evaluated from several values kept in registers:
+ *
+ * Flag: Depends upon:
+ * OF result, op1, op2, flags[INCDEC_FIELD,SUBTRACTING,OF_STATE_MASK]
+ * SF result, op2, flags[INCDEC_FIELD,RES_SIZE]
+ * ZF result, op2, cr6[2], flags[INCDEC_FIELD,RES_SIZE,ZF_PROTECT]
+ * AF op1, op2, flags[INCDEC_FIELD,SUBTRACTING,CF_IN]
+ * PF result, op2, flags[INCDEC_FIELD]
+ * CF result, op1, flags[CF_STATE_MASK, CF_IN]
+ *
+ * The order of the fields in the flags register has been chosen so that a
+ * single rlwimi is necessary for common instructions that do not affect all
+ * flags. (See the code for inc/dec emulation).
+ *
+ *
+ * B) opcodes and prefixes.
+ *
+ * The register called opcode holds in its low order 8 bits the opcode
+ * (second byte if the first byte is 0x0f). More precisely it holds the
+ * last byte fetched before the modrm byte or the immediate operand(s)
+ * of the instruction, if any. High order 24 bits are zero unless the
+ * instruction has prefixes. These higher order bits have the following
+ * meaning:
+ * 0x80000000 segment override prefix
+ * 0x00001000 repnz prefix (0xf2)
+ * 0x00000800 repz prefix (0xf3)
+ * 0x00000400 address size prefix (0x67)
+ * 0x00000200 operand size prefix (0x66)
+ * (bits 0x1000 and 0x800 cannot be set simultaneously)
+ *
+ * Therefore if there is a segment override the value will be very
+ * negative (between 0x80000000 and 0x800016ff); if there is no segment
+ * override, the value will be between 0 and 0x16ff. The reason for
+ * this choice will be understood in the next part.
+ *
+ * C) addressing mode description tables.
+ *
+ * The encoding of the modrm bytes (especially in 16 bit mode) is quite
+ * complex. Hence a table, indexed by the five useful bits of the modrm
+ * byte, is used to simplify decoding. Here is a description:
+ *
+ * bit mask meaning
+ * 0x80000000 use ss as default segment register
+ * 0x00004000 means that this addressing mode needs a base register
+ * (set for all entries except sib and displacement-only)
+ * 0x00002000 set if preceding is not set
+ * 0x00001000 set if an sib follows
+ * 0x00000700 base register to use (16 and 32 bit)
+ * 0x00000080 set in 32 bit addressing mode table, cleared in 16 bit
+ * (so extsb mask,entry; ori mask,mask,0xffff gives a mask)
+ * 0x00000070 kludge field, possible values are
+ * 0: 16 bit addressing mode without index
+ * 10: 32 bit addressing mode
+ * 60: 16 bit addressing mode with %si as index
+ * 70: 16 bit addressing mode with %di as index
+ *
+ * This convention leads to the following special values used to check for
+ * sib present and displacement-only, which happen to the three lowest
+ * values in the table (unsigned):
+ * 0x00003090 sib follows (implies it is a 32 bit mode)
+ * 0x00002090 32 bit displacement-only
+ * 0x00002000 16 bit displacement-only
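+ *
+ * As a worked example from the bit definitions above, 0x00003090 is
+ * 0x2000 (no base register) | 0x1000 (an sib byte follows) | 0x80 (32 bit
+ * table) | 0x10 (32 bit addressing mode kludge value).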
+ *
+ * This means that all entries are either very negative in the 0x80002000
+ * range if the segment defaults to ss or higher than 0x2000 if it defaults
+ * to ds. Combined with the value in opcode this gives the following table:
+ *   opcode     entry      entry>opcode ?   segment to use
+ *   positive   positive   yes              ds (default)
+ *   negative   positive   yes              overridden by prefix
+ *   positive   negative   no               ss
+ *   negative   negative   yes              overridden by prefix
+ *
+ * Hence a simple comparison allows checking for the need to override
+ * the current base with ss, i.e., when ss is the default base and the
+ * instruction has no override prefix.
+ *
+ * D) BUGS
+ *
+ * This software is obviously bug-free :-). Nevertheless, if you encounter
+ * an interesting feature, mail me a note, if possible with a detailed
+ * instruction example showing where and how it fails.
+ *
+ */
+
+
+/* Now the details of flag evaluation with the necessary macros */
+
+/* Alignment check is toggleable so the system believes it is a 486, but
+CPUID is not, to avoid unnecessary complexities. However, alignment
+is actually never checked (real mode is CPL 0 anyway). */
+#define AC86 13 /* Can only be toggled */
+#define VM86 14 /* Not used for now */
+#define RF86 15 /* Not emulated precisely */
+/* Actually NT and IOPL are kept in memory */
+#define NT86 17
+#define IOPL86 18 /* Actually 18 and 19 */
+#define OF86 20
+#define DF86 21
+#define IF86 22
+#define TF86 23
+#define SF86 24
+#define ZF86 25
+#define AF86 27
+#define PF86 29
+#define CF86 31
+
+/* Where the less important flags are placed in PPC cr */
+#define RF 20 /* Suppress trap flag: cr5[0] */
+#define DF 21 /* Direction flag: cr5[1] */
+#define IF 22 /* Interrupt flag: cr5[2] */
+#define TF 23 /* Single step flag: cr5[3] */
+
+/* Now the flags which are frequently used */
+/*
+ * CF_IN is a copy of the input carry with PPC polarity,
+ * it is cleared for add, set for sub and cmp,
+ * equal to the x86 carry for adc and to its complement for sbb.
+ * it is used to evaluate AF and CF.
+ */
+#define CF_IN 0x80000000
+
+/* #define GET_CF_IN(dst) rlwinm dst,flags,1,0x01 */
+
+/* CF_IN_CR set in flags means that cr4[0] is a copy of carry bit */
+#define CF_IN_CR 0x40000000
+
+#define EVAL_CF andis. r3,flags,(CF_IN_CR)>>16; beql- _eval_cf
+
+/*
+ * CF_STATE tells how to compute the carry bit.
+ * NOTRESULT16 and NOTRESULT8 are never set explicitly,
+ * but they may happen after a cmc instruction.
+ */
+#define CF 16 /* cr4[0] */
+#define CF_LOCATION 0x30000000
+#define CF_ZERO 0x00000000
+#define CF_EXPLICIT 0x00000000
+#define CF_COMPLEMENT 0x08000000 /* Indeed a polarity bit */
+#define CF_STATE_MASK (CF_LOCATION|CF_COMPLEMENT)
+#define CF_VALUE 0x08000000
+#define CF_SET 0x08000000
+#define CF_RES32 0x10000000
+#define CF_NOTRES32 0x18000000
+#define CF_RES16 0x20000000
+#define CF_NOTRES16 0x28000000
+#define CF_RES8 0x30000000
+#define CF_NOTRES8 0x38000000
+
+#define CF_ADDL CF_RES32
+#define CF_SUBL CF_NOTRES32
+#define CF_ADDW CF_RES16
+#define CF_SUBW CF_RES16
+#define CF_ADDB CF_RES8
+#define CF_SUBB CF_RES8
+
+#define CF_ROTCNT(dst) rlwinm dst,flags,7,0x18
+#define CF_POL(dst,pos) rlwinm dst,flags,(36-pos)%32,pos,pos
+#define CF_POL_INSERT(dst,pos) \
+ rlwimi dst,flags,(36-pos)%32,pos,pos
+#define RES2CF(dst) rlwinm dst,result,8,7,15
+
+/*
+ * OF_STATE tells how to compute the overflow bit. When the low order bit
+ * is set (OF_EXPLICIT), it means that OF is the exclusive or of the
+ * two other bits. For the reason of this choice, see rotate instructions.
+ */
+#define OF 1 /* Only after EVAL_OF */
+#define OF_STATE_MASK 0x07000000
+#define OF_INCDEC 0x00000000
+#define OF_EXPLICIT 0x01000000
+#define OF_ZERO 0x01000000
+#define OF_VALUE 0x04000000
+#define OF_SET 0x04000000
+#define OF_ONE 0x05000000
+#define OF_XOR 0x06000000
+#define OF_ARITHL 0x06000000
+#define OF_ARITHW 0x02000000
+#define OF_ARITHB 0x04000000
+
+#define EVAL_OF rlwinm. r3,flags,6,0,1; bngl+ _eval_of; andis. r3,flags,OF_VALUE>>16
+
+/* See _eval_of to see how this can be used */
+#define OF_ROTCNT(dst) rlwinm dst,flags,10,0x1c
+
+/*
+ * SIGNED_IN_CR means that cr6 is set as after a signed compare:
+ * - cr6[0] is SF^OF for jl/jnl/setl/setnl...
+ * - cr6[1] is ~((SF^OF)+ZF) for jg/jng/setg/setng...
+ * - cr6[2] is ZF (ZF_IN_CR is always set if this bit is set)
+ */
+#define SLT 24 /* cr6[0], signed less than */
+#define SGT 25 /* cr6[1], signed greater than */
+#define SIGNED_IN_CR 0x00800000
+
+#define EVAL_SIGNED andis. r3,flags,SIGNED_IN_CR>>16; beql- _eval_signed
+
+/*
+ * Above in CR means that cr4 is set as after an unsigned compare:
+ * - cr4[0] is CF (CF_IN_CR is also set)
+ * - cr4[1] is ~(CF+ZF) (ZF_IN_CR is also set)
+ */
+#define ABOVE 17 /* cr4[1] */
+#define ABOVE_IN_CR 0x00400000
+
+#define EVAL_ABOVE andis. r3,flags,ABOVE_IN_CR>>16; beql- _eval_above
+
+/* SF_IN_CR means cr6[0] is a copy of SF. It implies ZF_IN_CR is also set */
+#define SF 24 /* cr6[0] */
+#define SF_IN_CR 0x00200000
+
+#define EVAL_SF andis. r3,flags,SF_IN_CR>>16; beql- _eval_sf_zf
+
+/* ZF_IN_CR means cr6[2] is a copy of ZF. */
+#define ZF 26
+#define ZF_IN_CR 0x00100000
+
+#define EVAL_ZF andis. r3,flags,ZF_IN_CR>>16; beql- _eval_sf_zf
+#define ZF2ZF86(s,d) rlwimi d,s,ZF-ZF86,ZF86,ZF86
+#define ZF862ZF(reg) rlwimi reg,reg,32+ZF86-ZF,ZF,ZF
+
+/*
+ * ZF_PROTECT means cr6[2] is the only valid value for ZF. This is necessary
+ * because some infrequent instructions may leave SF and ZF in an apparently
+ * inconsistent state (both set): sahf, popf and the few (not implemented)
+ * instructions that only affect ZF.
+ */
+#define ZF_PROTECT 0x00080000
+
+/* The parity is always evaluated when it is needed */
+#define PF 0 /* Only after EVAL_PF */
+#define EVAL_PF bl _eval_pf
+
+/* This field gives the shift amount to use to evaluate SF
+ and ZF when ZF_PROTECT is not set */
+#define RES_SIZE_MASK 0x00060000
+#define RESL 0x00000000
+#define RESW 0x00040000
+#define RESB 0x00060000
+
+#define RES_SHIFT(dst) rlwinm dst,flags,18,0x18
+
+/* SUBTRACTING is set if the last flag setting instruction was sub/sbb/cmp,
+ used to evaluate OF and AF */
+#define SUBTRACTING 0x00010000
+
+#define GET_ADDSUB(dst) rlwinm dst,flags,16,0x01
+
+/* rotate (rcl/rcr/rol/ror) affect CF and OF but not other flags */
+#define ROTATE_MASK (CF_IN_CR|CF_STATE_MASK|ABOVE_IN_CR|OF_STATE_MASK|SIGNED_IN_CR)
+#define ROTATE_FLAGS rlwimi flags,one,24,ROTATE_MASK
+
+/*
+ * INCDEC_FIELD has at most one bit set when the last flag setting instruction
+ * was either inc or dec (which do not affect the carry). When one of these
+ * bits is set, it affects the way OF, SF, ZF, AF, and PF are evaluated.
+ */
+#define INCDEC_FIELD 0x0000ff00
+
+#define DECB_SHIFT 8
+#define INCB_SHIFT 9
+#define DECW_SHIFT 10
+#define INCW_SHIFT 11
+#define DECL_SHIFT 14
+#define INCL_SHIFT 15
+
+#define INCDEC_MASK (OF_STATE_MASK|SIGNED_IN_CR|ABOVE_IN_CR|SF_IN_CR|\
+ ZF_IN_CR|ZF_PROTECT|RES_SIZE_MASK|SUBTRACTING|\
+ INCDEC_FIELD)
+/* Operations to perform to tell where the flags are after inc or dec */
+#define INC_FLAGS(BWL) rlwimi flags,one,INC##BWL##_SHIFT,INCDEC_MASK
+#define DEC_FLAGS(BWL) rlwimi flags,one,DEC##BWL##_SHIFT,INCDEC_MASK
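+/* For example DEC_FLAGS(W) expands, by the definitions above, to
+ * "rlwimi flags,one,DECW_SHIFT,INCDEC_MASK": a single rlwimi which clears
+ * everything under INCDEC_MASK and sets only the DECW bit, as announced
+ * in the flag emulation notes.
+ */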
+
+/* How the flags are set after arithmetic operations */
+#define FLAGS_ADD(BWL) (CF_ADD##BWL|OF_ARITH##BWL|RES##BWL)
+#define FLAGS_SBB(BWL) (CF_SUB##BWL|OF_ARITH##BWL|RES##BWL|SUBTRACTING)
+#define FLAGS_SUB(BWL) FLAGS_SBB(BWL)|CF_IN
+#define FLAGS_CMP(BWL) FLAGS_SUB(BWL)|ZF_IN_CR|CF_IN_CR|SIGNED_IN_CR|ABOVE_IN_CR
+
+/* How the flags are set after logical operations */
+#define FLAGS_LOG(BWL) (CF_ZERO|OF_ZERO|RES##BWL)
+#define FLAGS_TEST(BWL) FLAGS_LOG(BWL)|ZF_IN_CR|SIGNED_IN_CR|SF_IN_CR
+
+/* How the flags are set after bt/btc/btr/bts. */
+#define FLAGS_BTEST CF_IN_CR|CF_ADDL|OF_ZERO|RESL
+
+/* How the flags are set after bsf/bsr. */
+#define FLAGS_BSRCH(WL) CF_ZERO|OF_ZERO|RES##WL|ZF_IN_CR
+
+/* How the flags are set after logical right shifts */
+#define FLAGS_SHR(BWL) (CF_EXPLICIT|OF_ARITH##BWL|RES##BWL)
+
+/* How the flags are set after double length shifts */
+#define FLAGS_DBLSH(WL) (CF_EXPLICIT|OF_ARITH##WL|RES##WL)
+
+/* How the flags are set after multiplies */
+#define FLAGS_MUL (CF_EXPLICIT|OF_EXPLICIT)
+
+#define SET_FLAGS(fl) lis flags,(fl)>>16
+#define ADD_FLAGS(fl) addis flags,flags,(fl)>>16
+
+/*
+ * We are always off by one when compared with Intel's eip; this shortens
+ * code by allowing the next byte to be loaded with lbzu x,1(eip). The register
+ * called eip actually contains csbase+eip, and thus should be called lip
+ * for linear ip.
+ */
+
+/*
+ * Reason codes passed to the C part of the emulator, this includes all
+ * instructions which may change the current code segment. These definitions
+ * will soon go into a separate include file. Codes 0 to 255 correspond
+ * directly to the interrupt/trap that has to be generated.
+ */
+
+#define code_divide_err 0
+#define code_trap 1
+#define code_int3 3
+#define code_into 4
+#define code_bound 5
+#define code_ud 6
+#define code_dna 7 /* FPU not available */
+
+#define code_iretw 256 /* Interrupt returns */
+#define code_iretl 257
+#define code_lcallw 258 /* Far calls and jumps */
+#define code_lcalll 259
+#define code_ljmpw 260
+#define code_ljmpl 261
+#define code_lretw 262 /* Far returns */
+#define code_lretl 263
+#define code_softint 264 /* int $xx */
+#define code_lock 265 /* Lock prefix */
+/* Codes 1024 to 2047 are used for I/O port access instructions:
+ - The three LSB define the port size (1, 2 or 4)
+ - bit of weight 512 means out if set, in if clear
+ - bit of weight 256 means ins/outs if set, in/out if clear
+ - bit of weight 128 means use 32 bit addresses if set, 16 bit if clear
+ (only used for ins/outs instructions, always clear for in/out)
+ */
+#define code_inb 1024+1
+#define code_inw 1024+2
+#define code_inl 1024+4
+#define code_outb 1024+512+1
+#define code_outw 1024+512+2
+#define code_outl 1024+512+4
+#define code_insb_a16 1024+256+1
+#define code_insw_a16 1024+256+2
+#define code_insl_a16 1024+256+4
+#define code_outsb_a16 1024+512+256+1
+#define code_outsw_a16 1024+512+256+2
+#define code_outsl_a16 1024+512+256+4
+#define code_insb_a32 1024+256+128+1
+#define code_insw_a32 1024+256+128+2
+#define code_insl_a32 1024+256+128+4
+#define code_outsb_a32 1024+512+256+128+1
+#define code_outsw_a32 1024+512+256+128+2
+#define code_outsl_a32 1024+512+256+128+4
+
+#define state 31
+/* r31 (state) is a pointer to a structure describing the emulated x86
+processor; its layout is the following:
+
+first the general purpose registers, they are in little endian byte order
+
+offset name
+
+ 0 eax/ax/al
+ 1 ah
+ 4 ecx/cx/cl
+ 5 ch
+ 8 edx/dx/dl
+ 9 dh
+ 12 ebx/bx/bl
+ 13 bh
+ 16 esp/sp
+ 20 ebp/bp
+ 24 esi/si
+ 28 edi/di
+*/
+
+#define AL 0
+#define AX 0
+#define EAX 0
+#define AH 1
+#define CL 4
+#define CX 4
+#define ECX 4
+#define DX 8
+#define EDX 8
+#define BX 12
+#define EBX 12
+#define SP 16
+#define ESP 16
+#define BP 20
+#define EBP 20
+#define SI 24
+#define ESI 24
+#define DI 28
+#define EDI 28
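+/* Because the register image is little endian, a plain "lbz rX,AH(state)"
+ * yields the x86 AH byte directly, while lhbrx/lwbrx are used for 16 and
+ * 32 bit accesses (see the short form moves further down).
+ */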
+
+/*
+then the rest of the machine state, big endian!
+
+offset name
+
+ 32 essel segment register selectors (values)
+ 36 cssel
+ 40 sssel
+ 44 dssel
+ 48 fssel
+ 52 gssel
+ 56 eipimg true eip (register named eip is csbase+eip)
+ 60 eflags eip and eflags only valid when C code running !
+ 64 esbase segment registers bases
+ 68 csbase
+ 72 ssbase
+ 76 dsbase
+ 80 fsbase
+ 84 gsbase
+ 88 iobase For I/O instructions, I/O space virtual base
+ 92 ioperm I/O permission bitmap pointer
+ 96 reason Reason code when calling external emulator
+ 100 nexteip eip past instruction for external emulator
+ 104 parm1 parameter for external emulator
+ 108 parm2 parameter for external emulator
+ 112 _opcode current opcode register for external emulator
+ 116 _base segment register base for external emulator
+ 120	_offset	instruction operand offset
+ More internal state was dumped here for debugging in first versions
+
+ 128 vbase where the 1Mb memory is mapped
+ 132 cntimg instruction counter
+ 136 scratch
+ 192 eipstat array of 32k unsigned long pairs for eip stats
+*/
+
+#define essel 32
+#define cssel 36
+#define sssel 40
+#define dssel 44
+#define fssel 48
+#define gssel 52
+#define eipimg 56
+#define eflags 60
+#define esbase 64
+#define csbase 68
+#define ssbase 72
+#define dsbase 76
+#define fsbase 80
+#define gsbase 84
+#define iobase 88
+#define ioperm 92
+#define reason 96
+#define nexteip 100
+#define parm1 104
+#define parm2 108
+#define _opcode 112
+#define _base 116
+#define _offset 120
+#define vbase 128
+#define cntimg 132
+#ifdef EIP_STATS
+#define eipstat 192
+#endif
+/* Global registers */
+
+/* Some segment register bases are permanently kept in registers since they
+are often used: these are csb, esb and ssb because they are
+required for jumps, string instructions, and pushes/pops/calls/rets.
+dsbase is not kept in a register but loaded from memory to allow somewhat
+more parallelism in the main emulation loop.
+*/
+
+#define one 30 /* Constant one, so pervasive */
+#define ssb 29
+#define csb 28
+#define esb 27
+#define eip 26 /* That one is indeed csbase+(e)ip-1 */
+#define result 25 /* For the use of result, op1, op2 */
+#define op1 24 /* see the section on flag emulation */
+#define op2 23
+#define opbase 22 /* default opcode table */
+#define flags 21 /* See earlier description */
+#define opcode 20 /* Opcode */
+#define opreg 19 /* Opcode extension/register number */
+/* base is reloaded with the base of the ds segment at the beginning of
+every instruction; it is modified by segment override prefixes, when
+the default base segment is ss, or when the modrm byte specifies a
+register operand */
+#define base 18 /* Instruction's operand segment base */
+#define offset 17 /* Instruction's memory operand offset */
+/* used to address a table telling how to decode the addressing mode
+specified by the modrm byte */
+#define adbase 16 /* addressing mode table */
+/* Following registers are used only as dedicated temporaries during decoding,
+they are free for use during emulation */
+/*
+ * ceip (current eip) is only in use when we call the external emulator for
+ * instructions that fault. Note that it is forbidden to change flags before
+ * the check for the fault happens (divide by zero...) ! ceip is also used
+ * when measuring timing.
+ */
+#define ceip 15
+
+/* A register used to measure timing information (when enabled) */
+#ifdef EIP_STATS
+#define tstamp 14
+#endif
+
+#define count 12 /* Instruction counter. */
+
+#define r0 0
+#define r1 1 /* PPC Stack pointer. */
+#define r3 3
+#define r4 4
+#define r5 5
+#define r6 6
+#define r7 7
+
+/* Macros to read code stream */
+#define NEXTBYTE(dest) lbzu dest,1(eip)
+#define NEXTWORD(dest) lhbrx dest,eip,one; la eip,2(eip)
+#define NEXTDWORD(dest) lwbrx dest,eip,one; la eip,4(eip)
+#define NEXT b nop
+#define GOTNEXT b gotopcode
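+/* lhbrx/lwbrx perform the little endian loads; since the register called
+ * eip actually holds csbase+ip-1 (see the note above), indexing with "one"
+ * compensates for the off by one convention before advancing eip.
+ */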
+
+#ifdef __BOOT__
+ START_GOT
+ GOT_ENTRY(_jtables)
+ GOT_ENTRY(jtab_www)
+ GOT_ENTRY(adtable)
+ END_GOT
+#else
+ .text
+#endif
+ .align 2
+ .global em86_enter
+ .type em86_enter,@function
+em86_enter: stwu r1,-96(r1) # allocate stack
+ mflr r0
+ stmw 14,24(r1)
+ mfcr r4
+ stw r0,100(r1)
+ mr state,r3
+ stw r4,20(r1)
+#ifdef __BOOT__
+/* We need this since r30 is the default GOT pointer */
+#define r30 30
+ GET_GOT
+/* The relocation of these tables is explicit; this could be done
+ * automatically with fixups but would add more than 8kb in the fixup tables.
+ */
+ lwz r3,GOT(_jtables)
+ lwz r4,_endjtables-_jtables(r3)
+ sub. r4,r3,r4
+ beq+ 1f
+ li r0,((_endjtables-_jtables)>>2)+1
+ addi r3,r3,-4
+ mtctr r0
+0: lwzu r5,4(r3)
+ add r5,r5,r4
+ stw r5,0(r3)
+ bdnz 0b
+1: lwz adbase,GOT(adtable)
+ lwz opbase,GOT(jtab_www)
+/* Now r30 is only used as constant 1 */
+#undef r30
+ li one,1 # pervasive constant
+#else
+ lis opbase,jtab_www@ha
+ lis adbase,adtable@ha
+ li one,1 # pervasive constant
+ addi opbase,opbase,jtab_www@l
+ addi adbase,adbase,adtable@l
+#ifdef EIP_STATS
+ li ceip,0
+ mftb tstamp
+#endif
+#endif
+/* We branch back here when a call to an external function tells us to resume */
+restart: lwz r3,eflags(state)
+ lis flags,(OF_EXPLICIT|ZF_IN_CR|ZF_PROTECT|SF_IN_CR)>>16
+ lwz csb,csbase(state)
+ extsb result,r3 # SF/PF
+ rlwinm op1,r3,31,0x08 # AF
+ lwz eip,eipimg(state)
+ ZF862ZF(r3) # cr6
+ addi op2,op1,0 # AF
+ lwz ssb,ssbase(state)
+ rlwimi flags,r3,15,OF_VALUE # OF
+ rlwimi r3,r3,32+RF86-RF,RF,RF # RF
+ lwz esb,esbase(state)
+ ori result,result,0xfb # PF
+ mtcrf 0x06,r3 # RF/DF/IF/TF/SF/ZF
+ lbzux opcode,eip,csb
+ rlwimi flags,r3,27,CF_VALUE # CF
+ xori result,result,0xff # PF
+ lwz count,cntimg(state)
+ GOTNEXT # start the emulator
+
+/* Now return */
+exit: lwz r0,100(r1)
+ lwz r4,20(r1)
+ mtlr r0
+ lmw 14,24(r1)
+ mtcr r4
+ addi r1,r1,96
+ blr
+
+trap: crmove 0,RF
+ crclr RF
+ bt- 0,resume
+ sub ceip,eip,csb
+ li r3,code_trap
+complex: addi eip,eip,1
+ stw r3,reason(state)
+ sub eip,eip,csb
+ stw op1,240(state)
+ stw op2,244(state)
+ stw result,248(state)
+ stw flags,252(state)
+ stw r4,parm1(state)
+ stw r5,parm2(state)
+ stw opcode,_opcode(state)
+ bl _eval_flags
+ stw base,_base(state)
+ stw eip,nexteip(state)
+ stw r3,eflags(state)
+ mr r3,state
+ stw offset,_offset(state)
+ stw ceip,eipimg(state)
+ stw count,cntimg(state)
+ bl em86_trap
+ cmpwi r3,0
+ bne exit
+ b restart
+
+/* Main loop */
+/*
+ * The two LSB of each entry in the main table mean the following:
+ * 00: indirect opcode: modrm follows and the three middle bits are an
+ * opcode extension. The entry points to another jump table.
+ * 01: direct instruction, branch directly to the routine.
+ * 10: modrm specifies byte size memory and register operands.
+ * 11: modrm specifies word/long memory and register operands.
+ *
+ * The modrm byte, if present, is always loaded in r7.
+ *
+ * Note: most "mr x,y" instructions have been replaced by "addi x,y,0" since
+ * the latter can be executed in the second integer unit on 603e.
+ */
+
+/*
+ * This code is a very good example of absolutely unmaintainable code.
+ * It was actually much easier to write than it is to understand !
+ * If my computations are right, the maximum path length from fetching
+ * the opcode to exiting to the actual instruction execution is
+ * 46 instructions (for non-prefixed, single byte opcode instructions).
+ *
+ */
+ .align 5
+#ifdef EIP_STATS
+nop: NEXTBYTE(opcode)
+gotopcode: slwi r3,opcode,2
+ bt- TF,trap
+resume: lwzx r4,opbase,r3
+ addi r5,state,eipstat+4
+ clrlslwi r6,ceip,17,3
+ mtctr r4
+ lwzux r7,r5,r6
+ slwi. r0,r4,30 # two lsb of table entry
+ sub r7,r7,tstamp
+ lwz r6,-4(r5)
+ mftb tstamp
+ addi r6,r6,1
+ sub ceip,eip,csb
+ stw r6,-4(r5)
+ add r7,r7,tstamp
+ lwz base,dsbase(state)
+ stw r7,0(r5)
+#else
+nop: NEXTBYTE(opcode)
+gotopcode: slwi r3,opcode,2
+ bt- TF,trap
+resume: lwzx r4,opbase,r3
+ sub ceip,eip,csb
+ mtctr r4
+ slwi. r0,r4,30 # two lsb of table entry
+ lwz base,dsbase(state)
+ addi count,count,1
+#endif
+ bgtctr- # for instructions without modrm
+
+/* modrm byte present */
+ NEXTBYTE(r7) # modrm byte
+ cmplwi cr1,r7,192
+ rlwinm opreg,r7,31,0x1c
+ beq- cr0,8f # extended opcode
+/* modrm with middle 3 bits specifying a register (non prefixed) */
+ rlwinm r0,r4,3,0x8
+ li r4,0x1c0d
+ rlwimi opreg,r7,27,0x01
+ srw r4,r4,r0
+ and opreg,opreg,r4
+ blt cr1,9f
+/* modrm with 2 register operands */
+1: rlwinm offset,r7,2,0x1c
+ addi base,state,0
+ rlwimi offset,r7,30,0x01
+ and offset,offset,r4
+ bctr
+
+/* Prefixes: first segment overrides */
+ .align 4
+_es: NEXTBYTE(r7); addi base,esb,0
+ oris opcode,opcode,0x8000; b 2f
+_cs: NEXTBYTE(r7); addi base,csb,0
+ oris opcode,opcode,0x8000; b 2f
+_fs: NEXTBYTE(r7); lwz base,fsbase(state)
+ oris opcode,opcode,0x8000; b 2f
+_gs: NEXTBYTE(r7); lwz base,gsbase(state)
+ oris opcode,opcode,0x8000; b 2f
+_ss: NEXTBYTE(r7); addi base,ssb,0
+ oris opcode,opcode,0x8000; b 2f
+_ds: NEXTBYTE(r7)
+ oris opcode,opcode,0x8000; b 2f
+
+/* Lock (unimplemented) and repeat prefixes */
+_lock: li r3,code_lock; b complex
+_repnz: NEXTBYTE(r7); rlwimi opcode,one,12,0x1800; b 2f
+_repz: NEXTBYTE(r7); rlwimi opcode,one,11,0x1800; b 2f
+
+/* Operand and address size prefixes */
+ .align 4
+_opsize: NEXTBYTE(r7); ori opcode,opcode,0x200
+ rlwinm r3,opcode,2,0x1ffc; b 2f
+_adsize: NEXTBYTE(r7); ori opcode,opcode,0x400
+ rlwinm r3,opcode,2,0x1ffc; b 2f
+
+_twobytes: NEXTBYTE(r7); addi r3,r3,0x400
+2: rlwimi r3,r7,2,0x3fc
+ lwzx r4,opbase,r3
+ rlwimi opcode,r7,0,0xff
+ mtctr r4
+ slwi. r0,r4,30
+ bgtctr- # direct instruction
+/* modrm byte in a prefixed instruction */
+ NEXTBYTE(r7) # modrm byte
+ cmpwi cr1,r7,192
+ rlwinm opreg,r7,31,0x1c
+ beq- 6f
+/* modrm with middle 3 bits specifying a register (prefixed) */
+ rlwinm r0,r4,3,0x8
+ li r4,0x1c0d
+ rlwimi opreg,r7,27,0x01
+ srw r4,r4,r0
+ and opreg,opreg,r4
+ bnl cr1,1b # 2 register operands
+/* modrm specifying memory with prefix */
+3: rlwinm r3,r3,27,0xff80
+ rlwimi adbase,r7,2,0x1c
+ extsh r3,r3
+ rlwimi r3,r7,31,0x60
+ lwzx r4,r3,adbase
+ cmpwi cr1,r4,0x3090
+ bnl+ cr1,10f
+/* displacement only addressing modes */
+4: cmpwi r4,0x2000
+ bne 5f
+ NEXTWORD(offset)
+ bctr
+5: NEXTDWORD(offset)
+ bctr
+/* modrm with opcode extension (prefixed) */
+6: lwzx r4,r4,opreg
+ mtctr r4
+ blt cr1,3b
+/* modrm with opcode extension and register operand */
+7: rlwinm offset,r7,2,0x1c
+ addi base,state,0
+ rlwinm r0,r4,3,0x8
+ li r4,0x1c0d
+ rlwimi offset,r7,30,0x01
+ srw r4,r4,r0
+ and offset,offset,r4
+ bctr
+/* modrm with opcode extension (non prefixed) */
+8: lwzx r4,r4,opreg
+ mtctr r4
+/* FIXME ? We continue fetching even if the opcode extension is undefined.
+ * It shouldn't do any harm on real mode emulation anyway, and for ROM
+ * BIOS emulation, we are supposed to read valid code.
+ */
+ bnl cr1,7b
+/* modrm specifying memory without prefix */
+9: rlwimi adbase,r7,2,0x1c # memory addressing mode computation
+ rlwinm r3,r7,31,0x60
+ lwzx r4,r3,adbase
+ cmplwi cr1,r4,0x3090
+ blt- cr1,4b # displacement only addressing mode
+10: rlwinm. r0,r7,24,0,1 # three cases distinguished
+ beq- cr1,15f # an sib follows
+ rlwinm r3,r4,30,0x1c # 16bit/32bit/%si index/%di index
+ cmpwi cr1,r3,8 # set cr1 as early as possible
+ rlwinm r6,r4,26,0x1c # base register
+ lwbrx offset,state,r6 # load the base register
+ beq cr0,14f # no displacement
+ cmpw cr2,r4,opcode # check for ss as default base
+ bgt cr0,12f # byte offset
+ beq cr1,11f # 32 bit displacement
+ NEXTWORD(r5) # 16 bit displacement
+ bgt cr1,13f # d16(base,index)
+/* d16(base) */
+ add offset,offset,r5
+ clrlwi offset,offset,16
+ bgtctr cr2
+ addi base,ssb,0
+ bctr
+/* d32(base) */
+11: NEXTDWORD(r5)
+ add offset,offset,r5
+ bgtctr cr2
+ addi base,ssb,0
+ bctr
+/* 8 bit displacement */
+12: NEXTBYTE(r5)
+ extsb r5,r5
+ bgt cr1,13f
+/* d8(base) */
+ extsb r6,r4
+ add offset,offset,r5
+ ori r6,r6,0xffff
+ and offset,offset,r6
+ bgtctr cr2
+ addi base,ssb,0
+ bctr
+/* d8(base,index) and d16(base,index) share this code ! */
+13: lhbrx r3,state,r3
+ add offset,offset,r5
+ add offset,offset,r3
+ clrlwi offset,offset,16
+ bgtctr cr2
+ addi base,ssb,0
+ bctr
+/* no displacement: only indexed modes may use ss as default base */
+14: beqctr cr1 # 32 bit register indirect
+ clrlwi offset,offset,16
+ bltctr cr1 # 16 bit register indirect
+/* (base,index) */
+ lhbrx r3,state,r3 # 16 bit [{bp,bx}+{si,di}]
+ cmpw cr2,r4,opcode # check for ss as default base
+ add offset,offset,r3
+	clrlwi	offset,offset,16	# wrap within the 64kB segment
+ bgtctr+ cr2
+ addi base,ssb,0
+ bctr
+/* sib modes, note that the size of the offset can be known from cr0 */
+15: NEXTBYTE(r7) # get sib
+ rlwinm r3,r7,31,0x1c # index
+ rlwinm offset,r7,2,0x1c # base
+ cmpwi cr1,r3,ESP # has index ?
+ bne cr0,18f # base+d8/d32
+ cmpwi offset,EBP
+ beq 17f # d32(,index,scale)
+ xori r4,one,0xcc01 # build 0x0000cc00
+ rlwnm r4,r4,offset,0,1 # 0 or 0xc0000000
+ lwbrx offset,state,offset
+ cmpw cr2,r4,opcode # use ss ?
+ beq- cr1,16f # no index
+/* (base,index,scale) */
+ lwbrx r3,state,r3
+ srwi r6,r7,6
+ slw r3,r3,r6
+ add offset,offset,r3
+ bgtctr cr2
+ addi base,ssb,0
+ bctr
+/* (base), in practice only (%esp) is coded this way */
+16: bgtctr cr2
+ addi base,ssb,0
+ bctr
+/* d32(,index,scale) */
+17: NEXTDWORD(offset)
+ beqctr- cr1 # no index: very unlikely
+ lwbrx r3,state,r3
+ srwi r6,r7,6
+ slw r3,r3,r6
+ add offset,offset,r3
+ bctr
+/* 8 or 32 bit displacement */
+18: xori r4,one,0xcc01 # build 0x0000cc00
+ rlwnm r4,r4,offset,0,1 # 0 or 0xc0000000
+ lwbrx offset,state,offset
+ cmpw cr2,r4,opcode # use ss ?
+ bgt cr0,20f # 8 bit offset
+/* 32 bit displacement */
+ NEXTDWORD(r5)
+ beq- cr1,21f
+/* d(base,index,scale) */
+19: lwbrx r3,state,r3
+ add offset,offset,r5
+ add offset,offset,r3
+ bgtctr cr2
+ addi base,ssb,0
+ bctr
+/* 8 bit displacement */
+20: NEXTBYTE(r5)
+ extsb r5,r5
+ bne+ cr1,19b
+/* d(base), in practice base is %esp */
+21: add offset,offset,r5
+ bgtctr- cr2
+ addi base,ssb,0
+ bctr
+
+/*
+ * Flag evaluation subroutines: they have not been written for performance
+ * since they are not often used in practice. The rule of the game was to
+ * write them with as few branches as possible.
+ * The first routines evaluate either one or 2 (ZF and SF simultaneously)
+ * flags and do not use r0 and r7.
+ * The more complex routines (_eval_above, _eval_signed and _eval_flags)
+ * call the former ones, using r0 as a return address save register and
+ * r7 as a safe temporary.
+ */
+
+/*
+ * _eval_sf_zf evaluates simultaneously SF and ZF unless ZF is already valid
+ * and protected because it is possible, although it is exceptional, to have
+ * SF and ZF set at the same time after a few instructions which may leave the
+ * flags in this apparently inconsistent state: sahf, popf, iret and the few
+ * (for now unimplemented) instructions which only affect ZF (lar, lsl, arpl,
+ * cmpxchg8b). This also solves the obscure case of ZF set and PF clear.
+ * On return: SF=cr6[0], ZF=cr6[2].
+ */
+
+_eval_sf_zf: andis. r5,flags,ZF_PROTECT>>16
+ rlwinm r3,flags,0,INCDEC_FIELD
+ RES_SHIFT(r4)
+ cntlzw r3,r3
+ slw r4,result,r4
+ srwi r5,r3,5 # ? use result : use op1
+ rlwinm r3,r3,2,0x18
+ oris flags,flags,(SF_IN_CR|SIGNED_IN_CR|ZF_IN_CR)>>16
+ neg r5,r5 # mux result/op2
+ slw r3,op2,r3
+ and r4,r4,r5
+ andc r3,r3,r5
+ xoris flags,flags,(SIGNED_IN_CR)>>16
+ bne- 1f # 12 instructions between set
+ or r3,r3,r4 # and test, good for folding
+ cmpwi cr6,r3,0
+ blr
+1: or. r3,r3,r4
+ crmove SF,0
+ blr
+
+/*
+ * _eval_cf may be called at any time, no other flag is affected.
+ * On return: CF=cr4[0], r3= CF ? 0x100:0 = CF<<8.
+ */
+_eval_cf: addc r3,flags,flags # CF_IN to xer[ca]
+ RES2CF(r4) # get 8 or 16 bit carry
+ subfe r3,result,op1 # generate PPC carry for
+ CF_ROTCNT(r5) # preceding operation
+ addze r3,r4 # put carry into LSB
+ CF_POL(r4,23) # polarity & 0x100
+ oris flags,flags,(CF_IN_CR|ABOVE_IN_CR)>>16
+ rlwnm r3,r3,r5,23,23 # shift carry there
+ xor r3,r3,r4 # CF <<8
+ xoris flags,flags,(ABOVE_IN_CR)>>16
+ cmplw cr4,one,r3 # sets cr4[0]
+ blr
+
+/*
+ * eval_of returns the overflow flag in OF_STATE field, which will be
+ * either 001 (OF clear) or 101 (OF set); it is only called when the two
+ * low order bits of OF_STATE are not 01 (otherwise it will work but
+ * it is an elaborate variant of a nop with a few registers destroyed).
+ * The code multiplexes several sources in a branchless way; it was fun to write.
+ */
+_eval_of: GET_ADDSUB(r4) # 0(add)/1(sub)
+ rlwinm r3,flags,0,INCDEC_FIELD
+ neg r4,r4 # 0(add)/-1(sub)
+ eqv r5,result,op1 # result[]==op1[] (bit by bit)
+ cntlzw r3,r3 # inc/dec
+ xor r4,r4,op2 # true sign of op2
+ oris r5,r5,0x0808 # bits to clear
+ clrlwi r6,r3,31 # 0(inc)/1(dec)
+ eqv r4,r4,op1 # op1[]==op2[] (bit by bit)
+ add r6,op2,r6 # add 1 if dec
+ rlwinm r3,r3,2,0x18 # incdec_shift
+ andc r4,r4,r5 # arithmetic overflow
+ slw r3,r6,r3 # shifted inc/dec result
+ addis r3,r3,0x8000 # compare with 0x80000000
+ ori r4,r4,0x0808 # bits to set
+ cntlzw r3,r3 # 32 if inc/dec overflow
+ OF_ROTCNT(r6)
+ rlwimi r4,r3,18,0x00800000 # insert inc/dec overflow
+ rlwimi flags,one,24,OF_STATE_MASK
+ rlwnm r3,r4,r6,8,8 # get field
+ rlwimi flags,r3,3,OF_VALUE # insert OF
+ blr
+
+/*
+ * _eval_pf will always be called when needed (complex but infrequent);
+ * there are a few quirks for a branchless solution.
+ * On return: PF=cr0[0], PF=MSB(r3)
+ */
+_eval_pf: rlwinm r3,flags,0,INCDEC_FIELD
+ rotrwi r4,op2,4 # from inc/dec
+ rotrwi r5,result,4 # from result
+ cntlzw r3,r3 # use result if 32
+ xor r4,r4,op2
+ xor r5,r5,result
+ rlwinm r3,r3,26,0,0 # 32 becomes 0x80000000
+ clrlwi r4,r4,28
+ lis r6,0x9669 # constant to shift
+ clrlwi r5,r5,28
+ rlwnm r4,r6,r4,0,0 # parity from inc/dec
+ rlwnm r5,r6,r5,0,0 # parity from result
+ andc r4,r4,r3 # select which one
+ and r5,r5,r3
+ add. r3,r4,r5 # and test to simplify
+ blr # returns in r3 and cr0 set.
+
+/*
+ * _eval_af will always be called when needed (complex but infrequent):
+ * - if after inc, af is set when 4 low order bits of op1 are 0
+ * - if after dec, af is set when 4 low order bits of op1 are 1
+ * (or 0 after adding 1 as implemented here)
+ * - if after add/sub/adc/sbb/cmp af is set from sum of 4 LSB of op1
+ * and 4 LSB of op2 (possibly complemented) plus carry in.
+ * - other instructions leave AF undefined so the returned value is irrelevant.
+ * Returned value must be masked with 0x10, since all other bits are undefined.
+ * The branchless code is perhaps not the most efficient, but quite parallel.
+ */
+_eval_af: rlwinm r3,flags,0,INCDEC_FIELD
+ clrlwi r5,op2,28 # 4 LSB of op2
+ addc r4,flags,flags # carry_in
+ GET_ADDSUB(r6)
+ cntlzw r3,r3 # if inc/dec 16..23 else 32
+ neg r6,r6 # add/sub
+ clrlwi r4,r3,31 # if dec 1 else 0
+ xor r5,r5,r6 # conditionally complement
+ clrlwi r6,op1,28 # 4 LSB of op1
+ add r4,op2,r4 # op2+(dec ? 1 : 0)
+ clrlwi r4,r4,28 # 4 LSB of op2+(dec ? 1 : 0)
+ adde r5,r6,r5 # op1+cy_in+(op2/~op2)
+ cntlzw r4,r4 # 28..31 if not AF, 32 if set
+ andc r5,r5,r3 # masked AF from add/sub...
+ andc r4,r3,r4 # masked AF from inc/dec
+ or r3,r4,r5
+ blr
+
+/*
+ * _eval_above will only be called if ABOVE_IN_CR is not set.
+ * On return: ZF=cr6[2], CF=cr4[0], ABOVE=cr4[1]
+ */
+_eval_above: andis. r3,flags,ZF_IN_CR>>16
+ mflr r0
+ beql+ _eval_sf_zf
+ andis. r3,flags,CF_IN_CR>>16
+ beql+ _eval_cf
+ mtlr r0
+ oris flags,flags,ABOVE_IN_CR>>16
+ crnor ABOVE,ZF,CF
+ blr
+
+/* _eval_signed may only be called when signed_in_cr is clear ! */
+_eval_signed: andis. r3,flags,SF_IN_CR>>16
+ mflr r0
+ beql+ _eval_sf_zf
+# SF_IN_CR and ZF_IN_CR are set, SIGNED_IN_CR is clear
+ rlwinm. r3,flags,5,0,1
+ xoris flags,flags,(SIGNED_IN_CR|SF_IN_CR)>>16
+ bngl+ _eval_of
+ andis. r3,flags,OF_VALUE>>16
+ mtlr r0
+ crxor SLT,SF,OF
+ crnor SGT,SLT,ZF
+ blr
+
+_eval_flags: mflr r0
+ bl _eval_cf
+ li r7,2
+ rlwimi r7,r3,24,CF86,CF86 # 2 if CF clear, 3 if set
+ bl _eval_pf
+ andis. r4,flags,SF_IN_CR>>16
+ rlwimi r7,r3,32+PF-PF86,PF86,PF86
+ bl _eval_af
+ rlwimi r7,r3,0,AF86,AF86
+ beql+ _eval_sf_zf
+ mfcr r3
+ rlwinm. r4,flags,5,0,1
+ rlwimi r7,r3,0,DF86,SF86
+ ZF2ZF86(r3,r7)
+ bngl+ _eval_of
+ mtlr r0
+ lis r4,0x0004
+ lwz r3,eflags(state)
+ addi r4,r4,0x7000
+ rlwimi r7,flags,17,OF86,OF86
+ and r3,r3,r4
+ or r3,r3,r7
+ blr
+
+/* Quite simple for real mode, input in r4, returns in r3. */
+_segment_load: lwz r5,vbase(state)
+ rlwinm r3,r4,4,0xffff0 # segment selector * 16
+ add r3,r3,r5
+ blr
+
+/* To allow I/O port virtualization if necessary, code for exception in r3,
+port number in r4 */
+_check_port: lwz r5,ioperm(state)
+ rlwinm r6,r4,29,0x1fff # 0 to 8kB
+ lis r0,0xffff
+ lhbrx r5,r5,r6
+ clrlwi r6,r4,29 # modulo 8
+ rlwnm r0,r0,r3,0x0f # 1, 3, or 0xf
+ slw r0,r0,r6
+ and. r0,r0,r5
+ bne- complex
+ blr
+/*
+ * Instructions are in approximate functional order:
+ * 1) move, exchange, lea, push/pop, pusha/popa
+ * 2) cbw/cwde/cwd/cdq, zero/sign extending moves, in/out
+ * 3) arithmetic: add/sub/adc/sbb/cmp/inc/dec/neg
+ * 4) logical: and/or/xor/test/not/bt/btc/btr/bts/bsf/bsr
+ * 5) jump, call, ret
+ * 6) string instructions and xlat
+ * 7) rotate/shift/mul/div
+ * 8) segment register, far jumps, calls and rets, interrupts
+ * 9) miscellaneous (flags, bcd,...)
+ */
+
+#define MEM offset,base
+#define REG opreg,state
+#define SELECTORS 32
+#define SELBASES 64
+
+/* Immediate moves */
+movb_imm_reg: rlwinm opreg,opcode,2,28,29; lbz r3,1(eip)
+ rlwimi opreg,opcode,30,31,31; lbzu opcode,2(eip)
+ stbx r3,REG; GOTNEXT
+
+movw_imm_reg: lhz r3,1(eip); clrlslwi opreg,opcode,29,2; lbzu opcode,3(eip)
+ sthx r3,REG; GOTNEXT
+
+movl_imm_reg: lwz r3,1(eip); clrlslwi opreg,opcode,29,2; lbzu opcode,5(eip)
+ stwx r3,REG; GOTNEXT
+
+movb_imm_mem: lbz r0,1(eip); cmpwi opreg,0
+ lbzu opcode,2(eip); bne- ud
+ stbx r0,MEM; GOTNEXT
+
+movw_imm_mem: lhz r0,1(eip); cmpwi opreg,0
+ lbzu opcode,3(eip); bne- ud
+ sthx r0,MEM; GOTNEXT
+
+movl_imm_mem: lwz r0,1(eip); cmpwi opreg,0
+ lbzu opcode,5(eip); bne- ud
+ stwx r0,MEM; GOTNEXT
+
+/* The special short form moves between memory and al/ax/eax */
+movb_al_a32: lwbrx offset,eip,one; lbz r0,AL(state); lbzu opcode,5(eip)
+ stbx r0,MEM; GOTNEXT
+
+movb_al_a16: lhbrx offset,eip,one; lbz r0,AL(state); lbzu opcode,3(eip)
+ stbx r0,MEM; GOTNEXT
+
+movw_ax_a32: lwbrx offset,eip,one; lhz r0,AX(state); lbzu opcode,5(eip)
+ sthx r0,MEM; GOTNEXT
+
+movw_ax_a16: lhbrx offset,eip,one; lhz r0,AX(state); lbzu opcode,3(eip)
+ sthx r0,MEM; GOTNEXT
+
+movl_eax_a32: lwbrx offset,eip,one; lwz r0,EAX(state); lbzu opcode,5(eip)
+ stwx r0,MEM; GOTNEXT
+
+movl_eax_a16: lhbrx offset,eip,one; lwz r0,EAX(state); lbzu opcode,3(eip)
+ stwx r0,MEM; GOTNEXT
+
+movb_a32_al: lwbrx offset,eip,one; lbzu opcode,5(eip); lbzx r0,MEM
+ stb r0,AL(state); GOTNEXT
+
+movb_a16_al: lhbrx offset,eip,one; lbzu opcode,3(eip); lbzx r0,MEM
+ stb r0,AL(state); GOTNEXT
+
+movw_a32_ax: lwbrx offset,eip,one; lbzu opcode,5(eip); lhzx r0,MEM
+ sth r0,AX(state); GOTNEXT
+
+movw_a16_ax: lhbrx offset,eip,one; lbzu opcode,3(eip); lhzx r0,MEM
+ sth r0,AX(state); GOTNEXT
+
+movl_a32_eax: lwbrx offset,eip,one; lbzu opcode,5(eip); lwzx r0,MEM
+ stw r0,EAX(state); GOTNEXT
+
+movl_a16_eax: lhbrx offset,eip,one; lbzu opcode,3(eip); lwzx r0,MEM
+ stw r0,EAX(state); GOTNEXT
+
+/* General purpose move (all are exactly 4 instructions long) */
+ .align 4
+movb_reg_mem: lbzx r0,REG
+ NEXTBYTE(opcode)
+ stbx r0,MEM
+ GOTNEXT
+
+movw_reg_mem: lhzx r0,REG
+ NEXTBYTE(opcode)
+ sthx r0,MEM
+ GOTNEXT
+
+movl_reg_mem: lwzx r0,REG
+ NEXTBYTE(opcode)
+ stwx r0,MEM
+ GOTNEXT
+
+movb_mem_reg: lbzx r0,MEM
+ NEXTBYTE(opcode)
+ stbx r0,REG
+ GOTNEXT
+
+movw_mem_reg: lhzx r0,MEM
+ NEXTBYTE(opcode)
+ sthx r0,REG
+ GOTNEXT
+
+movl_mem_reg: lwzx r0,MEM
+ NEXTBYTE(opcode)
+ stwx r0,REG
+ GOTNEXT
+
+/* short form exchange ax/eax with register */
+xchgw_ax_reg: clrlslwi opreg,opcode,29,2
+ lhz r3,AX(state)
+ lhzx r4,REG
+ sthx r3,REG
+ sth r4,AX(state)
+ NEXT
+
+xchgl_eax_reg: clrlslwi opreg,opcode,29,2
+ lwz r3,EAX(state)
+ lwzx r4,REG
+ stwx r3,REG
+ stw r4,EAX(state)
+ NEXT
+
+/* General exchange (unlocked!) */
+xchgb_reg_mem: lbzx r3,MEM
+ lbzx r4,REG
+ NEXTBYTE(opcode)
+ stbx r3,REG
+ stbx r4,MEM
+ GOTNEXT
+
+xchgw_reg_mem: lhzx r3,MEM
+ lhzx r4,REG
+ sthx r3,REG
+ sthx r4,MEM
+ NEXT
+
+xchgl_reg_mem: lwzx r3,MEM
+ lwzx r4,REG
+ stwx r3,REG
+ stwx r4,MEM
+ NEXT
+
+/* lea, one of the simplest instructions */
+leaw: cmpw base,state
+ beq- ud
+ sthbrx offset,REG
+ NEXT
+
+leal: cmpw base,state
+ beq- ud
+ stwbrx offset,REG
+ NEXT
+
+/* Short form pushes and pops */
+pushw_sp_reg: li r3,SP
+ lhbrx r4,state,r3
+ clrlslwi opreg,opcode,29,2
+ lhzx r0,REG
+ addi r4,r4,-2
+ sthbrx r4,state,r3
+ clrlwi r4,r4,16
+ sthx r0,ssb,r4
+ NEXT
+
+pushl_sp_reg: li r3,SP
+ lhbrx r4,state,r3
+ clrlslwi opreg,opcode,29,2
+ lwzx r0,REG
+ addi r4,r4,-4
+ sthbrx r4,state,r3
+ clrlwi r4,r4,16
+ stwx r0,ssb,r4
+ NEXT
+
+popw_sp_reg: li r3,SP
+ lhbrx r4,state,r3
+ clrlslwi opreg,opcode,29,2
+ lhzx r0,ssb,r4
+ addi r4,r4,2 # order is important in case of pop sp
+ sthbrx r4,state,r3
+ sthx r0,REG
+ NEXT
+
+popl_sp_reg: li r3,SP
+ lhbrx r4,state,r3
+ clrlslwi opreg,opcode,29,2
+ lwzx r0,ssb,r4
+ addi r4,r4,4
+ sthbrx r4,state,r3
+ stwx r0,REG
+ NEXT
+
+/* Push immediate */
+pushw_sp_imm: li r3,SP
+ lhbrx r4,state,r3
+ lhz r0,1(eip)
+ addi r4,r4,-2
+ sthbrx r4,state,r3
+ clrlwi r4,r4,16
+ lbzu opcode,3(eip)
+ sthx r0,ssb,r4
+ GOTNEXT
+
+pushl_sp_imm: li r3,SP
+ lhbrx r4,state,r3
+ lwz r0,1(eip)
+ addi r4,r4,-4
+ sthbrx r4,state,r3
+ clrlwi r4,r4,16
+ lbzu opcode,5(eip)
+ stwx r0,ssb,r4
+ GOTNEXT
+
+pushw_sp_imm8: li r3,SP
+ lhbrx r4,state,r3
+ lhz r0,1(eip)
+ addi r4,r4,-2
+ sthbrx r4,state,r3
+ clrlwi r4,r4,16
+ lbzu opcode,2(eip)
+ extsb r0,r0
+ sthx r0,ssb,r4
+ GOTNEXT
+
+pushl_sp_imm8: li r3,SP
+ lhbrx r4,state,r3
+ lhz r0,1(eip)
+ addi r4,r4,-4
+ sthbrx r4,state,r3
+ clrlwi r4,r4,16
+ lbzu opcode,2(eip)
+ extsb r0,r0
+ stwx r0,ssb,r4
+ GOTNEXT
+
+/* General push/pop */
+pushw_sp: lhbrx r0,MEM
+ li r3,SP
+ lhbrx r4,state,r3
+ addi r4,r4,-2
+ sthbrx r4,state,r3
+ clrlwi r4,r4,16
+ sthbrx r0,r4,ssb
+ NEXT
+
+pushl_sp: lwbrx r0,MEM
+ li r3,SP
+ lhbrx r4,state,r3
+ addi r4,r4,-4
+ sthbrx r4,state,r3
+ clrlwi r4,r4,16
+ stwbrx r0,r4,ssb
+ NEXT
+
+/* pop is an exception with 32 bit addressing modes: the address may be
+calculated incorrectly when esp is used as the base register. The 16 bit
+addressing modes are safe. */
+
+popw_sp_a16: cmpw cr1,opreg,0 # first check the opcode
+ li r3,SP
+ lhbrx r4,state,r3
+ bne- cr1,ud
+ lhzx r0,ssb,r4
+ addi r4,r4,2
+ sthx r0,MEM
+ sthbrx r4,state,r3
+ NEXT
+
+popl_sp_a16: cmpw cr1,opreg,0
+ li r3,SP
+ lhbrx r4,state,r3
+ bne- cr1,ud
+ lwzx r0,ssb,r4
+ addi r4,r4,4 # 32 bit pop: sp advances by 4
+ stwx r0,MEM
+ sthbrx r4,state,r3
+ NEXT
+
+/* 32 bit addressing modes for pop not implemented for now. */
+ .equ popw_sp_a32,unimpl
+ .equ popl_sp_a32,unimpl
+
+/* pusha/popa */
+pushaw_sp: li r3,SP
+ li r0,8
+ lhbrx r4,r3,state
+ mtctr r0
+ addi r5,state,-4
+1: addi r4,r4,-2
+ lhzu r6,4(r5)
+ clrlwi r4,r4,16
+ sthx r6,ssb,r4
+ bdnz 1b
+ sthbrx r4,r3,state # new sp
+ NEXT
+
+pushal_sp: li r3,SP
+ li r0,8
+ lhbrx r4,r3,state
+ mtctr r0
+ addi r5,state,-4
+1: addi r4,r4,-4
+ lwzu r6,4(r5)
+ clrlwi r4,r4,16
+ stwx r6,ssb,r4
+ bdnz 1b
+ sthbrx r4,r3,state # new sp
+ NEXT
+
+popaw_sp: li r3,SP
+ li r0,8
+ lhbrx r4,state,r3
+ mtctr r0
+ addi r5,state,32
+1: lhzx r6,ssb,r4
+ addi r4,r4,2
+ sthu r6,-4(r5)
+ clrlwi r4,r4,16
+ bdnz 1b
+ sthbrx r4,r3,state # updated sp
+ NEXT
+
+popal_sp: li r3,SP
+ lis r0,0xef00 # mask to skip esp
+ lhbrx r4,state,r3
+ addi r5,state,32
+1: add. r0,r0,r0
+ lwzx r6,ssb,r4
+ addi r4,r4,4
+ stwu r6,-4(r5)
+ clrlwi r4,r4,16
+ blt 1b
+ addi r6,r6,-4
+ beq 2f
+ addi r4,r4,4
+ clrlwi r4,r4,16
+ b 1b
+2: sthbrx r4,state,r3 # updated sp
+ NEXT
+
+/* Moves with zero or sign extension: first the special cases */
+cbw: lbz r3,AL(state)
+ extsb r3,r3
+ sthbrx r3,AX,state
+ NEXT
+
+cwde: lhbrx r3,AX,state
+ extsh r3,r3
+ stwbrx r3,EAX,state
+ NEXT
+
+cwd: lbz r3,AH(state)
+ extsb r3,r3
+ srwi r3,r3,8 # get sign bits
+ sth r3,DX(state)
+ NEXT
+
+cdq: lwbrx r3,EAX,state
+ srawi r3,r3,31
+ stw r3,EDX(state) # byte order unimportant !
+ NEXT
+
+/* The moves with zero or sign extension are special since the source
+and destination are not the same size. The register describing the destination
+is modified to take this into account. */
+
+movsbw: lbzx r3,MEM
+ rlwimi opreg,opreg,4,0x10
+ extsb r3,r3
+ rlwinm opreg,opreg,0,0x1c
+ sthbrx r3,REG
+ NEXT
+
+movsbl: lbzx r3,MEM
+ rlwimi opreg,opreg,4,0x10
+ extsb r3,r3
+ rlwinm opreg,opreg,0,0x1c
+ stwbrx r3,REG
+ NEXT
+
+ .equ movsww, movw_mem_reg
+
+movswl: lhbrx r3,MEM
+ extsh r3,r3
+ stwbrx r3,REG
+ NEXT
+
+movzbw: lbzx r3,MEM
+ rlwimi opreg,opreg,4,0x10
+ rlwinm opreg,opreg,0,0x1c
+ sthbrx r3,REG
+ NEXT
+
+movzbl: lbzx r3,MEM
+ rlwimi opreg,opreg,4,0x10
+ rlwinm opreg,opreg,0,0x1c
+ stwbrx r3,REG
+ NEXT
+
+ .equ movzww, movw_mem_reg
+
+movzwl: lhbrx r3,MEM
+ stwbrx r3,REG
+ NEXT
+
+/* Byte swapping */
+bswap: clrlslwi opreg,opcode,29,2 # extract reg from opcode
+ lwbrx r0,REG
+ stwx r0,REG
+ NEXT
+
+/* Input/output */
+inb_port_al: NEXTBYTE(r4)
+ b 1f
+inb_dx_al: li r4,DX
+ lhbrx r4,r4,state
+1: li r3,code_inb
+ bl _check_port
+ lwz r3,iobase(state)
+ lbzx r5,r4,r3
+ eieio
+ stb r5,AL(state)
+ NEXT
+
+inw_port_ax: NEXTBYTE(r4)
+ b 1f
+inw_dx_ax: li r4,DX
+ lhbrx r4,r4,state
+1: li r3,code_inw
+ bl _check_port
+ lwz r3,iobase(state)
+ lhzx r5,r4,r3
+ eieio
+ sth r5,AX(state)
+ NEXT
+
+inl_port_eax: NEXTBYTE(r4)
+ b 1f
+inl_dx_eax: li r4,DX
+ lhbrx r4,r4,state
+1: li r3,code_inl
+ bl _check_port
+ lwz r3,iobase(state)
+ lwzx r5,r4,r3
+ eieio
+ stw r5,EAX(state)
+ NEXT
+
+outb_al_port: NEXTBYTE(r4)
+ b 1f
+outb_al_dx: li r4,DX
+ lhbrx r4,r4,state
+1: li r3,code_outb
+ bl _check_port
+ lwz r3,iobase(state)
+ lbz r5,AL(state)
+ stbx r5,r4,r3
+ eieio
+ NEXT
+
+outw_ax_port: NEXTBYTE(r4)
+ b 1f
+outw_ax_dx: li r4,DX
+ lhbrx r4,r4,state
+1: li r3,code_outw
+ bl _check_port
+ lwz r3,iobase(state)
+ lhz r5,AX(state)
+ sthx r5,r4,r3
+ eieio
+ NEXT
+
+outl_eax_port: NEXTBYTE(r4)
+ b 1f
+outl_eax_dx: li r4,DX
+ lhbrx r4,r4,state
+1: li r3,code_outl
+ bl _check_port
+ lwz r4,iobase(state)
+ lwz r5,EAX(state)
+ stwx r5,r4,r3
+ eieio
+ NEXT
+
+
+/* Macro used for add and sub */
+#define ARITH(op,fl) \
+op##b_reg_mem: lbzx op1,MEM; SET_FLAGS(fl(B)); lbzx op2,REG; \
+ op result,op1,op2; \
+ stbx result,MEM; NEXT; \
+op##w_reg_mem: lhbrx op1,MEM; SET_FLAGS(fl(W)); lhbrx op2,REG; \
+ op result,op1,op2; \
+ sthbrx result,MEM; NEXT; \
+op##l_reg_mem: lwbrx op1,MEM; SET_FLAGS(fl(L)); lwbrx op2,REG; \
+ op result,op1,op2; \
+ stwbrx result,MEM; NEXT; \
+op##b_mem_reg: lbzx op2,MEM; SET_FLAGS(fl(B)); lbzx op1,REG; \
+ op result,op1,op2; \
+ stbx result,REG; NEXT; \
+op##w_mem_reg: lhbrx op2,MEM; SET_FLAGS(fl(W)); lhbrx op1,REG; \
+ op result,op1,op2; \
+ sthbrx result,REG; NEXT; \
+op##l_mem_reg: lwbrx op2,MEM; SET_FLAGS(fl(L)); lwbrx op1,REG; \
+ op result,op1,op2; \
+ stwbrx result,REG; NEXT; \
+op##b_imm_al: addi base,state,0; li offset,AL; \
+op##b_imm: lbzx op1,MEM; SET_FLAGS(fl(B)); lbz op2,1(eip); \
+ op result,op1,op2; \
+ lbzu opcode,2(eip); \
+ stbx result,MEM; GOTNEXT; \
+op##w_imm_ax: addi base,state,0; li offset,AX; \
+op##w_imm: lhbrx op1,MEM; SET_FLAGS(fl(W)); lhbrx op2,eip,one; \
+ op result,op1,op2; \
+ lbzu opcode,3(eip); \
+ sthbrx result,MEM; GOTNEXT; \
+op##w_imm8: lbz op2,1(eip); SET_FLAGS(fl(W)); lhbrx op1,MEM; \
+ extsb op2,op2; clrlwi op2,op2,16; \
+ op result,op1,op2; \
+ lbzu opcode,2(eip); \
+ sthbrx result,MEM; GOTNEXT; \
+op##l_imm_eax: addi base,state,0; li offset,EAX; \
+op##l_imm: lwbrx op1,MEM; SET_FLAGS(fl(L)); lwbrx op2,eip,one; \
+ op result,op1,op2; lbzu opcode,5(eip); \
+ stwbrx result,MEM; GOTNEXT; \
+op##l_imm8: lbz op2,1(eip); SET_FLAGS(fl(L)); lwbrx op1,MEM; \
+ extsb op2,op2; lbzu opcode,2(eip); \
+ op result,op1,op2; \
+ stwbrx result,MEM; GOTNEXT
+
+ ARITH(add, FLAGS_ADD)
+ ARITH(sub, FLAGS_SUB)
+
+#define adc(result, op1, op2) adde result,op1,op2
+#define sbb(result, op1, op2) subfe result,op2,op1
+
+#define ARITH_WITH_CARRY(op, fl) \
+op##b_reg_mem: lbzx op1,MEM; bl carryfor##op; lbzx op2,REG; \
+ ADD_FLAGS(fl(B)); op(result, op1, op2); \
+ stbx result,MEM; NEXT; \
+op##w_reg_mem: lhbrx op1,MEM; bl carryfor##op; lhbrx op2,REG; \
+ ADD_FLAGS(fl(W)); op(result, op1, op2); \
+ sthbrx result,MEM; NEXT; \
+op##l_reg_mem: lwbrx op1,MEM; bl carryfor##op; lwbrx op2,REG; \
+ ADD_FLAGS(fl(L)); op(result, op1, op2); \
+ stwbrx result,MEM; NEXT; \
+op##b_mem_reg: lbzx op1,MEM; bl carryfor##op; lbzx op2,REG; \
+ ADD_FLAGS(fl(B)); op(result, op1, op2); \
+ stbx result,REG; NEXT; \
+op##w_mem_reg: lhbrx op1,MEM; bl carryfor##op; lhbrx op2,REG; \
+ ADD_FLAGS(fl(W)); op(result, op1, op2); \
+ sthbrx result,REG; NEXT; \
+op##l_mem_reg: lwbrx op1,MEM; bl carryfor##op; lwbrx op2,REG; \
+ ADD_FLAGS(fl(L)); op(result, op1, op2); \
+ stwbrx result,REG; NEXT; \
+op##b_imm_al: addi base,state,0; li offset,AL; \
+op##b_imm: lbzx op1,MEM; bl carryfor##op; lbz op2,1(eip); \
+ ADD_FLAGS(fl(B)); lbzu opcode,2(eip); op(result, op1, op2); \
+ stbx result,MEM; GOTNEXT; \
+op##w_imm_ax: addi base,state,0; li offset,AX; \
+op##w_imm: lhbrx op1,MEM; bl carryfor##op; lhbrx op2,eip,one; \
+ ADD_FLAGS(fl(W)); lbzu opcode,3(eip); op(result, op1, op2); \
+ sthbrx result,MEM; GOTNEXT; \
+op##w_imm8: lbz op2,1(eip); bl carryfor##op; lhbrx op1,MEM; \
+ extsb op2,op2; ADD_FLAGS(fl(W)); clrlwi op2,op2,16; \
+ lbzu opcode,2(eip); op(result, op1, op2); \
+ sthbrx result,MEM; GOTNEXT; \
+op##l_imm_eax: addi base,state,0; li offset,EAX; \
+op##l_imm: lwbrx op1,MEM; bl carryfor##op; lwbrx op2,eip,one; \
+ ADD_FLAGS(fl(L)); lbzu opcode,5(eip); op(result, op1, op2); \
+ stwbrx result,MEM; GOTNEXT; \
+op##l_imm8: lbz op2,1(eip); SET_FLAGS(fl(L)); lwbrx op1,MEM; \
+ extsb op2,op2; lbzu opcode,2(eip); \
+ op(result, op1, op2); \
+ stwbrx result,MEM; GOTNEXT
+
+carryforadc: addc r3,flags,flags # CF_IN to xer[ca]
+ RES2CF(r4) # get 8 or 16 bit carry
+ subfe r3,result,op1 # generate PPC carry for
+ CF_ROTCNT(r5) # preceding operation
+ addze r3,r4 # 32 bit carry in LSB
+ CF_POL(r4,23) # polarity
+ rlwnm r3,r3,r5,0x100 # shift carry there
+ xor flags,r4,r3 # CF86 ? 0x100 : 0
+ addic r4,r3,0xffffff00 # set xer[ca]
+ rlwinm flags,r3,23,CF_IN
+ blr
+
+ ARITH_WITH_CARRY(adc, FLAGS_ADD)
+
+/* for sbb the input carry must be the complement of the x86 carry */
+carryforsbb: addc r3,flags,flags # CF_IN to xer[ca]
+ RES2CF(r4) # 8/16 bit carry from result
+ subfe r3,result,op1
+ CF_ROTCNT(r5)
+ addze r3,r4
+ CF_POL(r4,23)
+ rlwnm r3,r3,r5,0x100
+ eqv flags,r4,r3 # CF86 ? 0xfffffeff:0xffffffff
+ addic r4,r3,1 # set xer[ca]
+ rlwinm flags,r3,23,CF_IN # keep only the carry
+ blr
+
+ ARITH_WITH_CARRY(sbb, FLAGS_SBB)
+
+cmpb_reg_mem: lbzx op1,MEM
+ SET_FLAGS(FLAGS_CMP(B))
+ lbzx op2,REG
+ extsb r3,op1
+ cmplw cr4,op1,op2
+ extsb r4,op2
+ sub result,op1,op2
+ cmpw cr6,r3,r4
+ NEXT
+
+cmpw_reg_mem: lhbrx op1,MEM
+ SET_FLAGS(FLAGS_CMP(W))
+ lhbrx op2,REG
+ extsh r3,op1
+ cmplw cr4,op1,op2
+ extsh r4,op2
+ sub result,op1,op2
+ cmpw cr6,r3,r4
+ NEXT
+
+cmpl_reg_mem: lwbrx op1,MEM
+ SET_FLAGS(FLAGS_CMP(L))
+ lwbrx op2,REG
+ cmplw cr4,op1,op2
+ sub result,op1,op2
+ cmpw cr6,op1,op2
+ NEXT
+
+cmpb_mem_reg: lbzx op2,MEM
+ SET_FLAGS(FLAGS_CMP(B))
+ lbzx op1,REG
+ extsb r4,op2
+ cmplw cr4,op1,op2
+ extsb r3,op1
+ sub result,op1,op2
+ cmpw cr6,r3,r4
+ NEXT
+
+cmpw_mem_reg: lhbrx op2,MEM
+ SET_FLAGS(FLAGS_CMP(W))
+ lhbrx op1,REG
+ extsh r4,op2
+ cmplw cr4,op1,op2
+ extsh r3,op1
+ sub result,op1,op2
+ cmpw cr6,r3,r4
+ NEXT
+
+cmpl_mem_reg: lwbrx op2,MEM
+ SET_FLAGS(FLAGS_CMP(L))
+ lwbrx op1,REG
+ cmpw cr6,op1,op2
+ sub result,op1,op2
+ cmplw cr4,op1,op2
+ NEXT
+
+cmpb_imm_al: addi base,state,0
+ li offset,AL
+cmpb_imm: lbzx op1,MEM
+ SET_FLAGS(FLAGS_CMP(B))
+ lbz op2,1(eip)
+ extsb r3,op1
+ cmplw cr4,op1,op2
+ lbzu opcode,2(eip)
+ extsb r4,op2
+ sub result,op1,op2
+ cmpw cr6,r3,r4
+ GOTNEXT
+
+cmpw_imm_ax: addi base,state,0
+ li offset,AX
+cmpw_imm: lhbrx op1,MEM
+ SET_FLAGS(FLAGS_CMP(W))
+ lhbrx op2,eip,one
+ extsh r3,op1
+ cmplw cr4,op1,op2
+ lbzu opcode,3(eip)
+ extsh r4,op2
+ sub result,op1,op2
+ cmpw cr6,r3,r4
+ GOTNEXT
+
+cmpw_imm8: lbz op2,1(eip)
+ SET_FLAGS(FLAGS_CMP(W))
+ lhbrx op1,MEM
+ extsb r4,op2
+ extsh r3,op1
+ lbzu opcode,2(eip)
+ clrlwi op2,r4,16
+ cmpw cr6,r3,r4
+ sub result,op1,op2
+ cmplw cr4,op1,op2
+ GOTNEXT
+
+cmpl_imm_eax: addi base,state,0
+ li offset,EAX
+cmpl_imm: lwbrx op1,MEM
+ SET_FLAGS(FLAGS_CMP(L))
+ lwbrx op2,eip,one
+ cmpw cr6,op1,op2
+ lbzu opcode,5(eip)
+ sub result,op1,op2
+ cmplw cr4,op1,op2
+ GOTNEXT
+
+cmpl_imm8: lbz op2,1(eip)
+ SET_FLAGS(FLAGS_CMP(L))
+ lwbrx op1,MEM
+ extsb op2,op2
+ lbzu opcode,2(eip)
+ cmpw cr6,op1,op2
+ sub result,op1,op2
+ cmplw cr4,op1,op2
+ GOTNEXT
+
+/* Increment and decrement */
+incb: lbzx op2,MEM
+ INC_FLAGS(B)
+ addi op2,op2,1
+ stbx op2,MEM
+ NEXT
+
+incw_reg: clrlslwi opreg,opcode,29,2 # extract reg from opcode
+ lhbrx op2,REG
+ INC_FLAGS(W)
+ addi op2,op2,1
+ sthbrx op2,REG
+ NEXT
+
+incw: lhbrx op2,MEM
+ INC_FLAGS(W)
+ addi op2,op2,1
+ sthbrx op2,MEM
+ NEXT
+
+incl_reg: clrlslwi opreg,opcode,29,2
+ lwbrx op2,REG
+ INC_FLAGS(L)
+ addi op2,op2,1
+ sthbrx op2,REG
+ NEXT
+
+incl: lwbrx op2,MEM
+ INC_FLAGS(L)
+ addi op2,op2,1
+ stwbrx op2,MEM
+ NEXT
+
+decb: lbzx op2,MEM
+ DEC_FLAGS(B)
+ addi op2,op2,-1
+ stbx op2,MEM
+ NEXT
+
+decw_reg: clrlslwi opreg,opcode,29,2 # extract reg from opcode
+ lhbrx op2,REG
+ DEC_FLAGS(W)
+ addi op2,op2,-1
+ sthbrx op2,REG
+ NEXT
+
+decw: lhbrx op2,MEM
+ DEC_FLAGS(W)
+ addi op2,op2,-1
+ sthbrx op2,MEM
+ NEXT
+
+decl_reg: clrlslwi opreg,opcode,29,2
+ lwbrx op2,REG
+ DEC_FLAGS(L)
+ addi op2,op2,-1
+ sthbrx op2,REG
+ NEXT
+
+decl: lwbrx op2,MEM
+ DEC_FLAGS(L)
+ addi op2,op2,-1
+ stwbrx op2,MEM
+ NEXT
+
+negb: lbzx op2,MEM
+ SET_FLAGS(FLAGS_SUB(B))
+ neg result,op2
+ li op1,0
+ stbx result,MEM
+ NEXT
+
+negw: lhbrx op2,MEM
+ SET_FLAGS(FLAGS_SUB(W))
+ neg result,op2
+ li op1,0
+ sthbrx result,MEM
+ NEXT
+
+negl: lwbrx op2,MEM
+ SET_FLAGS(FLAGS_SUB(L))
+ subfic result,op2,0
+ li op1,0
+ stwbrx result,MEM
+ NEXT
+
+/* Macro used to generate code for OR/AND/XOR */
+#define LOGICAL(op) \
+op##b_reg_mem: lbzx op1,MEM; SET_FLAGS(FLAGS_LOG(B)); lbzx op2,REG; \
+ op result,op1,op2; \
+ stbx result,MEM; NEXT; \
+op##w_reg_mem: lhbrx op1,MEM; SET_FLAGS(FLAGS_LOG(W)); lhbrx op2,REG; \
+ op result,op1,op2; \
+ sthbrx result,MEM; NEXT; \
+op##l_reg_mem: lwbrx op1,MEM; SET_FLAGS(FLAGS_LOG(L)); lwbrx op2,REG; \
+ op result,op1,op2; \
+ stwbrx result,MEM; NEXT; \
+op##b_mem_reg: lbzx op1,MEM; SET_FLAGS(FLAGS_LOG(B)); lbzx op2,REG; \
+ op result,op1,op2; \
+ stbx result,REG; NEXT; \
+op##w_mem_reg: lhbrx op2,MEM; SET_FLAGS(FLAGS_LOG(W)); lhbrx op1,REG; \
+ op result,op1,op2; \
+ sthbrx result,REG; NEXT; \
+op##l_mem_reg: lwbrx op2,MEM; SET_FLAGS(FLAGS_LOG(L)); lwbrx op1,REG; \
+ op result,op1,op2; \
+ stwbrx result,REG; NEXT; \
+op##b_imm_al: addi base,state,0; li offset,AL; \
+op##b_imm: lbzx op1,MEM; SET_FLAGS(FLAGS_LOG(B)); lbz op2,1(eip); \
+ op result,op1,op2; lbzu opcode,2(eip); \
+ stbx result,MEM; GOTNEXT; \
+op##w_imm_ax: addi base,state,0; li offset,AX; \
+op##w_imm: lhbrx op1,MEM; SET_FLAGS(FLAGS_LOG(W)); lhbrx op2,eip,one; \
+ op result,op1,op2; lbzu opcode,3(eip); \
+ sthbrx result,MEM; GOTNEXT; \
+op##w_imm8: lbz op2,1(eip); SET_FLAGS(FLAGS_LOG(W)); lhbrx op1,MEM; \
+ extsb op2,op2; lbzu opcode,2(eip); \
+ op result,op1,op2; \
+ sthbrx result,MEM; GOTNEXT; \
+op##l_imm_eax: addi base,state,0; li offset,EAX; \
+op##l_imm: lwbrx op1,MEM; SET_FLAGS(FLAGS_LOG(L)); lwbrx op2,eip,one; \
+ op result,op1,op2; lbzu opcode,5(eip); \
+ stwbrx result,MEM; GOTNEXT; \
+op##l_imm8: lbz op2,1(eip); SET_FLAGS(FLAGS_LOG(L)); lwbrx op1,MEM; \
+ extsb op2,op2; lbzu opcode,2(eip); \
+ op result,op1,op2; \
+ stwbrx result,MEM; GOTNEXT
+
+ LOGICAL(or)
+
+ LOGICAL(and)
+
+ LOGICAL(xor)
+
+testb_reg_mem: lbzx op1,MEM
+ SET_FLAGS(FLAGS_TEST(B))
+ lbzx op2,REG
+ and result,op1,op2
+ extsb r3,result
+ cmpwi cr6,r3,0
+ NEXT
+
+testw_reg_mem: lhbrx op1,MEM
+ SET_FLAGS(FLAGS_TEST(W))
+ lhbrx op2,REG
+ and result,op1,op2
+ extsh r3,result
+ cmpwi cr6,r3,0
+ NEXT
+
+testl_reg_mem: lwbrx op1,MEM
+ SET_FLAGS(FLAGS_TEST(L))
+ lwbrx op2,REG
+ and result,op1,op2
+ cmpwi cr6,result,0
+ NEXT
+
+testb_imm_al: addi base,state,0
+ li offset,AL
+testb_imm: lbzx op1,MEM
+ SET_FLAGS(FLAGS_TEST(B))
+ lbz op2,1(eip)
+ and result,op1,op2
+ lbzu opcode,2(eip)
+ extsb r3,result
+ cmpwi cr6,r3,0
+ GOTNEXT
+
+testw_imm_ax: addi base,state,0
+ li offset,AX
+testw_imm: lhbrx op1,MEM
+ SET_FLAGS(FLAGS_TEST(W))
+ lhbrx op2,eip,one
+ and result,op1,op2
+ lbzu opcode,3(eip)
+ extsh r3,result
+ cmpwi cr6,r3,0
+ GOTNEXT
+
+testl_imm_eax: addi base,state,0
+ li offset,EAX
+testl_imm: lwbrx op1,MEM
+ SET_FLAGS(FLAGS_TEST(L))
+ lwbrx op2,eip,one
+ and result,op1,op2
+ lbzu opcode,5(eip)
+ cmpwi cr6,result,0
+ GOTNEXT
+
+/* Not does not affect flags */
+notb: lbzx r3,MEM
+ xori r3,r3,255
+ stbx r3,MEM
+ NEXT
+
+notw: lhzx r3,MEM
+ xori r3,r3,65535
+ sthx r3,MEM
+ NEXT
+
+notl: lwzx r3,MEM
+ not r3,r3
+ stwx r3,MEM
+ NEXT
+
+boundw: lhbrx r4,REG
+ li r3,code_bound
+ lhbrx r5,MEM
+ addi offset,offset,2
+ extsh r4,r4
+ lhbrx r6,MEM
+ extsh r5,r5
+ cmpw r4,r5
+ extsh r6,r6
+ blt- complex
+ cmpw r4,r6
+ ble+ nop
+ b complex
+
+boundl: lwbrx r4,REG
+ li r3,code_bound
+ lwbrx r5,MEM
+ addi offset,offset,4
+ lwbrx r6,MEM
+ cmpw r4,r5
+ blt- complex
+ cmpw r4,r6
+ ble+ nop
+ b complex
+
+/* Bit test and modify instructions */
+
+/* Common routine: bit index in op2; returns the memory value in r3, the mask
+in op2, and the AND of mask and value in op1. CF is set as with a 32 bit add
+when the bit is non-zero, since result (which is cleared) will be less than
+op1; the comparison records this in cr4. All other flags are undefined
+according to the Intel documentation. Here OF and SF are cleared and ZF is
+set as a side effect of result being cleared. */
+_setup_bitw: cmpw base,state
+ SET_FLAGS(FLAGS_BTEST)
+ extsh op2,op2
+ beq- 1f
+ srawi r4,op2,4
+ add offset,offset,r4
+1: clrlwi op2,op2,28 # true bit index
+ lhbrx r3,MEM
+ slw op2,one,op2 # build mask
+ li result,0 # implicitly sets CF
+ and op1,r3,op2 # if result<op1
+ cmplw cr4,result,op1 # sets CF in cr4
+ blr
+
+_setup_bitl: cmpw base,state
+ SET_FLAGS(FLAGS_BTEST)
+ beq- 1f
+ srawi r4,op2,5
+ add offset,offset,r4
+1: lwbrx r3,MEM
+ rotlw op2,one,op2 # build mask
+ li result,0
+ and op1,r3,op2
+ cmplw cr4,result,op1
+ blr
+
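+/*
+ * What the shared setup computes, as a hedged C sketch for the 16 bit case
+ * (for memory operands the effective word is selected first from the signed
+ * bit index, exactly as the srawi above does; names are illustrative):
+ *
+ *   unsigned bt_setup16(const unsigned short *mem, int is_memory,
+ *                       int index, unsigned *mask, unsigned *value)
+ *   {
+ *       if (is_memory)
+ *           mem += index >> 4;          /* word containing the bit        */
+ *       *mask  = 1u << (index & 15);
+ *       *value = *mem;                  /* byte-swapped in the real code  */
+ *       return *value & *mask;          /* non-zero => CF will be set     */
+ *   }
+ */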
+/* The immediate forms of the bit test instructions are rare, since logical operations are often faster */
+btw_imm: NEXTBYTE(op2)
+ b 1f
+btw_reg_mem: lhbrx op2,REG
+1: bl _setup_bitw
+ NEXT
+
+btl_imm: NEXTBYTE(op2)
+ b 1f
+btl_reg_mem: lhbrx op2,REG
+1: bl _setup_bitl
+ NEXT
+
+btcw_imm: NEXTBYTE(op2)
+ b 1f
+btcw_reg_mem: lhbrx op2,REG
+1: bl _setup_bitw
+ xor r3,r3,op2
+ sthbrx r3,MEM
+ NEXT
+
+btcl_imm: NEXTBYTE(op2)
+ b 1f
+btcl_reg_mem: lhbrx op2,REG
+1: bl _setup_bitl
+ xor r3,r3,op2
+ stwbrx result,MEM
+ NEXT
+
+btrw_imm: NEXTBYTE(op2)
+ b 1f
+btrw_reg_mem: lhbrx op2,REG
+1: bl _setup_bitw
+ andc r3,r3,op2
+ sthbrx r3,MEM
+ NEXT
+
+btrl_imm: NEXTBYTE(op2)
+ b 1f
+btrl_reg_mem: lhbrx op2,REG
+1: bl _setup_bitl
+ andc r3,r3,op2
+ stwbrx r3,MEM
+ NEXT
+
+btsw_imm: NEXTBYTE(op2)
+ b 1f
+btsw_reg_mem: lhbrx op2,REG
+1: bl _setup_bitw
+ or r3,r3,op2
+ sthbrx r3,MEM
+ NEXT
+
+btsl_imm: NEXTBYTE(op2)
+ b 1f
+btsl_reg_mem: lhbrx op2,REG
+1: bl _setup_bitl
+ or r3,r3,op2
+ stwbrx r3,MEM
+ NEXT
+
+/* Bit string search instructions: only ZF is defined after these, and the
+result value is undefined when the source operand is zero. */
+bsfw: lhbrx result,MEM
+ SET_FLAGS(FLAGS_BSRCH(W))
+ neg r3,result
+ cmpwi cr6,result,0 # sets ZF
+ and r3,r3,result # keep only LSB
+ cntlzw r3,r3
+ subfic r3,r3,31
+ sthbrx r3,REG
+ NEXT
+
+bsfl: lwbrx result,MEM
+ SET_FLAGS(FLAGS_BSRCH(L))
+ neg r3,result
+ cmpwi cr6,result,0 # sets ZF
+ and r3,r3,result # keep only LSB
+ cntlzw r3,r3
+ subfic r3,r3,31
+ stwbrx r3,REG
+ NEXT
+
+bsrw: lhbrx result,MEM
+ SET_FLAGS(FLAGS_BSRCH(W))
+ cntlzw r3,result
+ cmpwi cr6,result,0
+ subfic r3,r3,31
+ sthbrx r3,REG
+ NEXT
+
+bsrl: lwbrx result,MEM
+ SET_FLAGS(FLAGS_BSRCH(L))
+ cntlzw r3,result
+ cmpwi cr6,result,0
+ subfic r3,r3,31
+ stwbrx r3,REG
+ NEXT
+
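+/*
+ * The scan trick used above, in C form (hedged sketch; __builtin_clz is the
+ * GCC intrinsic matching cntlzw, shown only to make the bit manipulation clear):
+ *
+ *   int bsf32(unsigned x) { return 31 - __builtin_clz(x & -x); }  /* x != 0 */
+ *   int bsr32(unsigned x) { return 31 - __builtin_clz(x);      }  /* x != 0 */
+ *
+ * x & -x isolates the least significant set bit, so a single count-leading-zeros
+ * instruction serves both scan directions.
+ */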
+/* Unconditional jumps, first the indirect then the relative forms */
+jmpw: lhbrx eip,MEM
+ lbzux opcode,eip,csb
+ GOTNEXT
+
+jmpl: lwbrx eip,MEM
+ lbzux opcode,eip,csb
+ GOTNEXT
+
+sjmp_w: lbz r3,1(eip)
+ sub eip,eip,csb
+ addi eip,eip,2 # EIP after instruction
+ extsb r3,r3
+ add eip,eip,r3
+ clrlwi eip,eip,16 # modulo 64k
+ lbzux opcode,eip,csb
+ GOTNEXT
+
+jmp_w: lhbrx r3,eip,one # eip now off by 3
+ sub eip,eip,csb
+ addi r3,r3,3 # compensate
+ add eip,eip,r3
+ clrlwi eip,eip,16
+ lbzux opcode,eip,csb
+ GOTNEXT
+
+sjmp_l: lbz r3,1(eip)
+ addi eip,eip,2
+ extsb r3,r3
+ lbzux opcode,eip,r3
+ GOTNEXT
+
+jmp_l: lwbrx r3,eip,one # Simple
+ addi eip,eip,5
+ lbzux opcode,eip,r3
+ GOTNEXT
+
+/* The conditional jumps: although it should not happen,
+byte relative jumps (sjmp) may wrap around in 16 bit mode */
+
+#define NOTTAKEN_S lbzu opcode,2(eip); GOTNEXT
+#define NOTTAKEN_W lbzu opcode,3(eip); GOTNEXT
+#define NOTTAKEN_L lbzu opcode,5(eip); GOTNEXT
+
+#define CONDJMP(cond, eval, flag) \
+sj##cond##_w: EVAL_##eval; bt flag,sjmp_w; NOTTAKEN_S; \
+j##cond##_w: EVAL_##eval; bt flag,jmp_w; NOTTAKEN_W; \
+sj##cond##_l: EVAL_##eval; bt flag,sjmp_l; NOTTAKEN_S; \
+j##cond##_l: EVAL_##eval; bt flag,jmp_l; NOTTAKEN_L; \
+sjn##cond##_w: EVAL_##eval; bf flag,sjmp_w; NOTTAKEN_S; \
+jn##cond##_w: EVAL_##eval; bf flag,jmp_w; NOTTAKEN_W; \
+sjn##cond##_l: EVAL_##eval; bf flag,sjmp_l; NOTTAKEN_S; \
+jn##cond##_l: EVAL_##eval; bf flag,jmp_l; NOTTAKEN_L
+
+ CONDJMP(o, OF, OF)
+ CONDJMP(c, CF, CF)
+ CONDJMP(z, ZF, ZF)
+ CONDJMP(a, ABOVE, ABOVE)
+ CONDJMP(s, SF, SF)
+ CONDJMP(p, PF, PF)
+ CONDJMP(g, SIGNED, SGT)
+ CONDJMP(l, SIGNED, SLT)
+
+jcxz_w: lhz r3,CX(state); cmpwi r3,0; beq- sjmp_w; NOTTAKEN_S
+jcxz_l: lhz r3,CX(state); cmpwi r3,0; beq- sjmp_l; NOTTAKEN_S
+jecxz_w: lwz r3,ECX(state); cmpwi r3,0; beq- sjmp_w; NOTTAKEN_S
+jecxz_l: lwz r3,ECX(state); cmpwi r3,0; beq- sjmp_l; NOTTAKEN_S
+
+/* Note that loop is somewhat strange: the data size attribute gives the size
+of eip, while the address size selects whether the counter is cx or ecx.
+The same applies to jcxz/jecxz. */
+
+loopw_w: li opreg,CX
+ lhbrx r0,REG
+ sub. r0,r0,one
+ sthbrx r0,REG
+ bne+ sjmp_w
+ NOTTAKEN_S
+
+loopl_w: li opreg,ECX
+ lwbrx r0,REG
+ sub. r0,r0,one
+ stwbrx r0,REG
+ bne+ sjmp_w
+ NOTTAKEN_S
+
+loopw_l: li opreg,CX
+ lhbrx r0,REG
+ sub. r0,r0,one
+ sthbrx r0,REG
+ bne+ sjmp_l
+ NOTTAKEN_S
+
+loopl_l: li opreg,ECX
+ lwbrx r0,REG
+ sub. r0,r0,one
+ stwbrx r0,REG
+ bne+ sjmp_l
+ NOTTAKEN_S
+
+loopzw_w: li opreg,CX
+ lhbrx r0,REG
+ EVAL_ZF
+ sub. r0,r0,one
+ sthbrx r0,REG
+ bf ZF,1f
+ bne+ sjmp_w
+1: NOTTAKEN_S
+
+loopzl_w: li opreg,ECX
+ lwbrx r0,REG
+ EVAL_ZF
+ sub. r0,r0,one
+ stwbrx r0,REG
+ bf ZF,1f
+ bne+ sjmp_w
+1: NOTTAKEN_S
+
+loopzw_l: li opreg,CX
+ lhbrx r0,REG
+ EVAL_ZF
+ sub. r0,r0,one
+ sthbrx r0,REG
+ bf ZF,1f
+ bne+ sjmp_l
+1: NOTTAKEN_S
+
+loopzl_l: li opreg,ECX
+ lwbrx r0,REG
+ EVAL_ZF
+ sub. r0,r0,one
+ stwbrx r0,REG
+ bf ZF,1f
+ bne+ sjmp_l
+1: NOTTAKEN_S
+
+loopnzw_w: li opreg,CX
+ lhbrx r0,REG
+ EVAL_ZF
+ sub. r0,r0,one
+ sthbrx r0,REG
+ bt ZF,1f
+ bne+ sjmp_w
+1: NOTTAKEN_S
+
+loopnzl_w: li opreg,ECX
+ lwbrx r0,REG
+ EVAL_ZF
+ sub. r0,r0,one
+ stwbrx r0,REG
+ bt ZF,1f
+ bne+ sjmp_w
+1: NOTTAKEN_S
+
+loopnzw_l: li opreg,CX
+ lhbrx r0,REG
+ EVAL_ZF
+ sub. r0,r0,one
+ sthbrx r0,REG
+ bt ZF,1f
+ bne+ sjmp_l
+1: NOTTAKEN_S
+
+loopnzl_l: li opreg,ECX
+ lwbrx r0,REG
+ EVAL_ZF
+ sub. r0,r0,one
+ stwbrx r0,REG
+ bt ZF,1f
+ bne+ sjmp_l
+1: NOTTAKEN_S
+
+/* Memory indirect calls are rare enough that we share code to limit duplication */
+callw_sp_mem: lhbrx r3,MEM
+ sub r4,eip,csb
+ addi r4,r4,1 # r4 is now return address
+ b 1f
+ .equ calll_sp_mem, unimpl
+
+callw_sp: lhbrx r3,eip,one
+ sub r4,eip,csb
+ addi r4,r4,3 # r4 is return address
+ add r3,r4,r3
+1: clrlwi eip,r3,16
+ li r5,SP
+ lhbrx r6,state,r5 # get sp
+ addi r6,r6,-2
+ lbzux opcode,eip,csb
+ sthbrx r6,state,r5 # update sp
+ clrlwi r6,r6,16
+ sthbrx r4,ssb,r6 # push return address
+ GOTNEXT
+ .equ calll_sp, unimpl
+
+retw_sp_imm: li opreg,SP
+ lhbrx r4,REG
+ lhbrx r6,eip,one
+ addi r5,r4,2
+ lhbrx eip,ssb,r4
+ lbzux opcode,eip,csb
+ add r5,r5,r6
+ sthbrx r5,REG
+ GOTNEXT
+
+ .equ retl_sp_imm, unimpl
+
+retw_sp: li opreg,SP
+ lhbrx r4,REG
+ addi r5,r4,2
+ lhbrx eip,ssb,r4
+ lbzux opcode,eip,csb
+ sthbrx r5,REG
+ GOTNEXT
+
+ .equ retl_sp, unimpl
+
+/* Enter is a mess, and the description in Intel's documentation is actually
+ * wrong in most revisions (all the PPro/PII manuals I have; only the old
+ * Pentium one is correct)!
+ */
+
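+/*
+ * For reference, the 16 bit enter semantics implemented below, as a hedged
+ * pseudo-C sketch (push16/read16 are illustrative helpers; level is already
+ * taken modulo 32, as in the hardware):
+ *
+ *   push16(bp);
+ *   frame = sp;                          /* points at the saved bp      */
+ *   for (i = 1; i < level; i++) {        /* copy enclosing frame ptrs   */
+ *       bp -= 2;
+ *       push16(read16(ss, bp));
+ *   }
+ *   if (level > 0)
+ *       push16(frame);
+ *   bp  = frame;
+ *   sp -= locals;                        /* allocate the local storage  */
+ */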
+enterw_sp: lhbrx r0,eip,one # Stack space to allocate
+ li opreg,SP
+ lhbrx r3,REG # SP
+ li r7,BP
+ lbzu r4,3(eip) # nesting level
+ addi r3,r3,-2
+ lhbrx r5,state,r7 # Original BP
+ clrlwi r3,r3,16
+ sthbrx r5,ssb,r3 # Push BP
+ andi. r4,r4,31 # modulo 32 and test
+ mr r6,r3 # Save frame pointer to temp
+ beq 3f
+ mtctr r4 # iterate level-1 times
+ b 2f
+1: addi r5,r5,-2 # copy list of frame pointers
+ clrlwi r5,r5,16
+ lhzx r4,ssb,r5
+ addi r3,r3,-2
+ clrlwi r3,r3,16
+ sthx r4,ssb,r3
+2: bdnz 1b
+ addi r3,r3,-2 # save current frame pointer
+ clrlwi r3,r3,16
+ sthbrx r6,ssb,r3
+3: sthbrx r6,state,r7 # New BP
+ sub r3,r3,r0
+ sthbrx r3,REG # Save new stack pointer
+ NEXT
+
+ .equ enterl_sp, unimpl
+
+leavew_sp: li opreg,BP
+ lhbrx r3,REG # Stack = BP
+ addi r4,r3,2 #
+ lhzx r3,ssb,r3
+ li opreg,SP
+ sthbrx r4,REG # New Stack
+ sth r3,BP(state) # Popped BP
+ NEXT
+
+ .equ leavel_sp, unimpl
+
+/* String instructions: first a generic setup routine, which exits early
+if there is a repeat prefix with a count of 0 */
+#define STRINGSRC base,offset
+#define STRINGDST esb,opreg
+
+_setup_stringw: li offset,SI #
+ rlwinm. r3,opcode,19,0,1 # lt=repnz, gt= repz, eq none
+ li opreg,DI
+ lhbrx offset,state,offset # load si
+ li r3,1 # no repeat
+ lhbrx opreg,state,opreg # load di
+ beq 1f # no repeat
+ li r3,CX
+ lhbrx r3,state,r3 # load CX
+ cmpwi r3,0
+ beq nop # early exit here !
+1: mtctr r3 # ctr=CX or 1
+ li r7,1 # stride
+ bflr+ DF
+ li r7,-1 # change stride sign
+ blr
+
+/* Ending routine to update all changed registers (goes directly to NEXT) */
+_finish_strw: li r4,SI
+ sthbrx offset,state,r4 # update si
+ li r4,DI
+ sthbrx opreg,state,r4 # update di
+ beq nop
+ mfctr r3
+ li r4,CX
+ sthbrx r3,state,r4 # update cx
+ NEXT
+
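+/*
+ * The setup/finish pair above implements the usual rep framework; roughly,
+ * in hedged pseudo-C (16 bit addressing, df gives the stride sign, and each
+ * instruction further scales the stride by its operand size):
+ *
+ *   count  = has_rep_prefix ? cx : 1;
+ *   if (has_rep_prefix && count == 0)
+ *       return;                            /* early exit, nothing changes */
+ *   stride = df ? -1 : 1;
+ *   do { one_iteration(); si += stride; di += stride; } while (--count);
+ *   write_back_si_di();                    /* always                      */
+ *   if (has_rep_prefix)
+ *       cx = count;                        /* remaining count: repz/repnz */
+ *                                          /* may leave it non-zero       */
+ */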
+
+lodsb_a16: bl _setup_stringw
+1: lbzx r0,STRINGSRC # [rep] lodsb
+ add offset,offset,r7
+ clrlwi offset,offset,16
+ bdnz 1b
+ stb r0,AL(state)
+ b _finish_strw
+
+lodsw_a16: bl _setup_stringw
+ slwi r7,r7,1
+1: lhzx r0,STRINGSRC # [rep] lodsw
+ add offset,offset,r7
+ clrlwi offset,offset,16
+ bdnz 1b
+ sth r0,AX(state)
+ b _finish_strw
+
+lodsl_a16: bl _setup_stringw
+ slwi r7,r7,2
+1: lwzx r0,STRINGSRC # [rep] lodsl
+ add offset,offset,r7
+ clrlwi offset,offset,16
+ bdnz 1b
+ stw r0,EAX(state)
+ b _finish_strw
+
+stosb_a16: bl _setup_stringw
+ lbz r0,AL(state)
+1: stbx r0,STRINGDST # [rep] stosb
+ add opreg,opreg,r7
+ clrlwi opreg,opreg,16
+ bdnz 1b
+ b _finish_strw
+
+stosw_a16: bl _setup_stringw
+ lhz r0,AX(state)
+ slwi r7,r7,1
+1: sthx r0,STRINGDST # [rep] stosw
+ add opreg,opreg,r7
+ clrlwi opreg,opreg,16
+ bdnz 1b
+ b _finish_strw
+
+stosl_a16: bl _setup_stringw
+ lwz r0,EAX(state)
+ slwi r7,r7,2
+1: stwx r0,STRINGDST # [rep] stosl
+ add opreg,opreg,r7
+ clrlwi opreg,opreg,16
+ bdnz 1b
+ b _finish_strw
+
+movsb_a16: bl _setup_stringw
+1: lbzx r0,STRINGSRC # [rep] movsb
+ add offset,offset,r7
+ stbx r0,STRINGDST
+ clrlwi offset,offset,16
+ add opreg,opreg,r7
+ clrlwi opreg,opreg,16
+ bdnz 1b
+ b _finish_strw
+
+movsw_a16: bl _setup_stringw
+ slwi r7,r7,1
+1: lhzx r0,STRINGSRC # [rep] movsw
+ add offset,offset,r7
+ sthx r0,STRINGDST
+ clrlwi offset,offset,16
+ add opreg,opreg,r7
+ clrlwi opreg,opreg,16
+ bdnz 1b
+ b _finish_strw
+
+movsl_a16: bl _setup_stringw
+ slwi r7,r7,2
+1: lwzx r0,STRINGSRC # [rep] movsl
+ add offset,offset,r7
+ stwx r0,STRINGDST
+ clrlwi offset,offset,16
+ add opreg,opreg,r7
+ clrlwi opreg,opreg,16
+ bdnz 1b
+ b _finish_strw
+
+/* At least on a Pentium, repeated string I/O instructions check the port
+access permission even if the count is 0! So the order of the checks does
+not matter. */
+insb_a16: li r4,DX
+ li r3,code_insb_a16
+ lhbrx r4,state,r4
+ bl _check_port
+ bl _setup_stringw
+ lwz base,iobase(state)
+1: lbzx r0,base,r4 # [rep] insb
+ eieio
+ stbx r0,STRINGDST
+ add opreg,opreg,r7
+ clrlwi opreg,opreg,16
+ bdnz 1b
+ b _finish_strw
+
+insw_a16: li r4,DX
+ li r3,code_insw_a16
+ lhbrx r4,state,r4
+ bl _check_port
+ bl _setup_stringw
+ lwz base,iobase(state)
+ slwi r7,r7,1
+1: lhzx r0,base,r4 # [rep] insw
+ eieio
+ sthx r0,STRINGDST
+ add opreg,opreg,r7
+ clrlwi opreg,opreg,16
+ bdnz 1b
+ b _finish_strw
+
+insl_a16: li r4,DX
+ li r3,code_insl_a16
+ lhbrx r4,state,r4
+ bl _check_port
+ bl _setup_stringw
+ lwz base,iobase(state)
+ slwi r7,r7,2
+1: lwzx r0,base,r4 # [rep] insl
+ eieio
+ stwx r0,STRINGDST
+ add opreg,opreg,r7
+ clrlwi opreg,opreg,16
+ bdnz 1b
+ b _finish_strw
+
+outsb_a16: li r4,DX
+ li r3,code_outsb_a16
+ lhbrx r4,state,r4
+ bl _check_port
+ bl _setup_stringw
+ lwz r6,iobase(state)
+1: lbzx r0,STRINGSRC # [rep] outsb
+ add offset,offset,r7
+ stbx r0,r6,r4
+ clrlwi offset,offset,16
+ eieio
+ bdnz 1b
+ b _finish_strw
+
+outsw_a16: li r4,DX
+ li r3,code_outsw_a16
+ lhbrx r4,state,r4
+ bl _check_port
+ bl _setup_stringw
+ li r5,DX
+ lwz r6,iobase(state)
+ slwi r7,r7,1
+1: lhzx r0,STRINGSRC # [rep] outsw
+ add offset,offset,r7
+ sthx r0,r6,r4
+ clrlwi offset,offset,16
+ eieio
+ bdnz 1b
+ b _finish_strw
+
+outsl_a16: li r4,DX
+ li r3,code_outsl_a16
+ lhbrx r4,state,r4
+ bl _check_port
+ bl _setup_stringw
+ lwz r6,iobase(state)
+ slwi r7,r7,2
+1: lwzx r0,STRINGSRC # [rep] outsl
+ add offset,offset,r7
+ stwx r0,r6,r4
+ clrlwi offset,offset,16
+ eieio
+ bdnz 1b
+ b _finish_strw
+
+cmpsb_a16: bl _setup_stringw
+ SET_FLAGS(FLAGS_CMP(B))
+ blt 3f # repnz prefix
+1: lbzx op1,STRINGSRC # [repz] cmpsb
+ add offset,offset,r7
+ lbzx op2,STRINGDST
+ add opreg,opreg,r7
+ cmplw cr4,op1,op2
+ clrlwi offset,offset,16
+ clrlwi opreg,opreg,16
+ bdnzt CF+2,1b
+2: extsb r3,op1
+ extsb r4,op2
+ cmpw cr6,r3,r4
+ sub result,op1,op2
+ b _finish_strw
+
+3: lbzx op1,STRINGSRC # repnz cmpsb
+ add offset,offset,r7
+ lbzx op2,STRINGDST
+ add opreg,opreg,r7
+ cmplw cr4,op1,op2
+ clrlwi offset,offset,16
+ clrlwi opreg,opreg,16
+ bdnzf CF+2,3b
+ b 2b
+
+cmpsw_a16: bl _setup_stringw
+ SET_FLAGS(FLAGS_CMP(W))
+ slwi r7,r7,1
+ blt 3f # repnz prefix
+1: lhbrx op1,STRINGSRC # [repz] cmpsw
+ add offset,offset,r7
+ lhbrx op2,STRINGDST
+ add opreg,opreg,r7
+ cmplw cr4,op1,op2
+ clrlwi offset,offset,16
+ clrlwi opreg,opreg,16
+ bdnzt CF+2,1b
+2: extsh r3,op1
+ extsh r4,op2
+ cmpw cr6,r3,r4
+ sub result,op1,op2
+ b _finish_strw
+
+3: lhbrx op1,STRINGSRC # repnz cmpsw
+ add offset,offset,r7
+ lhbrx op2,STRINGDST
+ add opreg,opreg,r7
+ cmplw cr4,op1,op2
+ clrlwi offset,offset,16
+ clrlwi opreg,opreg,16
+ bdnzf CF+2,3b
+ b 2b
+
+cmpsl_a16: bl _setup_stringw
+ SET_FLAGS(FLAGS_CMP(L))
+ slwi r7,r7,2
+ blt 3f # repnz prefix
+1: lwbrx op1,STRINGSRC # [repz] cmpsl
+ add offset,offset,r7
+ lwbrx op2,STRINGDST
+ add opreg,opreg,r7
+ cmplw cr4,op1,op2
+ clrlwi offset,offset,16
+ clrlwi opreg,opreg,16
+ bdnzt CF+2,1b
+2: cmpw cr6,op1,op2
+ sub result,op1,op2
+ b _finish_strw
+
+3: lwbrx op1,STRINGSRC # repnz cmpsl
+ add offset,offset,r7
+ lwbrx op2,STRINGDST
+ add opreg,opreg,r7
+ cmplw cr4,op1,op2
+ clrlwi offset,offset,16
+ clrlwi opreg,opreg,16
+ bdnzf CF+2,3b
+ b 2b
+
+scasb_a16: bl _setup_stringw
+ lbzx op1,AL,state # AL
+ SET_FLAGS(FLAGS_CMP(B))
+ bgt 3f # repz prefix
+1: lbzx op2,STRINGDST # [repnz] scasb
+ add opreg,opreg,r7
+ cmplw cr4,op1,op2
+ clrlwi opreg,opreg,16
+ bdnzf CF+2,1b
+2: extsb r3,op1
+ extsb r4,op2
+ cmpw cr6,r3,r4
+ sub result,op1,op2
+ b _finish_strw
+
+3: lbzx op2,STRINGDST # repz scasb
+ add opreg,opreg,r7
+ cmplw cr4,op1,op2
+ clrlwi opreg,opreg,16
+ bdnzt CF+2,3b
+ b 2b
+
+scasw_a16: bl _setup_stringw
+ lhbrx op1,AX,state
+ SET_FLAGS(FLAGS_CMP(W))
+ slwi r7,r7,1
+ bgt 3f # repz prefix
+1: lhbrx op2,STRINGDST # [repnz] scasw
+ add opreg,opreg,r7
+ cmplw cr4,op1,op2
+ clrlwi opreg,opreg,16
+ bdnzf CF+2,1b
+2: extsh r3,op1
+ extsh r4,op2
+ cmpw cr6,r3,r4
+ sub result,op1,op2
+ b _finish_strw
+
+3: lhbrx op2,STRINGDST # repz scasw
+ add opreg,opreg,r7
+ cmplw cr4,op1,op2
+ clrlwi opreg,opreg,16
+ bdnzt CF+2,3b
+ b 2b
+
+scasl_a16: bl _setup_stringw
+ lwbrx op1,EAX,state
+ SET_FLAGS(FLAGS_CMP(L))
+ slwi r7,r7,2
+ bgt 3f # repz prefix
+1: lwbrx op2,STRINGDST # [repnz] scasl
+ add opreg,opreg,r7
+ cmplw cr4,op1,op2
+ clrlwi opreg,opreg,16
+ bdnzf CF+2,1b
+2: cmpw cr6,op1,op2
+ sub result,op1,op2
+ b _finish_strw
+
+3: lwbrx op2,STRINGDST # repz scasl
+ add opreg,opreg,r7
+ cmplw cr4,op1,op2
+ clrlwi opreg,opreg,16
+ bdnzt CF+2,3b
+ b 2b
+
+ .equ lodsb_a32, unimpl
+ .equ lodsw_a32, unimpl
+ .equ lodsl_a32, unimpl
+ .equ stosb_a32, unimpl
+ .equ stosw_a32, unimpl
+ .equ stosl_a32, unimpl
+ .equ movsb_a32, unimpl
+ .equ movsw_a32, unimpl
+ .equ movsl_a32, unimpl
+ .equ insb_a32, unimpl
+ .equ insw_a32, unimpl
+ .equ insl_a32, unimpl
+ .equ outsb_a32, unimpl
+ .equ outsw_a32, unimpl
+ .equ outsl_a32, unimpl
+ .equ cmpsb_a32, unimpl
+ .equ cmpsw_a32, unimpl
+ .equ cmpsl_a32, unimpl
+ .equ scasb_a32, unimpl
+ .equ scasw_a32, unimpl
+ .equ scasl_a32, unimpl
+
+xlatb_a16: li offset,BX
+ lbz r3,AL(state)
+ lhbrx offset,offset,state
+ add r3,r3,base
+ lbzx r3,r3,offset
+ stb r3,AL(state)
+ NEXT
+
+ .equ xlatb_a32, unimpl
+
+/*
+ * Shifts and rotates: note the oddity that rotates do not affect SF/ZF/AF/PF
+ * but shifts do. Testing has also indicated that rotates with a count of zero
+ * do not affect any flag. The documentation specifies this for shifts but
+ * is more obscure for rotates. The overflow flag setting is only specified
+ * when the count is 1; otherwise OF is undefined, which simplifies emulation.
+ */
+
+/*
+ * The rotates through carry are among the most difficult instructions;
+ * they are implemented as a shift of 2*n+some bits depending on the case.
+ * First the left rotates through carry.
+ */
+
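+/*
+ * As a reference point, byte rcl through carry in plain C (hedged sketch;
+ * the count is reduced modulo 9 because CF:data8 is a 9 bit quantity, and
+ * the "count 0 mod 32 changes no flags" case is handled separately above):
+ *
+ *   unsigned rcl8(unsigned val, unsigned cf, unsigned count)
+ *   {
+ *       unsigned x = (cf << 8) | (val & 0xff);       /* 9 bit rotatee */
+ *       count %= 9;
+ *       x = ((x << count) | (x >> (9 - count))) & 0x1ff;
+ *       /* new CF = bit 8, result = low 8 bits,                          */
+ *       /* OF (only defined for count 1) = new CF ^ bit 7 of the result  */
+ *       return x;
+ *   }
+ */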
+/* Byte rcl is performed on 18 bits (17 actually used) in a single register */
+rclb_imm: NEXTBYTE(r3)
+ b 1f
+rclb_cl: lbz r3,CL(state)
+ b 1f
+rclb_1: li r3,1
+1: lbzx r0,MEM
+ andi. r3,r3,31 # count%32
+ addc r4,flags,flags # CF_IN->xer[ca]
+ RES2CF(r6)
+ subfe r4,result,op1
+ mulli r5,r3,29 # 29=ceil(256/9)
+ CF_ROTCNT(r7)
+ addze r6,r6
+ CF_POL_INSERT(r0,23)
+ srwi r5,r5,8 # count/9
+ rlwnm r6,r6,r7,0x100
+ xor r0,r0,r6 # (23)0:CF:data8
+ rlwimi r5,r5,3,26,28 # 9*(count/9)
+ rlwimi r0,r0,23,0,7 # CF:(data8):(14)0:CF:data8
+ sub r3,r3,r5 # count%9
+ beq- nop # no flags changed if count 0
+ ROTATE_FLAGS
+ rlwnm r0,r0,r3,0x000001ff # (23)0:NewCF:Result8
+ rlwimi flags,r0,19,CF_VALUE
+ stbx r0,MEM
+ rlwimi flags,r0,18,OF_XOR
+ NEXT
+
+/* Word rcl is performed on 33 bits (CF:data16:CF:(15 MSB of data16) */
+rclw_imm: NEXTBYTE(r3)
+ b 1f
+rclw_cl: lbz r3,CL(state)
+ b 1f
+rclw_1: li r3,1
+1: lhbrx r0,MEM
+ andi. r3,r3,31 # count=count%32
+ addc r4,flags,flags
+ RES2CF(r6)
+ subfe r4,result,op1
+ addi r5,r3,15 # modulo 17: >=32 if >=17
+ CF_ROTCNT(r7)
+ addze r6,r6
+ addi r7,r7,8
+ CF_POL_INSERT(r0,15)
+ srwi r5,r5,5 # count/17
+ rlwnm r6,r6,r7,0x10000
+ rlwimi r5,r5,4,27,27 # 17*(count/17)
+ xor r0,r0,r6 # (15)0:CF:data16
+ sub r3,r3,r5 # count%17
+ rlwinm r4,r0,15,0xffff0000 # CF:(15 MSB of data16):(16)0
+ slw r0,r0,r3 # New carry and MSBs
+ rlwnm r4,r4,r3,16,31 # New LSBs
+ beq- nop # no flags changed if count 0
+ ROTATE_FLAGS
+ add r0,r0,r4 # result
+ rlwimi flags,r0,11,CF_VALUE
+ sthbrx r0,MEM
+ rlwimi flags,r0,10,OF_XOR
+ NEXT
+
+/* Longword rcl only needs 64 bits because the maximum rotate count is 31 ! */
+rcll_imm: NEXTBYTE(r3)
+ b 1f
+rcll_cl: lbz r3,CL(state)
+ b 1f
+rcll_1: li r3,1
+1: lwbrx r0,MEM
+ andi. r3,r3,31 # count=count%32
+ addc r4,r4,flags # ~XER[CA]
+ RES2CF(r6)
+ subfe r4,result,op1
+ CF_ROTCNT(r7)
+ addze r6,r6
+ srwi r4,r0,1 # 0:(31 MSB of data32)
+ addi r7,r7,23
+ CF_POL_INSERT(r4,0)
+ rlwnm r6,r6,r7,0,0
+ beq- nop # no flags changed if count 0
+ subfic r5,r3,32
+ xor r4,r4,r6
+ ROTATE_FLAGS
+ slw r0,r0,r3 # New MSBs
+ srw r5,r4,r5 # New LSBs
+ rlwnm r4,r4,r3,0,0 # New Carry
+ add r0,r0,r5 # result
+ rlwimi flags,r4,28,CF_VALUE
+ rlwimi flags,r0,27,OF_XOR
+ stwbrx r0,MEM
+ NEXT
+
+/* Right rotates through carry are even worse because the PPC only has a left
+rotate instruction. This becomes somewhat tough when combined with the modulo
+9, 17, or 33 operation and the rules for the OF and CF flag settings. */
+/* Byte rcr is performed on 17 bits */
+rcrb_imm: NEXTBYTE(r3)
+ b 1f
+rcrb_cl: lbz r3,CL(state)
+ b 1f
+rcrb_1: li r3,1
+1: lbzx r0,MEM
+ andi. r3,r3,31 # count%32
+ addc r4,flags,flags # cf_in->xer[ca]
+ RES2CF(r6)
+ mulli r5,r3,29 # 29=ceil(256/9)
+ subfe r4,result,op1
+ CF_ROTCNT(r7)
+ addze r6,r6
+ CF_POL_INSERT(r0,23)
+ srwi r5,r5,8 # count/9
+ rlwimi r0,r0,9,0x0001fe00 # (15)0:data8:0:data8
+ rlwnm r6,r6,r7,0x100
+ rlwimi r5,r5,3,26,28 # 9*(count/9)
+ xor r0,r0,r6 # (15)0:data8:CF:data8
+ sub r3,r3,r5 # count%9
+ beq- nop # no flags changed if count 0
+ ROTATE_FLAGS
+ srw r0,r0,r3 # (23)junk:NewCF:Result8
+ rlwimi flags,r0,19,CF_VALUE|OF_XOR
+ stbx r0,MEM
+ NEXT
+
+/* Word rcr is a 33 bit right shift with a quirk: the 33rd bit is only needed
+when the rotate count is 16, because rotating a 32 bit quantity left or right
+by 16 gives the same result! */
+rcrw_imm: NEXTBYTE(r3)
+ b 1f
+rcrw_cl: lbz r3,CL(state)
+ b 1f
+rcrw_1: li r3,1
+1: lhbrx r0,MEM
+ andi. r3,r3,31 # count%32
+ addc r4,flags,flags # cf_in->xer[ca]
+ RES2CF(r6)
+ subfe r4,result,op1
+ addi r5,r3,15 # >=32 if >=17
+ CF_ROTCNT(r7)
+ addze r6,r6
+ addi r7,r7,8
+ CF_POL_INSERT(r0,15)
+ srwi r5,r5,5 # count/17
+ rlwnm r6,r6,r7,0x10000
+ rlwinm r7,r0,16,0x01 # MSB of data16
+ rlwimi r0,r0,17,0xfffe0000 # (15 MSB of data16):0:data16
+ rlwimi r5,r5,4,27,27 # 17*(count/17)
+ xor r0,r0,r6 # (15 MSB of data16):CF:data16
+ sub r3,r3,r5 # count%17
+ beq- nop # no flags changed if count 0
+ srw r0,r0,r3 # shift right
+ rlwnm r7,r7,r3,0x10000 # just in case count=16
+ ROTATE_FLAGS
+ add r0,r0,r7 # junk15:NewCF:result16
+ rlwimi flags,r0,11,CF_VALUE|OF_XOR
+ sthbrx r0,MEM
+ NEXT
+
+/* Longword rcr need only 64 bits since the rotate count is limited to 31 */
+rcrl_imm: NEXTBYTE(r3)
+ b 1f
+rcrl_cl: lbz r3,CL(state)
+ b 1f
+rcrl_1: li r3,1
+1: lwbrx r0,MEM
+ andi. r3,r3,31 # count%32
+ addc r4,flags,flags
+ RES2CF(r6)
+ subfe r4,result,op1
+ CF_ROTCNT(r7)
+ slwi r4,r0,1 # (31MSB of data32):0
+ addze r6,r6
+ addi r7,r7,24
+ CF_POL_INSERT(r4,31)
+ rlwnm r6,r6,r7,0x01
+ beq- nop # no flags changed if count 0
+ subfic r7,r3,32
+ xor r4,r4,r6
+ srw r0,r0,r3 # Result LSB
+ slw r5,r4,r7 # Result MSB
+ srw r4,r4,r3 # NewCF in LSB
+ add r0,r0,r5 # result
+ rlwimi flags,r4,27,CF_VALUE
+ stwbrx r0,MEM
+ rlwimi flags,r0,27,OF_XOR
+ NEXT
+
+/* After the rotates through carry, normal rotates are so simple ! */
+rolb_imm: NEXTBYTE(r3)
+ b 1f
+rolb_cl: lbz r3,CL(state)
+ b 1f
+rolb_1: li r3,1
+1: lbzx r0,MEM
+ andi. r4,r3,31 # count%32 == 0 ?
+ clrlwi r3,r3,29 # count%8
+ rlwimi r0,r0,24,0xff000000 # replicate for shift in
+ beq- nop # no flags changed if count 0
+ ROTATE_FLAGS
+ rotlw r0,r0,r3
+ rlwimi flags,r0,27,CF_VALUE # New CF
+ stbx r0,MEM
+ rlwimi flags,r0,26,OF_XOR # New OF (CF xor MSB)
+ NEXT
+
+rolw_imm: NEXTBYTE(r3)
+ b 1f
+rolw_cl: lbz r3,CL(state)
+ b 1f
+rolw_1: li r3,1
+1: lhbrx r0,MEM
+ andi. r3,r3,31
+ rlwimi r0,r0,16,0,15 # duplicate
+ beq- nop # no flags changed if count 0
+ ROTATE_FLAGS
+ rotlw r0,r0,r3 # result word duplicated
+ rlwimi flags,r0,27,CF_VALUE # New CF
+ sthbrx r0,MEM
+ rlwimi flags,r0,26,OF_XOR # New OF (CF xor MSB)
+ NEXT
+
+roll_imm: NEXTBYTE(r3)
+ b 1f
+roll_cl: lbz r3,CL(state)
+ b 1f
+roll_1: li r3,1
+1: lwbrx r0,MEM
+ andi. r3,r3,31
+ beq- nop # no flags changed if count 0
+ ROTATE_FLAGS
+ rotlw r0,r0,r3 # result
+ rlwimi flags,r0,27,CF_VALUE # New CF
+ stwbrx r0,MEM
+ rlwimi flags,r0,26,OF_XOR # New OF (CF xor MSB)
+ NEXT
+
+rorb_imm: NEXTBYTE(r3)
+ b 1f
+rorb_cl: lbz r3,CL(state)
+ b 1f
+rorb_1: li r3,1
+1: lbzx r0,MEM
+ andi. r4,r3,31 # count%32 == 0 ?
+ clrlwi r3,r3,29 # count%8
+ rlwimi r0,r0,8,0x0000ff00 # replicate for shift in
+ beq- nop # no flags changed if count 0
+ ROTATE_FLAGS
+ srw r0,r0,r3
+ rlwimi flags,r0,20,CF_VALUE
+ stbx r0,MEM
+ rlwimi flags,r0,19,OF_XOR
+ NEXT
+
+rorw_imm: NEXTBYTE(r3)
+ b 1f
+rorw_cl: lbz r3,CL(state)
+ b 1f
+rorw_1: li r3,1
+1: lhbrx r0,MEM
+ andi. r4,r3,31
+ clrlwi r3,r3,28 # count %16
+ rlwimi r0,r0,16,0xffff0000 # duplicate
+ beq- nop # no flags changed if count 0
+ ROTATE_FLAGS
+ srw r0,r0,r3 # junk16:result16
+ rlwimi flags,r0,12,CF_VALUE
+ sthbrx r0,MEM
+ rlwimi flags,r0,11,OF_XOR
+ NEXT
+
+rorl_imm: NEXTBYTE(r3)
+ b 1f
+rorl_cl: lbz r3,CL(state)
+ b 1f
+rorl_1: li r3,1
+1: lwbrx r0,MEM
+ andi. r4,r3,31
+ neg r3,r3
+ beq- nop # no flags changed if count 0
+ ROTATE_FLAGS
+ rotlw r0,r0,r3 # result
+ rlwimi flags,r0,28,CF_VALUE
+ stwbrx r0,MEM
+ rlwimi flags,r0,27,OF_XOR
+ NEXT
+
+/* Right arithmetic shifts: they clear OF whenever count!=0 */
+#define SAR_FLAGS CF_ZERO|OF_ZERO|RESL
+sarb_imm: NEXTBYTE(r3)
+ b 1f
+sarb_cl: lbz r3,CL(state)
+ b 1f
+sarb_1: li r3,1
+1: lbzx r4,MEM
+ andi. r3,r3,31
+ addi r5,r3,-1
+ extsb r4,r4
+ beq- nop # no flags changed if count 0
+ SET_FLAGS(SAR_FLAGS)
+ sraw result,r4,r3
+ srw r5,r4,r5
+ stbx result,MEM
+ rlwimi flags,r5,27,CF_VALUE
+ NEXT
+
+sarw_imm: NEXTBYTE(r3)
+ b 1f
+sarw_cl: lbz r3,CL(state)
+ b 1f
+sarw_1: li r3,1
+1: lhbrx r4,MEM
+ andi. r3,r3,31
+ addi r5,r3,-1
+ extsh r4,r4
+ beq- nop # no flags changed if count 0
+ SET_FLAGS(SAR_FLAGS)
+ sraw result,r4,r3
+ srw r5,r4,r5
+ sthbrx result,MEM
+ rlwimi flags,r5,27,CF_VALUE
+ NEXT
+
+sarl_imm: NEXTBYTE(r3)
+ b 1f
+sarl_cl: lbz r3,CL(state)
+ b 1f
+sarl_1: li r3,1
+1: lwbrx r4,MEM
+ andi. r3,r3,31
+ addi r5,r3,-1
+ beq- nop # no flags changed if count 0
+ SET_FLAGS(SAR_FLAGS)
+ sraw result,r4,r3
+ srw r5,r4,r5
+ stwbrx result,MEM
+ rlwimi flags,r5,27,CF_VALUE
+ NEXT
+
+/* Left shifts are quite easy: they use the flag mechanism of add */
+shlb_imm: NEXTBYTE(r3)
+ b 1f
+shlb_cl: lbz r3,CL(state)
+ b 1f
+shlb_1: li r3,1
+1: andi. r3,r3,31
+ beq- nop # no flags changed if count 0
+ lbzx op1,MEM
+ SET_FLAGS(FLAGS_ADD(B))
+ slw result,op1,r3
+ addi op2,op1,0 # for OF computation only !
+ stbx result,MEM
+ NEXT
+
+shlw_imm: NEXTBYTE(r3)
+ b 1f
+shlw_cl: lbz r3,CL(state)
+ b 1f
+shlw_1: li r3,1
+1: andi. r3,r3,31
+ beq- nop # no flags changed if count 0
+ lhbrx op1,MEM
+ SET_FLAGS(FLAGS_ADD(W))
+ slw result,op1,r3
+ addi op2,op1,0 # for OF computation only !
+ sthbrx result,MEM
+ NEXT
+
+/* That one may be wrong */
+shll_imm: NEXTBYTE(r3)
+ b 1f
+shll_cl: lbz r3,CL(state)
+ b 1f
+shll_1: li r3,1
+1: andi. r3,r3,31
+ beq- nop # no flags changed if count 0
+ lwbrx op1,MEM
+ addi r4,r3,-1
+ SET_FLAGS(FLAGS_ADD(L))
+ slw result,op1,r3
+ addi op2,op1,0 # for OF computation only !
+ slw op1,op1,r4 # for CF computation
+ stwbrx result,MEM
+ NEXT
+
+/* Right shifts are quite complex, because of funny flag rules ! */
+shrb_imm: NEXTBYTE(r3)
+ b 1f
+shrb_cl: lbz r3,CL(state)
+ b 1f
+shrb_1: li r3,1
+1: andi. r3,r3,31
+ beq- nop # no flags changed if count 0
+ lbzx op1,MEM
+ addi r4,r3,-1
+ SET_FLAGS(FLAGS_SHR(B))
+ srw result,op1,r3
+ srw r4,op1,r4
+ li op2,-1 # for OF computation only !
+ stbx result,MEM
+ rlwimi flags,r4,27,CF_VALUE # Set CF
+ NEXT
+
+shrw_imm: NEXTBYTE(r3)
+ b 1f
+shrw_cl: lbz r3,CL(state)
+ b 1f
+shrw_1: li r3,1
+1: andi. r3,r3,31
+ beq- nop # no flags changed if count 0
+ lhbrx op1,MEM
+ addi r4,r3,-1
+ SET_FLAGS(FLAGS_SHR(W))
+ srw result,op1,r3
+ srw r4,op1,r4
+ li op2,-1 # for OF computation only !
+ sthbrx result,MEM
+ rlwimi flags,r4,27,CF_VALUE # Set CF
+ NEXT
+
+shrl_imm: NEXTBYTE(r3)
+ b 1f
+shrl_cl: lbz r3,CL(state)
+ b 1f
+shrl_1: li r3,1
+1: andi. r3,r3,31
+ beq- nop # no flags changed if count 0
+ lwbrx op1,MEM
+ addi r4,r3,-1
+ SET_FLAGS(FLAGS_SHR(L))
+ srw result,op1,r3
+ srw r4,op1,r4
+ li op2,-1 # for OF computation only !
+ stwbrx result,MEM
+ rlwimi flags,r4,27,CF_VALUE # Set CF
+ NEXT
+
+/* Double length shifts, shldw uses FLAGS_ADD for simplicity */
+shldw_imm: NEXTBYTE(r3)
+ b 1f
+shldw_cl: lbz r3,CL(state)
+1: andi. r3,r3,31
+ beq- nop
+ lhbrx op1,MEM
+ SET_FLAGS(FLAGS_ADD(W))
+ lhbrx op2,REG
+ rlwimi op1,op2,16,0,15 # op2:op1
+ addi op2,op1,0
+ rotlw result,op1,r3
+ sthbrx result,MEM
+ NEXT
+
+shldl_imm: NEXTBYTE(r3)
+ b 1f
+shldl_cl: lbz r3,CL(state)
+1: andi. r3,r3,31
+ beq- nop
+ lwbrx op1,MEM
+ SET_FLAGS(FLAGS_DBLSH(L))
+ lwbrx op2,REG
+ subfic r4,r3,32
+ slw result,op1,r3
+ srw r4,op2,r4
+ rotlw r3,op1,r3
+ or result,result,r4
+ addi op2,op1,0
+ rlwimi flags,r3,27,CF_VALUE
+ stwbrx result,MEM
+ NEXT
+
+shrdw_imm: NEXTBYTE(r3)
+ b 1f
+shrdw_cl: lbz r3,CL(state)
+1: andi. r3,r3,31
+ beq- nop
+ lhbrx op1,MEM
+ SET_FLAGS(FLAGS_DBLSH(W))
+ lhbrx op2,REG
+ addi r4,r3,-1
+ rlwimi op1,op2,16,0,15 # op2:op1
+ addi op2,op1,0
+ srw result,op1,r3
+ srw r4,op1,r4
+ sthbrx result,MEM
+ rlwimi flags,r4,27,CF_VALUE
+ NEXT
+
+shrdl_imm: NEXTBYTE(r3)
+ b 1f
+shrdl_cl: lbz r3,CL(state)
+1: andi. r3,r3,31
+ beq- nop
+ lwbrx op1,MEM
+ SET_FLAGS(FLAGS_DBLSH(L))
+ lwbrx op2,REG
+ subfic r4,r3,32
+ srw result,op1,r3
+ addi r3,r3,-1
+ slw r4,op2,r4
+ srw r3,op1,r3
+ or result,result,r4
+ addi op2,op1,0
+ rlwimi flags,r3,27,CF_VALUE
+ stwbrx result,MEM
+ NEXT
+
+/* One operand multiplies: with result double the operand size, unsigned */
+mulb: lbzx op2,MEM
+ lbz op1,AL(state)
+ mullw result,op1,op2
+ SET_FLAGS(FLAGS_MUL)
+ subfic r3,result,255
+ sthbrx result,AX,state
+ rlwimi flags,r3,0,CF_VALUE|OF_VALUE
+ NEXT
+
+mulw: lhbrx op2,MEM
+ lhbrx op1,AX,state
+ mullw result,op1,op2
+ SET_FLAGS(FLAGS_MUL)
+ li r4,DX
+ srwi r3,result,16
+ sthbrx result,AX,state
+ neg r5,r3
+ sthbrx r3,r4,state # DX
+ rlwimi flags,r5,0,CF_VALUE|OF_VALUE
+ NEXT
+
+mull: lwbrx op2,MEM
+ lwbrx op1,EAX,state
+ mullw result,op1,op2
+ mulhwu. r3,op1,op2
+ SET_FLAGS(FLAGS_MUL)
+ stwbrx result,EAX,state
+ li r4,EDX
+ stwbrx r3,r4,state
+ beq+ nop
+ oris flags,flags,(CF_SET|OF_SET)>>16
+ NEXT
+
+/* One operand multiplies: with result double the operand size, signed */
+imulb: lbzx op2,MEM
+ extsb op2,op2
+ lbz op1,AL(state)
+ extsb op1,op1
+ mullw result,op1,op2
+ SET_FLAGS(FLAGS_MUL)
+ extsb r3,result
+ sthbrx result,AX,state
+ cmpw r3,result
+ beq+ nop
+ oris flags,flags,(CF_SET|OF_SET)>>16
+ NEXT
+
+imulw: lhbrx op2,MEM
+ extsh op2,op2
+ lhbrx op1,AX,state
+ extsh op1,op1
+ mullw result,op1,op2
+ SET_FLAGS(FLAGS_MUL)
+ li r3,DX
+ extsh r4,result
+ srwi r5,result,16
+ sthbrx result,AX,state
+ cmpw r4,result
+ sthbrx r5,r3,state
+ beq+ nop
+ oris flags,flags,(CF_SET|OF_SET)>>16
+ NEXT
+
+imull: lwbrx op2,MEM
+ SET_FLAGS(FLAGS_MUL)
+ lwbrx op1,EAX,state
+ li r3,EDX
+ mulhw r4,op1,op2
+ mullw result,op1,op2
+ stwbrx r4,r3,state
+ srawi r3,result,31
+ cmpw r3,r4
+ beq+ nop
+ oris flags,flags,(CF_SET|OF_SET)>>16
+ NEXT
+
+/* Other multiplies */
+imulw_mem_reg: lhbrx op2,REG
+ extsh op2,op2
+ b 1f
+
+imulw_imm: NEXTWORD(op2)
+ extsh op2,op2
+ b 1f
+
+imulw_imm8: NEXTBYTE(op2)
+ extsb op2,op2
+1: lhbrx op1,MEM
+ extsh op1,op1
+ mullw result,op1,op2
+ SET_FLAGS(FLAGS_MUL)
+ extsh r3,result
+ sthbrx result,REG
+ cmpw r3,result
+ beq+ nop
+ oris flags,flags,(CF_SET|OF_SET)>>16
+ NEXT # SF/ZF/AF/PF undefined !
+
+imull_mem_reg: lwbrx op2,REG
+ b 1f
+
+imull_imm: NEXTDWORD(op2)
+ b 1f
+
+imull_imm8: NEXTBYTE(op2)
+ extsb op2,op2
+1: lwbrx op1,MEM
+ mullw result,op1,op2
+ SET_FLAGS(FLAGS_MUL)
+ mulhw r3,op1,op2
+ srawi r4,result,31
+ stwbrx result,REG
+ cmpw r3,r4
+ beq+ nop
+ oris flags,flags,(CF_SET|OF_SET)>>16
+ NEXT # SF/ZF/AF/PF undefined !
+
+/* aad is indeed a multiply */
+aad: NEXTBYTE(r3)
+ lbz op1,AH(state)
+ lbz op2,AL(state)
+ mullw result,op1,r3 # AH*imm
+ SET_FLAGS(FLAGS_LOG(B)) # SF/ZF/PF from result
+ add result,result,op2 # AH*imm+AL
+ slwi r3,result,8
+ sth r3,AX(state) # AH=0
+ NEXT # OF/AF/CF undefined
+
+/* Unsigned divides: we may destroy all flags */
+divb: lhbrx r4,AX,state
+ lbzx r3,MEM
+ srwi r5,r4,8
+ cmplw r5,r3
+ bnl- _divide_error
+ divwu r5,r4,r3
+ mullw r3,r5,r3
+ sub r3,r4,r3
+ stb r5,AL(state)
+ stb r3,AH(state)
+ NEXT
+
+divw: li opreg,DX
+ lhbrx r4,AX,state
+ lhbrx r5,REG
+ lhbrx r3,MEM
+ insrwi r4,r5,16,0
+ cmplw r5,r3
+ bnl- _divide_error
+ divwu r5,r4,r3
+ mullw r3,r5,r3
+ sub r3,r4,r3
+ sthbrx r5,AX,state
+ sthbrx r3,REG
+ NEXT
+
+divl: li opreg,EDX # Not yet fully implemented
+ lwbrx r3,MEM
+ lwbrx r4,REG
+ lwbrx r5,EAX,state
+ cmplw r4,r3
+ bnl- _divide_error
+ cmplwi r4,0
+ bne- 1f
+ divwu r4,r5,r3
+ mullw r3,r4,r3
+ stwbrx r4,EAX,state
+ sub r3,r5,r3
+ stwbrx r3,REG
+ NEXT
+/* full implementation of 64:32 unsigned divide, slow but rarely used */
+1: bl _div_64_32
+ stwbrx r5,EAX,state
+ stwbrx r4,REG
+ NEXT
+/*
+ * Divide r4:r5 by r3, quotient in r5, remainder in r4.
+ * The algorithm is stupid because it won't be used very often.
+ */
+_div_64_32: li r7,32
+ mtctr r7
+1: cmpwi r4,0 # always subtract in case
+ addc r5,r5,r5 # MSB is set
+ adde r4,r4,r4
+ blt 2f
+ cmplw r4,r3
+ blt 3f
+2: sub r4,r4,r3
+ addi r5,r5,1
+3: bdnz 1b
+ blr # return to the divide that called us
+
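+/*
+ * The helper above is plain restoring division; a hedged C equivalent
+ * (the callers guarantee hi < divisor, so the quotient fits in 32 bits;
+ * names are illustrative only):
+ *
+ *   void div64_32(unsigned *hi, unsigned *lo, unsigned divisor)
+ *   {
+ *       unsigned q = 0, r = *hi, n = *lo;
+ *       for (int i = 31; i >= 0; i--) {
+ *           unsigned msb = r >> 31;              /* bit lost by the shift */
+ *           r = (r << 1) | ((n >> i) & 1);
+ *           if (msb || r >= divisor) {           /* "always subtract in   */
+ *               r -= divisor;                    /*  case MSB is set"     */
+ *               q |= 1u << i;
+ *           }
+ *       }
+ *       *lo = q;                                 /* quotient,  as in r5 */
+ *       *hi = r;                                 /* remainder, as in r4 */
+ *   }
+ */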
+/* Signed divides: we may destroy all flags */
+idivb: lbzx r3,MEM
+ lhbrx r4,AX,state
+ cmpwi r3,0
+ beq- _divide_error
+ divw r5,r4,r3
+ extsb r7,r5
+ mullw r3,r5,r3
+ cmpw r5,r7
+ sub r3,r4,r3
+ bne- _divide_error
+ stb r5,AL(state)
+ stb r3,AH(state)
+ NEXT
+
+idivw: li opreg,DX
+ lhbrx r4,AX,state
+ lhbrx r5,REG
+ lhbrx r3,MEM
+ insrwi r4,r5,16,0
+ cmpwi r3,0
+ beq- _divide_error
+ divw r5,r4,r3
+ extsh r7,r5
+ mullw r3,r5,r3
+ cmpw r5,r7
+ sub r3,r4,r3
+ bne- _divide_error
+ sthbrx r5,AX,state
+ sthbrx r3,REG
+ NEXT
+
+idivl: li opreg,EDX # Not yet fully implemented
+ lwbrx r3,MEM
+ lwbrx r5,EAX,state
+ cmpwi cr1,r3,0
+ lwbrx r4,REG
+ srwi r7,r5,31
+ beq- _divide_error
+ add. r7,r7,r4
+ bne- 1f # EDX not sign extension of EAX
+ divw r4,r5,r3
+ xoris r7,r5,0x8000 # only overflow case is
+ orc. r7,r7,r3 # 0x80000000 divided by -1
+ mullw r3,r4,r3
+ beq- _divide_error
+ stwbrx r4,EAX,state
+ sub r3,r5,r3
+ stwbrx r3,REG
+ NEXT
+
+/* full 64 by 32 signed divide; the overflow checks might be right by now */
+1: srawi r6,r4,31 # absolute value of r4:r5
+ srawi r0,r3,31 # absolute value of r3
+ xor r5,r5,r6
+ xor r3,r3,r0
+ subfc r5,r6,r5
+ xor r4,r4,r6
+ sub r3,r3,r0
+ subfe r4,r6,r4
+ xor r0,r0,r6 # sign of result
+ cmplw r4,r3 # coarse overflow detection
+ bnl- _divide_error # (probably not necessary)
+ bl _div_64_32
+ xor r5,r5,r0 # apply sign to result
+ sub r5,r5,r0
+ xor. r7,r0,r5 # wrong sign: overflow
+ xor r4,r4,r6 # apply sign to remainder
+ blt- _divide_error
+ stwbrx r5,EAX,state
+ sub r4,r4,r6
+ stwbrx r4,REG
+ NEXT
+
+/* aam is indeed a divide */
+aam: NEXTBYTE(r3)
+ lbz r4,AL(state)
+ cmpwi r3,0
+ beq- _divide_error # zero divide
+ divwu op2,r4,r3 # AL/imm8
+ SET_FLAGS(FLAGS_LOG(B)) # SF/ZF/PF from AL
+ mullw r3,op2,r3 # (AL/imm8)*imm8
+ stb op2,AH(state)
+ sub result,r4,r3 # AL-imm8*(AL/imm8)
+ stb result,AL(state)
+ NEXT # OF/AF/CF undefined
+
+_divide_error: li r3,code_divide_err
+ b complex
+
+/* Instructions dealing with segment registers */
+pushw_sp_sr: li r3,SP
+ rlwinm opreg,opcode,31,27,29
+ addi r5,state,SELECTORS+2
+ lhbrx r4,state,r3
+ lhzx r0,r5,opreg
+ addi r4,r4,-2
+ sthbrx r4,state,r3
+ clrlwi r4,r4,16
+ sthbrx r0,r4,ssb
+ NEXT
+
+pushl_sp_sr: li r3,SP
+ rlwinm opreg,opcode,31,27,29
+ addi r5,state,SELECTORS+2
+ lhbrx r4,state,r3
+ lhzx r0,r5,opreg
+ addi r4,r4,-4
+ sthbrx r4,state,r3
+ clrlwi r4,r4,16
+ stwbrx r0,r4,ssb
+ NEXT
+
+movl_sr_mem: cmpwi opreg,20
+ addi opreg,opreg,SELECTORS+2
+ cmpw cr1,base,state # Only registers are sensitive
+ bgt- ud # to word/longword difference
+ lhzx r0,REG
+ bne cr1,1f
+ stwbrx r0,MEM # Actually a register
+ NEXT
+
+movw_sr_mem: cmpwi opreg,20 # SREG 0 to 5 only
+ addi opreg,opreg,SELECTORS+2
+ bgt- ud
+ lhzx r0,REG
+1: sthbrx r0,MEM
+ NEXT
+
+/* Now the instructions that modify the segment registers; note that a
+move/pop to ss disables interrupts and traps for one instruction! */
+popl_sp_sr: li r6,4
+ b 1f
+popw_sp_sr: li r6,2
+1: li r7,SP
+ rlwinm opreg,opcode,31,27,29
+ lhbrx offset,state,r7
+ addi opreg,opreg,SELBASES
+ lhbrx r4,ssb,offset # new selector
+ add offset,offset,r6
+ bl _segment_load
+ sthbrx offset,state,r7 # update sp
+ cmpwi opreg,8 # is ss ?
+ stwux r3,REG
+ stw r4,SELECTORS-SELBASES(opreg)
+ lwz esb,esbase(state)
+ bne+ nop
+ lwz ssb,ssbase(state) # pop ss
+ crmove RF,TF # prevent traps
+ NEXT
+
+movw_mem_sr: cmpwi opreg,20
+ addi r7,state,SELBASES
+ bgt- ud
+ cmpwi opreg,4 # CS illegal
+ beq- ud
+ lhbrx r4,MEM
+ bl _segment_load
+ stwux r3,r7,opreg
+ cmpwi opreg,8
+ stw r4,SELECTORS-SELBASES(r7)
+ lwz esb,esbase(state)
+ bne+ nop
+ lwz ssb,ssbase(state)
+ crmove RF,TF # prevent traps
+ NEXT
+
+ .equ movl_mem_sr, movw_mem_sr
+
+/* The encoding of les/lss/lds/lfs/lgs is strange, opcode is c4/b2/c5/b4/b5
+for es/ss/ds/fs/gs which are sreg 0/2/3/4/5. And obviously there is
+no lcs instruction, it's called a far jump. */
+
+ldlptrl: lwzux r7,MEM
+ li r4,4
+ bl 1f
+ stwx r7,REG
+ NEXT
+ldlptrw: lhzux r7,MEM
+ li r4,2
+ bl 1f
+ sthx r7,REG
+ NEXT
+
+1: cmpw base,state
+ lis r3,0xc011 # es/ss/ds/fs/gs
+ rlwinm r5,opcode,2,0x0c # 00/08/04/00/04
+ mflr r0
+ addi r3,r3,0x4800 # r4=0xc0114800
+ rlwimi r5,opcode,0,0x10 # 00/18/04/10/14
+ lhbrx r4,r4,offset
+ rlwnm opcode,r3,r5,0x1c # 00/08/0c/10/14 = sreg*4 !
+ beq- ud # Only mem operands allowed !
+ bl _segment_load
+ addi r5,opcode,SELBASES
+ stwux r3,r5,state
+ mtlr r0
+ stw r4,SELECTORS-SELBASES(r5)
+ lwz esb,esbase(state) # keep shadow state in sync
+ lwz ssb,ssbase(state)
+ blr
+
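+/*
+ * The bit gymnastics above only map the opcode to sreg*4; in plain C the
+ * same table (as quoted in the comment before ldlptrl) would read:
+ *
+ *   switch (opcode) {
+ *   case 0xc4: sreg = 0; break;   /* les */
+ *   case 0xb2: sreg = 2; break;   /* lss */
+ *   case 0xc5: sreg = 3; break;   /* lds */
+ *   case 0xb4: sreg = 4; break;   /* lfs */
+ *   case 0xb5: sreg = 5; break;   /* lgs */
+ *   }
+ *   index = sreg * 4;             /* offset into the selector base table */
+ */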
+
+/* Instructions that may modify the current code segment: the next optimization
+ * might be to avoid calling C code when the code segment does not change. But
+ * it's probably not worth the effort.
+ */
+/* Far calls, jumps and returns */
+lcall_w: NEXTWORD(r4)
+ NEXTWORD(r5)
+ li r3,code_lcallw
+ b complex
+
+lcall_l: NEXTDWORD(r4)
+ NEXTWORD(r5)
+ li r3,code_lcalll
+ b complex
+
+lcallw: lhbrx r4,MEM
+ addi offset,offset,2
+ lhbrx r5,MEM
+ li r3,code_lcallw
+ b complex
+
+lcalll: lwbrx r4,MEM
+ addi offset,offset,4
+ lhbrx r5,MEM
+ li r3,code_lcalll
+ b complex
+
+ljmp_w: NEXTWORD(r4)
+ NEXTWORD(r5)
+ li r3,code_ljmpw
+ b complex
+
+ljmp_l: NEXTDWORD(r4)
+ NEXTWORD(r5)
+ li r3,code_ljmpl
+ b complex
+
+ljmpw: lhbrx r4,MEM
+ addi offset,offset,2
+ lhbrx r5,MEM
+ li r3,code_ljmpw
+ b complex
+
+ljmpl: lwbrx r4,MEM
+ addi offset,offset,4
+ lhbrx r5,MEM
+ li r3,code_ljmpl
+ b complex
+
+lretw_imm: NEXTWORD(r4)
+ b 1f
+lretw: li r4,0
+1: li r3,code_lretw
+ b complex
+
+lretl_imm: NEXTWORD(r4)
+ b 1f
+lretl: li r4,0
+1: li r3,code_lretl
+ b complex
+
+/* Interrupts */
+int: li r3,code_softint # handled by C code
+ NEXTBYTE(r4)
+ b complex
+
+int3: li r3,code_int3 # handled by C code
+ b complex
+
+into: EVAL_OF
+ bf+ OF,nop
+ li r3,code_into
+ b complex # handled by C code
+
+iretw: li r3,code_iretw # handled by C code
+ b complex
+
+iretl: li r3,code_iretl
+ b complex
+
+/* Miscellaneous flag control instructions */
+clc: oris flags,flags,(CF_IN_CR|CF_STATE_MASK|ABOVE_IN_CR)>>16
+ xoris flags,flags,(CF_IN_CR|CF_STATE_MASK|ABOVE_IN_CR)>>16
+ NEXT
+
+cmc: oris flags,flags,(CF_IN_CR|ABOVE_IN_CR)>>16
+ xoris flags,flags,(CF_IN_CR|CF_COMPLEMENT|ABOVE_IN_CR)>>16
+ NEXT
+
+stc: oris flags,flags,\
+ (CF_IN_CR|CF_LOCATION|CF_COMPLEMENT|ABOVE_IN_CR)>>16
+ xoris flags,flags,(CF_IN_CR|CF_LOCATION|ABOVE_IN_CR)>>16
+ NEXT
+
+cld: crclr DF
+ NEXT
+
+std: crset DF
+ NEXT
+
+cli: crclr IF
+ NEXT
+
+sti: crset IF
+ NEXT
+
+lahf: bl _eval_flags
+ stb r3,AH(state)
+ NEXT
+
+sahf: andis. r3,flags,OF_EXPLICIT>>16
+ lbz r0,AH(state)
+ beql+ _eval_of # save OF just in case
+ rlwinm op1,r0,31,0x08 # AF
+ rlwinm flags,flags,0,OF_STATE_MASK
+ extsb result,r0 # SF/PF
+ ZF862ZF(r0)
+ oris flags,flags,(ZF_PROTECT|ZF_IN_CR|SF_IN_CR)>>16
+ addi op2,op1,0 # AF
+ ori result,result,0x00fb # set all except PF
+ mtcrf 0x02,r0 # SF/ZF
+ rlwimi flags,r0,27,CF_VALUE # CF
+ xori result,result,0x00ff # 00 if PF set, 04 if clear
+ NEXT
+
+pushfw_sp: bl _eval_flags
+ li r4,SP
+ lhbrx r5,r4,state
+ addi r5,r5,-2
+ sthbrx r5,r4,state
+ clrlwi r5,r5,16
+ sthbrx r3,ssb,r5
+ NEXT
+
+pushfl_sp: bl _eval_flags
+ li r4,SP
+ lhbrx r5,r4,state
+ addi r5,r5,-4
+ sthbrx r5,r4,state
+ clrlwi r5,r5,16
+ stwbrx r3,ssb,r5
+ NEXT
+
+popfl_sp: li r4,SP
+ lhbrx r5,r4,state
+ lwbrx r3,ssb,r5
+ addi r5,r5,4
+ stw r3,eflags(state)
+ sthbrx r5,r4,state
+ b 1f
+
+popfw_sp: li r4,SP
+ lhbrx r5,r4,state
+ lhbrx r3,ssb,r5
+ addi r5,r5,2
+ sth r3,eflags+2(state)
+ sthbrx r5,r4,state
+1: rlwinm op1,r3,31,0x08 # AF
+ xori result,r3,4 # PF
+ ZF862ZF(r3) # cr6
+ lis flags,(OF_EXPLICIT|ZF_PROTECT|ZF_IN_CR|SF_IN_CR)>>16
+ addi op2,op1,0 # AF
+ rlwinm result,result,0,0x04 # PF
+ rlwimi flags,r3,27,CF_VALUE # CF
+ mtcrf 0x6,r3 # IF,DF,TF,SF,ZF
+ rlwimi result,r3,24,0,0 # SF
+ rlwimi flags,r3,15,OF_VALUE # OF
+ NEXT
+
+/* SETcc is slightly faster for setz/setnz */
+setz: EVAL_ZF
+ bt ZF,1f
+0: cmpwi opreg,0
+ bne- ud
+ stbx opreg,MEM
+ NEXT
+
+setnz: EVAL_ZF
+ bt ZF,0b
+1: cmpwi opreg,0
+ bne- ud
+ stbx one,MEM
+ NEXT
+
+#define SETCC(cond, eval, flag) \
+set##cond: EVAL_##eval; bt flag,1b; b 0b; \
+setn##cond: EVAL_##eval; bt flag,0b; b 1b
+
+ SETCC(c, CF, CF)
+ SETCC(a, ABOVE, ABOVE)
+ SETCC(s, SF, SF)
+ SETCC(g, SIGNED, SGT)
+ SETCC(l, SIGNED, SLT)
+ SETCC(o, OF, OF)
+ SETCC(p, PF, PF)
+
+/* No wait for a 486SX */
+ .equ wait, nop
+
+/* ARPL is not recognized in real mode */
+ .equ arpl, ud
+
+/* clts and in general control and debug registers are not implemented */
+ .equ clts, unimpl
+
+aaa: lhbrx r0,AX,state
+ bl _eval_af
+ rlwinm r3,r3,0,0x10
+ SET_FLAGS(FLAGS_ADD(W))
+ rlwimi r3,r0,0,0x0f
+ li r4,0x106
+ addi r3,r3,-10
+ srwi r3,r3,16 # carry ? 0 : 0xffff
+ andc op1,r4,r3 # carry ? 0x106 : 0
+ add result,r0,op1
+ rlwinm result,result,0,28,23 # clear high half of AL
+ li op2,10 # sets AF indirectly
+ sthbrx r3,AX,state # OF/SF/ZF/PF undefined !
+ rlwimi result,op1,8,0x10000 # insert CF
+ NEXT
+
+aas: lhbrx r0,AX,state
+ bl _eval_af
+ rlwinm r3,r3,0,0x10
+ SET_FLAGS(FLAGS_ADD(W))
+ rlwimi r3,r0,0,0x0f # AF:AL&0x0f
+ li r4,0x106
+ addi r3,r3,-10
+ srwi r3,r3,16 # carry ? 0 : 0xffff
+ andc op1,r4,r3 # carry ? 0x106 : 0
+ sub result,r0,op1
+ rlwinm result,result,0,28,23 # clear high half of AL
+ li op2,10 # sets AF indirectly
+ sthbrx r3,AX,state # OF/SF/ZF/PF undefined !
+ rlwimi result,op1,8,0x10000 # insert CF
+ NEXT
+
+daa: lbz r0,AL(state)
+ bl _eval_af
+ rlwinm r7,r3,0,0x10
+ bl _eval_cf # r3=CF<<8
+ rlwimi r7,r0,0,0x0f
+ SET_FLAGS(FLAGS_ADD(B))
+ addi r4,r7,-10
+ rlwinm r4,r4,3,0x06 # 6 if AF or >9, 0 otherwise
+ srwi op1,r7,1 # 0..4, no AF, 5..f AF set
+ add r0,r0,r4 # conditional add
+	li	op2,11			# sets AF depending on op1
+ or r0,r0,r3
+ subfic r3,r0,159
+ rlwinm r3,r3,7,0x60 # mask value to add
+ add result,r0,r3 # final result for SF/ZF/PF
+ stb result,AL(state)
+ rlwimi result,r3,2,0x100 # set CF if added
+ NEXT
+
+das: lbz r0,AL(state)
+ bl _eval_af
+ rlwinm r7,r3,0,0x10
+ bl _eval_cf
+ rlwimi r7,r0,0,0x0f
+ SET_FLAGS(FLAGS_ADD(B))
+ addi r4,r7,-10
+ rlwinm r4,r4,3,0x06
+ srwi op1,r7,1 # 0..4, no AF, 5..f AF set
+ sub r0,r0,r4 # conditional add
+ li op2,11 # sets AF depending on op1
+ or r4,r0,r3 # insert CF
+ addi r3,r4,-160
+ rlwinm r3,r3,7,0x60 # mask value to add
+ sub result,r4,r3 # final result for SF/ZF/PF
+ stb result,AL(state)
+ rlwimi result,r3,2,0x100 # set CF
+ NEXT
+
+/* 486 specific instructions */
+
+/* For cmpxchg, only the zero flag is important */
+
+cmpxchgb: lbz op1,AL(state)
+ SET_FLAGS(FLAGS_SUB(B)|ZF_IN_CR)
+ lbzx op2,MEM
+ cmpw cr6,op1,op2
+ sub result,op1,op2
+ bne cr6,1f
+ lbzx r3,REG # success: swap
+ stbx r3,MEM
+ NEXT
+1: stb op2,AL(state)
+ NEXT
+
+cmpxchgw: lhbrx op1,AX,state
+ SET_FLAGS(FLAGS_SUB(W)|ZF_IN_CR)
+ lhbrx op2,MEM
+ cmpw cr6,op1,op2
+ sub result,op1,op2
+ bne cr6,1f
+ lhzx r3,REG # success: swap
+ sthx r3,MEM
+ NEXT
+1: sthbrx op2,AX,state
+ NEXT
+
+cmpxchgl: lwbrx op1,EAX,state
+ SET_FLAGS(FLAGS_SUB(L)|ZF_IN_CR|SIGNED_IN_CR)
+ lwbrx op2,MEM
+ cmpw cr6,op1,op2
+ sub result,op1,op2
+ bne cr6,1f
+ lwzx r3,REG # success: swap
+ stwx r3,MEM
+ NEXT
+1: stwbrx op2,EAX,state
+ NEXT
+
+xaddb: lbzx op2,MEM
+ SET_FLAGS(FLAGS_ADD(B))
+ lbzx op1,REG
+ add result,op1,op2
+ stbx result,MEM
+ stbx op2,REG
+ NEXT
+
+xaddw: lhbrx op2,MEM
+ SET_FLAGS(FLAGS_ADD(W))
+ lhbrx op1,REG
+ add result,op1,op2
+ sthbrx result,MEM
+ sthbrx op2,REG
+ NEXT
+
+xaddl: lwbrx op2,MEM
+ SET_FLAGS(FLAGS_ADD(L))
+ lwbrx op1,REG
+ add result,op1,op2
+ stwbrx result,MEM
+ stwbrx op2,REG
+ NEXT
+
+/* All FPU instructions skipped. This is a 486 SX ! */
+esc: li r3,code_dna # DNA interrupt
+ b complex
+
+ .equ hlt, unimpl # Cannot stop
+
+ .equ invd, unimpl
+
+/* Undefined in real address mode */
+ .equ lar, ud
+
+ .equ lgdt, unimpl
+ .equ lidt, unimpl
+ .equ lldt, ud
+ .equ lmsw, unimpl
+
+/* protected mode only */
+ .equ lsl, ud
+ .equ ltr, ud
+
+ .equ movl_cr_reg, unimpl
+ .equ movl_reg_cr, unimpl
+ .equ movl_dr_reg, unimpl
+ .equ movl_reg_dr, unimpl
+
+ .equ sgdt, unimpl
+
+ .equ sidt, unimpl
+ .equ sldt, ud
+ .equ smsw, unimpl
+
+ .equ str, ud
+
+ud: li r3,code_ud
+ li r4,0
+ b complex
+
+unimpl: li r3,code_ud
+ li r4,1
+ b complex
+
+ .equ verr, ud
+ .equ verw, ud
+ .equ wbinvd, unimpl
+
+em86_end:
+ .size em86_enter,em86_end-em86_enter
+#ifdef __BOOT__
+ .data
+#define ENTRY(x,t) .long x+t-_jtables
+#else
+ .section .rodata
+#define ENTRY(x,t) .long x+t
+#endif
+
+#define BOP(x) ENTRY(x,2) /* Byte operation with mod/rm byte */
+#define WLOP(x) ENTRY(x,3) /* 16 or 32 bit operation with mod/rm byte */
+#define EXTOP(x) ENTRY(x,0) /* Opcode with extension in mod/rm byte */
+#define OP(x) ENTRY(x,1) /* Direct one byte opcode/prefix */
+
+/* A few macros for the main table */
+#define gen6(op, wl, axeax) \
+ BOP(op##b##_reg_mem); WLOP(op##wl##_reg_mem); \
+ BOP(op##b##_mem_reg); WLOP(op##wl##_mem_reg); \
+ OP(op##b##_imm_al); OP(op##wl##_imm_##axeax)
+
+#define rep7(l,t) \
+ ENTRY(l,t); ENTRY(l,t); ENTRY(l,t); ENTRY(l,t); \
+ ENTRY(l,t); ENTRY(l,t); ENTRY(l,t)
+
+#define rep8(l) l ; l; l; l; l; l; l; l;
+
+#define allcond(pfx, sfx, t) \
+ ENTRY(pfx##o##sfx, t); ENTRY(pfx##no##sfx, t); \
+ ENTRY(pfx##c##sfx, t); ENTRY(pfx##nc##sfx, t); \
+ ENTRY(pfx##z##sfx, t); ENTRY(pfx##nz##sfx, t); \
+ ENTRY(pfx##na##sfx, t); ENTRY(pfx##a##sfx, t); \
+ ENTRY(pfx##s##sfx, t); ENTRY(pfx##ns##sfx, t); \
+ ENTRY(pfx##p##sfx, t); ENTRY(pfx##np##sfx, t); \
+ ENTRY(pfx##l##sfx, t); ENTRY(pfx##nl##sfx, t); \
+ ENTRY(pfx##ng##sfx, t); ENTRY(pfx##g##sfx, t)
+
+/* single/double register sign extensions and other oddities */
+#define h2sextw cbw /* Half to Single sign extension */
+#define s2dextw cwd /* Single to Double sign extension */
+#define h2sextl cwde
+#define s2dextl cdq
+#define j_a16_cxz_w jcxz_w
+#define j_a32_cxz_w jecxz_w
+#define j_a16_cxz_l jcxz_l
+#define j_a32_cxz_l jecxz_l
+#define loopa16_w loopw_w
+#define loopa16_l loopw_l
+#define loopa32_w loopl_w
+#define loopa32_l loopl_l
+#define loopnza16_w loopnzw_w
+#define loopnza16_l loopnzw_l
+#define loopnza32_w loopnzl_w
+#define loopnza32_l loopnzl_l
+#define loopza16_w loopzw_w
+#define loopza16_l loopzw_l
+#define loopza32_w loopzl_w
+#define loopza32_l loopzl_l
+/* No FP support */
+
+/* Addressing mode table */
+ .align 5
+# (%bx,%si), (%bx,%di), (%bp,%si), (%bp,%di)
+adtable: .long 0x00004360, 0x00004370, 0x80004560, 0x80004570
+# (%si), (%di), o16, (%bx)
+ .long 0x00004600, 0x00004700, 0x00002000, 0x00004300
+# o8(%bx,%si), o8(%bx,%di), o8(%bp,%si), o8(%bp,%di)
+ .long 0x00004360, 0x00004370, 0x80004560, 0x80004570
+# o8(%si), o8(%di), o8(%bp), o8(%bx)
+ .long 0x00004600, 0x00004700, 0x80004500, 0x00004300
+# o16(%bx,%si), o16(%bx,%di), o16(%bp,%si), o16(%bp,%di)
+ .long 0x00004360, 0x00004370, 0x80004560, 0x80004570
+# o16(%si), o16(%di), o16(%bp), o16(%bx)
+ .long 0x00004600, 0x00004700, 0x80004500, 0x00004300
+# register addressing modes do not use the table
+ .long 0, 0, 0, 0, 0, 0, 0, 0
+#now 32 bit modes
+# (%eax), (%ecx), (%edx), (%ebx)
+ .long 0x00004090, 0x00004190, 0x00004290, 0x00004390
+# sib, o32, (%esi), (%edi)
+ .long 0x00003090, 0x00002090, 0x00004690, 0x00004790
+# o8(%eax), o8(%ecx), o8(%edx), o8(%ebx)
+ .long 0x00004090, 0x00004190, 0x00004290, 0x00004390
+# sib, o8(%ebp), o8(%esi), o8(%edi)
+ .long 0x00003090, 0x80004590, 0x00004690, 0x00004790
+# o32(%eax), o32(%ecx), o32(%edx), o32(%ebx)
+ .long 0x00004090, 0x00004190, 0x00004290, 0x00004390
+# sib, o32(%ebp), o32(%esi), o32(%edi)
+ .long 0x00003090, 0x80004590, 0x00004690, 0x00004790
+# register addressing modes do not use the table
+ .long 0, 0, 0, 0, 0, 0, 0, 0
+
+#define jtable(wl, awl, spesp, axeax, name ) \
+ .align 5; \
+jtab_##name: gen6(add, wl, axeax); \
+ OP(push##wl##_##spesp##_sr); \
+ OP(pop##wl##_##spesp##_sr); \
+ gen6(or, wl, axeax); \
+ OP(push##wl##_##spesp##_sr); \
+ OP(_twobytes); \
+ gen6(adc, wl, axeax); \
+ OP(push##wl##_##spesp##_sr); \
+ OP(pop##wl##_##spesp##_sr); \
+ gen6(sbb, wl, axeax); \
+ OP(push##wl##_##spesp##_sr); \
+ OP(pop##wl##_##spesp##_sr); \
+ gen6(and, wl, axeax); OP(_es); OP(daa); \
+ gen6(sub, wl, axeax); OP(_cs); OP(das); \
+ gen6(xor, wl, axeax); OP(_ss); OP(aaa); \
+ gen6(cmp, wl, axeax); OP(_ds); OP(aas); \
+ rep8(OP(inc##wl##_reg)); \
+ rep8(OP(dec##wl##_reg)); \
+ rep8(OP(push##wl##_##spesp##_reg)); \
+ rep8(OP(pop##wl##_##spesp##_reg)); \
+ OP(pusha##wl##_##spesp); OP(popa##wl##_##spesp); \
+ WLOP(bound##wl); WLOP(arpl); \
+ OP(_fs); OP(_gs); OP(_opsize); OP(_adsize); \
+ OP(push##wl##_##spesp##_imm); WLOP(imul##wl##_imm); \
+ OP(push##wl##_##spesp##_imm8); WLOP(imul##wl##_imm8); \
+ OP(insb_##awl); OP(ins##wl##_##awl); \
+ OP(outsb_##awl); OP(outs##wl##_##awl); \
+ allcond(sj,_##wl,1); \
+ EXTOP(grp1b_imm); EXTOP(grp1##wl##_imm); \
+ EXTOP(grp1b_imm); EXTOP(grp1##wl##_imm8); \
+ BOP(testb_reg_mem); WLOP(test##wl##_reg_mem); \
+ BOP(xchgb_reg_mem); WLOP(xchg##wl##_reg_mem); \
+ BOP(movb_reg_mem); WLOP(mov##wl##_reg_mem); \
+ BOP(movb_mem_reg); WLOP(mov##wl##_mem_reg); \
+ WLOP(mov##wl##_sr_mem); WLOP(lea##wl); \
+ WLOP(mov##wl##_mem_sr); WLOP(pop##wl##_##spesp##_##awl); \
+ OP(nop); rep7(xchg##wl##_##axeax##_reg,1); \
+ OP(h2sext##wl); OP(s2dext##wl); \
+ OP(lcall_##wl); OP(wait); \
+ OP(pushf##wl##_##spesp); OP(popf##wl##_##spesp); \
+ OP(sahf); OP(lahf); \
+ OP(movb_##awl##_al); OP(mov##wl##_##awl##_##axeax); \
+ OP(movb_al_##awl); OP(mov##wl##_##axeax##_##awl); \
+ OP(movsb_##awl); OP(movs##wl##_##awl); \
+ OP(cmpsb_##awl); OP(cmps##wl##_##awl); \
+ OP(testb_imm_al); OP(test##wl##_imm_##axeax); \
+ OP(stosb_##awl); OP(stos##wl##_##awl); \
+ OP(lodsb_##awl); OP(lods##wl##_##awl); \
+ OP(scasb_##awl); OP(scas##wl##_##awl); \
+ rep8(OP(movb_imm_reg)); \
+ rep8(OP(mov##wl##_imm_reg)); \
+ EXTOP(shiftb_imm); EXTOP(shift##wl##_imm); \
+ OP(ret##wl##_##spesp##_imm); OP(ret##wl##_##spesp); \
+ WLOP(ldlptr##wl); WLOP(ldlptr##wl); \
+ BOP(movb_imm_mem); WLOP(mov##wl##_imm_mem); \
+ OP(enter##wl##_##spesp); OP(leave##wl##_##spesp); \
+ OP(lret##wl##_imm); OP(lret##wl); \
+ OP(int3); OP(int); OP(into); OP(iret##wl); \
+ EXTOP(shiftb_1); EXTOP(shift##wl##_1); \
+ EXTOP(shiftb_cl); EXTOP(shift##wl##_cl); \
+ OP(aam); OP(aad); OP(ud); OP(xlatb_##awl); \
+ rep8(OP(esc)); \
+ OP(loopnz##awl##_##wl); OP(loopz##awl##_##wl); \
+ OP(loop##awl##_##wl); OP(j_##awl##_cxz_##wl); \
+ OP(inb_port_al); OP(in##wl##_port_##axeax); \
+ OP(outb_al_port); OP(out##wl##_##axeax##_port); \
+ OP(call##wl##_##spesp); OP(jmp_##wl); \
+ OP(ljmp_##wl); OP(sjmp_##wl); \
+ OP(inb_dx_al); OP(in##wl##_dx_##axeax); \
+ OP(outb_al_dx); OP(out##wl##_##axeax##_dx); \
+ OP(_lock); OP(ud); OP(_repnz); OP(_repz); \
+ OP(hlt); OP(cmc); \
+ EXTOP(grp3b); EXTOP(grp3##wl); \
+ OP(clc); OP(stc); OP(cli); OP(sti); \
+ OP(cld); OP(std); \
+ EXTOP(grp4b); EXTOP(grp5##wl##_##spesp); \
+ /* Here we start the table for twobyte instructions */ \
+ OP(ud); OP(ud); WLOP(lar); WLOP(lsl); \
+ OP(ud); OP(ud); OP(clts); OP(ud); \
+ OP(invd); OP(wbinvd); OP(ud); OP(ud); \
+ OP(ud); OP(ud); OP(ud); OP(ud); \
+ rep8(OP(ud)); \
+ rep8(OP(ud)); \
+ OP(movl_cr_reg); OP(movl_reg_cr); \
+ OP(movl_dr_reg); OP(movl_reg_dr); \
+ OP(ud); OP(ud); OP(ud); OP(ud); \
+ rep8(OP(ud)); \
+ /* .long wrmsr, rdtsc, rdmsr, rdpmc; */\
+ rep8(OP(ud)); \
+ rep8(OP(ud)); \
+ /* allcond(cmov, wl); */ \
+ rep8(OP(ud)); rep8(OP(ud)); \
+ rep8(OP(ud)); rep8(OP(ud)); \
+ /* MMX Start */ \
+ rep8(OP(ud)); rep8(OP(ud)); \
+ rep8(OP(ud)); rep8(OP(ud)); \
+ /* MMX End */ \
+ allcond(j,_##wl, 1); \
+ allcond(set,,2); \
+ OP(push##wl##_##spesp##_sr); OP(pop##wl##_##spesp##_sr); \
+ OP(ud) /* cpuid */; WLOP(bt##wl##_reg_mem); \
+ WLOP(shld##wl##_imm); WLOP(shld##wl##_cl); \
+ OP(ud); OP(ud); \
+ OP(push##wl##_##spesp##_sr); OP(pop##wl##_##spesp##_sr); \
+ OP(ud) /* rsm */; WLOP(bts##wl##_reg_mem); \
+ WLOP(shrd##wl##_imm); WLOP(shrd##wl##_cl); \
+ OP(ud); WLOP(imul##wl##_mem_reg); \
+ BOP(cmpxchgb); WLOP(cmpxchg##wl); \
+ WLOP(ldlptr##wl); WLOP(btr##wl##_reg_mem); \
+ WLOP(ldlptr##wl); WLOP(ldlptr##wl); \
+ WLOP(movzb##wl); WLOP(movzw##wl); \
+ OP(ud); OP(ud); \
+ EXTOP(grp8##wl); WLOP(btc##wl##_reg_mem); \
+ WLOP(bsf##wl); WLOP(bsr##wl); \
+ WLOP(movsb##wl); WLOP(movsw##wl); \
+ BOP(xaddb); WLOP(xadd##wl); \
+ OP(ud); OP(ud); \
+ OP(ud); OP(ud); OP(ud); OP(ud); \
+ rep8(OP(bswap)); \
+ /* MMX Start */ \
+ rep8(OP(ud)); rep8(OP(ud)); \
+ rep8(OP(ud)); rep8(OP(ud)); \
+ rep8(OP(ud)); rep8(OP(ud)); \
+ /* MMX End */
+ .align 5 /* 8kb of tables, 32 byte aligned */
+_jtables: jtable(w, a16, sp, ax, www) /* data16, addr16 */
+ jtable(l, a16, sp, eax, lww) /* data32, addr16 */
+ jtable(w, a32, sp, ax, wlw) /* data16, addr32 */
+ jtable(l, a32, sp, eax, llw) /* data32, addr32 */
+/* The other possible combinations are only required by protected mode
+code using a big stack segment */
+/* Here are the auxiliary tables for opcode extensions, note that
+all entries get 2 or 3 added. */
+#define grp1table(bwl,t,s8) \
+grp1##bwl##_imm##s8:; \
+ ENTRY(add##bwl##_imm##s8,t); ENTRY(or##bwl##_imm##s8,t); \
+ ENTRY(adc##bwl##_imm##s8,t); ENTRY(sbb##bwl##_imm##s8,t); \
+ ENTRY(and##bwl##_imm##s8,t); ENTRY(sub##bwl##_imm##s8,t); \
+ ENTRY(xor##bwl##_imm##s8,t); ENTRY(cmp##bwl##_imm##s8,t)
+
+ grp1table(b,2,)
+ grp1table(w,3,)
+ grp1table(w,3,8)
+ grp1table(l,3,)
+ grp1table(l,3,8)
+
+#define shifttable(bwl,t,c) \
+shift##bwl##_##c:; \
+ ENTRY(rol##bwl##_##c,t); ENTRY(ror##bwl##_##c,t); \
+ ENTRY(rcl##bwl##_##c,t); ENTRY(rcr##bwl##_##c,t); \
+ ENTRY(shl##bwl##_##c,t); ENTRY(shr##bwl##_##c,t); \
+ OP(ud); ENTRY(sar##bwl##_##c,t)
+
+ shifttable(b,2,1)
+ shifttable(w,3,1)
+ shifttable(l,3,1)
+
+ shifttable(b,2,cl)
+ shifttable(w,3,cl)
+ shifttable(l,3,cl)
+
+ shifttable(b,2,imm)
+ shifttable(w,3,imm)
+ shifttable(l,3,imm)
+
+#define grp3table(bwl,t) \
+grp3##bwl: ENTRY(test##bwl##_imm,t); OP(ud); \
+ ENTRY(not##bwl,t); ENTRY(neg##bwl,t); \
+ ENTRY(mul##bwl,t); ENTRY(imul##bwl,t); \
+ ENTRY(div##bwl,t); ENTRY(idiv##bwl,t)
+
+ grp3table(b,2)
+ grp3table(w,3)
+ grp3table(l,3)
+
+
+grp4b: BOP(incb); BOP(decb); \
+ OP(ud); OP(ud); \
+ OP(ud); OP(ud); \
+ OP(ud); OP(ud)
+
+#define grp5table(wl,spesp) \
+grp5##wl##_##spesp: \
+ WLOP(inc##wl); WLOP(dec##wl); \
+ WLOP(call##wl##_##spesp##_mem); WLOP(lcall##wl##); \
+ WLOP(jmp##wl); WLOP(ljmp##wl); \
+ WLOP(push##wl##_##spesp); OP(ud)
+
+ grp5table(w,sp)
+ grp5table(l,sp)
+
+#define grp8table(wl) \
+grp8##wl: OP(ud); OP(ud); OP(ud); OP(ud); \
+ WLOP(bt##wl##_imm); WLOP(bts##wl##_imm); \
+ WLOP(btr##wl##_imm); WLOP(btc##wl##_imm)
+
+ grp8table(w)
+ grp8table(l)
+#ifdef __BOOT__
+_endjtables: .long 0 /* Points to _jtables after relocation */
+#endif
+
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/exception.S b/c/src/lib/libbsp/powerpc/shared/bootloader/exception.S
new file mode 100644
index 0000000000..5835ea48a2
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/exception.S
@@ -0,0 +1,473 @@
+/*
+ * exception.S -- Exception handlers for early boot.
+ *
+ * Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
+ *
+ * Modified to compile in RTEMS development environment
+ * by Eric Valette
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+/* This is an improved version of the TLB interrupt handling code from
+ * the 603e users manual (603eUM.pdf) downloaded from the WWW. All the
+ * visible bugs have been removed. Note that many have survived in the errata
+ * to the 603 user manual (603UMer.pdf).
+ *
+ * This code also pays particular attention to optimization, takes into
+ * account the differences between 603 and 603e, single/multiple processor
+ * systems and tries to order instructions for dual dispatch in many places.
+ *
+ * The optimization has been performed along two lines:
+ * 1) to minimize the number of instruction cache lines needed for the most
+ * common execution paths (the ones that do not result in an exception).
+ * 2) then to order the code to maximize the number of dual issue and
+ * completion opportunities without increasing the number of cache lines
+ * used in the same cases.
+ *
+ * The last goal of this code is to fit inside the address range
+ * assigned to the interrupt vectors: 192 instructions with fixed
+ * entry points every 64 instructions.
+ *
+ * Some typos have also been corrected and the Power l (lowercase L)
+ * instructions replaced by lwz without comment.
+ *
+ * I have attempted to describe the reasons of the order and of the choice
+ * of the instructions but the comments may be hard to understand without
+ * the processor manual.
+ *
+ * Note that the fact that the TLBs are reloaded by software in theory
+ * allows tremendous flexibility; for example, we could avoid setting the
+ * reference bit of a PTE which could actually not be accessed because
+ * of a protection violation, by changing a few lines of code. However,
+ * this would significantly slow down most TLB reload operations, and
+ * this is the reason why we try never to make checks which would be
+ * redundant with hardware and usually indicate a bug in a program.
+ *
+ * There are some inconsistencies in the documentation concerning the
+ * settings of SRR1 bit 15. All recent documentation now says that it is set
+ * for stores and cleared for loads. Anyway this handler never uses this bit.
+ *
+ * A final remark: the rfi instruction seems to implicitly clear the
+ * MSR<14> (tgpr) bit. The documentation claims that this bit is restored
+ * from SRR1 by rfi, but the corresponding bit in SRR1 is the LRU way bit.
+ * Anyway, the only exception which can occur while TGPR is set is a machine
+ * check which would indicate an unrecoverable problem. Recent documentation
+ * now says in some place that rfi clears MSR<14>.
+ *
+ * TLB software load for 602/603/603e/603ev:
+ * Specific Instructions:
+ * tlbld - write the dtlb with the pte in rpa reg
+ * tlbli - write the itlb with the pte in rpa reg
+ * Specific SPRs:
+ * dmiss - address of dstream miss
+ * imiss - address of istream miss
+ * hash1 - address primary hash PTEG address
+ * hash2 - returns secondary hash PTEG address
+ * iCmp - returns the primary istream compare value
+ * dCmp - returns the primary dstream compare value
+ * rpa - the second word of pte used by tlblx
+ * Other specific resources:
+ * cr0 saved in 4 high order bits of SRR1,
+ * SRR1 bit 14 [WAY] selects TLB set to load from LRU algorithm
+ * gprs r0..r3 shadowed by the setting of MSR bit 14 [TGPR]
+ * other bits in SRR1 (unused by this handler but see earlier comments)
+ *
+ * There are three basic flows corresponding to three vectors:
+ * 0x1000: Instruction TLB miss,
+ * 0x1100: Data TLB miss on load,
+ * 0x1200: Data TLB miss on store or not dirty page
+ */
+
+/* define the following if code does not have to run on basic 603 */
+/* #define USE_KEY_BIT */
+
+/* define the following for safe multiprocessing */
+/* #define MULTIPROCESSING */
+
+/* define the following for mixed endian */
+/* #define CHECK_MIXED_ENDIAN */
+
+/* define the following if entries always have the reference bit set */
+#define ASSUME_REF_SET
+
+/* Some OS kernels may want to keep a single copy of the dirty bit in a per
+ * page table. In this case writable pages are always write-protected as long
+ * as they are clean, and the dirty bit set actually means that the page
+ * is writable.
+ */
+#define DIRTY_MEANS_WRITABLE
+
+#include <libcpu/cpu.h>
+#include "asm.h"
+#include "bootldr.h"
+
+/*
+ * Instruction TLB miss flow
+ * Entry at 0x1000 with the following:
+ * srr0 -> address of instruction that missed
+ * srr1 -> 0:3=cr0, 13=1 (instruction), 14=lru way, 16:31=saved MSR
+ * msr<tgpr> -> 1
+ * iMiss -> ea that missed
+ * iCmp -> the compare value for the va that missed
+ * hash1 -> pointer to first hash pteg
+ * hash2 -> pointer to second hash pteg
+ *
+ * Register usage:
+ * r0 is limit address during search / scratch after
+ * r1 is pte data / error code for ISI exception when search fails
+ * r2 is pointer to pte
+ * r3 is compare value during search / scratch after
+ */
+/* Binutils or assembler bug ? Declaring the section executable and writable
+ * generates an error message on the @fixup entries.
+ */
+ .section .exception,"aw"
+# .org 0x1000 # instruction TLB miss entry point
+ .globl tlb_handlers
+tlb_handlers:
+ .type tlb_handlers,@function
+#define ISIVec tlb_handlers-0x1000+0x400
+#define DSIVec tlb_handlers-0x1000+0x300
+ mfspr r2,HASH1
+ lwz r1,0(r2) # Start memory access as soon as possible
+ mfspr r3,ICMP # to load the cache.
+0: la r0,48(r2) # Use explicit loop to avoid using ctr
+1: cmpw r1,r3 # In theory the loop is somewhat slower
+ beq- 2f # than documentation example
+ cmpw r0,r2 # but we gain from starting cache load
+ lwzu r1,8(r2) # earlier and using slots between load
+ bne+ 1b # and comparison for other purposes.
+ cmpw r1,r3
+ bne- 4f # Secondary hash check
+2: lwz r1,4(r2) # Found: load second word of PTE
+ mfspr r0,IMISS # get miss address during load delay
+#ifdef ASSUME_REF_SET
+ andi. r3,r1,8 # check for guarded memory
+ bne- 5f
+ mtspr RPA,r1
+ mfsrr1 r3
+ tlbli r0
+#else
+/* This is basically the original code from the manual. */
+# andi. r3,r1,8 # check for guarded memory
+# bne- 5f
+# andi. r3,r1,0x100 # check R bit ahead to help folding
+/* However there is a better solution: these last three instructions can be
+replaced by the following which should cause less pipeline stalls because
+both tests are combined and there is a single CR rename buffer */
+ extlwi r3,r1,6,23 # Keep only RCWIMG in 6 most significant bits.
+ rlwinm. r3,r3,5,0,27 # Keep only G (in sign) and R and test.
+ blt- 5f # Negative means guarded, zero R not set.
+ mfsrr1 r3 # get saved cr0 bits now to dual issue
+ ori r1,r1,0x100
+ mtspr RPA,r1
+ tlbli r0
+/* Do not update PTE if R bit already set, this will save one cache line
+writeback at a later time, and avoid even more bus traffic in
+multiprocessing systems, when several processors access the same PTEGs.
+We also hope that the reference bit will be already set. */
+ bne+ 3f
+#ifdef MULTIPROCESSING
+ srwi r1,r1,8 # get byte 7 of pte
+ stb r1,+6(r2) # update page table
+#else
+ sth r1,+6(r2) # update page table
+#endif
+#endif
+3: mtcrf 0x80,r3 # restore CR0
+ rfi # return to executing program
+
+/* The preceding code is 20 to 25 instructions long, which occupies
+3 or 4 cache lines. */
+4: andi. r0,r3,0x0040 # see if we have done second hash
+ lis r1,0x4000 # set up error code in case next branch taken
+ bne- 6f # speculatively issue the following
+ mfspr r2,HASH2 # get the second pointer
+ ori r3,r3,0x0040 # change the compare value
+ lwz r1,0(r2) # load first entry
+ b 0b # and go back to main loop
+/* We are now at 27 to 32 instructions, using 3 or 4 cache lines for all
+cases in which the TLB is successfully loaded. */
+
+/* Guarded memory protection violation: synthesize an ISI exception. */
+5: lis r1,0x1000 # set srr1<3>=1 to flag guard violation
+/* Entry Not Found branches here with r1 correctly set. */
+6: mfsrr1 r3
+ mfmsr r0
+ insrwi r1,r3,16,16 # build srr1 for ISI exception
+ mtsrr1 r1 # set srr1
+/* It seems few people have realized rlwinm can be used to clear a bit or
+a field of contiguous bits in a register by setting mask_begin>mask_end. */
+ rlwinm r0,r0,0,15,13 # clear the msr<tgpr> bit
+ mtcrf 0x80, r3 # restore CR0
+ mtmsr r0 # flip back to the native gprs
+ isync # Required from 602 doc!
+ b ISIVec # go to instruction access exception
+/* Up to now there are 37 to 42 instructions so at least 20 could be
+inserted for complex cases or for statistics recording. */
+
+
+/*
+ Data TLB miss on load flow
+ Entry at 0x1100 with the following:
+ srr0 -> address of instruction that caused the miss
+ srr1 -> 0:3=cr0, 13=0 (data), 14=lru way, 15=0, 16:31=saved MSR
+ msr<tgpr> -> 1
+ dMiss -> ea that missed
+ dCmp -> the compare value for the va that missed
+ hash1 -> pointer to first hash pteg
+ hash2 -> pointer to second hash pteg
+
+ Register usage:
+ r0 is limit address during search / scratch after
+ r1 is pte data / error code for DSI exception when search fails
+ r2 is pointer to pte
+ r3 is compare value during search / scratch after
+*/
+ .org tlb_handlers+0x100
+ mfspr r2,HASH1
+ lwz r1,0(r2) # Start memory access as soon as possible
+ mfspr r3,DCMP # to load the cache.
+0: la r0,48(r2) # Use explicit loop to avoid using ctr
+1: cmpw r1,r3 # In theory the loop is somewhat slower
+ beq- 2f # than documentation example
+ cmpw r0,r2 # but we gain from starting cache load
+ lwzu r1,8(r2) # earlier and using slots between load
+ bne+ 1b # and comparison for other purposes.
+ cmpw r1,r3
+ bne- 4f # Secondary hash check
+2: lwz r1,4(r2) # Found: load second word of PTE
+ mfspr r0,DMISS # get miss address during load delay
+#ifdef ASSUME_REF_SET
+ mtspr RPA,r1
+ mfsrr1 r3
+ tlbld r0
+#else
+ andi. r3,r1,0x100 # check R bit ahead to help folding
+ mfsrr1 r3 # get saved cr0 bits now to dual issue
+ ori r1,r1,0x100
+ mtspr RPA,r1
+ tlbld r0
+/* Do not update PTE if R bit already set, this will save one cache line
+writeback at a later time, and avoid even more bus traffic in
+multiprocessing systems, when several processors access the same PTEGs.
+We also hope that the reference bit will be already set. */
+ bne+ 3f
+#ifdef MULTIPROCESSING
+ srwi r1,r1,8 # get byte 7 of pte
+ stb r1,+6(r2) # update page table
+#else
+ sth r1,+6(r2) # update page table
+#endif
+#endif
+3: mtcrf 0x80,r3 # restore CR0
+ rfi # return to executing program
+
+/* The preceding code is 18 to 23 instructions long, which occupies
+3 cache lines. */
+4: andi. r0,r3,0x0040 # see if we have done second hash
+ lis r1,0x4000 # set up error code in case next branch taken
+ bne- 9f # speculatively issue the following
+ mfspr r2,HASH2 # get the second pointer
+ ori r3,r3,0x0040 # change the compare value
+ lwz r1,0(r2) # load first entry asap
+ b 0b # and go back to main loop
+/* We are now at 25 to 30 instructions, using 3 or 4 cache lines for all
+cases in which the TLB is successfully loaded. */
+
+
+/*
+ Data TLB miss on store or not dirty page flow
+ Entry at 0x1200 with the following:
+ srr0 -> address of instruction that caused the miss
+ srr1 -> 0:3=cr0, 13=0 (data), 14=lru way, 15=1, 16:31=saved MSR
+ msr<tgpr> -> 1
+ dMiss -> ea that missed
+ dCmp -> the compare value for the va that missed
+ hash1 -> pointer to first hash pteg
+ hash2 -> pointer to second hash pteg
+
+ Register usage:
+ r0 is limit address during search / scratch after
+ r1 is pte data / error code for DSI exception when search fails
+ r2 is pointer to pte
+ r3 is compare value during search / scratch after
+*/
+ .org tlb_handlers+0x200
+ mfspr r2,HASH1
+ lwz r1,0(r2) # Start memory access as soon as possible
+ mfspr r3,DCMP # to load the cache.
+0: la r0,48(r2) # Use explicit loop to avoid using ctr
+1: cmpw r1,r3 # In theory the loop is somewhat slower
+ beq- 2f # than documentation example
+ cmpw r0,r2 # but we gain from starting cache load
+ lwzu r1,8(r2) # earlier and using slots between load
+ bne+ 1b # and comparison for other purposes.
+ cmpw r1,r3
+ bne- 4f # Secondary hash check
+2: lwz r1,4(r2) # Found: load second word of PTE
+ mfspr r0,DMISS # get miss address during load delay
+/* We could simply set the C bit and then rely on hardware to flag protection
+violations. This raises the problem that a page which actually has not been
+modified may be marked as dirty, violating the OEA model for guaranteed
+bit settings (table 5-8 of 603eUM.pdf). This can have harmful consequences
+on operating system memory management routines, and play havoc with
+copy-on-write schemes. So the protection check is ABSOLUTELY necessary. */
+ andi. r3,r1,0x80 # check C bit
+ beq- 5f # if (C==0) go to check protection
+3: mfsrr1 r3 # get the saved cr0 bits
+ mtspr RPA,r1 # set the pte
+ tlbld r0 # load the dtlb
+ mtcrf 0x80,r3 # restore CR0
+ rfi # return to executing program
+/* The preceding code is 20 instructions long, which occupies
+3 cache lines. */
+4: andi. r0,r3,0x0040 # see if we have done second hash
+ lis r1,0x4200 # set up error code in case next branch taken
+ bne- 9f # speculatively issue the following
+ mfspr r2,HASH2 # get the second pointer
+ ori r3,r3,0x0040 # change the compare value
+ lwz r1,0(r2) # load first entry asap
+ b 0b # and go back to main loop
+/* We are now at 27 instructions, using 3 or 4 cache lines for all
+cases in which the TLB C bit is already set. */
+
+#ifdef DIRTY_MEANS_WRITABLE
+5: lis r1,0x0A00 # protection violation on store
+#else
+/*
+ Entry found and C==0: check protection before setting C:
+ Register usage:
+ r0 is dMiss register
+ r1 is PTE entry (to be copied to RPA if success)
+ r2 is pointer to pte
+ r3 is trashed
+
+ For the 603e, the key bit in SRR1 helps to decide whether there is a
+ protection violation. However the way the check is done in the manual is
+ not very efficient. The code shown here works as well for 603 and 603e and
+ is much more efficient for the 603 and comparable to the manual example
+ * for 603e. This code, however, has quite a bad structure because it
+ * has been reordered to speed up the most common cases.
+*/
+/* The first of the following two instructions could be replaced by
+andi. r3,r1,3 but it would compete with cmplwi for cr0 resource. */
+5: clrlwi r3,r1,30 # Extract two low order bits
+ cmplwi r3,2 # Test for PP=10
+ bne- 7f # assume fallthrough is more frequent
+6: ori r1,r1,0x180 # set referenced and changed bit
+ sth r1,6(r2) # update page table
+ b 3b # and finish loading TLB
+/* We are now at 33 instructions, using 5 cache lines. */
+7: bgt- 8f # if PP=11 then DSI protection exception
+/* This code only works if key bit is present (602/603e/603ev) */
+#ifdef USE_KEY_BIT
+ mfsrr1 r3 # get the KEY bit and test it
+ andis. r3,r3,0x0008
+ beq 6b # default prediction taken, truly better ?
+#else
+/* This code is for all 602 and 603 family models: */
+ mfsrr1 r3 # Here the trick is to use the MSR PR bit as a
+ mfsrin r0,r0 # shift count for an rlwnm. instruction which
+ extrwi r3,r3,1,17 # extracts and tests the correct key bit from
+ rlwnm. r3,r0,r3,1,1 # the segment register. RISC they said...
+ mfspr r0,DMISS # Restore fault address to r0
+ beq 6b # if 0 load tlb else protection fault
+#endif
+/* We are now at 40 instructions, (37 if using key bit), using 5 cache
+lines in all cases in which the C bit is successfully set */
+8: lis r1,0x0A00 # protection violation on store
+#endif /* DIRTY_MEANS_WRITABLE */
+/* PTE entry not found branches here with the DSISR code in r1 */
+9: mfsrr1 r3
+ mtdsisr r1
+ clrlwi r2,r3,16 # set up srr1 for DSI exception
+ mfmsr r0
+/* I have some doubts about the usefulness of the xori instruction in a
+mixed or pure little-endian environment. The address is in the same
+doubleword, hence in the same protection domain and performing an exclusive
+or with 7 is only valid for byte accesses. */
+#ifdef CHECK_MIXED_ENDIAN
+ andi. r1,r2,1 # test LE bit ahead to help folding
+#endif
+ mtsrr1 r2
+ rlwinm r0,r0,0,15,13 # clear the msr<tgpr> bit
+ mfspr r1,DMISS # get miss address
+#ifdef CHECK_MIXED_ENDIAN
+ beq 1f # if little endian then:
+ xori r1,r1,0x07 # de-mung the data address
+1:
+#endif
+ mtdar r1 # put in dar
+ mtcrf 0x80,r3 # restore CR0
+ mtmsr r0 # flip back to the native gprs
+ isync # required from 602 manual
+ b DSIVec # branch to DSI exception
+/* We are now between 50 and 56 instructions. Close to the limit
+but should be sufficient in case bugs are found. */
+/* Altogether the three handlers occupy 128 instructions in the worst
+case, 64 instructions could still be added (non contiguously). */
+ .org tlb_handlers+0x300
+ .globl _handler_glue
+_handler_glue:
+/* Entry code for exceptions: DSI (0x300), ISI(0x400), alignment(0x600) and
+ * traps(0x700). In theory it is not necessary to save and restore r13 and all
+ * higher numbered registers, but it is done because it allowed calling the
+ * firmware (PPCBug) for debugging in the very first stages when writing the
+ * bootloader.
+ */
+ stwu r1,-160(r1)
+ stw r0,save_r(0)
+ mflr r0
+ stmw r2,save_r(2)
+ bl 0f
+0: mfctr r4
+ stw r0,save_lr
+ mflr r9 /* Interrupt vector + few instructions */
+ la r10,160(r1)
+ stw r4,save_ctr
+ mfcr r5
+ lwz r8,2f-0b(r9)
+ mfxer r6
+ stw r5,save_cr
+ mtctr r8
+ stw r6,save_xer
+ mfsrr0 r7
+ stw r10,save_r(1)
+ mfsrr1 r8
+ stw r7,save_nip
+ la r4,8(r1)
+ lwz r13,1f-0b(r9)
+ rlwinm r3,r9,24,0x3f /* Interrupt vector >> 8 */
+ stw r8,save_msr
+ bctrl
+
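+/* The handler called through ctr returns here: restore the saved context
+ * and resume the interrupted program with rfi.
+ */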
+ lwz r7,save_msr
+ lwz r6,save_nip
+ mtsrr1 r7
+ lwz r5,save_xer
+ mtsrr0 r6
+ lwz r4,save_ctr
+ mtxer r5
+ lwz r3,save_lr
+ mtctr r4
+ lwz r0,save_cr
+ mtlr r3
+ lmw r2,save_r(2)
+ mtcr r0
+ lwz r0,save_r(0)
+ la r1,160(r1)
+ rfi
+1: .long (__bd)@fixup
+2: .long (_handler)@fixup
+ .section .fixup,"aw"
+ .align 2
+ .long 1b, 2b
+ .previous
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/head.S b/c/src/lib/libbsp/powerpc/shared/bootloader/head.S
new file mode 100644
index 0000000000..232232be50
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/head.S
@@ -0,0 +1,381 @@
+/*
+ * head.S -- Bootloader Entry point
+ *
+ * Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
+ *
+ * Modified to compile in RTEMS development environment
+ * by Eric Valette
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include "bootldr.h"
+#include <libcpu/cpu.h>
+#include <rtems/score/targopts.h>
+#include "asm.h"
+
+#undef TEST_PPCBUG_CALLS
+#define FRAME_SIZE 32
+#define LOCK_CACHES (HID0_DLOCK|HID0_ILOCK)
+#define INVL_CACHES (HID0_DCI|HID0_ICFI)
+#define ENBL_CACHES (HID0_DCE|HID0_ICE)
+
+#define USE_PPCBUG
+#undef USE_PPCBUG
+
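+/* MONITOR_ENTER switches the exception prefix back to the firmware (MSR_IP)
+ * and issues a PPCBug system call; 0x63 is presumably the .RETURN service
+ * (the same r10-based calling convention is used for .OUTLN further down).
+ */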
+#define MONITOR_ENTER \
+ mfmsr r10 ; \
+ ori r10,r10,MSR_IP ; \
+ mtmsr r10 ; \
+ li r10,0x63 ; \
+ sc
+
+ START_GOT
+ GOT_ENTRY(_GOT2_TABLE_)
+ GOT_ENTRY(_FIXUP_TABLE_)
+ GOT_ENTRY(.bss)
+ GOT_ENTRY(codemove)
+ GOT_ENTRY(0)
+ GOT_ENTRY(__bd)
+ GOT_ENTRY(moved)
+ GOT_ENTRY(_binary_rtems_gz_start)
+ GOT_ENTRY(_binary_initrd_gz_start)
+ GOT_ENTRY(_binary_initrd_gz_end)
+#ifdef TEST_PPCBUG_CALLS
+ GOT_ENTRY(banner_start)
+ GOT_ENTRY(banner_end)
+#endif
+ END_GOT
+ .globl start
+ .type start,@function
+/* Point the stack into the PreP partition header in the x86 reserved
+ * code area, so that simple C routines can be called.
+ */
+start:
+#ifdef USE_PPCBUG
+ MONITOR_ENTER
+#endif
+ bl 1f
+1: mflr r1
+ li r0,0
+ stwu r0,start-1b-0x400+0x1b0-FRAME_SIZE(r1)
+ stmw r26,FRAME_SIZE-24(r1)
+ GET_GOT
+ mfmsr r28 /* Turn off interrupts */
+ ori r0,r28,MSR_EE
+ xori r0,r0,MSR_EE
+ mtmsr r0
+
+/* Enable the caches, from now on cr2.eq set means processor is 601 */
+ mfpvr r0
+ mfspr r29,HID0
+ srwi r0,r0,16
+ cmplwi cr2,r0,1
+ beq 2,2f
+#ifndef USE_PPCBUG
+ ori r0,r29,ENBL_CACHES|INVL_CACHES|LOCK_CACHES
+ xori r0,r0,INVL_CACHES|LOCK_CACHES
+ sync
+ isync
+ mtspr HID0,r0
+#endif
+2: bl reloc
+
+/* save all the parameters and the original msr/hid0/r31 */
+ lwz bd,GOT(__bd)
+ stw r3,0(bd)
+ stw r4,4(bd)
+ stw r5,8(bd)
+ stw r6,12(bd)
+ lis r3,__size@sectoff@ha
+ stw r7,16(bd)
+ stw r8,20(bd)
+ addi r3,r3,__size@sectoff@l
+ stw r9,24(bd)
+ stw r10,28(bd)
+ stw r28,o_msr(bd)
+ stw r29,o_hid0(bd)
+ stw r31,o_r31(bd)
+
+/* Call the routine to fill boot_data structure from residual data.
+ * And to find where the code has to be moved.
+ */
+ bl early_setup
+
+/* Now we need to relocate ourselves, where we are told to. First put a
+ * copy of the codemove routine to some place in memory.
+ * (which may be where the 0x41 partition was loaded, so size is critical).
+ */
+ lwz r4,GOT(codemove)
+ li r5,_size_codemove
+ lwz r3,mover(bd)
+ lwz r6,cache_lsize(bd)
+ bl codemove
+ mtctr r3 # Where the temporary codemove is.
+ lwz r3,image(bd)
+ lis r5,_edata@sectoff@ha
+ lwz r4,GOT(0) # Our own address
+ addi r5,r5,_edata@sectoff@l
+ lwz r6,cache_lsize(bd)
+ lwz r8,GOT(moved)
+ sub r7,r3,r4 # Difference to adjust pointers.
+ add r8,r8,r7
+ add r30,r30,r7
+ add bd,bd,r7
+/* Call the copy routine but return to the new area. */
+ mtlr r8 # for the return address
+ bctr # returns to the moved instruction
+/* Establish the new top stack frame. */
+moved: lwz r1,stack(bd)
+ li r0,0
+ stwu r0,-16(r1)
+
+/* relocate again */
+ bl reloc
+/* Clear all of BSS */
+ lwz r10,GOT(.bss)
+ li r0,__bss_words@sectoff@l
+ subi r10,r10,4
+ cmpwi r0,0
+ mtctr r0
+ li r0,0
+ beq 4f
+3: stwu r0,4(r10)
+ bdnz 3b
+
+/* Final memory initialization. First switch to unmapped mode
+ * in case the FW had set the MMU on, and flush the TLB to avoid
+ * stale entries from interfering. No I/O access is allowed
+ * during this time!
+ */
+#ifndef USE_PPCBUG
+4: bl MMUoff
+#endif
+ bl flush_tlb
+/* Some firmware versions leave stale values in the BATs, it's time
+ * to invalidate them to avoid interference with our own mappings.
+ * But the 601 valid bit is in the BATL (IBAT only) and others are in
+ * the [ID]BATU. Bloat, bloat.. fortunately thrown away later.
+ */
+ li r3,0
+ beq cr2,5f
+ mtdbatu 0,r3
+ mtdbatu 1,r3
+ mtdbatu 2,r3
+ mtdbatu 3,r3
+5: mtibatu 0,r3
+ mtibatl 0,r3
+ mtibatu 1,r3
+ mtibatl 1,r3
+ mtibatu 2,r3
+ mtibatl 2,r3
+ mtibatu 3,r3
+ mtibatl 3,r3
+ lis r3,__size@sectoff@ha
+ addi r3,r3,__size@sectoff@l
+ sync # We are going to touch SDR1 !
+ bl mm_init
+ bl MMUon
+
+/* Now we are mapped and can perform I/O if we want */
+#ifdef TEST_PPCBUG_CALLS
+/* Experience seems to show that PPCBug can only be called with the
+ * data cache disabled and with MMU disabled. Bummer.
+ */
+ li r10,0x22 # .OUTLN
+ lwz r3,GOT(banner_start)
+ lwz r4,GOT(banner_end)
+ sc
+#endif
+ bl setup_hw
+ lwz r4,GOT(_binary_rtems_gz_start)
+ lis r5,_rtems_gz_size@sectoff@ha
+ lwz r6,GOT(_binary_initrd_gz_start)
+ lis r3,_rtems_size@sectoff@ha
+ lwz r7,GOT(_binary_initrd_gz_end)
+ addi r5,r5,_rtems_gz_size@sectoff@l
+ addi r3,r3,_rtems_size@sectoff@l
+ sub r7,r7,r6
+ bl decompress_kernel
+
+/* Back here we are unmapped and we start the kernel, passing up to eight
+ * parameters just in case, only r3 to r7 used for now. Flush the tlb so
+ * that the loaded image starts in a clean state.
+ */
+ bl flush_tlb
+ lwz r3,0(bd)
+ lwz r4,4(bd)
+ lwz r5,8(bd)
+ lwz r6,12(bd)
+ lwz r7,16(bd)
+ lwz r8,20(bd)
+ lwz r9,24(bd)
+ lwz r10,28(bd)
+
+ lwz r30,0(0)
+ mtctr r30
+/*
+ * Linux code again
+ lis r30,0xdeadc0de@ha
+ addi r30,r30,0xdeadc0de@l
+ stw r30,0(0)
+ li r30,0
+*/
+ dcbst 0,r30 /* Make sure it's in memory ! */
+/* We just flash invalidate and disable the dcache, unless it's a 601,
+ * critical areas have been flushed and we don't care about the stack
+ * and other scratch areas.
+ */
+ beq cr2,1f
+ mfspr r0,HID0
+ ori r0,r0,HID0_DCI|HID0_DCE
+ sync
+ mtspr HID0,r0
+ xori r0,r0,HID0_DCI|HID0_DCE
+ mtspr HID0,r0
+/* Provisional return to FW, works for PPCBug */
+#if 0
+ MONITOR_ENTER
+#else
+1: bctr
+#endif
+
+
+
+/* relocation function, r30 must point to got2+0x8000 */
+reloc:
+/* Adjust got2 pointers, no need to check for 0, this code already puts
+ * a few entries in the table.
+ */
+ li r0,__got2_entries@sectoff@l
+ la r12,GOT(_GOT2_TABLE_)
+ lwz r11,GOT(_GOT2_TABLE_)
+ mtctr r0
+ sub r11,r12,r11
+ addi r12,r12,-4
+1: lwzu r0,4(r12)
+ add r0,r0,r11
+ stw r0,0(r12)
+ bdnz 1b
+
+/* Now adjust the fixups and the pointers to the fixups in case we need
+ * to move ourselves again.
+ */
+2: li r0,__fixup_entries@sectoff@l
+ lwz r12,GOT(_FIXUP_TABLE_)
+ cmpwi r0,0
+ mtctr r0
+ addi r12,r12,-4
+ beqlr
+3: lwzu r10,4(r12)
+ lwzux r0,r10,r11
+ add r0,r0,r11
+ stw r10,0(r12)
+ stw r0,0(r10)
+ bdnz 3b
+ blr
+
+/* Set the MMU on and off: code is always mapped 1:1 and does not need MMU,
+ * but it does not cost much to map it as well, and it catches calls through
+ * NULL function pointers.
+ */
+ .globl MMUon
+ .type MMUon,@function
+MMUon: mfmsr r0
+ ori r0,r0,MSR_IR|MSR_DR|MSR_IP
+ mflr r11
+ xori r0,r0,MSR_IP
+ mtsrr0 r11
+ mtsrr1 r0
+ rfi
+ .globl MMUoff
+ .type MMUoff,@function
+MMUoff: mfmsr r0
+ ori r0,r0,MSR_IR|MSR_DR|MSR_IP
+ mflr r11
+ xori r0,r0,MSR_IR|MSR_DR
+ mtsrr0 r11
+ mtsrr1 r0
+ rfi
+
+/* Due to the PPC architecture (and according to the specifications), a
+ * series of tlbie which goes through a whole 256 MB segment always flushes
+ * the whole TLB. This is obviously overkill and slow, but who cares ?
+ * It takes about 1 ms on a 200 MHz 603e and works even if residual data
+ * get the number of TLB entries wrong.
+ */
+flush_tlb:
+ lis r11,0x1000
+1: addic. r11,r11,-0x1000
+ tlbie r11
+ bnl 1b
+/* tlbsync is not implemented on 601, so use sync which seems to be a superset
+ * of tlbsync in all cases and do not bother with CPU dependent code
+ */
+ sync
+ blr
+
+ .globl codemove
+codemove:
+ .type codemove,@function
+/* r3 dest, r4 src, r5 length in bytes, r6 cachelinesize */
+ cmplw cr1,r3,r4
+ addi r0,r5,3
+ srwi. r0,r0,2
+ beq cr1,4f /* In place copy is not necessary */
+ beq 7f /* Protect against 0 count */
+ mtctr r0
+ bge cr1,2f
+
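+/* Destination below source: copy forward, one word at a time. */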
+ la r8,-4(r4)
+ la r7,-4(r3)
+1: lwzu r0,4(r8)
+ stwu r0,4(r7)
+ bdnz 1b
+ b 4f
+
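+/* Destination above source: copy backward so overlapping areas are safe. */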
+2: slwi r0,r0,2
+ add r8,r4,r0
+ add r7,r3,r0
+3: lwzu r0,-4(r8)
+ stwu r0,-4(r7)
+ bdnz 3b
+
+/* Now flush the cache: note that we must start from a cache aligned
+ * address. Otherwise we might miss one cache line.
+ */
+4: cmpwi r6,0
+ add r5,r3,r5
+ beq 7f /* Always flush prefetch queue in any case */
+ subi r0,r6,1
+ andc r3,r3,r0
+ mr r4,r3
+5: cmplw r4,r5
+ dcbst 0,r4
+ add r4,r4,r6
+ blt 5b
+ sync /* Wait for all dcbst to complete on bus */
+ mr r4,r3
+6: cmplw r4,r5
+ icbi 0,r4
+ add r4,r4,r6
+ blt 6b
+7: sync /* Wait for all icbi to complete on bus */
+ isync
+ blr
+ .size codemove,.-codemove
+_size_codemove=.-codemove
+
+ .section ".data" # .rodata
+ .align 2
+#ifdef TEST_PPCBUG_CALLS
+banner_start:
+ .ascii "This message was printed by PPCBug with MMU enabled"
+banner_end:
+#endif
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/lib.c b/c/src/lib/libbsp/powerpc/shared/bootloader/lib.c
new file mode 100644
index 0000000000..242f637b5d
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/lib.c
@@ -0,0 +1,53 @@
+/* lib.c
+ *
+ * This file contains the implementation of functions that are unresolved
+ * in the bootloader. Unfortunately it shall not use any object code
+ * from newlib or rtems because they are not compiled with the right option!!!
+ *
+ * You've been warned!!!
+ *
+ * CopyRight (C) 1998, 1999 valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+
+void* memset(void *p, int c, unsigned int n)
+{
+ char *q =p;
+ for(; n>0; --n) *q++=c;
+ return p;
+}
+
+void* memcpy(void *dst, const void * src, unsigned int n)
+{
+ unsigned char *d=dst;
+ const unsigned char *s=src;
+
+ while(n-- > 0) *d++=*s++;
+ return dst;
+}
+
+char* strcat(char * dest, const char * src)
+{
+ char *tmp = dest;
+
+ while (*dest)
+ dest++;
+ while ((*dest++ = *src++) != '\0')
+ ;
+ return tmp;
+}
+
+int strlen(const char* string)
+{
+ register int i = 0;
+
+ while (string[i] != '\0')
+ ++i;
+ return i;
+}
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/misc.c b/c/src/lib/libbsp/powerpc/shared/bootloader/misc.c
new file mode 100644
index 0000000000..e7dd568c22
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/misc.c
@@ -0,0 +1,528 @@
+/*
+ * misc.c -- Miscellaneous bootloader routines
+ *
+ * Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
+ *
+ * Modified to compile in RTEMS development environment
+ * by Eric Valette
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+#include <string.h>
+#include <libcpu/cpu.h>
+#include "bootldr.h"
+#include <libcpu/spr.h>
+#include "zlib.h"
+#include <libcpu/page.h>
+#include <libcpu/byteorder.h>
+
+SPR_RW(DEC)
+SPR_RO(PVR)
+
+struct inode;
+struct wait_queue;
+struct buffer_head;
+typedef struct { int counter; } atomic_t;
+
+
+typedef struct page {
+ /* these must be first (free area handling) */
+ struct page *next;
+ struct page *prev;
+ struct inode *inode;
+ unsigned long offset;
+ struct page *next_hash;
+ atomic_t count;
+ unsigned long flags; /* atomic flags, some possibly updated asynchronously */
+ struct wait_queue *wait;
+ struct page **pprev_hash;
+ struct buffer_head * buffers;
+} mem_map_t;
+
+
+extern opaque mm_private, pci_private, v86_private, console_private;
+
+#define CONSOLE_ON_SERIAL "console=ttyS0"
+
+extern struct console_io vacuum_console_functions;
+extern opaque log_console_setup, serial_console_setup, vga_console_setup;
+
+boot_data __bd = {0, 0, 0, 0, 0, 0, 0, 0,
+ 32, 0, 0, 0, 0, 0, 0,
+ &mm_private,
+ NULL,
+ &pci_private,
+ NULL,
+ &v86_private,
+ "root=/dev/hdc1"
+ };
+
+static void exit(void) __attribute__((noreturn));
+
+static void exit(void) {
+ printk("\nOnly way out is to press the reset button!\n");
+ asm volatile("": : :"memory");
+ while(1);
+}
+
+
+void hang(const char *s, u_long x, ctxt *p) {
+ u_long *r1;
+#ifdef DEBUG
+ print_all_maps("\nMemory mappings at exception time:\n");
+#endif
+ printk("%s %lx NIP: %p LR: %p\n"
+ "Callback trace (stack:return address)\n",
+ s, x, (void *) p->nip, (void *) p->lr);
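+	/* Skip our own frames, then walk the stack back chain, printing each
+	 * frame address and the return address saved next to the chain word.
+	 */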
+ asm volatile("lwz %0,0(1); lwz %0,0(%0); lwz %0,0(%0)": "=b" (r1));
+ while(r1) {
+ printk(" %p:%p\n", r1, (void *) r1[1]);
+ r1 = (u_long *) *r1;
+ }
+ exit();
+};
+
+
+void *zalloc(void *x, unsigned items, unsigned size)
+{
+ void *p = salloc(items*size);
+
+ if (!p) {
+ printk("oops... not enough memory for gunzip\n");
+ }
+ return p;
+}
+
+void zfree(void *x, void *addr, unsigned nb)
+{
+ sfree(addr);
+}
+
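+/* gzip member header flag bits and compression method, as defined by RFC 1952 */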
+#define HEAD_CRC 2
+#define EXTRA_FIELD 4
+#define ORIG_NAME 8
+#define COMMENT 0x10
+#define RESERVED 0xe0
+
+#define DEFLATED 8
+
+
+void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
+{
+ z_stream s;
+ int r, i, flags;
+
+ /* skip header */
+ i = 10;
+ flags = src[3];
+ if (src[2] != DEFLATED || (flags & RESERVED) != 0) {
+ printk("bad gzipped data\n");
+ exit();
+ }
+ if ((flags & EXTRA_FIELD) != 0)
+ i = 12 + src[10] + (src[11] << 8);
+ if ((flags & ORIG_NAME) != 0)
+ while (src[i++] != 0)
+ ;
+ if ((flags & COMMENT) != 0)
+ while (src[i++] != 0)
+ ;
+ if ((flags & HEAD_CRC) != 0)
+ i += 2;
+ if (i >= *lenp) {
+ printk("gunzip: ran out of data in header\n");
+ exit();
+ }
+
+ s.zalloc = zalloc;
+ s.zfree = zfree;
+ r = inflateInit2(&s, -MAX_WBITS);
+ if (r != Z_OK) {
+ printk("inflateInit2 returned %d\n", r);
+ exit();
+ }
+ s.next_in = src + i;
+ s.avail_in = *lenp - i;
+ s.next_out = dst;
+ s.avail_out = dstlen;
+ r = inflate(&s, Z_FINISH);
+ if (r != Z_OK && r != Z_STREAM_END) {
+ printk("inflate returned %d\n", r);
+ exit();
+ }
+ *lenp = s.next_out - (unsigned char *) dst;
+ inflateEnd(&s);
+}
+
+void decompress_kernel(int kernel_size, void * zimage_start, int len,
+ void * initrd_start, int initrd_len ) {
+ u_char *parea;
+ RESIDUAL* rescopy;
+ int zimage_size= len;
+
+ /* That's a mess, we have to copy the residual data twice just in
+ * case it happens to be in the low memory area where the kernel
+ * is going to be unpacked. Later we have to copy it back to
+ * lower addresses because only the lowest part of memory is mapped
+ * during boot.
+ */
+ parea=__palloc(kernel_size, PA_LOW);
+ if(!parea) {
+ printk("Not enough memory to uncompress the kernel.");
+ exit();
+ }
+ /* Note that this clears the bss as a side effect, so some code
+ * with ugly special case for SMP could be removed from the kernel!
+ */
+ memset(parea, 0, kernel_size);
+ printk("\nUncompressing the kernel...\n");
+ rescopy=salloc(sizeof(RESIDUAL));
+	/* Let us hope that the residual data is aligned on a word boundary */
+ *rescopy = *bd->residual;
+ bd->residual = (void *)PAGE_ALIGN(kernel_size);
+
+ gunzip(parea, kernel_size, zimage_start, &zimage_size);
+
+ bd->of_entry = 0;
+ bd->load_address = 0;
+ bd->r6 = (char *)bd->residual+PAGE_ALIGN(sizeof(RESIDUAL));
+ bd->r7 = bd->r6+strlen(bd->cmd_line);
+ if ( initrd_len ) {
+ /* We have to leave some room for the hash table and for the
+ * whole array of struct page. The hash table would be better
+ * located at the end of memory if possible. With some bridges
+ * DMA from the last pages of memory is slower because
+ * prefetching from PCI has to be disabled to avoid accessing
+	 * non-existent memory. So it is the ideal place to put the
+ * hash table.
+ */
+ unsigned tmp = rescopy->TotalMemory;
+ /* It's equivalent to tmp & (-tmp), but using the negation
+ * operator on unsigned variables looks so ugly.
+ */
+ if ((tmp & (~tmp+1)) != tmp) tmp <<= 1; /* Next power of 2 */
+ tmp /= 256; /* Size of hash table */
+ if (tmp> (2<<20)) tmp=2<<20;
+ tmp = tmp*2 + 0x40000; /* Alignment can double size + 256 kB */
+ tmp += (rescopy->TotalMemory / PAGE_SIZE)
+ * sizeof(struct page);
+ bd->load_address = (void *)PAGE_ALIGN((int)bd->r7 + tmp);
+ bd->of_entry = (char *)bd->load_address+initrd_len;
+ }
+#ifdef DEBUG
+ printk("Kernel at 0x%p, size=0x%x\n", NULL, kernel_size);
+ printk("Initrd at 0x%p, size=0x%x\n",bd->load_address, initrd_len);
+ printk("Residual data at 0x%p\n", bd->residual);
+ printk("Command line at 0x%p\n",bd->r6);
+#endif
+ printk("done\nNow booting...\n");
+ MMUoff(); /* We need to access address 0 ! */
+ codemove(0, parea, kernel_size, bd->cache_lsize);
+ codemove(bd->residual, rescopy, sizeof(RESIDUAL), bd->cache_lsize);
+ codemove(bd->r6, bd->cmd_line, sizeof(bd->cmd_line), bd->cache_lsize);
+ /* codemove checks for 0 length */
+ codemove(bd->load_address, initrd_start, initrd_len, bd->cache_lsize);
+}
+
+void
+setup_hw(void)
+{
+ char *cp, ch;
+ register RESIDUAL * res;
+ /* PPC_DEVICE * nvram; */
+ struct pci_dev *p, *default_vga;
+ int timer, err;
+ u_short default_vga_cmd;
+ static unsigned int indic;
+
+ indic = 0;
+
+ res=bd->residual;
+ default_vga=NULL;
+ default_vga_cmd = 0;
+
+#define vpd res->VitalProductData
+ if (_read_PVR()>>16 != 1) {
+ if ( res && vpd.ProcessorBusHz ) {
+ ticks_per_ms = vpd.ProcessorBusHz/
+ (vpd.TimeBaseDivisor ? vpd.TimeBaseDivisor : 4000);
+ } else {
+ ticks_per_ms = 16500; /* assume 66 MHz on bus */
+ }
+ }
+
+ select_console(CONSOLE_LOG);
+
+ /* We check that the keyboard is present and immediately
+ * select the serial console if not.
+ */
+ err = kbdreset();
+ if (err) select_console(CONSOLE_SERIAL);
+
+ printk("\nModel: %s\nSerial: %s\n"
+ "Processor/Bus frequencies (Hz): %ld/%ld\n"
+ "Time Base Divisor: %ld\n"
+ "Memory Size: %x\n",
+ vpd.PrintableModel,
+ vpd.Serial,
+ vpd.ProcessorHz,
+ vpd.ProcessorBusHz,
+ (vpd.TimeBaseDivisor ? vpd.TimeBaseDivisor : 4000),
+ res->TotalMemory);
+ printk("Original MSR: %lx\nOriginal HID0: %lx\nOriginal R31: %lx\n",
+ bd->o_msr, bd->o_hid0, bd->o_r31);
+
+ /* This reconfigures all the PCI subsystem */
+ pci_init();
+
+ /* The Motorola NT firmware does not set the correct mem size */
+ if ( vpd.FirmwareSupplier == 0x10000 ) {
+ int memsize;
+ memsize = find_max_mem(bd->pci_devices);
+ if ( memsize != res->TotalMemory ) {
+ printk("Changed Memory size from %lx to %x\n",
+ res->TotalMemory, memsize);
+ res->TotalMemory = memsize;
+ res->GoodMemory = memsize;
+ }
+ }
+#define ENABLE_VGA_USAGE
+#undef ENABLE_VGA_USAGE
+#ifdef ENABLE_VGA_USAGE
+	/* Find the primary VGA device, choosing the first one found
+ * if none is enabled. The basic loop structure has been copied
+ * from linux/drivers/char/bttv.c by Alan Cox.
+ */
+ for (p = bd->pci_devices; p; p = p->next) {
+ u_short cmd;
+ if (p->class != PCI_CLASS_NOT_DEFINED_VGA &&
+ ((p->class) >> 16 != PCI_BASE_CLASS_DISPLAY))
+ continue;
+ if (p->bus->number != 0) {
+ printk("VGA device not on bus 0 not initialized!\n");
+ continue;
+ }
+ /* Only one can be active in text mode, which for now will
+ * be assumed as equivalent to having I/O response enabled.
+ */
+ pci_read_config_word(p, PCI_COMMAND, &cmd);
+ if(cmd & PCI_COMMAND_IO || !default_vga) {
+ default_vga=p;
+ default_vga_cmd=cmd;
+ }
+ }
+
+ /* Disable the enabled VGA device, if any. */
+ if (default_vga)
+ pci_write_config_word(default_vga, PCI_COMMAND,
+ default_vga_cmd&
+ ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
+ init_v86();
+ /* Same loop copied from bttv.c, this time doing the serious work */
+ for (p = bd->pci_devices; p; p = p->next) {
+ u_short cmd;
+ if (p->class != PCI_CLASS_NOT_DEFINED_VGA &&
+ ((p->class) >> 16 != PCI_BASE_CLASS_DISPLAY))
+ continue;
+ if (p->bus->number != 0) continue;
+ pci_read_config_word(p, PCI_COMMAND, &cmd);
+ pci_write_config_word(p, PCI_COMMAND,
+ cmd|PCI_COMMAND_IO|PCI_COMMAND_MEMORY);
+ printk("Calling the emulator.\n");
+ em86_main(p);
+ pci_write_config_word(p, PCI_COMMAND, cmd);
+ }
+
+ cleanup_v86_mess();
+#endif
+ /* Reenable the primary VGA device */
+ if (default_vga) {
+ pci_write_config_word(default_vga, PCI_COMMAND,
+ default_vga_cmd|
+ (PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
+ if (err) {
+ printk("Keyboard error %d, using serial console!\n",
+ err);
+ } else {
+ select_console(CONSOLE_VGA);
+ }
+ } else if (!err) {
+ select_console(CONSOLE_SERIAL);
+ if (bd->cmd_line[0] == '\0') {
+ strcat(&bd->cmd_line[0], CONSOLE_ON_SERIAL);
+ }
+ else {
+ int s = strlen (bd->cmd_line);
+ bd->cmd_line[s + 1] = ' ';
+ bd->cmd_line[s + 2] = '\0';
+ strcat(&bd->cmd_line[0], CONSOLE_ON_SERIAL);
+ }
+ }
+#if 0
+ /* In the future we may use the NVRAM to store default
+ * kernel parameters.
+ */
+ nvram=residual_find_device(~0UL, NULL, SystemPeripheral, NVRAM,
+ ~0UL, 0);
+ if (nvram) {
+ PnP_TAG_PACKET * pkt;
+ switch (nvram->DevId.Interface) {
+ case IndirectNVRAM:
+ pkt=PnP_find_packet(res->DevicePnpHeap
+ +nvram->AllocatedOffset,
+ )
+ }
+ }
+#endif
+
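+	/* Wait up to about 5 seconds for the user to start typing additions
+	 * to the command line; backspace/DEL erase, CR or LF ends the input.
+	 */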
+ printk("\nRTEMS 4.x/PPC load: ");
+ timer = 0;
+ cp = bd->cmd_line+strlen(bd->cmd_line);
+ while (timer++ < 5*1000) {
+ if (debug_tstc()) {
+ while ((ch = debug_getc()) != '\n' && ch != '\r') {
+ if (ch == '\b' || ch == 0177) {
+ if (cp != bd->cmd_line) {
+ cp--;
+ printk("\b \b");
+ }
+ } else {
+ *cp++ = ch;
+ debug_putc(ch);
+ }
+ }
+ break; /* Exit 'timer' loop */
+ }
+ udelay(1000); /* 1 msec */
+ }
+ *cp = 0;
+}
+
+
+/* Functions to deal with the residual data */
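+/* A DevID string is the 7 character EISA/PnP style identifier: three
+ * letters compressed into 5 bit fields (1 = 'A') followed by four hex
+ * digits. As a purely illustrative example, not taken from any
+ * particular board, the string "PNP0501" corresponds to vendor 0x41d0
+ * and Number 0x0501 in the encoding checked by same_DevID() below.
+ */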
+static int same_DevID(unsigned short vendor,
+ unsigned short Number,
+ char * str)
+{
+ static unsigned const char hexdigit[]="0123456789ABCDEF";
+ if (strlen(str)!=7) return 0;
+ if ( ( ((vendor>>10)&0x1f)+'A'-1 == str[0]) &&
+ ( ((vendor>>5)&0x1f)+'A'-1 == str[1]) &&
+ ( (vendor&0x1f)+'A'-1 == str[2]) &&
+ (hexdigit[(Number>>12)&0x0f] == str[3]) &&
+ (hexdigit[(Number>>8)&0x0f] == str[4]) &&
+ (hexdigit[(Number>>4)&0x0f] == str[5]) &&
+ (hexdigit[Number&0x0f] == str[6]) ) return 1;
+ return 0;
+}
+
+PPC_DEVICE *residual_find_device(unsigned long BusMask,
+ unsigned char * DevID,
+ int BaseType,
+ int SubType,
+ int Interface,
+ int n)
+{
+ int i;
+ RESIDUAL *res = bd->residual;
+ if ( !res || !res->ResidualLength ) return NULL;
+ for (i=0; i<res->ActualNumDevices; i++) {
+#define Dev res->Devices[i].DeviceId
+ if ( (Dev.BusId&BusMask) &&
+ (BaseType==-1 || Dev.BaseType==BaseType) &&
+ (SubType==-1 || Dev.SubType==SubType) &&
+ (Interface==-1 || Dev.Interface==Interface) &&
+ (DevID==NULL || same_DevID((Dev.DevId>>16)&0xffff,
+ Dev.DevId&0xffff, DevID)) &&
+ !(n--) ) return res->Devices+i;
+#undef Dev
+ }
+ return 0;
+}
+
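+/* PnP resource packets: small tags keep their payload length in the
+ * low bits of the tag byte (hence tag_small_count(*p)+1 bytes total),
+ * while large tags, recognized by tag_type(), are followed by a
+ * 16 bit little-endian length (hence the ld_le16()+3 bytes total
+ * used below).
+ */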
+PnP_TAG_PACKET *PnP_find_packet(unsigned char *p,
+ unsigned packet_tag,
+ int n)
+{
+ unsigned mask, masked_tag, size;
+ if(!p) return 0;
+ if (tag_type(packet_tag)) mask=0xff; else mask=0xF8;
+ masked_tag = packet_tag&mask;
+ for(; *p != END_TAG; p+=size) {
+ if ((*p & mask) == masked_tag && !(n--))
+ return (PnP_TAG_PACKET *) p;
+ if (tag_type(*p))
+ size=ld_le16((unsigned short *)(p+1))+3;
+ else
+ size=tag_small_count(*p)+1;
+ }
+ return 0; /* not found */
+}
+
+PnP_TAG_PACKET *PnP_find_small_vendor_packet(unsigned char *p,
+ unsigned packet_type,
+ int n)
+{
+ int next=0;
+ while (p) {
+ p = (unsigned char *) PnP_find_packet(p, 0x70, next);
+ if (p && p[1]==packet_type && !(n--))
+ return (PnP_TAG_PACKET *) p;
+ next = 1;
+ };
+ return 0; /* not found */
+}
+
+PnP_TAG_PACKET *PnP_find_large_vendor_packet(unsigned char *p,
+ unsigned packet_type,
+ int n)
+{
+ int next=0;
+ while (p) {
+ p = (unsigned char *) PnP_find_packet(p, 0x84, next);
+ if (p && p[3]==packet_type && !(n--))
+ return (PnP_TAG_PACKET *) p;
+ next = 1;
+ };
+ return 0; /* not found */
+}
+
+/* Find out the amount of installed memory. For MPC105 and IBM 660 this
+ * can be done by finding the bank with the highest memory ending address
+ */
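+/* Illustrative arithmetic, assuming the register layout used below:
+ * each enabled bank i has its ending address, in megabytes, in
+ * register 0x90+i, extended by the low two bits of register 0x98+i.
+ * If the highest value found is 0x07f, the routine returns
+ * (0x7f+1)<<20 = 0x08000000 bytes, i.e. 128MB.
+ */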
+int
+find_max_mem( struct pci_dev *dev )
+{
+ u_char banks,tmp;
+ int i, top, max;
+
+ max = 0;
+ for ( ; dev; dev = dev->next) {
+ if ( ((dev->vendor == PCI_VENDOR_ID_MOTOROLA) &&
+ (dev->device == PCI_DEVICE_ID_MOTOROLA_MPC105)) ||
+ ((dev->vendor == PCI_VENDOR_ID_IBM) &&
+ (dev->device == 0x0037/*IBM 660 Bridge*/)) ) {
+ pci_read_config_byte(dev, 0xa0, &banks);
+ for (i = 0; i < 8; i++) {
+ if ( banks & (1<<i) ) {
+ pci_read_config_byte(dev, 0x90+i, &tmp);
+ top = tmp;
+ pci_read_config_byte(dev, 0x98+i, &tmp);
+ top |= (tmp&3)<<8;
+ if ( top > max ) max = top;
+ }
+ }
+ if ( max ) return ((max+1)<<20);
+ else return(0);
+ }
+ }
+ return(0);
+}
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/mm.c b/c/src/lib/libbsp/powerpc/shared/bootloader/mm.c
new file mode 100644
index 0000000000..3807c75d85
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/mm.c
@@ -0,0 +1,982 @@
+/*
+ * mm.c -- Crude memory management for early boot.
+ *
+ * Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
+ *
+ * Modified to compile in RTEMS development environment
+ * by Eric Valette
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+/* This code is a crude memory manager for early boot for LinuxPPC.
+ * As such, it does not try to perform many optimizations depending
+ * on the processor; it only uses features which are common to
+ * all processors (no BATs...).
+ *
+ * On PreP platforms (the only ones on which it works for now),
+ * it maps 1:1 all RAM/ROM and I/O space as claimed by the
+ * residual data. The holes between these areas can be virtually
+ * remapped to any of these, since for some functions it is very handy
+ * to have virtually contiguous but physically discontiguous memory.
+ *
+ * Physical memory allocation is also very crude, since it's only
+ * designed to manage a small number of large chunks. For valloc/vfree
+ * and palloc/pfree, the unit of allocation is the 4kB page.
+ *
+ * The salloc/sfree pair was added after tracing gunzip and seeing
+ * that it performed a very large number of small allocations.
+ * For these the unit of allocation is 8 bytes (the s stands for
+ * small or subpage). This memory is cleared when allocated.
+ *
+ */
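+
+/* A hypothetical usage sketch of the allocators defined in this file
+ * (for illustration only, nothing below relies on it):
+ *
+ *	void *page  = __palloc(PAGE_SIZE, 0);	 one 4kB page from the top
+ *	void *small = salloc(24);		 8 byte granularity, zeroed
+ *	void *va    = valloc(0x4000);		 16kB of virtual address space
+ *	sfree(small); pfree(page); vfree(va);
+ */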
+
+#include <sys/types.h>
+#include <libcpu/spr.h>
+#include "bootldr.h"
+#include <libcpu/mmu.h>
+#include <libcpu/page.h>
+#include <limits.h>
+
+/* We use our own kind of simple memory areas for the loader, but
+ * we want to avoid potential clashes with kernel includes.
+ * Here a map maps contiguous areas from base to end,
+ * the firstpte entry corresponds to physical address and has the low
+ * order bits set for caching and permission.
+ */
+
+typedef struct _map {
+ struct _map *next;
+ u_long base;
+ u_long end;
+ u_long firstpte;
+} map;
+
+/* The LSB of the firstpte entries on map lists other than mappings
+ * are constants which can be checked for debugging. All these constants
+ * have the bit of weight 4 set; this bit is zero in the mappings list
+ * entries. Specifically, the value of firstpte&7 is:
+ * - 0 or 1 should not happen
+ * - 2 for RW actual virtual->physical mappings
+ * - 3 for RO actual virtual->physical mappings
+ * - 6 for free areas to be suballocated by salloc
+ * - 7 for salloc'ated areas
+ * - 4 or 5 for all others, in which case firstpte & 63 is
+ * - 4 for unused maps (on the free list)
+ * - 12 for free physical memory
+ * - 13 for physical memory in use
+ * - 20 for free virtual address space
+ * - 21 for allocated virtual address space
+ * - 28 for physical memory space suballocated by salloc
+ * - 29 for physical memory that can't be freed
+ */
+
+#define MAP_FREE_SUBS 6
+#define MAP_USED_SUBS 7
+
+#define MAP_FREE 4
+#define MAP_FREE_PHYS 12
+#define MAP_USED_PHYS 13
+#define MAP_FREE_VIRT 20
+#define MAP_USED_VIRT 21
+#define MAP_SUBS_PHYS 28
+#define MAP_PERM_PHYS 29
+
+SPR_RW(SDR1);
+SPR_RO(DSISR);
+SPR_RO(DAR);
+
+/* We need a few statically allocated free maps to bootstrap the
+ * memory management */
+static map free_maps[4] = {{free_maps+1, 0, 0, MAP_FREE},
+ {free_maps+2, 0, 0, MAP_FREE},
+ {free_maps+3, 0, 0, MAP_FREE},
+ {NULL, 0, 0, MAP_FREE}};
+struct _mm_private {
+ void *sdr1;
+ u_long hashmask;
+ map *freemaps; /* Pool of unused map structs */
+ map *mappings; /* Sorted list of virtual->physical mappings */
+ map *physavail; /* Unallocated physical address space */
+ map *physused; /* Allocated physical address space */
+ map *physperm; /* Permanently allocated physical space */
+ map *virtavail; /* Unallocated virtual address space */
+ map *virtused; /* Allocated virtual address space */
+ map *sallocfree; /* Free maps for salloc */
+ map *sallocused; /* Used maps for salloc */
+ map *sallocphys; /* Physical areas used by salloc */
+ u_int hashcnt; /* Used to cycle in PTEG when they overflow */
+} mm_private = {hashmask: 0xffc0,
+ freemaps: free_maps+0};
+
+/* A simplified hash table entry declaration */
+typedef struct _hash_entry {
+ int key;
+ u_long rpn;
+} hash_entry;
+
+void print_maps(map *, const char *);
+
+/* The handler used for all exceptions, although for now it is only
+ * designed to properly handle MMU interrupts by filling the hash table.
+ */
+
+
+void _handler(int vec, ctxt *p) {
+ map *area;
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+ u_long vaddr, cause;
+ if (vec==4 || vec==7) { /* ISI exceptions are different */
+ vaddr = p->nip;
+ cause = p->msr;
+ } else { /* Valid for DSI and alignment exceptions */
+ vaddr = _read_DAR();
+ cause = _read_DSISR();
+ }
+
+ if (vec==3 || vec==4) {
+ /* Panic if the fault is not PTE not found. */
+ if (!(cause & 0x40000000)) {
+ MMUon();
+ printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
+ hang("Memory protection violation at ", vaddr, p);
+ }
+
+ for(area=mm->mappings; area; area=area->next) {
+ if(area->base<=vaddr && vaddr<=area->end) break;
+ }
+
+ if (area) {
+ u_long hash, vsid, rpn;
+ hash_entry volatile *hte, *_hte1;
+ u_int i, alt=0, flushva;
+
+ vsid = _read_SR((void *)vaddr);
+ rpn = (vaddr&PAGE_MASK)-area->base+area->firstpte;
+ hash = vsid<<6;
+ hash ^= (vaddr>>(PAGE_SHIFT-6))&0x3fffc0;
+ hash &= mm->hashmask;
+ /* Find an empty entry in the PTEG, else
+ * replace a random one.
+ */
+ hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);
+ for (i=0; i<8; i++) {
+ if (hte[i].key>=0) goto found;
+ }
+ hash ^= mm->hashmask;
+ alt = 0x40; _hte1 = hte;
+ hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);
+
+ for (i=0; i<8; i++) {
+ if (hte[i].key>=0) goto found;
+ }
+ alt = 0;
+ hte = _hte1;
+			/* Choose a victim entry and replace it. There might be
+ * better policies to choose the victim, but in a boot
+ * loader we want simplicity as long as it works.
+ *
+ * We would not need to invalidate the TLB entry since
+ * the mapping is still valid. But this would be a mess
+ * when unmapping so we make sure that the TLB is a
+ * subset of the hash table under all circumstances.
+ */
+ i = mm->hashcnt;
+ mm->hashcnt = (mm->hashcnt+1)%8;
+ /* Note that the hash is already complemented here ! */
+ flushva = (~(hash<<9)^((hte[i].key)<<5)) &0x3ff000;
+ if (hte[i].key&0x40) flushva^=0x3ff000;
+ flushva |= ((hte[i].key<<21)&0xf0000000)
+ | ((hte[i].key<<22)&0x0fc00000);
+ hte[i].key=0;
+ asm volatile("sync; tlbie %0; sync" : : "r" (flushva));
+ found:
+ hte[i].rpn = rpn;
+ asm volatile("eieio": : );
+ hte[i].key = 0x80000000|(vsid<<7)|alt|
+ ((vaddr>>22)&0x3f);
+ return;
+ } else {
+ MMUon();
+ printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
+ hang("\nInvalid memory access attempt at ", vaddr, p);
+ }
+ } else {
+ MMUon();
+ printk("\nPanic: vector=%x, dsisr=%lx, faultaddr =%lx, msr=%lx opcode=%lx\n", vec,
+ cause, p->nip, p->msr, * ((unsigned int*) p->nip) );
+ if (vec == 7) {
+ unsigned int* ptr = ((unsigned int*) p->nip) - 4 * 10;
+ for (; ptr <= (((unsigned int*) p->nip) + 4 * 10); ptr ++)
+				printk("Hexadecimal code at address %x = %x\n", ptr, *ptr);
+ }
+ hang("Program or alignment exception at ", vaddr, p);
+ }
+}
+
+/* Generic routines for map handling.
+ */
+
+static inline
+void free_map(map *p) {
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+ if (!p) return;
+ p->next=mm->freemaps;
+ mm->freemaps=p;
+ p->firstpte=MAP_FREE;
+}
+
+/* Sorted insertion in linked list */
+static
+int insert_map(map **head, map *p) {
+ map *q = *head;
+ if (!p) return 0;
+ if (q && (q->base < p->base)) {
+ for(;q->next && q->next->base<p->base; q = q->next);
+ if ((q->end >= p->base) ||
+ (q->next && p->end>=q->next->base)) {
+ free_map(p);
+ printk("Overlapping areas!\n");
+ return 1;
+ }
+ p->next = q->next;
+ q->next = p;
+ } else { /* Insert at head */
+ if (q && (p->end >= q->base)) {
+ free_map(p);
+ printk("Overlapping areas!\n");
+ return 1;
+ }
+ p->next = q;
+ *head = p;
+ }
+ return 0;
+}
+
+
+/* Removal from linked list */
+
+static
+map *remove_map(map **head, map *p) {
+ map *q = *head;
+
+ if (!p || !q) return NULL;
+ if (q==p) {
+ *head = q->next;
+ return p;
+ }
+ for(;q && q->next!=p; q=q->next);
+ if (q) {
+ q->next=p->next;
+ return p;
+ } else {
+ return NULL;
+ }
+}
+
+static
+map *remove_map_at(map **head, void * vaddr) {
+ map *p, *q = *head;
+
+ if (!vaddr || !q) return NULL;
+ if (q->base==(u_long)vaddr) {
+ *head = q->next;
+ return q;
+ }
+ while (q->next && q->next->base != (u_long)vaddr) q=q->next;
+ p=q->next;
+ if (p) q->next=p->next;
+ return p;
+}
+
+static inline
+map * alloc_map_page(void) {
+ map *from, *p;
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+
+ /* printk("Allocating new map page !"); */
+ /* Get the highest page */
+ for (from=mm->physavail; from && from->next; from=from->next);
+ if (!from) return NULL;
+
+ from->end -= PAGE_SIZE;
+
+ mm->freemaps = (map *) (from->end+1);
+
+ for(p=mm->freemaps; p<mm->freemaps+PAGE_SIZE/sizeof(map)-1; p++) {
+ p->next = p+1;
+ p->firstpte = MAP_FREE;
+ }
+ (p-1)->next=0;
+
+	/* Use the last map to describe the page holding the maps itself
+	 * and insert it into the permanent map list.
+ */
+
+ p->firstpte = MAP_PERM_PHYS;
+ p->base=(u_long) mm->freemaps;
+ p->end = p->base+PAGE_SIZE-1;
+
+ insert_map(&mm->physperm, p);
+
+ if (from->end+1 == from->base)
+ free_map(remove_map(&mm->physavail, from));
+
+ return mm->freemaps;
+}
+
+static
+map * alloc_map(void) {
+ map *p;
+ struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
+
+ p = mm->freemaps;
+ if (!p) {
+ p=alloc_map_page();
+ }
+
+ if(p) mm->freemaps=p->next;
+
+ return p;
+}
+
+static
+void coalesce_maps(map *p) {
+ while(p) {
+ if (p->next && (p->end+1 == p->next->base)) {
+ map *q=p->next;
+ p->end=q->end;
+ p->next=q->next;
+ free_map(q);
+ } else {
+ p = p->next;
+ }
+ }
+}
+
+/* These routines are used to find the free memory zones to avoid
+ * overlapping destructive copies when initializing.
+ * They work from the top because of the way we want to boot.
+ * In the following the term zone refers to the memory described
+ * by one or several contiguous so-called segments in the
+ * residual data.
+ */
+#define STACK_PAGES 2
+static inline u_long
+find_next_zone(RESIDUAL *res, u_long lowpage, u_long flags) {
+ u_long i, newmin=0, size=0;
+ for(i=0; i<res->ActualNumMemSegs; i++) {
+ if (res->Segs[i].Usage & flags
+ && res->Segs[i].BasePage<lowpage
+ && res->Segs[i].BasePage>newmin) {
+ newmin=res->Segs[i].BasePage;
+ size=res->Segs[i].PageCount;
+ }
+ }
+ return newmin+size;
+}
+
+static inline u_long
+find_zone_start(RESIDUAL *res, u_long highpage, u_long flags) {
+ u_long i;
+ int progress;
+ do {
+ progress=0;
+ for (i=0; i<res->ActualNumMemSegs; i++) {
+ if ( (res->Segs[i].BasePage+res->Segs[i].PageCount
+ == highpage)
+ && res->Segs[i].Usage & flags) {
+ highpage=res->Segs[i].BasePage;
+ progress=1;
+ }
+ }
+ } while(progress);
+ return highpage;
+}
+
+/* The Motorola NT firmware does not provide any setting in the residual
+ * data about memory segment usage. The following table provides enough
+ * info so that this bootloader can work.
+ */
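+/* Each entry below is { Usage, BasePage, PageCount } with base and
+ * count expressed in PAGE_SIZE (4kB) pages, matching the fields that
+ * fix_residual() copies into res->Segs[].
+ */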
+MEM_MAP seg_fix[] = {
+ { 0x2000, 0xFFF00, 0x00100 },
+ { 0x0020, 0x02000, 0x7E000 },
+ { 0x0008, 0x00800, 0x00168 },
+ { 0x0004, 0x00000, 0x00005 },
+ { 0x0001, 0x006F1, 0x0010F },
+ { 0x0002, 0x006AD, 0x00044 },
+ { 0x0010, 0x00005, 0x006A8 },
+ { 0x0010, 0x00968, 0x00698 },
+ { 0x0800, 0xC0000, 0x3F000 },
+ { 0x0600, 0xBF800, 0x00800 },
+ { 0x0500, 0x81000, 0x3E800 },
+ { 0x0480, 0x80800, 0x00800 },
+ { 0x0440, 0x80000, 0x00800 } };
+
+
+/* The Motorola NT firmware does not set up all required info in the residual
+ * data. This routine changes a few things so that the bootloader and
+ * Linux are happy.
+ */
+void
+fix_residual( RESIDUAL *res )
+{
+#if 0
+ PPC_DEVICE *hostbridge;
+#endif
+ int i;
+
+ /* Missing memory segment information */
+ res->ActualNumMemSegs = sizeof(seg_fix)/sizeof(MEM_MAP);
+ for (i=0; i<res->ActualNumMemSegs; i++) {
+ res->Segs[i].Usage = seg_fix[i].Usage;
+ res->Segs[i].BasePage = seg_fix[i].BasePage;
+ res->Segs[i].PageCount = seg_fix[i].PageCount;
+ }
+ /* The following should be fixed in the current version of the
+ * kernel and of the bootloader.
+ */
+#if 0
+ /* PPCBug has this zero */
+ res->VitalProductData.CacheLineSize = 0;
+ /* Motorola NT firmware sets TimeBaseDivisor to 0 */
+ if ( res->VitalProductData.TimeBaseDivisor == 0 ) {
+ res->VitalProductData.TimeBaseDivisor = 4000;
+ }
+
+ /* Motorola NT firmware records the PCIBridge as a "PCIDEVICE" and
+ * sets "PCIBridgeDirect". This bootloader and linux works better if
+ * BusId = "PROCESSORDEVICE" and Interface = "PCIBridgeIndirect".
+ */
+ hostbridge=residual_find_device(PCIDEVICE, NULL,
+ BridgeController,
+ PCIBridge, -1, 0);
+ if (hostbridge) {
+ hostbridge->DeviceId.BusId = PROCESSORDEVICE;
+ hostbridge->DeviceId.Interface = PCIBridgeIndirect;
+ }
+#endif
+}
+
+/* This routine is the first C code called with very little stack space!
+ * Its goal is to find where the boot image can be moved. This will
+ * be the highest address with enough room.
+ */
+int early_setup(u_long image_size) {
+ register RESIDUAL *res = bd->residual;
+ u_long minpages = PAGE_ALIGN(image_size)>>PAGE_SHIFT;
+
+ /* Fix residual if we are loaded by Motorola NT firmware */
+ if ( res && res->VitalProductData.FirmwareSupplier == 0x10000 )
+ fix_residual( res );
+
+ /* FIXME: if OF we should do something different */
+ if( !bd->of_entry && res &&
+ res->ResidualLength <= sizeof(RESIDUAL) && res->Version == 0 ) {
+ u_long lowpage=ULONG_MAX, highpage;
+ u_long imghigh=0, stkhigh=0;
+		/* Find the highest large enough contiguous zone
+ consisting of free and BootImage sections. */
+ /* Find 3 free areas of memory, one for the main image, one
+		 * for the stack (STACK_PAGES), and one page to hold the map
+ * structures. They are allocated from the top of memory.
+ * In most cases the stack will be put just below the image.
+ */
+ while((highpage =
+ find_next_zone(res, lowpage, BootImage|Free))) {
+ lowpage=find_zone_start(res, highpage, BootImage|Free);
+ if ((highpage-lowpage)>minpages &&
+ highpage>imghigh) {
+ imghigh=highpage;
+ highpage -=minpages;
+ }
+ if ((highpage-lowpage)>STACK_PAGES &&
+ highpage>stkhigh) {
+ stkhigh=highpage;
+ highpage-=STACK_PAGES;
+ }
+ }
+
+ bd->image = (void *)((imghigh-minpages)<<PAGE_SHIFT);
+ bd->stack=(void *) (stkhigh<<PAGE_SHIFT);
+
+ /* The code mover is put at the lowest possible place
+ * of free memory. If this corresponds to the loaded boot
+		 * partition image it does not matter because it overwrites
+ * the unused part of it (x86 code).
+ */
+ bd->mover=(void *) (lowpage<<PAGE_SHIFT);
+
+ /* Let us flush the caches in all cases. After all it should
+ * not harm even on 601 and we don't care about performance.
+ * Right now it's easy since all processors have a line size
+ * of 32 bytes. Once again residual data has proved unreliable.
+ */
+ bd->cache_lsize = 32;
+ }
+	/* For now we always assume that it's successful; we should
+	 * handle the case of insufficient memory better.
+ */
+ return 0;
+}
+
+void * valloc(u_long size) {
+ map *p, *q;
+ struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
+
+ if (size==0) return NULL;
+ size=PAGE_ALIGN(size)-1;
+ for (p=mm->virtavail; p; p=p->next) {
+ if (p->base+size <= p->end) break;
+ }
+ if(!p) return NULL;
+ q=alloc_map();
+ q->base=p->base;
+ q->end=q->base+size;
+ q->firstpte=MAP_USED_VIRT;
+ insert_map(&mm->virtused, q);
+ if (q->end==p->end) free_map(remove_map(&mm->virtavail, p));
+ else p->base += size+1;
+ return (void *)q->base;
+}
+
+static
+void vflush(map *virtmap) {
+ struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
+ u_long i, limit=(mm->hashmask>>3)+8;
+ hash_entry volatile *p=(hash_entry *) mm->sdr1;
+
+	/* PTE handling is simple since the processor never updates
+ * the entries. Writable pages always have the C bit set and
+ * all valid entries have the R bit set. From the processor
+	 * all valid entries have the R bit set. From the processor's
+	 * point of view, the hash table is read only.
+ for (i=0; i<limit; i++) {
+ if (p[i].key<0) {
+ u_long va;
+ va = ((i<<9)^((p[i].key)<<5)) &0x3ff000;
+ if (p[i].key&0x40) va^=0x3ff000;
+ va |= ((p[i].key<<21)&0xf0000000)
+ | ((p[i].key<<22)&0x0fc00000);
+ if (va>=virtmap->base && va<=virtmap->end) {
+ p[i].key=0;
+ asm volatile("sync; tlbie %0; sync" : :
+ "r" (va));
+ }
+ }
+ }
+}
+
+void vfree(void *vaddr) {
+ map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
+ struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
+
+ /* Flush memory queues */
+ asm volatile("sync": : : "memory");
+
+ virtmap = remove_map_at(&mm->virtused, vaddr);
+ if (!virtmap) return;
+
+ /* Remove mappings corresponding to virtmap */
+ for (physmap=mm->mappings; physmap; ) {
+ map *nextmap=physmap->next;
+ if (physmap->base>=virtmap->base
+ && physmap->base<virtmap->end) {
+ free_map(remove_map(&mm->mappings, physmap));
+ }
+ physmap=nextmap;
+ }
+
+ vflush(virtmap);
+
+ virtmap->firstpte= MAP_FREE_VIRT;
+ insert_map(&mm->virtavail, virtmap);
+ coalesce_maps(mm->virtavail);
+}
+
+void vunmap(void *vaddr) {
+ map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+
+ /* Flush memory queues */
+ asm volatile("sync": : : "memory");
+
+ /* vaddr must be within one of the vm areas in use and
+ * then must correspond to one of the physical areas
+ */
+ for (virtmap=mm->virtused; virtmap; virtmap=virtmap->next) {
+ if (virtmap->base<=(u_long)vaddr &&
+ virtmap->end>=(u_long)vaddr) break;
+ }
+ if (!virtmap) return;
+
+ physmap = remove_map_at(&mm->mappings, vaddr);
+ if(!physmap) return;
+ vflush(physmap);
+ free_map(physmap);
+}
+
+int vmap(void *vaddr, u_long p, u_long size) {
+ map *q;
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+
+ size=PAGE_ALIGN(size);
+ if(!size) return 1;
+ /* Check that the requested area fits in one vm image */
+ for (q=mm->virtused; q; q=q->next) {
+ if ((q->base <= (u_long)vaddr) &&
+ (q->end>=(u_long)vaddr+size -1)) break;
+ }
+ if (!q) return 1;
+ q= alloc_map();
+ if (!q) return 1;
+ q->base = (u_long)vaddr;
+ q->end = (u_long)vaddr+size-1;
+ q->firstpte = p;
+ return insert_map(&mm->mappings, q);
+}
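+
+/* A hypothetical sketch of how the virtual space primitives combine,
+ * assuming a physical address phys obtained elsewhere (e.g. from
+ * __palloc()) and the PTE_RAM attribute used later from mm_init():
+ *
+ *	void *va = valloc(0x2000);		 reserve 2 pages of VA space
+ *	vmap(va, phys|PTE_RAM, 0x2000);		 attach the actual mapping
+ *	...					 PTEs are filled on demand
+ *	vfree(va);				 drops the mappings and the VA
+ */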
+
+static
+void create_identity_mappings(int type, int attr) {
+ u_long lowpage=ULONG_MAX, highpage;
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+ RESIDUAL * res=bd->residual;
+
+ while((highpage = find_next_zone(res, lowpage, type))) {
+ map *p;
+ lowpage=find_zone_start(res, highpage, type);
+ p=alloc_map();
+ /* Do not map page 0 to catch null pointers */
+ lowpage = lowpage ? lowpage : 1;
+ p->base=lowpage<<PAGE_SHIFT;
+ p->end=(highpage<<PAGE_SHIFT)-1;
+ p->firstpte = (lowpage<<PAGE_SHIFT)|attr;
+ insert_map(&mm->mappings, p);
+ }
+}
+
+static inline
+void add_free_map(u_long base, u_long end) {
+ map *q=NULL;
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+
+ if (base<end) q=alloc_map();
+ if (!q) return;
+ q->base=base;
+ q->end=end-1;
+ q->firstpte=MAP_FREE_VIRT;
+ insert_map(&mm->virtavail, q);
+}
+
+static inline
+void create_free_vm(void) {
+ map *p;
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+
+ u_long vaddr=PAGE_SIZE; /* Never map vaddr 0 */
+ for(p=mm->mappings; p; p=p->next) {
+ add_free_map(vaddr, p->base);
+ vaddr=p->end+1;
+ }
+ /* Special end of memory case */
+ if (vaddr) add_free_map(vaddr,0);
+}
+
+/* Memory management initialization.
+ * Set up the mapping lists.
+ */
+
+static inline
+void add_perm_map(u_long start, u_long size) {
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+ map *p=alloc_map();
+ p->base = start;
+ p->end = start + size - 1;
+ p->firstpte = MAP_PERM_PHYS;
+ insert_map(& mm->physperm , p);
+}
+
+void mm_init(u_long image_size)
+{
+ u_long lowpage=ULONG_MAX, highpage;
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+ RESIDUAL * res=bd->residual;
+ extern void (tlb_handlers)(void);
+ extern void (_handler_glue)(void);
+ int i;
+ map *p;
+
+ /* The checks are simplified by the fact that the image
+ * and stack area are always allocated at the upper end
+ * of a free block.
+ */
+ while((highpage = find_next_zone(res, lowpage, BootImage|Free))) {
+ lowpage=find_zone_start(res, highpage, BootImage|Free);
+ if ( ( ((u_long)bd->image+PAGE_ALIGN(image_size))>>PAGE_SHIFT)
+ == highpage) {
+ highpage=(u_long)(bd->image)>>PAGE_SHIFT;
+ add_perm_map((u_long)bd->image, image_size);
+ }
+ if ( (( u_long)bd->stack>>PAGE_SHIFT) == highpage) {
+ highpage -= STACK_PAGES;
+ add_perm_map(highpage<<PAGE_SHIFT,
+ STACK_PAGES*PAGE_SIZE);
+ }
+ /* Protect the interrupt handlers that we need ! */
+ if (lowpage<2) lowpage=2;
+ /* Check for the special case of full area! */
+ if (highpage>lowpage) {
+ p = alloc_map();
+ p->base = lowpage<<PAGE_SHIFT;
+ p->end = (highpage<<PAGE_SHIFT)-1;
+ p->firstpte=MAP_FREE_PHYS;
+ insert_map(&mm->physavail, p);
+ }
+ }
+
+ /* Allocate the hash table */
+ mm->sdr1=__palloc(0x10000, PA_PERM|16);
+ _write_SDR1((u_long)mm->sdr1);
+ memset(mm->sdr1, 0, 0x10000);
+ mm->hashmask = 0xffc0;
+
+ /* Setup the segment registers as we want them */
+ for (i=0; i<16; i++) _write_SR(i, (void *)(i<<28));
+	/* Create the maps for the physical memory; FirmwareCode does not
+ * seem to be necessary. ROM is mapped read-only to reduce the risk
+ * of reprogramming it because it's often Flash and some are
+ * amazingly easy to overwrite.
+ */
+ create_identity_mappings(BootImage|Free|FirmwareCode|FirmwareHeap|
+ FirmwareStack, PTE_RAM);
+ create_identity_mappings(SystemROM, PTE_ROM);
+ create_identity_mappings(IOMemory|SystemIO|SystemRegs|
+ PCIAddr|PCIConfig|ISAAddr, PTE_IO);
+
+ create_free_vm();
+
+ /* Install our own MMU and trap handlers. */
+ codemove((void *) 0x300, _handler_glue, 0x100, bd->cache_lsize);
+ codemove((void *) 0x400, _handler_glue, 0x100, bd->cache_lsize);
+ codemove((void *) 0x600, _handler_glue, 0x100, bd->cache_lsize);
+ codemove((void *) 0x700, _handler_glue, 0x100, bd->cache_lsize);
+}
+
+void * salloc(u_long size) {
+ map *p, *q;
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+
+ if (size==0) return NULL;
+
+ size = (size+7)&~7;
+
+ for (p=mm->sallocfree; p; p=p->next) {
+ if (p->base+size <= p->end) break;
+ }
+ if(!p) {
+ void *m;
+ m = __palloc(size, PA_SUBALLOC);
+ p = alloc_map();
+		if (!m || !p) return NULL;
+ p->base = (u_long) m;
+ p->firstpte = MAP_FREE_SUBS;
+ p->end = (u_long)m+PAGE_ALIGN(size)-1;
+ insert_map(&mm->sallocfree, p);
+ coalesce_maps(mm->sallocfree);
+ coalesce_maps(mm->sallocphys);
+ };
+ q=alloc_map();
+ q->base=p->base;
+ q->end=q->base+size-1;
+ q->firstpte=MAP_USED_SUBS;
+ insert_map(&mm->sallocused, q);
+ if (q->end==p->end) free_map(remove_map(&mm->sallocfree, p));
+ else p->base += size;
+ memset((void *)q->base, 0, size);
+ return (void *)q->base;
+}
+
+void sfree(void *p) {
+ map *q;
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+
+ q=remove_map_at(&mm->sallocused, p);
+ if (!q) return;
+ q->firstpte=MAP_FREE_SUBS;
+ insert_map(&mm->sallocfree, q);
+ coalesce_maps(mm->sallocfree);
+}
+
+/* first/last area fit; the low bits of flags (selected by
+ * PA_ALIGN_MASK) hold the base 2 logarithm of the required
+ * alignment. The algorithms are stupid because we expect very little
+ * fragmentation of the areas, if any. The unit of allocation is the page.
+ * The allocation is by default performed from higher addresses down,
+ * unless flags&PA_LOW is true.
+ */
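+/* For instance, mm_init() above allocates the 64kB hash table with
+ * __palloc(0x10000, PA_PERM|16): a permanent allocation aligned on a
+ * 2^16 byte boundary, while salloc() passes PA_SUBALLOC to obtain
+ * pages it then carves into 8 byte pieces.
+ */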
+
+void * __palloc(u_long size, int flags)
+{
+ u_long mask = ((1<<(flags&PA_ALIGN_MASK))-1);
+ map *newmap, *frommap, *p, *splitmap=0;
+ map **queue;
+ u_long qflags;
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+
+ /* Asking for a size which is not a multiple of the alignment
+ is likely to be an error. */
+
+ if (size & mask) return NULL;
+ size = PAGE_ALIGN(size);
+ if(!size) return NULL;
+
+ if (flags&PA_SUBALLOC) {
+ queue = &mm->sallocphys;
+ qflags = MAP_SUBS_PHYS;
+ } else if (flags&PA_PERM) {
+ queue = &mm->physperm;
+ qflags = MAP_PERM_PHYS;
+ } else {
+ queue = &mm->physused;
+ qflags = MAP_USED_PHYS;
+ }
+	/* We need to allocate that one now so that no two allocations can
+	 * attempt to take the same memory simultaneously. Alloc_map_page does
+ * not call back here to avoid infinite recursion in alloc_map.
+ */
+
+ if (mask&PAGE_MASK) {
+ splitmap=alloc_map();
+ if (!splitmap) return NULL;
+ }
+
+ for (p=mm->physavail, frommap=NULL; p; p=p->next) {
+ u_long high = p->end;
+ u_long limit = ((p->base+mask)&~mask) + size-1;
+ if (high>=limit && ((p->base+mask)&~mask)+size>p->base) {
+ frommap = p;
+ if (flags&PA_LOW) break;
+ }
+ }
+
+ if (!frommap) {
+ if (splitmap) free_map(splitmap);
+ return NULL;
+ }
+
+ newmap=alloc_map();
+
+ if (flags&PA_LOW) {
+ newmap->base = (frommap->base+mask)&~mask;
+ } else {
+ newmap->base = (frommap->end +1 - size) & ~mask;
+ }
+
+ newmap->end = newmap->base+size-1;
+ newmap->firstpte = qflags;
+
+ /* Add a fragment if we don't allocate until the end. */
+
+ if (splitmap) {
+ splitmap->base=newmap->base+size;
+ splitmap->end=frommap->end;
+ splitmap->firstpte= MAP_FREE_PHYS;
+ frommap->end=newmap->base-1;
+ } else if (flags & PA_LOW) {
+ frommap->base=newmap->base+size;
+ } else {
+ frommap->end=newmap->base-1;
+ }
+
+ /* Remove a fragment if it becomes empty. */
+ if (frommap->base == frommap->end+1) {
+ free_map(remove_map(&mm->physavail, frommap));
+ }
+
+ if (splitmap) {
+ if (splitmap->base == splitmap->end+1) {
+ free_map(remove_map(&mm->physavail, splitmap));
+ } else {
+ insert_map(&mm->physavail, splitmap);
+ }
+ }
+
+ insert_map(queue, newmap);
+ return (void *) newmap->base;
+
+}
+
+void pfree(void * p) {
+ map *q;
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+ q=remove_map_at(&mm->physused, p);
+ if (!q) return;
+ q->firstpte=MAP_FREE_PHYS;
+ insert_map(&mm->physavail, q);
+ coalesce_maps(mm->physavail);
+}
+
+#ifdef DEBUG
+/* Debugging functions */
+void print_maps(map *chain, const char *s) {
+ map *p;
+ printk("%s",s);
+ for(p=chain; p; p=p->next) {
+ printk(" %08lx-%08lx: %08lx\n",
+ p->base, p->end, p->firstpte);
+ }
+}
+
+void print_all_maps(const char * s) {
+ u_long freemaps;
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+ map *free;
+ printk("%s",s);
+ print_maps(mm->mappings, " Currently defined mappings:\n");
+ print_maps(mm->physavail, " Currently available physical areas:\n");
+ print_maps(mm->physused, " Currently used physical areas:\n");
+ print_maps(mm->virtavail, " Currently available virtual areas:\n");
+ print_maps(mm->virtused, " Currently used virtual areas:\n");
+ print_maps(mm->physperm, " Permanently used physical areas:\n");
+ print_maps(mm->sallocphys, " Physical memory used for salloc:\n");
+ print_maps(mm->sallocfree, " Memory available for salloc:\n");
+ print_maps(mm->sallocused, " Memory allocated through salloc:\n");
+ for (freemaps=0, free=mm->freemaps; free; freemaps++, free=free->next);
+ printk(" %ld free maps.\n", freemaps);
+}
+
+void print_hash_table(void) {
+ struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
+ hash_entry *p=(hash_entry *) mm->sdr1;
+ u_int i, valid=0;
+ for (i=0; i<((mm->hashmask)>>3)+8; i++) {
+ if (p[i].key<0) valid++;
+ }
+ printk("%u valid hash entries on pass 1.\n", valid);
+ valid = 0;
+ for (i=0; i<((mm->hashmask)>>3)+8; i++) {
+ if (p[i].key<0) valid++;
+ }
+ printk("%u valid hash entries on pass 2.\n"
+ " vpn:rpn_attr, p/s, pteg.i\n", valid);
+ for (i=0; i<((mm->hashmask)>>3)+8; i++) {
+ if (p[i].key<0) {
+ u_int pteg=(i>>3);
+ u_long vpn;
+ vpn = (pteg^((p[i].key)>>7)) &0x3ff;
+ if (p[i].key&0x40) vpn^=0x3ff;
+ vpn |= ((p[i].key<<9)&0xffff0000)
+ | ((p[i].key<<10)&0xfc00);
+ printk("%08lx:%08lx, %s, %5d.%d\n",
+ vpn, p[i].rpn, p[i].key&0x40 ? "sec" : "pri",
+ pteg, i%8);
+ }
+ }
+}
+
+#endif
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/pci.c b/c/src/lib/libbsp/powerpc/shared/bootloader/pci.c
new file mode 100644
index 0000000000..59cdf9e219
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/pci.c
@@ -0,0 +1,931 @@
+/*
+ * pci.c -- Crude pci handling for early boot.
+ *
+ * Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
+ *
+ * Modified to compile in RTEMS development environment
+ * by Eric Valette
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+
+#include <sys/types.h>
+#include <libcpu/spr.h>
+#include "bootldr.h"
+#include "pci.h"
+#include <libcpu/io.h>
+#include <bsp/consoleIo.h>
+
+typedef unsigned int u32;
+
+/*#define DEBUG*/
+/* Used to reorganize PCI space on stupid machines which spread resources
+ * across a wide address space. This is bad when P2P bridges are present
+ * or when it limits the mappings that a resource hog like a PCI<->VME
+ * bridge can use.
+ */
+
+typedef struct _pci_resource {
+ struct _pci_resource *next;
+ struct pci_dev *dev;
+ u_long base; /* will be 64 bits on 64 bits machines */
+ u_long size;
+ u_char type; /* 1 is I/O else low order 4 bits of the memory type */
+ u_char reg; /* Register # in conf space header */
+ u_short cmd; /* Original cmd byte */
+} pci_resource;
+
+typedef struct _pci_area {
+ struct _pci_area *next;
+ u_long start;
+ u_long end;
+ struct pci_bus *bus;
+ u_int flags;
+} pci_area;
+
+typedef struct _pci_area_head {
+ pci_area *head;
+ u_long mask;
+ int high; /* To allocate from top */
+} pci_area_head;
+
+#define PCI_AREA_PREFETCHABLE 0
+#define PCI_AREA_MEMORY 1
+#define PCI_AREA_IO 2
+
+struct _pci_private {
+ volatile u_int * config_addr;
+ volatile u_char * config_data;
+ struct pci_dev **last_dev_p;
+ struct pci_bus pci_root;
+ pci_resource *resources;
+ pci_area_head io, mem;
+
+} pci_private = {
+ config_addr: NULL,
+ config_data: (volatile u_char *) 0x80800000,
+ last_dev_p: NULL,
+ resources: NULL,
+ io: {NULL, 0xfff, 0},
+ mem: {NULL, 0xfffff, 0}
+};
+
+#define pci ((struct _pci_private *)(bd->pci_private))
+#define pci_root pci->pci_root
+
+#if !defined(DEBUG)
+#undef PCI_DEBUG
+/*
+ #else
+ #define PCI_DEBUG
+*/
+#endif
+
+#if defined(PCI_DEBUG)
+static void
+print_pci_resources(const char *s) {
+ pci_resource *p;
+ printk("%s", s);
+ for (p=pci->resources; p; p=p->next) {
+ printk(" %p:%p %06x %08lx %08lx %d\n",
+ p, p->next,
+ (p->dev->devfn<<8)+(p->dev->bus->number<<16)
+ +0x10+p->reg*4,
+ p->base,
+ p->size,
+ p->type);
+ }
+}
+
+static void
+print_pci_area(pci_area *p) {
+ for (; p; p=p->next) {
+ printk(" %p:%p %p %08lx %08lx\n",
+ p, p->next, p->bus, p->start, p->end);
+ }
+}
+
+static void
+print_pci_areas(const char *s) {
+ printk("%s PCI I/O areas:\n",s);
+ print_pci_area(pci->io.head);
+ printk(" PCI memory areas:\n");
+ print_pci_area(pci->mem.head);
+}
+#else
+#define print_pci_areas(x)
+#define print_pci_resources(x)
+#endif
+
+/* Maybe there are some devices that use a size different
+ * from the alignment. For now we assume both are the same.
+ * The blacklist might be used for other weird things in the future too,
+ * since weird non-PCI-compliant devices seem to proliferate these days.
+ */
+
+struct blacklist_entry {
+ u_short vendor, device;
+ u_char reg;
+ u_long actual_size;
+};
+
+#define BLACKLIST(vid, did, breg, actual_size) \
+ {PCI_VENDOR_ID_##vid, PCI_DEVICE_ID_##vid##_##did, breg, actual_size}
+
+static struct blacklist_entry blacklist[] = {
+ BLACKLIST(S3, TRIO, 0, 0x04000000),
+ {0xffff, 0, 0, 0}
+};
+
+
+/* This function filters resources and then inserts them into a list of
+ * configurable pci resources.
+ */
+
+
+#define AREA(r) \
+(((r->type&PCI_BASE_ADDRESS_SPACE)==PCI_BASE_ADDRESS_SPACE_IO) ? PCI_AREA_IO :\
+ ((r->type&PCI_BASE_ADDRESS_MEM_PREFETCH) ? PCI_AREA_PREFETCHABLE :\
+ PCI_AREA_MEMORY))
+
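+/* For example a resource whose saved low type bits are 0x1 is an I/O
+ * range (PCI_AREA_IO), 0x8 is prefetchable memory
+ * (PCI_AREA_PREFETCHABLE), and 0x0 is plain 32 bit memory
+ * (PCI_AREA_MEMORY).
+ */
+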
+static int insert_before(pci_resource *e, pci_resource *t) {
+ if (e->dev->bus->number != t->dev->bus->number)
+ return e->dev->bus->number > t->dev->bus->number;
+ if (AREA(e) != AREA(t)) return AREA(e)<AREA(t);
+ return (e->size > t->size);
+}
+
+static void insert_resource(pci_resource *r) {
+ struct blacklist_entry *b;
+ pci_resource *p;
+ if (!r) return;
+
+ /* First fixup in case we have a blacklist entry. Note that this
+ * may temporarily leave a resource in an inconsistent state: with
+ * (base & (size-1)) !=0. This is harmless.
+ */
+ for (b=blacklist; b->vendor!=0xffff; b++) {
+ if ((r->dev->vendor==b->vendor) &&
+ (r->dev->device==b->device) &&
+ (r->reg==b->reg)) {
+ r->size=b->actual_size;
+ break;
+ }
+ }
+
+ /* Motorola NT firmware does not configure pci devices which are not
+ * required for booting, others do. For now:
+	 * - devices already allocated in the ISA compatible range (64kB
+	 *   of I/O, 16MB of memory), i.e. with a non zero base register
+	 *   below these limits, are left as is.
+	 * - all other registers, whether already allocated or not, are
+	 *   reallocated unless they require an inordinate amount of
+	 *   resources (256MB or more for memory, 64kB or more for I/O).
+	 *   Devices with too large mapping requirements are simply ignored
+	 *   and their bases are set to 0. This should disable the
+	 *   corresponding decoders according to the PCI specification.
+	 *   Many devices are buggy in this respect, but the limits have
+	 *   hopefully been set high enough to avoid problems.
+ */
+
+ if ((r->type==PCI_BASE_ADDRESS_SPACE_IO)
+ ? (r->base && r->base <0x10000)
+ : (r->base && r->base <0x1000000)) {
+ sfree(r);
+ return;
+ }
+
+ if ((r->type==PCI_BASE_ADDRESS_SPACE_IO)
+ ? (r->size >= 0x10000)
+ : (r->size >= 0x10000000)) {
+ r->size = 0;
+ r->base = 0;
+ }
+
+ /* Now insert into the list sorting by
+ * 1) decreasing bus number
+ * 2) space: prefetchable memory, non-prefetchable and finally I/O
+ * 3) decreasing size
+ */
+ if (!pci->resources || insert_before(r, pci->resources)) {
+ r->next = pci->resources;
+ pci->resources=r;
+ } else {
+ for (p=pci->resources; p->next; p=p->next) {
+ if (insert_before(r, p->next)) break;
+ }
+ r->next=p->next;
+ p->next=r;
+ }
+}
+
+/* This version only works for bus 0. I don't have any P2P bridges to test
+ * a more sophisticated version which has therefore not been implemented.
+ * Prefetchable memory is not yet handled correctly either.
+ * Several levels of PCI bridges are supported even less, since they
+ * must be allocated together to be able to set up the top bridge
+ * correctly.
+ */
+
+static u_long find_range(u_char bus, u_char type,
+ pci_resource **first,
+ pci_resource **past, u_int *flags) {
+ pci_resource *p;
+ u_long total=0;
+ u_int fl=0;
+
+ for (p=pci->resources; p; p=p->next) {
+ if ((p->dev->bus->number == bus) &&
+ AREA(p)==type) break;
+ }
+ *first = p;
+ for (; p; p=p->next) {
+ if ((p->dev->bus->number != bus) ||
+ AREA(p)!=type || p->size == 0) break;
+ total = total+p->size;
+ fl |= 1<<p->type;
+ }
+ *past = p;
+ /* This will be used later to tell whether there are any 32 bit
+	 * devices in an area which could be mapped higher than 4GB
+	 * on 64-bit architectures.
+ */
+ *flags = fl;
+ return total;
+}
+
+static inline void init_free_area(pci_area_head *h, u_long start,
+ u_long end, u_int mask, int high) {
+ pci_area *p;
+ p = salloc(sizeof(pci_area));
+ if (!p) return;
+ h->head = p;
+ p->next = NULL;
+ p->start = (start+mask)&~mask;
+ p->end = (end-mask)|mask;
+ p->bus = NULL;
+ h->mask = mask;
+ h->high = high;
+}
+
+static void insert_area(pci_area_head *h, pci_area *p) {
+ pci_area *q = h->head;
+ if (!p) return;
+ if (q && (q->start< p->start)) {
+ for(;q->next && q->next->start<p->start; q = q->next);
+ if ((q->end >= p->start) ||
+ (q->next && p->end>=q->next->start)) {
+ sfree(p);
+ printk("Overlapping pci areas!\n");
+ return;
+ }
+ p->next = q->next;
+ q->next = p;
+ } else { /* Insert at head */
+ if (q && (p->end >= q->start)) {
+ sfree(p);
+ printk("Overlapping pci areas!\n");
+ return;
+ }
+ p->next = q;
+ h->head = p;
+ }
+}
+
+static
+void remove_area(pci_area_head *h, pci_area *p) {
+ pci_area *q = h->head;
+
+ if (!p || !q) return;
+ if (q==p) {
+ h->head = q->next;
+ return;
+ }
+ for(;q && q->next!=p; q=q->next);
+ if (q) q->next=p->next;
+}
+
+static pci_area * alloc_area(pci_area_head *h, struct pci_bus *bus,
+ u_long required, u_long mask, u_int flags) {
+ pci_area *p;
+ pci_area *from, *split, *new;
+
+ required = (required+h->mask) & ~h->mask;
+ for (p=h->head, from=NULL; p; p=p->next) {
+ u_long l1 = ((p->start+required+mask)&~mask)-1;
+ u_long l2 = ((p->start+mask)&~mask)+required-1;
+ /* Allocated areas point to the bus to which they pertain */
+ if (p->bus) continue;
+ if ((p->end)>=l1 || (p->end)>=l2) from=p;
+ if (from && !h->high) break;
+ }
+ if (!from) return NULL;
+
+ split = salloc(sizeof(pci_area));
+ new = salloc(sizeof(pci_area));
+ /* If allocation of new succeeds then allocation of split has
+ * also been successful (given the current mm algorithms) !
+ */
+ if (!new) {
+ sfree(split);
+ return NULL;
+ }
+ new->bus = bus;
+ new->flags = flags;
+ /* Now allocate pci_space taking alignment into account ! */
+ if (h->high) {
+ u_long l1 = ((from->end+1)&~mask)-required;
+ u_long l2 = (from->end+1-required)&~mask;
+ new->start = (l1>l2) ? l1 : l2;
+ split->end = from->end;
+ from->end = new->start-1;
+ split->start = new->start+required;
+ new->end = new->start+required-1;
+ } else {
+ u_long l1 = ((from->start+mask)&~mask)+required-1;
+ u_long l2 = ((from->start+required+mask)&~mask)-1;
+ new->end = (l1<l2) ? l1 : l2;
+ split->start = from->start;
+ from->start = new->end+1;
+ new->start = new->end+1-required;
+ split->end = new->start-1;
+ }
+
+ if (from->end+1 == from->start) remove_area(h, from);
+ if (split->end+1 != split->start) {
+ split->bus = NULL;
+ insert_area(h, split);
+ } else {
+ sfree(split);
+ }
+ insert_area(h, new);
+ print_pci_areas("alloc_area called:\n");
+ return new;
+}
+
+static inline
+void alloc_space(pci_area *p, pci_resource *r) {
+ if (p->start & (r->size-1)) {
+ r->base = p->end+1-r->size;
+ p->end -= r->size;
+ } else {
+ r->base = p->start;
+ p->start += r->size;
+ }
+}
+
+static void reconfigure_bus_space(u_char bus, u_char type, pci_area_head *h) {
+ pci_resource *first, *past, *r;
+ pci_area *area, tmp;
+ u_int flags;
+ u_int required = find_range(bus, type, &first, &past, &flags);
+
+ if (required==0) return;
+ area = alloc_area(h, first->dev->bus, required, first->size-1, flags);
+ if (!area) return;
+ tmp = *area;
+ for (r=first; r!=past; r=r->next) {
+ alloc_space(&tmp, r);
+ }
+}
+
+static void reconfigure_pci(void) {
+ pci_resource *r;
+ struct pci_dev *dev;
+	/* FIXME: for now memory is relocated from low addresses; it would
+	 * be better to start from higher addresses.
+ */
+ init_free_area(&pci->io, 0x10000, 0x7fffff, 0xfff, 0);
+ init_free_area(&pci->mem, 0x1000000, 0x3cffffff, 0xfffff, 0);
+
+ /* First reconfigure the I/O space, this will be more
+	 * complex when there is more than 1 bus. And 64-bit
+	 * devices are another kind of problem.
+ */
+ reconfigure_bus_space(0, PCI_AREA_IO, &pci->io);
+ reconfigure_bus_space(0, PCI_AREA_MEMORY, &pci->mem);
+ reconfigure_bus_space(0, PCI_AREA_PREFETCHABLE, &pci->mem);
+
+ /* Now we have to touch the configuration space of all
+ * the devices to remap them better than they are right now.
+ * This is done in 3 steps:
+ * 1) first disable I/O and memory response of all devices
+ * 2) modify the base registers
+ * 3) restore the original PCI_COMMAND register.
+ */
+ for (r=pci->resources; r; r= r->next) {
+ if (!r->dev->sysdata) {
+ r->dev->sysdata=r;
+ pci_read_config_word(r->dev, PCI_COMMAND, &r->cmd);
+ pci_write_config_word(r->dev, PCI_COMMAND,
+ r->cmd & ~(PCI_COMMAND_IO|
+ PCI_COMMAND_MEMORY));
+ }
+ }
+
+ for (r=pci->resources; r; r= r->next) {
+ pci_write_config_dword(r->dev,
+ PCI_BASE_ADDRESS_0+(r->reg<<2),
+ r->base);
+ if ((r->type&
+ (PCI_BASE_ADDRESS_SPACE|
+ PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
+ (PCI_BASE_ADDRESS_SPACE_MEMORY|
+ PCI_BASE_ADDRESS_MEM_TYPE_64)) {
+ pci_write_config_dword(r->dev,
+ PCI_BASE_ADDRESS_1+
+ (r->reg<<2),
+ 0);
+ }
+ }
+ for (dev=bd->pci_devices; dev; dev= dev->next) {
+ if (dev->sysdata) {
+ pci_write_config_word(dev, PCI_COMMAND,
+ ((pci_resource *)dev->sysdata)
+ ->cmd);
+ dev->sysdata=NULL;
+ }
+ }
+}
+
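+/* Indirect configuration access uses the classical CONFIG_ADDRESS and
+ * CONFIG_DATA registers (set up in pci_init() at io_base+0xcf8 and
+ * io_base+0xcfc). The standard address word
+ * 0x80000000|(bus<<16)|(devfn<<8)|(offset&~3) targets a little-endian
+ * register, so the value below is assembled with its bytes reversed
+ * and stored with out_be32(); the data is then accessed at
+ * config_data plus the byte lane (offset&3).
+ */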
+static int
+indirect_pci_read_config_byte(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned char *val) {
+ out_be32(pci->config_addr,
+ 0x80|(bus<<8)|(dev_fn<<16)|((offset&~3)<<24));
+ *val=in_8(pci->config_data + (offset&3));
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+indirect_pci_read_config_word(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned short *val) {
+ *val = 0xffff;
+ if (offset&1) return PCIBIOS_BAD_REGISTER_NUMBER;
+ out_be32(pci->config_addr,
+ 0x80|(bus<<8)|(dev_fn<<16)|((offset&~3)<<24));
+ *val=in_le16((volatile u_short *)(pci->config_data + (offset&3)));
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+indirect_pci_read_config_dword(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned int *val) {
+ *val = 0xffffffff;
+ if (offset&3) return PCIBIOS_BAD_REGISTER_NUMBER;
+ out_be32(pci->config_addr,
+ 0x80|(bus<<8)|(dev_fn<<16)|(offset<<24));
+ *val=in_le32((volatile u_int *)pci->config_data);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+indirect_pci_write_config_byte(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned char val) {
+ out_be32(pci->config_addr,
+ 0x80|(bus<<8)|(dev_fn<<16)|((offset&~3)<<24));
+ out_8(pci->config_data + (offset&3), val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+indirect_pci_write_config_word(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned short val) {
+ if (offset&1) return PCIBIOS_BAD_REGISTER_NUMBER;
+ out_be32(pci->config_addr,
+ 0x80|(bus<<8)|(dev_fn<<16)|((offset&~3)<<24));
+ out_le16((volatile u_short *)(pci->config_data + (offset&3)), val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+indirect_pci_write_config_dword(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned int val) {
+ if (offset&3) return PCIBIOS_BAD_REGISTER_NUMBER;
+ out_be32(pci->config_addr,
+ 0x80|(bus<<8)|(dev_fn<<16)|(offset<<24));
+ out_le32((volatile u_int *)pci->config_data, val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static const struct pci_config_access_functions indirect_functions = {
+ indirect_pci_read_config_byte,
+ indirect_pci_read_config_word,
+ indirect_pci_read_config_dword,
+ indirect_pci_write_config_byte,
+ indirect_pci_write_config_word,
+ indirect_pci_write_config_dword
+};
+
+
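+/* Direct mapped configuration access: the config_data window is laid
+ * out so that, presumably, each device's IDSEL is driven by a single
+ * address line, hence the (1<<PCI_SLOT(dev_fn))&~1 term below; the
+ * 0xff8007fe mask rejects slot numbers that cannot be reached through
+ * this window.
+ */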
+static int
+direct_pci_read_config_byte(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned char *val) {
+ if (bus != 0 || (1<<PCI_SLOT(dev_fn) & 0xff8007fe)) {
+ *val=0xff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ *val=in_8(pci->config_data + ((1<<PCI_SLOT(dev_fn))&~1)
+ + (PCI_FUNC(dev_fn)<<8) + offset);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+direct_pci_read_config_word(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned short *val) {
+ *val = 0xffff;
+ if (offset&1) return PCIBIOS_BAD_REGISTER_NUMBER;
+ if (bus != 0 || (1<<PCI_SLOT(dev_fn) & 0xff8007fe)) {
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ *val=in_le16((volatile u_short *)
+ (pci->config_data + ((1<<PCI_SLOT(dev_fn))&~1)
+ + (PCI_FUNC(dev_fn)<<8) + offset));
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+direct_pci_read_config_dword(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned int *val) {
+ *val = 0xffffffff;
+ if (offset&3) return PCIBIOS_BAD_REGISTER_NUMBER;
+ if (bus != 0 || (1<<PCI_SLOT(dev_fn) & 0xff8007fe)) {
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ *val=in_le32((volatile u_int *)
+ (pci->config_data + ((1<<PCI_SLOT(dev_fn))&~1)
+ + (PCI_FUNC(dev_fn)<<8) + offset));
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+direct_pci_write_config_byte(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned char val) {
+ if (bus != 0 || (1<<PCI_SLOT(dev_fn) & 0xff8007fe)) {
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ out_8(pci->config_data + ((1<<PCI_SLOT(dev_fn))&~1)
+ + (PCI_FUNC(dev_fn)<<8) + offset,
+ val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+direct_pci_write_config_word(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned short val) {
+ if (offset&1) return PCIBIOS_BAD_REGISTER_NUMBER;
+ if (bus != 0 || (1<<PCI_SLOT(dev_fn) & 0xff8007fe)) {
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ out_le16((volatile u_short *)
+ (pci->config_data + ((1<<PCI_SLOT(dev_fn))&~1)
+ + (PCI_FUNC(dev_fn)<<8) + offset),
+ val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+direct_pci_write_config_dword(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned int val) {
+ if (offset&3) return PCIBIOS_BAD_REGISTER_NUMBER;
+ if (bus != 0 || (1<<PCI_SLOT(dev_fn) & 0xff8007fe)) {
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ out_le32((volatile u_int *)
+ (pci->config_data + ((1<<PCI_SLOT(dev_fn))&~1)
+ + (PCI_FUNC(dev_fn)<<8) + offset),
+ val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static const struct pci_config_access_functions direct_functions = {
+ direct_pci_read_config_byte,
+ direct_pci_read_config_word,
+ direct_pci_read_config_dword,
+ direct_pci_write_config_byte,
+ direct_pci_write_config_word,
+ direct_pci_write_config_dword
+};
+
+
+void pci_read_bases(struct pci_dev *dev, unsigned int howmany)
+{
+ unsigned int reg, nextreg;
+#define REG (PCI_BASE_ADDRESS_0 + (reg<<2))
+ u_short cmd;
+ u32 l, ml;
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+
+ for(reg=0; reg<howmany; reg=nextreg) {
+ pci_resource *r;
+ nextreg=reg+1;
+ pci_read_config_dword(dev, REG, &l);
+#if 0
+ if (l == 0xffffffff /*AJF || !l*/) continue;
+#endif
+ /* Note that disabling the memory response of a host bridge
+ * would lose data if a DMA transfer were in progress. In a
+ * bootloader we don't care however. Also we can't print any
+ * message for a while since we might just disable the console.
+ */
+ pci_write_config_word(dev, PCI_COMMAND, cmd &
+ ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
+ pci_write_config_dword(dev, REG, ~0);
+ pci_read_config_dword(dev, REG, &ml);
+ pci_write_config_dword(dev, REG, l);
+
+ /* Reenable the device now that we've played with
+ * base registers.
+ */
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+		/* Seems to be an unused entry, skip it */
+ if ( ml == 0 || ml == 0xffffffff ) continue;
+
+ if ((l &
+ (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK))
+ == (PCI_BASE_ADDRESS_MEM_TYPE_64
+ |PCI_BASE_ADDRESS_SPACE_MEMORY)) {
+ nextreg=reg+2;
+ }
+ dev->base_address[reg] = l;
+ r = salloc(sizeof(pci_resource));
+ if (!r) {
+ printk("Error allocating pci_resource struct.\n");
+ continue;
+ }
+ r->dev = dev;
+ r->reg = reg;
+ if ((l&PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+ r->type = l&~PCI_BASE_ADDRESS_IO_MASK;
+ r->base = l&PCI_BASE_ADDRESS_IO_MASK;
+ r->size = ~(ml&PCI_BASE_ADDRESS_IO_MASK)+1;
+ } else {
+ r->type = l&~PCI_BASE_ADDRESS_MEM_MASK;
+ r->base = l&PCI_BASE_ADDRESS_MEM_MASK;
+ r->size = ~(ml&PCI_BASE_ADDRESS_MEM_MASK)+1;
+ }
+ /* Check for the blacklisted entries */
+ insert_resource(r);
+ }
+}
+
+
+
+
+u_int pci_scan_bus(struct pci_bus *bus)
+{
+ unsigned int devfn, l, max, class;
+ unsigned char irq, hdr_type, is_multi = 0;
+ struct pci_dev *dev, **bus_last;
+ struct pci_bus *child;
+
+ bus_last = &bus->devices;
+ max = bus->secondary;
+ for (devfn = 0; devfn < 0xff; ++devfn) {
+ if (PCI_FUNC(devfn) && !is_multi) {
+ /* not a multi-function device */
+ continue;
+ }
+ if (pcibios_read_config_byte(bus->number, devfn, PCI_HEADER_TYPE, &hdr_type))
+ continue;
+ if (!PCI_FUNC(devfn))
+ is_multi = hdr_type & 0x80;
+
+ if (pcibios_read_config_dword(bus->number, devfn, PCI_VENDOR_ID, &l) ||
+ /* some broken boards return 0 if a slot is empty: */
+ l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000) {
+ is_multi = 0;
+ continue;
+ }
+
+ dev = salloc(sizeof(*dev));
+ dev->bus = bus;
+ dev->devfn = devfn;
+ dev->vendor = l & 0xffff;
+ dev->device = (l >> 16) & 0xffff;
+
+ pcibios_read_config_dword(bus->number, devfn,
+ PCI_CLASS_REVISION, &class);
+ class >>= 8; /* upper 3 bytes */
+ dev->class = class;
+ class >>= 8;
+ dev->hdr_type = hdr_type;
+
+ switch (hdr_type & 0x7f) { /* header type */
+ case PCI_HEADER_TYPE_NORMAL: /* standard header */
+ if (class == PCI_CLASS_BRIDGE_PCI)
+ goto bad;
+ /*
+ * If the card generates interrupts, read IRQ number
+ * (some architectures change it during pcibios_fixup())
+ */
+ pcibios_read_config_byte(bus->number, dev->devfn, PCI_INTERRUPT_PIN, &irq);
+ if (irq)
+ pcibios_read_config_byte(bus->number, dev->devfn, PCI_INTERRUPT_LINE, &irq);
+ dev->irq = irq;
+ /*
+ * read base address registers, again pcibios_fixup() can
+ * tweak these
+ */
+ pci_read_bases(dev, 6);
+ pcibios_read_config_dword(bus->number, devfn, PCI_ROM_ADDRESS, &l);
+ dev->rom_address = (l == 0xffffffff) ? 0 : l;
+ break;
+ case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
+ if (class != PCI_CLASS_BRIDGE_PCI)
+ goto bad;
+ pci_read_bases(dev, 2);
+ pcibios_read_config_dword(bus->number, devfn, PCI_ROM_ADDRESS1, &l);
+ dev->rom_address = (l == 0xffffffff) ? 0 : l;
+ break;
+ case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
+ if (class != PCI_CLASS_BRIDGE_CARDBUS)
+ goto bad;
+ pci_read_bases(dev, 1);
+ break;
+ default: /* unknown header */
+ bad:
+ printk("PCI device with unknown "
+ "header type %d ignored.\n",
+ hdr_type&0x7f);
+ continue;
+ }
+
+ /*
+ * Put it into the global PCI device chain. It's used to
+ * find devices once everything is set up.
+ */
+ *pci->last_dev_p = dev;
+ pci->last_dev_p = &dev->next;
+
+ /*
+ * Now insert it into the list of devices held
+ * by the parent bus.
+ */
+ *bus_last = dev;
+ bus_last = &dev->sibling;
+
+ }
+
+ /*
+ * After performing arch-dependent fixup of the bus, look behind
+ * all PCI-to-PCI bridges on this bus.
+ */
+ for(dev=bus->devices; dev; dev=dev->sibling)
+ /*
+ * If it's a bridge, scan the bus behind it.
+ */
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ unsigned int buses;
+ unsigned int devfn = dev->devfn;
+ unsigned short cr;
+
+ /*
+ * Insert it into the tree of buses.
+ */
+ child = salloc(sizeof(*child));
+ child->next = bus->children;
+ bus->children = child;
+ child->self = dev;
+ child->parent = bus;
+
+ /*
+ * Set up the primary, secondary and subordinate
+ * bus numbers.
+ */
+ child->number = child->secondary = ++max;
+ child->primary = bus->secondary;
+ child->subordinate = 0xff;
+ /*
+ * Clear all status bits and turn off memory,
+ * I/O and master enables.
+ */
+ pcibios_read_config_word(bus->number, devfn, PCI_COMMAND, &cr);
+ pcibios_write_config_word(bus->number, devfn, PCI_COMMAND, 0x0000);
+ pcibios_write_config_word(bus->number, devfn, PCI_STATUS, 0xffff);
+ /*
+ * Read the existing primary/secondary/subordinate bus
+ * number configuration to determine if the PCI bridge
+ * has already been configured by the system. If so,
+ * do not modify the configuration, merely note it.
+ */
+ pcibios_read_config_dword(bus->number, devfn, PCI_PRIMARY_BUS, &buses);
+ if ((buses & 0xFFFFFF) != 0)
+ {
+ unsigned int cmax;
+
+ child->primary = buses & 0xFF;
+ child->secondary = (buses >> 8) & 0xFF;
+ child->subordinate = (buses >> 16) & 0xFF;
+ child->number = child->secondary;
+ cmax = pci_scan_bus(child);
+ if (cmax > max) max = cmax;
+ }
+ else
+ {
+ /*
+ * Configure the bus numbers for this bridge:
+ */
+ buses &= 0xff000000;
+ buses |=
+ (((unsigned int)(child->primary) << 0) |
+ ((unsigned int)(child->secondary) << 8) |
+ ((unsigned int)(child->subordinate) << 16));
+ pcibios_write_config_dword(bus->number, devfn, PCI_PRIMARY_BUS, buses);
+ /*
+ * Now we can scan all subordinate buses:
+ */
+ max = pci_scan_bus(child);
+ /*
+ * Set the subordinate bus number to its real
+ * value:
+ */
+ child->subordinate = max;
+ buses = (buses & 0xff00ffff)
+ | ((unsigned int)(child->subordinate) << 16);
+ pcibios_write_config_dword(bus->number, devfn, PCI_PRIMARY_BUS, buses);
+ }
+ pcibios_write_config_word(bus->number, devfn, PCI_COMMAND, cr);
+ }
+
+ /*
+ * We've scanned the bus and so we know all about what's on
+ * the other side of any bridges that may be on this bus plus
+ * any devices.
+ *
+ * Return how far we've got finding sub-buses.
+ */
+ return max;
+}
+
+void
+pci_fixup(void) {
+ struct pci_dev *p;
+ struct pci_bus *bus;
+ for (bus = &pci_root; bus; bus=bus->next) {
+
+ for (p=bus->devices; p; p=p->sibling) {
+ }
+ }
+}
+
+void pci_init(void) {
+ PPC_DEVICE *hostbridge;
+
+ if (pci->last_dev_p) {
+ printk("Two or more calls to pci_init!\n");
+ return;
+ }
+ pci->last_dev_p = &(bd->pci_devices);
+ hostbridge=residual_find_device(PROCESSORDEVICE, NULL,
+ BridgeController,
+ PCIBridge, -1, 0);
+ if (hostbridge) {
+ if (hostbridge->DeviceId.Interface==PCIBridgeIndirect) {
+ bd->pci_functions=&indirect_functions;
+ /* Should be extracted from residual data,
+ * indeed MPC106 in CHRP mode is different,
+ * but we should not use residual data in
+ * this case anyway.
+ */
+ pci->config_addr = ((volatile u_int *)
+ (ptr_mem_map->io_base+0xcf8));
+ pci->config_data = ptr_mem_map->io_base+0xcfc;
+ } else if(hostbridge->DeviceId.Interface==PCIBridgeDirect) {
+ bd->pci_functions=&direct_functions;
+ pci->config_data=(u_char *) 0x80800000;
+ } else {
+ }
+ } else {
+ /* Let us try by experimentation at our own risk! */
+ u_int id0;
+ bd->pci_functions = &direct_functions;
+		/* On all direct bridges I know of, the host bridge itself
+		 * appears as device 0, function 0.
+		 */
+ pcibios_read_config_dword(0, 0, PCI_VENDOR_ID, &id0);
+ if (id0==~0U) {
+ bd->pci_functions = &indirect_functions;
+ pci->config_addr = ((volatile u_int *)
+ (ptr_mem_map->io_base+0xcf8));
+ pci->config_data = ptr_mem_map->io_base+0xcfc;
+ }
+		/* Here we should check that the host bridge is actually
+		 * present; but if it is not, we are in such a desperate
+		 * situation that we probably cannot even report it.
+		 */
+ }
+ /* Now build a small database of all found PCI devices */
+ printk("\nPCI: Probing PCI hardware\n");
+ pci_root.subordinate=pci_scan_bus(&pci_root);
+ print_pci_resources("Configurable PCI resources:\n");
+ reconfigure_pci();
+ print_pci_resources("Allocated PCI resources:\n");
+}
+
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/pci.h b/c/src/lib/libbsp/powerpc/shared/bootloader/pci.h
new file mode 100644
index 0000000000..caf0c3e12f
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/pci.h
@@ -0,0 +1,1159 @@
+/*
+ * $Id$
+ *
+ * PCI defines and function prototypes
+ * Copyright 1994, Drew Eckhardt
+ * Copyright 1997, 1998 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ *
+ * For more information, please consult the following manuals (look at
+ * http://www.pcisig.com/ for how to get them):
+ *
+ * PCI BIOS Specification
+ * PCI Local Bus Specification
+ * PCI to PCI Bridge Specification
+ * PCI System Design Guide
+ */
+
+#ifndef BOOTLOADER_PCI_H
+#define BOOTLOADER_PCI_H
+
+/*
+ * Under PCI, each device has 256 bytes of configuration address space,
+ * of which the first 64 bytes are standardized as follows:
+ */
+#define PCI_VENDOR_ID 0x00 /* 16 bits */
+#define PCI_DEVICE_ID 0x02 /* 16 bits */
+#define PCI_COMMAND 0x04 /* 16 bits */
+#define PCI_COMMAND_IO 0x1 /* Enable response in I/O space */
+#define PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */
+#define PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */
+#define PCI_COMMAND_SPECIAL 0x8 /* Enable response to special cycles */
+#define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */
+#define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */
+#define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */
+#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */
+#define PCI_COMMAND_SERR 0x100 /* Enable SERR */
+#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */
+
+#define PCI_STATUS 0x06 /* 16 bits */
+#define  PCI_STATUS_66MHZ	0x20	/* Support 66 MHz PCI 2.1 bus */
+#define PCI_STATUS_UDF 0x40 /* Support User Definable Features */
+
+#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */
+#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
+#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */
+#define PCI_STATUS_DEVSEL_FAST 0x000
+#define PCI_STATUS_DEVSEL_MEDIUM 0x200
+#define PCI_STATUS_DEVSEL_SLOW 0x400
+#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */
+#define  PCI_STATUS_REC_TARGET_ABORT	0x1000 /* Master ack of target abort */
+#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */
+#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */
+#define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */
+
+#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8
+ revision */
+#define PCI_REVISION_ID 0x08 /* Revision ID */
+#define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */
+#define PCI_CLASS_DEVICE 0x0a /* Device class */
+
+#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
+#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
+#define PCI_HEADER_TYPE 0x0e /* 8 bits */
+#define PCI_HEADER_TYPE_NORMAL 0
+#define PCI_HEADER_TYPE_BRIDGE 1
+#define PCI_HEADER_TYPE_CARDBUS 2
+
+#define PCI_BIST 0x0f /* 8 bits */
+#define PCI_BIST_CODE_MASK 0x0f /* Return result */
+#define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */
+#define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */
+
+/*
+ * Base addresses specify locations in memory or I/O space.
+ * Decoded size can be determined by writing a value of
+ * 0xffffffff to the register, and reading it back. Only
+ * 1 bits are decoded.
+ */
+#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
+#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits [htype 0,1 only] */
+#define PCI_BASE_ADDRESS_2 0x18 /* 32 bits [htype 0 only] */
+#define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */
+#define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */
+#define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */
+#define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */
+#define PCI_BASE_ADDRESS_SPACE_IO 0x01
+#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00
+#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06
+#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */
+#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M */
+#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */
+#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */
+#define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL)
+#define PCI_BASE_ADDRESS_IO_MASK (~0x03UL)
+/* bit 1 is reserved if address_space = 1 */
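+
+/*
+ * Illustrative sketch only (not part of the original header): sizing a
+ * memory BAR with the rule described above -- write all ones, read the
+ * value back, and only the decoded bits remain set.  The pcibios_*
+ * accessors are the ones used elsewhere in this bootloader; the variable
+ * names are made up for the example.
+ *
+ *   unsigned int saved, probe, size;
+ *   pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &saved);
+ *   pcibios_write_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, 0xffffffff);
+ *   pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &probe);
+ *   pcibios_write_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, saved);
+ *   probe &= PCI_BASE_ADDRESS_MEM_MASK;    (use _IO_MASK for an I/O BAR)
+ *   size = ~probe + 1;                     (lowest decoded bit gives size)
+ */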
+
+/* Header type 0 (normal devices) */
+#define PCI_CARDBUS_CIS 0x28
+#define PCI_SUBSYSTEM_VENDOR_ID 0x2c
+#define PCI_SUBSYSTEM_ID 0x2e
+#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */
+#define PCI_ROM_ADDRESS_ENABLE 0x01
+#define PCI_ROM_ADDRESS_MASK (~0x7ffUL)
+
+/* 0x34-0x3b are reserved */
+#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */
+#define PCI_INTERRUPT_PIN 0x3d /* 8 bits */
+#define PCI_MIN_GNT 0x3e /* 8 bits */
+#define PCI_MAX_LAT 0x3f /* 8 bits */
+
+/* Header type 1 (PCI-to-PCI bridges) */
+#define PCI_PRIMARY_BUS 0x18 /* Primary bus number */
+#define PCI_SECONDARY_BUS 0x19 /* Secondary bus number */
+#define PCI_SUBORDINATE_BUS 0x1a /* Highest bus number behind the bridge */
+#define PCI_SEC_LATENCY_TIMER 0x1b /* Latency timer for secondary interface */
+#define PCI_IO_BASE 0x1c /* I/O range behind the bridge */
+#define PCI_IO_LIMIT 0x1d
+#define PCI_IO_RANGE_TYPE_MASK 0x0f /* I/O bridging type */
+#define PCI_IO_RANGE_TYPE_16 0x00
+#define PCI_IO_RANGE_TYPE_32 0x01
+#define PCI_IO_RANGE_MASK ~0x0f
+#define PCI_SEC_STATUS 0x1e /* Secondary status register, only bit 14 used */
+#define PCI_MEMORY_BASE 0x20 /* Memory range behind */
+#define PCI_MEMORY_LIMIT 0x22
+#define PCI_MEMORY_RANGE_TYPE_MASK 0x0f
+#define PCI_MEMORY_RANGE_MASK ~0x0f
+#define PCI_PREF_MEMORY_BASE 0x24 /* Prefetchable memory range behind */
+#define PCI_PREF_MEMORY_LIMIT 0x26
+#define PCI_PREF_RANGE_TYPE_MASK 0x0f
+#define PCI_PREF_RANGE_TYPE_32 0x00
+#define PCI_PREF_RANGE_TYPE_64 0x01
+#define PCI_PREF_RANGE_MASK ~0x0f
+#define PCI_PREF_BASE_UPPER32 0x28 /* Upper half of prefetchable memory range */
+#define PCI_PREF_LIMIT_UPPER32 0x2c
+#define PCI_IO_BASE_UPPER16 0x30 /* Upper half of I/O addresses */
+#define PCI_IO_LIMIT_UPPER16 0x32
+/* 0x34-0x3b is reserved */
+#define PCI_ROM_ADDRESS1 0x38 /* Same as PCI_ROM_ADDRESS, but for htype 1 */
+/* 0x3c-0x3d are same as for htype 0 */
+#define PCI_BRIDGE_CONTROL 0x3e
+#define PCI_BRIDGE_CTL_PARITY 0x01 /* Enable parity detection on secondary interface */
+#define PCI_BRIDGE_CTL_SERR 0x02 /* The same for SERR forwarding */
+#define PCI_BRIDGE_CTL_NO_ISA 0x04 /* Disable bridging of ISA ports */
+#define PCI_BRIDGE_CTL_VGA 0x08 /* Forward VGA addresses */
+#define PCI_BRIDGE_CTL_MASTER_ABORT 0x20 /* Report master aborts */
+#define PCI_BRIDGE_CTL_BUS_RESET 0x40 /* Secondary bus reset */
+#define PCI_BRIDGE_CTL_FAST_BACK 0x80 /* Fast Back2Back enabled on secondary interface */
+
+/* Header type 2 (CardBus bridges) */
+/* 0x14-0x15 reserved */
+#define PCI_CB_SEC_STATUS 0x16 /* Secondary status */
+#define PCI_CB_PRIMARY_BUS 0x18 /* PCI bus number */
+#define PCI_CB_CARD_BUS 0x19 /* CardBus bus number */
+#define PCI_CB_SUBORDINATE_BUS 0x1a /* Subordinate bus number */
+#define PCI_CB_LATENCY_TIMER 0x1b /* CardBus latency timer */
+#define PCI_CB_MEMORY_BASE_0 0x1c
+#define PCI_CB_MEMORY_LIMIT_0 0x20
+#define PCI_CB_MEMORY_BASE_1 0x24
+#define PCI_CB_MEMORY_LIMIT_1 0x28
+#define PCI_CB_IO_BASE_0 0x2c
+#define PCI_CB_IO_BASE_0_HI 0x2e
+#define PCI_CB_IO_LIMIT_0 0x30
+#define PCI_CB_IO_LIMIT_0_HI 0x32
+#define PCI_CB_IO_BASE_1 0x34
+#define PCI_CB_IO_BASE_1_HI 0x36
+#define PCI_CB_IO_LIMIT_1 0x38
+#define PCI_CB_IO_LIMIT_1_HI 0x3a
+#define PCI_CB_IO_RANGE_MASK ~0x03
+/* 0x3c-0x3d are same as for htype 0 */
+#define PCI_CB_BRIDGE_CONTROL 0x3e
+#define PCI_CB_BRIDGE_CTL_PARITY 0x01 /* Similar to standard bridge control register */
+#define PCI_CB_BRIDGE_CTL_SERR 0x02
+#define PCI_CB_BRIDGE_CTL_ISA 0x04
+#define PCI_CB_BRIDGE_CTL_VGA 0x08
+#define PCI_CB_BRIDGE_CTL_MASTER_ABORT 0x20
+#define PCI_CB_BRIDGE_CTL_CB_RESET 0x40 /* CardBus reset */
+#define PCI_CB_BRIDGE_CTL_16BIT_INT 0x80 /* Enable interrupt for 16-bit cards */
+#define PCI_CB_BRIDGE_CTL_PREFETCH_MEM0 0x100 /* Prefetch enable for both memory regions */
+#define PCI_CB_BRIDGE_CTL_PREFETCH_MEM1 0x200
+#define PCI_CB_BRIDGE_CTL_POST_WRITES 0x400
+#define PCI_CB_SUBSYSTEM_VENDOR_ID 0x40
+#define PCI_CB_SUBSYSTEM_ID 0x42
+#define PCI_CB_LEGACY_MODE_BASE 0x44 /* 16-bit PC Card legacy mode base address (ExCa) */
+/* 0x48-0x7f reserved */
+
+/* Device classes and subclasses */
+
+#define PCI_CLASS_NOT_DEFINED 0x0000
+#define PCI_CLASS_NOT_DEFINED_VGA 0x0001
+
+#define PCI_BASE_CLASS_STORAGE 0x01
+#define PCI_CLASS_STORAGE_SCSI 0x0100
+#define PCI_CLASS_STORAGE_IDE 0x0101
+#define PCI_CLASS_STORAGE_FLOPPY 0x0102
+#define PCI_CLASS_STORAGE_IPI 0x0103
+#define PCI_CLASS_STORAGE_RAID 0x0104
+#define PCI_CLASS_STORAGE_OTHER 0x0180
+
+#define PCI_BASE_CLASS_NETWORK 0x02
+#define PCI_CLASS_NETWORK_ETHERNET 0x0200
+#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201
+#define PCI_CLASS_NETWORK_FDDI 0x0202
+#define PCI_CLASS_NETWORK_ATM 0x0203
+#define PCI_CLASS_NETWORK_OTHER 0x0280
+
+#define PCI_BASE_CLASS_DISPLAY 0x03
+#define PCI_CLASS_DISPLAY_VGA 0x0300
+#define PCI_CLASS_DISPLAY_XGA 0x0301
+#define PCI_CLASS_DISPLAY_OTHER 0x0380
+
+#define PCI_BASE_CLASS_MULTIMEDIA 0x04
+#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
+#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
+#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
+
+#define PCI_BASE_CLASS_MEMORY 0x05
+#define PCI_CLASS_MEMORY_RAM 0x0500
+#define PCI_CLASS_MEMORY_FLASH 0x0501
+#define PCI_CLASS_MEMORY_OTHER 0x0580
+
+#define PCI_BASE_CLASS_BRIDGE 0x06
+#define PCI_CLASS_BRIDGE_HOST 0x0600
+#define PCI_CLASS_BRIDGE_ISA 0x0601
+#define PCI_CLASS_BRIDGE_EISA 0x0602
+#define PCI_CLASS_BRIDGE_MC 0x0603
+#define PCI_CLASS_BRIDGE_PCI 0x0604
+#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
+#define PCI_CLASS_BRIDGE_NUBUS 0x0606
+#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
+#define PCI_CLASS_BRIDGE_OTHER 0x0680
+
+#define PCI_BASE_CLASS_COMMUNICATION 0x07
+#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700
+#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701
+#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
+
+#define PCI_BASE_CLASS_SYSTEM 0x08
+#define PCI_CLASS_SYSTEM_PIC 0x0800
+#define PCI_CLASS_SYSTEM_DMA 0x0801
+#define PCI_CLASS_SYSTEM_TIMER 0x0802
+#define PCI_CLASS_SYSTEM_RTC 0x0803
+#define PCI_CLASS_SYSTEM_OTHER 0x0880
+
+#define PCI_BASE_CLASS_INPUT 0x09
+#define PCI_CLASS_INPUT_KEYBOARD 0x0900
+#define PCI_CLASS_INPUT_PEN 0x0901
+#define PCI_CLASS_INPUT_MOUSE 0x0902
+#define PCI_CLASS_INPUT_OTHER 0x0980
+
+#define PCI_BASE_CLASS_DOCKING 0x0a
+#define PCI_CLASS_DOCKING_GENERIC 0x0a00
+#define PCI_CLASS_DOCKING_OTHER 0x0a01
+
+#define PCI_BASE_CLASS_PROCESSOR 0x0b
+#define PCI_CLASS_PROCESSOR_386 0x0b00
+#define PCI_CLASS_PROCESSOR_486 0x0b01
+#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02
+#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10
+#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20
+#define PCI_CLASS_PROCESSOR_CO 0x0b40
+
+#define PCI_BASE_CLASS_SERIAL 0x0c
+#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00
+#define PCI_CLASS_SERIAL_ACCESS 0x0c01
+#define PCI_CLASS_SERIAL_SSA 0x0c02
+#define PCI_CLASS_SERIAL_USB 0x0c03
+#define PCI_CLASS_SERIAL_FIBER 0x0c04
+
+#define PCI_CLASS_OTHERS 0xff
+
+/*
+ * Vendor and card ID's: sort these numerically according to vendor
+ * (and according to card ID within vendor). Send all updates to
+ * <linux-pcisupport@cck.uni-kl.de>.
+ */
+#define PCI_VENDOR_ID_COMPAQ 0x0e11
+#define PCI_DEVICE_ID_COMPAQ_1280 0x3033
+#define PCI_DEVICE_ID_COMPAQ_TRIFLEX 0x4000
+#define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10
+#define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32
+#define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35
+#define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40
+#define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43
+#define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011
+#define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150
+
+#define PCI_VENDOR_ID_NCR 0x1000
+#define PCI_DEVICE_ID_NCR_53C810 0x0001
+#define PCI_DEVICE_ID_NCR_53C820 0x0002
+#define PCI_DEVICE_ID_NCR_53C825 0x0003
+#define PCI_DEVICE_ID_NCR_53C815 0x0004
+#define PCI_DEVICE_ID_NCR_53C860 0x0006
+#define PCI_DEVICE_ID_NCR_53C896 0x000b
+#define PCI_DEVICE_ID_NCR_53C895 0x000c
+#define PCI_DEVICE_ID_NCR_53C885 0x000d
+#define PCI_DEVICE_ID_NCR_53C875 0x000f
+#define PCI_DEVICE_ID_NCR_53C875J 0x008f
+
+#define PCI_VENDOR_ID_ATI 0x1002
+#define PCI_DEVICE_ID_ATI_68800 0x4158
+#define PCI_DEVICE_ID_ATI_215CT222 0x4354
+#define PCI_DEVICE_ID_ATI_210888CX 0x4358
+#define PCI_DEVICE_ID_ATI_215GB 0x4742
+#define PCI_DEVICE_ID_ATI_215GD 0x4744
+#define PCI_DEVICE_ID_ATI_215GI 0x4749
+#define PCI_DEVICE_ID_ATI_215GP 0x4750
+#define PCI_DEVICE_ID_ATI_215GQ 0x4751
+#define PCI_DEVICE_ID_ATI_215GT 0x4754
+#define PCI_DEVICE_ID_ATI_215GTB 0x4755
+#define PCI_DEVICE_ID_ATI_210888GX 0x4758
+#define PCI_DEVICE_ID_ATI_215LG 0x4c47
+#define PCI_DEVICE_ID_ATI_264LT 0x4c54
+#define PCI_DEVICE_ID_ATI_264VT 0x5654
+
+#define PCI_VENDOR_ID_VLSI 0x1004
+#define PCI_DEVICE_ID_VLSI_82C592 0x0005
+#define PCI_DEVICE_ID_VLSI_82C593 0x0006
+#define PCI_DEVICE_ID_VLSI_82C594 0x0007
+#define PCI_DEVICE_ID_VLSI_82C597 0x0009
+#define PCI_DEVICE_ID_VLSI_82C541 0x000c
+#define PCI_DEVICE_ID_VLSI_82C543 0x000d
+#define PCI_DEVICE_ID_VLSI_82C532 0x0101
+#define PCI_DEVICE_ID_VLSI_82C534 0x0102
+#define PCI_DEVICE_ID_VLSI_82C535 0x0104
+#define PCI_DEVICE_ID_VLSI_82C147 0x0105
+#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
+
+#define PCI_VENDOR_ID_ADL 0x1005
+#define PCI_DEVICE_ID_ADL_2301 0x2301
+
+#define PCI_VENDOR_ID_NS 0x100b
+#define PCI_DEVICE_ID_NS_87415 0x0002
+#define PCI_DEVICE_ID_NS_87410 0xd001
+
+#define PCI_VENDOR_ID_TSENG 0x100c
+#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202
+#define PCI_DEVICE_ID_TSENG_W32P_b 0x3205
+#define PCI_DEVICE_ID_TSENG_W32P_c 0x3206
+#define PCI_DEVICE_ID_TSENG_W32P_d 0x3207
+#define PCI_DEVICE_ID_TSENG_ET6000 0x3208
+
+#define PCI_VENDOR_ID_WEITEK 0x100e
+#define PCI_DEVICE_ID_WEITEK_P9000 0x9001
+#define PCI_DEVICE_ID_WEITEK_P9100 0x9100
+
+#define PCI_VENDOR_ID_DEC 0x1011
+#define PCI_DEVICE_ID_DEC_BRD 0x0001
+#define PCI_DEVICE_ID_DEC_TULIP 0x0002
+#define PCI_DEVICE_ID_DEC_TGA 0x0004
+#define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009
+#define PCI_DEVICE_ID_DEC_TGA2 0x000D
+#define PCI_DEVICE_ID_DEC_FDDI 0x000F
+#define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014
+#define PCI_DEVICE_ID_DEC_21142 0x0019
+#define PCI_DEVICE_ID_DEC_21052 0x0021
+#define PCI_DEVICE_ID_DEC_21150 0x0022
+#define PCI_DEVICE_ID_DEC_21152 0x0024
+
+#define PCI_VENDOR_ID_CIRRUS 0x1013
+#define PCI_DEVICE_ID_CIRRUS_7548 0x0038
+#define PCI_DEVICE_ID_CIRRUS_5430 0x00a0
+#define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4
+#define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8
+#define PCI_DEVICE_ID_CIRRUS_5436 0x00ac
+#define PCI_DEVICE_ID_CIRRUS_5446 0x00b8
+#define PCI_DEVICE_ID_CIRRUS_5480 0x00bc
+#define PCI_DEVICE_ID_CIRRUS_5464 0x00d4
+#define PCI_DEVICE_ID_CIRRUS_5465 0x00d6
+#define PCI_DEVICE_ID_CIRRUS_6729 0x1100
+#define PCI_DEVICE_ID_CIRRUS_6832 0x1110
+#define PCI_DEVICE_ID_CIRRUS_7542 0x1200
+#define PCI_DEVICE_ID_CIRRUS_7543 0x1202
+#define PCI_DEVICE_ID_CIRRUS_7541 0x1204
+
+#define PCI_VENDOR_ID_IBM 0x1014
+#define PCI_DEVICE_ID_IBM_FIRE_CORAL 0x000a
+#define PCI_DEVICE_ID_IBM_TR 0x0018
+#define PCI_DEVICE_ID_IBM_82G2675 0x001d
+#define PCI_DEVICE_ID_IBM_MCA 0x0020
+#define PCI_DEVICE_ID_IBM_82351 0x0022
+#define PCI_DEVICE_ID_IBM_SERVERAID 0x002e
+#define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e
+#define PCI_DEVICE_ID_IBM_MPIC 0x0046
+#define PCI_DEVICE_ID_IBM_3780IDSP 0x007d
+#define PCI_DEVICE_ID_IBM_MPIC_2 0xffff
+
+#define PCI_VENDOR_ID_WD 0x101c
+#define PCI_DEVICE_ID_WD_7197 0x3296
+
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD_LANCE 0x2000
+#define PCI_DEVICE_ID_AMD_SCSI 0x2020
+
+#define PCI_VENDOR_ID_TRIDENT 0x1023
+#define PCI_DEVICE_ID_TRIDENT_9397 0x9397
+#define PCI_DEVICE_ID_TRIDENT_9420 0x9420
+#define PCI_DEVICE_ID_TRIDENT_9440 0x9440
+#define PCI_DEVICE_ID_TRIDENT_9660 0x9660
+#define PCI_DEVICE_ID_TRIDENT_9750 0x9750
+
+#define PCI_VENDOR_ID_AI 0x1025
+#define PCI_DEVICE_ID_AI_M1435 0x1435
+
+#define PCI_VENDOR_ID_MATROX 0x102B
+#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
+#define PCI_DEVICE_ID_MATROX_MIL 0x0519
+#define PCI_DEVICE_ID_MATROX_MYS 0x051A
+#define PCI_DEVICE_ID_MATROX_MIL_2 0x051b
+#define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f
+#define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10
+
+#define PCI_VENDOR_ID_CT 0x102c
+#define PCI_DEVICE_ID_CT_65545 0x00d8
+#define PCI_DEVICE_ID_CT_65548 0x00dc
+#define PCI_DEVICE_ID_CT_65550 0x00e0
+#define PCI_DEVICE_ID_CT_65554 0x00e4
+#define PCI_DEVICE_ID_CT_65555 0x00e5
+
+#define PCI_VENDOR_ID_MIRO 0x1031
+#define PCI_DEVICE_ID_MIRO_36050 0x5601
+
+#define PCI_VENDOR_ID_NEC 0x1033
+#define PCI_DEVICE_ID_NEC_PCX2 0x0046
+
+#define PCI_VENDOR_ID_FD 0x1036
+#define PCI_DEVICE_ID_FD_36C70 0x0000
+
+#define PCI_VENDOR_ID_SI 0x1039
+#define PCI_DEVICE_ID_SI_5591_AGP 0x0001
+#define PCI_DEVICE_ID_SI_6202 0x0002
+#define PCI_DEVICE_ID_SI_503 0x0008
+#define PCI_DEVICE_ID_SI_ACPI 0x0009
+#define PCI_DEVICE_ID_SI_5597_VGA 0x0200
+#define PCI_DEVICE_ID_SI_6205 0x0205
+#define PCI_DEVICE_ID_SI_501 0x0406
+#define PCI_DEVICE_ID_SI_496 0x0496
+#define PCI_DEVICE_ID_SI_601 0x0601
+#define PCI_DEVICE_ID_SI_5107 0x5107
+#define PCI_DEVICE_ID_SI_5511 0x5511
+#define PCI_DEVICE_ID_SI_5513 0x5513
+#define PCI_DEVICE_ID_SI_5571 0x5571
+#define PCI_DEVICE_ID_SI_5591 0x5591
+#define PCI_DEVICE_ID_SI_5597 0x5597
+#define PCI_DEVICE_ID_SI_7001 0x7001
+
+#define PCI_VENDOR_ID_HP 0x103c
+#define PCI_DEVICE_ID_HP_J2585A 0x1030
+#define PCI_DEVICE_ID_HP_J2585B 0x1031
+
+#define PCI_VENDOR_ID_PCTECH 0x1042
+#define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000
+#define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_0 0x3000
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_1 0x3010
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020
+
+#define PCI_VENDOR_ID_DPT 0x1044
+#define PCI_DEVICE_ID_DPT 0xa400
+
+#define PCI_VENDOR_ID_OPTI 0x1045
+#define PCI_DEVICE_ID_OPTI_92C178 0xc178
+#define PCI_DEVICE_ID_OPTI_82C557 0xc557
+#define PCI_DEVICE_ID_OPTI_82C558 0xc558
+#define PCI_DEVICE_ID_OPTI_82C621 0xc621
+#define PCI_DEVICE_ID_OPTI_82C700 0xc700
+#define PCI_DEVICE_ID_OPTI_82C701 0xc701
+#define PCI_DEVICE_ID_OPTI_82C814 0xc814
+#define PCI_DEVICE_ID_OPTI_82C822 0xc822
+#define PCI_DEVICE_ID_OPTI_82C825 0xd568
+
+#define PCI_VENDOR_ID_SGS 0x104a
+#define PCI_DEVICE_ID_SGS_2000 0x0008
+#define PCI_DEVICE_ID_SGS_1764 0x0009
+
+#define PCI_VENDOR_ID_BUSLOGIC 0x104B
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040
+#define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130
+
+#define PCI_VENDOR_ID_TI 0x104c
+#define PCI_DEVICE_ID_TI_TVP4010 0x3d04
+#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
+#define PCI_DEVICE_ID_TI_PCI1130 0xac12
+#define PCI_DEVICE_ID_TI_PCI1031 0xac13
+#define PCI_DEVICE_ID_TI_PCI1131 0xac15
+#define PCI_DEVICE_ID_TI_PCI1250 0xac16
+#define PCI_DEVICE_ID_TI_PCI1220 0xac17
+
+#define PCI_VENDOR_ID_OAK 0x104e
+#define PCI_DEVICE_ID_OAK_OTI107 0x0107
+
+/* Winbond have two vendor IDs! See 0x10ad as well */
+#define PCI_VENDOR_ID_WINBOND2 0x1050
+#define PCI_DEVICE_ID_WINBOND2_89C940 0x0940
+
+#define PCI_VENDOR_ID_MOTOROLA 0x1057
+#define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001
+#define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002
+#define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801
+
+#define PCI_VENDOR_ID_PROMISE 0x105a
+#define PCI_DEVICE_ID_PROMISE_20246 0x4d33
+#define PCI_DEVICE_ID_PROMISE_5300 0x5300
+
+#define PCI_VENDOR_ID_N9 0x105d
+#define PCI_DEVICE_ID_N9_I128 0x2309
+#define PCI_DEVICE_ID_N9_I128_2 0x2339
+#define PCI_DEVICE_ID_N9_I128_T2R 0x493d
+
+#define PCI_VENDOR_ID_UMC 0x1060
+#define PCI_DEVICE_ID_UMC_UM8673F 0x0101
+#define PCI_DEVICE_ID_UMC_UM8891A 0x0891
+#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a
+#define PCI_DEVICE_ID_UMC_UM8886A 0x886a
+#define PCI_DEVICE_ID_UMC_UM8881F 0x8881
+#define PCI_DEVICE_ID_UMC_UM8886F 0x8886
+#define PCI_DEVICE_ID_UMC_UM9017F 0x9017
+#define PCI_DEVICE_ID_UMC_UM8886N 0xe886
+#define PCI_DEVICE_ID_UMC_UM8891N 0xe891
+
+#define PCI_VENDOR_ID_X 0x1061
+#define PCI_DEVICE_ID_X_AGX016 0x0001
+
+#define PCI_VENDOR_ID_PICOP 0x1066
+#define PCI_DEVICE_ID_PICOP_PT86C52X 0x0001
+#define PCI_DEVICE_ID_PICOP_PT80C524 0x8002
+
+#define PCI_VENDOR_ID_APPLE 0x106b
+#define PCI_DEVICE_ID_APPLE_BANDIT 0x0001
+#define PCI_DEVICE_ID_APPLE_GC 0x0002
+#define PCI_DEVICE_ID_APPLE_HYDRA 0x000e
+
+#define PCI_VENDOR_ID_NEXGEN 0x1074
+#define PCI_DEVICE_ID_NEXGEN_82C501 0x4e78
+
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+#define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020
+#define PCI_DEVICE_ID_QLOGIC_ISP1022 0x1022
+
+#define PCI_VENDOR_ID_CYRIX 0x1078
+#define PCI_DEVICE_ID_CYRIX_5510 0x0000
+#define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001
+#define PCI_DEVICE_ID_CYRIX_5520 0x0002
+#define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100
+#define PCI_DEVICE_ID_CYRIX_5530_SMI 0x0101
+#define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102
+#define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103
+#define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104
+
+#define PCI_VENDOR_ID_LEADTEK 0x107d
+#define PCI_DEVICE_ID_LEADTEK_805 0x0000
+
+#define PCI_VENDOR_ID_CONTAQ 0x1080
+#define PCI_DEVICE_ID_CONTAQ_82C599 0x0600
+#define PCI_DEVICE_ID_CONTAQ_82C693 0xc693
+
+#define PCI_VENDOR_ID_FOREX 0x1083
+
+#define PCI_VENDOR_ID_OLICOM 0x108d
+#define PCI_DEVICE_ID_OLICOM_OC3136 0x0001
+#define PCI_DEVICE_ID_OLICOM_OC2315 0x0011
+#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012
+#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013
+#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
+#define PCI_DEVICE_ID_OLICOM_OC6151 0x0021
+
+#define PCI_VENDOR_ID_SUN 0x108e
+#define PCI_DEVICE_ID_SUN_EBUS 0x1000
+#define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001
+#define PCI_DEVICE_ID_SUN_SIMBA 0x5000
+#define PCI_DEVICE_ID_SUN_PBM 0x8000
+#define PCI_DEVICE_ID_SUN_SABRE 0xa000
+
+#define PCI_VENDOR_ID_CMD 0x1095
+#define PCI_DEVICE_ID_CMD_640 0x0640
+#define PCI_DEVICE_ID_CMD_643 0x0643
+#define PCI_DEVICE_ID_CMD_646 0x0646
+#define PCI_DEVICE_ID_CMD_647 0x0647
+#define PCI_DEVICE_ID_CMD_670 0x0670
+
+#define PCI_VENDOR_ID_VISION 0x1098
+#define PCI_DEVICE_ID_VISION_QD8500 0x0001
+#define PCI_DEVICE_ID_VISION_QD8580 0x0002
+
+#define PCI_VENDOR_ID_BROOKTREE 0x109e
+#define PCI_DEVICE_ID_BROOKTREE_848 0x0350
+#define PCI_DEVICE_ID_BROOKTREE_849A 0x0351
+#define PCI_DEVICE_ID_BROOKTREE_8474 0x8474
+
+#define PCI_VENDOR_ID_SIERRA 0x10a8
+#define PCI_DEVICE_ID_SIERRA_STB 0x0000
+
+#define PCI_VENDOR_ID_ACC 0x10aa
+#define PCI_DEVICE_ID_ACC_2056 0x0000
+
+#define PCI_VENDOR_ID_WINBOND 0x10ad
+#define PCI_DEVICE_ID_WINBOND_83769 0x0001
+#define PCI_DEVICE_ID_WINBOND_82C105 0x0105
+#define PCI_DEVICE_ID_WINBOND_83C553 0x0565
+
+#define PCI_VENDOR_ID_DATABOOK 0x10b3
+#define PCI_DEVICE_ID_DATABOOK_87144 0xb106
+
+#define PCI_VENDOR_ID_PLX 0x10b5
+#define PCI_DEVICE_ID_PLX_9050 0x9050
+#define PCI_DEVICE_ID_PLX_9060 0x9060
+#define PCI_DEVICE_ID_PLX_9060ES 0x906E
+#define PCI_DEVICE_ID_PLX_9060SD 0x906D
+#define PCI_DEVICE_ID_PLX_9080 0x9080
+
+#define PCI_VENDOR_ID_MADGE 0x10b6
+#define PCI_DEVICE_ID_MADGE_MK2 0x0002
+#define PCI_DEVICE_ID_MADGE_C155S 0x1001
+
+#define PCI_VENDOR_ID_3COM 0x10b7
+#define PCI_DEVICE_ID_3COM_3C339 0x3390
+#define PCI_DEVICE_ID_3COM_3C590 0x5900
+#define PCI_DEVICE_ID_3COM_3C595TX 0x5950
+#define PCI_DEVICE_ID_3COM_3C595T4 0x5951
+#define PCI_DEVICE_ID_3COM_3C595MII 0x5952
+#define PCI_DEVICE_ID_3COM_3C900TPO 0x9000
+#define PCI_DEVICE_ID_3COM_3C900COMBO 0x9001
+#define PCI_DEVICE_ID_3COM_3C905TX 0x9050
+#define PCI_DEVICE_ID_3COM_3C905T4 0x9051
+#define PCI_DEVICE_ID_3COM_3C905B_TX 0x9055
+
+#define PCI_VENDOR_ID_SMC 0x10b8
+#define PCI_DEVICE_ID_SMC_EPIC100 0x0005
+
+#define PCI_VENDOR_ID_AL 0x10b9
+#define PCI_DEVICE_ID_AL_M1445 0x1445
+#define PCI_DEVICE_ID_AL_M1449 0x1449
+#define PCI_DEVICE_ID_AL_M1451 0x1451
+#define PCI_DEVICE_ID_AL_M1461 0x1461
+#define PCI_DEVICE_ID_AL_M1489 0x1489
+#define PCI_DEVICE_ID_AL_M1511 0x1511
+#define PCI_DEVICE_ID_AL_M1513 0x1513
+#define PCI_DEVICE_ID_AL_M1521 0x1521
+#define PCI_DEVICE_ID_AL_M1523 0x1523
+#define PCI_DEVICE_ID_AL_M1531 0x1531
+#define PCI_DEVICE_ID_AL_M1533 0x1533
+#define PCI_DEVICE_ID_AL_M3307 0x3307
+#define PCI_DEVICE_ID_AL_M4803 0x5215
+#define PCI_DEVICE_ID_AL_M5219 0x5219
+#define PCI_DEVICE_ID_AL_M5229 0x5229
+#define PCI_DEVICE_ID_AL_M5237 0x5237
+#define PCI_DEVICE_ID_AL_M7101 0x7101
+
+#define PCI_VENDOR_ID_MITSUBISHI 0x10ba
+
+#define PCI_VENDOR_ID_SURECOM 0x10bd
+#define PCI_DEVICE_ID_SURECOM_NE34 0x0e34
+
+#define PCI_VENDOR_ID_NEOMAGIC 0x10c8
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2070 0x0001
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128V 0x0002
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128ZV 0x0003
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2160 0x0004
+
+#define PCI_VENDOR_ID_ASP 0x10cd
+#define PCI_DEVICE_ID_ASP_ABP940 0x1200
+#define PCI_DEVICE_ID_ASP_ABP940U 0x1300
+#define PCI_DEVICE_ID_ASP_ABP940UW 0x2300
+
+#define PCI_VENDOR_ID_MACRONIX 0x10d9
+#define PCI_DEVICE_ID_MACRONIX_MX98713 0x0512
+#define PCI_DEVICE_ID_MACRONIX_MX987x5 0x0531
+
+#define PCI_VENDOR_ID_CERN 0x10dc
+#define PCI_DEVICE_ID_CERN_SPSB_PMC 0x0001
+#define PCI_DEVICE_ID_CERN_SPSB_PCI 0x0002
+#define PCI_DEVICE_ID_CERN_HIPPI_DST 0x0021
+#define PCI_DEVICE_ID_CERN_HIPPI_SRC 0x0022
+
+#define PCI_VENDOR_ID_NVIDIA 0x10de
+
+#define PCI_VENDOR_ID_IMS 0x10e0
+#define PCI_DEVICE_ID_IMS_8849 0x8849
+
+#define PCI_VENDOR_ID_TEKRAM2 0x10e1
+#define PCI_DEVICE_ID_TEKRAM2_690c 0x690c
+
+#define PCI_VENDOR_ID_TUNDRA 0x10e3
+#define PCI_DEVICE_ID_TUNDRA_CA91C042 0x0000
+
+#define PCI_VENDOR_ID_AMCC 0x10e8
+#define PCI_DEVICE_ID_AMCC_MYRINET 0x8043
+#define PCI_DEVICE_ID_AMCC_PARASTATION 0x8062
+#define PCI_DEVICE_ID_AMCC_S5933 0x807d
+#define PCI_DEVICE_ID_AMCC_S5933_HEPC3 0x809c
+
+#define PCI_VENDOR_ID_INTERG 0x10ea
+#define PCI_DEVICE_ID_INTERG_1680 0x1680
+#define PCI_DEVICE_ID_INTERG_1682 0x1682
+
+#define PCI_VENDOR_ID_REALTEK 0x10ec
+#define PCI_DEVICE_ID_REALTEK_8029 0x8029
+#define PCI_DEVICE_ID_REALTEK_8129 0x8129
+#define PCI_DEVICE_ID_REALTEK_8139 0x8139
+
+#define PCI_VENDOR_ID_TRUEVISION 0x10fa
+#define PCI_DEVICE_ID_TRUEVISION_T1000 0x000c
+
+#define PCI_VENDOR_ID_INIT 0x1101
+#define PCI_DEVICE_ID_INIT_320P 0x9100
+#define PCI_DEVICE_ID_INIT_360P 0x9500
+
+#define PCI_VENDOR_ID_TTI 0x1103
+#define PCI_DEVICE_ID_TTI_HPT343 0x0003
+
+#define PCI_VENDOR_ID_VIA 0x1106
+#define PCI_DEVICE_ID_VIA_82C505 0x0505
+#define PCI_DEVICE_ID_VIA_82C561 0x0561
+#define PCI_DEVICE_ID_VIA_82C586_1 0x0571
+#define PCI_DEVICE_ID_VIA_82C576 0x0576
+#define PCI_DEVICE_ID_VIA_82C585 0x0585
+#define PCI_DEVICE_ID_VIA_82C586_0 0x0586
+#define PCI_DEVICE_ID_VIA_82C595 0x0595
+#define PCI_DEVICE_ID_VIA_82C597_0 0x0597
+#define PCI_DEVICE_ID_VIA_82C926 0x0926
+#define PCI_DEVICE_ID_VIA_82C416 0x1571
+#define PCI_DEVICE_ID_VIA_82C595_97 0x1595
+#define PCI_DEVICE_ID_VIA_82C586_2 0x3038
+#define PCI_DEVICE_ID_VIA_82C586_3 0x3040
+#define PCI_DEVICE_ID_VIA_86C100A 0x6100
+#define PCI_DEVICE_ID_VIA_82C597_1 0x8597
+
+#define PCI_VENDOR_ID_VORTEX 0x1119
+#define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000
+#define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001
+#define PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002
+#define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003
+#define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004
+#define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005
+#define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006
+#define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007
+#define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008
+#define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009
+#define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a
+#define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b
+#define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c
+#define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP1 0x0110
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP1 0x0111
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP1 0x0112
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP1 0x0113
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP1 0x0114
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP1 0x0115
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP2 0x0120
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP2 0x0121
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP2 0x0122
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP2 0x0123
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP2 0x0124
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP2 0x0125
+
+#define PCI_VENDOR_ID_EF 0x111a
+#define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000
+#define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002
+
+#define PCI_VENDOR_ID_FORE 0x1127
+#define PCI_DEVICE_ID_FORE_PCA200PC 0x0210
+#define PCI_DEVICE_ID_FORE_PCA200E 0x0300
+
+#define PCI_VENDOR_ID_IMAGINGTECH 0x112f
+#define PCI_DEVICE_ID_IMAGINGTECH_ICPCI 0x0000
+
+#define PCI_VENDOR_ID_PHILIPS 0x1131
+#define PCI_DEVICE_ID_PHILIPS_SAA7145 0x7145
+#define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146
+
+#define PCI_VENDOR_ID_CYCLONE 0x113c
+#define PCI_DEVICE_ID_CYCLONE_SDK 0x0001
+
+#define PCI_VENDOR_ID_ALLIANCE 0x1142
+#define PCI_DEVICE_ID_ALLIANCE_PROMOTIO 0x3210
+#define PCI_DEVICE_ID_ALLIANCE_PROVIDEO 0x6422
+#define PCI_DEVICE_ID_ALLIANCE_AT24 0x6424
+#define PCI_DEVICE_ID_ALLIANCE_AT3D 0x643d
+
+#define PCI_VENDOR_ID_SK 0x1148
+#define PCI_DEVICE_ID_SK_FP 0x4000
+#define PCI_DEVICE_ID_SK_TR 0x4200
+#define PCI_DEVICE_ID_SK_GE 0x4300
+
+#define PCI_VENDOR_ID_VMIC 0x114a
+#define PCI_DEVICE_ID_VMIC_VME 0x7587
+
+#define PCI_VENDOR_ID_DIGI 0x114f
+#define PCI_DEVICE_ID_DIGI_EPC 0x0002
+#define PCI_DEVICE_ID_DIGI_RIGHTSWITCH 0x0003
+#define PCI_DEVICE_ID_DIGI_XEM 0x0004
+#define PCI_DEVICE_ID_DIGI_XR 0x0005
+#define PCI_DEVICE_ID_DIGI_CX 0x0006
+#define PCI_DEVICE_ID_DIGI_XRJ 0x0009
+#define PCI_DEVICE_ID_DIGI_EPCJ 0x000a
+#define PCI_DEVICE_ID_DIGI_XR_920 0x0027
+
+#define PCI_VENDOR_ID_MUTECH 0x1159
+#define PCI_DEVICE_ID_MUTECH_MV1000 0x0001
+
+#define PCI_VENDOR_ID_RENDITION 0x1163
+#define PCI_DEVICE_ID_RENDITION_VERITE 0x0001
+#define PCI_DEVICE_ID_RENDITION_VERITE2100 0x2000
+
+#define PCI_VENDOR_ID_TOSHIBA 0x1179
+#define PCI_DEVICE_ID_TOSHIBA_601 0x0601
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f
+
+#define PCI_VENDOR_ID_RICOH 0x1180
+#define PCI_DEVICE_ID_RICOH_RL5C465 0x0465
+#define PCI_DEVICE_ID_RICOH_RL5C466 0x0466
+#define PCI_DEVICE_ID_RICOH_RL5C475 0x0475
+#define PCI_DEVICE_ID_RICOH_RL5C478 0x0478
+
+#define PCI_VENDOR_ID_ARTOP 0x1191
+#define PCI_DEVICE_ID_ARTOP_ATP8400 0x0004
+#define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005
+
+#define PCI_VENDOR_ID_ZEITNET 0x1193
+#define PCI_DEVICE_ID_ZEITNET_1221 0x0001
+#define PCI_DEVICE_ID_ZEITNET_1225 0x0002
+
+#define PCI_VENDOR_ID_OMEGA 0x119b
+#define PCI_DEVICE_ID_OMEGA_82C092G 0x1221
+
+#define PCI_VENDOR_ID_LITEON 0x11ad
+#define PCI_DEVICE_ID_LITEON_LNE100TX 0x0002
+
+#define PCI_VENDOR_ID_NP 0x11bc
+#define PCI_DEVICE_ID_NP_PCI_FDDI 0x0001
+
+#define PCI_VENDOR_ID_ATT 0x11c1
+#define PCI_DEVICE_ID_ATT_L56XMF 0x0440
+
+#define PCI_VENDOR_ID_SPECIALIX 0x11cb
+#define PCI_DEVICE_ID_SPECIALIX_IO8 0x2000
+#define PCI_DEVICE_ID_SPECIALIX_XIO 0x4000
+#define PCI_DEVICE_ID_SPECIALIX_RIO 0x8000
+
+#define PCI_VENDOR_ID_AURAVISION 0x11d1
+#define PCI_DEVICE_ID_AURAVISION_VXP524 0x01f7
+
+#define PCI_VENDOR_ID_IKON 0x11d5
+#define PCI_DEVICE_ID_IKON_10115 0x0115
+#define PCI_DEVICE_ID_IKON_10117 0x0117
+
+#define PCI_VENDOR_ID_ZORAN 0x11de
+#define PCI_DEVICE_ID_ZORAN_36057 0x6057
+#define PCI_DEVICE_ID_ZORAN_36120 0x6120
+
+#define PCI_VENDOR_ID_KINETIC 0x11f4
+#define PCI_DEVICE_ID_KINETIC_2915 0x2915
+
+#define PCI_VENDOR_ID_COMPEX 0x11f6
+#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112
+#define PCI_DEVICE_ID_COMPEX_RL2000 0x1401
+
+#define PCI_VENDOR_ID_RP 0x11fe
+#define PCI_DEVICE_ID_RP32INTF 0x0001
+#define PCI_DEVICE_ID_RP8INTF 0x0002
+#define PCI_DEVICE_ID_RP16INTF 0x0003
+#define PCI_DEVICE_ID_RP4QUAD 0x0004
+#define PCI_DEVICE_ID_RP8OCTA 0x0005
+#define PCI_DEVICE_ID_RP8J 0x0006
+#define PCI_DEVICE_ID_RPP4 0x000A
+#define PCI_DEVICE_ID_RPP8 0x000B
+#define PCI_DEVICE_ID_RP8M 0x000C
+
+#define PCI_VENDOR_ID_CYCLADES 0x120e
+#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100
+#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101
+#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200
+#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201
+
+#define PCI_VENDOR_ID_ESSENTIAL 0x120f
+#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001
+
+#define PCI_VENDOR_ID_O2 0x1217
+#define PCI_DEVICE_ID_O2_6729 0x6729
+#define PCI_DEVICE_ID_O2_6730 0x673a
+#define PCI_DEVICE_ID_O2_6832 0x6832
+#define PCI_DEVICE_ID_O2_6836 0x6836
+
+#define PCI_VENDOR_ID_3DFX 0x121a
+#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001
+#define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002
+
+#define PCI_VENDOR_ID_SIGMADES 0x1236
+#define PCI_DEVICE_ID_SIGMADES_6425 0x6401
+
+#define PCI_VENDOR_ID_CCUBE 0x123f
+
+#define PCI_VENDOR_ID_DIPIX 0x1246
+
+#define PCI_VENDOR_ID_STALLION 0x124d
+#define PCI_DEVICE_ID_STALLION_ECHPCI832 0x0000
+#define PCI_DEVICE_ID_STALLION_ECHPCI864 0x0002
+#define PCI_DEVICE_ID_STALLION_EIOPCI 0x0003
+
+#define PCI_VENDOR_ID_OPTIBASE 0x1255
+#define PCI_DEVICE_ID_OPTIBASE_FORGE 0x1110
+#define PCI_DEVICE_ID_OPTIBASE_FUSION 0x1210
+#define PCI_DEVICE_ID_OPTIBASE_VPLEX 0x2110
+#define PCI_DEVICE_ID_OPTIBASE_VPLEXCC 0x2120
+#define PCI_DEVICE_ID_OPTIBASE_VQUEST 0x2130
+
+#define PCI_VENDOR_ID_SATSAGEM 0x1267
+#define PCI_DEVICE_ID_SATSAGEM_PCR2101 0x5352
+#define PCI_DEVICE_ID_SATSAGEM_TELSATTURBO 0x5a4b
+
+#define PCI_VENDOR_ID_HUGHES 0x1273
+#define PCI_DEVICE_ID_HUGHES_DIRECPC 0x0002
+
+#define PCI_VENDOR_ID_ENSONIQ 0x1274
+#define PCI_DEVICE_ID_ENSONIQ_AUDIOPCI 0x5000
+
+#define PCI_VENDOR_ID_ALTEON 0x12ae
+#define PCI_DEVICE_ID_ALTEON_ACENIC 0x0001
+
+#define PCI_VENDOR_ID_PICTUREL 0x12c5
+#define PCI_DEVICE_ID_PICTUREL_PCIVST 0x0081
+
+#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
+#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
+
+#define PCI_VENDOR_ID_CBOARDS 0x1307
+#define PCI_DEVICE_ID_CBOARDS_DAS1602_16 0x0001
+
+#define PCI_VENDOR_ID_SYMPHONY 0x1c1c
+#define PCI_DEVICE_ID_SYMPHONY_101 0x0001
+
+#define PCI_VENDOR_ID_TEKRAM 0x1de1
+#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+
+#define PCI_VENDOR_ID_3DLABS 0x3d3d
+#define PCI_DEVICE_ID_3DLABS_300SX 0x0001
+#define PCI_DEVICE_ID_3DLABS_500TX 0x0002
+#define PCI_DEVICE_ID_3DLABS_DELTA 0x0003
+#define PCI_DEVICE_ID_3DLABS_PERMEDIA 0x0004
+#define PCI_DEVICE_ID_3DLABS_MX 0x0006
+
+#define PCI_VENDOR_ID_AVANCE 0x4005
+#define PCI_DEVICE_ID_AVANCE_ALG2064 0x2064
+#define PCI_DEVICE_ID_AVANCE_2302 0x2302
+
+#define PCI_VENDOR_ID_NETVIN 0x4a14
+#define PCI_DEVICE_ID_NETVIN_NV5000SC 0x5000
+
+#define PCI_VENDOR_ID_S3 0x5333
+#define PCI_DEVICE_ID_S3_PLATO_PXS 0x0551
+#define PCI_DEVICE_ID_S3_ViRGE 0x5631
+#define PCI_DEVICE_ID_S3_TRIO 0x8811
+#define PCI_DEVICE_ID_S3_AURORA64VP 0x8812
+#define PCI_DEVICE_ID_S3_TRIO64UVP 0x8814
+#define PCI_DEVICE_ID_S3_ViRGE_VX 0x883d
+#define PCI_DEVICE_ID_S3_868 0x8880
+#define PCI_DEVICE_ID_S3_928 0x88b0
+#define PCI_DEVICE_ID_S3_864_1 0x88c0
+#define PCI_DEVICE_ID_S3_864_2 0x88c1
+#define PCI_DEVICE_ID_S3_964_1 0x88d0
+#define PCI_DEVICE_ID_S3_964_2 0x88d1
+#define PCI_DEVICE_ID_S3_968 0x88f0
+#define PCI_DEVICE_ID_S3_TRIO64V2 0x8901
+#define PCI_DEVICE_ID_S3_PLATO_PXG 0x8902
+#define PCI_DEVICE_ID_S3_ViRGE_DXGX 0x8a01
+#define PCI_DEVICE_ID_S3_ViRGE_GX2 0x8a10
+#define PCI_DEVICE_ID_S3_ViRGE_MX 0x8c01
+#define PCI_DEVICE_ID_S3_ViRGE_MXP 0x8c02
+#define PCI_DEVICE_ID_S3_ViRGE_MXPMV 0x8c03
+#define PCI_DEVICE_ID_S3_SONICVIBES 0xca00
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define PCI_DEVICE_ID_INTEL_82375 0x0482
+#define PCI_DEVICE_ID_INTEL_82424 0x0483
+#define PCI_DEVICE_ID_INTEL_82378 0x0484
+#define PCI_DEVICE_ID_INTEL_82430 0x0486
+#define PCI_DEVICE_ID_INTEL_82434 0x04a3
+#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
+#define PCI_DEVICE_ID_INTEL_82092AA_1 0x1222
+#define PCI_DEVICE_ID_INTEL_7116 0x1223
+#define PCI_DEVICE_ID_INTEL_82596 0x1226
+#define PCI_DEVICE_ID_INTEL_82865 0x1227
+#define PCI_DEVICE_ID_INTEL_82557 0x1229
+#define PCI_DEVICE_ID_INTEL_82437 0x122d
+#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
+#define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230
+#define PCI_DEVICE_ID_INTEL_82371MX 0x1234
+#define PCI_DEVICE_ID_INTEL_82437MX 0x1235
+#define PCI_DEVICE_ID_INTEL_82441 0x1237
+#define PCI_DEVICE_ID_INTEL_82380FB 0x124b
+#define PCI_DEVICE_ID_INTEL_82439 0x1250
+#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
+#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
+#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
+#define PCI_DEVICE_ID_INTEL_82437VX 0x7030
+#define PCI_DEVICE_ID_INTEL_82439TX 0x7100
+#define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110
+#define PCI_DEVICE_ID_INTEL_82371AB 0x7111
+#define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112
+#define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113
+#define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180
+#define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181
+#define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190
+#define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191
+#define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192
+#define PCI_DEVICE_ID_INTEL_P6 0x84c4
+#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5
+
+#define PCI_VENDOR_ID_KTI 0x8e2e
+#define PCI_DEVICE_ID_KTI_ET32P2 0x3000
+
+#define PCI_VENDOR_ID_ADAPTEC 0x9004
+#define PCI_DEVICE_ID_ADAPTEC_7810 0x1078
+#define PCI_DEVICE_ID_ADAPTEC_7850 0x5078
+#define PCI_DEVICE_ID_ADAPTEC_7855 0x5578
+#define PCI_DEVICE_ID_ADAPTEC_5800 0x5800
+#define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075
+#define PCI_DEVICE_ID_ADAPTEC_7860 0x6078
+#define PCI_DEVICE_ID_ADAPTEC_7861 0x6178
+#define PCI_DEVICE_ID_ADAPTEC_7870 0x7078
+#define PCI_DEVICE_ID_ADAPTEC_7871 0x7178
+#define PCI_DEVICE_ID_ADAPTEC_7872 0x7278
+#define PCI_DEVICE_ID_ADAPTEC_7873 0x7378
+#define PCI_DEVICE_ID_ADAPTEC_7874 0x7478
+#define PCI_DEVICE_ID_ADAPTEC_7895 0x7895
+#define PCI_DEVICE_ID_ADAPTEC_7880 0x8078
+#define PCI_DEVICE_ID_ADAPTEC_7881 0x8178
+#define PCI_DEVICE_ID_ADAPTEC_7882 0x8278
+#define PCI_DEVICE_ID_ADAPTEC_7883 0x8378
+#define PCI_DEVICE_ID_ADAPTEC_7884 0x8478
+#define PCI_DEVICE_ID_ADAPTEC_1030 0x8b78
+
+#define PCI_VENDOR_ID_ADAPTEC2 0x9005
+#define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010
+#define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f
+#define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050
+#define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f
+
+#define PCI_VENDOR_ID_ATRONICS 0x907f
+#define PCI_DEVICE_ID_ATRONICS_2015 0x2015
+
+#define PCI_VENDOR_ID_HOLTEK 0x9412
+#define PCI_DEVICE_ID_HOLTEK_6565 0x6565
+
+#define PCI_VENDOR_ID_TIGERJET 0xe159
+#define PCI_DEVICE_ID_TIGERJET_300 0x0001
+
+#define PCI_VENDOR_ID_ARK 0xedd8
+#define PCI_DEVICE_ID_ARK_STING 0xa091
+#define PCI_DEVICE_ID_ARK_STINGARK 0xa099
+#define PCI_DEVICE_ID_ARK_2000MT 0xa0a1
+
+/*
+ * The PCI interface treats multi-function devices as independent
+ * devices. The slot/function address of each device is encoded
+ * in a single byte as follows:
+ *
+ * 7:3 = slot
+ * 2:0 = function
+ */
+#define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
+#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
+#define PCI_FUNC(devfn) ((devfn) & 0x07)
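+
+/*
+ * For example, device 3 function 1 encodes as PCI_DEVFN(3, 1) == 0x19,
+ * and conversely PCI_SLOT(0x19) == 3 and PCI_FUNC(0x19) == 1.
+ */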
+
+/* Functions used to access pci configuration space */
+struct pci_config_access_functions {
+ int (*read_config_byte)(unsigned char, unsigned char,
+ unsigned char, unsigned char *);
+ int (*read_config_word)(unsigned char, unsigned char,
+ unsigned char, unsigned short *);
+ int (*read_config_dword)(unsigned char, unsigned char,
+ unsigned char, unsigned int *);
+ int (*write_config_byte)(unsigned char, unsigned char,
+ unsigned char, unsigned char);
+ int (*write_config_word)(unsigned char, unsigned char,
+ unsigned char, unsigned short);
+ int (*write_config_dword)(unsigned char, unsigned char,
+ unsigned char, unsigned int);
+};
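+
+/*
+ * Usage sketch (the member function names below are hypothetical; only
+ * the direct_functions/indirect_functions tables referenced by pci_init()
+ * are real):
+ *
+ *   const struct pci_config_access_functions indirect_functions = {
+ *       indirect_read_config_byte,  indirect_read_config_word,
+ *       indirect_read_config_dword, indirect_write_config_byte,
+ *       indirect_write_config_word, indirect_write_config_dword
+ *   };
+ *
+ * pci_init() selects either the direct or the indirect table depending on
+ * the host bridge interface reported by the residual data, and the
+ * pcibios_* wrappers presumably dispatch through the selected table.
+ */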
+
+/*
+ * There is one pci_dev structure for each slot-number/function-number
+ * combination:
+ */
+struct pci_dev {
+ struct pci_bus *bus; /* bus this device is on */
+ struct pci_dev *sibling; /* next device on this bus */
+ struct pci_dev *next; /* chain of all devices */
+
+ void *sysdata; /* hook for sys-specific extension */
+ struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
+
+ unsigned int devfn; /* encoded device & function index */
+ unsigned short vendor;
+ unsigned short device;
+ unsigned int class; /* 3 bytes: (base,sub,prog-if) */
+ unsigned int hdr_type; /* PCI header type */
+ unsigned int master : 1; /* set if device is master capable */
+ /*
+ * In theory, the irq level can be read from configuration
+ * space and all would be fine. However, old PCI chips don't
+ * support these registers and return 0 instead. For example,
+	 * the Vision864-P rev 0 chip uses INTA, but returns 0 in
+ * the interrupt line and pin registers. pci_init()
+ * initializes this field with the value at PCI_INTERRUPT_LINE
+ * and it is the job of pcibios_fixup() to change it if
+ * necessary. The field must not be 0 unless the device
+ * cannot generate interrupts at all.
+ */
+ unsigned int irq; /* irq generated by this device */
+
+ /* Base registers for this device, can be adjusted by
+ * pcibios_fixup() as necessary.
+ */
+ unsigned long base_address[6];
+ unsigned long rom_address;
+};
+
+struct pci_bus {
+ struct pci_bus *parent; /* parent bus this bridge is on */
+ struct pci_bus *children; /* chain of P2P bridges on this bus */
+ struct pci_bus *next; /* chain of all PCI buses */
+
+ struct pci_dev *self; /* bridge device as seen by parent */
+ struct pci_dev *devices; /* devices behind this bridge */
+
+ void *sysdata; /* hook for sys-specific extension */
+ struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
+
+ unsigned char number; /* bus number */
+ unsigned char primary; /* number of primary bridge */
+ unsigned char secondary; /* number of secondary bridge */
+ unsigned char subordinate; /* max number of subordinate buses */
+};
+
+extern struct pci_bus pci_root; /* root bus */
+extern struct pci_dev *pci_devices; /* list of all devices */
+
+/*
+ * Error values that may be returned by the PCI bios.
+ */
+#define PCIBIOS_SUCCESSFUL 0x00
+#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
+#define PCIBIOS_BAD_VENDOR_ID 0x83
+#define PCIBIOS_DEVICE_NOT_FOUND 0x86
+#define PCIBIOS_BAD_REGISTER_NUMBER 0x87
+#define PCIBIOS_SET_FAILED 0x88
+#define PCIBIOS_BUFFER_TOO_SMALL 0x89
+
+
+#endif /* BOOTLOADER_PCI_H */
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/ppcboot.lds b/c/src/lib/libbsp/powerpc/shared/bootloader/ppcboot.lds
new file mode 100644
index 0000000000..9d46c0a83f
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/ppcboot.lds
@@ -0,0 +1,94 @@
+OUTPUT_ARCH(powerpc)
+OUTPUT_FORMAT(ppcboot)
+/* Do we need any of these for elf?
+ __DYNAMIC = 0; */
+SECTIONS
+{
+ .text :
+ {
+    /* We have to build the header by hand, which is painful since ppcboot
+       format support is very poor in binutils.
+       objdump -b ppcboot zImage --all-headers can be used to check it. */
+    /* The following line can be added as a branch so that the same image
+     * can be used for netboot as well as for prepboot; the only problem is
+     * that objdump then did not recognize the format, since it insisted on
+     * checking that the x86 code area held only zeroes.
+     */
+ LONG(0x48000000+start);
+ . = 0x1be; BYTE(0x80); BYTE(0)
+ BYTE(2); BYTE(0); BYTE(0x41); BYTE(1);
+ BYTE(0x12); BYTE(0x4f); LONG(0);
+ BYTE(((_edata + 0x1ff)>>9)&0xff);
+ BYTE(((_edata + 0x1ff)>>17)&0xff);
+ BYTE(((_edata + 0x1ff)>>25)&0xff);
+ . = 0x1fe;
+ BYTE(0x55);
+ BYTE(0xaa);
+ BYTE(start&0xff);
+ BYTE((start>>8)&0xff);
+ BYTE((start>>16)&0xff);
+ BYTE((start>>24)&0xff);
+ BYTE(_edata&0xff);
+ BYTE((_edata>>8)&0xff);
+ BYTE((_edata>>16)&0xff);
+ BYTE((_edata>>24)&0xff);
+ BYTE(0); /* flags */
+ BYTE(0); /* os_id */
+ BYTE(0x4C); BYTE(0x69); BYTE(0x6e);
+    BYTE(0x75); BYTE(0x78); /* Partition name: "Linux" */
+ . = 0x400;
+ *(.text)
+ *(.sdata2)
+ *(.rodata)
+ }
+/* . = ALIGN(16); */
+ .image :
+ {
+ rtems.gz(*)
+ . = ALIGN(4);
+ *.gz(*)
+ }
+ /* Read-write section, merged into data segment: */
+ /* . = ALIGN(4096); */
+ .reloc :
+ {
+ *(.got)
+ _GOT2_TABLE_ = .;
+ *(.got2)
+ _FIXUP_TABLE_ = .;
+ *(.fixup)
+ }
+
+ __got2_entries = (_FIXUP_TABLE_ - _GOT2_TABLE_) >>2;
+ __fixup_entries = (. - _FIXUP_TABLE_)>>2;
+
+ .handlers :
+ {
+ *(.exception)
+ }
+
+ .data :
+ {
+ *(.data)
+ *(.sdata)
+ . = ALIGN(4);
+ _edata = .;
+ }
+ PROVIDE(_binary_initrd_gz_start = 0);
+ PROVIDE(_binary_initrd_gz_end = 0);
+ _rtems_gz_size = _binary_rtems_gz_end - _binary_rtems_gz_start;
+ _rtems_size = __rtems_end - __rtems_start;
+ .bss :
+ {
+ *(.sbss)
+ *(.bss)
+ . = ALIGN(4);
+ }
+ __bss_words = SIZEOF(.bss)>>2;
+ __size = . ;
+ /DISCARD/ :
+ {
+ *(.comment)
+ }
+}
+
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/zlib.c b/c/src/lib/libbsp/powerpc/shared/bootloader/zlib.c
new file mode 100644
index 0000000000..78ba7867fc
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/zlib.c
@@ -0,0 +1,2143 @@
+/*
+ * This file is derived from various .h and .c files from the zlib-0.95
+ * distribution by Jean-loup Gailly and Mark Adler, with some additions
+ * by Paul Mackerras to aid in implementing Deflate compression and
+ * decompression for PPP packets. See zlib.h for conditions of
+ * distribution and use.
+ *
+ * Changes that have been made include:
+ * - changed functions not used outside this file to "local"
+ * - added minCompression parameter to deflateInit2
+ * - added Z_PACKET_FLUSH (see zlib.h for details)
+ * - added inflateIncomp
+ *
+ * $Id$
+ */
+
+/*+++++*/
+/* zutil.h -- internal interface and configuration of the compression library
+ * Copyright (C) 1995 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* From: zutil.h,v 1.9 1995/05/03 17:27:12 jloup Exp */
+
+#define _Z_UTIL_H
+
+#include "zlib.h"
+
+#ifndef local
+# define local static
+#endif
+/* compile with -Dlocal if your debugger can't find static symbols */
+
+#define FAR
+
+typedef unsigned char uch;
+typedef uch FAR uchf;
+typedef unsigned short ush;
+typedef ush FAR ushf;
+typedef unsigned long ulg;
+
+extern char *z_errmsg[]; /* indexed by 1-zlib_error */
+
+#define ERR_RETURN(strm,err) return (strm->msg=z_errmsg[1-err], err)
+/* To be used only when the state is known to be valid */
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+ /* common constants */
+
+#define DEFLATED 8
+
+#ifndef DEF_WBITS
+# define DEF_WBITS MAX_WBITS
+#endif
+/* default windowBits for decompression. MAX_WBITS is for compression only */
+
+#if MAX_MEM_LEVEL >= 8
+# define DEF_MEM_LEVEL 8
+#else
+# define DEF_MEM_LEVEL MAX_MEM_LEVEL
+#endif
+/* default memLevel */
+
+#define STORED_BLOCK 0
+#define STATIC_TREES 1
+#define DYN_TREES 2
+/* The three kinds of block type */
+
+#define MIN_MATCH 3
+#define MAX_MATCH 258
+/* The minimum and maximum match lengths */
+
+ /* functions */
+
+#include <string.h>
+#define zmemcpy memcpy
+#define zmemzero(dest, len) memset(dest, 0, len)
+
+/* Diagnostic functions */
+#ifdef DEBUG_ZLIB
+# include <stdio.h>
+# ifndef verbose
+# define verbose 0
+# endif
+# define Assert(cond, msg) {if(!(cond)) Trace(msg);}
+# define Trace(x) printk(x)
+# define Tracev(x) {if (verbose) printk x ;}
+# define Tracevv(x) {if (verbose>1) printk x ;}
+# define Tracec(c,x) {if (verbose && (c)) printk x ;}
+# define Tracecv(c,x) {if (verbose>1 && (c)) printk x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
+
+
+typedef uLong (*check_func) OF((uLong check, Bytef *buf, uInt len));
+
+/* voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size)); */
+/* void zcfree OF((voidpf opaque, voidpf ptr)); */
+
+#define ZALLOC(strm, items, size) \
+ (*((strm)->zalloc))((strm)->opaque, (items), (size))
+#define ZFREE(strm, addr, size) \
+ (*((strm)->zfree))((strm)->opaque, (voidpf)(addr), (size))
+#define TRY_FREE(s, p, n) {if (p) ZFREE(s, p, n);}
+
+/* deflate.h -- internal compression state
+ * Copyright (C) 1995 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/*+++++*/
+/* infblock.h -- header to use infblock.c
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+struct inflate_blocks_state;
+typedef struct inflate_blocks_state FAR inflate_blocks_statef;
+
+local inflate_blocks_statef * inflate_blocks_new OF((
+ z_stream *z,
+ check_func c, /* check function */
+ uInt w)); /* window size */
+
+local int inflate_blocks OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ int)); /* initial return code */
+
+local void inflate_blocks_reset OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ uLongf *)); /* check value on output */
+
+local int inflate_blocks_free OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ uLongf *)); /* check value on output */
+
+local int inflate_addhistory OF((
+ inflate_blocks_statef *,
+ z_stream *));
+
+local int inflate_packet_flush OF((
+ inflate_blocks_statef *));
+
+/*+++++*/
+/* inftrees.h -- header to use inftrees.c
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* Huffman code lookup table entry--this entry is four bytes for machines
+ that have 16-bit pointers (e.g. PC's in the small or medium model). */
+
+typedef struct inflate_huft_s FAR inflate_huft;
+
+struct inflate_huft_s {
+ union {
+ struct {
+ Byte Exop; /* number of extra bits or operation */
+ Byte Bits; /* number of bits in this code or subcode */
+ } what;
+ uInt Nalloc; /* number of these allocated here */
+ Bytef *pad; /* pad structure to a power of 2 (4 bytes for */
+ } word; /* 16-bit, 8 bytes for 32-bit machines) */
+ union {
+ uInt Base; /* literal, length base, or distance base */
+ inflate_huft *Next; /* pointer to next level of table */
+ } more;
+};
+
+#ifdef DEBUG_ZLIB
+ local uInt inflate_hufts;
+#endif
+
+local int inflate_trees_bits OF((
+ uIntf *, /* 19 code lengths */
+ uIntf *, /* bits tree desired/actual depth */
+ inflate_huft * FAR *, /* bits tree result */
+ z_stream *)); /* for zalloc, zfree functions */
+
+local int inflate_trees_dynamic OF((
+ uInt, /* number of literal/length codes */
+ uInt, /* number of distance codes */
+ uIntf *, /* that many (total) code lengths */
+ uIntf *, /* literal desired/actual bit depth */
+ uIntf *, /* distance desired/actual bit depth */
+ inflate_huft * FAR *, /* literal/length tree result */
+ inflate_huft * FAR *, /* distance tree result */
+ z_stream *)); /* for zalloc, zfree functions */
+
+local int inflate_trees_fixed OF((
+ uIntf *, /* literal desired/actual bit depth */
+ uIntf *, /* distance desired/actual bit depth */
+ inflate_huft * FAR *, /* literal/length tree result */
+ inflate_huft * FAR *)); /* distance tree result */
+
+local int inflate_trees_free OF((
+ inflate_huft *, /* tables to free */
+ z_stream *)); /* for zfree function */
+
+
+/*+++++*/
+/* infcodes.h -- header to use infcodes.c
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+struct inflate_codes_state;
+typedef struct inflate_codes_state FAR inflate_codes_statef;
+
+local inflate_codes_statef *inflate_codes_new OF((
+ uInt, uInt,
+ inflate_huft *, inflate_huft *,
+ z_stream *));
+
+local int inflate_codes OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ int));
+
+local void inflate_codes_free OF((
+ inflate_codes_statef *,
+ z_stream *));
+
+
+/*+++++*/
+/* inflate.c -- zlib interface to inflate modules
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* inflate private state */
+struct internal_state {
+
+ /* mode */
+ enum {
+ METHOD, /* waiting for method byte */
+ FLAG, /* waiting for flag byte */
+ BLOCKS, /* decompressing blocks */
+ CHECK4, /* four check bytes to go */
+ CHECK3, /* three check bytes to go */
+ CHECK2, /* two check bytes to go */
+ CHECK1, /* one check byte to go */
+ DONE, /* finished check, done */
+ BAD} /* got an error--stay here */
+ mode; /* current inflate mode */
+
+ /* mode dependent information */
+ union {
+ uInt method; /* if FLAGS, method byte */
+ struct {
+ uLong was; /* computed check value */
+ uLong need; /* stream check value */
+ } check; /* if CHECK, check values to compare */
+ uInt marker; /* if BAD, inflateSync's marker bytes count */
+ } sub; /* submode */
+
+ /* mode independent information */
+ int nowrap; /* flag for no wrapper */
+ uInt wbits; /* log2(window size) (8..15, defaults to 15) */
+ inflate_blocks_statef
+ *blocks; /* current inflate_blocks state */
+
+};
+
+
+int inflateReset(z)
+z_stream *z;
+{
+ uLong c;
+
+ if (z == Z_NULL || z->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ z->total_in = z->total_out = 0;
+ z->msg = Z_NULL;
+ z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
+ inflate_blocks_reset(z->state->blocks, z, &c);
+ Trace("inflate: reset\n");
+ return Z_OK;
+}
+
+
+int inflateEnd(z)
+z_stream *z;
+{
+ uLong c;
+
+ if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL)
+ return Z_STREAM_ERROR;
+ if (z->state->blocks != Z_NULL)
+ inflate_blocks_free(z->state->blocks, z, &c);
+ ZFREE(z, z->state, sizeof(struct internal_state));
+ z->state = Z_NULL;
+ Trace("inflate: end\n");
+ return Z_OK;
+}
+
+
+int inflateInit2(z, w)
+z_stream *z;
+int w;
+{
+ /* initialize state */
+ if (z == Z_NULL)
+ return Z_STREAM_ERROR;
+/* if (z->zalloc == Z_NULL) z->zalloc = zcalloc; */
+/* if (z->zfree == Z_NULL) z->zfree = zcfree; */
+ if ((z->state = (struct internal_state FAR *)
+ ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL)
+ return Z_MEM_ERROR;
+ z->state->blocks = Z_NULL;
+
+ /* handle undocumented nowrap option (no zlib header or check) */
+ z->state->nowrap = 0;
+ if (w < 0)
+ {
+ w = - w;
+ z->state->nowrap = 1;
+ }
+
+ /* set window size */
+ if (w < 8 || w > 15)
+ {
+ inflateEnd(z);
+ return Z_STREAM_ERROR;
+ }
+ z->state->wbits = (uInt)w;
+
+ /* create inflate_blocks state */
+ if ((z->state->blocks =
+ inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, 1 << w))
+ == Z_NULL)
+ {
+ inflateEnd(z);
+ return Z_MEM_ERROR;
+ }
+ Trace("inflate: allocated\n");
+
+ /* reset state */
+ inflateReset(z);
+ return Z_OK;
+}
+
+
+int inflateInit(z)
+z_stream *z;
+{
+ return inflateInit2(z, DEF_WBITS);
+}
+
+
+#define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;}
+#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
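+/*
+ * NEEDBYTE bails out to the "empty" label when no input is left and
+ * otherwise notes Z_OK as the provisional return code; NEXTBYTE consumes
+ * one input byte while updating the avail_in/total_in counters.
+ */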
+
+int inflate(z, f)
+z_stream *z;
+int f;
+{
+ int r;
+ uInt b;
+
+ if (z == Z_NULL || z->next_in == Z_NULL)
+ return Z_STREAM_ERROR;
+ r = Z_BUF_ERROR;
+ while (1) switch (z->state->mode)
+ {
+ case METHOD:
+ NEEDBYTE
+ if (((z->state->sub.method = NEXTBYTE) & 0xf) != DEFLATED)
+ {
+ z->state->mode = BAD;
+ z->msg = "unknown compression method";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
+ {
+ z->state->mode = BAD;
+ z->msg = "invalid window size";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ z->state->mode = FLAG;
+ case FLAG:
+ NEEDBYTE
+ if ((b = NEXTBYTE) & 0x20)
+ {
+ z->state->mode = BAD;
+ z->msg = "invalid reserved bit";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ if (((z->state->sub.method << 8) + b) % 31)
+ {
+ z->state->mode = BAD;
+ z->msg = "incorrect header check";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ Trace("inflate: zlib header ok\n");
+ z->state->mode = BLOCKS;
+ case BLOCKS:
+ r = inflate_blocks(z->state->blocks, z, r);
+ if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
+ r = inflate_packet_flush(z->state->blocks);
+ if (r == Z_DATA_ERROR)
+ {
+ z->state->mode = BAD;
+ z->state->sub.marker = 0; /* can try inflateSync */
+ break;
+ }
+ if (r != Z_STREAM_END)
+ return r;
+ r = Z_OK;
+ inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
+ if (z->state->nowrap)
+ {
+ z->state->mode = DONE;
+ break;
+ }
+ z->state->mode = CHECK4;
+ case CHECK4:
+ NEEDBYTE
+ z->state->sub.check.need = (uLong)NEXTBYTE << 24;
+ z->state->mode = CHECK3;
+ case CHECK3:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 16;
+ z->state->mode = CHECK2;
+ case CHECK2:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 8;
+ z->state->mode = CHECK1;
+ case CHECK1:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE;
+
+ if (z->state->sub.check.was != z->state->sub.check.need)
+ {
+ z->state->mode = BAD;
+ z->msg = "incorrect data check";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ Trace( "inflate: zlib check ok\n");
+ z->state->mode = DONE;
+ case DONE:
+ return Z_STREAM_END;
+ case BAD:
+ return Z_DATA_ERROR;
+ default:
+ return Z_STREAM_ERROR;
+ }
+
+ empty:
+ if (f != Z_PACKET_FLUSH)
+ return r;
+ z->state->mode = BAD;
+ z->state->sub.marker = 0; /* can try inflateSync */
+ return Z_DATA_ERROR;
+}
+
+/*
+ * This subroutine adds the data at next_in/avail_in to the output history
+ * without performing any output. The output buffer must be "caught up";
+ * i.e. no pending output (hence s->read equals s->write), and the state must
+ * be BLOCKS (i.e. we should be willing to see the start of a series of
+ * BLOCKS). On exit, the output will also be caught up, and the checksum
+ * will have been updated if need be.
+ */
+
+int inflateIncomp(z)
+z_stream *z;
+{
+ if (z->state->mode != BLOCKS)
+ return Z_DATA_ERROR;
+ return inflate_addhistory(z->state->blocks, z);
+}
+
+
+int inflateSync(z)
+z_stream *z;
+{
+ uInt n; /* number of bytes to look at */
+ Bytef *p; /* pointer to bytes */
+ uInt m; /* number of marker bytes found in a row */
+ uLong r, w; /* temporaries to save total_in and total_out */
+
+ /* set up */
+ if (z == Z_NULL || z->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ if (z->state->mode != BAD)
+ {
+ z->state->mode = BAD;
+ z->state->sub.marker = 0;
+ }
+ if ((n = z->avail_in) == 0)
+ return Z_BUF_ERROR;
+ p = z->next_in;
+ m = z->state->sub.marker;
+
+ /* search */
+ while (n && m < 4)
+ {
+ if (*p == (Byte)(m < 2 ? 0 : 0xff))
+ m++;
+ else if (*p)
+ m = 0;
+ else
+ m = 4 - m;
+ p++, n--;
+ }
+
+ /* restore */
+ z->total_in += p - z->next_in;
+ z->next_in = p;
+ z->avail_in = n;
+ z->state->sub.marker = m;
+
+ /* return no joy or set up to restart on a new block */
+ if (m != 4)
+ return Z_DATA_ERROR;
+ r = z->total_in; w = z->total_out;
+ inflateReset(z);
+ z->total_in = r; z->total_out = w;
+ z->state->mode = BLOCKS;
+ return Z_OK;
+}
+
+#undef NEEDBYTE
+#undef NEXTBYTE
+
+/*+++++*/
+/* infutil.h -- types and macros common to blocks and codes
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* inflate blocks semi-private state */
+struct inflate_blocks_state {
+
+ /* mode */
+ enum {
+ TYPE, /* get type bits (3, including end bit) */
+ LENS, /* get lengths for stored */
+ STORED, /* processing stored block */
+ TABLE, /* get table lengths */
+ BTREE, /* get bit lengths tree for a dynamic block */
+ DTREE, /* get length, distance trees for a dynamic block */
+ CODES, /* processing fixed or dynamic block */
+ DRY, /* output remaining window bytes */
+ DONEB, /* finished last block, done */
+ BADB} /* got a data error--stuck here */
+ mode; /* current inflate_block mode */
+
+ /* mode dependent information */
+ union {
+ uInt left; /* if STORED, bytes left to copy */
+ struct {
+ uInt table; /* table lengths (14 bits) */
+ uInt index; /* index into blens (or border) */
+ uIntf *blens; /* bit lengths of codes */
+ uInt bb; /* bit length tree depth */
+ inflate_huft *tb; /* bit length decoding tree */
+ int nblens; /* # elements allocated at blens */
+ } trees; /* if DTREE, decoding info for trees */
+ struct {
+ inflate_huft *tl, *td; /* trees to free */
+ inflate_codes_statef
+ *codes;
+ } decode; /* if CODES, current state */
+ } sub; /* submode */
+ uInt last; /* true if this block is the last block */
+
+ /* mode independent information */
+ uInt bitk; /* bits in bit buffer */
+ uLong bitb; /* bit buffer */
+ Bytef *window; /* sliding window */
+ Bytef *end; /* one byte after sliding window */
+ Bytef *read; /* window read pointer */
+ Bytef *write; /* window write pointer */
+ check_func checkfn; /* check function */
+ uLong check; /* check on output */
+
+};
+
+
+/* defines for inflate input/output */
+/* update pointers and return */
+#define UPDBITS {s->bitb=b;s->bitk=k;}
+#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
+#define UPDOUT {s->write=q;}
+#define UPDATE {UPDBITS UPDIN UPDOUT}
+#define LEAVE {UPDATE return inflate_flush(s,z,r);}
+/* get bytes and bits */
+#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
+#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
+#define NEXTBYTE (n--,*p++)
+#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define DUMPBITS(j) {b>>=(j);k-=(j);}
+/* output bytes */
+#define WAVAIL (q<s->read?s->read-q-1:s->end-q)
+#define LOADOUT {q=s->write;m=WAVAIL;}
+#define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=WAVAIL;}}
+#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT}
+#define NEEDOUT {if(m==0){WRAP if(m==0){FLUSH WRAP if(m==0) LEAVE}}r=Z_OK;}
+#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
+/* load local pointers */
+#define LOAD {LOADIN LOADOUT}
+
+/* And'ing with mask[n] masks the lower n bits */
+local uInt inflate_mask[] = {
+ 0x0000,
+ 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
+ 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
+};
+
+/* copy as much as possible from the sliding window to the output area */
+local int inflate_flush OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ int));
+
+/*+++++*/
+/* inffast.h -- header to use inffast.c
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+local int inflate_fast OF((
+ uInt,
+ uInt,
+ inflate_huft *,
+ inflate_huft *,
+ inflate_blocks_statef *,
+ z_stream *));
+
+
+/*+++++*/
+/* infblock.c -- interpret and process block types to last block
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* Table for deflate from PKZIP's appnote.txt. */
+local uInt border[] = { /* Order of the bit length code lengths */
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+/*
+ Notes beyond the 1.93a appnote.txt:
+
+ 1. Distance pointers never point before the beginning of the output
+ stream.
+ 2. Distance pointers can point back across blocks, up to 32k away.
+ 3. There is an implied maximum of 7 bits for the bit length table and
+ 15 bits for the actual data.
+ 4. If only one code exists, then it is encoded using one bit. (Zero
+ would be more efficient, but perhaps a little confusing.) If two
+ codes exist, they are coded using one bit each (0 and 1).
+ 5. There is no way of sending zero distance codes--a dummy must be
+ sent if there are none. (History: a pre 2.0 version of PKZIP would
+ store blocks with no distance codes, but this was discovered to be
+ too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
+ zero distance codes, which is sent as one code of zero bits in
+ length.
+ 6. There are up to 286 literal/length codes. Code 256 represents the
+ end-of-block. Note however that the static length tree defines
+ 288 codes just to fill out the Huffman codes. Codes 286 and 287
+ cannot be used though, since there is no length base or extra bits
+     defined for them.  Similarly, there are up to 30 distance codes.
+ However, static trees define 32 codes (all 5 bits) to fill out the
+ Huffman codes, but the last two had better not show up in the data.
+ 7. Unzip can check dynamic Huffman blocks for complete code sets.
+ The exception is that a single code would not be complete (see #4).
+  8. The five bits following the block type are really the number of
+ literal codes sent minus 257.
+ 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
+ (1+6+6). Therefore, to output three times the length, you output
+ three codes (1+1+1), whereas to output four times the same length,
+ you only need two codes (1+3). Hmm.
+ 10. In the tree reconstruction algorithm, Code = Code + Increment
+ only if BitLength(i) is not zero. (Pretty obvious.)
+ 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
+ 12. Note: length code 284 can represent 227-258, but length code 285
+ really is 258. The last length deserves its own, short code
+ since it gets used a lot in very redundant files. The length
+ 258 is special since 258 - 3 (the min match length) is 255.
+ 13. The literal/length and distance code bit lengths are read as a
+ single stream of lengths. It is possible (and advantageous) for
+ a repeat code (16, 17, or 18) to go across the boundary between
+ the two sets of lengths.
+ */
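+
+/* Illustrative sketch only (not part of zlib): notes 8 and 11 above pin
+   down the 14 header bits that the TABLE state below reads for a dynamic
+   block.  The hypothetical helper shows how those bits split into the
+   three counts; the real code keeps them packed in sub.trees.table and
+   extracts the same fields inline. */
+#ifdef ZLIB_EXAMPLE_SKETCHES
+local void example_split_table OF((uInt, uIntf *, uIntf *, uIntf *));
+local void example_split_table(t, nlit, ndist, nclen)
+uInt t;        /* the 14 bits as NEEDBITS(14) leaves them, low bit first */
+uIntf *nlit;   /* number of literal/length code lengths, 257..288 */
+uIntf *ndist;  /* number of distance code lengths, 1..32 */
+uIntf *nclen;  /* number of bit length code lengths, 4..19 */
+{
+  *nlit  = 257 + (t & 0x1f);       /* 5 bits: # literal/length codes - 257 */
+  *ndist = 1 + ((t >> 5) & 0x1f);  /* 5 bits: # distance codes - 1 */
+  *nclen = 4 + (t >> 10);          /* 4 bits: # bit length codes - 4 */
+}
+#endif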
+
+
+local void inflate_blocks_reset(s, z, c)
+inflate_blocks_statef *s;
+z_stream *z;
+uLongf *c;
+{
+ if (s->checkfn != Z_NULL)
+ *c = s->check;
+ if (s->mode == BTREE || s->mode == DTREE)
+ ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
+ if (s->mode == CODES)
+ {
+ inflate_codes_free(s->sub.decode.codes, z);
+ inflate_trees_free(s->sub.decode.td, z);
+ inflate_trees_free(s->sub.decode.tl, z);
+ }
+ s->mode = TYPE;
+ s->bitk = 0;
+ s->bitb = 0;
+ s->read = s->write = s->window;
+ if (s->checkfn != Z_NULL)
+ s->check = (*s->checkfn)(0L, Z_NULL, 0);
+ Trace("inflate: blocks reset\n");
+}
+
+
+local inflate_blocks_statef *inflate_blocks_new(z, c, w)
+z_stream *z;
+check_func c;
+uInt w;
+{
+ inflate_blocks_statef *s;
+
+ if ((s = (inflate_blocks_statef *)ZALLOC
+ (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL)
+ return s;
+ if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL)
+ {
+ ZFREE(z, s, sizeof(struct inflate_blocks_state));
+ return Z_NULL;
+ }
+ s->end = s->window + w;
+ s->checkfn = c;
+ s->mode = TYPE;
+ Trace("inflate: blocks allocated\n");
+ inflate_blocks_reset(s, z, &s->check);
+ return s;
+}
+
+
+local int inflate_blocks(s, z, r)
+inflate_blocks_statef *s;
+z_stream *z;
+int r;
+{
+ uInt t; /* temporary storage */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+
+ /* copy input/output information to locals (UPDATE macro restores) */
+ LOAD
+
+ /* process input based on current state */
+ while (1) switch (s->mode)
+ {
+ case TYPE:
+ NEEDBITS(3)
+ t = (uInt)b & 7;
+ s->last = t & 1;
+ switch (t >> 1)
+ {
+ case 0: /* stored */
+ Trace(("inflate: stored block%s\n",
+ s->last ? " (last)" : ""));
+ DUMPBITS(3)
+ t = k & 7; /* go to byte boundary */
+ DUMPBITS(t)
+ s->mode = LENS; /* get length of stored block */
+ break;
+ case 1: /* fixed */
+ Trace(( "inflate: fixed codes block%s\n",
+ s->last ? " (last)" : ""));
+ {
+ uInt bl, bd;
+ inflate_huft *tl, *td;
+
+ inflate_trees_fixed(&bl, &bd, &tl, &td);
+ s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z);
+ if (s->sub.decode.codes == Z_NULL)
+ {
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ s->sub.decode.tl = Z_NULL; /* don't try to free these */
+ s->sub.decode.td = Z_NULL;
+ }
+ DUMPBITS(3)
+ s->mode = CODES;
+ break;
+ case 2: /* dynamic */
+ Trace(( "inflate: dynamic codes block%s\n",
+ s->last ? " (last)" : ""));
+ DUMPBITS(3)
+ s->mode = TABLE;
+ break;
+ case 3: /* illegal */
+ DUMPBITS(3)
+ s->mode = BADB;
+ z->msg = "invalid block type";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ break;
+ case LENS:
+ NEEDBITS(32)
+ if (((~b) >> 16) != (b & 0xffff))
+ {
+ s->mode = BADB;
+ z->msg = "invalid stored block lengths";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ s->sub.left = (uInt)b & 0xffff;
+ b = k = 0; /* dump bits */
+ Tracev(( "inflate: stored length %u\n", s->sub.left));
+ s->mode = s->sub.left ? STORED : TYPE;
+ break;
+ case STORED:
+ if (n == 0)
+ LEAVE
+ NEEDOUT
+ t = s->sub.left;
+ if (t > n) t = n;
+ if (t > m) t = m;
+ zmemcpy(q, p, t);
+ p += t; n -= t;
+ q += t; m -= t;
+ if ((s->sub.left -= t) != 0)
+ break;
+ Tracev(( "inflate: stored end, %lu total out\n",
+ z->total_out + (q >= s->read ? q - s->read :
+ (s->end - s->read) + (q - s->window))));
+ s->mode = s->last ? DRY : TYPE;
+ break;
+ case TABLE:
+ NEEDBITS(14)
+ s->sub.trees.table = t = (uInt)b & 0x3fff;
+#ifndef PKZIP_BUG_WORKAROUND
+ if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
+ {
+ s->mode = BADB;
+ z->msg = "too many length or distance symbols";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+#endif
+ t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
+ if (t < 19)
+ t = 19;
+ if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL)
+ {
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ s->sub.trees.nblens = t;
+ DUMPBITS(14)
+ s->sub.trees.index = 0;
+ Tracev(( "inflate: table sizes ok\n"));
+ s->mode = BTREE;
+ case BTREE:
+ while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
+ {
+ NEEDBITS(3)
+ s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
+ DUMPBITS(3)
+ }
+ while (s->sub.trees.index < 19)
+ s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
+ s->sub.trees.bb = 7;
+ t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
+ &s->sub.trees.tb, z);
+ if (t != Z_OK)
+ {
+ r = t;
+ if (r == Z_DATA_ERROR)
+ s->mode = BADB;
+ LEAVE
+ }
+ s->sub.trees.index = 0;
+ Tracev(( "inflate: bits tree ok\n"));
+ s->mode = DTREE;
+ case DTREE:
+ while (t = s->sub.trees.table,
+ s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
+ {
+ inflate_huft *h;
+ uInt i, j, c;
+
+ t = s->sub.trees.bb;
+ NEEDBITS(t)
+ h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]);
+ t = h->word.what.Bits;
+ c = h->more.Base;
+ if (c < 16)
+ {
+ DUMPBITS(t)
+ s->sub.trees.blens[s->sub.trees.index++] = c;
+ }
+ else /* c == 16..18 */
+ {
+ i = c == 18 ? 7 : c - 14;
+ j = c == 18 ? 11 : 3;
+ NEEDBITS(t + i)
+ DUMPBITS(t)
+ j += (uInt)b & inflate_mask[i];
+ DUMPBITS(i)
+ i = s->sub.trees.index;
+ t = s->sub.trees.table;
+ if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
+ (c == 16 && i < 1))
+ {
+ s->mode = BADB;
+ z->msg = "invalid bit length repeat";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
+ do {
+ s->sub.trees.blens[i++] = c;
+ } while (--j);
+ s->sub.trees.index = i;
+ }
+ }
+ inflate_trees_free(s->sub.trees.tb, z);
+ s->sub.trees.tb = Z_NULL;
+ {
+ uInt bl, bd;
+ inflate_huft *tl, *td;
+ inflate_codes_statef *c;
+
+ bl = 9; /* must be <= 9 for lookahead assumptions */
+ bd = 6; /* must be <= 9 for lookahead assumptions */
+ t = s->sub.trees.table;
+ t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
+ s->sub.trees.blens, &bl, &bd, &tl, &td, z);
+ if (t != Z_OK)
+ {
+ if (t == (uInt)Z_DATA_ERROR)
+ s->mode = BADB;
+ r = t;
+ LEAVE
+ }
+ Tracev(( "inflate: trees ok\n"));
+ if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL)
+ {
+ inflate_trees_free(td, z);
+ inflate_trees_free(tl, z);
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
+ s->sub.decode.codes = c;
+ s->sub.decode.tl = tl;
+ s->sub.decode.td = td;
+ }
+ s->mode = CODES;
+ case CODES:
+ UPDATE
+ if ((r = inflate_codes(s, z, r)) != Z_STREAM_END)
+ return inflate_flush(s, z, r);
+ r = Z_OK;
+ inflate_codes_free(s->sub.decode.codes, z);
+ inflate_trees_free(s->sub.decode.td, z);
+ inflate_trees_free(s->sub.decode.tl, z);
+ LOAD
+ Tracev(( "inflate: codes end, %lu total out\n",
+ z->total_out + (q >= s->read ? q - s->read :
+ (s->end - s->read) + (q - s->window))));
+ if (!s->last)
+ {
+ s->mode = TYPE;
+ break;
+ }
+ if (k > 7) /* return unused byte, if any */
+ {
+ Assert(k < 16, "inflate_codes grabbed too many bytes")
+ k -= 8;
+ n++;
+ p--; /* can always return one */
+ }
+ s->mode = DRY;
+ case DRY:
+ FLUSH
+ if (s->read != s->write)
+ LEAVE
+ s->mode = DONEB;
+ case DONEB:
+ r = Z_STREAM_END;
+ LEAVE
+ case BADB:
+ r = Z_DATA_ERROR;
+ LEAVE
+ default:
+ r = Z_STREAM_ERROR;
+ LEAVE
+ }
+}
+
+
+local int inflate_blocks_free(s, z, c)
+inflate_blocks_statef *s;
+z_stream *z;
+uLongf *c;
+{
+ inflate_blocks_reset(s, z, c);
+ ZFREE(z, s->window, s->end - s->window);
+ ZFREE(z, s, sizeof(struct inflate_blocks_state));
+ Trace(( "inflate: blocks freed\n"));
+ return Z_OK;
+}
+
+/*
+ * This subroutine adds the data at next_in/avail_in to the output history
+ * without performing any output. The output buffer must be "caught up";
+ * i.e. no pending output (hence s->read equals s->write), and the state must
+ * be BLOCKS (i.e. we should be willing to see the start of a series of
+ * BLOCKS). On exit, the output will also be caught up, and the checksum
+ * will have been updated if need be.
+ */
+local int inflate_addhistory(s, z)
+inflate_blocks_statef *s;
+z_stream *z;
+{
+ uLong b; /* bit buffer */ /* NOT USED HERE */
+ uInt k; /* bits in bit buffer */ /* NOT USED HERE */
+ uInt t; /* temporary storage */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+
+ if (s->read != s->write)
+ return Z_STREAM_ERROR;
+ if (s->mode != TYPE)
+ return Z_DATA_ERROR;
+
+ /* we're ready to rock */
+ LOAD
+ /* while there is input ready, copy to output buffer, moving
+ * pointers as needed.
+ */
+ while (n) {
+ t = n; /* how many to do */
+ /* is there room until end of buffer? */
+ if (t > m) t = m;
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ s->check = (*s->checkfn)(s->check, q, t);
+ zmemcpy(q, p, t);
+ q += t;
+ p += t;
+ n -= t;
+ z->total_out += t;
+ s->read = q; /* drag read pointer forward */
+/* WRAP */ /* expand WRAP macro by hand to handle s->read */
+ if (q == s->end) {
+ s->read = q = s->window;
+ m = WAVAIL;
+ }
+ }
+ UPDATE
+ return Z_OK;
+}
+
+
+/*
+ * At the end of a Deflate-compressed PPP packet, we expect to have seen
+ * a `stored' block type value but not the (zero) length bytes.
+ */
+local int inflate_packet_flush(s)
+ inflate_blocks_statef *s;
+{
+ if (s->mode != LENS)
+ return Z_DATA_ERROR;
+ s->mode = TYPE;
+ return Z_OK;
+}
+
+
+/*+++++*/
+/* inftrees.c -- generate Huffman trees for efficient decoding
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* simplify the use of the inflate_huft type with some defines */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+
+local int huft_build OF((
+ uIntf *, /* code lengths in bits */
+ uInt, /* number of codes */
+ uInt, /* number of "simple" codes */
+ uIntf *, /* list of base values for non-simple codes */
+ uIntf *, /* list of extra bits for non-simple codes */
+ inflate_huft * FAR*,/* result: starting table */
+ uIntf *, /* maximum lookup bits (returns actual) */
+ z_stream *)); /* for zalloc function */
+
+local voidpf falloc OF((
+ voidpf, /* opaque pointer (not used) */
+ uInt, /* number of items */
+ uInt)); /* size of item */
+
+local void ffree OF((
+ voidpf q, /* opaque pointer (not used) */
+ voidpf p, /* what to free (not used) */
+ uInt n)); /* number of bytes (not used) */
+
+/* Tables for deflate from PKZIP's appnote.txt. */
+local uInt cplens[] = { /* Copy lengths for literal codes 257..285 */
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+ 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+ /* actually lengths - 2; also see note #13 above about 258 */
+local uInt cplext[] = { /* Extra bits for literal codes 257..285 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 192, 192}; /* 192==invalid */
+local uInt cpdist[] = { /* Copy offsets for distance codes 0..29 */
+ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
+ 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
+ 8193, 12289, 16385, 24577};
+local uInt cpdext[] = { /* Extra bits for distance codes */
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
+ 12, 12, 13, 13};
+
+/*
+ Huffman code decoding is performed using a multi-level table lookup.
+ The fastest way to decode is to simply build a lookup table whose
+ size is determined by the longest code. However, the time it takes
+ to build this table can also be a factor if the data being decoded
+ is not very long. The most common codes are necessarily the
+ shortest codes, so those codes dominate the decoding time, and hence
+ the speed. The idea is you can have a shorter table that decodes the
+ shorter, more probable codes, and then point to subsidiary tables for
+ the longer codes. The time it costs to decode the longer codes is
+ then traded against the time it takes to make longer tables.
+
+   The results of this trade are in the variables lbits and dbits
+ below. lbits is the number of bits the first level table for literal/
+ length codes can decode in one step, and dbits is the same thing for
+ the distance codes. Subsequent tables are also less than or equal to
+ those sizes. These values may be adjusted either when all of the
+ codes are shorter than that, in which case the longest code length in
+ bits is used, or when the shortest code is *longer* than the requested
+ table size, in which case the length of the shortest code in bits is
+ used.
+
+ There are two different values for the two tables, since they code a
+ different number of possibilities each. The literal/length table
+ codes 286 possible values, or in a flat code, a little over eight
+ bits. The distance table codes 30 possible values, or a little less
+ than five bits, flat. The optimum values for speed end up being
+ about one bit more than those, so lbits is 8+1 and dbits is 5+1.
+ The optimum values may differ though from machine to machine, and
+ possibly even between compilers. Your mileage may vary.
+ */
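+
+/* A rough worked example of that trade-off (illustrative only): with
+   lbits = 9 the first-level literal/length table has 1 << 9 = 512 entries
+   and resolves every code of nine or fewer bits in a single lookup, while
+   a 12-bit code costs one extra lookup in a small second-level table sized
+   for the longest code sharing its 9-bit prefix.  A single flat table
+   covering the maximum 15-bit codes would need 1 << 15 = 32768 entries. */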
+
+
+/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
+#define BMAX 15 /* maximum bit length of any code */
+#define N_MAX 288 /* maximum number of codes in any set */
+
+#ifdef DEBUG_ZLIB
+ uInt inflate_hufts;
+#endif
+
+local int huft_build(b, n, s, d, e, t, m, zs)
+uIntf *b; /* code lengths in bits (all assumed <= BMAX) */
+uInt n; /* number of codes (assumed <= N_MAX) */
+uInt s; /* number of simple-valued codes (0..s-1) */
+uIntf *d; /* list of base values for non-simple codes */
+uIntf *e; /* list of extra bits for non-simple codes */
+inflate_huft * FAR *t; /* result: starting table */
+uIntf *m; /* maximum lookup bits, returns actual */
+z_stream *zs; /* for zalloc function */
+/* Given a list of code lengths and a maximum table size, make a set of
+ tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
+ if the given code set is incomplete (the tables are still built in this
+ case), Z_DATA_ERROR if the input is invalid (all zero length codes or an
+ over-subscribed set of lengths), or Z_MEM_ERROR if not enough memory. */
+{
+
+ uInt a; /* counter for codes of length k */
+ uInt c[BMAX+1]; /* bit length count table */
+ uInt f; /* i repeats in table every f entries */
+ int g; /* maximum code length */
+ int h; /* table level */
+ register uInt i; /* counter, current code */
+ register uInt j; /* counter */
+ register int k; /* number of bits in current code */
+ int l; /* bits per table (returned in m) */
+ register uIntf *p; /* pointer into c[], b[], or v[] */
+ inflate_huft *q; /* points to current table */
+ struct inflate_huft_s r; /* table entry for structure assignment */
+ inflate_huft *u[BMAX]; /* table stack */
+ uInt v[N_MAX]; /* values in order of bit length */
+ register int w; /* bits before this table == (l * h) */
+ uInt x[BMAX+1]; /* bit offsets, then code stack */
+ uIntf *xp; /* pointer into x */
+ int y; /* number of dummy codes added */
+ uInt z; /* number of entries in current table */
+
+
+ /* Generate counts for each bit length */
+ p = c;
+#define C0 *p++ = 0;
+#define C2 C0 C0 C0 C0
+#define C4 C2 C2 C2 C2
+ C4 /* clear c[]--assume BMAX+1 is 16 */
+ p = b; i = n;
+ do {
+ c[*p++]++; /* assume all entries <= BMAX */
+ } while (--i);
+ if (c[0] == n) /* null input--all zero length codes */
+ {
+ *t = (inflate_huft *)Z_NULL;
+ *m = 0;
+ return Z_OK;
+ }
+
+
+ /* Find minimum and maximum length, bound *m by those */
+ l = *m;
+ for (j = 1; j <= BMAX; j++)
+ if (c[j])
+ break;
+ k = j; /* minimum code length */
+ if ((uInt)l < j)
+ l = j;
+ for (i = BMAX; i; i--)
+ if (c[i])
+ break;
+ g = i; /* maximum code length */
+ if ((uInt)l > i)
+ l = i;
+ *m = l;
+
+
+ /* Adjust last length count to fill out codes, if needed */
+ for (y = 1 << j; j < i; j++, y <<= 1)
+ if ((y -= c[j]) < 0)
+ return Z_DATA_ERROR;
+ if ((y -= c[i]) < 0)
+ return Z_DATA_ERROR;
+ c[i] += y;
+
+
+ /* Generate starting offsets into the value table for each length */
+ x[1] = j = 0;
+ p = c + 1; xp = x + 2;
+ while (--i) { /* note that i == g from above */
+ *xp++ = (j += *p++);
+ }
+
+
+ /* Make a table of values in order of bit lengths */
+ p = b; i = 0;
+ do {
+ if ((j = *p++) != 0)
+ v[x[j]++] = i;
+ } while (++i < n);
+
+
+ /* Generate the Huffman codes and for each, make the table entries */
+ x[0] = i = 0; /* first Huffman code is zero */
+ p = v; /* grab values in bit order */
+ h = -1; /* no tables yet--level -1 */
+ w = -l; /* bits decoded == (l * h) */
+ u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */
+ q = (inflate_huft *)Z_NULL; /* ditto */
+ z = 0; /* ditto */
+
+ /* go through the bit lengths (k already is bits in shortest code) */
+ for (; k <= g; k++)
+ {
+ a = c[k];
+ while (a--)
+ {
+ /* here i is the Huffman code of length k bits for value *p */
+ /* make tables up to required level */
+ while (k > w + l)
+ {
+ h++;
+ w += l; /* previous table always l bits */
+
+ /* compute minimum size table less than or equal to l bits */
+ z = (z = g - w) > (uInt)l ? l : z; /* table size upper limit */
+ if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
+ { /* too few codes for k-w bit table */
+ f -= a + 1; /* deduct codes from patterns left */
+ xp = c + k;
+ if (j < z)
+ while (++j < z) /* try smaller tables up to z bits */
+ {
+ if ((f <<= 1) <= *++xp)
+ break; /* enough codes to use up j bits */
+ f -= *xp; /* else deduct codes from patterns */
+ }
+ }
+ z = 1 << j; /* table entries for j-bit table */
+
+ /* allocate and link in new table */
+ if ((q = (inflate_huft *)ZALLOC
+ (zs,z + 1,sizeof(inflate_huft))) == Z_NULL)
+ {
+ if (h)
+ inflate_trees_free(u[0], zs);
+ return Z_MEM_ERROR; /* not enough memory */
+ }
+ q->word.Nalloc = z + 1;
+#ifdef DEBUG_ZLIB
+ inflate_hufts += z + 1;
+#endif
+ *t = q + 1; /* link to list for huft_free() */
+ *(t = &(q->next)) = Z_NULL;
+ u[h] = ++q; /* table starts after link */
+
+ /* connect to last table, if there is one */
+ if (h)
+ {
+ x[h] = i; /* save pattern for backing up */
+ r.bits = (Byte)l; /* bits to dump before this table */
+ r.exop = (Byte)j; /* bits in this table */
+ r.next = q; /* pointer to this table */
+ j = i >> (w - l); /* (get around Turbo C bug) */
+ u[h-1][j] = r; /* connect to last table */
+ }
+ }
+
+ /* set up table entry in r */
+ r.bits = (Byte)(k - w);
+ if (p >= v + n)
+ r.exop = 128 + 64; /* out of values--invalid code */
+ else if (*p < s)
+ {
+ r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */
+ r.base = *p++; /* simple code is just the value */
+ }
+ else
+ {
+ r.exop = (Byte)e[*p - s] + 16 + 64; /* non-simple--look up in lists */
+ r.base = d[*p++ - s];
+ }
+
+ /* fill code-like entries with r */
+ f = 1 << (k - w);
+ for (j = i >> w; j < z; j += f)
+ q[j] = r;
+
+ /* backwards increment the k-bit code i */
+ for (j = 1 << (k - 1); i & j; j >>= 1)
+ i ^= j;
+ i ^= j;
+
+ /* backup over finished tables */
+ while ((i & ((1 << w) - 1)) != x[h])
+ {
+ h--; /* don't need to update q */
+ w -= l;
+ }
+ }
+ }
+
+
+ /* Return Z_BUF_ERROR if we were given an incomplete table */
+ return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
+}
+
+
+local int inflate_trees_bits(c, bb, tb, z)
+uIntf *c; /* 19 code lengths */
+uIntf *bb; /* bits tree desired/actual depth */
+inflate_huft * FAR *tb; /* bits tree result */
+z_stream *z; /* for zfree function */
+{
+ int r;
+
+ r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z);
+ if (r == Z_DATA_ERROR)
+ z->msg = "oversubscribed dynamic bit lengths tree";
+ else if (r == Z_BUF_ERROR)
+ {
+ inflate_trees_free(*tb, z);
+ z->msg = "incomplete dynamic bit lengths tree";
+ r = Z_DATA_ERROR;
+ }
+ return r;
+}
+
+
+local int inflate_trees_dynamic(nl, nd, c, bl, bd, tl, td, z)
+uInt nl; /* number of literal/length codes */
+uInt nd; /* number of distance codes */
+uIntf *c; /* that many (total) code lengths */
+uIntf *bl; /* literal desired/actual bit depth */
+uIntf *bd; /* distance desired/actual bit depth */
+inflate_huft * FAR *tl; /* literal/length tree result */
+inflate_huft * FAR *td; /* distance tree result */
+z_stream *z; /* for zfree function */
+{
+ int r;
+
+ /* build literal/length tree */
+ if ((r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z)) != Z_OK)
+ {
+ if (r == Z_DATA_ERROR)
+ z->msg = "oversubscribed literal/length tree";
+ else if (r == Z_BUF_ERROR)
+ {
+ inflate_trees_free(*tl, z);
+ z->msg = "incomplete literal/length tree";
+ r = Z_DATA_ERROR;
+ }
+ return r;
+ }
+
+ /* build distance tree */
+ if ((r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z)) != Z_OK)
+ {
+ if (r == Z_DATA_ERROR)
+      z->msg = "oversubscribed distance tree";
+ else if (r == Z_BUF_ERROR) {
+#ifdef PKZIP_BUG_WORKAROUND
+ r = Z_OK;
+ }
+#else
+ inflate_trees_free(*td, z);
+      z->msg = "incomplete distance tree";
+ r = Z_DATA_ERROR;
+ }
+ inflate_trees_free(*tl, z);
+ return r;
+#endif
+ }
+
+ /* done */
+ return Z_OK;
+}
+
+
+/* build fixed tables only once--keep them here */
+local int fixed_lock = 0;
+local int fixed_built = 0;
+#define FIXEDH 530 /* number of hufts used by fixed tables */
+local uInt fixed_left = FIXEDH;
+local inflate_huft fixed_mem[FIXEDH];
+local uInt fixed_bl;
+local uInt fixed_bd;
+local inflate_huft *fixed_tl;
+local inflate_huft *fixed_td;
+
+
+local voidpf falloc(q, n, s)
+voidpf q; /* opaque pointer (not used) */
+uInt n; /* number of items */
+uInt s; /* size of item */
+{
+ Assert(s == sizeof(inflate_huft) && n <= fixed_left,
+ "inflate_trees falloc overflow");
+ if (q) s++; /* to make some compilers happy */
+ fixed_left -= n;
+ return (voidpf)(fixed_mem + fixed_left);
+}
+
+
+local void ffree(q, p, n)
+voidpf q;
+voidpf p;
+uInt n;
+{
+ Assert(0, "inflate_trees ffree called!");
+ if (q) q = p; /* to make some compilers happy */
+}
+
+
+local int inflate_trees_fixed(bl, bd, tl, td)
+uIntf *bl; /* literal desired/actual bit depth */
+uIntf *bd; /* distance desired/actual bit depth */
+inflate_huft * FAR *tl; /* literal/length tree result */
+inflate_huft * FAR *td; /* distance tree result */
+{
+ /* build fixed tables if not built already--lock out other instances */
+ while (++fixed_lock > 1)
+ fixed_lock--;
+ if (!fixed_built)
+ {
+ int k; /* temporary variable */
+ unsigned c[288]; /* length list for huft_build */
+ z_stream z; /* for falloc function */
+
+ /* set up fake z_stream for memory routines */
+ z.zalloc = falloc;
+ z.zfree = ffree;
+ z.opaque = Z_NULL;
+
+ /* literal table */
+ for (k = 0; k < 144; k++)
+ c[k] = 8;
+ for (; k < 256; k++)
+ c[k] = 9;
+ for (; k < 280; k++)
+ c[k] = 7;
+ for (; k < 288; k++)
+ c[k] = 8;
+ fixed_bl = 7;
+ huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z);
+
+ /* distance table */
+ for (k = 0; k < 30; k++)
+ c[k] = 5;
+ fixed_bd = 5;
+ huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z);
+
+ /* done */
+ fixed_built = 1;
+ }
+ fixed_lock--;
+ *bl = fixed_bl;
+ *bd = fixed_bd;
+ *tl = fixed_tl;
+ *td = fixed_td;
+ return Z_OK;
+}
+
+
+local int inflate_trees_free(t, z)
+inflate_huft *t; /* table to free */
+z_stream *z; /* for zfree function */
+/* Free the malloc'ed tables built by huft_build(), which makes a linked
+ list of the tables it made, with the links in a dummy first entry of
+ each table. */
+{
+ register inflate_huft *p, *q;
+
+ /* Go through linked list, freeing from the malloced (t[-1]) address. */
+ p = t;
+ while (p != Z_NULL)
+ {
+ q = (--p)->next;
+ ZFREE(z, p, p->word.Nalloc * sizeof(inflate_huft));
+ p = q;
+ }
+ return Z_OK;
+}
+
+/*+++++*/
+/* infcodes.c -- process literals and length/distance pairs
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* simplify the use of the inflate_huft type with some defines */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* inflate codes private state */
+struct inflate_codes_state {
+
+ /* mode */
+ enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
+ START, /* x: set up for LEN */
+ LEN, /* i: get length/literal/eob next */
+ LENEXT, /* i: getting length extra (have base) */
+ DIST, /* i: get distance next */
+ DISTEXT, /* i: getting distance extra */
+ COPY, /* o: copying bytes in window, waiting for space */
+ LIT, /* o: got literal, waiting for output space */
+ WASH, /* o: got eob, possibly still output waiting */
+ END, /* x: got eob and all data flushed */
+ BADCODE} /* x: got error */
+ mode; /* current inflate_codes mode */
+
+ /* mode dependent information */
+ uInt len;
+ union {
+ struct {
+ inflate_huft *tree; /* pointer into tree */
+ uInt need; /* bits needed */
+ } code; /* if LEN or DIST, where in tree */
+ uInt lit; /* if LIT, literal */
+ struct {
+ uInt get; /* bits to get for extra */
+ uInt dist; /* distance back to copy from */
+ } copy; /* if EXT or COPY, where and how much */
+ } sub; /* submode */
+
+ /* mode independent information */
+ Byte lbits; /* ltree bits decoded per branch */
+  Byte dbits;           /* dtree bits decoded per branch */
+ inflate_huft *ltree; /* literal/length/eob tree */
+ inflate_huft *dtree; /* distance tree */
+
+};
+
+
+local inflate_codes_statef *inflate_codes_new(bl, bd, tl, td, z)
+uInt bl, bd;
+inflate_huft *tl, *td;
+z_stream *z;
+{
+ inflate_codes_statef *c;
+
+ if ((c = (inflate_codes_statef *)
+ ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL)
+ {
+ c->mode = START;
+ c->lbits = (Byte)bl;
+ c->dbits = (Byte)bd;
+ c->ltree = tl;
+ c->dtree = td;
+ Tracev(( "inflate: codes new\n"));
+ }
+ return c;
+}
+
+
+local int inflate_codes(s, z, r)
+inflate_blocks_statef *s;
+z_stream *z;
+int r;
+{
+ uInt j; /* temporary storage */
+ inflate_huft *t; /* temporary pointer */
+ uInt e; /* extra bits or operation */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+ Bytef *f; /* pointer to copy strings from */
+ inflate_codes_statef *c = s->sub.decode.codes; /* codes state */
+
+ /* copy input/output information to locals (UPDATE macro restores) */
+ LOAD
+
+ /* process input and output based on current state */
+ while (1) switch (c->mode)
+ { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
+ case START: /* x: set up for LEN */
+#ifndef SLOW
+ if (m >= 258 && n >= 10)
+ {
+ UPDATE
+ r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
+ LOAD
+ if (r != Z_OK)
+ {
+ c->mode = r == Z_STREAM_END ? WASH : BADCODE;
+ break;
+ }
+ }
+#endif /* !SLOW */
+ c->sub.code.need = c->lbits;
+ c->sub.code.tree = c->ltree;
+ c->mode = LEN;
+ case LEN: /* i: get length/literal/eob next */
+ j = c->sub.code.need;
+ NEEDBITS(j)
+ t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
+ DUMPBITS(t->bits)
+ e = (uInt)(t->exop);
+ if (e == 0) /* literal */
+ {
+ c->sub.lit = t->base;
+ Tracevv(( t->base >= 0x20 && t->base < 0x7f ?
+ "inflate: literal '%c'\n" :
+ "inflate: literal 0x%02x\n", t->base));
+ c->mode = LIT;
+ break;
+ }
+ if (e & 16) /* length */
+ {
+ c->sub.copy.get = e & 15;
+ c->len = t->base;
+ c->mode = LENEXT;
+ break;
+ }
+ if ((e & 64) == 0) /* next table */
+ {
+ c->sub.code.need = e;
+ c->sub.code.tree = t->next;
+ break;
+ }
+ if (e & 32) /* end of block */
+ {
+ Tracevv(( "inflate: end of block\n"));
+ c->mode = WASH;
+ break;
+ }
+ c->mode = BADCODE; /* invalid code */
+ z->msg = "invalid literal/length code";
+ r = Z_DATA_ERROR;
+ LEAVE
+ case LENEXT: /* i: getting length extra (have base) */
+ j = c->sub.copy.get;
+ NEEDBITS(j)
+ c->len += (uInt)b & inflate_mask[j];
+ DUMPBITS(j)
+ c->sub.code.need = c->dbits;
+ c->sub.code.tree = c->dtree;
+ Tracevv(( "inflate: length %u\n", c->len));
+ c->mode = DIST;
+ case DIST: /* i: get distance next */
+ j = c->sub.code.need;
+ NEEDBITS(j)
+ t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
+ DUMPBITS(t->bits)
+ e = (uInt)(t->exop);
+ if (e & 16) /* distance */
+ {
+ c->sub.copy.get = e & 15;
+ c->sub.copy.dist = t->base;
+ c->mode = DISTEXT;
+ break;
+ }
+ if ((e & 64) == 0) /* next table */
+ {
+ c->sub.code.need = e;
+ c->sub.code.tree = t->next;
+ break;
+ }
+ c->mode = BADCODE; /* invalid code */
+ z->msg = "invalid distance code";
+ r = Z_DATA_ERROR;
+ LEAVE
+ case DISTEXT: /* i: getting distance extra */
+ j = c->sub.copy.get;
+ NEEDBITS(j)
+ c->sub.copy.dist += (uInt)b & inflate_mask[j];
+ DUMPBITS(j)
+ Tracevv(( "inflate: distance %u\n", c->sub.copy.dist));
+ c->mode = COPY;
+ case COPY: /* o: copying bytes in window, waiting for space */
+#ifndef __TURBOC__ /* Turbo C bug for following expression */
+ f = (uInt)(q - s->window) < c->sub.copy.dist ?
+ s->end - (c->sub.copy.dist - (q - s->window)) :
+ q - c->sub.copy.dist;
+#else
+ f = q - c->sub.copy.dist;
+ if ((uInt)(q - s->window) < c->sub.copy.dist)
+ f = s->end - (c->sub.copy.dist - (q - s->window));
+#endif
+ while (c->len)
+ {
+ NEEDOUT
+ OUTBYTE(*f++)
+ if (f == s->end)
+ f = s->window;
+ c->len--;
+ }
+ c->mode = START;
+ break;
+ case LIT: /* o: got literal, waiting for output space */
+ NEEDOUT
+ OUTBYTE(c->sub.lit)
+ c->mode = START;
+ break;
+ case WASH: /* o: got eob, possibly more output */
+ FLUSH
+ if (s->read != s->write)
+ LEAVE
+ c->mode = END;
+ case END:
+ r = Z_STREAM_END;
+ LEAVE
+ case BADCODE: /* x: got error */
+ r = Z_DATA_ERROR;
+ LEAVE
+ default:
+ r = Z_STREAM_ERROR;
+ LEAVE
+ }
+}
+
+
+local void inflate_codes_free(c, z)
+inflate_codes_statef *c;
+z_stream *z;
+{
+ ZFREE(z, c, sizeof(struct inflate_codes_state));
+ Tracev(( "inflate: codes free\n"));
+}
+
+/*+++++*/
+/* inflate_util.c -- data and routines common to blocks and codes
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* copy as much as possible from the sliding window to the output area */
+local int inflate_flush(s, z, r)
+inflate_blocks_statef *s;
+z_stream *z;
+int r;
+{
+ uInt n;
+ Bytef *p, *q;
+
+ /* local copies of source and destination pointers */
+ p = z->next_out;
+ q = s->read;
+
+ /* compute number of bytes to copy as far as end of window */
+ n = (uInt)((q <= s->write ? s->write : s->end) - q);
+ if (n > z->avail_out) n = z->avail_out;
+ if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+ /* update counters */
+ z->avail_out -= n;
+ z->total_out += n;
+
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ s->check = (*s->checkfn)(s->check, q, n);
+
+ /* copy as far as end of window */
+ zmemcpy(p, q, n);
+ p += n;
+ q += n;
+
+ /* see if more to copy at beginning of window */
+ if (q == s->end)
+ {
+ /* wrap pointers */
+ q = s->window;
+ if (s->write == s->end)
+ s->write = s->window;
+
+ /* compute bytes to copy */
+ n = (uInt)(s->write - q);
+ if (n > z->avail_out) n = z->avail_out;
+ if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+ /* update counters */
+ z->avail_out -= n;
+ z->total_out += n;
+
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ s->check = (*s->checkfn)(s->check, q, n);
+
+ /* copy */
+ zmemcpy(p, q, n);
+ p += n;
+ q += n;
+ }
+
+ /* update pointers */
+ z->next_out = p;
+ s->read = q;
+
+ /* done */
+ return r;
+}
+
+
+/*+++++*/
+/* inffast.c -- process literals and length/distance pairs fast
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* simplify the use of the inflate_huft type with some defines */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* macros for bit input with no checking and for returning unused bytes */
+#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define UNGRAB {n+=(c=k>>3);p-=c;k&=7;}
+
+/* Called with number of bytes left to write in window at least 258
+ (the maximum string length) and number of input bytes available
+ at least ten. The ten bytes are six bytes for the longest length/
+ distance pair plus four bytes for overloading the bit buffer. */
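+
+/* Worked out (illustrative): the longest length/distance pair needs at
+   most 15 bits of length code + 5 extra length bits + 15 bits of distance
+   code + 13 extra distance bits = 48 bits, i.e. six bytes.  The other four
+   bytes cover whole bytes that GRABBITS may pull into the 32-bit bit
+   buffer ahead of need and that UNGRAB hands back before returning. */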
+
+local int inflate_fast(bl, bd, tl, td, s, z)
+uInt bl, bd;
+inflate_huft *tl, *td;
+inflate_blocks_statef *s;
+z_stream *z;
+{
+ inflate_huft *t; /* temporary pointer */
+ uInt e; /* extra bits or operation */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+ uInt ml; /* mask for literal/length tree */
+ uInt md; /* mask for distance tree */
+ uInt c; /* bytes to copy */
+ uInt d; /* distance back to copy from */
+ Bytef *r; /* copy source pointer */
+
+ /* load input, output, bit values */
+ LOAD
+
+ /* initialize masks */
+ ml = inflate_mask[bl];
+ md = inflate_mask[bd];
+
+ /* do until not enough input or output space for fast loop */
+ do { /* assume called with m >= 258 && n >= 10 */
+ /* get literal/length code */
+ GRABBITS(20) /* max bits for literal/length code */
+ if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
+ {
+ DUMPBITS(t->bits)
+ Tracevv(( t->base >= 0x20 && t->base < 0x7f ?
+ "inflate: * literal '%c'\n" :
+ "inflate: * literal 0x%02x\n", t->base));
+ *q++ = (Byte)t->base;
+ m--;
+ continue;
+ }
+ do {
+ DUMPBITS(t->bits)
+ if (e & 16)
+ {
+ /* get extra bits for length */
+ e &= 15;
+ c = t->base + ((uInt)b & inflate_mask[e]);
+ DUMPBITS(e)
+ Tracevv(( "inflate: * length %u\n", c));
+
+ /* decode distance base of block to copy */
+ GRABBITS(15); /* max bits for distance code */
+ e = (t = td + ((uInt)b & md))->exop;
+ do {
+ DUMPBITS(t->bits)
+ if (e & 16)
+ {
+ /* get extra bits to add to distance base */
+ e &= 15;
+ GRABBITS(e) /* get extra bits (up to 13) */
+ d = t->base + ((uInt)b & inflate_mask[e]);
+ DUMPBITS(e)
+ Tracevv(( "inflate: * distance %u\n", d));
+
+ /* do the copy */
+ m -= c;
+ if ((uInt)(q - s->window) >= d) /* offset before dest */
+ { /* just copy */
+ r = q - d;
+ *q++ = *r++; c--; /* minimum count is three, */
+ *q++ = *r++; c--; /* so unroll loop a little */
+ }
+ else /* else offset after destination */
+ {
+ e = d - (q - s->window); /* bytes from offset to end */
+ r = s->end - e; /* pointer to offset */
+ if (c > e) /* if source crosses, */
+ {
+ c -= e; /* copy to end of window */
+ do {
+ *q++ = *r++;
+ } while (--e);
+ r = s->window; /* copy rest from start of window */
+ }
+ }
+ do { /* copy all or what's left */
+ *q++ = *r++;
+ } while (--c);
+ break;
+ }
+ else if ((e & 64) == 0)
+ e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop;
+ else
+ {
+ z->msg = "invalid distance code";
+ UNGRAB
+ UPDATE
+ return Z_DATA_ERROR;
+ }
+ } while (1);
+ break;
+ }
+ if ((e & 64) == 0)
+ {
+ if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0)
+ {
+ DUMPBITS(t->bits)
+ Tracevv(( t->base >= 0x20 && t->base < 0x7f ?
+ "inflate: * literal '%c'\n" :
+ "inflate: * literal 0x%02x\n", t->base));
+ *q++ = (Byte)t->base;
+ m--;
+ break;
+ }
+ }
+ else if (e & 32)
+ {
+ Tracevv(( "inflate: * end of block\n"));
+ UNGRAB
+ UPDATE
+ return Z_STREAM_END;
+ }
+ else
+ {
+ z->msg = "invalid literal/length code";
+ UNGRAB
+ UPDATE
+ return Z_DATA_ERROR;
+ }
+ } while (1);
+ } while (m >= 258 && n >= 10);
+
+ /* not enough input or output--restore pointers and return */
+ UNGRAB
+ UPDATE
+ return Z_OK;
+}
+
+
+/*+++++*/
+/* zutil.c -- target dependent utility functions for the compression library
+ * Copyright (C) 1995 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: zutil.c,v 1.8 1995/05/03 17:27:12 jloup Exp */
+
+char *zlib_version = ZLIB_VERSION;
+
+char *z_errmsg[] = {
+"stream end", /* Z_STREAM_END 1 */
+"", /* Z_OK 0 */
+"file error", /* Z_ERRNO (-1) */
+"stream error", /* Z_STREAM_ERROR (-2) */
+"data error", /* Z_DATA_ERROR (-3) */
+"insufficient memory", /* Z_MEM_ERROR (-4) */
+"buffer error", /* Z_BUF_ERROR (-5) */
+""};
+
+
+/*+++++*/
+/* adler32.c -- compute the Adler-32 checksum of a data stream
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: adler32.c,v 1.6 1995/05/03 17:27:08 jloup Exp */
+
+#define BASE 65521L /* largest prime smaller than 65536 */
+#define NMAX 5552
+/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
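+/* Checking that bound (illustrative arithmetic): for n = 5552,
+   255*5552*5553/2 + 5553*65520 = 3930857640 + 363832560 = 4294690200,
+   which still fits below 2^32-1 = 4294967295, while n = 5553 already
+   gives 4296171735.  So s1 and s2 below can defer the costly modulo
+   until NMAX bytes have been summed without 32-bit overflow. */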
+
+#define DO1(buf) {s1 += *buf++; s2 += s1;}
+#define DO2(buf) DO1(buf); DO1(buf);
+#define DO4(buf) DO2(buf); DO2(buf);
+#define DO8(buf) DO4(buf); DO4(buf);
+#define DO16(buf) DO8(buf); DO8(buf);
+
+/* ========================================================================= */
+uLong adler32(adler, buf, len)
+ uLong adler;
+ Bytef *buf;
+ uInt len;
+{
+ unsigned long s1 = adler & 0xffff;
+ unsigned long s2 = (adler >> 16) & 0xffff;
+ int k;
+
+ if (buf == Z_NULL) return 1L;
+
+ while (len > 0) {
+ k = len < NMAX ? len : NMAX;
+ len -= k;
+ while (k >= 16) {
+ DO16(buf);
+ k -= 16;
+ }
+ if (k != 0) do {
+ DO1(buf);
+ } while (--k);
+ s1 %= BASE;
+ s2 %= BASE;
+ }
+ return (s2 << 16) | s1;
+}
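+
+/* Illustrative sketch only (not part of zlib): adler32() above can be fed
+   its own previous return value to checksum a buffer incrementally, with
+   1L as the starting value.  The guard macro, function name and chunk
+   parameter below are hypothetical. */
+#ifdef ZLIB_EXAMPLE_SKETCHES
+local uLong example_adler_in_chunks OF((Bytef *, uInt, uInt));
+local uLong example_adler_in_chunks(buf, len, chunk)
+Bytef *buf;
+uInt len;
+uInt chunk;      /* arbitrary step size used for the illustration */
+{
+  uLong a = 1L;  /* checksum of the empty buffer */
+
+  while (len > 0)
+  {
+    uInt n = chunk && chunk < len ? chunk : len;
+
+    a = adler32(a, buf, n);  /* same result as one call over the whole buffer */
+    buf += n;
+    len -= n;
+  }
+  return a;
+}
+#endif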
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/zlib.h b/c/src/lib/libbsp/powerpc/shared/bootloader/zlib.h
new file mode 100644
index 0000000000..31485f4632
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/zlib.h
@@ -0,0 +1,438 @@
+/* $Id$ */
+
+/*
+ * This file is derived from zlib.h and zconf.h from the zlib-0.95
+ * distribution by Jean-loup Gailly and Mark Adler, with some additions
+ * by Paul Mackerras to aid in implementing Deflate compression and
+ * decompression for PPP packets.
+ */
+
+/*
+ * ==FILEVERSION 960122==
+ *
+ * This marker is used by the Linux installation script to determine
+ * whether an up-to-date version of this file is already installed.
+ */
+
+/* zlib.h -- interface of the 'zlib' general purpose compression library
+ version 0.95, Aug 16th, 1995.
+
+ Copyright (C) 1995 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ gzip@prep.ai.mit.edu madler@alumni.caltech.edu
+ */
+
+#ifndef _ZLIB_H
+#define _ZLIB_H
+
+#define local
+#ifdef DEBUG_ZLIB
+#include <bsp/consoleIo.h>
+#define fprintf printk
+#endif
+
+/* #include "zconf.h" */ /* included directly here */
+
+/* zconf.h -- configuration of the zlib compression library
+ * Copyright (C) 1995 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: zconf.h,v 1.12 1995/05/03 17:27:12 jloup Exp */
+
+/*
+ The library does not install any signal handler. It is recommended to
+ add at least a handler for SIGSEGV when decompressing; the library checks
+ the consistency of the input data whenever possible but may go nuts
+ for some forms of corrupted input.
+ */
+
+/*
+ * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
+ * than 64k bytes at a time (needed on systems with 16-bit int).
+ * Compile with -DUNALIGNED_OK if it is OK to access shorts or ints
+ * at addresses which are not a multiple of their size.
+ * Under DOS, -DFAR=far or -DFAR=__far may be needed.
+ */
+
+#ifndef STDC
+# if defined(MSDOS) || defined(__STDC__) || defined(__cplusplus)
+# define STDC
+# endif
+#endif
+
+#ifdef __MWERKS__ /* Metrowerks CodeWarrior declares fileno() in unix.h */
+# include <unix.h>
+#endif
+
+/* Maximum value for memLevel in deflateInit2 */
+#ifndef MAX_MEM_LEVEL
+# ifdef MAXSEG_64K
+# define MAX_MEM_LEVEL 8
+# else
+# define MAX_MEM_LEVEL 9
+# endif
+#endif
+
+#ifndef FAR
+# define FAR
+#endif
+
+/* Maximum value for windowBits in deflateInit2 and inflateInit2 */
+#ifndef MAX_WBITS
+# define MAX_WBITS 15 /* 32K LZ77 window */
+#endif
+
+/* The memory requirements for deflate are (in bytes):
+ 1 << (windowBits+2) + 1 << (memLevel+9)
+ that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
+ plus a few kilobytes for small objects. For example, if you want to reduce
+ the default memory requirements from 256K to 128K, compile with
+ make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
+ Of course this will generally degrade compression (there's no free lunch).
+
+ The memory requirements for inflate are (in bytes) 1 << windowBits
+ that is, 32K for windowBits=15 (default value) plus a few kilobytes
+ for small objects.
+*/
+
+ /* Type declarations */
+
+#ifndef OF /* function prototypes */
+# ifdef STDC
+# define OF(args) args
+# else
+# define OF(args) ()
+# endif
+#endif
+
+typedef unsigned char Byte; /* 8 bits */
+typedef unsigned int uInt; /* 16 bits or more */
+typedef unsigned long uLong; /* 32 bits or more */
+
+typedef Byte FAR Bytef;
+typedef char FAR charf;
+typedef int FAR intf;
+typedef uInt FAR uIntf;
+typedef uLong FAR uLongf;
+
+#ifdef STDC
+ typedef void FAR *voidpf;
+ typedef void *voidp;
+#else
+ typedef Byte FAR *voidpf;
+ typedef Byte *voidp;
+#endif
+
+/* end of original zconf.h */
+
+#define ZLIB_VERSION "0.95P"
+
+/*
+ The 'zlib' compression library provides in-memory compression and
+ decompression functions, including integrity checks of the uncompressed
+ data. This version of the library supports only one compression method
+ (deflation) but other algorithms may be added later and will have the same
+ stream interface.
+
+ For compression the application must provide the output buffer and
+ may optionally provide the input buffer for optimization. For decompression,
+ the application must provide the input buffer and may optionally provide
+ the output buffer for optimization.
+
+ Compression can be done in a single step if the buffers are large
+ enough (for example if an input file is mmap'ed), or can be done by
+ repeated calls of the compression function. In the latter case, the
+ application must provide more input and/or consume the output
+ (providing more output space) before each call.
+*/
+
+typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
+typedef void (*free_func) OF((voidpf opaque, voidpf address, uInt nbytes));
+
+struct internal_state;
+
+typedef struct z_stream_s {
+ Bytef *next_in; /* next input byte */
+ uInt avail_in; /* number of bytes available at next_in */
+ uLong total_in; /* total nb of input bytes read so far */
+
+ Bytef *next_out; /* next output byte should be put there */
+ uInt avail_out; /* remaining free space at next_out */
+ uLong total_out; /* total nb of bytes output so far */
+
+ char *msg; /* last error message, NULL if no error */
+ struct internal_state FAR *state; /* not visible by applications */
+
+ alloc_func zalloc; /* used to allocate the internal state */
+ free_func zfree; /* used to free the internal state */
+ voidp opaque; /* private data object passed to zalloc and zfree */
+
+ Byte data_type; /* best guess about the data type: ascii or binary */
+
+} z_stream;
+
+/*
+ The application must update next_in and avail_in when avail_in has
+ dropped to zero. It must update next_out and avail_out when avail_out
+ has dropped to zero. The application must initialize zalloc, zfree and
+ opaque before calling the init function. All other fields are set by the
+ compression library and must not be updated by the application.
+
+ The opaque value provided by the application will be passed as the first
+ parameter for calls of zalloc and zfree. This can be useful for custom
+ memory management. The compression library attaches no meaning to the
+ opaque value.
+
+ zalloc must return Z_NULL if there is not enough memory for the object.
+ On 16-bit systems, the functions zalloc and zfree must be able to allocate
+ exactly 65536 bytes, but will not be required to allocate more than this
+ if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
+ pointers returned by zalloc for objects of exactly 65536 bytes *must*
+ have their offset normalized to zero. The default allocation function
+ provided by this library ensures this (see zutil.c). To reduce memory
+ requirements and avoid any allocation of 64K objects, at the expense of
+ compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
+
+ The fields total_in and total_out can be used for statistics or
+ progress reports. After compression, total_in holds the total size of
+ the uncompressed data and may be saved for use in the decompressor
+ (particularly if the decompressor wants to decompress everything in
+ a single step).
+*/
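+
+/* Illustrative sketch only (not part of this header): one way an
+   application could satisfy the zalloc/zfree requirements above when no
+   heap is available, using a bump allocator over a static scratch area.
+   The guard macro, names and pool size are all hypothetical. */
+#ifdef ZLIB_EXAMPLE_SKETCHES
+#define EXAMPLE_POOL_SIZE 65536
+static Byte example_pool[EXAMPLE_POOL_SIZE];
+static uInt example_used;
+
+static voidpf example_zalloc OF((voidpf, uInt, uInt));
+static voidpf example_zalloc(opaque, items, size)
+voidpf opaque;
+uInt items;
+uInt size;
+{
+  uInt n = items * size;
+
+  if (opaque) items = size;       /* opaque is unused here */
+  if (n > EXAMPLE_POOL_SIZE - example_used)
+    return (voidpf)0;             /* i.e. Z_NULL: not enough memory */
+  example_used += n;
+  return (voidpf)(example_pool + example_used - n);
+}
+
+static void example_zfree OF((voidpf, voidpf, uInt));
+static void example_zfree(opaque, address, nbytes)
+voidpf opaque;
+voidpf address;
+uInt nbytes;
+{
+  /* a bump allocator never reclaims; acceptable for one-shot use */
+}
+
+/* The application would then set strm.zalloc = example_zalloc,
+   strm.zfree = example_zfree and strm.opaque = 0 before calling the
+   init function. */
+#endif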
+
+ /* constants */
+
+#define Z_NO_FLUSH 0
+#define Z_PARTIAL_FLUSH 1
+#define Z_FULL_FLUSH 2
+#define Z_SYNC_FLUSH 3 /* experimental: partial_flush + byte align */
+#define Z_FINISH 4
+#define Z_PACKET_FLUSH 5
+/* See deflate() below for the usage of these constants */
+
+#define Z_OK 0
+#define Z_STREAM_END 1
+#define Z_ERRNO (-1)
+#define Z_STREAM_ERROR (-2)
+#define Z_DATA_ERROR (-3)
+#define Z_MEM_ERROR (-4)
+#define Z_BUF_ERROR (-5)
+/* error codes for the compression/decompression functions */
+
+#define Z_BEST_SPEED 1
+#define Z_BEST_COMPRESSION 9
+#define Z_DEFAULT_COMPRESSION (-1)
+/* compression levels */
+
+#define Z_FILTERED 1
+#define Z_HUFFMAN_ONLY 2
+#define Z_DEFAULT_STRATEGY 0
+
+#define Z_BINARY 0
+#define Z_ASCII 1
+#define Z_UNKNOWN 2
+/* Used to set the data_type field */
+
+#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
+
+extern char *zlib_version;
+/* The application can compare zlib_version and ZLIB_VERSION for consistency.
+ If the first character differs, the library code actually used is
+ not compatible with the zlib.h header file used by the application.
+ */
+
+ /* basic functions */
+
+extern int inflateInit OF((z_stream *strm));
+/*
+ Initializes the internal stream state for decompression. The fields
+ zalloc and zfree must be initialized before by the caller. If zalloc and
+ zfree are set to Z_NULL, inflateInit updates them to use default allocation
+ functions.
+
+ inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory. msg is set to null if there is no error message.
+ inflateInit does not perform any decompression: this will be done by
+ inflate().
+*/
+
+
+extern int inflate OF((z_stream *strm, int flush));
+/*
+ Performs one or both of the following actions:
+
+ - Decompress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in is updated and processing
+ will resume at this point for the next call of inflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. inflate() always provides as much output as possible
+ (until there is no more input data or no more space in the output buffer).
+
+ Before the call of inflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming
+ more output, and updating the next_* and avail_* values accordingly.
+ The application can consume the uncompressed output when it wants, for
+ example when the output buffer is full (avail_out == 0), or after each
+ call of inflate().
+
+ If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH,
+ inflate flushes as much output as possible to the output buffer. The
+ flushing behavior of inflate is not specified for values of the flush
+ parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the
+ current implementation actually flushes as much output as possible
+ anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data
+ has been consumed, it is expecting to see the length field of a stored
+ block; if not, it returns Z_DATA_ERROR.
+
+ inflate() should normally be called until it returns Z_STREAM_END or an
+ error. However if all decompression is to be performed in a single step
+ (a single call of inflate), the parameter flush should be set to
+ Z_FINISH. In this case all pending input is processed and all pending
+ output is flushed; avail_out must be large enough to hold all the
+ uncompressed data. (The size of the uncompressed data may have been saved
+ by the compressor for this purpose.) The next operation on this stream must
+ be inflateEnd to deallocate the decompression state. The use of Z_FINISH
+ is never required, but can be used to inform inflate that a faster routine
+ may be used for the single inflate() call.
+
+ inflate() returns Z_OK if some progress has been made (more input
+ processed or more output produced), Z_STREAM_END if the end of the
+ compressed data has been reached and all uncompressed output has been
+ produced, Z_DATA_ERROR if the input data was corrupted, Z_STREAM_ERROR if
+ the stream structure was inconsistent (for example if next_in or next_out
+ was NULL), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if no
+ progress is possible or if there was not enough room in the output buffer
+ when Z_FINISH is used. In the Z_DATA_ERROR case, the application may then
+ call inflateSync to look for a good compression block. */
+
+
+extern int inflateEnd OF((z_stream *strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any
+ pending output.
+
+ inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
+ was inconsistent. In the error case, msg may be set but then points to a
+ static string (which must not be deallocated).
+*/
+
+ /* advanced functions */
+
+extern int inflateInit2 OF((z_stream *strm,
+ int windowBits));
+/*
+ This is another version of inflateInit with more compression options. The
+ fields next_out, zalloc and zfree must be initialized before by the caller.
+
+ The windowBits parameter is the base two logarithm of the maximum window
+ size (the size of the history buffer). It should be in the range 8..15 for
+ this version of the library (the value 16 will be allowed soon). The
+ default value is 15 if inflateInit is used instead. If a compressed stream
+ with a larger window size is given as input, inflate() will return with
+ the error code Z_DATA_ERROR instead of trying to allocate a larger window.
+
+ If next_out is not null, the library will use this buffer for the history
+ buffer; the buffer must either be large enough to hold the entire output
+ data, or have at least 1<<windowBits bytes. If next_out is null, the
+ library will allocate its own buffer (and leave next_out null). next_in
+ need not be provided here but must be provided by the application for the
+ next call of inflate().
+
+ If the history buffer is provided by the application, next_out must
+ never be changed by the application since the decompressor maintains
+ history information inside this buffer from call to call; the application
+ can only reset next_out to the beginning of the history buffer when
+ avail_out is zero and all output has been consumed.
+
+ inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
+ not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
+ windowBits < 8). msg is set to null if there is no error message.
+ inflateInit2 does not perform any decompression: this will be done by
+ inflate().
+*/
+
+extern int inflateSync OF((z_stream *strm));
+/*
+ Skips invalid compressed data until the special marker (see deflate()
+ above) can be found, or until all available input is skipped. No output
+ is provided.
+
+ inflateSync returns Z_OK if the special marker has been found, Z_BUF_ERROR
+ if no more input was provided, Z_DATA_ERROR if no marker has been found,
+ or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
+  case, the application may save the current value of total_in which
+ indicates where valid compressed data was found. In the error case, the
+ application may repeatedly call inflateSync, providing more input each time,
+ until success or end of the input data.
+*/
+
+extern int inflateReset OF((z_stream *strm));
+/*
+ This function is equivalent to inflateEnd followed by inflateInit,
+ but does not free and reallocate all the internal decompression state.
+ The stream will keep attributes that may have been set by inflateInit2.
+
+ inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being NULL).
+*/
+
+extern int inflateIncomp OF((z_stream *strm));
+/*
+ This function adds the data at next_in (avail_in bytes) to the output
+ history without performing any output. There must be no pending output,
+ and the decompressor must be expecting to see the start of a block.
+ Calling this function is equivalent to decompressing a stored block
+ containing the data at next_in (except that the data is not output).
+*/
+
+ /* checksum functions */
+
+/*
+ This function is not related to compression but is exported
+ anyway because it might be useful in applications using the
+ compression library.
+*/
+
+extern uLong adler32 OF((uLong adler, Bytef *buf, uInt len));
+
+/*
+ Update a running Adler-32 checksum with the bytes buf[0..len-1] and
+ return the updated checksum. If buf is NULL, this function returns
+ the required initial value for the checksum.
+ An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
+ much faster. Usage example:
+
+ uLong adler = adler32(0L, Z_NULL, 0);
+
+ while (read_buffer(buffer, length) != EOF) {
+ adler = adler32(adler, buffer, length);
+ }
+ if (adler != original_adler) error();
+*/
+
+#ifndef _Z_UTIL_H
+ struct internal_state {int dummy;}; /* hack for buggy compilers */
+#endif
+
+#endif /* _ZLIB_H */
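The declarations above are the whole decompression interface the bootloader relies on. For reference, a caller typically drives them in a refill/drain loop like the sketch below; this is illustrative only and not part of the patch, and read_compressed() and consume() are hypothetical helpers.

/* Illustrative sketch (not part of this patch): driving the inflate API
 * declared above. read_compressed() and consume() are hypothetical helpers
 * that supply compressed bytes and dispose of uncompressed bytes. */
extern uInt read_compressed(Byte *buf, uInt len);   /* hypothetical */
extern void consume(const Byte *buf, uInt len);     /* hypothetical */

static int decompress_stream_sketch(void)
{
  static Byte in[4096], out[4096];
  z_stream s;
  int rc = Z_OK;

  s.zalloc = Z_NULL;               /* let inflateInit install default allocators */
  s.zfree  = Z_NULL;
  s.opaque = Z_NULL;
  s.next_in  = in;  s.avail_in  = 0;
  s.next_out = out; s.avail_out = sizeof(out);

  if (inflateInit(&s) != Z_OK)
    return -1;

  do {
    if (s.avail_in == 0) {         /* refill the input when it is exhausted */
      s.avail_in = read_compressed(in, sizeof(in));
      s.next_in  = in;
      if (s.avail_in == 0)
        break;                     /* ran out of input before Z_STREAM_END */
    }
    rc = inflate(&s, Z_NO_FLUSH);
    if (s.avail_out == 0 || rc == Z_STREAM_END) {    /* drain the output */
      consume(out, sizeof(out) - s.avail_out);
      s.next_out = out; s.avail_out = sizeof(out);
    }
  } while (rc == Z_OK);

  inflateEnd(&s);
  return (rc == Z_STREAM_END) ? 0 : -1;
}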
diff --git a/c/src/lib/libbsp/powerpc/shared/clock/Makefile.in b/c/src/lib/libbsp/powerpc/shared/clock/Makefile.in
new file mode 100644
index 0000000000..9a02c0bf63
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/clock/Makefile.in
@@ -0,0 +1,38 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/shared/clock
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+H_FILES =
+
+SRCS = $(C_FILES) $(H_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
+
+INSTALLDIRS = $(PROJECT_INCLUDE)/bsp
+
+$(INSTALLDIRS):
+ @$(mkinstalldirs) $(INSTALLDIRS)
+
+all: ${ARCH} $(SRCS)
+
+# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
+install: all
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libbsp/powerpc/shared/clock/p_clock.c b/c/src/lib/libbsp/powerpc/shared/clock/p_clock.c
new file mode 100644
index 0000000000..5de55190d8
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/clock/p_clock.c
@@ -0,0 +1,37 @@
+/*
+ *  Clock Tick interrupt connection code.
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ *  The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * Modified to support the MPC750.
+ * Modifications Copyright (c) 1999 Eric Valette valette@crf.canon.fr
+ *
+ * $Id$
+ */
+
+#include <bsp.h>
+#include <bsp/irq.h>
+#include <libcpu/c_clock.h>
+
+static rtems_irq_connect_data clockIrqData = {BSP_DECREMENTER,
+ clockIsr,
+ (rtems_irq_enable)clockOn,
+ (rtems_irq_disable)clockOff,
+ (rtems_irq_is_enabled) clockIsOn};
+
+
+int BSP_disconnect_clock_handler (void)
+{
+ return BSP_remove_rtems_irq_handler (&clockIrqData);
+}
+
+int BSP_connect_clock_handler (void)
+{
+ return BSP_install_rtems_irq_handler (&clockIrqData);
+}
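For context, these two functions are the only hooks the shared clock driver needs. A minimal sketch of how an initialization/shutdown path might call them follows; it is illustrative only, the function names are hypothetical, and the convention that a zero return indicates failure is an assumption taken from the irq handler installers.

/* Illustrative sketch only (names hypothetical): connecting and
 * disconnecting the decrementer-driven clock tick. */
void sketch_clock_initialize(void)
{
  if (!BSP_connect_clock_handler()) {
    /* assumed failure indication: a real driver would report this error */
  }
}

void sketch_clock_shutdown(void)
{
  (void) BSP_disconnect_clock_handler();
}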
diff --git a/c/src/lib/libbsp/powerpc/shared/console/Makefile.in b/c/src/lib/libbsp/powerpc/shared/console/Makefile.in
new file mode 100644
index 0000000000..1709fc8612
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/console/Makefile.in
@@ -0,0 +1,48 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/shared/console
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@:@srcdir@/../../../shared
+
+# C source names, if any, go here -- minus the .c
+H_FILES = $(srcdir)/consoleIo.h $(srcdir)/keyboard.h $(srcdir)/uart.h
+
+SRCS = $(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
+
+INSTALLDIRS = $(PROJECT_INCLUDE)/bsp
+
+$(INSTALLDIRS):
+ @$(mkinstalldirs) $(INSTALLDIRS)
+
+#
+# Add your list of files to delete here. The config files
+# already know how to delete some stuff, so you may want
+# to just run 'make clean' first to see what gets missed.
+# 'make clobber' already includes 'make clean'.
+#
+
+
+preinstall:
+ @$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
+ @$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
+
+all: ${ARCH} $(SRCS) preinstall
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libbsp/powerpc/mcp750/console/console.c b/c/src/lib/libbsp/powerpc/shared/console/console.c
index fc50656af1..834f960cfc 100644
--- a/c/src/lib/libbsp/powerpc/mcp750/console/console.c
+++ b/c/src/lib/libbsp/powerpc/shared/console/console.c
@@ -45,10 +45,10 @@ extern int close(int fd);
* BSP_UART_COM2
*/
-extern int BSPConsolePort;
+int BSPConsolePort = BSP_UART_COM1;
/* int BSPConsolePort = BSP_UART_COM2; */
-extern int BSPBaseBaud;
+int BSPBaseBaud = 115200;
/*-------------------------------------------------------------------------+
| External Prototypes
@@ -84,7 +84,6 @@ isr_is_on(const rtems_irq_connect_data *irq)
return BSP_irq_enabled_at_i8259s(irq->name);
}
-/*
void console_reserve_resources(rtems_configuration_table *conf)
{
if(BSPConsolePort != BSP_CONSOLE_PORT_CONSOLE)
@@ -94,7 +93,6 @@ void console_reserve_resources(rtems_configuration_table *conf)
return;
}
-*/
void __assert (const char *file, int line, const char *msg)
{
diff --git a/c/src/lib/libbsp/powerpc/shared/console/consoleIo.h b/c/src/lib/libbsp/powerpc/shared/console/consoleIo.h
new file mode 100644
index 0000000000..c1d76f5a77
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/console/consoleIo.h
@@ -0,0 +1,45 @@
+/*
+ * consoleIo.h -- console I/O package interface
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#ifndef __CONSOLE_IO_H
+#define __CONSOLE_IO_H
+
+
+typedef enum {
+ CONSOLE_LOG = 1,
+ CONSOLE_SERIAL = 2,
+ CONSOLE_VGA = 3,
+ CONSOLE_VACUUM = 4
+} ioType;
+
+typedef volatile unsigned char * __io_ptr;
+
+typedef struct {
+ __io_ptr io_base;
+ __io_ptr isa_mem_base;
+ __io_ptr pci_mmio_base;
+ __io_ptr pci_dma_offset;
+} board_memory_map;
+
+extern board_memory_map *ptr_mem_map;
+extern unsigned long ticks_per_ms;
+
+extern int select_console(ioType t);
+extern int printk(const char *, ...) __attribute__((format(printf, 1, 2)));
+extern void udelay(int);
+extern void debug_putc(const unsigned char c);
+extern int debug_getc(void);
+extern int debug_tstc(void);
+int kbdreset(void);
+
+
+#endif
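This header is the whole polled-console contract used by the bootloader and early BSP code. A short illustrative fragment (not part of the patch; early_debug_sketch() is a hypothetical name) shows the intended call sequence:

/* Illustrative only: early debug output through the interface above. */
#include <bsp/consoleIo.h>

void early_debug_sketch(void)
{
  select_console(CONSOLE_SERIAL);            /* route output to the UART   */
  printk("ticks_per_ms = %u\n", (unsigned) ticks_per_ms);

  udelay(1000);                              /* busy-wait about 1 ms       */

  if (debug_tstc())                          /* poll for a pending char    */
    printk("got 0x%x\n", (unsigned) debug_getc());
}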
diff --git a/c/src/lib/libbsp/powerpc/shared/console/inch.c b/c/src/lib/libbsp/powerpc/shared/console/inch.c
new file mode 100644
index 0000000000..5cd7148e97
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/console/inch.c
@@ -0,0 +1,318 @@
+/*
+ * inch.c -- keyboard minimal driver
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * This code is based on the pc386 BSP inch.c so the following
+ * copyright also applies:
+ *
+ * (C) Copyright 1997 -
+ * - NavIST Group - Real-Time Distributed Systems and Industrial Automation
+ *
+ * http://pandora.ist.utl.pt
+ *
+ * Instituto Superior Tecnico * Lisboa * PORTUGAL
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <bsp.h>
+#include <bsp/irq.h>
+
+/*-------------------------------------------------------------------------+
+| Constants
++--------------------------------------------------------------------------*/
+#define KBD_CTL 0x61 /* -------------------------------- */
+#define KBD_DATA 0x60 /* Ports for PC keyboard controller */
+#define KBD_STATUS 0x64 /* -------------------------------- */
+
+#define KBD_BUF_SIZE 256
+
+/*-------------------------------------------------------------------------+
+| Global Variables
++--------------------------------------------------------------------------*/
+static char key_map[] =
+{
+ 0,033,'1','2','3','4','5','6','7','8','9','0','-','=','\b','\t',
+ 'q','w','e','r','t','y','u','i','o','p','[',']',015,0x80,
+ 'a','s','d','f','g','h','j','k','l',';',047,0140,0x80,
+ 0134,'z','x','c','v','b','n','m',',','.','/',0x80,
+ '*',0x80,' ',0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
+ 0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
+ 0x80,0x80,0x80,'0',0177
+}; /* Keyboard scancode -> character map with no modifiers. */
+
+static char shift_map[] =
+{
+ 0,033,'!','@','#','$','%','^','&','*','(',')','_','+','\b','\t',
+ 'Q','W','E','R','T','Y','U','I','O','P','{','}',015,0x80,
+ 'A','S','D','F','G','H','J','K','L',':',042,'~',0x80,
+ '|','Z','X','C','V','B','N','M','<','>','?',0x80,
+ '*',0x80,' ',0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
+ 0x80,0x80,0x80,0x80,'7','8','9',0x80,'4','5','6',0x80,
+ '1','2','3','0',0177
+}; /* Keyboard scancode -> character map with SHIFT key modifier. */
+
+static char kbd_buffer[KBD_BUF_SIZE];
+static rtems_unsigned16 kbd_first = 0;
+static rtems_unsigned16 kbd_last = 0;
+static rtems_unsigned16 kbd_end = KBD_BUF_SIZE - 1;
+
+/*-------------------------------------------------------------------------+
+| Function: rtemsReboot
+| Description: Reboot the PC.
+| Global Variables: None.
+| Arguments: None.
+| Returns: Nothing.
++--------------------------------------------------------------------------*/
+void rtemsReboot(void)
+{
+ /* shutdown and reboot */
+ outport_byte(0x64, 0xFE); /* use keyboard controller to do the job... */
+} /* rtemsReboot */
+
+/*-------------------------------------------------------------------------+
+| Function: _IBMPC_scankey
+| Description: This function can be called during a poll for input, or by
+| an ISR. Basically any time you want to process a keypress.
+| Global Variables: key_map, shift_map.
+| Arguments: outChar - character read in case of a valid reading,
+| otherwise unchanged.
+| Returns: TRUE in case a valid character has been read,
+| FALSE otherwise.
++--------------------------------------------------------------------------*/
+rtems_boolean
+_IBMPC_scankey(char *outChar)
+{
+ unsigned char inChar;
+ static int alt_pressed = 0;
+ static int ctrl_pressed = 0;
+ static int shift_pressed = 0;
+ static int caps_pressed = 0;
+ static int extended = 0;
+
+ *outChar = '\0'; /* default value if we return FALSE */
+
+ /* Read keyboard controller, toggle enable */
+ inport_byte(KBD_CTL, inChar);
+ outport_byte(KBD_CTL, inChar & ~0x80);
+ outport_byte(KBD_CTL, inChar | 0x80);
+ outport_byte(KBD_CTL, inChar & ~0x80);
+
+ /* See if it has data */
+ inport_byte(KBD_STATUS, inChar);
+ if ((inChar & 0x01) == 0)
+ return FALSE;
+
+ /* Read the data. Handle nonsense with shift, control, etc. */
+ inport_byte(KBD_DATA, inChar);
+
+ if (extended)
+ extended--;
+
+ switch (inChar)
+ {
+ case 0xe0:
+ extended = 2;
+ return FALSE;
+ break;
+
+ case 0x38:
+ alt_pressed = 1;
+ return FALSE;
+ break;
+ case 0xb8:
+ alt_pressed = 0;
+ return FALSE;
+ break;
+
+ case 0x1d:
+ ctrl_pressed = 1;
+ return FALSE;
+ break;
+ case 0x9d:
+ ctrl_pressed = 0;
+ return FALSE;
+ break;
+
+ case 0x2a:
+ if (extended)
+ return FALSE;
+ case 0x36:
+ shift_pressed = 1;
+ return FALSE;
+ break;
+ case 0xaa:
+ if (extended)
+ return FALSE;
+ case 0xb6:
+ shift_pressed = 0;
+ return FALSE;
+ break;
+
+ case 0x3a:
+ caps_pressed = 1;
+ return FALSE;
+ break;
+ case 0xba:
+ caps_pressed = 0;
+ return FALSE;
+ break;
+
+ case 0x53:
+ if (ctrl_pressed && alt_pressed)
+ rtemsReboot(); /* ctrl+alt+del -> reboot */
+ break;
+
+ /*
+ * Ignore unrecognized keys--usually arrow and such
+ */
+ default:
+ if ((inChar & 0x80) || (inChar > 0x39))
+ /* High-bit on means key is being released, not pressed */
+ return FALSE;
+ break;
+ } /* switch */
+
+ /* Strip high bit, look up in our map */
+ inChar &= 0x7f;
+ if (ctrl_pressed)
+ {
+ *outChar = key_map[inChar];
+ *outChar &= 037;
+ }
+ else
+ {
+ *outChar = shift_pressed ? shift_map[inChar] : key_map[inChar];
+ if (caps_pressed)
+ {
+ if (*outChar >= 'A' && *outChar <= 'Z')
+ *outChar += 'a' - 'A';
+ else if (*outChar >= 'a' && *outChar <= 'z')
+ *outChar -= 'a' - 'A';
+ }
+ }
+
+ return TRUE;
+} /* _IBMPC_scankey */
+
+/*-------------------------------------------------------------------------+
+| Function: _IBMPC_keyboard_isr
+| Description: Interrupt Service Routine for keyboard (0x01) IRQ.
+| Global Variables: kbd_buffer, kbd_first, kbd_last.
+| Arguments: None.
+| Returns: Nothing.
++--------------------------------------------------------------------------*/
+void _IBMPC_keyboard_isr()
+{
+ if (_IBMPC_scankey(&kbd_buffer[kbd_last]))
+ {
+ /* Got one; save it if there is enough room in buffer. */
+ unsigned int next = (kbd_last == kbd_end) ? 0 : kbd_last + 1;
+
+ if (next != kbd_first)
+ {
+ kbd_last = next;
+ }
+ }
+} /* _IBMPC_keyboard_isr */
+
+
+/*-------------------------------------------------------------------------+
+| Function: _IBMPC_chrdy
+| Description: Check keyboard ISR buffer and return character if not empty.
+| Global Variables: kbd_buffer, kbd_first, kbd_last.
+| Arguments: c - character read if keyboard buffer not empty, otherwise
+| unchanged.
+| Returns: TRUE if keyboard buffer not empty, FALSE otherwise.
++--------------------------------------------------------------------------*/
+rtems_boolean
+_IBMPC_chrdy(char *c)
+{
+ /* Check buffer our ISR builds */
+ if (kbd_first != kbd_last)
+ {
+ *c = kbd_buffer[kbd_first];
+
+ kbd_first = (kbd_first + 1) % KBD_BUF_SIZE;
+ return TRUE;
+ }
+ else
+ return FALSE;
+} /* _IBMPC_chrdy */
+
+
+/*-------------------------------------------------------------------------+
+| Function: _IBMPC_inch
+| Description: Poll keyboard until a character is ready and return it.
+| Global Variables: None.
+| Arguments: None.
+| Returns: character read from keyboard.
++--------------------------------------------------------------------------*/
+char
+_IBMPC_inch(void)
+{
+ char c;
+ while (!_IBMPC_chrdy(&c))
+ continue;
+
+ return c;
+} /* _IBMPC_inch */
+
+
+ /*
+ * Routine that can be used before interrupt management is initialized.
+ */
+
+char
+BSP_wait_polled_input(void)
+{
+ char c;
+ while (!_IBMPC_scankey(&c))
+ continue;
+
+ return c;
+}
+
+/*-------------------------------------------------------------------------+
+| Function: _IBMPC_inch_sleep
+| Description: If a character is ready, return it; otherwise sleep until
+| it is ready
+| Global Variables: None.
+| Arguments: None.
+| Returns: character read from keyboard.
++--------------------------------------------------------------------------*/
+char
+_IBMPC_inch_sleep(void)
+{
+ char c;
+ rtems_interval ticks_per_second;
+
+ ticks_per_second = 0;
+
+ for(;;)
+ {
+ if(_IBMPC_chrdy(&c))
+ {
+ return c;
+ }
+
+ if(ticks_per_second == 0)
+ {
+ rtems_clock_get(RTEMS_CLOCK_GET_TICKS_PER_SECOND,
+ &ticks_per_second);
+ }
+ rtems_task_wake_after((ticks_per_second+24)/25);
+ }
+
+ return c;
+} /* _IBMPC_inch_sleep */
+
+
+
+
+
+
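The helpers above expose three ways of getting a character: purely polled scanning before interrupt management exists, spinning on the ISR-fed ring buffer, and sleeping between polls of that buffer. A small illustrative fragment (not part of the patch; echo_keyboard_sketch() is a hypothetical name):

/* Illustrative only: the three keyboard input paths provided above. */
void echo_keyboard_sketch(void)
{
  char c;

  /* Before interrupt management is initialized: poll the controller. */
  c = BSP_wait_polled_input();

  /* Once _IBMPC_keyboard_isr() is installed: spin on the ring buffer... */
  c = _IBMPC_inch();

  /* ...or let other tasks run while waiting (polls about 25 times/s). */
  c = _IBMPC_inch_sleep();

  (void) c;
}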
diff --git a/c/src/lib/libbsp/powerpc/shared/console/keyboard.h b/c/src/lib/libbsp/powerpc/shared/console/keyboard.h
new file mode 100644
index 0000000000..a7cb39bbd1
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/console/keyboard.h
@@ -0,0 +1,433 @@
+/*
+ * keyboard.h -- keyboard definitions.
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#ifndef __LINUX_KEYBOARD_H
+#define __LINUX_KEYBOARD_H
+
+#define KG_SHIFT 0
+#define KG_CTRL 2
+#define KG_ALT 3
+#define KG_ALTGR 1
+#define KG_SHIFTL 4
+#define KG_SHIFTR 5
+#define KG_CTRLL 6
+#define KG_CTRLR 7
+#define KG_CAPSSHIFT 8
+
+#define NR_SHIFT 9
+
+#define NR_KEYS 128
+#define MAX_NR_KEYMAPS 256
+/* This means 64Kb if all keymaps are allocated. Only the superuser
+ may increase the number of keymaps beyond MAX_NR_OF_USER_KEYMAPS. */
+#define MAX_NR_OF_USER_KEYMAPS 256 /* should be at least 7 */
+
+#define MAX_NR_FUNC 256 /* max nr of strings assigned to keys */
+
+#define KT_LATIN 0 /* we depend on this being zero */
+#define KT_LETTER 11 /* symbol that can be acted upon by CapsLock */
+#define KT_FN 1
+#define KT_SPEC 2
+#define KT_PAD 3
+#define KT_DEAD 4
+#define KT_CONS 5
+#define KT_CUR 6
+#define KT_SHIFT 7
+#define KT_META 8
+#define KT_ASCII 9
+#define KT_LOCK 10
+#define KT_SLOCK 12
+
+#define K(t,v) (((t)<<8)|(v))
+#define KTYP(x) ((x) >> 8)
+#define KVAL(x) ((x) & 0xff)
+
+#define K_F1 K(KT_FN,0)
+#define K_F2 K(KT_FN,1)
+#define K_F3 K(KT_FN,2)
+#define K_F4 K(KT_FN,3)
+#define K_F5 K(KT_FN,4)
+#define K_F6 K(KT_FN,5)
+#define K_F7 K(KT_FN,6)
+#define K_F8 K(KT_FN,7)
+#define K_F9 K(KT_FN,8)
+#define K_F10 K(KT_FN,9)
+#define K_F11 K(KT_FN,10)
+#define K_F12 K(KT_FN,11)
+#define K_F13 K(KT_FN,12)
+#define K_F14 K(KT_FN,13)
+#define K_F15 K(KT_FN,14)
+#define K_F16 K(KT_FN,15)
+#define K_F17 K(KT_FN,16)
+#define K_F18 K(KT_FN,17)
+#define K_F19 K(KT_FN,18)
+#define K_F20 K(KT_FN,19)
+#define K_FIND K(KT_FN,20)
+#define K_INSERT K(KT_FN,21)
+#define K_REMOVE K(KT_FN,22)
+#define K_SELECT K(KT_FN,23)
+#define K_PGUP K(KT_FN,24) /* PGUP is a synonym for PRIOR */
+#define K_PGDN K(KT_FN,25) /* PGDN is a synonym for NEXT */
+#define K_MACRO K(KT_FN,26)
+#define K_HELP K(KT_FN,27)
+#define K_DO K(KT_FN,28)
+#define K_PAUSE K(KT_FN,29)
+#define K_F21 K(KT_FN,30)
+#define K_F22 K(KT_FN,31)
+#define K_F23 K(KT_FN,32)
+#define K_F24 K(KT_FN,33)
+#define K_F25 K(KT_FN,34)
+#define K_F26 K(KT_FN,35)
+#define K_F27 K(KT_FN,36)
+#define K_F28 K(KT_FN,37)
+#define K_F29 K(KT_FN,38)
+#define K_F30 K(KT_FN,39)
+#define K_F31 K(KT_FN,40)
+#define K_F32 K(KT_FN,41)
+#define K_F33 K(KT_FN,42)
+#define K_F34 K(KT_FN,43)
+#define K_F35 K(KT_FN,44)
+#define K_F36 K(KT_FN,45)
+#define K_F37 K(KT_FN,46)
+#define K_F38 K(KT_FN,47)
+#define K_F39 K(KT_FN,48)
+#define K_F40 K(KT_FN,49)
+#define K_F41 K(KT_FN,50)
+#define K_F42 K(KT_FN,51)
+#define K_F43 K(KT_FN,52)
+#define K_F44 K(KT_FN,53)
+#define K_F45 K(KT_FN,54)
+#define K_F46 K(KT_FN,55)
+#define K_F47 K(KT_FN,56)
+#define K_F48 K(KT_FN,57)
+#define K_F49 K(KT_FN,58)
+#define K_F50 K(KT_FN,59)
+#define K_F51 K(KT_FN,60)
+#define K_F52 K(KT_FN,61)
+#define K_F53 K(KT_FN,62)
+#define K_F54 K(KT_FN,63)
+#define K_F55 K(KT_FN,64)
+#define K_F56 K(KT_FN,65)
+#define K_F57 K(KT_FN,66)
+#define K_F58 K(KT_FN,67)
+#define K_F59 K(KT_FN,68)
+#define K_F60 K(KT_FN,69)
+#define K_F61 K(KT_FN,70)
+#define K_F62 K(KT_FN,71)
+#define K_F63 K(KT_FN,72)
+#define K_F64 K(KT_FN,73)
+#define K_F65 K(KT_FN,74)
+#define K_F66 K(KT_FN,75)
+#define K_F67 K(KT_FN,76)
+#define K_F68 K(KT_FN,77)
+#define K_F69 K(KT_FN,78)
+#define K_F70 K(KT_FN,79)
+#define K_F71 K(KT_FN,80)
+#define K_F72 K(KT_FN,81)
+#define K_F73 K(KT_FN,82)
+#define K_F74 K(KT_FN,83)
+#define K_F75 K(KT_FN,84)
+#define K_F76 K(KT_FN,85)
+#define K_F77 K(KT_FN,86)
+#define K_F78 K(KT_FN,87)
+#define K_F79 K(KT_FN,88)
+#define K_F80 K(KT_FN,89)
+#define K_F81 K(KT_FN,90)
+#define K_F82 K(KT_FN,91)
+#define K_F83 K(KT_FN,92)
+#define K_F84 K(KT_FN,93)
+#define K_F85 K(KT_FN,94)
+#define K_F86 K(KT_FN,95)
+#define K_F87 K(KT_FN,96)
+#define K_F88 K(KT_FN,97)
+#define K_F89 K(KT_FN,98)
+#define K_F90 K(KT_FN,99)
+#define K_F91 K(KT_FN,100)
+#define K_F92 K(KT_FN,101)
+#define K_F93 K(KT_FN,102)
+#define K_F94 K(KT_FN,103)
+#define K_F95 K(KT_FN,104)
+#define K_F96 K(KT_FN,105)
+#define K_F97 K(KT_FN,106)
+#define K_F98 K(KT_FN,107)
+#define K_F99 K(KT_FN,108)
+#define K_F100 K(KT_FN,109)
+#define K_F101 K(KT_FN,110)
+#define K_F102 K(KT_FN,111)
+#define K_F103 K(KT_FN,112)
+#define K_F104 K(KT_FN,113)
+#define K_F105 K(KT_FN,114)
+#define K_F106 K(KT_FN,115)
+#define K_F107 K(KT_FN,116)
+#define K_F108 K(KT_FN,117)
+#define K_F109 K(KT_FN,118)
+#define K_F110 K(KT_FN,119)
+#define K_F111 K(KT_FN,120)
+#define K_F112 K(KT_FN,121)
+#define K_F113 K(KT_FN,122)
+#define K_F114 K(KT_FN,123)
+#define K_F115 K(KT_FN,124)
+#define K_F116 K(KT_FN,125)
+#define K_F117 K(KT_FN,126)
+#define K_F118 K(KT_FN,127)
+#define K_F119 K(KT_FN,128)
+#define K_F120 K(KT_FN,129)
+#define K_F121 K(KT_FN,130)
+#define K_F122 K(KT_FN,131)
+#define K_F123 K(KT_FN,132)
+#define K_F124 K(KT_FN,133)
+#define K_F125 K(KT_FN,134)
+#define K_F126 K(KT_FN,135)
+#define K_F127 K(KT_FN,136)
+#define K_F128 K(KT_FN,137)
+#define K_F129 K(KT_FN,138)
+#define K_F130 K(KT_FN,139)
+#define K_F131 K(KT_FN,140)
+#define K_F132 K(KT_FN,141)
+#define K_F133 K(KT_FN,142)
+#define K_F134 K(KT_FN,143)
+#define K_F135 K(KT_FN,144)
+#define K_F136 K(KT_FN,145)
+#define K_F137 K(KT_FN,146)
+#define K_F138 K(KT_FN,147)
+#define K_F139 K(KT_FN,148)
+#define K_F140 K(KT_FN,149)
+#define K_F141 K(KT_FN,150)
+#define K_F142 K(KT_FN,151)
+#define K_F143 K(KT_FN,152)
+#define K_F144 K(KT_FN,153)
+#define K_F145 K(KT_FN,154)
+#define K_F146 K(KT_FN,155)
+#define K_F147 K(KT_FN,156)
+#define K_F148 K(KT_FN,157)
+#define K_F149 K(KT_FN,158)
+#define K_F150 K(KT_FN,159)
+#define K_F151 K(KT_FN,160)
+#define K_F152 K(KT_FN,161)
+#define K_F153 K(KT_FN,162)
+#define K_F154 K(KT_FN,163)
+#define K_F155 K(KT_FN,164)
+#define K_F156 K(KT_FN,165)
+#define K_F157 K(KT_FN,166)
+#define K_F158 K(KT_FN,167)
+#define K_F159 K(KT_FN,168)
+#define K_F160 K(KT_FN,169)
+#define K_F161 K(KT_FN,170)
+#define K_F162 K(KT_FN,171)
+#define K_F163 K(KT_FN,172)
+#define K_F164 K(KT_FN,173)
+#define K_F165 K(KT_FN,174)
+#define K_F166 K(KT_FN,175)
+#define K_F167 K(KT_FN,176)
+#define K_F168 K(KT_FN,177)
+#define K_F169 K(KT_FN,178)
+#define K_F170 K(KT_FN,179)
+#define K_F171 K(KT_FN,180)
+#define K_F172 K(KT_FN,181)
+#define K_F173 K(KT_FN,182)
+#define K_F174 K(KT_FN,183)
+#define K_F175 K(KT_FN,184)
+#define K_F176 K(KT_FN,185)
+#define K_F177 K(KT_FN,186)
+#define K_F178 K(KT_FN,187)
+#define K_F179 K(KT_FN,188)
+#define K_F180 K(KT_FN,189)
+#define K_F181 K(KT_FN,190)
+#define K_F182 K(KT_FN,191)
+#define K_F183 K(KT_FN,192)
+#define K_F184 K(KT_FN,193)
+#define K_F185 K(KT_FN,194)
+#define K_F186 K(KT_FN,195)
+#define K_F187 K(KT_FN,196)
+#define K_F188 K(KT_FN,197)
+#define K_F189 K(KT_FN,198)
+#define K_F190 K(KT_FN,199)
+#define K_F191 K(KT_FN,200)
+#define K_F192 K(KT_FN,201)
+#define K_F193 K(KT_FN,202)
+#define K_F194 K(KT_FN,203)
+#define K_F195 K(KT_FN,204)
+#define K_F196 K(KT_FN,205)
+#define K_F197 K(KT_FN,206)
+#define K_F198 K(KT_FN,207)
+#define K_F199 K(KT_FN,208)
+#define K_F200 K(KT_FN,209)
+#define K_F201 K(KT_FN,210)
+#define K_F202 K(KT_FN,211)
+#define K_F203 K(KT_FN,212)
+#define K_F204 K(KT_FN,213)
+#define K_F205 K(KT_FN,214)
+#define K_F206 K(KT_FN,215)
+#define K_F207 K(KT_FN,216)
+#define K_F208 K(KT_FN,217)
+#define K_F209 K(KT_FN,218)
+#define K_F210 K(KT_FN,219)
+#define K_F211 K(KT_FN,220)
+#define K_F212 K(KT_FN,221)
+#define K_F213 K(KT_FN,222)
+#define K_F214 K(KT_FN,223)
+#define K_F215 K(KT_FN,224)
+#define K_F216 K(KT_FN,225)
+#define K_F217 K(KT_FN,226)
+#define K_F218 K(KT_FN,227)
+#define K_F219 K(KT_FN,228)
+#define K_F220 K(KT_FN,229)
+#define K_F221 K(KT_FN,230)
+#define K_F222 K(KT_FN,231)
+#define K_F223 K(KT_FN,232)
+#define K_F224 K(KT_FN,233)
+#define K_F225 K(KT_FN,234)
+#define K_F226 K(KT_FN,235)
+#define K_F227 K(KT_FN,236)
+#define K_F228 K(KT_FN,237)
+#define K_F229 K(KT_FN,238)
+#define K_F230 K(KT_FN,239)
+#define K_F231 K(KT_FN,240)
+#define K_F232 K(KT_FN,241)
+#define K_F233 K(KT_FN,242)
+#define K_F234 K(KT_FN,243)
+#define K_F235 K(KT_FN,244)
+#define K_F236 K(KT_FN,245)
+#define K_F237 K(KT_FN,246)
+#define K_F238 K(KT_FN,247)
+#define K_F239 K(KT_FN,248)
+#define K_F240 K(KT_FN,249)
+#define K_F241 K(KT_FN,250)
+#define K_F242 K(KT_FN,251)
+#define K_F243 K(KT_FN,252)
+#define K_F244 K(KT_FN,253)
+#define K_F245 K(KT_FN,254)
+#define K_UNDO K(KT_FN,255)
+
+
+#define K_HOLE K(KT_SPEC,0)
+#define K_ENTER K(KT_SPEC,1)
+#define K_SH_REGS K(KT_SPEC,2)
+#define K_SH_MEM K(KT_SPEC,3)
+#define K_SH_STAT K(KT_SPEC,4)
+#define K_BREAK K(KT_SPEC,5)
+#define K_CONS K(KT_SPEC,6)
+#define K_CAPS K(KT_SPEC,7)
+#define K_NUM K(KT_SPEC,8)
+#define K_HOLD K(KT_SPEC,9)
+#define K_SCROLLFORW K(KT_SPEC,10)
+#define K_SCROLLBACK K(KT_SPEC,11)
+#define K_BOOT K(KT_SPEC,12)
+#define K_CAPSON K(KT_SPEC,13)
+#define K_COMPOSE K(KT_SPEC,14)
+#define K_SAK K(KT_SPEC,15)
+#define K_DECRCONSOLE K(KT_SPEC,16)
+#define K_INCRCONSOLE K(KT_SPEC,17)
+#define K_SPAWNCONSOLE K(KT_SPEC,18)
+#define K_BARENUMLOCK K(KT_SPEC,19)
+
+#define K_ALLOCATED K(KT_SPEC,126) /* dynamically allocated keymap */
+#define K_NOSUCHMAP K(KT_SPEC,127) /* returned by KDGKBENT */
+
+#define K_P0 K(KT_PAD,0)
+#define K_P1 K(KT_PAD,1)
+#define K_P2 K(KT_PAD,2)
+#define K_P3 K(KT_PAD,3)
+#define K_P4 K(KT_PAD,4)
+#define K_P5 K(KT_PAD,5)
+#define K_P6 K(KT_PAD,6)
+#define K_P7 K(KT_PAD,7)
+#define K_P8 K(KT_PAD,8)
+#define K_P9 K(KT_PAD,9)
+#define K_PPLUS K(KT_PAD,10) /* key-pad plus */
+#define K_PMINUS K(KT_PAD,11) /* key-pad minus */
+#define K_PSTAR K(KT_PAD,12) /* key-pad asterisk (star) */
+#define K_PSLASH K(KT_PAD,13) /* key-pad slash */
+#define K_PENTER K(KT_PAD,14) /* key-pad enter */
+#define K_PCOMMA K(KT_PAD,15) /* key-pad comma: kludge... */
+#define K_PDOT K(KT_PAD,16) /* key-pad dot (period): kludge... */
+#define K_PPLUSMINUS K(KT_PAD,17) /* key-pad plus/minus */
+#define K_PPARENL K(KT_PAD,18) /* key-pad left parenthesis */
+#define K_PPARENR K(KT_PAD,19) /* key-pad right parenthesis */
+
+#define NR_PAD 20
+
+#define K_DGRAVE K(KT_DEAD,0)
+#define K_DACUTE K(KT_DEAD,1)
+#define K_DCIRCM K(KT_DEAD,2)
+#define K_DTILDE K(KT_DEAD,3)
+#define K_DDIERE K(KT_DEAD,4)
+#define K_DCEDIL K(KT_DEAD,5)
+
+#define NR_DEAD 6
+
+#define K_DOWN K(KT_CUR,0)
+#define K_LEFT K(KT_CUR,1)
+#define K_RIGHT K(KT_CUR,2)
+#define K_UP K(KT_CUR,3)
+
+#define K_SHIFT K(KT_SHIFT,KG_SHIFT)
+#define K_CTRL K(KT_SHIFT,KG_CTRL)
+#define K_ALT K(KT_SHIFT,KG_ALT)
+#define K_ALTGR K(KT_SHIFT,KG_ALTGR)
+#define K_SHIFTL K(KT_SHIFT,KG_SHIFTL)
+#define K_SHIFTR K(KT_SHIFT,KG_SHIFTR)
+#define K_CTRLL K(KT_SHIFT,KG_CTRLL)
+#define K_CTRLR K(KT_SHIFT,KG_CTRLR)
+#define K_CAPSSHIFT K(KT_SHIFT,KG_CAPSSHIFT)
+
+#define K_ASC0 K(KT_ASCII,0)
+#define K_ASC1 K(KT_ASCII,1)
+#define K_ASC2 K(KT_ASCII,2)
+#define K_ASC3 K(KT_ASCII,3)
+#define K_ASC4 K(KT_ASCII,4)
+#define K_ASC5 K(KT_ASCII,5)
+#define K_ASC6 K(KT_ASCII,6)
+#define K_ASC7 K(KT_ASCII,7)
+#define K_ASC8 K(KT_ASCII,8)
+#define K_ASC9 K(KT_ASCII,9)
+#define K_HEX0 K(KT_ASCII,10)
+#define K_HEX1 K(KT_ASCII,11)
+#define K_HEX2 K(KT_ASCII,12)
+#define K_HEX3 K(KT_ASCII,13)
+#define K_HEX4 K(KT_ASCII,14)
+#define K_HEX5 K(KT_ASCII,15)
+#define K_HEX6 K(KT_ASCII,16)
+#define K_HEX7 K(KT_ASCII,17)
+#define K_HEX8 K(KT_ASCII,18)
+#define K_HEX9 K(KT_ASCII,19)
+#define K_HEXa K(KT_ASCII,20)
+#define K_HEXb K(KT_ASCII,21)
+#define K_HEXc K(KT_ASCII,22)
+#define K_HEXd K(KT_ASCII,23)
+#define K_HEXe K(KT_ASCII,24)
+#define K_HEXf K(KT_ASCII,25)
+
+#define NR_ASCII 26
+
+#define K_SHIFTLOCK K(KT_LOCK,KG_SHIFT)
+#define K_CTRLLOCK K(KT_LOCK,KG_CTRL)
+#define K_ALTLOCK K(KT_LOCK,KG_ALT)
+#define K_ALTGRLOCK K(KT_LOCK,KG_ALTGR)
+#define K_SHIFTLLOCK K(KT_LOCK,KG_SHIFTL)
+#define K_SHIFTRLOCK K(KT_LOCK,KG_SHIFTR)
+#define K_CTRLLLOCK K(KT_LOCK,KG_CTRLL)
+#define K_CTRLRLOCK K(KT_LOCK,KG_CTRLR)
+
+#define K_SHIFT_SLOCK K(KT_SLOCK,KG_SHIFT)
+#define K_CTRL_SLOCK K(KT_SLOCK,KG_CTRL)
+#define K_ALT_SLOCK K(KT_SLOCK,KG_ALT)
+#define K_ALTGR_SLOCK K(KT_SLOCK,KG_ALTGR)
+#define K_SHIFTL_SLOCK K(KT_SLOCK,KG_SHIFTL)
+#define K_SHIFTR_SLOCK K(KT_SLOCK,KG_SHIFTR)
+#define K_CTRLL_SLOCK K(KT_SLOCK,KG_CTRLL)
+#define K_CTRLR_SLOCK K(KT_SLOCK,KG_CTRLR)
+
+#define NR_LOCK 8
+
+#define MAX_DIACR 256
+#endif
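Every keymap entry in this patch is built with the K() macro and taken apart again with KTYP()/KVAL(). A tiny illustrative fragment (not part of the patch):

/* Illustrative only: composing and decoding key codes with the macros above. */
#include "keyboard.h"

static int is_function_key(unsigned short code)
{
  return KTYP(code) == KT_FN;      /* e.g. K_F1 == K(KT_FN,0) */
}

static unsigned char key_value(unsigned short code)
{
  return KVAL(code);               /* the low byte carries the value */
}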
diff --git a/c/src/lib/libbsp/powerpc/shared/console/polled_io.c b/c/src/lib/libbsp/powerpc/shared/console/polled_io.c
new file mode 100644
index 0000000000..ecd6dd5f88
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/console/polled_io.c
@@ -0,0 +1,1080 @@
+/*
+ * polled_io.c -- Basic input/output for early boot
+ *
+ * Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
+ *
+ * Modified to compile in RTEMS development environment
+ * by Eric Valette
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+#include <libcpu/byteorder.h>
+#include <libcpu/page.h>
+#include <libcpu/cpu.h>
+#include <libcpu/mmu.h>
+#include "keyboard.h"
+#include <libcpu/io.h>
+#include <string.h>
+#include <stdarg.h>
+#include <bsp/consoleIo.h>
+#include <libcpu/spr.h>
+
+typedef unsigned long long u64;
+typedef long long s64;
+typedef unsigned int u32;
+
+unsigned short plain_map[NR_KEYS] = {
+ 0xf200, 0xf01b, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036,
+ 0xf037, 0xf038, 0xf039, 0xf030, 0xf02d, 0xf03d, 0xf07f, 0xf009,
+ 0xfb71, 0xfb77, 0xfb65, 0xfb72, 0xfb74, 0xfb79, 0xfb75, 0xfb69,
+ 0xfb6f, 0xfb70, 0xf05b, 0xf05d, 0xf201, 0xf702, 0xfb61, 0xfb73,
+ 0xfb64, 0xfb66, 0xfb67, 0xfb68, 0xfb6a, 0xfb6b, 0xfb6c, 0xf03b,
+ 0xf027, 0xf060, 0xf700, 0xf05c, 0xfb7a, 0xfb78, 0xfb63, 0xfb76,
+ 0xfb62, 0xfb6e, 0xfb6d, 0xf02c, 0xf02e, 0xf02f, 0xf700, 0xf30c,
+ 0xf703, 0xf020, 0xf207, 0xf100, 0xf101, 0xf102, 0xf103, 0xf104,
+ 0xf105, 0xf106, 0xf107, 0xf108, 0xf109, 0xf208, 0xf209, 0xf307,
+ 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301,
+ 0xf302, 0xf303, 0xf300, 0xf310, 0xf206, 0xf200, 0xf03c, 0xf10a,
+ 0xf10b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf30e, 0xf702, 0xf30d, 0xf01c, 0xf701, 0xf205, 0xf114, 0xf603,
+ 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116,
+ 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+unsigned short shift_map[NR_KEYS] = {
+ 0xf200, 0xf01b, 0xf021, 0xf040, 0xf023, 0xf024, 0xf025, 0xf05e,
+ 0xf026, 0xf02a, 0xf028, 0xf029, 0xf05f, 0xf02b, 0xf07f, 0xf009,
+ 0xfb51, 0xfb57, 0xfb45, 0xfb52, 0xfb54, 0xfb59, 0xfb55, 0xfb49,
+ 0xfb4f, 0xfb50, 0xf07b, 0xf07d, 0xf201, 0xf702, 0xfb41, 0xfb53,
+ 0xfb44, 0xfb46, 0xfb47, 0xfb48, 0xfb4a, 0xfb4b, 0xfb4c, 0xf03a,
+ 0xf022, 0xf07e, 0xf700, 0xf07c, 0xfb5a, 0xfb58, 0xfb43, 0xfb56,
+ 0xfb42, 0xfb4e, 0xfb4d, 0xf03c, 0xf03e, 0xf03f, 0xf700, 0xf30c,
+ 0xf703, 0xf020, 0xf207, 0xf10a, 0xf10b, 0xf10c, 0xf10d, 0xf10e,
+ 0xf10f, 0xf110, 0xf111, 0xf112, 0xf113, 0xf213, 0xf203, 0xf307,
+ 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301,
+ 0xf302, 0xf303, 0xf300, 0xf310, 0xf206, 0xf200, 0xf03e, 0xf10a,
+ 0xf10b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf30e, 0xf702, 0xf30d, 0xf200, 0xf701, 0xf205, 0xf114, 0xf603,
+ 0xf20b, 0xf601, 0xf602, 0xf117, 0xf600, 0xf20a, 0xf115, 0xf116,
+ 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+unsigned short altgr_map[NR_KEYS] = {
+ 0xf200, 0xf200, 0xf200, 0xf040, 0xf200, 0xf024, 0xf200, 0xf200,
+ 0xf07b, 0xf05b, 0xf05d, 0xf07d, 0xf05c, 0xf200, 0xf200, 0xf200,
+ 0xfb71, 0xfb77, 0xf918, 0xfb72, 0xfb74, 0xfb79, 0xfb75, 0xfb69,
+ 0xfb6f, 0xfb70, 0xf200, 0xf07e, 0xf201, 0xf702, 0xf914, 0xfb73,
+ 0xf917, 0xf919, 0xfb67, 0xfb68, 0xfb6a, 0xfb6b, 0xfb6c, 0xf200,
+ 0xf200, 0xf200, 0xf700, 0xf200, 0xfb7a, 0xfb78, 0xf916, 0xfb76,
+ 0xf915, 0xfb6e, 0xfb6d, 0xf200, 0xf200, 0xf200, 0xf700, 0xf30c,
+ 0xf703, 0xf200, 0xf207, 0xf50c, 0xf50d, 0xf50e, 0xf50f, 0xf510,
+ 0xf511, 0xf512, 0xf513, 0xf514, 0xf515, 0xf208, 0xf202, 0xf911,
+ 0xf912, 0xf913, 0xf30b, 0xf90e, 0xf90f, 0xf910, 0xf30a, 0xf90b,
+ 0xf90c, 0xf90d, 0xf90a, 0xf310, 0xf206, 0xf200, 0xf07c, 0xf516,
+ 0xf517, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf30e, 0xf702, 0xf30d, 0xf200, 0xf701, 0xf205, 0xf114, 0xf603,
+ 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116,
+ 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+unsigned short ctrl_map[NR_KEYS] = {
+ 0xf200, 0xf200, 0xf200, 0xf000, 0xf01b, 0xf01c, 0xf01d, 0xf01e,
+ 0xf01f, 0xf07f, 0xf200, 0xf200, 0xf01f, 0xf200, 0xf008, 0xf200,
+ 0xf011, 0xf017, 0xf005, 0xf012, 0xf014, 0xf019, 0xf015, 0xf009,
+ 0xf00f, 0xf010, 0xf01b, 0xf01d, 0xf201, 0xf702, 0xf001, 0xf013,
+ 0xf004, 0xf006, 0xf007, 0xf008, 0xf00a, 0xf00b, 0xf00c, 0xf200,
+ 0xf007, 0xf000, 0xf700, 0xf01c, 0xf01a, 0xf018, 0xf003, 0xf016,
+ 0xf002, 0xf00e, 0xf00d, 0xf200, 0xf20e, 0xf07f, 0xf700, 0xf30c,
+ 0xf703, 0xf000, 0xf207, 0xf100, 0xf101, 0xf102, 0xf103, 0xf104,
+ 0xf105, 0xf106, 0xf107, 0xf108, 0xf109, 0xf208, 0xf204, 0xf307,
+ 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301,
+ 0xf302, 0xf303, 0xf300, 0xf310, 0xf206, 0xf200, 0xf200, 0xf10a,
+ 0xf10b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf30e, 0xf702, 0xf30d, 0xf01c, 0xf701, 0xf205, 0xf114, 0xf603,
+ 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116,
+ 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+unsigned short shift_ctrl_map[NR_KEYS] = {
+ 0xf200, 0xf200, 0xf200, 0xf000, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf01f, 0xf200, 0xf200, 0xf200,
+ 0xf011, 0xf017, 0xf005, 0xf012, 0xf014, 0xf019, 0xf015, 0xf009,
+ 0xf00f, 0xf010, 0xf200, 0xf200, 0xf201, 0xf702, 0xf001, 0xf013,
+ 0xf004, 0xf006, 0xf007, 0xf008, 0xf00a, 0xf00b, 0xf00c, 0xf200,
+ 0xf200, 0xf200, 0xf700, 0xf200, 0xf01a, 0xf018, 0xf003, 0xf016,
+ 0xf002, 0xf00e, 0xf00d, 0xf200, 0xf200, 0xf200, 0xf700, 0xf30c,
+ 0xf703, 0xf200, 0xf207, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf208, 0xf200, 0xf307,
+ 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301,
+ 0xf302, 0xf303, 0xf300, 0xf310, 0xf206, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf30e, 0xf702, 0xf30d, 0xf200, 0xf701, 0xf205, 0xf114, 0xf603,
+ 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116,
+ 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+unsigned short alt_map[NR_KEYS] = {
+ 0xf200, 0xf81b, 0xf831, 0xf832, 0xf833, 0xf834, 0xf835, 0xf836,
+ 0xf837, 0xf838, 0xf839, 0xf830, 0xf82d, 0xf83d, 0xf87f, 0xf809,
+ 0xf871, 0xf877, 0xf865, 0xf872, 0xf874, 0xf879, 0xf875, 0xf869,
+ 0xf86f, 0xf870, 0xf85b, 0xf85d, 0xf80d, 0xf702, 0xf861, 0xf873,
+ 0xf864, 0xf866, 0xf867, 0xf868, 0xf86a, 0xf86b, 0xf86c, 0xf83b,
+ 0xf827, 0xf860, 0xf700, 0xf85c, 0xf87a, 0xf878, 0xf863, 0xf876,
+ 0xf862, 0xf86e, 0xf86d, 0xf82c, 0xf82e, 0xf82f, 0xf700, 0xf30c,
+ 0xf703, 0xf820, 0xf207, 0xf500, 0xf501, 0xf502, 0xf503, 0xf504,
+ 0xf505, 0xf506, 0xf507, 0xf508, 0xf509, 0xf208, 0xf209, 0xf907,
+ 0xf908, 0xf909, 0xf30b, 0xf904, 0xf905, 0xf906, 0xf30a, 0xf901,
+ 0xf902, 0xf903, 0xf900, 0xf310, 0xf206, 0xf200, 0xf83c, 0xf50a,
+ 0xf50b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf30e, 0xf702, 0xf30d, 0xf01c, 0xf701, 0xf205, 0xf114, 0xf603,
+ 0xf118, 0xf210, 0xf211, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116,
+ 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+unsigned short ctrl_alt_map[NR_KEYS] = {
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf811, 0xf817, 0xf805, 0xf812, 0xf814, 0xf819, 0xf815, 0xf809,
+ 0xf80f, 0xf810, 0xf200, 0xf200, 0xf201, 0xf702, 0xf801, 0xf813,
+ 0xf804, 0xf806, 0xf807, 0xf808, 0xf80a, 0xf80b, 0xf80c, 0xf200,
+ 0xf200, 0xf200, 0xf700, 0xf200, 0xf81a, 0xf818, 0xf803, 0xf816,
+ 0xf802, 0xf80e, 0xf80d, 0xf200, 0xf200, 0xf200, 0xf700, 0xf30c,
+ 0xf703, 0xf200, 0xf207, 0xf500, 0xf501, 0xf502, 0xf503, 0xf504,
+ 0xf505, 0xf506, 0xf507, 0xf508, 0xf509, 0xf208, 0xf200, 0xf307,
+ 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301,
+ 0xf302, 0xf303, 0xf300, 0xf20c, 0xf206, 0xf200, 0xf200, 0xf50a,
+ 0xf50b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf30e, 0xf702, 0xf30d, 0xf200, 0xf701, 0xf205, 0xf114, 0xf603,
+ 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf20c,
+ 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+ushort *key_maps[MAX_NR_KEYMAPS] = {
+ plain_map, shift_map, altgr_map, 0,
+ ctrl_map, shift_ctrl_map, 0, 0,
+ alt_map, 0, 0, 0,
+ ctrl_alt_map, 0
+};
+
+unsigned int keymap_count = 7;
+
+/*
+ * Philosophy: most people do not define more strings, but those who do
+ * often want quite a lot of string space. So, we statically allocate
+ * the default and allocate dynamically in chunks of 512 bytes.
+ */
+
+char func_buf[] = {
+ '\033', '[', '[', 'A', 0,
+ '\033', '[', '[', 'B', 0,
+ '\033', '[', '[', 'C', 0,
+ '\033', '[', '[', 'D', 0,
+ '\033', '[', '[', 'E', 0,
+ '\033', '[', '1', '7', '~', 0,
+ '\033', '[', '1', '8', '~', 0,
+ '\033', '[', '1', '9', '~', 0,
+ '\033', '[', '2', '0', '~', 0,
+ '\033', '[', '2', '1', '~', 0,
+ '\033', '[', '2', '3', '~', 0,
+ '\033', '[', '2', '4', '~', 0,
+ '\033', '[', '2', '5', '~', 0,
+ '\033', '[', '2', '6', '~', 0,
+ '\033', '[', '2', '8', '~', 0,
+ '\033', '[', '2', '9', '~', 0,
+ '\033', '[', '3', '1', '~', 0,
+ '\033', '[', '3', '2', '~', 0,
+ '\033', '[', '3', '3', '~', 0,
+ '\033', '[', '3', '4', '~', 0,
+ '\033', '[', '1', '~', 0,
+ '\033', '[', '2', '~', 0,
+ '\033', '[', '3', '~', 0,
+ '\033', '[', '4', '~', 0,
+ '\033', '[', '5', '~', 0,
+ '\033', '[', '6', '~', 0,
+ '\033', '[', 'M', 0,
+ '\033', '[', 'P', 0,
+};
+
+char *funcbufptr = func_buf;
+int funcbufsize = sizeof(func_buf);
+int funcbufleft = 0; /* space left */
+
+char *func_table[MAX_NR_FUNC] = {
+ func_buf + 0,
+ func_buf + 5,
+ func_buf + 10,
+ func_buf + 15,
+ func_buf + 20,
+ func_buf + 25,
+ func_buf + 31,
+ func_buf + 37,
+ func_buf + 43,
+ func_buf + 49,
+ func_buf + 55,
+ func_buf + 61,
+ func_buf + 67,
+ func_buf + 73,
+ func_buf + 79,
+ func_buf + 85,
+ func_buf + 91,
+ func_buf + 97,
+ func_buf + 103,
+ func_buf + 109,
+ func_buf + 115,
+ func_buf + 120,
+ func_buf + 125,
+ func_buf + 130,
+ func_buf + 135,
+ func_buf + 140,
+ func_buf + 145,
+ 0,
+ 0,
+ func_buf + 149,
+ 0,
+};
+
+struct kbdiacr {
+ unsigned char diacr, base, result;
+};
+
+struct kbdiacr accent_table[MAX_DIACR] = {
+ {'`', 'A', '\300'}, {'`', 'a', '\340'},
+ {'\'', 'A', '\301'}, {'\'', 'a', '\341'},
+ {'^', 'A', '\302'}, {'^', 'a', '\342'},
+ {'~', 'A', '\303'}, {'~', 'a', '\343'},
+ {'"', 'A', '\304'}, {'"', 'a', '\344'},
+ {'O', 'A', '\305'}, {'o', 'a', '\345'},
+ {'0', 'A', '\305'}, {'0', 'a', '\345'},
+ {'A', 'A', '\305'}, {'a', 'a', '\345'},
+ {'A', 'E', '\306'}, {'a', 'e', '\346'},
+ {',', 'C', '\307'}, {',', 'c', '\347'},
+ {'`', 'E', '\310'}, {'`', 'e', '\350'},
+ {'\'', 'E', '\311'}, {'\'', 'e', '\351'},
+ {'^', 'E', '\312'}, {'^', 'e', '\352'},
+ {'"', 'E', '\313'}, {'"', 'e', '\353'},
+ {'`', 'I', '\314'}, {'`', 'i', '\354'},
+ {'\'', 'I', '\315'}, {'\'', 'i', '\355'},
+ {'^', 'I', '\316'}, {'^', 'i', '\356'},
+ {'"', 'I', '\317'}, {'"', 'i', '\357'},
+ {'-', 'D', '\320'}, {'-', 'd', '\360'},
+ {'~', 'N', '\321'}, {'~', 'n', '\361'},
+ {'`', 'O', '\322'}, {'`', 'o', '\362'},
+ {'\'', 'O', '\323'}, {'\'', 'o', '\363'},
+ {'^', 'O', '\324'}, {'^', 'o', '\364'},
+ {'~', 'O', '\325'}, {'~', 'o', '\365'},
+ {'"', 'O', '\326'}, {'"', 'o', '\366'},
+ {'/', 'O', '\330'}, {'/', 'o', '\370'},
+ {'`', 'U', '\331'}, {'`', 'u', '\371'},
+ {'\'', 'U', '\332'}, {'\'', 'u', '\372'},
+ {'^', 'U', '\333'}, {'^', 'u', '\373'},
+ {'"', 'U', '\334'}, {'"', 'u', '\374'},
+ {'\'', 'Y', '\335'}, {'\'', 'y', '\375'},
+ {'T', 'H', '\336'}, {'t', 'h', '\376'},
+ {'s', 's', '\337'}, {'"', 'y', '\377'},
+ {'s', 'z', '\337'}, {'i', 'j', '\377'},
+};
+
+unsigned int accent_table_size = 68;
+
+
+
+
+/* These #defines have been copied from drivers/char/pc_keyb.h, by
+ * Martin Mares (mj@ucw.cz).
+ */
+#define KBD_STATUS_REG 0x64 /* Status register (R) */
+#define KBD_CNTL_REG 0x64 /* Controller command register (W) */
+#define KBD_DATA_REG 0x60 /* Keyboard data register (R/W) */
+
+/*
+ * Keyboard Controller Commands
+ */
+
+#define KBD_CCMD_WRITE_MODE 0x60 /* Write mode bits */
+#define KBD_CCMD_GET_VERSION 0xA1 /* Get controller version */
+#define KBD_CCMD_MOUSE_DISABLE 0xA7 /* Disable mouse interface */
+#define KBD_CCMD_MOUSE_ENABLE 0xA8 /* Enable mouse interface */
+#define KBD_CCMD_TEST_MOUSE 0xA9 /* Mouse interface test */
+#define KBD_CCMD_SELF_TEST 0xAA /* Controller self test */
+#define KBD_CCMD_KBD_TEST 0xAB /* Keyboard interface test */
+#define KBD_CCMD_KBD_DISABLE 0xAD /* Keyboard interface disable */
+#define KBD_CCMD_KBD_ENABLE 0xAE /* Keyboard interface enable */
+
+/*
+ * Keyboard Commands
+ */
+
+#define KBD_CMD_ENABLE 0xF4 /* Enable scanning */
+#define KBD_CMD_DISABLE 0xF5 /* Disable scanning */
+#define KBD_CMD_RESET 0xFF /* Reset */
+
+/*
+ * Keyboard Replies
+ */
+
+#define KBD_REPLY_POR 0xAA /* Power on reset */
+#define KBD_REPLY_ACK 0xFA /* Command ACK */
+#define KBD_REPLY_RESEND 0xFE /* Command NACK, send the cmd again */
+
+/*
+ * Status Register Bits
+ */
+
+#define KBD_STAT_OBF 0x01 /* Keyboard output buffer full */
+#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */
+#define KBD_STAT_UNLOCKED 0x10 /* Zero if keyboard locked */
+#define KBD_STAT_GTO 0x40 /* General receive/xmit timeout */
+#define KBD_STAT_PERR 0x80 /* Parity error */
+
+/*
+ * Controller Mode Register Bits
+ */
+
+#define KBD_MODE_KBD_INT 0x01 /* Keyboard data generate IRQ1 */
+#define KBD_MODE_SYS 0x04 /* The system flag (?) */
+#define KBD_MODE_NO_KEYLOCK 0x08 /* The keylock doesn't affect the keyboard if set */
+#define KBD_MODE_DISABLE_KBD 0x10 /* Disable keyboard interface */
+#define KBD_MODE_DISABLE_MOUSE 0x20 /* Disable mouse interface */
+#define KBD_MODE_KCC 0x40 /* Scan code conversion to PC format */
+#define KBD_MODE_RFU 0x80
+
+SPR_RW(DEC)
+SPR_RO(PVR)
+
+
+/* Early messages after mm init but before console init are kept in log
+ * buffers.
+ */
+#define PAGE_LOG_CHARS (PAGE_SIZE-sizeof(int)-sizeof(u_long)-1)
+
+typedef struct _console_log {
+ struct _console_log *next;
+ int offset;
+ u_char data[PAGE_LOG_CHARS];
+} console_log;
+
+#ifdef STATIC_LOG_ALLOC
+
+#define STATIC_LOG_DATA_PAGE_NB 3
+
+static u_char log_page_pool [STATIC_LOG_DATA_PAGE_NB * PAGE_SIZE];
+
+#endif
+
+static board_memory_map mem_map = {
+ (__io_ptr) 0x80000000,
+ (__io_ptr) 0xc0000000,
+ (__io_ptr) 0xc0000000,
+ (__io_ptr) 0x80000000
+};
+
+board_memory_map *ptr_mem_map = &mem_map;
+
+
+struct _console_global_data {
+ console_log *log;
+ int vacuum_sent;
+ int lines;
+ int cols;
+ int orig_x;
+ int orig_y;
+ u_char shfts, ctls, alts, caps;
+} console_global_data = {NULL, 0, 25, 80, 0, 24, 0, 0, 0, 0};
+
+typedef struct console_io {
+ void (*putc) (const u_char);
+ int (*getc) (void);
+ int (*tstc) (void);
+} console_io;
+
+extern console_io* curIo;
+
+unsigned long ticks_per_ms = 1000000; /* Decrementer ticks per ms (true for 601) */
+
+/* The decrementer is present on all processors and the RTC on the 601
+ * has the annoying characteristic of jumping from 1e9 to 0, so we
+ * use the decrementer.
+ */
+void udelay(int us) {
+ us = us*ticks_per_ms/1000;
+ _write_DEC(us);
+ while((int)_read_DEC() >= 0);
+}
+
+void debug_putc(const u_char c)
+{
+ curIo->putc(c);
+}
+
+int debug_getc(void)
+{
+ return curIo->getc();
+}
+
+int debug_tstc(void)
+{
+ return curIo->tstc();
+}
+
+
+
+#define vidmem ((__io_ptr)(ptr_mem_map->isa_mem_base+0xb8000))
+
+void vacuum_putc(u_char c) {
+ console_global_data.vacuum_sent++;
+}
+
+int vacuum_getc(void) {
+ return -1;
+}
+
+int vacuum_tstc(void) {
+ return 0;
+}
+
+/*
+ * COM1 NS16550 support
+ */
+
+#define rbr 0
+#define ier 1
+#define fcr 2
+#define lcr 3
+#define mcr 4
+#define lsr 5
+#define msr 6
+#define scr 7
+#define thr rbr
+#define iir fcr
+#define dll rbr
+#define dlm ier
+
+#define LSR_DR 0x01 /* Data ready */
+#define LSR_OE 0x02 /* Overrun */
+#define LSR_PE 0x04 /* Parity error */
+#define LSR_FE 0x08 /* Framing error */
+#define LSR_BI 0x10 /* Break */
+#define LSR_THRE 0x20 /* Xmit holding register empty */
+#define LSR_TEMT 0x40 /* Xmitter empty */
+#define LSR_ERR 0x80 /* Error */
+
+#define COM1 0x3F8
+
+#ifdef STATIC_LOG_ALLOC
+static int global_index = 0;
+
+static void *__palloc(int s)
+{
+ if (global_index == (STATIC_LOG_DATA_PAGE_NB - 1)) return (void*) 0;
+ return (void*) &(log_page_pool [PAGE_SIZE * global_index++]);
+}
+
+static void pfree(void* p)
+{
+ --global_index;
+}
+#endif
+
+
+void log_putc(const u_char c) {
+ console_log *l;
+ for(l=console_global_data.log; l; l=l->next) {
+ if (l->offset<PAGE_LOG_CHARS) break;
+ }
+ if (!l) {
+ l=__palloc(sizeof(console_log));
+ memset(l, 0, sizeof(console_log));
+ if (!console_global_data.log)
+ console_global_data.log = l;
+ else {
+ console_log *p;
+ for (p=console_global_data.log;
+ p->next; p=p->next);
+ p->next = l;
+ }
+ }
+ l->data[l->offset++] = c;
+}
+
+/* This puts() is non-standard since it does not automatically add a newline
+ * at the end. So it is made private to avoid confusion in other files.
+ */
+static
+void puts(const u_char *s)
+{
+ char c;
+
+ while ( ( c = *s++ ) != '\0' ) {
+ debug_putc(c);
+ if ( c == '\n' )
+ debug_putc('\r');
+ }
+}
+
+
+static
+void flush_log(void) {
+ console_log *p, *next;
+ if (console_global_data.vacuum_sent) {
+#ifdef TRACE_FLUSH_LOG
+ printk("%d characters sent into oblivion before MM init!\n",
+ console_global_data.vacuum_sent);
+#endif
+ }
+ for(p=console_global_data.log; p; p=next) {
+ puts(p->data);
+ next = p->next;
+ pfree(p);
+ }
+}
+
+void serial_putc(const u_char c)
+{
+ while ((inb(COM1+lsr) & LSR_THRE) == 0) ;
+ outb(c, COM1+thr);
+}
+
+int serial_getc(void)
+{
+ while ((inb(COM1+lsr) & LSR_DR) == 0) ;
+ return (inb(COM1+rbr));
+}
+
+int serial_tstc(void)
+{
+ return ((inb(COM1+lsr) & LSR_DR) != 0);
+}
+
+static void scroll(void)
+{
+ int i;
+
+ memcpy ( (u_char *)vidmem, (u_char *)vidmem + console_global_data.cols * 2,
+ ( console_global_data.lines - 1 ) * console_global_data.cols * 2 );
+ for ( i = ( console_global_data.lines - 1 ) * console_global_data.cols * 2;
+ i < console_global_data.lines * console_global_data.cols * 2;
+ i += 2 )
+ vidmem[i] = ' ';
+}
+
+/*
+ * cursor() sets an offset (0-1999) into the 80x25 text area
+ */
+static void
+cursor(int x, int y)
+{
+ int pos = console_global_data.cols*y + x;
+ outb(14, 0x3D4);
+ outb(pos>>8, 0x3D5);
+ outb(15, 0x3D4);
+ outb(pos, 0x3D5);
+}
+
+void
+vga_putc(const u_char c)
+{
+ int x,y;
+
+ x = console_global_data.orig_x;
+ y = console_global_data.orig_y;
+
+ if ( c == '\n' ) {
+ if ( ++y >= console_global_data.lines ) {
+ scroll();
+ y--;
+ }
+ } else if (c == '\b') {
+ if (x > 0) {
+ x--;
+ }
+ } else if (c == '\r') {
+ x = 0;
+ } else {
+ vidmem [ ( x + console_global_data.cols * y ) * 2 ] = c;
+ if ( ++x >= console_global_data.cols ) {
+ x = 0;
+ if ( ++y >= console_global_data.lines ) {
+ scroll();
+ y--;
+ }
+ }
+ }
+
+ cursor(x, y);
+
+ console_global_data.orig_x = x;
+ console_global_data.orig_y = y;
+}
+
+/* Keyboard support */
+static int kbd_getc(void)
+{
+ unsigned char dt, brk, val;
+ unsigned code;
+loop:
+ while((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0) ;
+
+ dt = inb(KBD_DATA_REG);
+
+ brk = dt & 0x80; /* brk == 1 on key release */
+ dt = dt & 0x7f; /* keycode */
+
+ if (console_global_data.shfts)
+ code = shift_map[dt];
+ else if (console_global_data.ctls)
+ code = ctrl_map[dt];
+ else
+ code = plain_map[dt];
+
+ val = KVAL(code);
+ switch (KTYP(code) & 0x0f) {
+ case KT_LATIN:
+ if (brk)
+ break;
+ if (console_global_data.alts)
+ val |= 0x80;
+ if (val == 0x7f) /* map delete to backspace */
+ val = '\b';
+ return val;
+
+ case KT_LETTER:
+ if (brk)
+ break;
+ if (console_global_data.caps)
+ val -= 'a'-'A';
+ return val;
+
+ case KT_SPEC:
+ if (brk)
+ break;
+ if (val == KVAL(K_CAPS))
+ console_global_data.caps = !console_global_data.caps;
+ else if (val == KVAL(K_ENTER)) {
+enter: /* Wait for key up */
+ while (1) {
+ while((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0) ;
+ dt = inb(KBD_DATA_REG);
+ if (dt & 0x80) /* key up */ break;
+ }
+ return 10;
+ }
+ break;
+
+ case KT_PAD:
+ if (brk)
+ break;
+ if (val < 10)
+ return val;
+ if (val == KVAL(K_PENTER))
+ goto enter;
+ break;
+
+ case KT_SHIFT:
+ switch (val) {
+ case KG_SHIFT:
+ case KG_SHIFTL:
+ case KG_SHIFTR:
+ console_global_data.shfts = brk ? 0 : 1;
+ break;
+ case KG_ALT:
+ case KG_ALTGR:
+ console_global_data.alts = brk ? 0 : 1;
+ break;
+ case KG_CTRL:
+ case KG_CTRLL:
+ case KG_CTRLR:
+ console_global_data.ctls = brk ? 0 : 1;
+ break;
+ }
+ break;
+
+ case KT_LOCK:
+ switch (val) {
+ case KG_SHIFT:
+ case KG_SHIFTL:
+ case KG_SHIFTR:
+ if (brk)
+ console_global_data.shfts = !console_global_data.shfts;
+ break;
+ case KG_ALT:
+ case KG_ALTGR:
+ if (brk)
+ console_global_data.alts = !console_global_data.alts;
+ break;
+ case KG_CTRL:
+ case KG_CTRLL:
+ case KG_CTRLR:
+ if (brk)
+ console_global_data.ctls = !console_global_data.ctls;
+ break;
+ }
+ break;
+ }
+ /* if (brk) return (0); */ /* Ignore initial 'key up' codes */
+ goto loop;
+}
+
+static int kbd_get(int ms) {
+ int status, data;
+ while(1) {
+ status = inb(KBD_STATUS_REG);
+ if (status & KBD_STAT_OBF) {
+ data = inb(KBD_DATA_REG);
+ if (status & (KBD_STAT_GTO | KBD_STAT_PERR))
+ return -1;
+ else
+ return data;
+ }
+ if (--ms < 0) return -1;
+ udelay(1000);
+ }
+}
+
+static void kbd_put(u_char c, int ms, int port) {
+ while (inb(KBD_STATUS_REG) & KBD_STAT_IBF) {
+ if (--ms < 0) return;
+ udelay(1000);
+ }
+ outb(c, port);
+}
+
+int kbdreset(void)
+{
+ int c;
+
+ /* Flush all pending data */
+ while(kbd_get(10) != -1);
+
+ /* Send self-test */
+ kbd_put(KBD_CCMD_SELF_TEST, 10, KBD_CNTL_REG);
+ c = kbd_get(1000);
+ if (c != 0x55) return 1;
+
+ /* Enable then reset the KB */
+ kbd_put(KBD_CCMD_KBD_ENABLE, 10, KBD_CNTL_REG);
+
+ while (1) {
+ kbd_put(KBD_CMD_RESET, 10, KBD_DATA_REG);
+ c = kbd_get(1000);
+ if (c == KBD_REPLY_ACK) break;
+ if (c != KBD_REPLY_RESEND) return 2;
+ }
+
+ if (kbd_get(1000) != KBD_REPLY_POR) return 3;
+
+ /* Disable the keyboard while setting up the controller */
+ kbd_put(KBD_CMD_DISABLE, 10, KBD_DATA_REG);
+ if (kbd_get(10)!=KBD_REPLY_ACK) return 4;
+
+ /* Enable interrupts and keyboard controller */
+ kbd_put(KBD_CCMD_WRITE_MODE, 10, KBD_CNTL_REG);
+ kbd_put(KBD_MODE_KBD_INT | KBD_MODE_SYS |
+ KBD_MODE_DISABLE_MOUSE | KBD_MODE_KCC,
+ 10, KBD_DATA_REG);
+
+ /* Reenable the keyboard */
+ kbd_put(KBD_CMD_ENABLE, 10, KBD_DATA_REG);
+ if (kbd_get(10)!=KBD_REPLY_ACK) return 5;
+
+ return 0;
+}
+
+int kbd_tstc(void)
+{
+ return ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) != 0);
+}
+
+const struct console_io
+vacuum_console_functions = {
+ vacuum_putc,
+ vacuum_getc,
+ vacuum_tstc
+};
+
+static const struct console_io
+log_console_functions = {
+ log_putc,
+ vacuum_getc,
+ vacuum_tstc
+},
+
+serial_console_functions = {
+ serial_putc,
+ serial_getc,
+ serial_tstc
+},
+
+vga_console_functions = {
+ vga_putc,
+ kbd_getc,
+ kbd_tstc
+};
+
+console_io* curIo = (console_io*) &vacuum_console_functions;
+
+int select_console(ioType t) {
+ static ioType curType = CONSOLE_VACUUM;
+
+ switch (t) {
+ case CONSOLE_VACUUM : curIo = (console_io*)&vacuum_console_functions; break;
+ case CONSOLE_LOG : curIo = (console_io*)&log_console_functions; break;
+ case CONSOLE_SERIAL : curIo = (console_io*)&serial_console_functions; break;
+ case CONSOLE_VGA : curIo = (console_io*)&vga_console_functions; break;
+ default : curIo = (console_io*)&vacuum_console_functions;break;
+ }
+ if (curType == CONSOLE_LOG) flush_log();
+ curType = t;
+ return 0;
+}
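+/*
+ * Usage sketch (illustrative only, not part of this file's logic): the
+ * bootloader can switch I/O back ends at run time, for instance to fall
+ * back from the VGA/keyboard console to the serial line:
+ *
+ *   select_console(CONSOLE_SERIAL);
+ *   printk("console is now on the serial port\n");
+ */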
+
+/* we use this so that we can do without the ctype library */
+#define is_digit(c) ((c) >= '0' && (c) <= '9')
+
+
+static int skip_atoi(const char **s)
+{
+ int i=0;
+
+ while (is_digit(**s))
+ i = i*10 + *((*s)++) - '0';
+ return i;
+}
+
+/* Based on linux/lib/vsprintf.c and modified to suit our needs:
+ * bloat has been limited since we basically only need %u, %x, %s and %c,
+ * but we do need 64 bit values!
+ */
+int vsprintf(char *buf, const char *fmt, va_list args);
+
+int printk(const char *fmt, ...) {
+ va_list args;
+ int i;
+ /* Should not be a problem with 8kB of stack */
+ char buf[1024];
+
+ va_start(args, fmt);
+ i = vsprintf(buf, fmt, args);
+ va_end(args);
+ puts(buf);
+ return i;
+}
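+/*
+ * Usage sketch (illustrative only): this printk() understands the subset of
+ * conversions handled by the vsprintf() below (%c, %s, %d/%i, %u, %x/%X, %p,
+ * plus 0-padding, a field width and the h/l/L length modifiers), e.g.
+ *
+ *   printk("CSR5=%08x frames=%u name=%s\n", status, count, name);
+ *
+ * where status, count and name are hypothetical variables.
+ */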
+
+/* 64-bit division by 10, done by multiplying by a fixed-point reciprocal.
+ * Necessary to avoid pulling in a library routine, and GCC won't do this inline. */
+#define div10(num, rmd) \
+do { u32 t1, t2, t3; \
+ asm("lis %4,0xcccd; " \
+ "addi %4,%4,0xffffcccd; " /* Build 0xcccccccd */ \
+ "mulhwu %3,%0+1,%4; " /* (num.l*cst.l).h */ \
+ "mullw %2,%0,%4; " /* (num.h*cst.l).l */ \
+ "addc %3,%3,%2; " \
+ "mulhwu %2,%0,%4; " /* (num.h*cst.l).h */ \
+ "addi %4,%4,-1; " /* Build 0xcccccccc */ \
+ "mullw %1,%0,%4; " /* (num.h*cst.h).l */ \
+ "adde %2,%2,%1; " \
+ "mulhwu %1,%0,%4; " /* (num.h*cst.h).h */ \
+ "addze %1,%1; " \
+ "mullw %0,%0+1,%4; " /* (num.l*cst.h).l */ \
+ "addc %3,%3,%0; " \
+ "mulhwu %0,%0+1,%4; " /* (num.l*cst.h).h */ \
+ "adde %2,%2,%0; " \
+ "addze %1,%1; " \
+ "srwi %2,%2,3; " \
+ "srwi %0,%1,3; " \
+ "rlwimi %2,%1,29,0,2; " \
+ "mulli %4,%2,10; " \
+ "sub %4,%0+1,%4; " \
+ "mr %0+1,%2; " : \
+ "=r" (num), "=&r" (t1), "=&r" (t2), "=&r"(t3), "=&b" (rmd) : \
+ "0" (num)); \
+ \
+} while(0);
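+/*
+ * For reference, a portable C equivalent of div10() would be (sketch only,
+ * not used so that the 64-bit division support routines are not pulled in):
+ *
+ *   rmd = (unsigned) (num % 10);
+ *   num = num / 10;
+ *
+ * The asm above obtains the same result by multiplying num by a fixed-point
+ * approximation of 1/10 (the 0xcccccccd/0xcccccccc constants) and keeping
+ * the high-order bits.
+ */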
+
+#define SIGN 1 /* unsigned/signed long */
+#define LARGE 2 /* use 'ABCDEF' instead of 'abcdef' */
+#define HEX 4 /* hexadecimal instead of decimal */
+#define ADDR 8 /* Value is an address (p) */
+#define ZEROPAD 16 /* pad with zero */
+#define HALF 32
+#define LONG 64 /* long argument */
+#define LLONG 128 /* 64 bit argument */
+
+static char * number(char * str, int size, int type, u64 num)
+{
+ char fill,sign,tmp[24];
+ const char *digits="0123456789abcdef";
+ int i;
+
+ if (type & LARGE)
+ digits = "0123456789ABCDEF";
+ fill = (type & ZEROPAD) ? '0' : ' ';
+ sign = 0;
+ if (type & SIGN) {
+ if ((s64)num <0) {
+ sign = '-';
+ num = -num;
+ size--;
+ }
+ }
+
+ i = 0;
+ do {
+ unsigned rem;
+ if (type&HEX) {
+ rem = num & 0x0f;
+ num >>=4;
+ } else {
+ div10(num, rem);
+ }
+ tmp[i++] = digits[rem];
+ } while (num != 0);
+
+ size -= i;
+ if (!(type&(ZEROPAD)))
+ while(size-->0)
+ *str++ = ' ';
+ if (sign)
+ *str++ = sign;
+
+ while (size-- > 0)
+ *str++ = fill;
+ while (i-- > 0)
+ *str++ = tmp[i];
+ while (size-- > 0)
+ *str++ = ' ';
+ return str;
+}
+
+int vsprintf(char *buf, const char *fmt, va_list args)
+{
+ int len;
+ u64 num;
+ int i;
+ char * str;
+ const char *s;
+
+ int flags; /* flags to number() and private */
+
+ int field_width; /* width of output field */
+
+ for (str=buf ; *fmt ; ++fmt) {
+ if (*fmt != '%') {
+ *str++ = *fmt;
+ continue;
+ }
+
+ /* process flags, only 0 padding needed */
+ flags = 0;
+ if (*++fmt == '0' ) {
+ flags |= ZEROPAD;
+ fmt++;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if (is_digit(*fmt))
+ field_width = skip_atoi(&fmt);
+
+ /* get the conversion qualifier */
+ if (*fmt == 'h') {
+ flags |= HALF;
+ fmt++;
+ } else if (*fmt == 'L') {
+ flags |= LLONG;
+ fmt++;
+ } else if (*fmt == 'l') {
+ flags |= LONG;
+ fmt++;
+ }
+
+ switch (*fmt) {
+ case 'c':
+ *str++ = (unsigned char) va_arg(args, int);
+ while (--field_width > 0)
+ *str++ = ' ';
+ continue;
+
+ case 's':
+ s = va_arg(args, char *);
+ len = strlen(s);
+
+ for (i = 0; i < len; ++i)
+ *str++ = *s++;
+ while (len < field_width--)
+ *str++ = ' ';
+ continue;
+
+ case 'p':
+ if (field_width == -1) {
+ field_width = 2*sizeof(void *);
+ }
+ flags |= ZEROPAD|HEX|ADDR;
+ break;
+
+ case 'X':
+ flags |= LARGE;
+ case 'x':
+ flags |= HEX;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= SIGN;
+ case 'u':
+ break;
+
+ default:
+ if (*fmt != '%')
+ *str++ = '%';
+ if (*fmt)
+ *str++ = *fmt;
+ else
+ --fmt;
+ continue;
+ }
+ /* This ugly code tries to minimize the number of va_arg()
+ * since they expand to a lot of code on PPC under the SYSV
+ * calling conventions (but not with -mcall-aix which has
+ * other problems). Arguments have at least the size of a
+ * long allocated, and we use this fact to minimize bloat.
+ * (and pointers are treated as unsigned long too).
+ */
+ if (sizeof(long long) > sizeof(long) && flags & LLONG)
+ num = va_arg(args, unsigned long long);
+ else {
+ u_long n = va_arg(args, unsigned long);
+ if (flags & HALF) {
+ if (flags & SIGN)
+ n = (short) n;
+ else
+ n = (unsigned short) n;
+ } else if (!(flags & LONG)) {
+ /* Here the compiler correctly removes this
+ * do nothing code on 32 bit PPC.
+ */
+ if (flags & SIGN)
+ n = (int) n;
+ else
+ n = (unsigned) n;
+ }
+ if (flags & SIGN) num = (long) n; else num = n;
+ }
+ str = number(str, field_width, flags, num);
+ }
+ *str = '\0';
+ return str-buf;
+}
diff --git a/c/src/lib/libbsp/powerpc/shared/console/uart.c b/c/src/lib/libbsp/powerpc/shared/console/uart.c
new file mode 100644
index 0000000000..da44cc2e99
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/console/uart.c
@@ -0,0 +1,778 @@
+/*
+ * This software is Copyright (C) 1998 by T.sqware - all rights limited
+ * It is provided in the public domain "as is", can be freely modified
+ * as far as this copyright notice is kept unchanged, but does not imply
+ * an endorsement by T.sqware of the product in which it is included.
+ *
+ * $Id$
+ */
+
+#include <bsp.h>
+#include <bsp/irq.h>
+#include <bsp/uart.h>
+#include <rtems/libio.h>
+#include <assert.h>
+
+/*
+ * Basic 16552 driver
+ */
+
+struct uart_data
+{
+ int hwFlow;
+ int baud;
+};
+
+static struct uart_data uart_data[2];
+
+/*
+ * Macros to read/write UART registers; if the configuration is
+ * different, just rewrite these macros
+ */
+
+static inline unsigned char
+uread(int uart, unsigned int reg)
+{
+ register unsigned char val;
+
+ if(uart == 0)
+ {
+ inport_byte(COM1_BASE_IO+reg, val);
+ }
+ else
+ {
+ inport_byte(COM2_BASE_IO+reg, val);
+ }
+
+ return val;
+}
+
+static inline void
+uwrite(int uart, int reg, unsigned int val)
+{
+ if(uart == 0)
+ {
+ outport_byte(COM1_BASE_IO+reg, val);
+ }
+ else
+ {
+ outport_byte(COM2_BASE_IO+reg, val);
+ }
+}
+
+#ifdef UARTDEBUG
+ static void
+uartError(int uart)
+{
+ unsigned char uartStatus, dummy;
+
+ uartStatus = uread(uart, LSR);
+ dummy = uread(uart, RBR);
+
+ if (uartStatus & OE)
+ printk("********* Over run Error **********\n");
+ if (uartStatus & PE)
+ printk("********* Parity Error **********\n");
+ if (uartStatus & FE)
+ printk("********* Framing Error **********\n");
+ if (uartStatus & BI)
+ printk("********* Parity Error **********\n");
+ if (uartStatus & ERFIFO)
+ printk("********* Error receive Fifo **********\n");
+
+}
+#else
+inline void uartError(int uart)
+{
+ unsigned char uartStatus;
+
+ uartStatus = uread(uart, LSR);
+ uartStatus = uread(uart, RBR);
+}
+#endif
+
+/*
+ * UART initialization.  The format is hardcoded to 8 data bits, no parity,
+ * one stop bit, FIFO enabled; the things that can be changed are the
+ * baud rate, hardware flow control, and the receive FIFO trigger level
+ */
+void
+BSP_uart_init(int uart, int baud, int hwFlow)
+{
+ unsigned char tmp;
+
+ /* Sanity check */
+ assert(uart == BSP_UART_COM1 || uart == BSP_UART_COM2);
+
+ switch(baud)
+ {
+ case 50:
+ case 75:
+ case 110:
+ case 134:
+ case 300:
+ case 600:
+ case 1200:
+ case 2400:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+ break;
+ default:
+ assert(0);
+ return;
+ }
+
+ /* Set DLAB bit to 1 */
+ uwrite(uart, LCR, DLAB);
+
+ /* Set baud rate */
+ uwrite(uart, DLL, (BSPBaseBaud/baud) & 0xff);
+ uwrite(uart, DLM, ((BSPBaseBaud/baud) >> 8) & 0xff);
+
+ /* 8-bit, no parity , 1 stop */
+ uwrite(uart, LCR, CHR_8_BITS);
+
+
+ /* Set DTR, RTS and OUT2 high */
+ uwrite(uart, MCR, DTR | RTS | OUT_2);
+
+ /* Enable FIFO */
+ uwrite(uart, FCR, FIFO_EN | XMIT_RESET | RCV_RESET | RECEIVE_FIFO_TRIGGER12);
+
+ /* Disable Interrupts */
+ uwrite(uart, IER, 0);
+
+ /* Read status to clear them */
+ tmp = uread(uart, LSR);
+ tmp = uread(uart, RBR);
+ tmp = uread(uart, MSR);
+
+ /* Remember state */
+ uart_data[uart].hwFlow = hwFlow;
+ uart_data[uart].baud = baud;
+ return;
+}
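+/*
+ * Usage sketch (illustrative only): bring COM1 up for polled output at
+ * 9600 baud, 8N1, without hardware flow control.  The baud rate chosen
+ * here is an assumption; BSPBaseBaud must already have been set by the BSP.
+ *
+ *   BSP_uart_init(BSP_UART_COM1, 9600, 0);
+ *   BSP_uart_polled_write(BSP_UART_COM1, '?');
+ */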
+
+/*
+ * Set baud
+ */
+void
+BSP_uart_set_baud(int uart, int baud)
+{
+ unsigned char mcr, ier;
+
+ /* Sanity check */
+ assert(uart == BSP_UART_COM1 || uart == BSP_UART_COM2);
+
+ /*
+ * This function may be called whenever TERMIOS parameters
+ * are changed, so we have to make sure that a baud rate change is
+ * indeed required
+ */
+
+ if(baud == uart_data[uart].baud)
+ {
+ return;
+ }
+
+ mcr = uread(uart, MCR);
+ ier = uread(uart, IER);
+
+ BSP_uart_init(uart, baud, uart_data[uart].hwFlow);
+
+ uwrite(uart, MCR, mcr);
+ uwrite(uart, IER, ier);
+
+ return;
+}
+
+/*
+ * Enable/disable interrupts
+ */
+void
+BSP_uart_intr_ctrl(int uart, int cmd)
+{
+
+ assert(uart == BSP_UART_COM1 || uart == BSP_UART_COM2);
+
+ switch(cmd)
+ {
+ case BSP_UART_INTR_CTRL_DISABLE:
+ uwrite(uart, IER, INTERRUPT_DISABLE);
+ break;
+ case BSP_UART_INTR_CTRL_ENABLE:
+ if(uart_data[uart].hwFlow)
+ {
+ uwrite(uart, IER,
+ (RECEIVE_ENABLE |
+ TRANSMIT_ENABLE |
+ RECEIVER_LINE_ST_ENABLE |
+ MODEM_ENABLE
+ )
+ );
+ }
+ else
+ {
+ uwrite(uart, IER,
+ (RECEIVE_ENABLE |
+ TRANSMIT_ENABLE |
+ RECEIVER_LINE_ST_ENABLE
+ )
+ );
+ }
+ break;
+ case BSP_UART_INTR_CTRL_TERMIOS:
+ if(uart_data[uart].hwFlow)
+ {
+ uwrite(uart, IER,
+ (RECEIVE_ENABLE |
+ RECEIVER_LINE_ST_ENABLE |
+ MODEM_ENABLE
+ )
+ );
+ }
+ else
+ {
+ uwrite(uart, IER,
+ (RECEIVE_ENABLE |
+ RECEIVER_LINE_ST_ENABLE
+ )
+ );
+ }
+ break;
+ case BSP_UART_INTR_CTRL_GDB:
+ uwrite(uart, IER, RECEIVE_ENABLE);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ return;
+}
+
+void
+BSP_uart_throttle(int uart)
+{
+ unsigned int mcr;
+
+ assert(uart == BSP_UART_COM1 || uart == BSP_UART_COM2);
+
+ if(!uart_data[uart].hwFlow)
+ {
+ /* Should not happen */
+ assert(0);
+ return;
+ }
+ mcr = uread (uart, MCR);
+ /* RTS down */
+ mcr &= ~RTS;
+ uwrite(uart, MCR, mcr);
+
+ return;
+}
+
+void
+BSP_uart_unthrottle(int uart)
+{
+ unsigned int mcr;
+
+ assert(uart == BSP_UART_COM1 || uart == BSP_UART_COM2);
+
+ if(!uart_data[uart].hwFlow)
+ {
+ /* Should not happen */
+ assert(0);
+ return;
+ }
+ mcr = uread (uart, MCR);
+ /* RTS up */
+ mcr |= RTS;
+ uwrite(uart, MCR, mcr);
+
+ return;
+}
+
+/*
+ * Status function: returns -1 if an error was detected, 0 if no received
+ * character is available, 1 if a received character is available, and 2 if
+ * a break was detected.  Break and error characters are eaten.  Overruns
+ * are ignored - we cannot do anything about them except count statistics,
+ * and we are not counting them.
+ */
+int
+BSP_uart_polled_status(int uart)
+{
+ unsigned char val;
+
+ assert(uart == BSP_UART_COM1 || uart == BSP_UART_COM2);
+
+ val = uread(uart, LSR);
+
+ if(val & BI)
+ {
+ /* BREAK found, eat character */
+ uread(uart, RBR);
+ return BSP_UART_STATUS_BREAK;
+ }
+
+ if((val & (DR | OE | FE)) == 1)
+ {
+ /* No error, character present */
+ return BSP_UART_STATUS_CHAR;
+ }
+
+ if((val & (DR | OE | FE)) == 0)
+ {
+ /* Nothing */
+ return BSP_UART_STATUS_NOCHAR;
+ }
+
+ /*
+ * Framing or parity error
+ * eat character
+ */
+ uread(uart, RBR);
+
+ return BSP_UART_STATUS_ERROR;
+}
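+/*
+ * Usage sketch (illustrative only): a caller polling for input would
+ * typically check the status and only read when a character is present
+ * (c being a hypothetical int):
+ *
+ *   if (BSP_uart_polled_status(BSP_UART_COM1) == BSP_UART_STATUS_CHAR)
+ *     c = BSP_uart_polled_read(BSP_UART_COM1);
+ */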
+
+
+/*
+ * Polled mode write function
+ */
+void
+BSP_uart_polled_write(int uart, int val)
+{
+ unsigned char val1;
+
+ /* Sanity check */
+ assert(uart == BSP_UART_COM1 || uart == BSP_UART_COM2);
+
+ for(;;)
+ {
+ if((val1=uread(uart, LSR)) & THRE)
+ {
+ break;
+ }
+ }
+
+ if(uart_data[uart].hwFlow)
+ {
+ for(;;)
+ {
+ if(uread(uart, MSR) & CTS)
+ {
+ break;
+ }
+ }
+ }
+
+ uwrite(uart, THR, val & 0xff);
+
+ return;
+}
+
+void
+BSP_output_char_via_serial(int val)
+{
+ BSP_uart_polled_write(BSPConsolePort, val);
+ if (val == '\n') BSP_uart_polled_write(BSPConsolePort,'\r');
+}
+
+/*
+ * Polled mode read function
+ */
+int
+BSP_uart_polled_read(int uart)
+{
+ unsigned char val;
+
+ assert(uart == BSP_UART_COM1 || uart == BSP_UART_COM2);
+
+ for(;;)
+ {
+ if(uread(uart, LSR) & DR)
+ {
+ break;
+ }
+ }
+
+ val = uread(uart, RBR);
+
+ return (int)(val & 0xff);
+}
+
+unsigned
+BSP_poll_char_via_serial()
+{
+ return BSP_uart_polled_read(BSPConsolePort);
+}
+
+
+/* ================ Termios support =================*/
+
+static volatile int termios_stopped_com1 = 0;
+static volatile int termios_tx_active_com1 = 0;
+static void* termios_ttyp_com1 = NULL;
+static char termios_tx_hold_com1 = 0;
+static volatile char termios_tx_hold_valid_com1 = 0;
+
+static volatile int termios_stopped_com2 = 0;
+static volatile int termios_tx_active_com2 = 0;
+static void* termios_ttyp_com2 = NULL;
+static char termios_tx_hold_com2 = 0;
+static volatile char termios_tx_hold_valid_com2 = 0;
+
+/*
+ * Set channel parameters
+ */
+void
+BSP_uart_termios_set(int uart, void *ttyp)
+{
+ unsigned char val;
+ assert(uart == BSP_UART_COM1 || uart == BSP_UART_COM2);
+
+ if(uart == BSP_UART_COM1)
+ {
+ if(uart_data[uart].hwFlow)
+ {
+ val = uread(uart, MSR);
+
+ termios_stopped_com1 = (val & CTS) ? 0 : 1;
+ }
+ else
+ {
+ termios_stopped_com1 = 0;
+ }
+ termios_tx_active_com1 = 0;
+ termios_ttyp_com1 = ttyp;
+ termios_tx_hold_com1 = 0;
+ termios_tx_hold_valid_com1 = 0;
+ }
+ else
+ {
+ if(uart_data[uart].hwFlow)
+ {
+ val = uread(uart, MSR);
+
+ termios_stopped_com2 = (val & CTS) ? 0 : 1;
+ }
+ else
+ {
+ termios_stopped_com2 = 0;
+ }
+ termios_tx_active_com2 = 0;
+ termios_ttyp_com2 = ttyp;
+ termios_tx_hold_com2 = 0;
+ termios_tx_hold_valid_com2 = 0;
+ }
+
+ return;
+}
+
+int
+BSP_uart_termios_write_com1(int minor, const char *buf, int len)
+{
+ assert(buf != NULL);
+
+ if(len <= 0)
+ {
+ return 0;
+ }
+
+ /* If the TX buffer is busy - something is royally screwed up */
+ /* assert((uread(BSP_UART_COM1, LSR) & THRE) != 0); */
+
+
+ if(termios_stopped_com1)
+ {
+ /* CTS low */
+ termios_tx_hold_com1 = *buf;
+ termios_tx_hold_valid_com1 = 1;
+ return 0;
+ }
+
+ /* Write character */
+ uwrite(BSP_UART_COM1, THR, *buf & 0xff);
+
+ /* Enable interrupts if necessary */
+ if(!termios_tx_active_com1 && uart_data[BSP_UART_COM1].hwFlow)
+ {
+ termios_tx_active_com1 = 1;
+ uwrite(BSP_UART_COM1, IER,
+ (RECEIVE_ENABLE |
+ TRANSMIT_ENABLE |
+ RECEIVER_LINE_ST_ENABLE |
+ MODEM_ENABLE
+ )
+ );
+ }
+ else if(!termios_tx_active_com1)
+ {
+ termios_tx_active_com1 = 1;
+ uwrite(BSP_UART_COM1, IER,
+ (RECEIVE_ENABLE |
+ TRANSMIT_ENABLE |
+ RECEIVER_LINE_ST_ENABLE
+ )
+ );
+ }
+
+ return 0;
+}
+
+int
+BSP_uart_termios_write_com2(int minor, const char *buf, int len)
+{
+ assert(buf != NULL);
+
+ if(len <= 0)
+ {
+ return 0;
+ }
+
+
+ /* If the TX buffer is busy - something is royally screwed up */
+ assert((uread(BSP_UART_COM2, LSR) & THRE) != 0);
+
+ if(termios_stopped_com2)
+ {
+ /* CTS low */
+ termios_tx_hold_com2 = *buf;
+ termios_tx_hold_valid_com2 = 1;
+ return 0;
+ }
+
+ /* Write character */
+
+ uwrite(BSP_UART_COM2, THR, *buf & 0xff);
+
+ /* Enable interrupts if necessary */
+ if(!termios_tx_active_com2 && uart_data[BSP_UART_COM2].hwFlow)
+ {
+ termios_tx_active_com2 = 1;
+ uwrite(BSP_UART_COM2, IER,
+ (RECEIVE_ENABLE |
+ TRANSMIT_ENABLE |
+ RECEIVER_LINE_ST_ENABLE |
+ MODEM_ENABLE
+ )
+ );
+ }
+ else if(!termios_tx_active_com2)
+ {
+ termios_tx_active_com2 = 1;
+ uwrite(BSP_UART_COM2, IER,
+ (RECEIVE_ENABLE |
+ TRANSMIT_ENABLE |
+ RECEIVER_LINE_ST_ENABLE
+ )
+ );
+ }
+
+ return 0;
+}
+
+
+void
+BSP_uart_termios_isr_com1(void)
+{
+ unsigned char buf[40];
+ unsigned char val;
+ int off, ret, vect;
+
+ off = 0;
+
+ for(;;)
+ {
+ vect = uread(BSP_UART_COM1, IIR) & 0xf;
+
+ switch(vect)
+ {
+ case MODEM_STATUS :
+ val = uread(BSP_UART_COM1, MSR);
+ if(uart_data[BSP_UART_COM1].hwFlow)
+ {
+ if(val & CTS)
+ {
+ /* CTS high */
+ termios_stopped_com1 = 0;
+ if(termios_tx_hold_valid_com1)
+ {
+ termios_tx_hold_valid_com1 = 0;
+ BSP_uart_termios_write_com1(0, &termios_tx_hold_com1,
+ 1);
+ }
+ }
+ else
+ {
+ /* CTS low */
+ termios_stopped_com1 = 1;
+ }
+ }
+ break;
+ case NO_MORE_INTR :
+ /* No more interrupts */
+ if(off != 0)
+ {
+ /* Update rx buffer */
+ rtems_termios_enqueue_raw_characters(termios_ttyp_com1,
+ (char *)buf,
+ off);
+ }
+ return;
+ case TRANSMITTER_HODING_REGISTER_EMPTY :
+ /*
+ * TX holding empty: we have to disable these interrupts
+ * if there is nothing more to send.
+ */
+
+ ret = rtems_termios_dequeue_characters(termios_ttyp_com1, 1);
+
+ /* If nothing else to send disable interrupts */
+ if(ret == 0 && uart_data[BSP_UART_COM1].hwFlow)
+ {
+ uwrite(BSP_UART_COM1, IER,
+ (RECEIVE_ENABLE |
+ RECEIVER_LINE_ST_ENABLE |
+ MODEM_ENABLE
+ )
+ );
+ termios_tx_active_com1 = 0;
+ }
+ else if(ret == 0)
+ {
+ uwrite(BSP_UART_COM1, IER,
+ (RECEIVE_ENABLE |
+ RECEIVER_LINE_ST_ENABLE
+ )
+ );
+ termios_tx_active_com1 = 0;
+ }
+ break;
+ case RECEIVER_DATA_AVAIL :
+ case CHARACTER_TIMEOUT_INDICATION:
+ /* RX data ready */
+ assert(off < sizeof(buf));
+ buf[off++] = uread(BSP_UART_COM1, RBR);
+ break;
+ case RECEIVER_ERROR:
+ /* RX error: eat character */
+ uartError(BSP_UART_COM1);
+ break;
+ default:
+ /* Should not happen */
+ assert(0);
+ return;
+ }
+ }
+}
+
+void
+BSP_uart_termios_isr_com2()
+{
+ unsigned char buf[40];
+ unsigned char val;
+ int off, ret, vect;
+
+ off = 0;
+
+ for(;;)
+ {
+ vect = uread(BSP_UART_COM2, IIR) & 0xf;
+
+ switch(vect)
+ {
+ case MODEM_STATUS :
+ val = uread(BSP_UART_COM2, MSR);
+ if(uart_data[BSP_UART_COM2].hwFlow)
+ {
+ if(val & CTS)
+ {
+ /* CTS high */
+ termios_stopped_com2 = 0;
+ if(termios_tx_hold_valid_com2)
+ {
+ termios_tx_hold_valid_com2 = 0;
+ BSP_uart_termios_write_com2(0, &termios_tx_hold_com2,
+ 1);
+ }
+ }
+ else
+ {
+ /* CTS low */
+ termios_stopped_com2 = 1;
+ }
+ }
+ break;
+ case NO_MORE_INTR :
+ /* No more interrupts */
+ if(off != 0)
+ {
+ /* Update rx buffer */
+ rtems_termios_enqueue_raw_characters(termios_ttyp_com2,
+ (char *)buf,
+ off);
+ }
+ return;
+ case TRANSMITTER_HODING_REGISTER_EMPTY :
+ /*
+ * TX holding empty: we have to disable these interrupts
+ * if there is nothing more to send.
+ */
+
+ ret = rtems_termios_dequeue_characters(termios_ttyp_com2, 1);
+
+ /* If nothing else to send disable interrupts */
+ if(ret == 0 && uart_data[BSP_UART_COM2].hwFlow)
+ {
+ uwrite(BSP_UART_COM2, IER,
+ (RECEIVE_ENABLE |
+ RECEIVER_LINE_ST_ENABLE |
+ MODEM_ENABLE
+ )
+ );
+ termios_tx_active_com2 = 0;
+ }
+ else if(ret == 0)
+ {
+ uwrite(BSP_UART_COM2, IER,
+ (RECEIVE_ENABLE |
+ RECEIVER_LINE_ST_ENABLE
+ )
+ );
+ termios_tx_active_com2 = 0;
+ }
+ break;
+ case RECEIVER_DATA_AVAIL :
+ case CHARACTER_TIMEOUT_INDICATION:
+ /* RX data ready */
+ assert(off < sizeof(buf));
+ buf[off++] = uread(BSP_UART_COM2, RBR);
+ break;
+ case RECEIVER_ERROR:
+ /* RX error: eat character */
+ uartError(BSP_UART_COM2);
+ break;
+ default:
+ /* Should not happen */
+ assert(0);
+ return;
+ }
+ }
+}
+
+
+
+
+
+
+
+
diff --git a/c/src/lib/libbsp/powerpc/shared/console/uart.h b/c/src/lib/libbsp/powerpc/shared/console/uart.h
new file mode 100644
index 0000000000..e43ac9900c
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/console/uart.h
@@ -0,0 +1,169 @@
+
+
+/*
+ * This software is Copyright (C) 1998 by T.sqware - all rights limited
+ * It is provided in the public domain "as is", can be freely modified
+ * as far as this copyright notice is kept unchanged, but does not imply
+ * an endorsement by T.sqware of the product in which it is included.
+ */
+
+#ifndef _BSPUART_H
+#define _BSPUART_H
+
+void BSP_uart_init(int uart, int baud, int hwFlow);
+void BSP_uart_set_baud(int uart, int baud);
+void BSP_uart_intr_ctrl(int uart, int cmd);
+void BSP_uart_throttle(int uart);
+void BSP_uart_unthrottle(int uart);
+int BSP_uart_polled_status(int uart);
+void BSP_uart_polled_write(int uart, int val);
+int BSP_uart_polled_read(int uart);
+void BSP_uart_termios_set(int uart, void *ttyp);
+int BSP_uart_termios_write_com1(int minor, const char *buf, int len);
+int BSP_uart_termios_write_com2(int minor, const char *buf, int len);
+void BSP_uart_termios_isr_com1();
+void BSP_uart_termios_isr_com2();
+void BSP_uart_dbgisr_com1(void);
+void BSP_uart_dbgisr_com2(void);
+extern unsigned BSP_poll_char_via_serial(void);
+extern void BSP_output_char_via_serial(int val);
+extern int BSPConsolePort;
+extern int BSPBaseBaud;
+/*
+ * Command values for BSP_uart_intr_ctrl();
+ * the values are deliberately odd so that bogus
+ * arguments are caught by assert()
+ */
+#define BSP_UART_INTR_CTRL_DISABLE (0)
+#define BSP_UART_INTR_CTRL_GDB (0xaa) /* RX only */
+#define BSP_UART_INTR_CTRL_ENABLE (0xbb) /* Normal operations */
+#define BSP_UART_INTR_CTRL_TERMIOS (0xcc) /* RX & line status */
+
+/* Return values for BSP_uart_polled_status() */
+#define BSP_UART_STATUS_ERROR (-1) /* Error detected */
+#define BSP_UART_STATUS_NOCHAR (0) /* No character */
+#define BSP_UART_STATUS_CHAR (1) /* Character present */
+#define BSP_UART_STATUS_BREAK (2) /* Break point is detected */
+
+/* PC UART definitions */
+#define BSP_UART_COM1 (0)
+#define BSP_UART_COM2 (1)
+
+/*
+ * Base IO for UART
+ */
+
+#define COM1_BASE_IO 0x3F8
+#define COM2_BASE_IO 0x2F8
+
+/*
+ * Offsets from base
+ */
+
+/* DLAB 0 */
+#define RBR (0) /* Rx Buffer Register (read) */
+#define THR (0) /* Tx Buffer Register (write) */
+#define IER (1) /* Interrupt Enable Register */
+
+/* DLAB X */
+#define IIR (2) /* Interrupt Ident Register (read) */
+#define FCR (2) /* FIFO Control Register (write) */
+#define LCR (3) /* Line Control Register */
+#define MCR (4) /* Modem Control Register */
+#define LSR (5) /* Line Status Register */
+#define MSR (6) /* Modem Status Register */
+#define SCR (7) /* Scratch register */
+
+/* DLAB 1 */
+#define DLL (0) /* Divisor Latch, LSB */
+#define DLM (1) /* Divisor Latch, MSB */
+#define AFR (2) /* Alternate Function register */
+
+/*
+ * Interrupt source definition via IIR
+ */
+#define MODEM_STATUS 0
+#define NO_MORE_INTR 1
+#define TRANSMITTER_HODING_REGISTER_EMPTY 2
+#define RECEIVER_DATA_AVAIL 4
+#define RECEIVER_ERROR 6
+#define CHARACTER_TIMEOUT_INDICATION 12
+
+/*
+ * Bits definition of IER
+ */
+#define RECEIVE_ENABLE 0x1
+#define TRANSMIT_ENABLE 0x2
+#define RECEIVER_LINE_ST_ENABLE 0x4
+#define MODEM_ENABLE 0x8
+#define INTERRUPT_DISABLE 0x0
+
+/*
+ * Bits definition of the Line Status Register (LSR)
+ */
+#define DR 0x01 /* Data Ready */
+#define OE 0x02 /* Overrun Error */
+#define PE 0x04 /* Parity Error */
+#define FE 0x08 /* Framing Error */
+#define BI 0x10 /* Break Interrupt */
+#define THRE 0x20 /* Transmitter Holding Register Empty */
+#define TEMT 0x40 /* Transmitter Empty */
+#define ERFIFO 0x80 /* Error receive Fifo */
+
+/*
+ * Bits definition of the MODEM Control Register (MCR)
+ */
+#define DTR 0x01 /* Data Terminal Ready */
+#define RTS 0x02 /* Request To Send */
+#define OUT_1 0x04 /* Output 1, (reserved on COMPAQ I/O Board) */
+#define OUT_2 0x08 /* Output 2, Enable Asynchronous Port Interrupts */
+#define LB 0x10 /* Enable Internal Loop Back */
+
+/*
+ * Bits definition of the Line Control Register (LCR)
+ */
+#define CHR_5_BITS 0
+#define CHR_6_BITS 1
+#define CHR_7_BITS 2
+#define CHR_8_BITS 3
+
+#define WL 0x03 /* Word length mask */
+#define STB 0x04 /* 1 Stop Bit, otherwise 2 Stop Bits */
+#define PEN 0x08 /* Parity Enabled */
+#define EPS 0x10 /* Even Parity Select, otherwise Odd */
+#define SP 0x20 /* Stick Parity */
+#define BCB 0x40 /* Break Control Bit */
+#define DLAB 0x80 /* Enable Divisor Latch Access */
+
+/*
+ * Bits definition of the MODEM Status Register (MSR)
+ */
+#define DCTS 0x01 /* Delta Clear To Send */
+#define DDSR 0x02 /* Delta Data Set Ready */
+#define TERI 0x04 /* Trailing Edge Ring Indicator */
+#define DDCD 0x08 /* Delta Carrier Detect Indicator */
+#define CTS 0x10 /* Clear To Send (when loop back is active) */
+#define DSR 0x20 /* Data Set Ready (when loop back is active) */
+#define RI 0x40 /* Ring Indicator (when loop back is active) */
+#define DCD 0x80 /* Data Carrier Detect (when loop back is active) */
+
+/*
+ * Bits definition of the FIFO Control Register : WD16C552 or NS16550
+ */
+
+#define FIFO_CTRL 0x01 /* Set to 1 permit access to other bits */
+#define FIFO_EN 0x01 /* Enable the FIFO */
+#define XMIT_RESET 0x02 /* Transmit FIFO Reset */
+#define RCV_RESET 0x04 /* Receive FIFO Reset */
+#define FCR3 0x08 /* do not understand manual! */
+
+#define RECEIVE_FIFO_TRIGGER1 0x0 /* trigger receive interrupt after 1 byte */
+#define RECEIVE_FIFO_TRIGGER4 0x40 /* trigger receive interrupt after 4 bytes */
+#define RECEIVE_FIFO_TRIGGER8 0x80 /* trigger receive interrupt after 8 bytes */
+#define RECEIVE_FIFO_TRIGGER12 0xc0 /* trigger receive interrupt after 12 bytes */
+#define TRIG_LEVEL 0xc0 /* Mask for the trigger level */
+
+#endif /* _BSPUART_H */
+
+
+
diff --git a/c/src/lib/libbsp/powerpc/shared/dec21140/Makefile.in b/c/src/lib/libbsp/powerpc/shared/dec21140/Makefile.in
new file mode 100644
index 0000000000..aeee75f86e
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/dec21140/Makefile.in
@@ -0,0 +1,32 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/shared/dec21140
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+H_FILES =
+
+SRCS = $(C_FILES) $(H_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+
+all: ${ARCH} $(SRCS)
+
+# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
+install: all
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libbsp/powerpc/shared/dec21140/dec21140.c b/c/src/lib/libbsp/powerpc/shared/dec21140/dec21140.c
new file mode 100644
index 0000000000..00c87c4358
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/dec21140/dec21140.c
@@ -0,0 +1,905 @@
+/*
+ * RTEMS driver for TULIP based Ethernet Controller
+ *
+ * Copyright (C) 1999 Emmanuel Raguet. raguet@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <bsp.h>
+#include <bsp/pci.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <rtems/error.h>
+#include <rtems/rtems_bsdnet.h>
+
+#include <libcpu/cpu.h>
+#include <libcpu/io.h>
+#include <libcpu/byteorder.h>
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#include <bsp/irq.h>
+
+#ifdef malloc
+#undef malloc
+#endif
+#ifdef free
+#undef free
+#endif
+
+#define DEC_DEBUG
+
+#define PCI_INVALID_VENDORDEVICEID 0xffffffff
+#define PCI_VENDOR_ID_DEC 0x1011
+#define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009
+
+#define IO_MASK 0x3
+#define MEM_MASK 0xF
+#define MASK_OFFSET 0xF
+
+/* command and status registers, 32-bit access, only if IO-ACCESS */
+#define ioCSR0 0x00 /* bus mode register */
+#define ioCSR1 0x08 /* transmit poll demand */
+#define ioCSR2 0x10 /* receive poll demand */
+#define ioCSR3 0x18 /* receive list base address */
+#define ioCSR4 0x20 /* transmit list base address */
+#define ioCSR5 0x28 /* status register */
+#define ioCSR6 0x30 /* operation mode register */
+#define ioCSR7 0x38 /* interrupt mask register */
+#define ioCSR8 0x40 /* missed frame counter */
+#define ioCSR9 0x48 /* Ethernet ROM register */
+#define ioCSR10 0x50 /* reserved */
+#define ioCSR11 0x58 /* full-duplex register */
+#define ioCSR12 0x60 /* SIA status register */
+#define ioCSR13 0x68
+#define ioCSR14 0x70
+#define ioCSR15 0x78 /* SIA general register */
+
+/* command and status registers, 32-bit access, only if MEMORY-ACCESS */
+#define memCSR0 0x00 /* bus mode register */
+#define memCSR1 0x02 /* transmit poll demand */
+#define memCSR2 0x04 /* receive poll demand */
+#define memCSR3 0x06 /* receive list base address */
+#define memCSR4 0x08 /* transmit list base address */
+#define memCSR5 0x0A /* status register */
+#define memCSR6 0x0C /* operation mode register */
+#define memCSR7 0x0E /* interrupt mask register */
+#define memCSR8 0x10 /* missed frame counter */
+#define memCSR9 0x12 /* Ethernet ROM register */
+#define memCSR10 0x14 /* reserved */
+#define memCSR11 0x16 /* full-duplex register */
+#define memCSR12 0x18 /* SIA status register */
+#define memCSR13 0x1A
+#define memCSR14 0x1C
+#define memCSR15 0x1E /* SIA general register */
+
+#define DEC_REGISTER_SIZE 0x100 /* to reserve virtual memory */
+
+#define RESET_CHIP 0x00000001
+#define CSR0_MODE 0x01b08000 /* 01a08000 */
+#define ROM_ADDRESS 0x00004800
+#define CSR6_INIT 0x020c0000 /* 020c0000 */
+#define CSR6_TX 0x00002000
+#define CSR6_TXRX 0x00002002
+#define IT_SETUP 0x00010040 /* 0001ebef */
+#define CLEAR_IT 0xFFFFFFFF
+#define NO_IT 0x00000000
+
+#define NRXBUFS 7 /* number of receive buffers */
+#define NTXBUFS 1 /* number of transmit buffers */
+
+/* message descriptor entry */
+struct MD {
+ volatile unsigned long status;
+ volatile unsigned long counts;
+ volatile unsigned char *buf1, *buf2;
+};
+
+/*
+ * Number of WDs supported by this driver
+ */
+#define NDECDRIVER 1
+
+/*
+ * Receive buffer size -- Allow for a full ethernet packet including CRC
+ */
+#define RBUF_SIZE 1536
+
+#define ET_MINLEN 60 /* minimum message length */
+
+/*
+ * RTEMS event used by interrupt handler to signal driver tasks.
+ * This must not be any of the events used by the network task synchronization.
+ */
+#define INTERRUPT_EVENT RTEMS_EVENT_1
+
+/*
+ * RTEMS event used to start transmit daemon.
+ * This must not be the same as INTERRUPT_EVENT.
+ */
+#define START_TRANSMIT_EVENT RTEMS_EVENT_2
+
+#if (MCLBYTES < RBUF_SIZE)
+# error "Driver must have MCLBYTES > RBUF_SIZE"
+#endif
+
+/*
+ * Per-device data
+ */
+ struct dec21140_softc {
+ struct arpcom arpcom;
+ rtems_irq_connect_data irqInfo;
+ volatile struct MD *MDbase;
+ volatile unsigned char *bufferBase;
+ int acceptBroadcast;
+ int rxBdCount;
+ int txBdCount;
+ rtems_id rxDaemonTid;
+ rtems_id txDaemonTid;
+
+ unsigned int port;
+ volatile unsigned int *base;
+ unsigned long bpar;
+
+ /*
+ * Statistics
+ */
+ unsigned long rxInterrupts;
+ unsigned long rxNotFirst;
+ unsigned long rxNotLast;
+ unsigned long rxGiant;
+ unsigned long rxNonOctet;
+ unsigned long rxRunt;
+ unsigned long rxBadCRC;
+ unsigned long rxOverrun;
+ unsigned long rxCollision;
+
+ unsigned long txInterrupts;
+ unsigned long txDeferred;
+ unsigned long txHeartbeat;
+ unsigned long txLateCollision;
+ unsigned long txRetryLimit;
+ unsigned long txUnderrun;
+ unsigned long txLostCarrier;
+ unsigned long txRawWait;
+};
+
+static struct dec21140_softc dec21140_softc[NDECDRIVER];
+
+/*
+ * DEC21140 interrupt handler
+ */
+static rtems_isr
+dec21140Enet_interrupt_handler (rtems_vector_number v)
+{
+ volatile unsigned int *tbase;
+ unsigned long status;
+
+ unsigned int sc;
+
+ tbase = dec21140_softc[0].base ;
+
+ /*
+ * Read status
+ */
+ st_le32((tbase+memCSR7), NO_IT);
+ status = ld_le32(tbase+memCSR5);
+ st_le32((tbase+memCSR5), CLEAR_IT);
+
+ /*
+ * Frame received?
+ */
+ if (status & 0x00000040){
+ dec21140_softc[0].rxInterrupts++;
+ sc = rtems_event_send (dec21140_softc[0].rxDaemonTid, INTERRUPT_EVENT);
+ }
+}
+
+static void nopOn(const rtems_irq_connect_data* notUsed)
+{
+ /*
+ * code should be moved from dec21140Enet_initialize_hardware
+ * to this location
+ */
+}
+
+static int dec21140IsOn(const rtems_irq_connect_data* irq)
+{
+ return BSP_irq_enabled_at_i8259s (irq->name);
+}
+
+/*
+ * Read and write the MII registers using software-generated serial
+ * MDIO protocol.
+ */
+#define MDIO_SHIFT_CLK 0x10000
+#define MDIO_DATA_WRITE0 0x00000
+#define MDIO_DATA_WRITE1 0x20000
+#define MDIO_ENB 0x00000
+#define MDIO_ENB_IN 0x40000
+#define MDIO_DATA_READ 0x80000
+
+static int mdio_read(volatile unsigned int *ioaddr, int phy_id, int location)
+{
+ int i, i3;
+ int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ unsigned short retval = 0;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ for (i = 32; i >= 0; i--) {
+ st_le32(ioaddr, MDIO_ENB | MDIO_DATA_WRITE1);
+ for(i3=0; i3<1000; i3++);
+ st_le32(ioaddr, MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK);
+ for(i3=0; i3<1000; i3++);
+ }
+ /* Shift the read command bits out. */
+ for (i = 17; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+ st_le32(ioaddr, dataval);
+ for(i3=0; i3<1000; i3++);
+ st_le32(ioaddr, dataval | MDIO_SHIFT_CLK);
+ for(i3=0; i3<1000; i3++);
+ st_le32(ioaddr, dataval);
+ for(i3=0; i3<1000; i3++);
+ }
+ st_le32(ioaddr, MDIO_ENB_IN | MDIO_SHIFT_CLK);
+ for(i3=0; i3<1000; i3++);
+ st_le32(ioaddr, MDIO_ENB_IN);
+
+ for (i = 16; i > 0; i--) {
+ st_le32(ioaddr, MDIO_ENB_IN | MDIO_SHIFT_CLK);
+ for(i3=0; i3<1000; i3++);
+ retval = (retval << 1) | ((ld_le32(ioaddr) & MDIO_DATA_READ) ? 1 : 0);
+ st_le32(ioaddr, MDIO_ENB_IN);
+ for(i3=0; i3<1000; i3++);
+ }
+ /* Clear out extra bits. */
+ for (i = 16; i > 0; i--) {
+ st_le32(ioaddr, MDIO_ENB_IN | MDIO_SHIFT_CLK);
+ for(i3=0; i3<1000; i3++);
+ st_le32(ioaddr, MDIO_ENB_IN);
+ for(i3=0; i3<1000; i3++);
+ }
+ return ( ((retval<<8)&0xff00) | ((retval>>8)&0xff) );
+}
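+/*
+ * Usage sketch (illustrative only): with tbase pointing at the memory
+ * mapped CSRs (as set up in dec21140Enet_initialize_hardware() below),
+ * the standard MII PHY identifier registers 2 and 3 of the PHY at
+ * address 0 could be read as
+ *
+ *   int phy_id_hi = mdio_read(tbase + memCSR9, 0, 2);
+ *   int phy_id_lo = mdio_read(tbase + memCSR9, 0, 3);
+ *
+ * Note that, like eeget16(), this routine returns a byte-swapped value.
+ */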
+
+static int mdio_write(volatile unsigned int *ioaddr, int phy_id, int location, int value)
+{
+ int i, i3;
+ int cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ for (i = 32; i >= 0; i--) {
+ st_le32(ioaddr, MDIO_ENB | MDIO_DATA_WRITE1);
+ for(i3=0; i3<1000; i3++);
+ st_le32(ioaddr, MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK);
+ for(i3=0; i3<1000; i3++);
+ }
+ /* Shift the write command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+ st_le32(ioaddr, dataval);
+ for(i3=0; i3<1000; i3++);
+ st_le32(ioaddr, dataval | MDIO_SHIFT_CLK);
+ for(i3=0; i3<1000; i3++);
+ }
+
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ st_le32(ioaddr, MDIO_ENB_IN);
+ for(i3=0; i3<1000; i3++);
+ st_le32(ioaddr, MDIO_ENB_IN | MDIO_SHIFT_CLK);
+ for(i3=0; i3<1000; i3++);
+ }
+ return 0;
+
+
+}
+
+/*
+ * This routine reads a word (16 bits) from the serial EEPROM.
+ */
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x02 /* EEPROM shift clock. */
+#define EE_CS 0x01 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x01
+#define EE_WRITE_1 0x05
+#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
+#define EE_ENB (0x4800 | EE_CS)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5 << 6)
+#define EE_READ_CMD (6 << 6)
+#define EE_ERASE_CMD (7 << 6)
+
+static int eeget16(volatile unsigned int *ioaddr, int location)
+{
+ int i, i3;
+ unsigned short retval = 0;
+ int read_cmd = location | EE_READ_CMD;
+
+ st_le32(ioaddr, EE_ENB & ~EE_CS);
+ st_le32(ioaddr, EE_ENB);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ st_le32(ioaddr, EE_ENB | dataval);
+ for (i3=0; i3<1000; i3++) ;
+ st_le32(ioaddr, EE_ENB | dataval | EE_SHIFT_CLK);
+ for (i3=0; i3<1000; i3++) ;
+ st_le32(ioaddr, EE_ENB | dataval); /* Finish EEPROM a clock tick. */
+ for (i3=0; i3<1000; i3++) ;
+ }
+ st_le32(ioaddr, EE_ENB);
+
+ for (i = 16; i > 0; i--) {
+ st_le32(ioaddr, EE_ENB | EE_SHIFT_CLK);
+ for (i3=0; i3<1000; i3++) ;
+ retval = (retval << 1) | ((ld_le32(ioaddr) & EE_DATA_READ) ? 1 : 0);
+ st_le32(ioaddr, EE_ENB);
+ for (i3=0; i3<1000; i3++) ;
+ }
+
+ /* Terminate the EEPROM access. */
+ st_le32(ioaddr, EE_ENB & ~EE_CS);
+ return ( ((retval<<8)&0xff00) | ((retval>>8)&0xff) );
+}
+
+/*
+ * Initialize the ethernet hardware
+ */
+static void
+dec21140Enet_initialize_hardware (struct dec21140_softc *sc)
+{
+ rtems_status_code st;
+ volatile unsigned int *tbase;
+ union {char c[64]; unsigned short s[32];} rombuf;
+ int i, i2, i3;
+ volatile unsigned char *cp, direction, *setup_frm, *eaddrs;
+ unsigned long csr12_val, mii_reg0;
+ volatile unsigned char *buffer;
+ volatile struct MD *rmd;
+
+
+ tbase = sc->base;
+
+
+
+ /*
+ * WARNING: first write CSR6,
+ * then reset the chip (write 1 to CSR0)
+ */
+
+ st_le32( (tbase+memCSR6), CSR6_INIT);
+ st_le32( (tbase+memCSR0), RESET_CHIP);
+ for(i3=0; i3<1000; i3++);
+
+ /*
+ * Init CSR0
+ */
+ st_le32( (tbase+memCSR0), CSR0_MODE);
+
+ csr12_val = ld_le32( (tbase+memCSR8) );
+
+ for (i=0; i<32; i++){
+ rombuf.s[i] = eeget16(tbase+memCSR9, i);
+ }
+ memcpy (sc->arpcom.ac_enaddr, rombuf.c+20, ETHER_ADDR_LEN);
+
+ mii_reg0 = mdio_read(tbase+memCSR9, 0, 0);
+ mdio_write(tbase+memCSR9, 0, 0, mii_reg0 | 0x1000);
+
+#ifdef DEC_DEBUG
+ printk("DC21140 %x:%x:%x:%x:%x:%x IRQ %d IO %x M %x .........\n",
+ sc->arpcom.ac_enaddr[0], sc->arpcom.ac_enaddr[1],
+ sc->arpcom.ac_enaddr[2], sc->arpcom.ac_enaddr[3],
+ sc->arpcom.ac_enaddr[4], sc->arpcom.ac_enaddr[5],
+ sc->irqInfo.name, sc->port, (unsigned) sc->base);
+#endif
+
+ /*
+ * Init RX ring
+ */
+ sc->rxBdCount = 0;
+ cp = (volatile unsigned char *)malloc((NRXBUFS+NTXBUFS)*(sizeof(struct MD)+ RBUF_SIZE) + PPC_CACHE_ALIGNMENT);
+ sc->bufferBase = cp;
+ if ((unsigned int)cp & (PPC_CACHE_ALIGNMENT-1))
+ cp = (volatile unsigned char *) (((unsigned int)cp + PPC_CACHE_ALIGNMENT) & ~(PPC_CACHE_ALIGNMENT-1));
+#ifdef PCI_BRIDGE_DOES_NOT_ENSURE_CACHE_COHERENCY_FOR_DMA
+ if (_CPU_is_paging_enabled())
+ _CPU_change_memory_mapping_attribute
+ (NULL, cp,
+ (NRXBUFS+NTXBUFS)*(sizeof(struct MD)+ RBUF_SIZE),
+ PTE_CACHE_DISABLE | PTE_WRITABLE);
+#endif
+ rmd = (volatile struct MD*)cp;
+ sc->MDbase = rmd;
+ buffer = cp + ((NRXBUFS+NTXBUFS)*sizeof(struct MD));
+ st_le32( (tbase+memCSR3), (long)((long)(sc->MDbase) + PREP_PCI_DRAM_OFFSET));
+ for (i=0 ; i<NRXBUFS; i++){
+ rmd->buf2 = (volatile unsigned char *) 0;
+ rmd->buf1 = (buffer + (i*RBUF_SIZE) + PREP_PCI_DRAM_OFFSET);
+ rmd->counts = 0xfcc00000 | (RBUF_SIZE);
+ rmd->status = 0x80000000;
+ rmd++;
+ }
+ /*
+ * mark last RX buffer.
+ */
+ sc->MDbase [NRXBUFS-1].counts = 0xfec00000 | (RBUF_SIZE);
+ /*
+ * Init TX ring
+ */
+ sc->txBdCount = 0;
+ st_le32( (tbase+memCSR4), (long)(((long)(rmd)) + PREP_PCI_DRAM_OFFSET));
+ rmd->buf2 = (volatile unsigned char *) 0;
+ rmd->buf1 = buffer + (NRXBUFS*RBUF_SIZE) + PREP_PCI_DRAM_OFFSET;
+ rmd->counts = 0x62000000;
+ rmd->status = 0x0;
+
+ /*
+ * Set up interrupts
+ */
+ st_le32( (tbase+memCSR5), IT_SETUP);
+ st_le32( (tbase+memCSR7), IT_SETUP);
+
+ sc->irqInfo.hdl = (rtems_irq_hdl)dec21140Enet_interrupt_handler;
+ sc->irqInfo.on = nopOn;
+ sc->irqInfo.off = nopOn;
+ sc->irqInfo.isOn = dec21140IsOn;
+ st = BSP_install_rtems_irq_handler (&sc->irqInfo);
+ if (!st)
+ rtems_panic ("Can't attach DEC21140 interrupt handler for irq %d\n",
+ sc->irqInfo.name);
+
+ /*
+ * Start TX for setup frame
+ */
+ st_le32( (tbase+memCSR6), CSR6_INIT | CSR6_TX);
+
+ /*
+ * Build setup frame
+ */
+ setup_frm = rmd->buf1 - PREP_PCI_DRAM_OFFSET;
+ eaddrs = (char *)(sc->arpcom.ac_enaddr);
+ /* Fill the buffer with our physical address. */
+ for (i = 1; i < 16; i++) {
+ *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2];
+ *setup_frm++ = eaddrs[3];
+ *setup_frm++ = eaddrs[2];
+ *setup_frm++ = eaddrs[3];
+ *setup_frm++ = eaddrs[4];
+ *setup_frm++ = eaddrs[5];
+ *setup_frm++ = eaddrs[4];
+ *setup_frm++ = eaddrs[5];
+ }
+ /* Add the broadcast address when doing perfect filtering */
+ memset((void*) setup_frm, 0xff, 12);
+ rmd->counts = 0x0a000000 | 192 ;
+ rmd->status = 0x80000000;
+ st_le32( (tbase+memCSR1), 1);
+ while (rmd->status != 0x7fffffff);
+
+ /*
+ * Enable RX and TX
+ */
+ st_le32( (unsigned int*)(tbase+memCSR6), CSR6_INIT | CSR6_TXRX);
+
+ /*
+ * Set up PHY
+ */
+
+ i = rombuf.c[27];
+ i+=2;
+ direction = rombuf.c[i];
+ i +=4;
+ st_le32( (tbase+memCSR12), direction | 0x100);
+ for (i2 = 0; i2 < rombuf.c[(i+2) + rombuf.c[i+1]]; i2++){
+ st_le32( (tbase + memCSR12), rombuf.c[(i+3) + rombuf.c[i+1] + i2]);
+ }
+ for (i2 = 0; i2 < rombuf.c[i+1]; i2++){
+ st_le32( (tbase + memCSR12), rombuf.c[(i+2) + i2]);
+ }
+}
+
+static void
+dec21140_rxDaemon (void *arg)
+{
+ volatile unsigned int *tbase;
+ struct ether_header *eh;
+ struct dec21140_softc *dp = (struct dec21140_softc *)&dec21140_softc[0];
+ struct ifnet *ifp = &dp->arpcom.ac_if;
+ struct mbuf *m;
+ volatile struct MD *rmd;
+ unsigned int len;
+ char *temp;
+ rtems_event_set events;
+ int nbMD;
+
+ tbase = dec21140_softc[0].base ;
+
+ for (;;){
+
+ rtems_bsdnet_event_receive (INTERRUPT_EVENT,
+ RTEMS_WAIT|RTEMS_EVENT_ANY,
+ RTEMS_NO_TIMEOUT,
+ &events);
+ rmd = dec21140_softc[0].MDbase;
+ nbMD = 0;
+
+ while (nbMD < NRXBUFS){
+ if ( (rmd->status & 0x80000000) == 0){
+ len = (rmd->status >> 16) & 0x7ff;
+ MGETHDR (m, M_WAIT, MT_DATA);
+ MCLGET (m, M_WAIT);
+ m->m_pkthdr.rcvif = ifp;
+ temp = m->m_data;
+ m->m_len = m->m_pkthdr.len = len - sizeof(struct ether_header);
+ memcpy(temp, (void*) (rmd->buf1-PREP_PCI_DRAM_OFFSET), len);
+ rmd->status = 0x80000000;
+ eh = mtod (m, struct ether_header *);
+ m->m_data += sizeof(struct ether_header);
+ ether_input (ifp, eh, m);
+ }
+ rmd++;
+ nbMD++;
+ }
+ st_le32( (tbase+memCSR7), IT_SETUP);
+ }
+}
+
+static void
+sendpacket (struct ifnet *ifp, struct mbuf *m)
+{
+ struct dec21140_softc *dp = ifp->if_softc;
+ volatile struct MD *tmd;
+ volatile unsigned char *temp;
+ struct mbuf *n;
+ unsigned int len;
+ volatile unsigned int *tbase;
+
+ tbase = dp->base;
+
+ /*
+ * Waiting for Transmitter ready
+ */
+ tmd = dec21140_softc[0].MDbase + NRXBUFS;
+ while ( (tmd->status & 0x80000000) != 0 );
+ len = 0;
+ n = m;
+ temp = tmd->buf1-PREP_PCI_DRAM_OFFSET;
+
+ for (;;){
+ len += m->m_len;
+ memcpy((void*) temp, (char *)m->m_data, m->m_len);
+ temp += m->m_len ;
+ if ((m = m->m_next) == NULL)
+ break;
+ }
+
+ if (len < ET_MINLEN) len = ET_MINLEN;
+ tmd->counts = 0xe2000000 | len;
+ tmd->status = 0x80000000;
+
+ st_le32( (tbase+memCSR1), 0x1);
+
+ m_freem(n);
+}
+
+/*
+ * Driver transmit daemon
+ */
+void
+dec21140_txDaemon (void *arg)
+{
+ struct dec21140_softc *sc = (struct dec21140_softc *)arg;
+ struct ifnet *ifp = &sc->arpcom.ac_if;
+ struct mbuf *m;
+ rtems_event_set events;
+
+ for (;;) {
+ /*
+ * Wait for packet
+ */
+
+ rtems_bsdnet_event_receive (START_TRANSMIT_EVENT, RTEMS_EVENT_ANY | RTEMS_WAIT, RTEMS_NO_TIMEOUT, &events);
+
+ /*
+ * Send packets till queue is empty
+ */
+ for (;;) {
+ /*
+ * Get the next mbuf chain to transmit.
+ */
+ IF_DEQUEUE(&ifp->if_snd, m);
+ if (!m)
+ break;
+ sendpacket (ifp, m);
+ }
+ ifp->if_flags &= ~IFF_OACTIVE;
+ }
+}
+
+
+static void
+dec21140_start (struct ifnet *ifp)
+{
+ struct dec21140_softc *sc = ifp->if_softc;
+
+ rtems_event_send (sc->txDaemonTid, START_TRANSMIT_EVENT);
+ ifp->if_flags |= IFF_OACTIVE;
+}
+
+/*
+ * Initialize and start the device
+ */
+static void
+dec21140_init (void *arg)
+{
+ struct dec21140_softc *sc = arg;
+ struct ifnet *ifp = &sc->arpcom.ac_if;
+
+ if (sc->txDaemonTid == 0) {
+
+ /*
+ * Set up DEC21140 hardware
+ */
+ dec21140Enet_initialize_hardware (sc);
+
+ /*
+ * Start driver tasks
+ */
+ sc->rxDaemonTid = rtems_bsdnet_newproc ("DCrx", 4096,
+ dec21140_rxDaemon, sc);
+ sc->txDaemonTid = rtems_bsdnet_newproc ("DCtx", 4096,
+ dec21140_txDaemon, sc);
+ }
+
+ /*
+ * Tell the world that we're running.
+ */
+ ifp->if_flags |= IFF_RUNNING;
+
+}
+
+/*
+ * Stop the device
+ */
+static void
+dec21140_stop (struct dec21140_softc *sc)
+{
+ volatile unsigned int *tbase;
+ struct ifnet *ifp = &sc->arpcom.ac_if;
+
+ ifp->if_flags &= ~IFF_RUNNING;
+
+ /*
+ * Stop the transmitter
+ */
+ tbase=dec21140_softc[0].base ;
+ st_le32( (tbase+memCSR7), NO_IT);
+ st_le32( (tbase+memCSR6), CSR6_INIT);
+ free((void*)sc->bufferBase);
+}
+
+
+/*
+ * Show interface statistics
+ */
+static void
+dec21140_stats (struct dec21140_softc *sc)
+{
+ printf (" Rx Interrupts:%-8lu", sc->rxInterrupts);
+ printf (" Not First:%-8lu", sc->rxNotFirst);
+ printf (" Not Last:%-8lu\n", sc->rxNotLast);
+ printf (" Giant:%-8lu", sc->rxGiant);
+ printf (" Runt:%-8lu", sc->rxRunt);
+ printf (" Non-octet:%-8lu\n", sc->rxNonOctet);
+ printf (" Bad CRC:%-8lu", sc->rxBadCRC);
+ printf (" Overrun:%-8lu", sc->rxOverrun);
+ printf (" Collision:%-8lu\n", sc->rxCollision);
+
+ printf (" Tx Interrupts:%-8lu", sc->txInterrupts);
+ printf (" Deferred:%-8lu", sc->txDeferred);
+ printf (" Missed Hearbeat:%-8lu\n", sc->txHeartbeat);
+ printf (" No Carrier:%-8lu", sc->txLostCarrier);
+ printf ("Retransmit Limit:%-8lu", sc->txRetryLimit);
+ printf (" Late Collision:%-8lu\n", sc->txLateCollision);
+ printf (" Underrun:%-8lu", sc->txUnderrun);
+ printf (" Raw output wait:%-8lu\n", sc->txRawWait);
+}
+
+/*
+ * Driver ioctl handler
+ */
+static int
+dec21140_ioctl (struct ifnet *ifp, int command, caddr_t data)
+{
+ struct dec21140_softc *sc = ifp->if_softc;
+ int error = 0;
+
+ switch (command) {
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ ether_ioctl (ifp, command, data);
+ break;
+
+ case SIOCSIFFLAGS:
+ switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
+ case IFF_RUNNING:
+ dec21140_stop (sc);
+ break;
+
+ case IFF_UP:
+ dec21140_init (sc);
+ break;
+
+ case IFF_UP | IFF_RUNNING:
+ dec21140_stop (sc);
+ dec21140_init (sc);
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case SIO_RTEMS_SHOW_STATS:
+ dec21140_stats (sc);
+ break;
+
+ /*
+ * FIXME: All sorts of multicast commands need to be added here!
+ */
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return error;
+}
+
+/*
+ * Attach a DEC21140 driver to the system
+ */
+int
+rtems_dec21140_driver_attach (struct rtems_bsdnet_ifconfig *config)
+{
+ struct dec21140_softc *sc;
+ struct ifnet *ifp;
+ int mtu;
+ int i;
+ unsigned char ucSlotNumber, ucFnNumber;
+ unsigned int ulDeviceID, lvalue, tmp;
+ unsigned char cvalue;
+
+ /*
+ * First, find a DEC board
+ */
+ for(ucSlotNumber=0;ucSlotNumber<PCI_MAX_DEVICES;ucSlotNumber++) {
+ for(ucFnNumber=0;ucFnNumber<PCI_MAX_FUNCTIONS;ucFnNumber++) {
+ (void)pci_read_config_dword(0,
+ ucSlotNumber,
+ ucFnNumber,
+ PCI_VENDOR_ID,
+ &ulDeviceID);
+ if(ulDeviceID==PCI_INVALID_VENDORDEVICEID) {
+ /*
+ * This slot is empty
+ */
+ continue;
+ }
+ if (ulDeviceID == ((PCI_DEVICE_ID_DEC_TULIP_FAST<<16) + PCI_VENDOR_ID_DEC))
+ break;
+ }
+ if (ulDeviceID == ((PCI_DEVICE_ID_DEC_TULIP_FAST<<16) + PCI_VENDOR_ID_DEC)){
+ printk("DEC Adapter found !!\n");
+ break;
+ }
+ }
+
+ if(ulDeviceID==PCI_INVALID_VENDORDEVICEID)
+ rtems_panic("DEC PCI board not found !!\n");
+
+ /*
+ * Find a free driver
+ */
+ for (i = 0 ; i < NDECDRIVER ; i++) {
+ sc = &dec21140_softc[i];
+ ifp = &sc->arpcom.ac_if;
+ if (ifp->if_softc == NULL)
+ break;
+ }
+ if (i >= NDECDRIVER) {
+ printk ("Too many DEC drivers.\n");
+ return 0;
+ }
+
+ /*
+ * Process options
+ */
+
+ (void)pci_read_config_dword(0,
+ ucSlotNumber,
+ ucFnNumber,
+ PCI_BASE_ADDRESS_0,
+ &lvalue);
+
+ sc->port = lvalue & (unsigned int)(~IO_MASK);
+
+ (void)pci_read_config_dword(0,
+ ucSlotNumber,
+ ucFnNumber,
+ PCI_BASE_ADDRESS_1 ,
+ &lvalue);
+
+
+ tmp = (unsigned int)(lvalue & (unsigned int)(~MEM_MASK))
+ + (unsigned int)PREP_ISA_MEM_BASE;
+ sc->base = (unsigned int *)(tmp);
+
+ (void)pci_read_config_byte(0,
+ ucSlotNumber,
+ ucFnNumber,
+ PCI_INTERRUPT_LINE,
+ &cvalue);
+ sc->irqInfo.name = (rtems_irq_symbolic_name)cvalue;
+
+ if (config->hardware_address) {
+ memcpy (sc->arpcom.ac_enaddr, config->hardware_address,
+ ETHER_ADDR_LEN);
+ }
+ else {
+ memset (sc->arpcom.ac_enaddr, 0x08,ETHER_ADDR_LEN);
+ }
+ if (config->mtu)
+ mtu = config->mtu;
+ else
+ mtu = ETHERMTU;
+
+ sc->acceptBroadcast = !config->ignore_broadcast;
+
+ /*
+ * Set up network interface values
+ */
+ ifp->if_softc = sc;
+ ifp->if_unit = i + 1;
+ ifp->if_name = "dc";
+ ifp->if_mtu = mtu;
+ ifp->if_init = dec21140_init;
+ ifp->if_ioctl = dec21140_ioctl;
+ ifp->if_start = dec21140_start;
+ ifp->if_output = ether_output;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX;
+ if (ifp->if_snd.ifq_maxlen == 0)
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+
+ /*
+ * Attach the interface
+ */
+ if_attach (ifp);
+ ether_ifattach (ifp);
+
+ return 1;
+}
+
diff --git a/c/src/lib/libbsp/powerpc/shared/include/Makefile.in b/c/src/lib/libbsp/powerpc/shared/include/Makefile.in
new file mode 100644
index 0000000000..b33c92dc03
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/include/Makefile.in
@@ -0,0 +1,48 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/shared/include
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+H_FILES = $(srcdir)/nvram.h $(srcdir)/bsp.h
+
+#
+# Equate files are for including from assembly preprocessed by
+# gm4 or gasp. No examples are provided except for those for
+# other CPUs. The best way to generate them would be to
+# provide a program which generates the constants used based
+# on the C equivalents.
+#
+
+EQ_FILES =
+
+SRCS = $(H_FILES) $(EQ_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
+
+INSTALLDIRS = $(PROJECT_INCLUDE)
+
+$(INSTALLDIRS):
+ @$(mkinstalldirs) $(INSTALLDIRS)
+
+CLEAN_ADDITIONS +=
+CLOBBER_ADDITIONS +=
+
+all: $(SRCS)
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libbsp/powerpc/shared/include/bsp.h b/c/src/lib/libbsp/powerpc/shared/include/bsp.h
new file mode 100644
index 0000000000..7672298231
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/include/bsp.h
@@ -0,0 +1,57 @@
+/*
+ * bsp.h -- contains the BSP API definition.
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+#ifndef LIBBSP_POWERPC_MCP750_BSP_H
+#define LIBBSP_POWERPC_MCP750_BSP_H
+
+#include <rtems.h>
+#include <console.h>
+#include <libcpu/io.h>
+#include <clockdrv.h>
+#include <bsp/vectors.h>
+
+#ifndef ASM
+#define outport_byte(port,value) outb(value,port)
+#define outport_word(port,value) outw(value,port)
+#define outport_long(port,value) outl(value,port)
+
+#define inport_byte(port,value) (value = inb(port))
+#define inport_word(port,value) (value = inw(port))
+#define inport_long(port,value) (value = inl(port))
+/*
+ * Vital board data, obtained from the RESIDUAL DATA
+ */
+/*
+ * Total memory using RESIDUAL DATA
+ */
+unsigned int BSP_mem_size;
+/*
+ * PCI Bus Frequency
+ */
+unsigned int BSP_bus_frequency;
+/*
+ * processor clock frequency
+ */
+unsigned int BSP_processor_frequency;
+/*
+ * Time base divisor (how many ticks per second).
+ */
+unsigned int BSP_time_base_divisor;
+
+extern rtems_configuration_table BSP_Configuration;
+extern void BSP_panic(char *s);
+extern void rtemsReboot(void);
+extern int printk(const char *, ...) __attribute__((format(printf, 1, 2)));
+extern int BSP_disconnect_clock_handler (void);
+extern int BSP_connect_clock_handler (void);
+#endif
+
+#endif
diff --git a/c/src/lib/libbsp/powerpc/shared/include/nvram.h b/c/src/lib/libbsp/powerpc/shared/include/nvram.h
new file mode 100644
index 0000000000..49edc54d3d
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/include/nvram.h
@@ -0,0 +1,170 @@
+/*
+ * PreP compliant NVRAM access
+ *
+ * This file can be found on the Motorola or IBM PPC sites.
+ *
+ * $Id$
+ */
+
+#ifndef _PPC_NVRAM_H
+#define _PPC_NVRAM_H
+
+#define NVRAM_AS0 0x74
+#define NVRAM_AS1 0x75
+#define NVRAM_DATA 0x77
+
+
+/* RTC Offsets */
+
+#define MOTO_RTC_SECONDS 0x1FF9
+#define MOTO_RTC_MINUTES 0x1FFA
+#define MOTO_RTC_HOURS 0x1FFB
+#define MOTO_RTC_DAY_OF_WEEK 0x1FFC
+#define MOTO_RTC_DAY_OF_MONTH 0x1FFD
+#define MOTO_RTC_MONTH 0x1FFE
+#define MOTO_RTC_YEAR 0x1FFF
+#define MOTO_RTC_CONTROLA 0x1FF8
+#define MOTO_RTC_CONTROLB 0x1FF9
+
+#ifndef BCD_TO_BIN
+#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
+#endif
+
+#ifndef BIN_TO_BCD
+#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
+#endif
+
+/* Structure map for NVRAM on PowerPC Reference Platform */
+/* All fields are either character/byte strings which are valid either
+ endian or they are big-endian numbers.
+
+ There are a number of Date and Time fields which are in RTC format,
+ big-endian. These are stored in UT (GMT).
+
+ For enums: if given in hex then they are bit-significant, i.e. only
+ one bit is set for each enum value.
+*/
+
+#define NVSIZE 4096 /* size of NVRAM */
+#define OSAREASIZE 512 /* size of OSArea space */
+#define CONFSIZE 1024 /* guess at size of Configuration space */
+
+#ifndef ASM
+
+typedef struct _SECURITY {
+ unsigned long BootErrCnt; /* Count of boot password errors */
+ unsigned long ConfigErrCnt; /* Count of config password errors */
+ unsigned long BootErrorDT[2]; /* Date&Time from RTC of last error in pw */
+ unsigned long ConfigErrorDT[2]; /* Date&Time from RTC of last error in pw */
+ unsigned long BootCorrectDT[2]; /* Date&Time from RTC of last correct pw */
+ unsigned long ConfigCorrectDT[2]; /* Date&Time from RTC of last correct pw */
+ unsigned long BootSetDT[2]; /* Date&Time from RTC of last set of pw */
+ unsigned long ConfigSetDT[2]; /* Date&Time from RTC of last set of pw */
+ unsigned char Serial[16]; /* Box serial number */
+} SECURITY;
+
+typedef enum _OS_ID {
+ Unknown = 0,
+ Firmware = 1,
+ AIX = 2,
+ NT = 3,
+ MKOS2 = 4,
+ MKAIX = 5,
+ Taligent = 6,
+ Solaris = 7,
+ MK = 12
+} OS_ID;
+
+typedef struct _ERROR_LOG {
+ unsigned char ErrorLogEntry[40]; /* To be architected */
+} ERROR_LOG;
+
+typedef enum _BOOT_STATUS {
+ BootStarted = 0x01,
+ BootFinished = 0x02,
+ RestartStarted = 0x04,
+ RestartFinished = 0x08,
+ PowerFailStarted = 0x10,
+ PowerFailFinished = 0x20,
+ ProcessorReady = 0x40,
+ ProcessorRunning = 0x80,
+ ProcessorStart = 0x0100
+} BOOT_STATUS;
+
+typedef struct _RESTART_BLOCK {
+ unsigned short Version;
+ unsigned short Revision;
+ unsigned long ResumeReserve1[2];
+ volatile unsigned long BootStatus;
+ unsigned long CheckSum; /* Checksum of RESTART_BLOCK */
+ void* RestartAddress;
+ void* SaveAreaAddr;
+ unsigned long SaveAreaLength;
+} RESTART_BLOCK;
+
+typedef enum _OSAREA_USAGE {
+ Empty = 0,
+ Used = 1
+} OSAREA_USAGE;
+
+typedef enum _PM_MODE {
+ Suspend = 0x80, /* Part of state is in memory */
+ Normal = 0x00 /* No power management in effect */
+} PMMode;
+
+typedef struct _HEADER {
+ unsigned short Size; /* NVRAM size in K(1024) */
+ unsigned char Version; /* Structure map different */
+ unsigned char Revision; /* Structure map the same -may
+ be new values in old fields
+ in other words old code still works */
+ unsigned short Crc1; /* check sum from beginning of nvram to OSArea */
+ unsigned short Crc2; /* check sum of config */
+ unsigned char LastOS; /* OS_ID */
+ unsigned char Endian; /* B if big endian, L if little endian */
+ unsigned char OSAreaUsage;/* OSAREA_USAGE */
+ unsigned char PMMode; /* Shutdown mode */
+ RESTART_BLOCK RestartBlock;
+ SECURITY Security;
+ ERROR_LOG ErrorLog[2];
+
+ /* Global Environment information */
+ void* GEAddress;
+ unsigned long GELength;
+
+ /* Date&Time from RTC of last change to Global Environment */
+ unsigned long GELastWriteDT[2];
+
+ /* Configuration information */
+ void* ConfigAddress;
+ unsigned long ConfigLength;
+
+ /* Date&Time from RTC of last change to Configuration */
+ unsigned long ConfigLastWriteDT[2];
+ unsigned long ConfigCount; /* Count of entries in Configuration */
+
+ /* OS dependent temp area */
+ void* OSAreaAddress;
+ unsigned long OSAreaLength;
+
+  /* Date&Time from RTC of last change to OSArea */
+ unsigned long OSAreaLastWriteDT[2];
+} HEADER;
+
+/* Here is the whole map of the NVRAM */
+typedef struct _NVRAM_MAP {
+ HEADER Header;
+ unsigned char GEArea[NVSIZE-CONFSIZE-OSAREASIZE-sizeof(HEADER)];
+ unsigned char OSArea[OSAREASIZE];
+ unsigned char ConfigArea[CONFSIZE];
+} NVRAM_MAP;
+
+/* Routines to manipulate the NVRAM */
+void init_prep_nvram(void);
+char *prep_nvram_get_var(const char *name);
+char *prep_nvram_first_var(void);
+char *prep_nvram_next_var(char *name);
+
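+/*
+ * Usage sketch (illustrative only; assumes printk() is available):
+ *
+ *   char *name;
+ *
+ *   init_prep_nvram();
+ *   for (name = prep_nvram_first_var(); name != 0;
+ *        name = prep_nvram_next_var(name)) {
+ *     printk("%s=%s\n", name, prep_nvram_get_var(name));
+ *   }
+ */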
+#endif /* ASM */
+
+#endif /* _PPC_NVRAM_H */
diff --git a/c/src/lib/libbsp/powerpc/shared/irq/Makefile.in b/c/src/lib/libbsp/powerpc/shared/irq/Makefile.in
new file mode 100644
index 0000000000..bd971f4a10
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/irq/Makefile.in
@@ -0,0 +1,41 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/shared/irq
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+H_FILES = $(srcdir)/irq.h
+
+SRCS = $(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
+
+INSTALLDIRS = $(PROJECT_INCLUDE)/bsp
+
+$(INSTALLDIRS):
+ @$(mkinstalldirs) $(INSTALLDIRS)
+
+preinstall:
+ @$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
+ @$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
+
+all: ${ARCH} $(SRCS) preinstall
+
+install: all
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libbsp/powerpc/shared/irq/i8259.c b/c/src/lib/libbsp/powerpc/shared/irq/i8259.c
new file mode 100644
index 0000000000..00ed073956
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/irq/i8259.c
@@ -0,0 +1,152 @@
+
+/*
+ * This file contains the implementation of the function described in irq.h
+ * related to Intel 8259 Programmable Interrupt controller.
+ *
+ * Copyright (C) 1998, 1999 valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <bsp.h>
+#include <bsp/irq.h>
+
+/*-------------------------------------------------------------------------+
+| Cache for the 1st and 2nd PIC interrupt mask registers (IRQ enabled or disabled).
++--------------------------------------------------------------------------*/
+/*
+ * The lower byte is the interrupt mask on the master PIC, while the upper
+ * byte is the interrupt mask on the slave PIC. A set bit masks the
+ * corresponding IRQ line; the initial value 0xfffb leaves only IRQ2
+ * (the cascade to the slave PIC) unmasked.
+ */
+volatile rtems_i8259_masks i8259s_cache = 0xfffb;
+
+/*-------------------------------------------------------------------------+
+| Function: BSP_irq_disable_at_i8259s
+| Description: Mask IRQ line in appropriate PIC chip.
+| Global Variables: i8259s_cache
+| Arguments: irqLine - number of IRQ line to mask.
+| Returns: 0 on success, 1 if irqLine is out of range.
++--------------------------------------------------------------------------*/
+int BSP_irq_disable_at_i8259s (const rtems_irq_symbolic_name irqLine)
+{
+ unsigned short mask;
+ unsigned int level;
+
+ if ( ((int)irqLine < BSP_ISA_IRQ_LOWEST_OFFSET) ||
+ ((int)irqLine > BSP_ISA_IRQ_MAX_OFFSET)
+ )
+ return 1;
+
+ _CPU_ISR_Disable(level);
+
+ mask = 1 << irqLine;
+ i8259s_cache |= mask;
+
+ if (irqLine < 8)
+ {
+ outport_byte(PIC_MASTER_IMR_IO_PORT, i8259s_cache & 0xff);
+ }
+ else
+ {
+ outport_byte(PIC_SLAVE_IMR_IO_PORT, ((i8259s_cache & 0xff00) >> 8));
+ }
+ _CPU_ISR_Enable (level);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------+
+| Function: BSP_irq_enable_at_i8259s
+| Description: Unmask IRQ line in appropriate PIC chip.
+| Global Variables: i8259s_cache
+| Arguments: irqLine - number of IRQ line to unmask.
+| Returns: 0 on success, 1 if irqLine is out of range.
++--------------------------------------------------------------------------*/
+int BSP_irq_enable_at_i8259s (const rtems_irq_symbolic_name irqLine)
+{
+ unsigned short mask;
+ unsigned int level;
+
+ if ( ((int)irqLine < BSP_ISA_IRQ_LOWEST_OFFSET) ||
+ ((int)irqLine > BSP_ISA_IRQ_MAX_OFFSET )
+ )
+ return 1;
+
+ _CPU_ISR_Disable(level);
+
+ mask = ~(1 << irqLine);
+ i8259s_cache &= mask;
+
+ if (irqLine < 8)
+ {
+ outport_byte(PIC_MASTER_IMR_IO_PORT, i8259s_cache & 0xff);
+ }
+ else
+ {
+ outport_byte(PIC_SLAVE_IMR_IO_PORT, ((i8259s_cache & 0xff00) >> 8));
+ }
+ _CPU_ISR_Enable (level);
+
+ return 0;
+} /* BSP_irq_enable_at_i8259s */
+
+int BSP_irq_enabled_at_i8259s (const rtems_irq_symbolic_name irqLine)
+{
+ unsigned short mask;
+
+ if ( ((int)irqLine < BSP_ISA_IRQ_LOWEST_OFFSET) ||
+ ((int)irqLine > BSP_ISA_IRQ_MAX_OFFSET)
+ )
+ return 1;
+
+ mask = (1 << irqLine);
+ return (~(i8259s_cache & mask));
+}
+
+
+/*-------------------------------------------------------------------------+
+| Function: BSP_irq_ack_at_i8259s
+| Description: Signal a specific End Of Interrupt (EOSI) to the appropriate PIC.
+| Global Variables: None.
+| Arguments: irqLine - number of IRQ line to acknowledge.
+| Returns: 0.
++--------------------------------------------------------------------------*/
+int BSP_irq_ack_at_i8259s (const rtems_irq_symbolic_name irqLine)
+{
+ if (irqLine >= 8) {
+ outport_byte(PIC_MASTER_COMMAND_IO_PORT, SLAVE_PIC_EOSI);
+ outport_byte(PIC_SLAVE_COMMAND_IO_PORT, (PIC_EOSI | (irqLine - 8)));
+ }
+ else {
+ outport_byte(PIC_MASTER_COMMAND_IO_PORT, (PIC_EOSI | irqLine));
+ }
+
+ return 0;
+
+} /* ackIRQ */
+
+void BSP_i8259s_init(void)
+{
+ /*
+ * init master 8259 interrupt controller
+ */
+ outport_byte(PIC_MASTER_COMMAND_IO_PORT, 0x11); /* Start init sequence */
+ outport_byte(PIC_MASTER_IMR_IO_PORT, 0x00);/* Vector base = 0 */
+  outport_byte(PIC_MASTER_IMR_IO_PORT, 0x04);/* edge triggered, Cascade (slave) on IRQ2 */
+ outport_byte(PIC_MASTER_IMR_IO_PORT, 0x01);/* Select 8086 mode */
+ outport_byte(PIC_MASTER_IMR_IO_PORT, 0xFB); /* Mask all except cascade */
+ /*
+ * init slave interrupt controller
+ */
+ outport_byte(PIC_SLAVE_COMMAND_IO_PORT, 0x11); /* Start init sequence */
+ outport_byte(PIC_SLAVE_IMR_IO_PORT, 0x08);/* Vector base = 8 */
+  outport_byte(PIC_SLAVE_IMR_IO_PORT, 0x02);/* Slave ID = 2 (cascaded on master IRQ2) */
+ outport_byte(PIC_SLAVE_IMR_IO_PORT, 0x01); /* Select 8086 mode */
+ outport_byte(PIC_SLAVE_IMR_IO_PORT, 0xFF); /* Mask all */
+
+}
+
diff --git a/c/src/lib/libbsp/powerpc/shared/irq/irq.c b/c/src/lib/libbsp/powerpc/shared/irq/irq.c
new file mode 100644
index 0000000000..4c2226a48f
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/irq/irq.c
@@ -0,0 +1,398 @@
+/*
+ *
+ * This file contains the implementation of the function described in irq.h
+ *
+ * Copyright (C) 1998, 1999 valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <bsp.h>
+#include <bsp/irq.h>
+#include <bsp/openpic.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/apiext.h>
+#include <libcpu/raw_exception.h>
+#include <bsp/vectors.h>
+#include <libcpu/cpu.h>
+
+#define RAVEN_INTR_ACK_REG 0xfeff0030
+
+/*
+ * Table of masks representing the additional irq vectors
+ * that must be disabled when a particular entry is activated.
+ * They will be dynamically computed from the priority table given
+ * in BSP_rtems_irq_mngt_set();
+ * CAUTION : this table is accessed directly by interrupt routine
+ * prologue.
+ */
+rtems_i8259_masks irq_mask_or_tbl[BSP_IRQ_NUMBER];
+/*
+ * default handler connected on each irq after bsp initialization
+ */
+static rtems_irq_connect_data default_rtems_entry;
+
+/*
+ * location used to store initial tables used for interrupt
+ * management.
+ */
+static rtems_irq_global_settings* internal_config;
+static rtems_irq_connect_data* rtems_hdl_tbl;
+
+/*
+ * Check if IRQ is an ISA IRQ
+ */
+static inline int is_isa_irq(const rtems_irq_symbolic_name irqLine)
+{
+ return (((int) irqLine <= BSP_ISA_IRQ_MAX_OFFSET) &
+ ((int) irqLine >= BSP_ISA_IRQ_LOWEST_OFFSET)
+ );
+}
+
+/*
+ * Check if IRQ is an OPENPIC IRQ
+ */
+static inline int is_pci_irq(const rtems_irq_symbolic_name irqLine)
+{
+ return (((int) irqLine <= BSP_PCI_IRQ_MAX_OFFSET) &
+ ((int) irqLine >= BSP_PCI_IRQ_LOWEST_OFFSET)
+ );
+}
+
+/*
+ * Check if IRQ is a Processor IRQ
+ */
+static inline int is_processor_irq(const rtems_irq_symbolic_name irqLine)
+{
+ return (((int) irqLine <= BSP_PROCESSOR_IRQ_MAX_OFFSET) &
+ ((int) irqLine >= BSP_PROCESSOR_IRQ_LOWEST_OFFSET)
+ );
+}
+
+
+/*
+ * ------------------------ RTEMS Irq helper functions ----------------
+ */
+
+/*
+ * Caution : this function assumes the variable "internal_config"
+ * is already set and that the tables it contains are still valid
+ * and accessible.
+ */
+static void compute_i8259_masks_from_prio ()
+{
+ unsigned int i;
+ unsigned int j;
+ /*
+ * Always mask at least current interrupt to prevent re-entrance
+ */
+ for (i=BSP_ISA_IRQ_LOWEST_OFFSET; i < BSP_ISA_IRQ_NUMBER; i++) {
+ * ((unsigned short*) &irq_mask_or_tbl[i]) = (1 << i);
+ for (j = BSP_ISA_IRQ_LOWEST_OFFSET; j < BSP_ISA_IRQ_NUMBER; j++) {
+ /*
+ * Mask interrupts at i8259 level that have a lower priority
+ */
+ if (internal_config->irqPrioTbl [i] > internal_config->irqPrioTbl [j]) {
+ * ((unsigned short*) &irq_mask_or_tbl[i]) |= (1 << j);
+ }
+ }
+ }
+}
+
+/*
+ * This function checks that the value given for the irq line
+ * is valid.
+ */
+
+static int isValidInterrupt(int irq)
+{
+ if ( (irq < BSP_LOWEST_OFFSET) || (irq > BSP_MAX_OFFSET))
+ return 0;
+ return 1;
+}
+
+/*
+ * ------------------------ RTEMS Single Irq Handler Mngt Routines ----------------
+ */
+
+int BSP_install_rtems_irq_handler (const rtems_irq_connect_data* irq)
+{
+ unsigned int level;
+
+ if (!isValidInterrupt(irq->name)) {
+ return 0;
+ }
+ /*
+   * Check if the default handler is actually connected. If not, return an error.
+   * You must first get the current handler via BSP_get_current_rtems_irq_handler
+   * and then disconnect it using BSP_remove_rtems_irq_handler.
+   * RATIONALE : to always have the same transition by forcing the user
+   * to get the previous handler before accepting to disconnect.
+ */
+ if (rtems_hdl_tbl[irq->name].hdl != default_rtems_entry.hdl) {
+ return 0;
+ }
+ _CPU_ISR_Disable(level);
+
+ /*
+ * store the data provided by user
+ */
+ rtems_hdl_tbl[irq->name] = *irq;
+
+ if (is_isa_irq(irq->name)) {
+ /*
+ * Enable interrupt at PIC level
+ */
+ BSP_irq_enable_at_i8259s (irq->name);
+ }
+
+ if (is_pci_irq(irq->name)) {
+ /*
+ * Enable interrupt at OPENPIC level
+ */
+ openpic_enable_irq ((int) irq->name - BSP_PCI_IRQ_LOWEST_OFFSET);
+ }
+
+ if (is_processor_irq(irq->name)) {
+ /*
+ * Enable exception at processor level
+ */
+ }
+ /*
+ * Enable interrupt on device
+ */
+ irq->on(irq);
+
+ _CPU_ISR_Enable(level);
+
+ return 1;
+}
+
+
+int BSP_get_current_rtems_irq_handler (rtems_irq_connect_data* irq)
+{
+ if (!isValidInterrupt(irq->name)) {
+ return 0;
+ }
+ *irq = rtems_hdl_tbl[irq->name];
+ return 1;
+}
+
+int BSP_remove_rtems_irq_handler (const rtems_irq_connect_data* irq)
+{
+ unsigned int level;
+
+ if (!isValidInterrupt(irq->name)) {
+ return 0;
+ }
+ /*
+   * Check if the handler passed is the one currently connected. If not, return an error.
+   * You must first get the current handler via BSP_get_current_rtems_irq_handler
+   * before asking for disconnection.
+ * RATIONALE : to always have the same transition by forcing the user
+ * to get the previous handler before accepting to disconnect.
+ */
+ if (rtems_hdl_tbl[irq->name].hdl != irq->hdl) {
+ return 0;
+ }
+ _CPU_ISR_Disable(level);
+
+ if (is_isa_irq(irq->name)) {
+ /*
+ * disable interrupt at PIC level
+ */
+ BSP_irq_disable_at_i8259s (irq->name);
+ }
+ if (is_pci_irq(irq->name)) {
+ /*
+ * disable interrupt at OPENPIC level
+ */
+ openpic_disable_irq ((int) irq->name - BSP_PCI_IRQ_LOWEST_OFFSET);
+ }
+ if (is_processor_irq(irq->name)) {
+ /*
+ * disable exception at processor level
+ */
+ }
+
+ /*
+ * Disable interrupt on device
+ */
+ irq->off(irq);
+
+ /*
+ * restore the default irq value
+ */
+ rtems_hdl_tbl[irq->name] = default_rtems_entry;
+
+ _CPU_ISR_Enable(level);
+
+ return 1;
+}
+
+/*
+ * ------------------------ RTEMS Global Irq Handler Mngt Routines ----------------
+ */
+
+int BSP_rtems_irq_mngt_set(rtems_irq_global_settings* config)
+{
+ int i;
+ unsigned int level;
+ /*
+ * Store various code accelerators
+ */
+ internal_config = config;
+ default_rtems_entry = config->defaultEntry;
+ rtems_hdl_tbl = config->irqHdlTbl;
+
+ _CPU_ISR_Disable(level);
+ /*
+ * set up internal tables used by rtems interrupt prologue
+ */
+ /*
+ * start with ISA IRQ
+ */
+ compute_i8259_masks_from_prio ();
+
+ for (i=BSP_ISA_IRQ_LOWEST_OFFSET; i < BSP_ISA_IRQ_NUMBER; i++) {
+ if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
+ BSP_irq_enable_at_i8259s (i);
+ rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
+ }
+ else {
+ rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
+ BSP_irq_disable_at_i8259s (i);
+ }
+ }
+ /*
+ * must enable slave pic anyway
+ */
+ BSP_irq_enable_at_i8259s (2);
+ /*
+ * continue with PCI IRQ
+ */
+ for (i=BSP_PCI_IRQ_LOWEST_OFFSET; i < BSP_PCI_IRQ_LOWEST_OFFSET + BSP_PCI_IRQ_NUMBER ; i++) {
+ openpic_set_priority(0, internal_config->irqPrioTbl [i]);
+ if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
+ openpic_enable_irq ((int) i - BSP_PCI_IRQ_LOWEST_OFFSET);
+ rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
+ }
+ else {
+ rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
+ openpic_disable_irq ((int) i - BSP_PCI_IRQ_LOWEST_OFFSET);
+ }
+ }
+ /*
+ * Must enable PCI/ISA bridge IRQ
+ */
+ openpic_enable_irq (0);
+ /*
+ * finish with Processor exceptions handled like IRQ
+ */
+ for (i=BSP_PROCESSOR_IRQ_LOWEST_OFFSET; i < BSP_PROCESSOR_IRQ_LOWEST_OFFSET + BSP_PROCESSOR_IRQ_NUMBER; i++) {
+ if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
+ rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
+ }
+ else {
+ rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
+ }
+ }
+ _CPU_ISR_Enable(level);
+ return 1;
+}
+
+int BSP_rtems_irq_mngt_get(rtems_irq_global_settings** config)
+{
+ *config = internal_config;
+ return 0;
+}
+
+static unsigned spuriousIntr = 0;
+/*
+ * High level IRQ handler called from shared_raw_irq_code_entry
+ */
+void C_dispatch_irq_handler (CPU_Interrupt_frame *frame, unsigned int excNum)
+{
+ register unsigned int irq;
+ register unsigned isaIntr; /* boolean */
+ register unsigned oldMask; /* old isa pic masks */
+ register unsigned newMask; /* new isa pic masks */
+ register unsigned msr;
+ register unsigned new_msr;
+
+
+ if (excNum == ASM_DEC_VECTOR) {
+ _CPU_MSR_GET(msr);
+ new_msr = msr | MSR_EE;
+ _CPU_MSR_SET(new_msr);
+
+ rtems_hdl_tbl[BSP_DECREMENTER].hdl();
+
+ _CPU_MSR_SET(msr);
+ return;
+
+ }
+ irq = openpic_irq(0);
+ if (irq == OPENPIC_VEC_SPURIOUS) {
+ ++spuriousIntr;
+ return;
+ }
+ isaIntr = (irq == BSP_PCI_ISA_BRIDGE_IRQ);
+ if (isaIntr) {
+ /*
+ * Acknowledge and read 8259 vector
+ */
+ irq = (unsigned int) (*(unsigned char *) RAVEN_INTR_ACK_REG);
+ /*
+ * store current PIC mask
+ */
+ oldMask = i8259s_cache;
+ newMask = oldMask | irq_mask_or_tbl [irq];
+ i8259s_cache = newMask;
+ outport_byte(PIC_MASTER_IMR_IO_PORT, i8259s_cache & 0xff);
+ outport_byte(PIC_SLAVE_IMR_IO_PORT, ((i8259s_cache & 0xff00) >> 8));
+ BSP_irq_ack_at_i8259s (irq);
+ openpic_eoi(0);
+ }
+ _CPU_MSR_GET(msr);
+ new_msr = msr | MSR_EE;
+ _CPU_MSR_SET(new_msr);
+
+ rtems_hdl_tbl[irq].hdl();
+
+ _CPU_MSR_SET(msr);
+
+ if (isaIntr) {
+ i8259s_cache = oldMask;
+ outport_byte(PIC_MASTER_IMR_IO_PORT, i8259s_cache & 0xff);
+ outport_byte(PIC_SLAVE_IMR_IO_PORT, ((i8259s_cache & 0xff00) >> 8));
+ }
+ else {
+ openpic_eoi(0);
+ }
+}
+
+
+
+void _ThreadProcessSignalsFromIrq (BSP_Exception_frame* ctx)
+{
+ /*
+ * Process pending signals that have not already been
+   * processed by _Thread_Dispatch. This happens quite
+   * infrequently : the ISR must have posted an action
+   * to the currently running thread.
+ */
+ if ( _Thread_Do_post_task_switch_extension ||
+ _Thread_Executing->do_post_task_switch_extension ) {
+ _Thread_Executing->do_post_task_switch_extension = FALSE;
+ _API_extensions_Run_postswitch();
+ }
+ /*
+   * I plan to process other thread-related events here.
+   * This will include DEBUG sessions requested from the keyboard...
+ */
+}
diff --git a/c/src/lib/libbsp/powerpc/shared/irq/irq.h b/c/src/lib/libbsp/powerpc/shared/irq/irq.h
new file mode 100644
index 0000000000..aaf438c8dd
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/irq/irq.h
@@ -0,0 +1,319 @@
+/* irq.h
+ *
+ * This include file describes the data structures and the functions provided
+ * by RTEMS to write interrupt handlers.
+ *
+ * Copyright (C) 1999 valette@crf.canon.fr
+ *
+ * This code is heavily inspired by the public specification of STREAM V2
+ * that can be found at :
+ *
+ * <http://www.chorus.com/Documentation/index.html> by following
+ * the STREAM API Specification Document link.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#ifndef LIBBSP_POWERPC_MCP750_IRQ_IRQ_H
+#define LIBBSP_POWERPC_MCP750_IRQ_IRQ_H
+
+
+/*
+ * 8259 edge/level control definitions at VIA
+ */
+#define ISA8259_M_ELCR 0x4d0
+#define ISA8259_S_ELCR 0x4d1
+
+#define ELCRS_INT15_LVL 0x80
+#define ELCRS_INT14_LVL 0x40
+#define ELCRS_INT13_LVL 0x20
+#define ELCRS_INT12_LVL 0x10
+#define ELCRS_INT11_LVL 0x08
+#define ELCRS_INT10_LVL 0x04
+#define ELCRS_INT9_LVL 0x02
+#define ELCRS_INT8_LVL 0x01
+#define ELCRM_INT7_LVL 0x80
+#define ELCRM_INT6_LVL 0x40
+#define ELCRM_INT5_LVL 0x20
+#define ELCRM_INT4_LVL 0x10
+#define ELCRM_INT3_LVL 0x8
+#define ELCRM_INT2_LVL 0x4
+#define ELCRM_INT1_LVL 0x2
+#define ELCRM_INT0_LVL 0x1
+
+#define BSP_ASM_IRQ_VECTOR_BASE 0x0
+ /* PIC's command and mask registers */
+#define PIC_MASTER_COMMAND_IO_PORT 0x20 /* Master PIC command register */
+#define PIC_SLAVE_COMMAND_IO_PORT 0xa0 /* Slave PIC command register */
+#define PIC_MASTER_IMR_IO_PORT 0x21 /* Master PIC Interrupt Mask Register */
+#define PIC_SLAVE_IMR_IO_PORT 0xa1 /* Slave PIC Interrupt Mask Register */
+
+ /* Command for specific EOI (End Of Interrupt): Interrupt acknowledge */
+#define PIC_EOSI 0x60 /* End of Specific Interrupt (EOSI) */
+#define SLAVE_PIC_EOSI 0x62 /* End of Specific Interrupt (EOSI) for cascade */
+#define PIC_EOI 0x20 /* Generic End of Interrupt (EOI) */
+
+#ifndef ASM
+
+
+/*
+ * Symbolic IRQ names and related definitions.
+ */
+
+typedef enum {
+ /* Base vector for our ISA IRQ handlers. */
+ BSP_ISA_IRQ_VECTOR_BASE = BSP_ASM_IRQ_VECTOR_BASE,
+ /*
+ * ISA IRQ handler related definitions
+ */
+ BSP_ISA_IRQ_NUMBER = 16,
+ BSP_ISA_IRQ_LOWEST_OFFSET = 0,
+ BSP_ISA_IRQ_MAX_OFFSET = BSP_ISA_IRQ_LOWEST_OFFSET + BSP_ISA_IRQ_NUMBER - 1,
+ /*
+ * PCI IRQ handlers related definitions
+ * CAUTION : BSP_PCI_IRQ_LOWEST_OFFSET should be equal to OPENPIC_VEC_SOURCE
+ */
+ BSP_PCI_IRQ_NUMBER = 16,
+ BSP_PCI_IRQ_LOWEST_OFFSET = BSP_ISA_IRQ_NUMBER,
+ BSP_PCI_IRQ_MAX_OFFSET = BSP_PCI_IRQ_LOWEST_OFFSET + BSP_PCI_IRQ_NUMBER - 1,
+ /*
+     * PowerPC exceptions handled as interrupts where an RTEMS managed
+     * interrupt handler might be connected
+ */
+ BSP_PROCESSOR_IRQ_NUMBER = 1,
+ BSP_PROCESSOR_IRQ_LOWEST_OFFSET = BSP_PCI_IRQ_MAX_OFFSET + 1,
+ BSP_PROCESSOR_IRQ_MAX_OFFSET = BSP_PROCESSOR_IRQ_LOWEST_OFFSET + BSP_PROCESSOR_IRQ_NUMBER - 1,
+ /*
+ * Summary
+ */
+ BSP_IRQ_NUMBER = BSP_PROCESSOR_IRQ_MAX_OFFSET + 1,
+ BSP_LOWEST_OFFSET = BSP_ISA_IRQ_LOWEST_OFFSET,
+ BSP_MAX_OFFSET = BSP_PROCESSOR_IRQ_MAX_OFFSET,
+ /*
+ * Some ISA IRQ symbolic name definition
+ */
+ BSP_ISA_PERIODIC_TIMER = 0,
+
+ BSP_ISA_KEYBOARD = 1,
+
+ BSP_ISA_UART_COM2_IRQ = 3,
+
+ BSP_ISA_UART_COM1_IRQ = 4,
+
+ BSP_ISA_RT_TIMER1 = 8,
+
+ BSP_ISA_RT_TIMER3 = 10,
+ /*
+ * Some PCI IRQ symbolic name definition
+ */
+ BSP_PCI_IRQ0 = BSP_PCI_IRQ_LOWEST_OFFSET,
+ BSP_PCI_ISA_BRIDGE_IRQ = BSP_PCI_IRQ0,
+ /*
+     * Some processor exceptions handled as RTEMS IRQ symbolic name definitions
+ */
+ BSP_DECREMENTER = BSP_PROCESSOR_IRQ_LOWEST_OFFSET
+
+}rtems_irq_symbolic_name;
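+
+/*
+ * Resulting vector layout (derived from the definitions above):
+ *   ISA IRQs       :  0 .. 15
+ *   PCI IRQs       : 16 .. 31
+ *   processor IRQs : 32 (decrementer only)
+ * hence BSP_IRQ_NUMBER is 33.
+ */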
+
+
+
+
+/*
+ * Type definition for RTEMS managed interrupts
+ */
+typedef unsigned char rtems_irq_prio;
+typedef unsigned short rtems_i8259_masks;
+
+extern volatile rtems_i8259_masks i8259s_cache;
+
+struct __rtems_irq_connect_data__;	/* forward declaration */
+
+typedef void (*rtems_irq_hdl) (void);
+typedef void (*rtems_irq_enable) (const struct __rtems_irq_connect_data__*);
+typedef void (*rtems_irq_disable) (const struct __rtems_irq_connect_data__*);
+typedef int (*rtems_irq_is_enabled) (const struct __rtems_irq_connect_data__*);
+
+typedef struct __rtems_irq_connect_data__ {
+ /*
+ * IRQ line
+ */
+ rtems_irq_symbolic_name name;
+ /*
+ * handler. See comment on handler properties below in function prototype.
+ */
+ rtems_irq_hdl hdl;
+ /*
+ * function for enabling interrupts at device level (ONLY!).
+ * The BSP code will automatically enable it at i8259s level and openpic level.
+ * RATIONALE : anyway such code has to exist in current driver code.
+ * It is usually called immediately AFTER connecting the interrupt handler.
+ * RTEMS may well need such a function when restoring normal interrupt
+ * processing after a debug session.
+ *
+ */
+ rtems_irq_enable on;
+ /*
+ * function for disabling interrupts at device level (ONLY!).
+ * The code will disable it at i8259s level. RATIONALE : anyway
+ * such code has to exist for clean shutdown. It is usually called
+ * BEFORE disconnecting the interrupt. RTEMS may well need such
+ * a function when disabling normal interrupt processing for
+ * a debug session. May well be a NOP function.
+ */
+ rtems_irq_disable off;
+ /*
+   * function that makes it possible to know which interrupts may currently occur
+   * if someone manipulates the i8259s interrupt mask without care...
+ */
+ rtems_irq_is_enabled isOn;
+}rtems_irq_connect_data;
+
+typedef struct {
+ /*
+ * size of all the table fields (*Tbl) described below.
+ */
+ unsigned int irqNb;
+ /*
+ * Default handler used when disconnecting interrupts.
+ */
+ rtems_irq_connect_data defaultEntry;
+ /*
+   * Table containing the initial/current values.
+ */
+ rtems_irq_connect_data* irqHdlTbl;
+ /*
+ * actual value of BSP_ISA_IRQ_VECTOR_BASE...
+ */
+ rtems_irq_symbolic_name irqBase;
+ /*
+ * software priorities associated with interrupts.
+   * if irqPrioTbl [i]  >  irqPrioTbl [j] it means that the
+   * interrupt handler hdl connected for interrupt name i
+   * will not be interrupted by the handler connected for interrupt j.
+   * The lower priority interrupt source will be physically masked at i8259 level.
+ */
+ rtems_irq_prio* irqPrioTbl;
+}rtems_irq_global_settings;
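+
+/*
+ * Example (hypothetical priority values): with
+ *   irqPrioTbl[BSP_ISA_UART_COM1_IRQ] = 2 and
+ *   irqPrioTbl[BSP_ISA_UART_COM2_IRQ] = 1,
+ * the COM2 line is masked at the i8259 while the COM1 handler runs, so the
+ * COM1 handler cannot be preempted by COM2; the converse is not true.
+ */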
+
+
+
+
+/*-------------------------------------------------------------------------+
+| Function Prototypes.
++--------------------------------------------------------------------------*/
+/*
+ * ------------------------ Intel 8259 (or emulation) Mngt Routines -------
+ */
+
+/*
+ * function to disable a particular irq at 8259 level. After calling
+ * this function, even if the device asserts the interrupt line it will
+ * not be propagated further to the processor
+ */
+int BSP_irq_disable_at_i8259s (const rtems_irq_symbolic_name irqLine);
+/*
+ * function to enable a particular irq at 8259 level. After calling
+ * this function, if the device asserts the interrupt line it will
+ * be propagated further to the processor
+ */
+int BSP_irq_enable_at_i8259s (const rtems_irq_symbolic_name irqLine);
+/*
+ * function to acknowledge a particular irq at 8259 level. After calling
+ * this function, if a device asserts an enabled interrupt line it will
+ * be propagated further to the processor. Mainly useful for people
+ * writing raw handlers as this is automatically done for rtems managed
+ * handlers.
+ */
+int BSP_irq_ack_at_i8259s (const rtems_irq_symbolic_name irqLine);
+/*
+ * function to check if a particular irq is enabled at 8259 level.
+ */
+int BSP_irq_enabled_at_i8259s (const rtems_irq_symbolic_name irqLine);
+/*
+ * ------------------------ RTEMS Single Irq Handler Mngt Routines ----------------
+ */
+/*
+ * function to connect a particular irq handler. This handler will NOT be called
+ * directly as the result of the corresponding interrupt. Instead, an RTEMS
+ * irq prologue will be called that will :
+ *
+ *	1) save the C scratch registers,
+ *	2) switch to an interrupt stack if the interrupt is not nested,
+ *	3) store the current i8259s' interrupt masks,
+ *	4) modify them to disable the current interrupt at 8259 level (and
+ *	possibly others depending on software priorities),
+ *	5) acknowledge the i8259s',
+ *	6) unmask the processor,
+ *	7) call the application handler.
+ *
+ * As a result the hdl function provided
+ *
+ *	a) can perfectly well be written in C,
+ *	b) may also directly call the part of the RTEMS API that can be used
+ *	from interrupt level,
+ *	c) is only responsible for the jobs that need to be done at
+ *	the device level (acknowledging/re-enabling the interrupt at device
+ *	level, getting the data, ...).
+ *
+ * When returning from the function, the following will be performed by
+ * the RTEMS irq epilogue :
+ *
+ *	1) mask the interrupts again,
+ *	2) restore the original i8259s' interrupt masks,
+ *	3) switch back to the original stack if needed,
+ *	4) perform rescheduling when necessary,
+ *	5) restore the C scratch registers,
+ *	6) restore the initial execution flow.
+ *
+ */
+int BSP_install_rtems_irq_handler (const rtems_irq_connect_data*);
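+/*
+ * Connection sketch (illustrative only; my_isr, my_dev_on, my_dev_off and
+ * my_dev_is_on are hypothetical driver supplied functions):
+ *
+ *   static rtems_irq_connect_data my_irq = {
+ *     BSP_ISA_UART_COM1_IRQ, my_isr, my_dev_on, my_dev_off, my_dev_is_on
+ *   };
+ *
+ *   if (!BSP_install_rtems_irq_handler(&my_irq))
+ *     BSP_panic("unable to install COM1 interrupt handler\n");
+ */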
+/*
+ * function to get the current RTEMS irq handler for ptr->name. It makes it
+ * possible to define handler chains...
+ */
+int BSP_get_current_rtems_irq_handler (rtems_irq_connect_data* ptr);
+/*
+ * function to disconnect the RTEMS irq handler for ptr->name.
+ * This function checks that the value given is the current one for safety reasons.
+ * The user can use the previous function to get it.
+ */
+int BSP_remove_rtems_irq_handler (const rtems_irq_connect_data*);
+
+/*
+ * ------------------------ RTEMS Global Irq Handler Mngt Routines ----------------
+ */
+/*
+ * (Re) Initialize the RTEMS interrupt management.
+ *
+ * The result of calling this function will be the same as if each individual
+ * handler (config->irqHdlTbl[i].hdl) different from "config->defaultEntry.hdl"
+ * has been individually connected via
+ * BSP_install_rtems_irq_handler(&config->irqHdlTbl[i])
+ * And each handler currently equal to config->defaultEntry.hdl
+ * has been previously disconnected via
+ * BSP_remove_rtems_irq_handler (&config->irqHdlTbl[i])
+ *
+ * This is to say that all of the information given will be used,
+ * not just the space.
+ *
+ * CAUTION : the various table addresses contained in config will be used
+ *	     directly by the interrupt management code in order to save
+ * data size so they must stay valid after the call => they should
+ * not be modified or declared on a stack.
+ */
+
+int BSP_rtems_irq_mngt_set(rtems_irq_global_settings* config);
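+/*
+ * Initialization sketch (illustrative only; defaultEntry is a hypothetical
+ * default connect data; the BSP itself performs this in irq_init.c):
+ *
+ *   static rtems_irq_connect_data    irqTbl[BSP_IRQ_NUMBER];
+ *   static rtems_irq_prio            prioTbl[BSP_IRQ_NUMBER];
+ *   static rtems_irq_global_settings cfg;
+ *
+ *   cfg.irqNb        = BSP_IRQ_NUMBER;
+ *   cfg.defaultEntry = defaultEntry;
+ *   cfg.irqHdlTbl    = irqTbl;
+ *   cfg.irqBase      = BSP_ASM_IRQ_VECTOR_BASE;
+ *   cfg.irqPrioTbl   = prioTbl;
+ *
+ *   if (!BSP_rtems_irq_mngt_set(&cfg))
+ *     BSP_panic("unable to initialize interrupt management\n");
+ */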
+/*
+ * (Re) get info on current RTEMS interrupt management.
+ */
+int BSP_rtems_irq_mngt_get(rtems_irq_global_settings**);
+
+extern void BSP_rtems_irq_mng_init(unsigned cpuId);
+extern void BSP_i8259s_init(void);
+#endif /* ASM */
+
+#endif /* LIBBSP_POWERPC_MCP750_IRQ_IRQ_H */
diff --git a/c/src/lib/libbsp/powerpc/shared/irq/irq_asm.S b/c/src/lib/libbsp/powerpc/shared/irq/irq_asm.S
new file mode 100644
index 0000000000..043108155f
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/irq/irq_asm.S
@@ -0,0 +1,322 @@
+/*
+ * This file contains the assembly code for the PowerPC
+ * IRQ veneers for RTEMS.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * Modified to support the MCP750.
+ * Modifications Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ *
+ * $Id$
+ */
+
+#include <bsp/vectors.h>
+#include <libcpu/cpu.h>
+#include <libcpu/raw_exception.h>
+#include <rtems/score/targopts.h>
+#include "asm.h"
+
+
+#define SYNC \
+ sync; \
+ isync
+
+ .text
+ .p2align 5
+
+ PUBLIC_VAR(decrementer_exception_vector_prolog_code)
+
+SYM (decrementer_exception_vector_prolog_code):
+ /*
+	 * leave room for the exception frame
+ */
+ stwu r1, - (EXCEPTION_FRAME_END)(r1)
+ stw r4, GPR4_OFFSET(r1)
+ li r4, ASM_DEC_VECTOR
+ ba shared_raw_irq_code_entry
+
+ PUBLIC_VAR (decrementer_exception_vector_prolog_code_size)
+
+ decrementer_exception_vector_prolog_code_size = . - decrementer_exception_vector_prolog_code
+
+ PUBLIC_VAR(external_exception_vector_prolog_code)
+
+SYM (external_exception_vector_prolog_code):
+ /*
+	 * leave room for the exception frame
+ */
+ stwu r1, - (EXCEPTION_FRAME_END)(r1)
+ stw r4, GPR4_OFFSET(r1)
+ li r4, ASM_EXT_VECTOR
+ ba shared_raw_irq_code_entry
+
+ PUBLIC_VAR (external_exception_vector_prolog_code_size)
+
+ external_exception_vector_prolog_code_size = . - external_exception_vector_prolog_code
+
+ PUBLIC_VAR(shared_raw_irq_code_entry)
+ PUBLIC_VAR(C_dispatch_irq_handler)
+
+ .p2align 5
+SYM (shared_raw_irq_code_entry):
+ /*
+ * Entry conditions :
+ * Registers already saved : R1, R4
+ * R1 : points to a location with enough room for the
+ * interrupt frame
+ * R4 : vector number
+ */
+ /*
+	 * Save SRR0/SRR1 as soon as possible as this is the minimum needed
+	 * to re-enable exception processing
+ */
+ stw r0, GPR0_OFFSET(r1)
+ stw r2, GPR2_OFFSET(r1)
+ stw r3, GPR3_OFFSET(r1)
+
+ mfsrr0 r0
+ mfsrr1 r2
+ mfmsr r3
+
+ stw r0, SRR0_FRAME_OFFSET(r1)
+ stw r2, SRR1_FRAME_OFFSET(r1)
+ /*
+ * Enable data and instruction address translation, exception recovery
+ */
+ ori r3, r3, MSR_RI | MSR_IR | MSR_DR
+ mtmsr r3
+ SYNC
+ /*
+ * Push C scratch registers on the current stack. It may
+ * actually be the thread stack or the interrupt stack.
+	 * Anyway we have to do it in order to be able to call C/C++
+	 * functions. Depending on the interrupt nesting level, we will
+ * switch to the right stack later.
+ */
+ stw r5, GPR5_OFFSET(r1)
+ stw r6, GPR6_OFFSET(r1)
+ stw r7, GPR7_OFFSET(r1)
+ stw r8, GPR8_OFFSET(r1)
+ stw r9, GPR9_OFFSET(r1)
+ stw r10, GPR10_OFFSET(r1)
+ stw r11, GPR11_OFFSET(r1)
+ stw r12, GPR12_OFFSET(r1)
+ stw r13, GPR13_OFFSET(r1)
+
+ mfcr r5
+ mfctr r6
+ mfxer r7
+ mflr r8
+
+ stw r5, EXC_CR_OFFSET(r1)
+ stw r6, EXC_CTR_OFFSET(r1)
+ stw r7, EXC_XER_OFFSET(r1)
+ stw r8, EXC_LR_OFFSET(r1)
+
+ /*
+	 * Save some non-volatile registers to store information
+	 * that will be used when returning from the C handler
+ */
+ stw r14, GPR14_OFFSET(r1)
+ stw r15, GPR15_OFFSET(r1)
+ /*
+ * save current stack pointer location in R14
+ */
+ addi r14, r1, 0
+ /*
+ * store part of _Thread_Dispatch_disable_level address in R15
+ */
+ addis r15,0, _Thread_Dispatch_disable_level@ha
+ /*
+ * Get current nesting level in R2
+ */
+ mfspr r2, SPRG0
+ /*
+ * Check if stack switch is necessary
+ */
+ cmpwi r2,0
+ bne nested
+ mfspr r1, SPRG1
+
+nested:
+ /*
+ * Start Incrementing nesting level in R2
+ */
+ addi r2,r2,1
+ /*
+	 * Start incrementing _Thread_Dispatch_disable_level: R6 = _Thread_Dispatch_disable_level
+ */
+ lwz r6,_Thread_Dispatch_disable_level@l(r15)
+ /*
+ * store new nesting level in SPRG0
+ */
+ mtspr SPRG0, r2
+
+ addi r6, r6, 1
+ mfmsr r5
+ /*
+ * store new _Thread_Dispatch_disable_level value
+ */
+ stw r6, _Thread_Dispatch_disable_level@l(r15)
+ /*
+ * We are now running on the interrupt stack. External and decrementer
+	 * exceptions are still disabled. I see no point in trying to optimize
+	 * the assembler code any further.
+ */
+ /*
+	 * Call the C dispatch handler. The interrupt frame is passed just
+	 * in case...
+ */
+ addi r3, r14, 0x8
+ bl C_dispatch_irq_handler /* C_dispatch_irq_handler(cpu_interrupt_frame* r3, vector r4) */
+ /*
+ * start decrementing nesting level. Note : do not test result against 0
+ * value as an easy exit condition because if interrupt nesting level > 1
+ * then _Thread_Dispatch_disable_level > 1
+ */
+ mfspr r2, SPRG0
+ /*
+ * start decrementing _Thread_Dispatch_disable_level
+ */
+ lwz r3,_Thread_Dispatch_disable_level@l(r15)
+ addi r2, r2, -1 /* Continue decrementing nesting level */
+ addi r3, r3, -1 /* Continue decrementing _Thread_Dispatch_disable_level */
+ mtspr SPRG0, r2 /* End decrementing nesting level */
+ stw r3,_Thread_Dispatch_disable_level@l(r15) /* End decrementing _Thread_Dispatch_disable_level */
+ cmpwi r3, 0
+ /*
+	 * switch back to the original stack (done here just to reduce register
+	 * contention; could have been done before...)
+ */
+ addi r1, r14, 0
+ bne easy_exit /* if (_Thread_Dispatch_disable_level != 0) goto easy_exit */
+ /*
+ * Here we are running again on the thread system stack.
+ * We have interrupt nesting level = _Thread_Dispatch_disable_level = 0.
+	 * Interrupts are still disabled. Time to check if the scheduler requested
+	 * to do something with the current thread...
+ */
+ addis r4, 0, _Context_Switch_necessary@ha
+ lwz r5, _Context_Switch_necessary@l(r4)
+ cmpwi r5, 0
+ bne switch
+
+ addis r6, 0, _ISR_Signals_to_thread_executing@ha
+ lwz r7, _ISR_Signals_to_thread_executing@l(r6)
+ cmpwi r7, 0
+ li r8, 0
+ beq easy_exit
+ stw r8, _ISR_Signals_to_thread_executing@l(r6)
+ /*
+ * going to call _ThreadProcessSignalsFromIrq
+ * Push a complete exception like frame...
+ */
+ stmw r16, GPR16_OFFSET(r1)
+ addi r3, r1, 0x8
+ /*
+ * compute SP at exception entry
+ */
+ addi r2, r1, EXCEPTION_FRAME_END
+ /*
+ * store it at the right place
+ */
+ stw r2, GPR1_OFFSET(r1)
+ /*
+ * Call High Level signal handling code
+ */
+ bl _ISR_Signals_to_thread_executing
+ /*
+ * start restoring exception like frame
+ */
+ lwz r31, EXC_CTR_OFFSET(r1)
+ lwz r30, EXC_XER_OFFSET(r1)
+ lwz r29, EXC_CR_OFFSET(r1)
+ lwz r28, EXC_LR_OFFSET(r1)
+
+ mtctr r31
+ mtxer r30
+ mtcr r29
+ mtlr r28
+
+ lmw r4, GPR4_OFFSET(r1)
+ lwz r2, GPR2_OFFSET(r1)
+ lwz r0, GPR0_OFFSET(r1)
+
+ /*
+	 * Disable data and instruction translation. Make the path non-recoverable...
+ */
+ mfmsr r3
+ xori r3, r3, MSR_RI | MSR_IR | MSR_DR
+ mtmsr r3
+ SYNC
+ /*
+ * Restore rfi related settings
+ */
+
+ lwz r3, SRR1_FRAME_OFFSET(r1)
+ mtsrr1 r3
+ lwz r3, SRR0_FRAME_OFFSET(r1)
+ mtsrr0 r3
+
+ lwz r3, GPR3_OFFSET(r1)
+ addi r1,r1, EXCEPTION_FRAME_END
+ SYNC
+ rfi
+
+switch:
+ bl SYM (_Thread_Dispatch)
+
+easy_exit:
+ /*
+ * start restoring interrupt frame
+ */
+ lwz r3, EXC_CTR_OFFSET(r1)
+ lwz r4, EXC_XER_OFFSET(r1)
+ lwz r5, EXC_CR_OFFSET(r1)
+ lwz r6, EXC_LR_OFFSET(r1)
+
+ mtctr r3
+ mtxer r4
+ mtcr r5
+ mtlr r6
+
+ lwz r15, GPR15_OFFSET(r1)
+ lwz r14, GPR14_OFFSET(r1)
+ lwz r13, GPR13_OFFSET(r1)
+ lwz r12, GPR12_OFFSET(r1)
+ lwz r11, GPR11_OFFSET(r1)
+ lwz r10, GPR10_OFFSET(r1)
+ lwz r9, GPR9_OFFSET(r1)
+ lwz r8, GPR8_OFFSET(r1)
+ lwz r7, GPR7_OFFSET(r1)
+ lwz r6, GPR6_OFFSET(r1)
+ lwz r5, GPR5_OFFSET(r1)
+
+ /*
+ * Disable nested exception processing, data and instruction
+ * translation.
+ */
+ mfmsr r3
+ xori r3, r3, MSR_RI | MSR_IR | MSR_DR
+ mtmsr r3
+ SYNC
+ /*
+ * Restore rfi related settings
+ */
+
+ lwz r4, SRR1_FRAME_OFFSET(r1)
+ lwz r2, SRR0_FRAME_OFFSET(r1)
+ lwz r3, GPR3_OFFSET(r1)
+ lwz r0, GPR0_OFFSET(r1)
+
+ mtsrr1 r4
+ mtsrr0 r2
+ lwz r4, GPR4_OFFSET(r1)
+ lwz r2, GPR2_OFFSET(r1)
+ addi r1,r1, EXCEPTION_FRAME_END
+ SYNC
+ rfi
+
diff --git a/c/src/lib/libbsp/powerpc/shared/irq/irq_init.c b/c/src/lib/libbsp/powerpc/shared/irq/irq_init.c
new file mode 100644
index 0000000000..6253ec789e
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/irq/irq_init.c
@@ -0,0 +1,315 @@
+/* irq_init.c
+ *
+ * This file contains the implementation of rtems initialization
+ * related to interrupt handling.
+ *
+ * Copyright (C) 1999 valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+#include <bsp/consoleIo.h>
+#include <libcpu/io.h>
+#include <libcpu/spr.h>
+#include <bsp/pci.h>
+#include <bsp/residual.h>
+#include <bsp/openpic.h>
+#include <bsp/irq.h>
+#include <bsp.h>
+#include <libcpu/raw_exception.h>
+#include <bsp/motorola.h>
+
+typedef struct {
+    unsigned char bus;	/* unlikely that the PCI/ISA bridge is not on the first bus, but ... */
+ unsigned char device;
+ unsigned char function;
+} pci_isa_bridge_device;
+
+pci_isa_bridge_device* via_82c586 = 0;
+static pci_isa_bridge_device bridge;
+
+extern unsigned int external_exception_vector_prolog_code_size;
+extern void external_exception_vector_prolog_code();
+extern unsigned int decrementer_exception_vector_prolog_code_size;
+extern void decrementer_exception_vector_prolog_code();
+
+/*
+ * default on/off function
+ */
+static void nop_func(){}
+/*
+ * default isOn function
+ */
+static int not_connected() {return 0;}
+/*
+ * default possible isOn function
+ */
+static int connected() {return 1;}
+
+static rtems_irq_connect_data rtemsIrq[BSP_IRQ_NUMBER];
+static rtems_irq_global_settings initial_config;
+static rtems_irq_connect_data defaultIrq = {
+		     /* name,		 hdl	  , on		, off		, isOn */
+ 0, nop_func , nop_func , nop_func , not_connected
+};
+static rtems_irq_prio irqPrioTable[BSP_IRQ_NUMBER]={
+ /*
+   * actual priorities for interrupts :
+ * 0 means that only current interrupt is masked
+ * 255 means all other interrupts are masked
+ */
+ /*
+ * ISA interrupts.
+   * The entry for IRQ2 has a priority of 255 because
+   * it is the slave PIC cascade entry and it should always remain
+   * unmasked.
+ */
+ 0,0,
+ 255,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /*
+ * PCI Interrupts
+ */
+  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* for raven prio 0 means inactive... */
+ /*
+ * Processor exceptions handled as interrupts
+ */
+ 0
+};
+
+static unsigned char mcp750_openpic_initsenses[] = {
+ 1, /* MCP750_INT_PCB(8259) */
+ 0, /* MCP750_INT_FALCON_ECC_ERR */
+ 1, /* MCP750_INT_PCI_ETHERNET */
+ 1, /* MCP750_INT_PCI_PMC */
+ 1, /* MCP750_INT_PCI_WATCHDOG_TIMER1 */
+ 1, /* MCP750_INT_PCI_PRST_SIGNAL */
+ 1, /* MCP750_INT_PCI_FALL_SIGNAL */
+ 1, /* MCP750_INT_PCI_DEG_SIGNAL */
+ 1, /* MCP750_INT_PCI_BUS1_INTA */
+ 1, /* MCP750_INT_PCI_BUS1_INTB */
+ 1, /* MCP750_INT_PCI_BUS1_INTC */
+ 1, /* MCP750_INT_PCI_BUS1_INTD */
+ 1, /* MCP750_INT_PCI_BUS2_INTA */
+ 1, /* MCP750_INT_PCI_BUS2_INTB */
+ 1, /* MCP750_INT_PCI_BUS2_INTC */
+ 1, /* MCP750_INT_PCI_BUS2_INTD */
+};
+
+void VIA_isa_bridge_interrupts_setup(void)
+{
+ pci_isa_bridge_device pci_dev;
+ unsigned int temp;
+ unsigned char tmp;
+ unsigned char maxBus;
+ unsigned found = 0;
+
+ maxBus = BusCountPCI();
+  pci_dev.function = 0; /* Assumes the bridge is function 0 */
+
+ for (pci_dev.bus = 0; pci_dev.bus < maxBus; pci_dev.bus++) {
+#ifdef SCAN_PCI_PRINT
+ printk("isa_bridge_interrupts_setup: Scanning bus %d\n", pci_dev.bus);
+#endif
+ for (pci_dev.device = 0; pci_dev.device < PCI_MAX_DEVICES; pci_dev.device++) {
+#ifdef SCAN_PCI_PRINT
+ printk("isa_bridge_interrupts_setup: Scanning device %d\n", pci_dev.device);
+#endif
+ pci_read_config_dword(pci_dev.bus, pci_dev.device, pci_dev.function,
+ PCI_VENDOR_ID, &temp);
+#ifdef SCAN_PCI_PRINT
+ printk("Vendor/device = %x\n", temp);
+#endif
+ if ( (temp == (((unsigned short) PCI_VENDOR_ID_VIA) | (PCI_DEVICE_ID_VIA_82C586_1 << 16)))
+ ||
+ (temp == (((unsigned short) PCI_VENDOR_ID_VIA) | (PCI_DEVICE_ID_VIA_82C586_0 << 16)))
+ ) {
+ bridge = pci_dev;
+ via_82c586 = &bridge;
+#ifdef SHOW_ISA_PCI_BRIDGE_SETTINGS
+ /*
+ * Should print : bus = 0, device = 11, function = 0 on a MCP750.
+ */
+ printk("Via PCI/ISA bridge found at bus = %d, device = %d, function = %d\n",
+ via_82c586->bus,
+ via_82c586->device,
+ via_82c586->function);
+#endif
+ found = 1;
+ goto loop_exit;
+
+ }
+ }
+ }
+loop_exit:
+  if (!found) BSP_panic("VIA_82C586 PCI/ISA bridge not found!\n");
+
+ tmp = inb(0x810);
+ if ( !(tmp & 0x2)) {
+#ifdef SHOW_ISA_PCI_BRIDGE_SETTINGS
+ printk("This is a second generation MCP750 board\n");
+ printk("We must reprogram the PCI/ISA bridge...\n");
+#endif
+ pci_read_config_byte(via_82c586->bus, via_82c586->device, via_82c586->function,
+ 0x47, &tmp);
+#ifdef SHOW_ISA_PCI_BRIDGE_SETTINGS
+ printk(" PCI ISA bridge control2 = %x\n", (unsigned) tmp);
+#endif
+ /*
+ * Enable 4D0/4D1 ISA interrupt level/edge config registers
+ */
+ tmp |= 0x20;
+ pci_write_config_byte(via_82c586->bus, via_82c586->device, via_82c586->function,
+ 0x47, tmp);
+ /*
+ * Now program the ISA interrupt edge/level
+ */
+ tmp = ELCRS_INT9_LVL | ELCRS_INT10_LVL | ELCRS_INT11_LVL;
+ outb(tmp, ISA8259_S_ELCR);
+ tmp = ELCRM_INT5_LVL;
+    outb(tmp, ISA8259_M_ELCR);
+ /*
+ * Set the Interrupt inputs to non-inverting level interrupt
+ */
+ pci_read_config_byte(via_82c586->bus, via_82c586->device, via_82c586->function,
+ 0x54, &tmp);
+#ifdef SHOW_ISA_PCI_BRIDGE_SETTINGS
+ printk(" PCI ISA bridge PCI/IRQ Edge/Level Select = %x\n", (unsigned) tmp);
+#endif
+ tmp = 0;
+ pci_write_config_byte(via_82c586->bus, via_82c586->device, via_82c586->function,
+ 0x54, tmp);
+ }
+ else {
+#ifdef SHOW_ISA_PCI_BRIDGE_SETTINGS
+ printk("This is a first generation MCP750 board\n");
+ printk("We just show the actual value used by PCI/ISA bridge\n");
+#endif
+ pci_read_config_byte(via_82c586->bus, via_82c586->device, via_82c586->function,
+ 0x47, &tmp);
+#ifdef SHOW_ISA_PCI_BRIDGE_SETTINGS
+ printk(" PCI ISA bridge control2 = %x\n", (unsigned) tmp);
+#endif
+ /*
+ * Enable 4D0/4D1 ISA interrupt level/edge config registers
+ */
+ tmp |= 0x20;
+ pci_write_config_byte(via_82c586->bus, via_82c586->device, via_82c586->function,
+ 0x47, tmp);
+#ifdef SHOW_ISA_PCI_BRIDGE_SETTINGS
+ tmp = inb(ISA8259_S_ELCR);
+ printk(" PCI ISA bridge slave edge/level control bit = %x\n", (unsigned) tmp);
+    tmp = inb(ISA8259_M_ELCR);
+ printk(" PCI ISA bridge master edge/level control bit = %x\n", (unsigned) tmp);
+#endif
+ /*
+ * Must disable the 4D0/4D1 ISA interrupt level/edge config registers
+     * or the card will die as soon as we enable external interrupts
+ */
+ pci_read_config_byte(via_82c586->bus, via_82c586->device, via_82c586->function,
+ 0x47, &tmp);
+ tmp &= ~(0x20);
+ pci_write_config_byte(via_82c586->bus, via_82c586->device, via_82c586->function,
+ 0x47, tmp);
+ /*
+ * Show the Interrupt inputs inverting/non-inverting level status
+ */
+ pci_read_config_byte(via_82c586->bus, via_82c586->device, via_82c586->function,
+ 0x54, &tmp);
+#ifdef SHOW_ISA_PCI_BRIDGE_SETTINGS
+ printk(" PCI ISA bridge PCI/IRQ Edge/Level Select = %x\n", (unsigned) tmp);
+#endif
+ }
+}
+
+ /*
+ * This code assumes the exceptions management setup has already
+ * been done. We just need to replace the exceptions that will
+   * be handled like interrupts. On mcp750/mpc750 and many PPC processors
+ * this means the decrementer exception and the external exception.
+ */
+void BSP_rtems_irq_mng_init(unsigned cpuId)
+{
+ rtems_raw_except_connect_data vectorDesc;
+ int known_cpi_isa_bridge = 0;
+ int i;
+
+ /*
+ * First initialize the Interrupt management hardware
+ */
+ OpenPIC_InitSenses = mcp750_openpic_initsenses;
+ OpenPIC_NumInitSenses = sizeof(mcp750_openpic_initsenses) / sizeof(char);
+#ifdef TRACE_IRQ_INIT
+ printk("Going to initialize raven interrupt controller (openpic compliant)\n");
+#endif
+ openpic_init(1);
+#ifdef TRACE_IRQ_INIT
+ printk("Going to initialize the PCI/ISA bridge IRQ related setting (VIA 82C586)\n");
+#endif
+  if ( currentBoard == MESQUITE ) {
+ VIA_isa_bridge_interrupts_setup();
+ known_cpi_isa_bridge = 1;
+ }
+ if (!known_cpi_isa_bridge) {
+ printk("Please add code for PCI/ISA bridge init to libbsp/shared/irq/irq_init.c\n");
+ printk("If your card works correctly please add a test and set known_cpi_isa_bridge to true\n");
+ }
+#ifdef TRACE_IRQ_INIT
+ printk("Going to initialize the ISA PC legacy IRQ management hardware\n");
+#endif
+ BSP_i8259s_init();
+ /*
+ * Initialize Rtems management interrupt table
+ */
+ /*
+ * re-init the rtemsIrq table
+ */
+ for (i = 0; i < BSP_IRQ_NUMBER; i++) {
+ rtemsIrq[i] = defaultIrq;
+ rtemsIrq[i].name = i;
+ }
+ /*
+ * Init initial Interrupt management config
+ */
+ initial_config.irqNb = BSP_IRQ_NUMBER;
+ initial_config.defaultEntry = defaultIrq;
+ initial_config.irqHdlTbl = rtemsIrq;
+ initial_config.irqBase = BSP_ASM_IRQ_VECTOR_BASE;
+ initial_config.irqPrioTbl = irqPrioTable;
+
+ if (!BSP_rtems_irq_mngt_set(&initial_config)) {
+ /*
+ * put something here that will show the failure...
+ */
+ BSP_panic("Unable to initialize RTEMS interrupt Management!!! System locked\n");
+ }
+
+ /*
+ * We must connect the raw irq handler for the two
+ * expected interrupt sources : decrementer and external interrupts.
+ */
+ vectorDesc.exceptIndex = ASM_DEC_VECTOR;
+ vectorDesc.hdl.vector = ASM_DEC_VECTOR;
+ vectorDesc.hdl.raw_hdl = decrementer_exception_vector_prolog_code;
+ vectorDesc.hdl.raw_hdl_size = (unsigned) &decrementer_exception_vector_prolog_code_size;
+ vectorDesc.on = nop_func;
+ vectorDesc.off = nop_func;
+ vectorDesc.isOn = connected;
+ if (!mpc60x_set_exception (&vectorDesc)) {
+ BSP_panic("Unable to initialize RTEMS decrementer raw exception\n");
+ }
+ vectorDesc.exceptIndex = ASM_EXT_VECTOR;
+ vectorDesc.hdl.vector = ASM_EXT_VECTOR;
+ vectorDesc.hdl.raw_hdl = external_exception_vector_prolog_code;
+ vectorDesc.hdl.raw_hdl_size = (unsigned) &external_exception_vector_prolog_code_size;
+ if (!mpc60x_set_exception (&vectorDesc)) {
+ BSP_panic("Unable to initialize RTEMS external raw exception\n");
+ }
+#ifdef TRACE_IRQ_INIT
+ printk("RTEMS IRQ management is now operationnal\n");
+#endif
+}
+
diff --git a/c/src/lib/libbsp/powerpc/shared/motorola/Makefile.in b/c/src/lib/libbsp/powerpc/shared/motorola/Makefile.in
new file mode 100644
index 0000000000..7554571769
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/motorola/Makefile.in
@@ -0,0 +1,41 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/shared/motorola
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+H_FILES = $(srcdir)/motorola.h
+
+SRCS = $(C_FILES) $(H_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
+
+INSTALLDIRS = $(PROJECT_INCLUDE)/bsp
+
+$(INSTALLDIRS):
+ @$(mkinstalldirs) $(INSTALLDIRS)
+
+preinstall:
+ @$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
+ @$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
+
+all: ${ARCH} $(SRCS) preinstall
+
+install: all
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libbsp/powerpc/shared/motorola/motorola.c b/c/src/lib/libbsp/powerpc/shared/motorola/motorola.c
new file mode 100644
index 0000000000..6b8d52eeb7
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/motorola/motorola.c
@@ -0,0 +1,120 @@
+/* motorola.c
+ *
+ *  This file implements the data structures and the functions used
+ *  by rtems to identify motorola boards.
+ *
+ *  Copyright (C) 1999 valette@crf.canon.fr
+ *
+ *  The license and distribution terms for this file may be
+ *  found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+
+#include <bsp/motorola.h>
+#include <libcpu/io.h>
+
+typedef struct {
+ /*
+ * 0x100 mask assumes for Raven and Hawk boards
+ * that the level/edge are set.
+ * 0x200 if this board has a Hawk chip.
+ */
+ int cpu_type;
+ int base_type;
+ const char *name;
+} mot_info_t;
+
+
+static const mot_info_t mot_boards[] = {
+ {0x300, 0x00, "MVME 2400"},
+ {0x010, 0x00, "Genesis"},
+ {0x020, 0x00, "Powerstack (Series E)"},
+ {0x040, 0x00, "Blackhawk (Powerstack)"},
+ {0x050, 0x00, "Omaha (PowerStack II Pro3000)"},
+ {0x060, 0x00, "Utah (Powerstack II Pro4000)"},
+ {0x0A0, 0x00, "Powerstack (Series EX)"},
+ {0x1E0, 0xE0, "Mesquite cPCI (MCP750)"},
+ {0x1E0, 0xE1, "Sitka cPCI (MCPN750)"},
+ {0x1E0, 0xE2, "Mesquite cPCI (MCP750) w/ HAC"},
+ {0x1E0, 0xF6, "MTX Plus"},
+ {0x1E0, 0xF7, "MTX wo/ Parallel Port"},
+ {0x1E0, 0xF8, "MTX w/ Parallel Port"},
+ {0x1E0, 0xF9, "MVME 2300"},
+ {0x1E0, 0xFA, "MVME 2300SC/2600"},
+ {0x1E0, 0xFB, "MVME 2600 with MVME712M"},
+ {0x1E0, 0xFC, "MVME 2600/2700 with MVME761"},
+ {0x1E0, 0xFD, "MVME 3600 with MVME712M"},
+ {0x1E0, 0xFE, "MVME 3600 with MVME761"},
+ {0x1E0, 0xFF, "MVME 1600-001 or 1600-011"},
+ {0x000, 0x00, ""}
+};
+
+prep_t currentPrepType;
+motorolaBoard currentBoard;
+prep_t checkPrepBoardType(RESIDUAL *res)
+{
+ prep_t PREP_type;
+ /* figure out what kind of prep workstation we are */
+ if ( res->ResidualLength != 0 ) {
+ if ( !strncmp(res->VitalProductData.PrintableModel,"IBM",3) )
+ PREP_type = PREP_IBM;
+ else if (!strncmp(res->VitalProductData.PrintableModel,
+ "Radstone",8)){
+ PREP_type = PREP_Radstone;
+ }
+ else
+ PREP_type = PREP_Motorola;
+ }
+ else /* assume motorola if no residual (netboot?) */ {
+ PREP_type = PREP_Motorola;
+ }
+ currentPrepType = PREP_type;
+ return PREP_type;
+}
+
+motorolaBoard getMotorolaBoard()
+{
+ unsigned char cpu_type;
+ unsigned char base_mod;
+ int entry;
+ int mot_entry = -1;
+
+ cpu_type = inb(MOTOROLA_CPUTYPE_REG) & 0xF0;
+ base_mod = inb(MOTOROLA_BASETYPE_REG);
+
+ for (entry = 0; mot_boards[entry].cpu_type != 0; entry++) {
+ if ((mot_boards[entry].cpu_type & 0xff) != cpu_type)
+ continue;
+
+ if (mot_boards[entry].base_type == 0) {
+ mot_entry = entry;
+ break;
+ }
+
+ if (mot_boards[entry].base_type != base_mod)
+ continue;
+ else{
+ mot_entry = entry;
+ break;
+ }
+ }
+ if (mot_entry == -1) {
+ printk("Unkwon motorola board Please update libbsp/powerpc/shared/motorola/motorola.c\n");
+ printk("cpu_type = %x\n", (unsigned) cpu_type);
+ printk("base_mod = %x\n", (unsigned) base_mod);
+ currentBoard = MOTOROLA_UNKNOWN;
+ return currentBoard;
+ }
+ currentBoard = (motorolaBoard) mot_entry;
+ return currentBoard;
+}
+
+const char* motorolaBoardToString(motorolaBoard board)
+{
+ if (board == MOTOROLA_UNKNOWN) return "Unknown motorola board";
+ return (mot_boards[board].name);
+}
+
diff --git a/c/src/lib/libbsp/powerpc/shared/motorola/motorola.h b/c/src/lib/libbsp/powerpc/shared/motorola/motorola.h
new file mode 100644
index 0000000000..585d1d62dc
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/motorola/motorola.h
@@ -0,0 +1,67 @@
+/* motorola.h
+ *
+ *  This include file describes the data structures and the functions implemented
+ *  by rtems to identify motorola boards.
+ *
+ *  Copyright (C) 1999 valette@crf.canon.fr
+ *
+ *  The license and distribution terms for this file may be
+ *  found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#ifndef LIBBSP_POWERPC_SHARED_MOTOROLA_MOTOROLA_H
+#define LIBBSP_POWERPC_SHARED_MOTOROLA_MOTOROLA_H
+
+#include <bsp/residual.h>
+
+typedef enum {
+ PREP_IBM = 0,
+ PREP_Radstone = 1,
+ PREP_Motorola = 2
+}prep_t;
+
+typedef enum {
+ MVME_2400 = 0,
+ GENESIS = 1,
+ POWERSTACK_E = 2,
+ BLACKAWK = 3,
+ OMAHA = 4,
+ UTAH = 5,
+ POWERSTACK_EX = 6,
+ MESQUITE = 7,
+ SITKA = 8,
+ MESQUITE_W_HAC = 9,
+ MTX_PLUS = 10,
+ MTX_WO_PP = 11,
+ MTX_W_PP = 12,
+ MVME_2300 = 13,
+ MVME_2300SC_2600 = 14,
+ MVME_2600_W_MVME712M = 15,
+ MVME_2600_2700_W_MVME761 = 16,
+ MVME_3600_W_MVME712M = 17,
+ MVME_3600_W_MVME761 = 18,
+ MVME_1600 = 19,
+ MOTOROLA_UNKNOWN = 255
+} motorolaBoard;
+
+typedef enum {
+ HOST_BRIDGE_RAVEN = 0,
+ HOST_BRIDGE_HAWK = 1,
+ HOST_BRIDGE_UNKNOWN = 255
+}motorolaHostBridge;
+
+#define MOTOROLA_CPUTYPE_REG 0x800
+#define MOTOROLA_BASETYPE_REG 0x803
+
+extern prep_t checkPrepBoardType(RESIDUAL *res);
+extern prep_t currentPrepType;
+extern motorolaBoard getMotorolaBoard();
+extern motorolaBoard currentBoard;
+extern const char* motorolaBoardToString(motorolaBoard);
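+
+/*
+ * Usage sketch (illustrative only; assumes printk() is available):
+ *
+ *   motorolaBoard board = getMotorolaBoard();
+ *
+ *   printk("Board: %s\n", motorolaBoardToString(board));
+ *   if (board == MESQUITE) {
+ *     ... MCP750 specific setup ...
+ *   }
+ */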
+
+
+#endif /* LIBBSP_POWERPC_SHARED_MOTOROLA_MOTOROLA_H */
+
diff --git a/c/src/lib/libbsp/powerpc/shared/openpic/Makefile.in b/c/src/lib/libbsp/powerpc/shared/openpic/Makefile.in
new file mode 100644
index 0000000000..6d1cd78ed3
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/openpic/Makefile.in
@@ -0,0 +1,42 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/shared/openpic
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+H_FILES = $(srcdir)/openpic.h
+
+SRCS = $(C_FILES) $(H_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
+
+INSTALLDIRS = $(PROJECT_INCLUDE)/bsp
+
+$(INSTALLDIRS):
+ @$(mkinstalldirs) $(INSTALLDIRS)
+
+preinstall:
+ @$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
+ @$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
+
+all: ${ARCH} $(SRCS) preinstall
+
+# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
+install: all
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libbsp/powerpc/shared/openpic/openpic.c b/c/src/lib/libbsp/powerpc/shared/openpic/openpic.c
new file mode 100644
index 0000000000..4dd237ee9d
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/openpic/openpic.c
@@ -0,0 +1,509 @@
+/*
+ * openpic.c -- OpenPIC Interrupt Handling
+ *
+ * Copyright (C) 1997 Geert Uytterhoeven
+ *
+ * Modified to compile in RTEMS development environment
+ * by Eric Valette
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+/*
+ * Note: Interprocessor Interrupt (IPI) and Timer support is incomplete
+ */
+
+
+#include <bsp/openpic.h>
+#include <bsp/pci.h>
+#include <bsp/consoleIo.h>
+#include <libcpu/io.h>
+#include <libcpu/byteorder.h>
+#include <bsp.h>
+
+#define NULL 0
+#define REGISTER_DEBUG
+#undef REGISTER_DEBUG
+
+
+volatile struct OpenPIC *OpenPIC = NULL;
+unsigned int OpenPIC_NumInitSenses = 0;
+unsigned char *OpenPIC_InitSenses = NULL;
+
+static unsigned int NumProcessors;
+static unsigned int NumSources;
+
+
+ /*
+ * Accesses to the current processor's registers
+ */
+
+#define THIS_CPU Processor[cpu]
+#define CHECK_THIS_CPU check_arg_cpu(cpu)
+
+
+ /*
+ * Sanity checks
+ */
+
+#if 1
+#define check_arg_ipi(ipi) \
+ if (ipi < 0 || ipi >= OPENPIC_NUM_IPI) \
+ printk("openpic.c:%d: illegal ipi %d\n", __LINE__, ipi);
+#define check_arg_timer(timer) \
+ if (timer < 0 || timer >= OPENPIC_NUM_TIMERS) \
+ printk("openpic.c:%d: illegal timer %d\n", __LINE__, timer);
+#define check_arg_vec(vec) \
+ if (vec < 0 || vec >= OPENPIC_NUM_VECTORS) \
+ printk("openpic.c:%d: illegal vector %d\n", __LINE__, vec);
+#define check_arg_pri(pri) \
+ if (pri < 0 || pri >= OPENPIC_NUM_PRI) \
+ printk("openpic.c:%d: illegal priority %d\n", __LINE__, pri);
+#define check_arg_irq(irq) \
+ if (irq < 0 || irq >= NumSources) \
+ printk("openpic.c:%d: illegal irq %d from %p,[%p],[[%p]]\n", \
+ __LINE__, irq, __builtin_return_address(0), \
+ __builtin_return_address(1), __builtin_return_address(2) \
+ );
+#define check_arg_cpu(cpu) \
+ if (cpu < 0 || cpu >= NumProcessors) \
+ printk("openpic.c:%d: illegal cpu %d\n", __LINE__, cpu);
+#else
+#define check_arg_ipi(ipi) do {} while (0)
+#define check_arg_timer(timer) do {} while (0)
+#define check_arg_vec(vec) do {} while (0)
+#define check_arg_pri(pri) do {} while (0)
+#define check_arg_irq(irq) do {} while (0)
+#define check_arg_cpu(cpu) do {} while (0)
+#endif
+
+
+
+ /*
+ * I/O functions
+ */
+
+static inline unsigned int openpic_read(volatile unsigned int *addr)
+{
+ unsigned int val;
+
+ val = ld_le32(addr);
+#ifdef REGISTER_DEBUG
+ printk("openpic_read(0x%08x) = 0x%08x\n", (unsigned int)addr, val);
+#endif
+ return val;
+}
+
+static inline void openpic_write(volatile unsigned int *addr, unsigned int val)
+{
+#ifdef REGISTER_DEBUG
+ printk("openpic_write(0x%08x, 0x%08x)\n", (unsigned int)addr, val);
+#endif
+ out_le32(addr, val);
+}
+
+
+static inline unsigned int openpic_readfield(volatile unsigned int *addr, unsigned int mask)
+{
+ unsigned int val = openpic_read(addr);
+ return val & mask;
+}
+
+inline void openpic_writefield(volatile unsigned int *addr, unsigned int mask,
+ unsigned int field)
+{
+ unsigned int val = openpic_read(addr);
+ openpic_write(addr, (val & ~mask) | (field & mask));
+}
+
+static inline void openpic_clearfield(volatile unsigned int *addr, unsigned int mask)
+{
+ openpic_writefield(addr, mask, 0);
+}
+
+static inline void openpic_setfield(volatile unsigned int *addr, unsigned int mask)
+{
+ openpic_writefield(addr, mask, mask);
+}
+
+
+ /*
+ * Update a Vector/Priority register in a safe manner. The interrupt will
+ * be disabled.
+ */
+
+static void openpic_safe_writefield(volatile unsigned int *addr, unsigned int mask,
+ unsigned int field)
+{
+ openpic_setfield(addr, OPENPIC_MASK);
+ /* wait until it's not in use */
+ while (openpic_read(addr) & OPENPIC_ACTIVITY);
+ openpic_writefield(addr, mask | OPENPIC_MASK, field | OPENPIC_MASK);
+}
+
+
+/* -------- Global Operations ---------------------------------------------- */
+
+
+ /*
+ * Initialize the OpenPIC
+ *
+ * This includes a kludge for the Motorola Raven OpenPIC, which does not
+ * report vendor and device IDs and reports the wrong number of interrupt sources.
+ * (Motorola did a great job on that one!)
+ */
+
+void openpic_init(int main_pic)
+{
+ unsigned int t, i;
+ unsigned int vendorid, devid, stepping, timerfreq;
+ const char *version, *vendor, *device;
+
+ if (!OpenPIC)
+ BSP_panic("No OpenPIC found");
+
+ t = openpic_read(&OpenPIC->Global.Feature_Reporting0);
+ switch (t & OPENPIC_FEATURE_VERSION_MASK) {
+ case 1:
+ version = "1.0";
+ break;
+ case 2:
+ version = "1.2";
+ break;
+ default:
+ version = "?";
+ break;
+ }
+ NumProcessors = ((t & OPENPIC_FEATURE_LAST_PROCESSOR_MASK) >>
+ OPENPIC_FEATURE_LAST_PROCESSOR_SHIFT) + 1;
+ NumSources = ((t & OPENPIC_FEATURE_LAST_SOURCE_MASK) >>
+ OPENPIC_FEATURE_LAST_SOURCE_SHIFT) + 1;
+ t = openpic_read(&OpenPIC->Global.Vendor_Identification);
+
+ vendorid = t & OPENPIC_VENDOR_ID_VENDOR_ID_MASK;
+ devid = (t & OPENPIC_VENDOR_ID_DEVICE_ID_MASK) >>
+ OPENPIC_VENDOR_ID_DEVICE_ID_SHIFT;
+ stepping = (t & OPENPIC_VENDOR_ID_STEPPING_MASK) >>
+ OPENPIC_VENDOR_ID_STEPPING_SHIFT;
+
+ /* Kludge for the Raven */
+ pci_read_config_dword(0, 0, 0, 0, &t);
+ if (t == PCI_VENDOR_ID_MOTOROLA + (PCI_DEVICE_ID_MOTOROLA_RAVEN<<16)) {
+ vendor = "Motorola";
+ device = "Raven";
+ NumSources += 1;
+ } else {
+ switch (vendorid) {
+ case OPENPIC_VENDOR_ID_APPLE:
+ vendor = "Apple";
+ break;
+ default:
+ vendor = "Unknown";
+ break;
+ }
+ switch (devid) {
+ case OPENPIC_DEVICE_ID_APPLE_HYDRA:
+ device = "Hydra";
+ break;
+ default:
+ device = "Unknown";
+ break;
+ }
+ }
+ printk("OpenPIC Version %s (%d CPUs and %d IRQ sources) at %p\n", version,
+ NumProcessors, NumSources, OpenPIC);
+
+ printk("OpenPIC Vendor %d (%s), Device %d (%s), Stepping %d\n", vendorid,
+ vendor, devid, device, stepping);
+
+ timerfreq = openpic_read(&OpenPIC->Global.Timer_Frequency);
+ printk("OpenPIC timer frequency is ");
+ if (timerfreq)
+ printk("%d Hz\n", timerfreq);
+ else
+ printk("not set\n");
+
+ if ( main_pic )
+ {
+ /* Initialize timer interrupts */
+ for (i = 0; i < OPENPIC_NUM_TIMERS; i++) {
+ /* Disabled, Priority 0 */
+ openpic_inittimer(i, 0, OPENPIC_VEC_TIMER+i);
+ /* No processor */
+ openpic_maptimer(i, 0);
+ }
+
+ /* Initialize IPI interrupts */
+ for (i = 0; i < OPENPIC_NUM_IPI; i++) {
+ /* Disabled, Priority 0 */
+ openpic_initipi(i, 0, OPENPIC_VEC_IPI+i);
+ }
+
+ /* Initialize external interrupts */
+ /* SIOint (8259 cascade) is special */
+ openpic_initirq(0, 8, OPENPIC_VEC_SOURCE, 1, 1);
+ /* Processor 0 */
+ openpic_mapirq(0, 1<<0);
+ for (i = 1; i < NumSources; i++) {
+ /* Enabled, Priority 8 */
+ openpic_initirq(i, 8, OPENPIC_VEC_SOURCE+i, 0,
+ i < OpenPIC_NumInitSenses ? OpenPIC_InitSenses[i] : 1);
+ /* Processor 0 */
+ openpic_mapirq(i, 1<<0);
+ }
+
+ /* Initialize the spurious interrupt */
+ openpic_set_spurious(OPENPIC_VEC_SPURIOUS);
+#if 0
+ if (request_irq(IRQ_8259_CASCADE, no_action, SA_INTERRUPT,
+ "82c59 cascade", NULL))
+ printk("Unable to get OpenPIC IRQ 0 for cascade\n");
+#endif
+ openpic_set_priority(0, 0);
+ openpic_disable_8259_pass_through();
+ }
+}
+
+
+ /*
+ * Reset the OpenPIC
+ */
+
+void openpic_reset(void)
+{
+ openpic_setfield(&OpenPIC->Global.Global_Configuration0,
+ OPENPIC_CONFIG_RESET);
+}
+
+
+ /*
+ * Enable/disable 8259 Pass Through Mode
+ */
+
+void openpic_enable_8259_pass_through(void)
+{
+ openpic_clearfield(&OpenPIC->Global.Global_Configuration0,
+ OPENPIC_CONFIG_8259_PASSTHROUGH_DISABLE);
+}
+
+void openpic_disable_8259_pass_through(void)
+{
+ openpic_setfield(&OpenPIC->Global.Global_Configuration0,
+ OPENPIC_CONFIG_8259_PASSTHROUGH_DISABLE);
+}
+
+
+ /*
+ * Find out the current interrupt
+ */
+
+unsigned int openpic_irq(unsigned int cpu)
+{
+ unsigned int vec;
+
+ check_arg_cpu(cpu);
+ vec = openpic_readfield(&OpenPIC->THIS_CPU.Interrupt_Acknowledge,
+ OPENPIC_VECTOR_MASK);
+ return vec;
+}
+
+
+ /*
+ * Signal end of interrupt (EOI) processing
+ */
+
+void openpic_eoi(unsigned int cpu)
+{
+ check_arg_cpu(cpu);
+ openpic_write(&OpenPIC->THIS_CPU.EOI, 0);
+}
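+
/*
 * Illustrative usage sketch: a BSP external-exception handler would
 * typically pair openpic_irq() and openpic_eoi() as below.  The dispatch
 * table `bsp_irq_handler' is a hypothetical name used only for this
 * example; the real RTEMS irq code keeps its own tables.
 */
extern void (*bsp_irq_handler[])(void);          /* assumed BSP dispatch table */

void bsp_external_interrupt(unsigned int cpu)
{
    unsigned int vec = openpic_irq(cpu);         /* acknowledge, get the vector */

    if (vec == OPENPIC_VEC_SPURIOUS)
        return;                                  /* spurious: no EOI required   */

    bsp_irq_handler[vec - OPENPIC_VEC_SOURCE](); /* run the source's handler    */
    openpic_eoi(cpu);                            /* signal end of interrupt     */
}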
+
+
+ /*
+ * Get/set the current task priority
+ */
+
+unsigned int openpic_get_priority(unsigned int cpu)
+{
+ CHECK_THIS_CPU;
+ return openpic_readfield(&OpenPIC->THIS_CPU.Current_Task_Priority,
+ OPENPIC_CURRENT_TASK_PRIORITY_MASK);
+}
+
+void openpic_set_priority(unsigned int cpu, unsigned int pri)
+{
+ CHECK_THIS_CPU;
+ check_arg_pri(pri);
+ openpic_writefield(&OpenPIC->THIS_CPU.Current_Task_Priority,
+ OPENPIC_CURRENT_TASK_PRIORITY_MASK, pri);
+}
+
+ /*
+ * Get/set the spurious vector
+ */
+
+unsigned int openpic_get_spurious(void)
+{
+ return openpic_readfield(&OpenPIC->Global.Spurious_Vector,
+ OPENPIC_VECTOR_MASK);
+}
+
+void openpic_set_spurious(unsigned int vec)
+{
+ check_arg_vec(vec);
+ openpic_writefield(&OpenPIC->Global.Spurious_Vector, OPENPIC_VECTOR_MASK,
+ vec);
+}
+
+
+ /*
+ * Initialize one or more CPUs
+ */
+
+void openpic_init_processor(unsigned int cpumask)
+{
+ openpic_write(&OpenPIC->Global.Processor_Initialization, cpumask);
+}
+
+
+/* -------- Interprocessor Interrupts -------------------------------------- */
+
+
+ /*
+ * Initialize an interprocessor interrupt (and disable it)
+ *
+ * ipi: OpenPIC interprocessor interrupt number
+ * pri: interrupt source priority
+ * vec: the vector it will produce
+ */
+
+void openpic_initipi(unsigned int ipi, unsigned int pri, unsigned int vec)
+{
+ check_arg_ipi(ipi);
+ check_arg_pri(pri);
+ check_arg_vec(vec);
+ openpic_safe_writefield(&OpenPIC->Global.IPI_Vector_Priority(ipi),
+ OPENPIC_PRIORITY_MASK | OPENPIC_VECTOR_MASK,
+ (pri << OPENPIC_PRIORITY_SHIFT) | vec);
+}
+
+
+ /*
+ * Send an IPI to one or more CPUs
+ */
+
+void openpic_cause_IPI(unsigned int cpu, unsigned int ipi, unsigned int cpumask)
+{
+ CHECK_THIS_CPU;
+ check_arg_ipi(ipi);
+ openpic_write(&OpenPIC->THIS_CPU.IPI_Dispatch(ipi), cpumask);
+}
+
+
+/* -------- Timer Interrupts ----------------------------------------------- */
+
+
+ /*
+ * Initialize a timer interrupt (and disable it)
+ *
+ * timer: OpenPIC timer number
+ * pri: interrupt source priority
+ * vec: the vector it will produce
+ */
+
+void openpic_inittimer(unsigned int timer, unsigned int pri, unsigned int vec)
+{
+ check_arg_timer(timer);
+ check_arg_pri(pri);
+ check_arg_vec(vec);
+ openpic_safe_writefield(&OpenPIC->Global.Timer[timer].Vector_Priority,
+ OPENPIC_PRIORITY_MASK | OPENPIC_VECTOR_MASK,
+ (pri << OPENPIC_PRIORITY_SHIFT) | vec);
+}
+
+
+ /*
+ * Map a timer interrupt to one or more CPUs
+ */
+
+void openpic_maptimer(unsigned int timer, unsigned int cpumask)
+{
+ check_arg_timer(timer);
+ openpic_write(&OpenPIC->Global.Timer[timer].Destination, cpumask);
+}
+
+
+/* -------- Interrupt Sources ---------------------------------------------- */
+
+
+ /*
+ * Enable/disable an interrupt source
+ */
+
+void openpic_enable_irq(unsigned int irq)
+{
+ check_arg_irq(irq);
+ openpic_clearfield(&OpenPIC->Source[irq].Vector_Priority, OPENPIC_MASK);
+}
+
+void openpic_disable_irq(unsigned int irq)
+{
+ check_arg_irq(irq);
+ openpic_setfield(&OpenPIC->Source[irq].Vector_Priority, OPENPIC_MASK);
+}
+
+
+ /*
+ * Initialize an interrupt source (and disable it!)
+ *
+ * irq: OpenPIC interrupt number
+ * pri: interrupt source priority
+ * vec: the vector it will produce
+ * pol: polarity (1 for positive, 0 for negative)
+ * sense: 1 for level, 0 for edge
+ */
+
+void openpic_initirq(unsigned int irq, unsigned int pri, unsigned int vec, int pol, int sense)
+{
+ check_arg_irq(irq);
+ check_arg_pri(pri);
+ check_arg_vec(vec);
+ openpic_safe_writefield(&OpenPIC->Source[irq].Vector_Priority,
+ OPENPIC_PRIORITY_MASK | OPENPIC_VECTOR_MASK |
+ OPENPIC_SENSE_POLARITY | OPENPIC_SENSE_LEVEL,
+ (pri << OPENPIC_PRIORITY_SHIFT) | vec |
+ (pol ? OPENPIC_SENSE_POLARITY : 0) |
+ (sense ? OPENPIC_SENSE_LEVEL : 0));
+}
+
+
+ /*
+ * Map an interrupt source to one or more CPUs
+ */
+
+void openpic_mapirq(unsigned int irq, unsigned int cpumask)
+{
+ check_arg_irq(irq);
+ openpic_write(&OpenPIC->Source[irq].Destination, cpumask);
+}
+
+
+ /*
+ * Set the sense for an interrupt source (and disable it!)
+ *
+ * sense: 1 for level, 0 for edge
+ */
+
+void openpic_set_sense(unsigned int irq, int sense)
+{
+ check_arg_irq(irq);
+ openpic_safe_writefield(&OpenPIC->Source[irq].Vector_Priority,
+ OPENPIC_SENSE_LEVEL,
+ (sense ? OPENPIC_SENSE_LEVEL : 0));
+}
diff --git a/c/src/lib/libbsp/powerpc/shared/openpic/openpic.h b/c/src/lib/libbsp/powerpc/shared/openpic/openpic.h
new file mode 100644
index 0000000000..97faec1e9d
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/openpic/openpic.h
@@ -0,0 +1,340 @@
+/*
+ * openpic.h -- OpenPIC definitions
+ *
+ * Copyright (C) 1997 Geert Uytterhoeven
+ *
+ * This file is based on the following documentation:
+ *
+ * The Open Programmable Interrupt Controller (PIC)
+ * Register Interface Specification Revision 1.2
+ *
+ * Issue Date: October 1995
+ *
+ * Issued jointly by Advanced Micro Devices and Cyrix Corporation
+ *
+ * AMD is a registered trademark of Advanced Micro Devices, Inc.
+ * Copyright (C) 1995, Advanced Micro Devices, Inc. and Cyrix, Inc.
+ * All Rights Reserved.
+ *
+ * To receive a copy of this documentation, send an email to openpic@amd.com.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Modified to compile in RTEMS development environment
+ * by Eric Valette
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#ifndef _RTEMS_OPENPIC_H
+#define _RTEMS_OPENPIC_H
+
+
+ /*
+ * OpenPIC supports up to 2048 interrupt sources and up to 32 processors
+ */
+
+#define OPENPIC_MAX_SOURCES 2048
+#define OPENPIC_MAX_PROCESSORS 32
+
+#define OPENPIC_NUM_TIMERS 4
+#define OPENPIC_NUM_IPI 4
+#define OPENPIC_NUM_PRI 16
+#define OPENPIC_NUM_VECTORS 256
+
+
+ /*
+ * Vector numbers
+ */
+
+#define OPENPIC_VEC_SOURCE 0x10 /* and up */
+#define OPENPIC_VEC_TIMER 0x40 /* and up */
+#define OPENPIC_VEC_IPI 0x50 /* and up */
+#define OPENPIC_VEC_SPURIOUS 99
+
+
+ /*
+ * OpenPIC Registers are 32 bits and aligned on 128 bit boundaries
+ */
+
+typedef struct _OpenPIC_Reg {
+ unsigned int Reg; /* Little endian! */
+ char Pad[0xc];
+} OpenPIC_Reg;
+
+
+ /*
+ * Per Processor Registers
+ */
+
+typedef struct _OpenPIC_Processor {
+ /*
+ * Private Shadow Registers (for SLiC backwards compatibility)
+ */
+ unsigned int IPI0_Dispatch_Shadow; /* Write Only */
+ char Pad1[0x4];
+ unsigned int IPI0_Vector_Priority_Shadow; /* Read/Write */
+ char Pad2[0x34];
+ /*
+ * Interprocessor Interrupt Command Ports
+ */
+ OpenPIC_Reg _IPI_Dispatch[OPENPIC_NUM_IPI]; /* Write Only */
+ /*
+ * Current Task Priority Register
+ */
+ OpenPIC_Reg _Current_Task_Priority; /* Read/Write */
+ char Pad3[0x10];
+ /*
+ * Interrupt Acknowledge Register
+ */
+ OpenPIC_Reg _Interrupt_Acknowledge; /* Read Only */
+ /*
+ * End of Interrupt (EOI) Register
+ */
+ OpenPIC_Reg _EOI; /* Read/Write */
+ char Pad5[0xf40];
+} OpenPIC_Processor;
+
+
+ /*
+ * Timer Registers
+ */
+
+typedef struct _OpenPIC_Timer {
+ OpenPIC_Reg _Current_Count; /* Read Only */
+ OpenPIC_Reg _Base_Count; /* Read/Write */
+ OpenPIC_Reg _Vector_Priority; /* Read/Write */
+ OpenPIC_Reg _Destination; /* Read/Write */
+} OpenPIC_Timer;
+
+
+ /*
+ * Global Registers
+ */
+
+typedef struct _OpenPIC_Global {
+ /*
+ * Feature Reporting Registers
+ */
+ OpenPIC_Reg _Feature_Reporting0; /* Read Only */
+ OpenPIC_Reg _Feature_Reporting1; /* Future Expansion */
+ /*
+ * Global Configuration Registers
+ */
+ OpenPIC_Reg _Global_Configuration0; /* Read/Write */
+ OpenPIC_Reg _Global_Configuration1; /* Future Expansion */
+ /*
+ * Vendor Specific Registers
+ */
+ OpenPIC_Reg _Vendor_Specific[4];
+ /*
+ * Vendor Identification Register
+ */
+ OpenPIC_Reg _Vendor_Identification; /* Read Only */
+ /*
+ * Processor Initialization Register
+ */
+ OpenPIC_Reg _Processor_Initialization; /* Read/Write */
+ /*
+ * IPI Vector/Priority Registers
+ */
+ OpenPIC_Reg _IPI_Vector_Priority[OPENPIC_NUM_IPI]; /* Read/Write */
+ /*
+ * Spurious Vector Register
+ */
+ OpenPIC_Reg _Spurious_Vector; /* Read/Write */
+ /*
+ * Global Timer Registers
+ */
+ OpenPIC_Reg _Timer_Frequency; /* Read/Write */
+ OpenPIC_Timer Timer[OPENPIC_NUM_TIMERS];
+ char Pad1[0xee00];
+} OpenPIC_Global;
+
+
+ /*
+ * Interrupt Source Registers
+ */
+
+typedef struct _OpenPIC_Source {
+ OpenPIC_Reg _Vector_Priority; /* Read/Write */
+ OpenPIC_Reg _Destination; /* Read/Write */
+} OpenPIC_Source;
+
+
+ /*
+ * OpenPIC Register Map
+ */
+
+struct OpenPIC {
+ char Pad1[0x1000];
+ /*
+ * Global Registers
+ */
+ OpenPIC_Global Global;
+ /*
+ * Interrupt Source Configuration Registers
+ */
+ OpenPIC_Source Source[OPENPIC_MAX_SOURCES];
+ /*
+ * Per Processor Registers
+ */
+ OpenPIC_Processor Processor[OPENPIC_MAX_PROCESSORS];
+};
+
+extern volatile struct OpenPIC *OpenPIC;
+extern unsigned int OpenPIC_NumInitSenses;
+extern unsigned char *OpenPIC_InitSenses;
+
+
+ /*
+ * Current Task Priority Register
+ */
+
+#define OPENPIC_CURRENT_TASK_PRIORITY_MASK 0x0000000f
+
+ /*
+ * Who Am I Register
+ */
+
+#define OPENPIC_WHO_AM_I_ID_MASK 0x0000001f
+
+ /*
+ * Feature Reporting Register 0
+ */
+
+#define OPENPIC_FEATURE_LAST_SOURCE_MASK 0x07ff0000
+#define OPENPIC_FEATURE_LAST_SOURCE_SHIFT 16
+#define OPENPIC_FEATURE_LAST_PROCESSOR_MASK 0x00001f00
+#define OPENPIC_FEATURE_LAST_PROCESSOR_SHIFT 8
+#define OPENPIC_FEATURE_VERSION_MASK 0x000000ff
+
+ /*
+ * Global Configuration Register 0
+ */
+
+#define OPENPIC_CONFIG_RESET 0x80000000
+#define OPENPIC_CONFIG_8259_PASSTHROUGH_DISABLE 0x20000000
+#define OPENPIC_CONFIG_BASE_MASK 0x000fffff
+
+ /*
+ * Vendor Identification Register
+ */
+
+#define OPENPIC_VENDOR_ID_STEPPING_MASK 0x00ff0000
+#define OPENPIC_VENDOR_ID_STEPPING_SHIFT 16
+#define OPENPIC_VENDOR_ID_DEVICE_ID_MASK 0x0000ff00
+#define OPENPIC_VENDOR_ID_DEVICE_ID_SHIFT 8
+#define OPENPIC_VENDOR_ID_VENDOR_ID_MASK 0x000000ff
+
+ /*
+ * Vector/Priority Registers
+ */
+
+#define OPENPIC_MASK 0x80000000
+#define OPENPIC_ACTIVITY 0x40000000 /* Read Only */
+#define OPENPIC_PRIORITY_MASK 0x000f0000
+#define OPENPIC_PRIORITY_SHIFT 16
+#define OPENPIC_VECTOR_MASK 0x000000ff
+
+
+ /*
+ * Interrupt Source Registers
+ */
+
+#define OPENPIC_SENSE_POLARITY 0x00800000 /* Undoc'd */
+#define OPENPIC_SENSE_LEVEL 0x00400000
+
+
+ /*
+ * Timer Registers
+ */
+
+#define OPENPIC_COUNT_MASK 0x7fffffff
+#define OPENPIC_TIMER_TOGGLE 0x80000000
+#define OPENPIC_TIMER_COUNT_INHIBIT 0x80000000
+
+
+ /*
+ * Aliases to make life simpler
+ */
+
+/* Per Processor Registers */
+#define IPI_Dispatch(i) _IPI_Dispatch[i].Reg
+#define Current_Task_Priority _Current_Task_Priority.Reg
+#define Interrupt_Acknowledge _Interrupt_Acknowledge.Reg
+#define EOI _EOI.Reg
+
+/* Global Registers */
+#define Feature_Reporting0 _Feature_Reporting0.Reg
+#define Feature_Reporting1 _Feature_Reporting1.Reg
+#define Global_Configuration0 _Global_Configuration0.Reg
+#define Global_Configuration1 _Global_Configuration1.Reg
+#define Vendor_Specific(i) _Vendor_Specific[i].Reg
+#define Vendor_Identification _Vendor_Identification.Reg
+#define Processor_Initialization _Processor_Initialization.Reg
+#define IPI_Vector_Priority(i) _IPI_Vector_Priority[i].Reg
+#define Spurious_Vector _Spurious_Vector.Reg
+#define Timer_Frequency _Timer_Frequency.Reg
+
+/* Timer Registers */
+#define Current_Count _Current_Count.Reg
+#define Base_Count _Base_Count.Reg
+#define Vector_Priority _Vector_Priority.Reg
+#define Destination _Destination.Reg
+
+/* Interrupt Source Registers */
+#define Vector_Priority _Vector_Priority.Reg
+#define Destination _Destination.Reg
+
+
+ /*
+ * Vendor and Device IDs
+ */
+
+#define OPENPIC_VENDOR_ID_APPLE 0x14
+#define OPENPIC_DEVICE_ID_APPLE_HYDRA 0x46
+
+
+ /*
+ * OpenPIC Operations
+ */
+
+/* Global Operations */
+extern void openpic_init(int);
+extern void openpic_reset(void);
+extern void openpic_enable_8259_pass_through(void);
+extern void openpic_disable_8259_pass_through(void);
+extern unsigned int openpic_irq(unsigned int cpu);
+extern void openpic_eoi(unsigned int cpu);
+extern unsigned int openpic_get_priority(unsigned int cpu);
+extern void openpic_set_priority(unsigned int cpu, unsigned int pri);
+extern unsigned int openpic_get_spurious(void);
+extern void openpic_set_spurious(unsigned int vector);
+extern void openpic_init_processor(unsigned int cpumask);
+
+/* Interprocessor Interrupts */
+extern void openpic_initipi(unsigned int ipi, unsigned int pri, unsigned int vector);
+extern void openpic_cause_IPI(unsigned int cpu, unsigned int ipi, unsigned int cpumask);
+
+/* Timer Interrupts */
+extern void openpic_inittimer(unsigned int timer, unsigned int pri, unsigned int vector);
+extern void openpic_maptimer(unsigned int timer, unsigned int cpumask);
+
+/* Interrupt Sources */
+extern void openpic_enable_irq(unsigned int irq);
+extern void openpic_disable_irq(unsigned int irq);
+extern void openpic_initirq(unsigned int irq, unsigned int pri, unsigned int vector, int polarity,
+ int is_level);
+extern void openpic_mapirq(unsigned int irq, unsigned int cpumask);
+extern void openpic_set_sense(unsigned int irq, int sense);
+
+#endif /* _RTEMS_OPENPIC_H */
diff --git a/c/src/lib/libbsp/powerpc/shared/pci/Makefile.in b/c/src/lib/libbsp/powerpc/shared/pci/Makefile.in
new file mode 100644
index 0000000000..5186f9dab9
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/pci/Makefile.in
@@ -0,0 +1,42 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/shared/pci
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+H_FILES = $(srcdir)/pci.h
+
+SRCS = $(C_FILES) $(H_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
+
+INSTALLDIRS = $(PROJECT_INCLUDE)/bsp
+
+$(INSTALLDIRS):
+ @$(mkinstalldirs) $(INSTALLDIRS)
+
+preinstall:
+ @$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
+ @$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
+
+all: ${ARCH} $(SRCS) preinstall
+
+# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
+install: all
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libbsp/powerpc/shared/pci/pci.c b/c/src/lib/libbsp/powerpc/shared/pci/pci.c
new file mode 100644
index 0000000000..233a2efc00
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/pci/pci.c
@@ -0,0 +1,388 @@
+/*
+ * pci.c: this file contains basic PCI I/O functions.
+ *
+ * Copyright (C) 1999 valette@crf.canon.fr
+ *
+ * This code is heavily inspired by the public specification of STREAM V2,
+ * which can be found at:
+ *
+ * <http://www.chorus.com/Documentation/index.html> by following
+ * the STREAM API Specification Document link.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <bsp/consoleIo.h>
+#include <libcpu/io.h>
+#include <bsp/pci.h>
+#include <bsp/residual.h>
+#include <bsp/openpic.h>
+#include <bsp.h>
+
+#define PCI_CONFIG_ADDR 0xcf8
+#define PCI_CONFIG_DATA 0xcfc
+#define PCI_INVALID_VENDORDEVICEID 0xffffffff
+#define PCI_MULTI_FUNCTION 0x80
+#define RAVEN_MPIC_IOSPACE_ENABLE 0x1
+#define RAVEN_MPIC_MEMSPACE_ENABLE 0x2
+#define RAVEN_MASTER_ENABLE 0x4
+#define RAVEN_PARITY_CHECK_ENABLE 0x40
+#define RAVEN_SYSTEM_ERROR_ENABLE 0x100
+#define RAVEN_CLEAR_EVENTS_MASK 0xf9000000
+
+
+/*
+ * Highest PCI bus number present in the system; set up by InitializePCI().
+ */
+unsigned char ucMaxPCIBus;
+
+static int
+indirect_pci_read_config_byte(unsigned char bus, unsigned char slot,
+ unsigned char function,
+ unsigned char offset, unsigned char *val) {
+ out_be32((unsigned int*) pci.pci_config_addr,
+ 0x80|(bus<<8)|(PCI_DEVFN(slot,function)<<16)|((offset&~3)<<24));
+ *val = in_8(pci.pci_config_data + (offset&3));
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+indirect_pci_read_config_word(unsigned char bus, unsigned char slot,
+ unsigned char function,
+ unsigned char offset, unsigned short *val) {
+ *val = 0xffff;
+ if (offset&1) return PCIBIOS_BAD_REGISTER_NUMBER;
+ out_be32((unsigned int*) pci.pci_config_addr,
+ 0x80|(bus<<8)|(PCI_DEVFN(slot,function)<<16)|((offset&~3)<<24));
+ *val = in_le16((volatile unsigned short *)(pci.pci_config_data + (offset&3)));
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+indirect_pci_read_config_dword(unsigned char bus, unsigned char slot,
+ unsigned char function,
+ unsigned char offset, unsigned int *val) {
+ *val = 0xffffffff;
+ if (offset&3) return PCIBIOS_BAD_REGISTER_NUMBER;
+ out_be32((unsigned int*) pci.pci_config_addr,
+ 0x80|(bus<<8)|(PCI_DEVFN(slot,function)<<16)|(offset<<24));
+ *val = in_le32((volatile unsigned int *)pci.pci_config_data);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+indirect_pci_write_config_byte(unsigned char bus, unsigned char slot,
+ unsigned char function,
+ unsigned char offset, unsigned char val) {
+ out_be32((unsigned int*) pci.pci_config_addr,
+ 0x80|(bus<<8)|(PCI_DEVFN(slot,function)<<16)|((offset&~3)<<24));
+ out_8(pci.pci_config_data + (offset&3), val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+indirect_pci_write_config_word(unsigned char bus, unsigned char slot,
+ unsigned char function,
+ unsigned char offset, unsigned short val) {
+ if (offset&1) return PCIBIOS_BAD_REGISTER_NUMBER;
+ out_be32((unsigned int*) pci.pci_config_addr,
+ 0x80|(bus<<8)|(PCI_DEVFN(slot,function)<<16)|((offset&~3)<<24));
+ out_le16((volatile unsigned short *)(pci.pci_config_data + (offset&3)), val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+indirect_pci_write_config_dword(unsigned char bus, unsigned char slot,
+ unsigned char function,
+ unsigned char offset, unsigned int val) {
+ if (offset&3) return PCIBIOS_BAD_REGISTER_NUMBER;
+ out_be32((unsigned int*) pci.pci_config_addr,
+ 0x80|(bus<<8)|(PCI_DEVFN(slot,function)<<16)|(offset<<24));
+ out_le32((volatile unsigned int *)pci.pci_config_data, val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static const pci_config_access_functions indirect_functions = {
+ indirect_pci_read_config_byte,
+ indirect_pci_read_config_word,
+ indirect_pci_read_config_dword,
+ indirect_pci_write_config_byte,
+ indirect_pci_write_config_word,
+ indirect_pci_write_config_dword
+};
+
+pci_config pci = {(volatile unsigned char*)PCI_CONFIG_ADDR,
+ (volatile unsigned char*)PCI_CONFIG_DATA,
+ &indirect_functions};
+
+static int
+direct_pci_read_config_byte(unsigned char bus, unsigned char slot,
+ unsigned char function,
+ unsigned char offset, unsigned char *val) {
+ if (bus != 0 || (1<<slot & 0xff8007fe)) {
+ *val=0xff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ *val=in_8(pci.pci_config_data + ((1<<slot)&~1)
+ + (function<<8) + offset);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+direct_pci_read_config_word(unsigned char bus, unsigned char slot,
+ unsigned char function,
+ unsigned char offset, unsigned short *val) {
+ *val = 0xffff;
+ if (offset&1) return PCIBIOS_BAD_REGISTER_NUMBER;
+ if (bus != 0 || (1<<slot & 0xff8007fe)) {
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ *val=in_le16((volatile unsigned short *)
+ (pci.pci_config_data + ((1<<slot)&~1)
+ + (function<<8) + offset));
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+direct_pci_read_config_dword(unsigned char bus, unsigned char slot,
+ unsigned char function,
+ unsigned char offset, unsigned int *val) {
+ *val = 0xffffffff;
+ if (offset&3) return PCIBIOS_BAD_REGISTER_NUMBER;
+ if (bus != 0 || (1<<slot & 0xff8007fe)) {
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ *val=in_le32((volatile unsigned int *)
+ (pci.pci_config_data + ((1<<slot)&~1)
+ + (function<<8) + offset));
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+direct_pci_write_config_byte(unsigned char bus, unsigned char slot,
+ unsigned char function,
+ unsigned char offset, unsigned char val) {
+ if (bus != 0 || (1<<slot & 0xff8007fe)) {
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ out_8(pci.pci_config_data + ((1<<slot)&~1)
+ + (function<<8) + offset,
+ val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+direct_pci_write_config_word(unsigned char bus, unsigned char slot,
+ unsigned char function,
+ unsigned char offset, unsigned short val) {
+ if (offset&1) return PCIBIOS_BAD_REGISTER_NUMBER;
+ if (bus != 0 || (1<<slot & 0xff8007fe)) {
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ out_le16((volatile unsigned short *)
+ (pci.pci_config_data + ((1<<slot)&~1)
+ + (function<<8) + offset),
+ val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+direct_pci_write_config_dword(unsigned char bus, unsigned char slot,
+ unsigned char function,
+ unsigned char offset, unsigned int val) {
+ if (offset&3) return PCIBIOS_BAD_REGISTER_NUMBER;
+ if (bus != 0 || (1<<slot & 0xff8007fe)) {
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ out_le32((volatile unsigned int *)
+ (pci.pci_config_data + ((1<<slot)&~1)
+ + (function<<8) + offset),
+ val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static const pci_config_access_functions direct_functions = {
+ direct_pci_read_config_byte,
+ direct_pci_read_config_word,
+ direct_pci_read_config_dword,
+ direct_pci_write_config_byte,
+ direct_pci_write_config_word,
+ direct_pci_write_config_dword
+};
+
+
+void detect_host_bridge()
+{
+ PPC_DEVICE *hostbridge;
+ unsigned int id0;
+ unsigned int tmp;
+
+ /*
+ * This code assumes that the host bridge is located at
+ * bus 0, dev 0, func 0 AND that the old pre-PCI 2.1
+ * standard device detection mechanism that was used on PCs
+ * (still used in BSD source code) works.
+ */
+ hostbridge=residual_find_device(&residualCopy, PROCESSORDEVICE, NULL,
+ BridgeController,
+ PCIBridge, -1, 0);
+ if (hostbridge) {
+ if (hostbridge->DeviceId.Interface==PCIBridgeIndirect) {
+ pci.pci_functions=&indirect_functions;
+ /* Should be extracted from residual data,
+ * indeed MPC106 in CHRP mode is different,
+ * but we should not use residual data in
+ * this case anyway.
+ */
+ pci.pci_config_addr = ((volatile unsigned char *)
+ (ptr_mem_map->io_base+0xcf8));
+ pci.pci_config_data = ptr_mem_map->io_base+0xcfc;
+ } else if(hostbridge->DeviceId.Interface==PCIBridgeDirect) {
+ pci.pci_functions=&direct_functions;
+ pci.pci_config_data=(unsigned char *) 0x80800000;
+ } else {
+ }
+ } else {
+ /* Let us try by experimentation at our own risk! */
+ pci.pci_functions = &direct_functions;
+ /* On all direct bridges I know the host bridge itself
+ * appears as device 0 function 0.
+ */
+ pci_read_config_dword(0, 0, 0, PCI_VENDOR_ID, &id0);
+ if (id0==~0U) {
+ pci.pci_functions = &indirect_functions;
+ pci.pci_config_addr = ((volatile unsigned char*)
+ (ptr_mem_map->io_base+0xcf8));
+ pci.pci_config_data = ((volatile unsigned char*)ptr_mem_map->io_base+0xcfc);
+ }
+ /* Here we should check that the host bridge is actually
+ * present, but if it is not, we are in such a desperate
+ * situation that we probably cannot even report it.
+ */
+ }
+ pci_read_config_dword(0, 0, 0, 0, &id0);
+ if(id0 == PCI_VENDOR_ID_MOTOROLA +
+ (PCI_DEVICE_ID_MOTOROLA_RAVEN<<16)) {
+ /*
+ * We have a Raven host bridge; read back and adjust its settings.
+ */
+ pci_read_config_dword(0, 0, 0, PCI_COMMAND, &id0);
+#ifdef SHOW_RAVEN_SETTING
+ printk("RAVEN PCI command register = %x\n",id0);
+#endif
+ id0 |= RAVEN_CLEAR_EVENTS_MASK;
+ pci_write_config_dword(0, 0, 0, PCI_COMMAND, id0);
+ pci_read_config_dword(0, 0, 0, PCI_COMMAND, &id0);
+#ifdef SHOW_RAVEN_SETTING
+ printk("After error clearing RAVEN PCI command register = %x\n",id0);
+#endif
+
+ if (id0 & RAVEN_MPIC_IOSPACE_ENABLE) {
+ pci_read_config_dword(0, 0, 0,PCI_BASE_ADDRESS_0, &tmp);
+#ifdef SHOW_RAVEN_SETTING
+ printk("Raven MPIC is accessed via IO Space Access at address : %x\n",(tmp & ~0x1));
+#endif
+ }
+ if (id0 & RAVEN_MPIC_MEMSPACE_ENABLE) {
+ pci_read_config_dword(0, 0, 0,PCI_BASE_ADDRESS_1, &tmp);
+#ifdef SHOW_RAVEN_SETTING
+ printk("Raven MPIC is accessed via memory Space Access at address : %x\n", tmp);
+#endif
+ OpenPIC=(volatile struct OpenPIC *) (tmp + PREP_ISA_MEM_BASE);
+ printk("OpenPIC found at %p.\n",
+ OpenPIC);
+ }
+ }
+ if (OpenPIC == (volatile struct OpenPIC *)0) {
+ BSP_panic("OpenPic Not found\n");
+ }
+
+}
+
+/*
+ * This routine determines the maximum bus number in the system
+ */
+void InitializePCI()
+{
+ unsigned char ucSlotNumber, ucFnNumber, ucNumFuncs;
+ unsigned char ucHeader;
+ unsigned char ucMaxSubordinate;
+ unsigned int ulClass, ulDeviceID;
+
+ detect_host_bridge();
+ /*
+ * Scan PCI bus 0 looking for PCI-PCI bridges
+ */
+ for(ucSlotNumber=0;ucSlotNumber<PCI_MAX_DEVICES;ucSlotNumber++) {
+ (void)pci_read_config_dword(0,
+ ucSlotNumber,
+ 0,
+ PCI_VENDOR_ID,
+ &ulDeviceID);
+ if(ulDeviceID==PCI_INVALID_VENDORDEVICEID) {
+ /*
+ * This slot is empty
+ */
+ continue;
+ }
+ (void)pci_read_config_byte(0,
+ ucSlotNumber,
+ 0,
+ PCI_HEADER_TYPE,
+ &ucHeader);
+ if(ucHeader&PCI_MULTI_FUNCTION) {
+ ucNumFuncs=PCI_MAX_FUNCTIONS;
+ }
+ else {
+ ucNumFuncs=1;
+ }
+ for(ucFnNumber=0;ucFnNumber<ucNumFuncs;ucFnNumber++) {
+ (void)pci_read_config_dword(0,
+ ucSlotNumber,
+ ucFnNumber,
+ PCI_VENDOR_ID,
+ &ulDeviceID);
+ if(ulDeviceID==PCI_INVALID_VENDORDEVICEID) {
+ /*
+ * This slot/function is empty
+ */
+ continue;
+ }
+
+ /*
+ * This slot/function has a device fitted.
+ */
+ (void)pci_read_config_dword(0,
+ ucSlotNumber,
+ ucFnNumber,
+ PCI_CLASS_REVISION,
+ &ulClass);
+ ulClass >>= 16;
+ if (ulClass == PCI_CLASS_BRIDGE_PCI) {
+ /*
+ * We have found a PCI-PCI bridge
+ */
+ (void)pci_read_config_byte(0,
+ ucSlotNumber,
+ ucFnNumber,
+ PCI_SUBORDINATE_BUS,
+ &ucMaxSubordinate);
+ if(ucMaxSubordinate>ucMaxPCIBus) {
+ ucMaxPCIBus=ucMaxSubordinate;
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Return the number of PCI busses in the system
+ */
+unsigned char BusCountPCI()
+{
+ return(ucMaxPCIBus+1);
+}
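+
/*
 * Illustrative usage sketch: once InitializePCI() has run, BusCountPCI()
 * and the configuration accessors used above are enough to enumerate every
 * device.  PCI_MAX_DEVICES and PCI_MAX_FUNCTIONS are assumed to come from
 * the BSP headers, exactly as in InitializePCI() above.
 */
void list_pci_devices(void)
{
    unsigned char bus, slot, fun;
    unsigned int  id;

    for (bus = 0; bus < BusCountPCI(); bus++)
        for (slot = 0; slot < PCI_MAX_DEVICES; slot++)
            for (fun = 0; fun < PCI_MAX_FUNCTIONS; fun++) {
                pci_read_config_dword(bus, slot, fun, PCI_VENDOR_ID, &id);
                if (id == PCI_INVALID_VENDORDEVICEID)
                    continue;                       /* empty slot/function */
                printk("pci %d:%d.%d vendor 0x%04x device 0x%04x\n",
                       bus, slot, fun, id & 0xffff, id >> 16);
            }
}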
diff --git a/c/src/lib/libbsp/powerpc/shared/pci/pci.h b/c/src/lib/libbsp/powerpc/shared/pci/pci.h
new file mode 100644
index 0000000000..2070bb434d
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/pci/pci.h
@@ -0,0 +1,1153 @@
+/*
+ *
+ * PCI defines and function prototypes
+ * Copyright 1994, Drew Eckhardt
+ * Copyright 1997, 1998 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ *
+ * For more information, please consult the following manuals (look at
+ * http://www.pcisig.com/ for how to get them):
+ *
+ * PCI BIOS Specification
+ * PCI Local Bus Specification
+ * PCI to PCI Bridge Specification
+ * PCI System Design Guide
+ *
+ * $Id$
+ */
+
+#ifndef RTEMS_PCI_H
+#define RTEMS_PCI_H
+
+/*
+ * Under PCI, each device has 256 bytes of configuration address space,
+ * of which the first 64 bytes are standardized as follows:
+ */
+#define PCI_VENDOR_ID 0x00 /* 16 bits */
+#define PCI_DEVICE_ID 0x02 /* 16 bits */
+#define PCI_COMMAND 0x04 /* 16 bits */
+#define PCI_COMMAND_IO 0x1 /* Enable response in I/O space */
+#define PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */
+#define PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */
+#define PCI_COMMAND_SPECIAL 0x8 /* Enable response to special cycles */
+#define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */
+#define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */
+#define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */
+#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */
+#define PCI_COMMAND_SERR 0x100 /* Enable SERR */
+#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */
+
+#define PCI_STATUS 0x06 /* 16 bits */
+#define PCI_STATUS_66MHZ 0x20 /* Support 66 Mhz PCI 2.1 bus */
+#define PCI_STATUS_UDF 0x40 /* Support User Definable Features */
+
+#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */
+#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
+#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */
+#define PCI_STATUS_DEVSEL_FAST 0x000
+#define PCI_STATUS_DEVSEL_MEDIUM 0x200
+#define PCI_STATUS_DEVSEL_SLOW 0x400
+#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */
+#define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master received target abort */
+#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */
+#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */
+#define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */
+
+#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8
+ revision */
+#define PCI_REVISION_ID 0x08 /* Revision ID */
+#define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */
+#define PCI_CLASS_DEVICE 0x0a /* Device class */
+
+#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
+#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
+#define PCI_HEADER_TYPE 0x0e /* 8 bits */
+#define PCI_HEADER_TYPE_NORMAL 0
+#define PCI_HEADER_TYPE_BRIDGE 1
+#define PCI_HEADER_TYPE_CARDBUS 2
+
+#define PCI_BIST 0x0f /* 8 bits */
+#define PCI_BIST_CODE_MASK 0x0f /* Return result */
+#define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */
+#define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */
+
+/*
+ * Base addresses specify locations in memory or I/O space.
+ * Decoded size can be determined by writing a value of
+ * 0xffffffff to the register, and reading it back. Only
+ * 1 bits are decoded.
+ */
+#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
+#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits [htype 0,1 only] */
+#define PCI_BASE_ADDRESS_2 0x18 /* 32 bits [htype 0 only] */
+#define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */
+#define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */
+#define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */
+#define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */
+#define PCI_BASE_ADDRESS_SPACE_IO 0x01
+#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00
+#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06
+#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */
+#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M */
+#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */
+#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */
+#define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL)
+#define PCI_BASE_ADDRESS_IO_MASK (~0x03UL)
+/* bit 1 is reserved if address_space = 1 */
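+
/*
 * Illustrative sketch of the sizing procedure described above: write all
 * ones to a base address register, read back which bits the device lets
 * change, restore the original value, and derive the decoded size.  It
 * assumes the pci_read/write_config_dword() accessors from the shared
 * pci.c; a real implementation would live next to them rather than here.
 */
unsigned int pci_bar_size(unsigned char bus, unsigned char slot,
                          unsigned char fun, unsigned char bar_offset)
{
    unsigned int orig, probe, mask;

    pci_read_config_dword(bus, slot, fun, bar_offset, &orig);
    pci_write_config_dword(bus, slot, fun, bar_offset, 0xffffffff);
    pci_read_config_dword(bus, slot, fun, bar_offset, &probe);
    pci_write_config_dword(bus, slot, fun, bar_offset, orig);      /* restore */

    if (orig & PCI_BASE_ADDRESS_SPACE_IO)
        mask = probe & PCI_BASE_ADDRESS_IO_MASK;
    else
        mask = probe & PCI_BASE_ADDRESS_MEM_MASK;

    return mask ? ~mask + 1 : 0;   /* size is the two's complement of the mask */
}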
+
+/* Header type 0 (normal devices) */
+#define PCI_CARDBUS_CIS 0x28
+#define PCI_SUBSYSTEM_VENDOR_ID 0x2c
+#define PCI_SUBSYSTEM_ID 0x2e
+#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */
+#define PCI_ROM_ADDRESS_ENABLE 0x01
+#define PCI_ROM_ADDRESS_MASK (~0x7ffUL)
+
+/* 0x34-0x3b are reserved */
+#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */
+#define PCI_INTERRUPT_PIN 0x3d /* 8 bits */
+#define PCI_MIN_GNT 0x3e /* 8 bits */
+#define PCI_MAX_LAT 0x3f /* 8 bits */
+
+/* Header type 1 (PCI-to-PCI bridges) */
+#define PCI_PRIMARY_BUS 0x18 /* Primary bus number */
+#define PCI_SECONDARY_BUS 0x19 /* Secondary bus number */
+#define PCI_SUBORDINATE_BUS 0x1a /* Highest bus number behind the bridge */
+#define PCI_SEC_LATENCY_TIMER 0x1b /* Latency timer for secondary interface */
+#define PCI_IO_BASE 0x1c /* I/O range behind the bridge */
+#define PCI_IO_LIMIT 0x1d
+#define PCI_IO_RANGE_TYPE_MASK 0x0f /* I/O bridging type */
+#define PCI_IO_RANGE_TYPE_16 0x00
+#define PCI_IO_RANGE_TYPE_32 0x01
+#define PCI_IO_RANGE_MASK ~0x0f
+#define PCI_SEC_STATUS 0x1e /* Secondary status register, only bit 14 used */
+#define PCI_MEMORY_BASE 0x20 /* Memory range behind */
+#define PCI_MEMORY_LIMIT 0x22
+#define PCI_MEMORY_RANGE_TYPE_MASK 0x0f
+#define PCI_MEMORY_RANGE_MASK ~0x0f
+#define PCI_PREF_MEMORY_BASE 0x24 /* Prefetchable memory range behind */
+#define PCI_PREF_MEMORY_LIMIT 0x26
+#define PCI_PREF_RANGE_TYPE_MASK 0x0f
+#define PCI_PREF_RANGE_TYPE_32 0x00
+#define PCI_PREF_RANGE_TYPE_64 0x01
+#define PCI_PREF_RANGE_MASK ~0x0f
+#define PCI_PREF_BASE_UPPER32 0x28 /* Upper half of prefetchable memory range */
+#define PCI_PREF_LIMIT_UPPER32 0x2c
+#define PCI_IO_BASE_UPPER16 0x30 /* Upper half of I/O addresses */
+#define PCI_IO_LIMIT_UPPER16 0x32
+/* 0x34-0x3b is reserved */
+#define PCI_ROM_ADDRESS1 0x38 /* Same as PCI_ROM_ADDRESS, but for htype 1 */
+/* 0x3c-0x3d are same as for htype 0 */
+#define PCI_BRIDGE_CONTROL 0x3e
+#define PCI_BRIDGE_CTL_PARITY 0x01 /* Enable parity detection on secondary interface */
+#define PCI_BRIDGE_CTL_SERR 0x02 /* The same for SERR forwarding */
+#define PCI_BRIDGE_CTL_NO_ISA 0x04 /* Disable bridging of ISA ports */
+#define PCI_BRIDGE_CTL_VGA 0x08 /* Forward VGA addresses */
+#define PCI_BRIDGE_CTL_MASTER_ABORT 0x20 /* Report master aborts */
+#define PCI_BRIDGE_CTL_BUS_RESET 0x40 /* Secondary bus reset */
+#define PCI_BRIDGE_CTL_FAST_BACK 0x80 /* Fast Back2Back enabled on secondary interface */
+
+/* Header type 2 (CardBus bridges) */
+/* 0x14-0x15 reserved */
+#define PCI_CB_SEC_STATUS 0x16 /* Secondary status */
+#define PCI_CB_PRIMARY_BUS 0x18 /* PCI bus number */
+#define PCI_CB_CARD_BUS 0x19 /* CardBus bus number */
+#define PCI_CB_SUBORDINATE_BUS 0x1a /* Subordinate bus number */
+#define PCI_CB_LATENCY_TIMER 0x1b /* CardBus latency timer */
+#define PCI_CB_MEMORY_BASE_0 0x1c
+#define PCI_CB_MEMORY_LIMIT_0 0x20
+#define PCI_CB_MEMORY_BASE_1 0x24
+#define PCI_CB_MEMORY_LIMIT_1 0x28
+#define PCI_CB_IO_BASE_0 0x2c
+#define PCI_CB_IO_BASE_0_HI 0x2e
+#define PCI_CB_IO_LIMIT_0 0x30
+#define PCI_CB_IO_LIMIT_0_HI 0x32
+#define PCI_CB_IO_BASE_1 0x34
+#define PCI_CB_IO_BASE_1_HI 0x36
+#define PCI_CB_IO_LIMIT_1 0x38
+#define PCI_CB_IO_LIMIT_1_HI 0x3a
+#define PCI_CB_IO_RANGE_MASK ~0x03
+/* 0x3c-0x3d are same as for htype 0 */
+#define PCI_CB_BRIDGE_CONTROL 0x3e
+#define PCI_CB_BRIDGE_CTL_PARITY 0x01 /* Similar to standard bridge control register */
+#define PCI_CB_BRIDGE_CTL_SERR 0x02
+#define PCI_CB_BRIDGE_CTL_ISA 0x04
+#define PCI_CB_BRIDGE_CTL_VGA 0x08
+#define PCI_CB_BRIDGE_CTL_MASTER_ABORT 0x20
+#define PCI_CB_BRIDGE_CTL_CB_RESET 0x40 /* CardBus reset */
+#define PCI_CB_BRIDGE_CTL_16BIT_INT 0x80 /* Enable interrupt for 16-bit cards */
+#define PCI_CB_BRIDGE_CTL_PREFETCH_MEM0 0x100 /* Prefetch enable for both memory regions */
+#define PCI_CB_BRIDGE_CTL_PREFETCH_MEM1 0x200
+#define PCI_CB_BRIDGE_CTL_POST_WRITES 0x400
+#define PCI_CB_SUBSYSTEM_VENDOR_ID 0x40
+#define PCI_CB_SUBSYSTEM_ID 0x42
+#define PCI_CB_LEGACY_MODE_BASE 0x44 /* 16-bit PC Card legacy mode base address (ExCa) */
+/* 0x48-0x7f reserved */
+
+/* Device classes and subclasses */
+
+#define PCI_CLASS_NOT_DEFINED 0x0000
+#define PCI_CLASS_NOT_DEFINED_VGA 0x0001
+
+#define PCI_BASE_CLASS_STORAGE 0x01
+#define PCI_CLASS_STORAGE_SCSI 0x0100
+#define PCI_CLASS_STORAGE_IDE 0x0101
+#define PCI_CLASS_STORAGE_FLOPPY 0x0102
+#define PCI_CLASS_STORAGE_IPI 0x0103
+#define PCI_CLASS_STORAGE_RAID 0x0104
+#define PCI_CLASS_STORAGE_OTHER 0x0180
+
+#define PCI_BASE_CLASS_NETWORK 0x02
+#define PCI_CLASS_NETWORK_ETHERNET 0x0200
+#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201
+#define PCI_CLASS_NETWORK_FDDI 0x0202
+#define PCI_CLASS_NETWORK_ATM 0x0203
+#define PCI_CLASS_NETWORK_OTHER 0x0280
+
+#define PCI_BASE_CLASS_DISPLAY 0x03
+#define PCI_CLASS_DISPLAY_VGA 0x0300
+#define PCI_CLASS_DISPLAY_XGA 0x0301
+#define PCI_CLASS_DISPLAY_OTHER 0x0380
+
+#define PCI_BASE_CLASS_MULTIMEDIA 0x04
+#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
+#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
+#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
+
+#define PCI_BASE_CLASS_MEMORY 0x05
+#define PCI_CLASS_MEMORY_RAM 0x0500
+#define PCI_CLASS_MEMORY_FLASH 0x0501
+#define PCI_CLASS_MEMORY_OTHER 0x0580
+
+#define PCI_BASE_CLASS_BRIDGE 0x06
+#define PCI_CLASS_BRIDGE_HOST 0x0600
+#define PCI_CLASS_BRIDGE_ISA 0x0601
+#define PCI_CLASS_BRIDGE_EISA 0x0602
+#define PCI_CLASS_BRIDGE_MC 0x0603
+#define PCI_CLASS_BRIDGE_PCI 0x0604
+#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
+#define PCI_CLASS_BRIDGE_NUBUS 0x0606
+#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
+#define PCI_CLASS_BRIDGE_OTHER 0x0680
+
+#define PCI_BASE_CLASS_COMMUNICATION 0x07
+#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700
+#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701
+#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
+
+#define PCI_BASE_CLASS_SYSTEM 0x08
+#define PCI_CLASS_SYSTEM_PIC 0x0800
+#define PCI_CLASS_SYSTEM_DMA 0x0801
+#define PCI_CLASS_SYSTEM_TIMER 0x0802
+#define PCI_CLASS_SYSTEM_RTC 0x0803
+#define PCI_CLASS_SYSTEM_OTHER 0x0880
+
+#define PCI_BASE_CLASS_INPUT 0x09
+#define PCI_CLASS_INPUT_KEYBOARD 0x0900
+#define PCI_CLASS_INPUT_PEN 0x0901
+#define PCI_CLASS_INPUT_MOUSE 0x0902
+#define PCI_CLASS_INPUT_OTHER 0x0980
+
+#define PCI_BASE_CLASS_DOCKING 0x0a
+#define PCI_CLASS_DOCKING_GENERIC 0x0a00
+#define PCI_CLASS_DOCKING_OTHER 0x0a01
+
+#define PCI_BASE_CLASS_PROCESSOR 0x0b
+#define PCI_CLASS_PROCESSOR_386 0x0b00
+#define PCI_CLASS_PROCESSOR_486 0x0b01
+#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02
+#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10
+#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20
+#define PCI_CLASS_PROCESSOR_CO 0x0b40
+
+#define PCI_BASE_CLASS_SERIAL 0x0c
+#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00
+#define PCI_CLASS_SERIAL_ACCESS 0x0c01
+#define PCI_CLASS_SERIAL_SSA 0x0c02
+#define PCI_CLASS_SERIAL_USB 0x0c03
+#define PCI_CLASS_SERIAL_FIBER 0x0c04
+
+#define PCI_CLASS_OTHERS 0xff
+
+/*
+ * Vendor and card ID's: sort these numerically according to vendor
+ * (and according to card ID within vendor). Send all updates to
+ * <linux-pcisupport@cck.uni-kl.de>.
+ */
+#define PCI_VENDOR_ID_COMPAQ 0x0e11
+#define PCI_DEVICE_ID_COMPAQ_1280 0x3033
+#define PCI_DEVICE_ID_COMPAQ_TRIFLEX 0x4000
+#define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10
+#define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32
+#define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35
+#define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40
+#define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43
+#define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011
+#define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150
+
+#define PCI_VENDOR_ID_NCR 0x1000
+#define PCI_DEVICE_ID_NCR_53C810 0x0001
+#define PCI_DEVICE_ID_NCR_53C820 0x0002
+#define PCI_DEVICE_ID_NCR_53C825 0x0003
+#define PCI_DEVICE_ID_NCR_53C815 0x0004
+#define PCI_DEVICE_ID_NCR_53C860 0x0006
+#define PCI_DEVICE_ID_NCR_53C896 0x000b
+#define PCI_DEVICE_ID_NCR_53C895 0x000c
+#define PCI_DEVICE_ID_NCR_53C885 0x000d
+#define PCI_DEVICE_ID_NCR_53C875 0x000f
+#define PCI_DEVICE_ID_NCR_53C875J 0x008f
+
+#define PCI_VENDOR_ID_ATI 0x1002
+#define PCI_DEVICE_ID_ATI_68800 0x4158
+#define PCI_DEVICE_ID_ATI_215CT222 0x4354
+#define PCI_DEVICE_ID_ATI_210888CX 0x4358
+#define PCI_DEVICE_ID_ATI_215GB 0x4742
+#define PCI_DEVICE_ID_ATI_215GD 0x4744
+#define PCI_DEVICE_ID_ATI_215GI 0x4749
+#define PCI_DEVICE_ID_ATI_215GP 0x4750
+#define PCI_DEVICE_ID_ATI_215GQ 0x4751
+#define PCI_DEVICE_ID_ATI_215GT 0x4754
+#define PCI_DEVICE_ID_ATI_215GTB 0x4755
+#define PCI_DEVICE_ID_ATI_210888GX 0x4758
+#define PCI_DEVICE_ID_ATI_215LG 0x4c47
+#define PCI_DEVICE_ID_ATI_264LT 0x4c54
+#define PCI_DEVICE_ID_ATI_264VT 0x5654
+
+#define PCI_VENDOR_ID_VLSI 0x1004
+#define PCI_DEVICE_ID_VLSI_82C592 0x0005
+#define PCI_DEVICE_ID_VLSI_82C593 0x0006
+#define PCI_DEVICE_ID_VLSI_82C594 0x0007
+#define PCI_DEVICE_ID_VLSI_82C597 0x0009
+#define PCI_DEVICE_ID_VLSI_82C541 0x000c
+#define PCI_DEVICE_ID_VLSI_82C543 0x000d
+#define PCI_DEVICE_ID_VLSI_82C532 0x0101
+#define PCI_DEVICE_ID_VLSI_82C534 0x0102
+#define PCI_DEVICE_ID_VLSI_82C535 0x0104
+#define PCI_DEVICE_ID_VLSI_82C147 0x0105
+#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
+
+#define PCI_VENDOR_ID_ADL 0x1005
+#define PCI_DEVICE_ID_ADL_2301 0x2301
+
+#define PCI_VENDOR_ID_NS 0x100b
+#define PCI_DEVICE_ID_NS_87415 0x0002
+#define PCI_DEVICE_ID_NS_87410 0xd001
+
+#define PCI_VENDOR_ID_TSENG 0x100c
+#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202
+#define PCI_DEVICE_ID_TSENG_W32P_b 0x3205
+#define PCI_DEVICE_ID_TSENG_W32P_c 0x3206
+#define PCI_DEVICE_ID_TSENG_W32P_d 0x3207
+#define PCI_DEVICE_ID_TSENG_ET6000 0x3208
+
+#define PCI_VENDOR_ID_WEITEK 0x100e
+#define PCI_DEVICE_ID_WEITEK_P9000 0x9001
+#define PCI_DEVICE_ID_WEITEK_P9100 0x9100
+
+#define PCI_VENDOR_ID_DEC 0x1011
+#define PCI_DEVICE_ID_DEC_BRD 0x0001
+#define PCI_DEVICE_ID_DEC_TULIP 0x0002
+#define PCI_DEVICE_ID_DEC_TGA 0x0004
+#define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009
+#define PCI_DEVICE_ID_DEC_TGA2 0x000D
+#define PCI_DEVICE_ID_DEC_FDDI 0x000F
+#define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014
+#define PCI_DEVICE_ID_DEC_21142 0x0019
+#define PCI_DEVICE_ID_DEC_21052 0x0021
+#define PCI_DEVICE_ID_DEC_21150 0x0022
+#define PCI_DEVICE_ID_DEC_21152 0x0024
+
+#define PCI_VENDOR_ID_CIRRUS 0x1013
+#define PCI_DEVICE_ID_CIRRUS_7548 0x0038
+#define PCI_DEVICE_ID_CIRRUS_5430 0x00a0
+#define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4
+#define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8
+#define PCI_DEVICE_ID_CIRRUS_5436 0x00ac
+#define PCI_DEVICE_ID_CIRRUS_5446 0x00b8
+#define PCI_DEVICE_ID_CIRRUS_5480 0x00bc
+#define PCI_DEVICE_ID_CIRRUS_5464 0x00d4
+#define PCI_DEVICE_ID_CIRRUS_5465 0x00d6
+#define PCI_DEVICE_ID_CIRRUS_6729 0x1100
+#define PCI_DEVICE_ID_CIRRUS_6832 0x1110
+#define PCI_DEVICE_ID_CIRRUS_7542 0x1200
+#define PCI_DEVICE_ID_CIRRUS_7543 0x1202
+#define PCI_DEVICE_ID_CIRRUS_7541 0x1204
+
+#define PCI_VENDOR_ID_IBM 0x1014
+#define PCI_DEVICE_ID_IBM_FIRE_CORAL 0x000a
+#define PCI_DEVICE_ID_IBM_TR 0x0018
+#define PCI_DEVICE_ID_IBM_82G2675 0x001d
+#define PCI_DEVICE_ID_IBM_MCA 0x0020
+#define PCI_DEVICE_ID_IBM_82351 0x0022
+#define PCI_DEVICE_ID_IBM_SERVERAID 0x002e
+#define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e
+#define PCI_DEVICE_ID_IBM_MPIC 0x0046
+#define PCI_DEVICE_ID_IBM_3780IDSP 0x007d
+#define PCI_DEVICE_ID_IBM_MPIC_2 0xffff
+
+#define PCI_VENDOR_ID_WD 0x101c
+#define PCI_DEVICE_ID_WD_7197 0x3296
+
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD_LANCE 0x2000
+#define PCI_DEVICE_ID_AMD_SCSI 0x2020
+
+#define PCI_VENDOR_ID_TRIDENT 0x1023
+#define PCI_DEVICE_ID_TRIDENT_9397 0x9397
+#define PCI_DEVICE_ID_TRIDENT_9420 0x9420
+#define PCI_DEVICE_ID_TRIDENT_9440 0x9440
+#define PCI_DEVICE_ID_TRIDENT_9660 0x9660
+#define PCI_DEVICE_ID_TRIDENT_9750 0x9750
+
+#define PCI_VENDOR_ID_AI 0x1025
+#define PCI_DEVICE_ID_AI_M1435 0x1435
+
+#define PCI_VENDOR_ID_MATROX 0x102B
+#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
+#define PCI_DEVICE_ID_MATROX_MIL 0x0519
+#define PCI_DEVICE_ID_MATROX_MYS 0x051A
+#define PCI_DEVICE_ID_MATROX_MIL_2 0x051b
+#define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f
+#define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10
+
+#define PCI_VENDOR_ID_CT 0x102c
+#define PCI_DEVICE_ID_CT_65545 0x00d8
+#define PCI_DEVICE_ID_CT_65548 0x00dc
+#define PCI_DEVICE_ID_CT_65550 0x00e0
+#define PCI_DEVICE_ID_CT_65554 0x00e4
+#define PCI_DEVICE_ID_CT_65555 0x00e5
+
+#define PCI_VENDOR_ID_MIRO 0x1031
+#define PCI_DEVICE_ID_MIRO_36050 0x5601
+
+#define PCI_VENDOR_ID_NEC 0x1033
+#define PCI_DEVICE_ID_NEC_PCX2 0x0046
+
+#define PCI_VENDOR_ID_FD 0x1036
+#define PCI_DEVICE_ID_FD_36C70 0x0000
+
+#define PCI_VENDOR_ID_SI 0x1039
+#define PCI_DEVICE_ID_SI_5591_AGP 0x0001
+#define PCI_DEVICE_ID_SI_6202 0x0002
+#define PCI_DEVICE_ID_SI_503 0x0008
+#define PCI_DEVICE_ID_SI_ACPI 0x0009
+#define PCI_DEVICE_ID_SI_5597_VGA 0x0200
+#define PCI_DEVICE_ID_SI_6205 0x0205
+#define PCI_DEVICE_ID_SI_501 0x0406
+#define PCI_DEVICE_ID_SI_496 0x0496
+#define PCI_DEVICE_ID_SI_601 0x0601
+#define PCI_DEVICE_ID_SI_5107 0x5107
+#define PCI_DEVICE_ID_SI_5511 0x5511
+#define PCI_DEVICE_ID_SI_5513 0x5513
+#define PCI_DEVICE_ID_SI_5571 0x5571
+#define PCI_DEVICE_ID_SI_5591 0x5591
+#define PCI_DEVICE_ID_SI_5597 0x5597
+#define PCI_DEVICE_ID_SI_7001 0x7001
+
+#define PCI_VENDOR_ID_HP 0x103c
+#define PCI_DEVICE_ID_HP_J2585A 0x1030
+#define PCI_DEVICE_ID_HP_J2585B 0x1031
+
+#define PCI_VENDOR_ID_PCTECH 0x1042
+#define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000
+#define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_0 0x3000
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_1 0x3010
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020
+
+#define PCI_VENDOR_ID_DPT 0x1044
+#define PCI_DEVICE_ID_DPT 0xa400
+
+#define PCI_VENDOR_ID_OPTI 0x1045
+#define PCI_DEVICE_ID_OPTI_92C178 0xc178
+#define PCI_DEVICE_ID_OPTI_82C557 0xc557
+#define PCI_DEVICE_ID_OPTI_82C558 0xc558
+#define PCI_DEVICE_ID_OPTI_82C621 0xc621
+#define PCI_DEVICE_ID_OPTI_82C700 0xc700
+#define PCI_DEVICE_ID_OPTI_82C701 0xc701
+#define PCI_DEVICE_ID_OPTI_82C814 0xc814
+#define PCI_DEVICE_ID_OPTI_82C822 0xc822
+#define PCI_DEVICE_ID_OPTI_82C825 0xd568
+
+#define PCI_VENDOR_ID_SGS 0x104a
+#define PCI_DEVICE_ID_SGS_2000 0x0008
+#define PCI_DEVICE_ID_SGS_1764 0x0009
+
+#define PCI_VENDOR_ID_BUSLOGIC 0x104B
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040
+#define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130
+
+#define PCI_VENDOR_ID_TI 0x104c
+#define PCI_DEVICE_ID_TI_TVP4010 0x3d04
+#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
+#define PCI_DEVICE_ID_TI_PCI1130 0xac12
+#define PCI_DEVICE_ID_TI_PCI1031 0xac13
+#define PCI_DEVICE_ID_TI_PCI1131 0xac15
+#define PCI_DEVICE_ID_TI_PCI1250 0xac16
+#define PCI_DEVICE_ID_TI_PCI1220 0xac17
+
+#define PCI_VENDOR_ID_OAK 0x104e
+#define PCI_DEVICE_ID_OAK_OTI107 0x0107
+
+/* Winbond have two vendor IDs! See 0x10ad as well */
+#define PCI_VENDOR_ID_WINBOND2 0x1050
+#define PCI_DEVICE_ID_WINBOND2_89C940 0x0940
+
+#define PCI_VENDOR_ID_MOTOROLA 0x1057
+#define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001
+#define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002
+#define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801
+
+#define PCI_VENDOR_ID_PROMISE 0x105a
+#define PCI_DEVICE_ID_PROMISE_20246 0x4d33
+#define PCI_DEVICE_ID_PROMISE_5300 0x5300
+
+#define PCI_VENDOR_ID_N9 0x105d
+#define PCI_DEVICE_ID_N9_I128 0x2309
+#define PCI_DEVICE_ID_N9_I128_2 0x2339
+#define PCI_DEVICE_ID_N9_I128_T2R 0x493d
+
+#define PCI_VENDOR_ID_UMC 0x1060
+#define PCI_DEVICE_ID_UMC_UM8673F 0x0101
+#define PCI_DEVICE_ID_UMC_UM8891A 0x0891
+#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a
+#define PCI_DEVICE_ID_UMC_UM8886A 0x886a
+#define PCI_DEVICE_ID_UMC_UM8881F 0x8881
+#define PCI_DEVICE_ID_UMC_UM8886F 0x8886
+#define PCI_DEVICE_ID_UMC_UM9017F 0x9017
+#define PCI_DEVICE_ID_UMC_UM8886N 0xe886
+#define PCI_DEVICE_ID_UMC_UM8891N 0xe891
+
+#define PCI_VENDOR_ID_X 0x1061
+#define PCI_DEVICE_ID_X_AGX016 0x0001
+
+#define PCI_VENDOR_ID_PICOP 0x1066
+#define PCI_DEVICE_ID_PICOP_PT86C52X 0x0001
+#define PCI_DEVICE_ID_PICOP_PT80C524 0x8002
+
+#define PCI_VENDOR_ID_APPLE 0x106b
+#define PCI_DEVICE_ID_APPLE_BANDIT 0x0001
+#define PCI_DEVICE_ID_APPLE_GC 0x0002
+#define PCI_DEVICE_ID_APPLE_HYDRA 0x000e
+
+#define PCI_VENDOR_ID_NEXGEN 0x1074
+#define PCI_DEVICE_ID_NEXGEN_82C501 0x4e78
+
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+#define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020
+#define PCI_DEVICE_ID_QLOGIC_ISP1022 0x1022
+
+#define PCI_VENDOR_ID_CYRIX 0x1078
+#define PCI_DEVICE_ID_CYRIX_5510 0x0000
+#define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001
+#define PCI_DEVICE_ID_CYRIX_5520 0x0002
+#define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100
+#define PCI_DEVICE_ID_CYRIX_5530_SMI 0x0101
+#define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102
+#define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103
+#define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104
+
+#define PCI_VENDOR_ID_LEADTEK 0x107d
+#define PCI_DEVICE_ID_LEADTEK_805 0x0000
+
+#define PCI_VENDOR_ID_CONTAQ 0x1080
+#define PCI_DEVICE_ID_CONTAQ_82C599 0x0600
+#define PCI_DEVICE_ID_CONTAQ_82C693 0xc693
+
+#define PCI_VENDOR_ID_FOREX 0x1083
+
+#define PCI_VENDOR_ID_OLICOM 0x108d
+#define PCI_DEVICE_ID_OLICOM_OC3136 0x0001
+#define PCI_DEVICE_ID_OLICOM_OC2315 0x0011
+#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012
+#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013
+#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
+#define PCI_DEVICE_ID_OLICOM_OC6151 0x0021
+
+#define PCI_VENDOR_ID_SUN 0x108e
+#define PCI_DEVICE_ID_SUN_EBUS 0x1000
+#define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001
+#define PCI_DEVICE_ID_SUN_SIMBA 0x5000
+#define PCI_DEVICE_ID_SUN_PBM 0x8000
+#define PCI_DEVICE_ID_SUN_SABRE 0xa000
+
+#define PCI_VENDOR_ID_CMD 0x1095
+#define PCI_DEVICE_ID_CMD_640 0x0640
+#define PCI_DEVICE_ID_CMD_643 0x0643
+#define PCI_DEVICE_ID_CMD_646 0x0646
+#define PCI_DEVICE_ID_CMD_647 0x0647
+#define PCI_DEVICE_ID_CMD_670 0x0670
+
+#define PCI_VENDOR_ID_VISION 0x1098
+#define PCI_DEVICE_ID_VISION_QD8500 0x0001
+#define PCI_DEVICE_ID_VISION_QD8580 0x0002
+
+#define PCI_VENDOR_ID_BROOKTREE 0x109e
+#define PCI_DEVICE_ID_BROOKTREE_848 0x0350
+#define PCI_DEVICE_ID_BROOKTREE_849A 0x0351
+#define PCI_DEVICE_ID_BROOKTREE_8474 0x8474
+
+#define PCI_VENDOR_ID_SIERRA 0x10a8
+#define PCI_DEVICE_ID_SIERRA_STB 0x0000
+
+#define PCI_VENDOR_ID_ACC 0x10aa
+#define PCI_DEVICE_ID_ACC_2056 0x0000
+
+#define PCI_VENDOR_ID_WINBOND 0x10ad
+#define PCI_DEVICE_ID_WINBOND_83769 0x0001
+#define PCI_DEVICE_ID_WINBOND_82C105 0x0105
+#define PCI_DEVICE_ID_WINBOND_83C553 0x0565
+
+#define PCI_VENDOR_ID_DATABOOK 0x10b3
+#define PCI_DEVICE_ID_DATABOOK_87144 0xb106
+
+#define PCI_VENDOR_ID_PLX 0x10b5
+#define PCI_DEVICE_ID_PLX_9050 0x9050
+#define PCI_DEVICE_ID_PLX_9060 0x9060
+#define PCI_DEVICE_ID_PLX_9060ES 0x906E
+#define PCI_DEVICE_ID_PLX_9060SD 0x906D
+#define PCI_DEVICE_ID_PLX_9080 0x9080
+
+#define PCI_VENDOR_ID_MADGE 0x10b6
+#define PCI_DEVICE_ID_MADGE_MK2 0x0002
+#define PCI_DEVICE_ID_MADGE_C155S 0x1001
+
+#define PCI_VENDOR_ID_3COM 0x10b7
+#define PCI_DEVICE_ID_3COM_3C339 0x3390
+#define PCI_DEVICE_ID_3COM_3C590 0x5900
+#define PCI_DEVICE_ID_3COM_3C595TX 0x5950
+#define PCI_DEVICE_ID_3COM_3C595T4 0x5951
+#define PCI_DEVICE_ID_3COM_3C595MII 0x5952
+#define PCI_DEVICE_ID_3COM_3C900TPO 0x9000
+#define PCI_DEVICE_ID_3COM_3C900COMBO 0x9001
+#define PCI_DEVICE_ID_3COM_3C905TX 0x9050
+#define PCI_DEVICE_ID_3COM_3C905T4 0x9051
+#define PCI_DEVICE_ID_3COM_3C905B_TX 0x9055
+
+#define PCI_VENDOR_ID_SMC 0x10b8
+#define PCI_DEVICE_ID_SMC_EPIC100 0x0005
+
+#define PCI_VENDOR_ID_AL 0x10b9
+#define PCI_DEVICE_ID_AL_M1445 0x1445
+#define PCI_DEVICE_ID_AL_M1449 0x1449
+#define PCI_DEVICE_ID_AL_M1451 0x1451
+#define PCI_DEVICE_ID_AL_M1461 0x1461
+#define PCI_DEVICE_ID_AL_M1489 0x1489
+#define PCI_DEVICE_ID_AL_M1511 0x1511
+#define PCI_DEVICE_ID_AL_M1513 0x1513
+#define PCI_DEVICE_ID_AL_M1521 0x1521
+#define PCI_DEVICE_ID_AL_M1523 0x1523
+#define PCI_DEVICE_ID_AL_M1531 0x1531
+#define PCI_DEVICE_ID_AL_M1533 0x1533
+#define PCI_DEVICE_ID_AL_M3307 0x3307
+#define PCI_DEVICE_ID_AL_M4803 0x5215
+#define PCI_DEVICE_ID_AL_M5219 0x5219
+#define PCI_DEVICE_ID_AL_M5229 0x5229
+#define PCI_DEVICE_ID_AL_M5237 0x5237
+#define PCI_DEVICE_ID_AL_M7101 0x7101
+
+#define PCI_VENDOR_ID_MITSUBISHI 0x10ba
+
+#define PCI_VENDOR_ID_SURECOM 0x10bd
+#define PCI_DEVICE_ID_SURECOM_NE34 0x0e34
+
+#define PCI_VENDOR_ID_NEOMAGIC 0x10c8
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2070 0x0001
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128V 0x0002
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128ZV 0x0003
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2160 0x0004
+
+#define PCI_VENDOR_ID_ASP 0x10cd
+#define PCI_DEVICE_ID_ASP_ABP940 0x1200
+#define PCI_DEVICE_ID_ASP_ABP940U 0x1300
+#define PCI_DEVICE_ID_ASP_ABP940UW 0x2300
+
+#define PCI_VENDOR_ID_MACRONIX 0x10d9
+#define PCI_DEVICE_ID_MACRONIX_MX98713 0x0512
+#define PCI_DEVICE_ID_MACRONIX_MX987x5 0x0531
+
+#define PCI_VENDOR_ID_CERN 0x10dc
+#define PCI_DEVICE_ID_CERN_SPSB_PMC 0x0001
+#define PCI_DEVICE_ID_CERN_SPSB_PCI 0x0002
+#define PCI_DEVICE_ID_CERN_HIPPI_DST 0x0021
+#define PCI_DEVICE_ID_CERN_HIPPI_SRC 0x0022
+
+#define PCI_VENDOR_ID_NVIDIA 0x10de
+
+#define PCI_VENDOR_ID_IMS 0x10e0
+#define PCI_DEVICE_ID_IMS_8849 0x8849
+
+#define PCI_VENDOR_ID_TEKRAM2 0x10e1
+#define PCI_DEVICE_ID_TEKRAM2_690c 0x690c
+
+#define PCI_VENDOR_ID_TUNDRA 0x10e3
+#define PCI_DEVICE_ID_TUNDRA_CA91C042 0x0000
+
+#define PCI_VENDOR_ID_AMCC 0x10e8
+#define PCI_DEVICE_ID_AMCC_MYRINET 0x8043
+#define PCI_DEVICE_ID_AMCC_PARASTATION 0x8062
+#define PCI_DEVICE_ID_AMCC_S5933 0x807d
+#define PCI_DEVICE_ID_AMCC_S5933_HEPC3 0x809c
+
+#define PCI_VENDOR_ID_INTERG 0x10ea
+#define PCI_DEVICE_ID_INTERG_1680 0x1680
+#define PCI_DEVICE_ID_INTERG_1682 0x1682
+
+#define PCI_VENDOR_ID_REALTEK 0x10ec
+#define PCI_DEVICE_ID_REALTEK_8029 0x8029
+#define PCI_DEVICE_ID_REALTEK_8129 0x8129
+#define PCI_DEVICE_ID_REALTEK_8139 0x8139
+
+#define PCI_VENDOR_ID_TRUEVISION 0x10fa
+#define PCI_DEVICE_ID_TRUEVISION_T1000 0x000c
+
+#define PCI_VENDOR_ID_INIT 0x1101
+#define PCI_DEVICE_ID_INIT_320P 0x9100
+#define PCI_DEVICE_ID_INIT_360P 0x9500
+
+#define PCI_VENDOR_ID_TTI 0x1103
+#define PCI_DEVICE_ID_TTI_HPT343 0x0003
+
+#define PCI_VENDOR_ID_VIA 0x1106
+#define PCI_DEVICE_ID_VIA_82C505 0x0505
+#define PCI_DEVICE_ID_VIA_82C561 0x0561
+#define PCI_DEVICE_ID_VIA_82C586_1 0x0571
+#define PCI_DEVICE_ID_VIA_82C576 0x0576
+#define PCI_DEVICE_ID_VIA_82C585 0x0585
+#define PCI_DEVICE_ID_VIA_82C586_0 0x0586
+#define PCI_DEVICE_ID_VIA_82C595 0x0595
+#define PCI_DEVICE_ID_VIA_82C597_0 0x0597
+#define PCI_DEVICE_ID_VIA_82C926 0x0926
+#define PCI_DEVICE_ID_VIA_82C416 0x1571
+#define PCI_DEVICE_ID_VIA_82C595_97 0x1595
+#define PCI_DEVICE_ID_VIA_82C586_2 0x3038
+#define PCI_DEVICE_ID_VIA_82C586_3 0x3040
+#define PCI_DEVICE_ID_VIA_86C100A 0x6100
+#define PCI_DEVICE_ID_VIA_82C597_1 0x8597
+
+#define PCI_VENDOR_ID_VORTEX 0x1119
+#define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000
+#define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001
+#define PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002
+#define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003
+#define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004
+#define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005
+#define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006
+#define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007
+#define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008
+#define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009
+#define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a
+#define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b
+#define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c
+#define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP1 0x0110
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP1 0x0111
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP1 0x0112
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP1 0x0113
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP1 0x0114
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP1 0x0115
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP2 0x0120
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP2 0x0121
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP2 0x0122
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP2 0x0123
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP2 0x0124
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP2 0x0125
+
+#define PCI_VENDOR_ID_EF 0x111a
+#define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000
+#define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002
+
+#define PCI_VENDOR_ID_FORE 0x1127
+#define PCI_DEVICE_ID_FORE_PCA200PC 0x0210
+#define PCI_DEVICE_ID_FORE_PCA200E 0x0300
+
+#define PCI_VENDOR_ID_IMAGINGTECH 0x112f
+#define PCI_DEVICE_ID_IMAGINGTECH_ICPCI 0x0000
+
+#define PCI_VENDOR_ID_PHILIPS 0x1131
+#define PCI_DEVICE_ID_PHILIPS_SAA7145 0x7145
+#define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146
+
+#define PCI_VENDOR_ID_CYCLONE 0x113c
+#define PCI_DEVICE_ID_CYCLONE_SDK 0x0001
+
+#define PCI_VENDOR_ID_ALLIANCE 0x1142
+#define PCI_DEVICE_ID_ALLIANCE_PROMOTIO 0x3210
+#define PCI_DEVICE_ID_ALLIANCE_PROVIDEO 0x6422
+#define PCI_DEVICE_ID_ALLIANCE_AT24 0x6424
+#define PCI_DEVICE_ID_ALLIANCE_AT3D 0x643d
+
+#define PCI_VENDOR_ID_SK 0x1148
+#define PCI_DEVICE_ID_SK_FP 0x4000
+#define PCI_DEVICE_ID_SK_TR 0x4200
+#define PCI_DEVICE_ID_SK_GE 0x4300
+
+#define PCI_VENDOR_ID_VMIC 0x114a
+#define PCI_DEVICE_ID_VMIC_VME 0x7587
+
+#define PCI_VENDOR_ID_DIGI 0x114f
+#define PCI_DEVICE_ID_DIGI_EPC 0x0002
+#define PCI_DEVICE_ID_DIGI_RIGHTSWITCH 0x0003
+#define PCI_DEVICE_ID_DIGI_XEM 0x0004
+#define PCI_DEVICE_ID_DIGI_XR 0x0005
+#define PCI_DEVICE_ID_DIGI_CX 0x0006
+#define PCI_DEVICE_ID_DIGI_XRJ 0x0009
+#define PCI_DEVICE_ID_DIGI_EPCJ 0x000a
+#define PCI_DEVICE_ID_DIGI_XR_920 0x0027
+
+#define PCI_VENDOR_ID_MUTECH 0x1159
+#define PCI_DEVICE_ID_MUTECH_MV1000 0x0001
+
+#define PCI_VENDOR_ID_RENDITION 0x1163
+#define PCI_DEVICE_ID_RENDITION_VERITE 0x0001
+#define PCI_DEVICE_ID_RENDITION_VERITE2100 0x2000
+
+#define PCI_VENDOR_ID_TOSHIBA 0x1179
+#define PCI_DEVICE_ID_TOSHIBA_601 0x0601
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f
+
+#define PCI_VENDOR_ID_RICOH 0x1180
+#define PCI_DEVICE_ID_RICOH_RL5C465 0x0465
+#define PCI_DEVICE_ID_RICOH_RL5C466 0x0466
+#define PCI_DEVICE_ID_RICOH_RL5C475 0x0475
+#define PCI_DEVICE_ID_RICOH_RL5C478 0x0478
+
+#define PCI_VENDOR_ID_ARTOP 0x1191
+#define PCI_DEVICE_ID_ARTOP_ATP8400 0x0004
+#define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005
+
+#define PCI_VENDOR_ID_ZEITNET 0x1193
+#define PCI_DEVICE_ID_ZEITNET_1221 0x0001
+#define PCI_DEVICE_ID_ZEITNET_1225 0x0002
+
+#define PCI_VENDOR_ID_OMEGA 0x119b
+#define PCI_DEVICE_ID_OMEGA_82C092G 0x1221
+
+#define PCI_VENDOR_ID_LITEON 0x11ad
+#define PCI_DEVICE_ID_LITEON_LNE100TX 0x0002
+
+#define PCI_VENDOR_ID_NP 0x11bc
+#define PCI_DEVICE_ID_NP_PCI_FDDI 0x0001
+
+#define PCI_VENDOR_ID_ATT 0x11c1
+#define PCI_DEVICE_ID_ATT_L56XMF 0x0440
+
+#define PCI_VENDOR_ID_SPECIALIX 0x11cb
+#define PCI_DEVICE_ID_SPECIALIX_IO8 0x2000
+#define PCI_DEVICE_ID_SPECIALIX_XIO 0x4000
+#define PCI_DEVICE_ID_SPECIALIX_RIO 0x8000
+
+#define PCI_VENDOR_ID_AURAVISION 0x11d1
+#define PCI_DEVICE_ID_AURAVISION_VXP524 0x01f7
+
+#define PCI_VENDOR_ID_IKON 0x11d5
+#define PCI_DEVICE_ID_IKON_10115 0x0115
+#define PCI_DEVICE_ID_IKON_10117 0x0117
+
+#define PCI_VENDOR_ID_ZORAN 0x11de
+#define PCI_DEVICE_ID_ZORAN_36057 0x6057
+#define PCI_DEVICE_ID_ZORAN_36120 0x6120
+
+#define PCI_VENDOR_ID_KINETIC 0x11f4
+#define PCI_DEVICE_ID_KINETIC_2915 0x2915
+
+#define PCI_VENDOR_ID_COMPEX 0x11f6
+#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112
+#define PCI_DEVICE_ID_COMPEX_RL2000 0x1401
+
+#define PCI_VENDOR_ID_RP 0x11fe
+#define PCI_DEVICE_ID_RP32INTF 0x0001
+#define PCI_DEVICE_ID_RP8INTF 0x0002
+#define PCI_DEVICE_ID_RP16INTF 0x0003
+#define PCI_DEVICE_ID_RP4QUAD 0x0004
+#define PCI_DEVICE_ID_RP8OCTA 0x0005
+#define PCI_DEVICE_ID_RP8J 0x0006
+#define PCI_DEVICE_ID_RPP4 0x000A
+#define PCI_DEVICE_ID_RPP8 0x000B
+#define PCI_DEVICE_ID_RP8M 0x000C
+
+#define PCI_VENDOR_ID_CYCLADES 0x120e
+#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100
+#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101
+#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200
+#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201
+
+#define PCI_VENDOR_ID_ESSENTIAL 0x120f
+#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001
+
+#define PCI_VENDOR_ID_O2 0x1217
+#define PCI_DEVICE_ID_O2_6729 0x6729
+#define PCI_DEVICE_ID_O2_6730 0x673a
+#define PCI_DEVICE_ID_O2_6832 0x6832
+#define PCI_DEVICE_ID_O2_6836 0x6836
+
+#define PCI_VENDOR_ID_3DFX 0x121a
+#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001
+#define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002
+
+#define PCI_VENDOR_ID_SIGMADES 0x1236
+#define PCI_DEVICE_ID_SIGMADES_6425 0x6401
+
+#define PCI_VENDOR_ID_CCUBE 0x123f
+
+#define PCI_VENDOR_ID_DIPIX 0x1246
+
+#define PCI_VENDOR_ID_STALLION 0x124d
+#define PCI_DEVICE_ID_STALLION_ECHPCI832 0x0000
+#define PCI_DEVICE_ID_STALLION_ECHPCI864 0x0002
+#define PCI_DEVICE_ID_STALLION_EIOPCI 0x0003
+
+#define PCI_VENDOR_ID_OPTIBASE 0x1255
+#define PCI_DEVICE_ID_OPTIBASE_FORGE 0x1110
+#define PCI_DEVICE_ID_OPTIBASE_FUSION 0x1210
+#define PCI_DEVICE_ID_OPTIBASE_VPLEX 0x2110
+#define PCI_DEVICE_ID_OPTIBASE_VPLEXCC 0x2120
+#define PCI_DEVICE_ID_OPTIBASE_VQUEST 0x2130
+
+#define PCI_VENDOR_ID_SATSAGEM 0x1267
+#define PCI_DEVICE_ID_SATSAGEM_PCR2101 0x5352
+#define PCI_DEVICE_ID_SATSAGEM_TELSATTURBO 0x5a4b
+
+#define PCI_VENDOR_ID_HUGHES 0x1273
+#define PCI_DEVICE_ID_HUGHES_DIRECPC 0x0002
+
+#define PCI_VENDOR_ID_ENSONIQ 0x1274
+#define PCI_DEVICE_ID_ENSONIQ_AUDIOPCI 0x5000
+
+#define PCI_VENDOR_ID_ALTEON 0x12ae
+#define PCI_DEVICE_ID_ALTEON_ACENIC 0x0001
+
+#define PCI_VENDOR_ID_PICTUREL 0x12c5
+#define PCI_DEVICE_ID_PICTUREL_PCIVST 0x0081
+
+#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
+#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
+
+#define PCI_VENDOR_ID_CBOARDS 0x1307
+#define PCI_DEVICE_ID_CBOARDS_DAS1602_16 0x0001
+
+#define PCI_VENDOR_ID_SYMPHONY 0x1c1c
+#define PCI_DEVICE_ID_SYMPHONY_101 0x0001
+
+#define PCI_VENDOR_ID_TEKRAM 0x1de1
+#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+
+#define PCI_VENDOR_ID_3DLABS 0x3d3d
+#define PCI_DEVICE_ID_3DLABS_300SX 0x0001
+#define PCI_DEVICE_ID_3DLABS_500TX 0x0002
+#define PCI_DEVICE_ID_3DLABS_DELTA 0x0003
+#define PCI_DEVICE_ID_3DLABS_PERMEDIA 0x0004
+#define PCI_DEVICE_ID_3DLABS_MX 0x0006
+
+#define PCI_VENDOR_ID_AVANCE 0x4005
+#define PCI_DEVICE_ID_AVANCE_ALG2064 0x2064
+#define PCI_DEVICE_ID_AVANCE_2302 0x2302
+
+#define PCI_VENDOR_ID_NETVIN 0x4a14
+#define PCI_DEVICE_ID_NETVIN_NV5000SC 0x5000
+
+#define PCI_VENDOR_ID_S3 0x5333
+#define PCI_DEVICE_ID_S3_PLATO_PXS 0x0551
+#define PCI_DEVICE_ID_S3_ViRGE 0x5631
+#define PCI_DEVICE_ID_S3_TRIO 0x8811
+#define PCI_DEVICE_ID_S3_AURORA64VP 0x8812
+#define PCI_DEVICE_ID_S3_TRIO64UVP 0x8814
+#define PCI_DEVICE_ID_S3_ViRGE_VX 0x883d
+#define PCI_DEVICE_ID_S3_868 0x8880
+#define PCI_DEVICE_ID_S3_928 0x88b0
+#define PCI_DEVICE_ID_S3_864_1 0x88c0
+#define PCI_DEVICE_ID_S3_864_2 0x88c1
+#define PCI_DEVICE_ID_S3_964_1 0x88d0
+#define PCI_DEVICE_ID_S3_964_2 0x88d1
+#define PCI_DEVICE_ID_S3_968 0x88f0
+#define PCI_DEVICE_ID_S3_TRIO64V2 0x8901
+#define PCI_DEVICE_ID_S3_PLATO_PXG 0x8902
+#define PCI_DEVICE_ID_S3_ViRGE_DXGX 0x8a01
+#define PCI_DEVICE_ID_S3_ViRGE_GX2 0x8a10
+#define PCI_DEVICE_ID_S3_ViRGE_MX 0x8c01
+#define PCI_DEVICE_ID_S3_ViRGE_MXP 0x8c02
+#define PCI_DEVICE_ID_S3_ViRGE_MXPMV 0x8c03
+#define PCI_DEVICE_ID_S3_SONICVIBES 0xca00
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define PCI_DEVICE_ID_INTEL_82375 0x0482
+#define PCI_DEVICE_ID_INTEL_82424 0x0483
+#define PCI_DEVICE_ID_INTEL_82378 0x0484
+#define PCI_DEVICE_ID_INTEL_82430 0x0486
+#define PCI_DEVICE_ID_INTEL_82434 0x04a3
+#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
+#define PCI_DEVICE_ID_INTEL_82092AA_1 0x1222
+#define PCI_DEVICE_ID_INTEL_7116 0x1223
+#define PCI_DEVICE_ID_INTEL_82596 0x1226
+#define PCI_DEVICE_ID_INTEL_82865 0x1227
+#define PCI_DEVICE_ID_INTEL_82557 0x1229
+#define PCI_DEVICE_ID_INTEL_82437 0x122d
+#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
+#define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230
+#define PCI_DEVICE_ID_INTEL_82371MX 0x1234
+#define PCI_DEVICE_ID_INTEL_82437MX 0x1235
+#define PCI_DEVICE_ID_INTEL_82441 0x1237
+#define PCI_DEVICE_ID_INTEL_82380FB 0x124b
+#define PCI_DEVICE_ID_INTEL_82439 0x1250
+#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
+#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
+#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
+#define PCI_DEVICE_ID_INTEL_82437VX 0x7030
+#define PCI_DEVICE_ID_INTEL_82439TX 0x7100
+#define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110
+#define PCI_DEVICE_ID_INTEL_82371AB 0x7111
+#define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112
+#define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113
+#define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180
+#define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181
+#define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190
+#define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191
+#define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192
+#define PCI_DEVICE_ID_INTEL_P6 0x84c4
+#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5
+
+#define PCI_VENDOR_ID_KTI 0x8e2e
+#define PCI_DEVICE_ID_KTI_ET32P2 0x3000
+
+#define PCI_VENDOR_ID_ADAPTEC 0x9004
+#define PCI_DEVICE_ID_ADAPTEC_7810 0x1078
+#define PCI_DEVICE_ID_ADAPTEC_7850 0x5078
+#define PCI_DEVICE_ID_ADAPTEC_7855 0x5578
+#define PCI_DEVICE_ID_ADAPTEC_5800 0x5800
+#define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075
+#define PCI_DEVICE_ID_ADAPTEC_7860 0x6078
+#define PCI_DEVICE_ID_ADAPTEC_7861 0x6178
+#define PCI_DEVICE_ID_ADAPTEC_7870 0x7078
+#define PCI_DEVICE_ID_ADAPTEC_7871 0x7178
+#define PCI_DEVICE_ID_ADAPTEC_7872 0x7278
+#define PCI_DEVICE_ID_ADAPTEC_7873 0x7378
+#define PCI_DEVICE_ID_ADAPTEC_7874 0x7478
+#define PCI_DEVICE_ID_ADAPTEC_7895 0x7895
+#define PCI_DEVICE_ID_ADAPTEC_7880 0x8078
+#define PCI_DEVICE_ID_ADAPTEC_7881 0x8178
+#define PCI_DEVICE_ID_ADAPTEC_7882 0x8278
+#define PCI_DEVICE_ID_ADAPTEC_7883 0x8378
+#define PCI_DEVICE_ID_ADAPTEC_7884 0x8478
+#define PCI_DEVICE_ID_ADAPTEC_1030 0x8b78
+
+#define PCI_VENDOR_ID_ADAPTEC2 0x9005
+#define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010
+#define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f
+#define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050
+#define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f
+
+#define PCI_VENDOR_ID_ATRONICS 0x907f
+#define PCI_DEVICE_ID_ATRONICS_2015 0x2015
+
+#define PCI_VENDOR_ID_HOLTEK 0x9412
+#define PCI_DEVICE_ID_HOLTEK_6565 0x6565
+
+#define PCI_VENDOR_ID_TIGERJET 0xe159
+#define PCI_DEVICE_ID_TIGERJET_300 0x0001
+
+#define PCI_VENDOR_ID_ARK 0xedd8
+#define PCI_DEVICE_ID_ARK_STING 0xa091
+#define PCI_DEVICE_ID_ARK_STINGARK 0xa099
+#define PCI_DEVICE_ID_ARK_2000MT 0xa0a1
+
+/*
+ * The PCI interface treats multi-function devices as independent
+ * devices. The slot/function address of each device is encoded
+ * in a single byte as follows:
+ *
+ * 7:3 = slot
+ * 2:0 = function
+ */
+#define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
+#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
+#define PCI_FUNC(devfn) ((devfn) & 0x07)
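A minimal sketch of how these macros compose; the slot/function values are arbitrary examples and not taken from this header:

/* Sketch: encode slot 3, function 2, then decode the byte again. */
static void devfn_roundtrip_example(void)
{
  unsigned char devfn = PCI_DEVFN(3, 2);  /* (3 << 3) | 2 == 0x1a */
  unsigned char slot  = PCI_SLOT(devfn);  /* 3 */
  unsigned char func  = PCI_FUNC(devfn);  /* 2 */
  (void) slot; (void) func;
}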
+
+/*
+ * Error values that may be returned by the PCI bios.
+ */
+#define PCIBIOS_SUCCESSFUL 0x00
+#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
+#define PCIBIOS_BAD_VENDOR_ID 0x83
+#define PCIBIOS_DEVICE_NOT_FOUND 0x86
+#define PCIBIOS_BAD_REGISTER_NUMBER 0x87
+#define PCIBIOS_SET_FAILED 0x88
+#define PCIBIOS_BUFFER_TOO_SMALL 0x89
+
+#define PCI_MAX_DEVICES 16
+#define PCI_MAX_FUNCTIONS 8
+
+typedef struct {
+ int (*read_config_byte)(unsigned char, unsigned char, unsigned char,
+ unsigned char, unsigned char *);
+ int (*read_config_word)(unsigned char, unsigned char, unsigned char,
+ unsigned char, unsigned short *);
+ int (*read_config_dword)(unsigned char, unsigned char, unsigned char,
+ unsigned char, unsigned int *);
+ int (*write_config_byte)(unsigned char, unsigned char, unsigned char,
+ unsigned char, unsigned char);
+ int (*write_config_word)(unsigned char, unsigned char, unsigned char,
+ unsigned char, unsigned short);
+ int (*write_config_dword)(unsigned char, unsigned char, unsigned char,
+ unsigned char, unsigned int);
+} pci_config_access_functions;
+
+typedef struct {
+ volatile unsigned char* pci_config_addr;
+ volatile unsigned char* pci_config_data;
+ pci_config_access_functions* pci_functions;
+} pci_config;
+
+extern pci_config pci;
+
+extern inline int
+pci_read_config_byte(unsigned char bus, unsigned char slot, unsigned char function,
+ unsigned char where, unsigned char * val) {
+ return pci.pci_functions->read_config_byte(bus, slot, function, where, val);
+}
+
+extern inline int
+pci_read_config_word(unsigned char bus, unsigned char slot, unsigned char function,
+ unsigned char where, unsigned short * val) {
+ return pci.pci_functions->read_config_word(bus, slot, function, where, val);
+}
+
+extern inline int
+pci_read_config_dword(unsigned char bus, unsigned char slot, unsigned char function,
+ unsigned char where, unsigned int * val) {
+ return pci.pci_functions->read_config_dword(bus, slot, function, where, val);
+}
+
+extern inline int
+pci_write_config_byte(unsigned char bus, unsigned char slot, unsigned char function,
+ unsigned char where, unsigned char val) {
+ return pci.pci_functions->write_config_byte(bus, slot, function, where, val);
+}
+
+extern inline int
+pci_write_config_word(unsigned char bus, unsigned char slot, unsigned char function,
+ unsigned char where, unsigned short val) {
+ return pci.pci_functions->write_config_word(bus, slot, function, where, val);
+}
+
+extern inline int
+pci_write_config_dword(unsigned char bus, unsigned char slot, unsigned char function,
+ unsigned char where, unsigned int val) {
+ return pci.pci_functions->write_config_dword(bus, slot, function, where, val);
+}
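A hedged usage sketch of the accessors above; it assumes printk() from the BSP console support, and the bus/slot arguments are whatever the caller wants to probe:

/* Sketch: read the vendor/device IDs of function 0 at (bus, slot).
 * Offsets 0x00 and 0x02 are the standard PCI configuration-space
 * vendor and device ID registers.                                   */
static void probe_slot_example(unsigned char bus, unsigned char slot)
{
  unsigned short vendor, device;

  if (pci_read_config_word(bus, slot, 0, 0x00, &vendor) != PCIBIOS_SUCCESSFUL)
    return;
  if (vendor == 0xffff)      /* empty slots read back all ones */
    return;
  pci_read_config_word(bus, slot, 0, 0x02, &device);
  printk("PCI %d/%d: vendor 0x%04x device 0x%04x\n", bus, slot, vendor, device);
}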
+
+/*
+ * Return the number of PCI busses in the system
+ */
+extern unsigned char BusCountPCI(void);
+extern void InitializePCI(void);
+
+#endif /* RTEMS_PCI_H */
diff --git a/c/src/lib/libbsp/powerpc/shared/residual/Makefile.in b/c/src/lib/libbsp/powerpc/shared/residual/Makefile.in
new file mode 100644
index 0000000000..74cf7892c6
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/residual/Makefile.in
@@ -0,0 +1,50 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/shared/residual
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+PGM = ${ARCH}/residual.rel
+
+# C source names, if any, go here -- minus the .c
+C_PIECES = $(RESIDUAL_C_PIECES)
+C_FILES = $(C_PIECES:%=%.c)
+C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
+
+H_FILES = $(srcdir)/pnp.h $(srcdir)/residual.h
+
+SRCS = $(C_FILES) $(H_FILES)
+OBJS = $(C_O_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
+
+INSTALLDIRS = $(PROJECT_INCLUDE)/bsp
+
+$(INSTALLDIRS):
+ @$(mkinstalldirs) $(INSTALLDIRS)
+
+preinstall:
+ @$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
+ @$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
+
+all: ${ARCH} $(SRCS) preinstall
+
+# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
+install: all
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libbsp/powerpc/shared/residual/pnp.h b/c/src/lib/libbsp/powerpc/shared/residual/pnp.h
new file mode 100644
index 0000000000..beebba1b5f
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/residual/pnp.h
@@ -0,0 +1,647 @@
+/* 11/02/95 */
+/*----------------------------------------------------------------------------*/
+/* Plug and Play header definitions */
+/*----------------------------------------------------------------------------*/
+
+/* Structure map for PnP on PowerPC Reference Platform */
+/* See Plug and Play ISA Specification, Version 1.0, May 28, 1993. It */
+/* (or later versions) is available on Compuserve in the PLUGPLAY area. */
+/* This code has extensions to that specification, namely new short and */
+/* long tag types for platform dependent information */
+
+/* Warning: LE notation used throughout this file */
+
+/* For enum's: if given in hex then they are bit significant, i.e. */
+/* only one bit is on for each enum */
+
+/*
+ * $Id$
+ */
+
+#ifndef _PNP_
+#define _PNP_
+
+#ifndef ASM
+#define MAX_MEM_REGISTERS 9
+#define MAX_IO_PORTS 20
+#define MAX_IRQS 7
+/*#define MAX_DMA_CHANNELS 7*/
+
+/* Interrupt controllers */
+
+#define PNPinterrupt0 "PNP0000" /* AT Interrupt Controller */
+#define PNPinterrupt1 "PNP0001" /* EISA Interrupt Controller */
+#define PNPinterrupt2 "PNP0002" /* MCA Interrupt Controller */
+#define PNPinterrupt3 "PNP0003" /* APIC */
+#define PNPExtInt "IBM000D" /* PowerPC Extended Interrupt Controller */
+
+/* Timers */
+
+#define PNPtimer0 "PNP0100" /* AT Timer */
+#define PNPtimer1 "PNP0101" /* EISA Timer */
+#define PNPtimer2 "PNP0102" /* MCA Timer */
+
+/* DMA controllers */
+
+#define PNPdma0 "PNP0200" /* AT DMA Controller */
+#define PNPdma1 "PNP0201" /* EISA DMA Controller */
+#define PNPdma2 "PNP0202" /* MCA DMA Controller */
+
+/* start of August 15, 1994 additions */
+/* CMOS */
+#define PNPCMOS "IBM0009" /* CMOS */
+
+/* L2 Cache */
+#define PNPL2 "IBM0007" /* L2 Cache */
+
+/* NVRAM */
+#define PNPNVRAM "IBM0008" /* NVRAM */
+
+/* Power Management */
+#define PNPPM "IBM0005" /* Power Management */
+/* end of August 15, 1994 additions */
+
+/* Keyboards */
+
+#define PNPkeyboard0 "PNP0300" /* IBM PC/XT KB Cntlr (83 key, no mouse) */
+#define PNPkeyboard1 "PNP0301" /* Olivetti ICO (102 key) */
+#define PNPkeyboard2 "PNP0302" /* IBM PC/AT KB Cntlr (84 key) */
+#define PNPkeyboard3 "PNP0303" /* IBM Enhanced (101/2 key, PS/2 mouse) */
+#define PNPkeyboard4 "PNP0304" /* Nokia 1050 KB Cntlr */
+#define PNPkeyboard5 "PNP0305" /* Nokia 9140 KB Cntlr */
+#define PNPkeyboard6 "PNP0306" /* Standard Japanese KB Cntlr */
+#define PNPkeyboard7 "PNP0307" /* Microsoft Windows (R) KB Cntlr */
+
+/* Parallel port controllers */
+
+#define PNPparallel0 "PNP0400" /* Standard LPT Parallel Port */
+#define PNPparallel1 "PNP0401" /* ECP Parallel Port */
+#define PNPepp "IBM001C" /* EPP Parallel Port */
+
+/* Serial port controllers */
+
+#define PNPserial0 "PNP0500" /* Standard PC Serial port */
+#define PNPSerial1 "PNP0501" /* 16550A Compatible Serial port */
+
+/* Disk controllers */
+
+#define PNPdisk0 "PNP0600" /* Generic ESDI/IDE/ATA Compat HD Cntlr */
+#define PNPdisk1 "PNP0601" /* Plus Hardcard II */
+#define PNPdisk2 "PNP0602" /* Plus Hardcard IIXL/EZ */
+
+/* Diskette controllers */
+
+#define PNPdiskette0 "PNP0700" /* PC Standard Floppy Disk Controller */
+
+/* Display controllers */
+
+#define PNPdisplay0 "PNP0900" /* VGA Compatible */
+#define PNPdisplay1 "PNP0901" /* Video Seven VGA */
+#define PNPdisplay2 "PNP0902" /* 8514/A Compatible */
+#define PNPdisplay3 "PNP0903" /* Trident VGA */
+#define PNPdisplay4 "PNP0904" /* Cirrus Logic Laptop VGA */
+#define PNPdisplay5 "PNP0905" /* Cirrus Logic VGA */
+#define PNPdisplay6 "PNP0906" /* Tseng ET4000 or ET4000/W32 */
+#define PNPdisplay7 "PNP0907" /* Western Digital VGA */
+#define PNPdisplay8 "PNP0908" /* Western Digital Laptop VGA */
+#define PNPdisplay9 "PNP0909" /* S3 */
+#define PNPdisplayA "PNP090A" /* ATI Ultra Pro/Plus (Mach 32) */
+#define PNPdisplayB "PNP090B" /* ATI Ultra (Mach 8) */
+#define PNPdisplayC "PNP090C" /* XGA Compatible */
+#define PNPdisplayD "PNP090D" /* ATI VGA Wonder */
+#define PNPdisplayE "PNP090E" /* Weitek P9000 Graphics Adapter */
+#define PNPdisplayF "PNP090F" /* Oak Technology VGA */
+
+/* Peripheral busses */
+
+#define PNPbuses0 "PNP0A00" /* ISA Bus */
+#define PNPbuses1 "PNP0A01" /* EISA Bus */
+#define PNPbuses2 "PNP0A02" /* MCA Bus */
+#define PNPbuses3 "PNP0A03" /* PCI Bus */
+#define PNPbuses4 "PNP0A04" /* VESA/VL Bus */
+
+/* RTC, BIOS, planar devices */
+
+#define PNPspeaker0 "PNP0800" /* AT Style Speaker Sound */
+#define PNPrtc0 "PNP0B00" /* AT RTC */
+#define PNPpnpbios0 "PNP0C00" /* PNP BIOS (only created by root enum) */
+#define PNPpnpbios1 "PNP0C01" /* System Board Memory Device */
+#define PNPpnpbios2 "PNP0C02" /* Math Coprocessor */
+#define PNPpnpbios3 "PNP0C03" /* PNP BIOS Event Notification Interrupt */
+
+/* PCMCIA controller */
+
+#define PNPpcmcia0 "PNP0E00" /* Intel 82365 Compatible PCMCIA Cntlr */
+
+/* Mice */
+
+#define PNPmouse0 "PNP0F00" /* Microsoft Bus Mouse */
+#define PNPmouse1 "PNP0F01" /* Microsoft Serial Mouse */
+#define PNPmouse2 "PNP0F02" /* Microsoft Inport Mouse */
+#define PNPmouse3 "PNP0F03" /* Microsoft PS/2 Mouse */
+#define PNPmouse4 "PNP0F04" /* Mousesystems Mouse */
+#define PNPmouse5 "PNP0F05" /* Mousesystems 3 Button Mouse - COM2 */
+#define PNPmouse6 "PNP0F06" /* Genius Mouse - COM1 */
+#define PNPmouse7 "PNP0F07" /* Genius Mouse - COM2 */
+#define PNPmouse8 "PNP0F08" /* Logitech Serial Mouse */
+#define PNPmouse9 "PNP0F09" /* Microsoft Ballpoint Serial Mouse */
+#define PNPmouseA "PNP0F0A" /* Microsoft PNP Mouse */
+#define PNPmouseB "PNP0F0B" /* Microsoft PNP Ballpoint Mouse */
+
+/* Modems */
+
+#define PNPmodem0 "PNP9000" /* Specific IDs TBD */
+
+/* Network controllers */
+
+#define PNPnetworkC9 "PNP80C9" /* IBM Token Ring */
+#define PNPnetworkCA "PNP80CA" /* IBM Token Ring II */
+#define PNPnetworkCB "PNP80CB" /* IBM Token Ring II/Short */
+#define PNPnetworkCC "PNP80CC" /* IBM Token Ring 4/16Mbs */
+#define PNPnetwork27 "PNP8327" /* IBM Token Ring (All types) */
+#define PNPnetworket "IBM0010" /* IBM Ethernet used by Power PC */
+#define PNPneteisaet "IBM2001" /* IBM Ethernet EISA adapter */
+#define PNPAMD79C970 "IBM0016" /* AMD 79C970 (PCI Ethernet) */
+
+/* SCSI controllers */
+
+#define PNPscsi0 "PNPA000" /* Adaptec 154x Compatible SCSI Cntlr */
+#define PNPscsi1 "PNPA001" /* Adaptec 174x Compatible SCSI Cntlr */
+#define PNPscsi2 "PNPA002" /* Future Domain 16-700 Compat SCSI Cntlr*/
+#define PNPscsi3 "PNPA003" /* Panasonic CDROM Adapter (SBPro/SB16) */
+#define PNPscsiF "IBM000F" /* NCR 810 SCSI Controller */
+#define PNPscsi825 "IBM001B" /* NCR 825 SCSI Controller */
+#define PNPscsi875 "IBM0018" /* NCR 875 SCSI Controller */
+
+/* Sound/Video, Multimedia */
+
+#define PNPmm0 "PNPB000" /* Sound Blaster Compatible Sound Device */
+#define PNPmm1 "PNPB001" /* MS Windows Sound System Compat Device */
+#define PNPmmF "IBM000E" /* Crystal CS4231 Audio Device */
+#define PNPv7310 "IBM0015" /* ASCII V7310 Video Capture Device */
+#define PNPmm4232 "IBM0017" /* Crystal CS4232 Audio Device */
+#define PNPpmsyn "IBM001D" /* YMF 289B chip (Yamaha) */
+#define PNPgp4232 "IBM0012" /* Crystal CS4232 Game Port */
+#define PNPmidi4232 "IBM0013" /* Crystal CS4232 MIDI */
+
+/* Operator Panel */
+#define PNPopctl "IBM000B" /* Operator's panel */
+
+/* Service Processor */
+#define PNPsp "IBM0011" /* IBM Service Processor */
+#define PNPLTsp "IBM001E" /* Lightning/Terlingua Support Processor */
+#define PNPLTmsp "IBM001F" /* Lightning/Terlingua Mini-SP */
+
+/* Memory Controller */
+#define PNPmemctl "IBM000A" /* Memory controller */
+
+/* Graphics Assist */
+#define PNPg_assist "IBM0014" /* Graphics Assist */
+
+/* Miscellaneous Device Controllers */
+#define PNPtablet "IBM0019" /* IBM Tablet Controller */
+
+/* PNP Packet Handles */
+
+#define S1_Packet 0x0A /* Version resource */
+#define S2_Packet 0x15 /* Logical DEVID (without flags) */
+#define S2_Packet_flags 0x16 /* Logical DEVID (with flags) */
+#define S3_Packet 0x1C /* Compatible device ID */
+#define S4_Packet 0x22 /* IRQ resource (without flags) */
+#define S4_Packet_flags 0x23 /* IRQ resource (with flags) */
+#define S5_Packet 0x2A /* DMA resource */
+#define S6_Packet 0x30 /* Depend funct start (w/o priority) */
+#define S6_Packet_priority 0x31 /* Depend funct start (w/ priority) */
+#define S7_Packet 0x38 /* Depend funct end */
+#define S8_Packet 0x47 /* I/O port resource (w/o fixed loc) */
+#define S9_Packet_fixed 0x4B /* I/O port resource (w/ fixed loc) */
+#define S14_Packet 0x71 /* Vendor defined */
+#define S15_Packet 0x78 /* End of resource (w/o checksum) */
+#define S15_Packet_checksum 0x79 /* End of resource (w/ checksum) */
+#define L1_Packet 0x81 /* Memory range */
+#define L1_Shadow 0x20 /* Memory is shadowable */
+#define L1_32bit_mem 0x18 /* 32-bit memory only */
+#define L1_8_16bit_mem 0x10 /* 8- and 16-bit supported */
+#define L1_Decode_Hi 0x04 /* decode supports high address */
+#define L1_Cache 0x02 /* read cacheable, write-through */
+#define L1_Writeable 0x01 /* Memory is writeable */
+#define L2_Packet 0x82 /* ANSI ID string */
+#define L3_Packet 0x83 /* Unicode ID string */
+#define L4_Packet 0x84 /* Vendor defined */
+#define L5_Packet 0x85 /* Large I/O */
+#define L6_Packet 0x86 /* 32-bit Fixed Loc Mem Range Desc */
+#define END_TAG 0x78 /* End of resource */
+#define DF_START_TAG 0x30 /* Dependent function start */
+#define DF_START_TAG_priority 0x31 /* Dependent function start */
+#define DF_END_TAG 0x38 /* Dependent function end */
+#define SUBOPTIMAL_CONFIGURATION 0x2 /* Priority byte sub optimal config */
+
+/* Device Base Type Codes */
+
+typedef enum _PnP_BASE_TYPE {
+ Reserved = 0,
+ MassStorageDevice = 1,
+ NetworkInterfaceController = 2,
+ DisplayController = 3,
+ MultimediaController = 4,
+ MemoryController = 5,
+ BridgeController = 6,
+ CommunicationsDevice = 7,
+ SystemPeripheral = 8,
+ InputDevice = 9,
+ ServiceProcessor = 0x0A, /* 11/2/95 */
+ } PnP_BASE_TYPE;
+
+/* Device Sub Type Codes */
+
+typedef enum _PnP_SUB_TYPE {
+ SCSIController = 0,
+ IDEController = 1,
+ FloppyController = 2,
+ IPIController = 3,
+ OtherMassStorageController = 0x80,
+
+ EthernetController = 0,
+ TokenRingController = 1,
+ FDDIController = 2,
+ OtherNetworkController = 0x80,
+
+ VGAController= 0,
+ SVGAController= 1,
+ XGAController= 2,
+ OtherDisplayController = 0x80,
+
+ VideoController = 0,
+ AudioController = 1,
+ OtherMultimediaController = 0x80,
+
+ RAM = 0,
+ FLASH = 1,
+ OtherMemoryDevice = 0x80,
+
+ HostProcessorBridge = 0,
+ ISABridge = 1,
+ EISABridge = 2,
+ MicroChannelBridge = 3,
+ PCIBridge = 4,
+ PCMCIABridge = 5,
+ VMEBridge = 6,
+ OtherBridgeDevice = 0x80,
+
+ RS232Device = 0,
+ ATCompatibleParallelPort = 1,
+ OtherCommunicationsDevice = 0x80,
+
+ ProgrammableInterruptController = 0,
+ DMAController = 1,
+ SystemTimer = 2,
+ RealTimeClock = 3,
+ L2Cache = 4,
+ NVRAM = 5,
+ PowerManagement = 6,
+ CMOS = 7,
+ OperatorPanel = 8,
+ ServiceProcessorClass1 = 9,
+ ServiceProcessorClass2 = 0xA,
+ ServiceProcessorClass3 = 0xB,
+ GraphicAssist = 0xC,
+ SystemPlanar = 0xF, /* 10/5/95 */
+ OtherSystemPeripheral = 0x80,
+
+ KeyboardController = 0,
+ Digitizer = 1,
+ MouseController = 2,
+ TabletController = 3, /* 10/27/95 */
+ OtherInputController = 0x80,
+
+ GeneralMemoryController = 0,
+ } PnP_SUB_TYPE;
+
+/* Device Interface Type Codes */
+
+typedef enum _PnP_INTERFACE {
+ General = 0,
+ GeneralSCSI = 0,
+ GeneralIDE = 0,
+ ATACompatible = 1,
+
+ GeneralFloppy = 0,
+ Compatible765 = 1,
+ NS398_Floppy = 2, /* NS Super I/O wired to use index
+ register at port 398 and data
+ register at port 399 */
+ NS26E_Floppy = 3, /* Ports 26E and 26F */
+ NS15C_Floppy = 4, /* Ports 15C and 15D */
+ NS2E_Floppy = 5, /* Ports 2E and 2F */
+ CHRP_Floppy = 6, /* CHRP Floppy in PR*P system */
+
+ GeneralIPI = 0,
+
+ GeneralEther = 0,
+ GeneralToken = 0,
+ GeneralFDDI = 0,
+
+ GeneralVGA = 0,
+ GeneralSVGA = 0,
+ GeneralXGA = 0,
+
+ GeneralVideo = 0,
+ GeneralAudio = 0,
+ CS4232Audio = 1, /* CS 4232 Plug 'n Play Configured */
+
+ GeneralRAM = 0,
+ GeneralFLASH = 0,
+ PCIMemoryController = 0, /* PCI Config Method */
+ RS6KMemoryController = 1, /* RS6K Config Method */
+
+ GeneralHostBridge = 0,
+ GeneralISABridge = 0,
+ GeneralEISABridge = 0,
+ GeneralMCABridge = 0,
+ GeneralPCIBridge = 0,
+ PCIBridgeDirect = 0,
+ PCIBridgeIndirect = 1,
+ PCIBridgeRS6K = 2,
+ GeneralPCMCIABridge = 0,
+ GeneralVMEBridge = 0,
+
+ GeneralRS232 = 0,
+ COMx = 1,
+ Compatible16450 = 2,
+ Compatible16550 = 3,
+ NS398SerPort = 4, /* NS Super I/O wired to use index
+ register at port 398 and data
+ register at port 399 */
+ NS26ESerPort = 5, /* Ports 26E and 26F */
+ NS15CSerPort = 6, /* Ports 15C and 15D */
+ NS2ESerPort = 7, /* Ports 2E and 2F */
+
+ GeneralParPort = 0,
+ LPTx = 1,
+ NS398ParPort = 2, /* NS Super I/O wired to use index
+ register at port 398 and data
+ register at port 399 */
+ NS26EParPort = 3, /* Ports 26E and 26F */
+ NS15CParPort = 4, /* Ports 15C and 15D */
+ NS2EParPort = 5, /* Ports 2E and 2F */
+
+ GeneralPIC = 0,
+ ISA_PIC = 1,
+ EISA_PIC = 2,
+ MPIC = 3,
+ RS6K_PIC = 4,
+
+ GeneralDMA = 0,
+ ISA_DMA = 1,
+ EISA_DMA = 2,
+
+ GeneralTimer = 0,
+ ISA_Timer = 1,
+ EISA_Timer = 2,
+ GeneralRTC = 0,
+ ISA_RTC = 1,
+
+ StoreThruOnly = 1,
+ StoreInEnabled = 2,
+ RS6KL2Cache = 3,
+
+ IndirectNVRAM = 0, /* Indirectly addressed */
+ DirectNVRAM = 1, /* Memory Mapped */
+ IndirectNVRAM24 = 2, /* Indirectly addressed - 24 bit */
+
+ GeneralPowerManagement = 0,
+ EPOWPowerManagement = 1,
+ PowerControl = 2, /* d1378 */
+
+ GeneralCMOS = 0,
+
+ GeneralOPPanel = 0,
+ HarddiskLight = 1,
+ CDROMLight = 2,
+ PowerLight = 3,
+ KeyLock = 4,
+ ANDisplay = 5, /* AlphaNumeric Display */
+ SystemStatusLED = 6, /* 3 digit 7 segment LED */
+ CHRP_SystemStatusLED = 7, /* CHRP LEDs in PR*P system */
+
+ GeneralServiceProcessor = 0,
+
+ TransferData = 1,
+ IGMC32 = 2,
+ IGMC64 = 3,
+
+ GeneralSystemPlanar = 0, /* 10/5/95 */
+
+ } PnP_INTERFACE;
+
+/* PnP resources */
+
+/* Compressed ASCII is 5 bits per char; 00001=A ... 11010=Z */
+
+typedef struct _SERIAL_ID {
+ unsigned char VendorID0; /* Bit(7)=0 */
+ /* Bits(6:2)=1st character in */
+ /* compressed ASCII */
+ /* Bits(1:0)=2nd character in */
+ /* compressed ASCII bits(4:3) */
+ unsigned char VendorID1; /* Bits(7:5)=2nd character in */
+ /* compressed ASCII bits(2:0) */
+ /* Bits(4:0)=3rd character in */
+ /* compressed ASCII */
+ unsigned char VendorID2; /* Product number - vendor assigned */
+ unsigned char VendorID3; /* Product number - vendor assigned */
+
+/* The serial number provides uniqueness when more than one board of the same */
+/* type is in the system.  It must be "FFFFFFFF" if the feature is not supported. */
+
+ unsigned char Serial0; /* Unique serial number bits (7:0) */
+ unsigned char Serial1; /* Unique serial number bits (15:8) */
+ unsigned char Serial2; /* Unique serial number bits (23:16) */
+ unsigned char Serial3; /* Unique serial number bits (31:24) */
+ unsigned char Checksum;
+ } SERIAL_ID;
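As an illustration of the compressed-ASCII layout described above, a hypothetical helper (not part of the original header) that expands the packed three-letter vendor code:

/* Sketch: unpack the 5-bit letters from VendorID0/VendorID1.
 * Bit layout across the two bytes: 0 | c1(5 bits) | c2(5 bits) | c3(5 bits). */
static void expand_vendor_example(const SERIAL_ID *id, char out[4])
{
  unsigned short packed = (id->VendorID0 << 8) | id->VendorID1;

  out[0] = 'A' - 1 + ((packed >> 10) & 0x1f);
  out[1] = 'A' - 1 + ((packed >>  5) & 0x1f);
  out[2] = 'A' - 1 + ( packed        & 0x1f);
  out[3] = '\0';
}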
+
+typedef enum _PnPItemName {
+ Unused = 0,
+ PnPVersion = 1,
+ LogicalDevice = 2,
+ CompatibleDevice = 3,
+ IRQFormat = 4,
+ DMAFormat = 5,
+ StartDepFunc = 6,
+ EndDepFunc = 7,
+ IOPort = 8,
+ FixedIOPort = 9,
+ Res1 = 10,
+ Res2 = 11,
+ Res3 = 12,
+ SmallVendorItem = 14,
+ EndTag = 15,
+ MemoryRange = 1,
+ ANSIIdentifier = 2,
+ UnicodeIdentifier = 3,
+ LargeVendorItem = 4,
+ MemoryRange32 = 5,
+ MemoryRangeFixed32 = 6,
+ } PnPItemName;
+
+/* Define a bunch of access functions for the bits in the tag field */
+
+/* Tag type - 0 = small; 1 = large */
+#define tag_type(t) (((t) & 0x80)>>7)
+#define set_tag_type(t,v) (t = (t & 0x7f) | ((v)<<7))
+
+/* Small item name is 4 bits - one of PnPItemName enum above */
+#define tag_small_item_name(t) (((t) & 0x78)>>3)
+#define set_tag_small_item_name(t,v) (t = (t & 0x07) | ((v)<<3))
+
+/* Small item count is 3 bits - count of further bytes in packet */
+#define tag_small_count(t) ((t) & 0x07)
+#define set_tag_count(t,v) (t = (t & 0x78) | (v))
+
+/* Large item name is 7 bits - one of PnPItemName enum above */
+#define tag_large_item_name(t) ((t) & 0x7f)
+#define set_tag_large_item_name(t,v) (t = (t | 0x80) | (v))
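A short sketch of how the tag accessors above classify a resource byte; S4_Packet is used as the sample tag:

/* Sketch: decode one small tag byte (0x22 == S4_Packet, the IRQ descriptor). */
static void tag_decode_example(void)
{
  unsigned char t = S4_Packet;

  if (tag_type(t) == 0) {                     /* small tag            */
    int name  = tag_small_item_name(t);       /* 4 == IRQFormat       */
    int count = tag_small_count(t);           /* 2 data bytes follow  */
    (void) name; (void) count;
  }
}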
+
+/* a PnP resource is a bunch of contiguous TAG packets ending with an end tag */
+
+typedef union _PnP_TAG_PACKET {
+ struct _S1_Pack{ /* VERSION PACKET */
+ unsigned char Tag; /* small tag = 0x0a */
+ unsigned char Version[2]; /* PnP version, Vendor version */
+ } S1_Pack;
+
+ struct _S2_Pack{ /* LOGICAL DEVICE ID PACKET */
+ unsigned char Tag; /* small tag = 0x15 or 0x16 */
+ unsigned char DevId[4]; /* Logical device id */
+ unsigned char Flags[2]; /* bit(0) boot device; */
+ /* bit(7:1) cmd in range x31-x37 */
+ /* bit(7:0) cmd in range x28-x3f (opt)*/
+ } S2_Pack;
+
+ struct _S3_Pack{ /* COMPATIBLE DEVICE ID PACKET */
+ unsigned char Tag; /* small tag = 0x1c */
+ unsigned char CompatId[4]; /* Compatible device id */
+ } S3_Pack;
+
+ struct _S4_Pack{ /* IRQ PACKET */
+ unsigned char Tag; /* small tag = 0x22 or 0x23 */
+ unsigned char IRQMask[2]; /* bit(0) is IRQ0, ...; */
+ /* bit(0) is IRQ8 ... */
+ unsigned char IRQInfo; /* optional; assume bit(0)=1; else */
+ /* bit(0) - high true edge sensitive */
+ /* bit(1) - low true edge sensitive */
+ /* bit(2) - high true level sensitive*/
+ /* bit(3) - low true level sensitive */
+ /* bit(7:4) - must be 0 */
+ } S4_Pack;
+
+ struct _S5_Pack{ /* DMA PACKET */
+ unsigned char Tag; /* small tag = 0x2a */
+ unsigned char DMAMask; /* bit(0) is channel 0 ... */
+ unsigned char DMAInfo;
+ } S5_Pack;
+
+ struct _S6_Pack{ /* START DEPENDENT FUNCTION PACKET */
+ unsigned char Tag; /* small tag = 0x30 or 0x31 */
+ unsigned char Priority; /* Optional; if missing then x01; else*/
+ /* x00 = best possible */
+ /* x01 = acceptable */
+ /* x02 = sub-optimal but functional */
+ } S6_Pack;
+
+ struct _S7_Pack{ /* END DEPENDENT FUNCTION PACKET */
+ unsigned char Tag; /* small tag = 0x38 */
+ } S7_Pack;
+
+ struct _S8_Pack{ /* VARIABLE I/O PORT PACKET */
+ unsigned char Tag; /* small tag x47 */
+ unsigned char IOInfo; /* x0 = decode only bits(9:0); */
+#define ISAAddr16bit 0x01 /* x01 = decode bits(15:0) */
+ unsigned char RangeMin[2]; /* Min base address */
+ unsigned char RangeMax[2]; /* Max base address */
+ unsigned char IOAlign; /* base alignment, increment in 1-byte blocks */
+ unsigned char IONum; /* number of contiguous I/O ports */
+ } S8_Pack;
+
+ struct _S9_Pack{ /* FIXED I/O PORT PACKET */
+ unsigned char Tag; /* small tag = 0x4b */
+ unsigned char Range[2]; /* base address 10 bits */
+ unsigned char IONum; /* number of contiguous I/O ports */
+ } S9_Pack;
+
+ struct _S14_Pack{ /* VENDOR DEFINED PACKET */
+ unsigned char Tag; /* small tag = 0x7m m = 1-7 */
+ union _S14_Data{
+ unsigned char Data[7]; /* Vendor defined */
+ struct _S14_PPCPack{ /* Pr*p s14 pack */
+ unsigned char Type; /* 00=non-IBM */
+ unsigned char PPCData[6]; /* Vendor defined */
+ } S14_PPCPack;
+ } S14_Data;
+ } S14_Pack;
+
+ struct _S15_Pack{ /* END PACKET */
+ unsigned char Tag; /* small tag = 0x78 or 0x79 */
+ unsigned char Check; /* optional - checksum */
+ } S15_Pack;
+
+ struct _L1_Pack{ /* MEMORY RANGE PACKET */
+ unsigned char Tag; /* large tag = 0x81 */
+ unsigned char Count0; /* x09 */
+ unsigned char Count1; /* x00 */
+ unsigned char Data[9]; /* a variable array of bytes, */
+ /* count in tag */
+ } L1_Pack;
+
+ struct _L2_Pack{ /* ANSI ID STRING PACKET */
+ unsigned char Tag; /* large tag = 0x82 */
+ unsigned char Count0; /* Length of string */
+ unsigned char Count1;
+ unsigned char Identifier[1]; /* a variable array of bytes, */
+ /* count in tag */
+ } L2_Pack;
+
+ struct _L3_Pack{ /* UNICODE ID STRING PACKET */
+ unsigned char Tag; /* large tag = 0x83 */
+ unsigned char Count0; /* Length + 2 of string */
+ unsigned char Count1;
+ unsigned char Country0; /* TBD */
+ unsigned char Country1; /* TBD */
+ unsigned char Identifier[1]; /* a variable array of bytes, */
+ /* count in tag */
+ } L3_Pack;
+
+ struct _L4_Pack{ /* VENDOR DEFINED PACKET */
+ unsigned char Tag; /* large tag = 0x84 */
+ unsigned char Count0;
+ unsigned char Count1;
+ union _L4_Data{
+ unsigned char Data[1]; /* a variable array of bytes, */
+ /* count in tag */
+ struct _L4_PPCPack{ /* Pr*p L4 packet */
+ unsigned char Type; /* 00=non-IBM */
+ unsigned char PPCData[1]; /* a variable array of bytes, */
+ /* count in tag */
+ } L4_PPCPack;
+ } L4_Data;
+ } L4_Pack;
+
+ struct _L5_Pack{
+ unsigned char Tag; /* large tag = 0x85 */
+ unsigned char Count0; /* Count = 17 */
+ unsigned char Count1;
+ unsigned char Data[17];
+ } L5_Pack;
+
+ struct _L6_Pack{
+ unsigned char Tag; /* large tag = 0x86 */
+ unsigned char Count0; /* Count = 9 */
+ unsigned char Count1;
+ unsigned char Data[9];
+ } L6_Pack;
+
+ } PnP_TAG_PACKET;
+
+#endif /* ASM */
+#endif /* ndef _PNP_ */
diff --git a/c/src/lib/libbsp/powerpc/shared/residual/residual.c b/c/src/lib/libbsp/powerpc/shared/residual/residual.c
new file mode 100644
index 0000000000..8a4ea7339e
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/residual/residual.c
@@ -0,0 +1,106 @@
+/*
+ * residual.c : functions used to parse residual data.
+ *
+ * Copyright (C) 1999 valette@crf.canon.fr
+ *
+ * This code is heavily inspired by the public specification of STREAM V2
+ * that can be found at:
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <bsp/residual.h>
+#include <libcpu/io.h>
+#include <libcpu/byteorder.h>
+
+
+static int same_DevID(unsigned short vendor,
+ unsigned short Number,
+ char * str)
+{
+ static unsigned const char hexdigit[]="0123456789ABCDEF";
+ if (strlen(str)!=7) return 0;
+ if ( ( ((vendor>>10)&0x1f)+'A'-1 == str[0]) &&
+ ( ((vendor>>5)&0x1f)+'A'-1 == str[1]) &&
+ ( (vendor&0x1f)+'A'-1 == str[2]) &&
+ (hexdigit[(Number>>12)&0x0f] == str[3]) &&
+ (hexdigit[(Number>>8)&0x0f] == str[4]) &&
+ (hexdigit[(Number>>4)&0x0f] == str[5]) &&
+ (hexdigit[Number&0x0f] == str[6]) ) return 1;
+ return 0;
+}
+
+PPC_DEVICE *residual_find_device(RESIDUAL *res,unsigned long BusMask,
+ unsigned char * DevID,
+ int BaseType,
+ int SubType,
+ int Interface,
+ int n)
+{
+ int i;
+ if ( !res || !res->ResidualLength ) return NULL;
+ for (i=0; i<res->ActualNumDevices; i++) {
+#define Dev res->Devices[i].DeviceId
+ if ( (Dev.BusId&BusMask) &&
+ (BaseType==-1 || Dev.BaseType==BaseType) &&
+ (SubType==-1 || Dev.SubType==SubType) &&
+ (Interface==-1 || Dev.Interface==Interface) &&
+ (DevID==NULL || same_DevID((Dev.DevId>>16)&0xffff,
+ Dev.DevId&0xffff, DevID)) &&
+ !(n--) ) return res->Devices+i;
+#undef Dev
+ }
+ return 0;
+}
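A hedged usage sketch of residual_find_device(); residualCopy is the BSP's copy of the firmware residual declared in residual.h, and the type constants come from pnp.h and residual.h:

/* Sketch: first PCI device reported as a VGA-class display controller. */
static PPC_DEVICE *find_vga_example(void)
{
  return residual_find_device(&residualCopy, PCIDEVICE,
                              NULL,               /* any vendor/device ID */
                              DisplayController,  /* PnP base type        */
                              VGAController,      /* PnP sub type         */
                              -1,                 /* any interface        */
                              0);                 /* first match (n == 0) */
}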
+
+PnP_TAG_PACKET *PnP_find_packet(unsigned char *p,
+ unsigned packet_tag,
+ int n)
+{
+ unsigned mask, masked_tag, size;
+ if(!p) return 0;
+ if (tag_type(packet_tag)) mask=0xff; else mask=0xF8;
+ masked_tag = packet_tag&mask;
+ for(; *p != END_TAG; p+=size) {
+ if ((*p & mask) == masked_tag && !(n--))
+ return (PnP_TAG_PACKET *) p;
+ if (tag_type(*p))
+ size=ld_le16((unsigned short *)(p+1))+3;
+ else
+ size=tag_small_count(*p)+1;
+ }
+ return 0; /* not found */
+}
+
+PnP_TAG_PACKET *PnP_find_small_vendor_packet(unsigned char *p,
+ unsigned packet_type,
+ int n)
+{
+ int next=0;
+ while (p) {
+ p = (unsigned char *) PnP_find_packet(p, 0x70, next);
+ if (p && p[1]==packet_type && !(n--))
+ return (PnP_TAG_PACKET *) p;
+ next = 1;
+ };
+ return 0; /* not found */
+}
+
+PnP_TAG_PACKET *PnP_find_large_vendor_packet(unsigned char *p,
+ unsigned packet_type,
+ int n)
+{
+ int next=0;
+ while (p) {
+ p = (unsigned char *) PnP_find_packet(p, 0x84, next);
+ if (p && p[3]==packet_type && !(n--))
+ return (PnP_TAG_PACKET *) p;
+ next = 1;
+ };
+ return 0; /* not found */
+}
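A sketch of chaining the helpers: a device's AllocatedOffset indexes into the residual's DevicePnPHeap, and PnP_find_packet() walks that list. The res/dev arguments are assumed to come from residual_find_device() above:

/* Sketch: return the first IRQ descriptor in a device's allocated resources.
 * S4_Packet (0x22) is matched with the count bits masked off, so the
 * "with flags" variant (0x23) is found as well.                              */
static PnP_TAG_PACKET *first_irq_packet_example(RESIDUAL *res, PPC_DEVICE *dev)
{
  unsigned char *list = res->DevicePnPHeap + dev->AllocatedOffset;

  return PnP_find_packet(list, S4_Packet, 0);
}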
+
diff --git a/c/src/lib/libbsp/powerpc/shared/residual/residual.h b/c/src/lib/libbsp/powerpc/shared/residual/residual.h
new file mode 100644
index 0000000000..8da92aae2f
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/residual/residual.h
@@ -0,0 +1,346 @@
+/* 7/18/95 */
+/*----------------------------------------------------------------------------*/
+/* Residual Data header definitions and prototypes */
+/*----------------------------------------------------------------------------*/
+
+/* Structure map for RESIDUAL on PowerPC Reference Platform */
+/* residual.h - Residual data structure passed in r3. */
+/* Load point passed in r4 to boot image. */
+/* For enum's: if given in hex then they are bit significant, */
+/* i.e. only one bit is on for each enum */
+/* Reserved fields must be filled with zeros. */
+
+/*
+ * $Id$
+ */
+
+#ifndef _RESIDUAL_
+#define _RESIDUAL_
+
+#ifndef ASM
+
+#define MAX_CPUS 32 /* These should be set to the maximum */
+#define MAX_MEMS 64 /* number possible for this system. */
+#define MAX_DEVICES 256 /* Changing these will change the */
+#define AVE_PNP_SIZE 32 /* structure, hence the version of */
+#define MAX_MEM_SEGS 64 /* this header file. */
+
+/*----------------------------------------------------------------------------*/
+/* Public structures... */
+/*----------------------------------------------------------------------------*/
+
+#include <bsp/pnp.h>
+
+typedef enum _L1CACHE_TYPE {
+ NoneCAC = 0,
+ SplitCAC = 1,
+ CombinedCAC = 2
+ } L1CACHE_TYPE;
+
+typedef enum _TLB_TYPE {
+ NoneTLB = 0,
+ SplitTLB = 1,
+ CombinedTLB = 2
+ } TLB_TYPE;
+
+typedef enum _FIRMWARE_SUPPORT {
+ Conventional = 0x01,
+ OpenFirmware = 0x02,
+ Diagnostics = 0x04,
+ LowDebug = 0x08,
+ Multiboot = 0x10,
+ LowClient = 0x20,
+ Hex41 = 0x40,
+ FAT = 0x80,
+ ISO9660 = 0x0100,
+ SCSI_InitiatorID_Override = 0x0200,
+ Tape_Boot = 0x0400,
+ FW_Boot_Path = 0x0800
+ } FIRMWARE_SUPPORT;
+
+typedef enum _FIRMWARE_SUPPLIERS {
+ IBMFirmware = 0x00,
+ MotoFirmware = 0x01, /* 7/18/95 */
+ FirmWorks = 0x02, /* 10/5/95 */
+ Bull = 0x03, /* 04/03/96 */
+ } FIRMWARE_SUPPLIERS;
+
+typedef enum _ENDIAN_SWITCH_METHODS {
+ UsePort92 = 0x01,
+ UsePCIConfigA8 = 0x02,
+ UseFF001030 = 0x03,
+ } ENDIAN_SWITCH_METHODS;
+
+typedef enum _SPREAD_IO_METHODS {
+ UsePort850 = 0x00,
+/*UsePCIConfigA8 = 0x02,*/
+ } SPREAD_IO_METHODS;
+
+typedef struct _VPD {
+
+ /* Box dependent stuff */
+ unsigned char PrintableModel[32]; /* Null terminated string.
+ Must be of the form:
+ vvv,<20h>,<model designation>,<0x0>
+ where vvv is the vendor ID
+ e.g. IBM PPS MODEL 6015<0x0> */
+ unsigned char Serial[16]; /* 12/94:
+ Serial Number; must be of the form:
+ vvv<serial number> where vvv is the
+ vendor ID.
+ e.g. IBM60151234567<20h><20h> */
+ unsigned char Reserved[48];
+ unsigned long FirmwareSupplier; /* See FirmwareSuppliers enum */
+ unsigned long FirmwareSupports; /* See FirmwareSupport enum */
+ unsigned long NvramSize; /* Size of nvram in bytes */
+ unsigned long NumSIMMSlots;
+ unsigned short EndianSwitchMethod; /* See EndianSwitchMethods enum */
+ unsigned short SpreadIOMethod; /* See SpreadIOMethods enum */
+ unsigned long SmpIar;
+ unsigned long RAMErrLogOffset; /* Heap offset to error log */
+ unsigned long Reserved5;
+ unsigned long Reserved6;
+ unsigned long ProcessorHz; /* Processor clock frequency in Hertz */
+ unsigned long ProcessorBusHz; /* Processor bus clock frequency */
+ unsigned long Reserved7;
+ unsigned long TimeBaseDivisor; /* (Bus clocks per timebase tick)*1000 */
+ unsigned long WordWidth; /* Word width in bits */
+ unsigned long PageSize; /* Page size in bytes */
+ unsigned long CoherenceBlockSize; /* Unit of transfer in/out of cache
+ for which coherency is maintained;
+ normally <= CacheLineSize. */
+ unsigned long GranuleSize; /* Unit of lock allocation to avoid */
+ /* false sharing of locks. */
+
+ /* L1 Cache variables */
+ unsigned long CacheSize; /* L1 Cache size in KB. This is the */
+ /* total size of the L1, whether */
+ /* combined or split */
+ unsigned long CacheAttrib; /* L1CACHE_TYPE */
+ unsigned long CacheAssoc; /* L1 Cache associativity. Use this
+ for combined cache. If split, put
+ zeros here. */
+ unsigned long CacheLineSize; /* L1 Cache line size in bytes. Use
+ for combined cache. If split, put
+ zeros here. */
+ /* For split L1 Cache: (= combined if combined cache) */
+ unsigned long I_CacheSize;
+ unsigned long I_CacheAssoc;
+ unsigned long I_CacheLineSize;
+ unsigned long D_CacheSize;
+ unsigned long D_CacheAssoc;
+ unsigned long D_CacheLineSize;
+
+ /* Translation Lookaside Buffer variables */
+ unsigned long TLBSize; /* Total number of TLBs on the system */
+ unsigned long TLBAttrib; /* Combined I+D or split TLB */
+ unsigned long TLBAssoc; /* TLB Associativity. Use this for
+ combined TLB. If split, put zeros
+ here. */
+ /* For split TLB: (= combined if combined TLB) */
+ unsigned long I_TLBSize;
+ unsigned long I_TLBAssoc;
+ unsigned long D_TLBSize;
+ unsigned long D_TLBAssoc;
+
+ unsigned long ExtendedVPD; /* Offset to extended VPD area;
+ null if unused */
+ } VPD;
+
+typedef enum _DEVICE_FLAGS {
+ Enabled = 0x4000, /* 1 - PCI device is enabled */
+ Integrated = 0x2000,
+ Failed = 0x1000, /* 1 - device failed POST code tests */
+ Static = 0x0800, /* 0 - dynamically configurable
+ 1 - static */
+ Dock = 0x0400, /* 0 - not a docking station device
+ 1 - is a docking station device */
+ Boot = 0x0200, /* 0 - device cannot be used for BOOT
+ 1 - can be a BOOT device */
+ Configurable = 0x0100, /* 1 - device is configurable */
+ Disableable = 0x80, /* 1 - device can be disabled */
+ PowerManaged = 0x40, /* 0 - not managed; 1 - managed */
+ ReadOnly = 0x20, /* 1 - device is read only */
+ Removable = 0x10, /* 1 - device is removable */
+ ConsoleIn = 0x08,
+ ConsoleOut = 0x04,
+ Input = 0x02,
+ Output = 0x01
+ } DEVICE_FLAGS;
+
+typedef enum _BUS_ID {
+ ISADEVICE = 0x01,
+ EISADEVICE = 0x02,
+ PCIDEVICE = 0x04,
+ PCMCIADEVICE = 0x08,
+ PNPISADEVICE = 0x10,
+ MCADEVICE = 0x20,
+ MXDEVICE = 0x40, /* Devices on mezzanine bus */
+ PROCESSORDEVICE = 0x80, /* Devices on processor bus */
+ VMEDEVICE = 0x100,
+ } BUS_ID;
+
+typedef struct _DEVICE_ID {
+ unsigned long BusId; /* See BUS_ID enum above */
+ unsigned long DevId; /* Big Endian format */
+ unsigned long SerialNum; /* For multiple usage of a single
+ DevId */
+ unsigned long Flags; /* See DEVICE_FLAGS enum above */
+ unsigned char BaseType; /* See pnp.h for bit definitions */
+ unsigned char SubType; /* See pnp.h for bit definitions */
+ unsigned char Interface; /* See pnp.h for bit definitions */
+ unsigned char Spare;
+ } DEVICE_ID;
+
+typedef union _BUS_ACCESS {
+ struct _PnPAccess{
+ unsigned char CSN;
+ unsigned char LogicalDevNumber;
+ unsigned short ReadDataPort;
+ } PnPAccess;
+ struct _ISAAccess{
+ unsigned char SlotNumber; /* ISA Slot Number generally not
+ available; 0 if unknown */
+ unsigned char LogicalDevNumber;
+ unsigned short ISAReserved;
+ } ISAAccess;
+ struct _MCAAccess{
+ unsigned char SlotNumber;
+ unsigned char LogicalDevNumber;
+ unsigned short MCAReserved;
+ } MCAAccess;
+ struct _PCMCIAAccess{
+ unsigned char SlotNumber;
+ unsigned char LogicalDevNumber;
+ unsigned short PCMCIAReserved;
+ } PCMCIAAccess;
+ struct _EISAAccess{
+ unsigned char SlotNumber;
+ unsigned char FunctionNumber;
+ unsigned short EISAReserved;
+ } EISAAccess;
+ struct _PCIAccess{
+ unsigned char BusNumber;
+ unsigned char DevFuncNumber;
+ unsigned short PCIReserved;
+ } PCIAccess;
+ struct _ProcBusAccess{
+ unsigned char BusNumber;
+ unsigned char BUID;
+ unsigned short ProcBusReserved;
+ } ProcBusAccess;
+ } BUS_ACCESS;
+
+/* Per logical device information */
+typedef struct _PPC_DEVICE {
+ DEVICE_ID DeviceId;
+ BUS_ACCESS BusAccess;
+
+ /* The following three are offsets into the DevicePnPHeap */
+ /* All are in PnP compressed format */
+ unsigned long AllocatedOffset; /* Allocated resource description */
+ unsigned long PossibleOffset; /* Possible resource description */
+ unsigned long CompatibleOffset; /* Compatible device identifiers */
+ } PPC_DEVICE;
+
+typedef enum _CPU_STATE {
+ CPU_GOOD = 0, /* CPU is present, and active */
+ CPU_GOOD_FW = 1, /* CPU is present, and in firmware */
+ CPU_OFF = 2, /* CPU is present, but inactive */
+ CPU_FAILED = 3, /* CPU is present, but failed POST */
+ CPU_NOT_PRESENT = 255 /* CPU not present */
+ } CPU_STATE;
+
+typedef struct _PPC_CPU {
+ unsigned long CpuType; /* Result of mfspr from Processor
+ Version Register (PVR).
+ PVR(0-15) = Version (e.g. 601)
+ PVR(16-31) = EC Level */
+ unsigned char CpuNumber; /* CPU Number for this processor */
+ unsigned char CpuState; /* CPU State, see CPU_STATE enum */
+ unsigned short Reserved;
+ } PPC_CPU;
+
+typedef struct _PPC_MEM {
+ unsigned long SIMMSize; /* 0 - absent or bad
+ 8M, 32M (in MB) */
+ } PPC_MEM;
+
+typedef enum _MEM_USAGE {
+ Other = 0x8000,
+ ResumeBlock = 0x4000, /* for use by power management */
+ SystemROM = 0x2000, /* Flash memory (populated) */
+ UnPopSystemROM = 0x1000, /* Unpopulated part of SystemROM area */
+ IOMemory = 0x0800,
+ SystemIO = 0x0400,
+ SystemRegs = 0x0200,
+ PCIAddr = 0x0100,
+ PCIConfig = 0x80,
+ ISAAddr = 0x40,
+ Unpopulated = 0x20, /* Unpopulated part of System Memory */
+ Free = 0x10, /* Free part of System Memory */
+ BootImage = 0x08, /* BootImage part of System Memory */
+ FirmwareCode = 0x04, /* FirmwareCode part of System Memory */
+ FirmwareHeap = 0x02, /* FirmwareHeap part of System Memory */
+ FirmwareStack = 0x01 /* FirmwareStack part of System Memory*/
+ } MEM_USAGE;
+
+typedef struct _MEM_MAP {
+ unsigned long Usage; /* See MEM_USAGE above */
+ unsigned long BasePage; /* Page number measured in 4KB pages */
+ unsigned long PageCount; /* Page count measured in 4KB pages */
+ } MEM_MAP;
+
+typedef struct _RESIDUAL {
+ unsigned long ResidualLength; /* Length of Residual */
+ unsigned char Version; /* of this data structure */
+ unsigned char Revision; /* of this data structure */
+ unsigned short EC; /* of this data structure */
+ /* VPD */
+ VPD VitalProductData;
+ /* CPU */
+ unsigned short MaxNumCpus; /* Max CPUs in this system */
+ unsigned short ActualNumCpus; /* ActualNumCpus < MaxNumCpus means */
+ /* that there are unpopulated or */
+ /* otherwise unusable cpu locations */
+ PPC_CPU Cpus[MAX_CPUS];
+ /* Memory */
+ unsigned long TotalMemory; /* Total amount of memory installed */
+ unsigned long GoodMemory; /* Total amount of good memory */
+ unsigned long ActualNumMemSegs;
+ MEM_MAP Segs[MAX_MEM_SEGS];
+ unsigned long ActualNumMemories;
+ PPC_MEM Memories[MAX_MEMS];
+ /* Devices */
+ unsigned long ActualNumDevices;
+ PPC_DEVICE Devices[MAX_DEVICES];
+ unsigned char DevicePnPHeap[2*MAX_DEVICES*AVE_PNP_SIZE];
+ } RESIDUAL;
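A minimal sketch of walking the memory map above, assuming a populated RESIDUAL; it relies only on the Usage flags and page counts already defined in this header:

/* Sketch: total number of free 4KB pages reported by the firmware. */
static unsigned long free_pages_example(const RESIDUAL *res)
{
  unsigned long i, pages = 0;

  for (i = 0; i < res->ActualNumMemSegs; i++)
    if (res->Segs[i].Usage & Free)
      pages += res->Segs[i].PageCount;
  return pages;
}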
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+extern RESIDUAL residualCopy;
+
+extern void print_residual_device_info(void);
+#ifndef __BOOT__
+extern PPC_DEVICE *residual_find_device(RESIDUAL *res, unsigned long BusMask,
+ unsigned char * DevID, int BaseType,
+ int SubType, int Interface, int n);
+#else
+extern PPC_DEVICE *residual_find_device(unsigned long BusMask,
+ unsigned char * DevID, int BaseType,
+ int SubType, int Interface, int n);
+#endif
+extern PnP_TAG_PACKET *PnP_find_packet(unsigned char *p, unsigned packet_tag,
+ int n);
+extern PnP_TAG_PACKET *PnP_find_small_vendor_packet(unsigned char *p,
+ unsigned packet_type,
+ int n);
+extern PnP_TAG_PACKET *PnP_find_large_vendor_packet(unsigned char *p,
+ unsigned packet_type,
+ int n);
+#endif /* ASM */
+#endif /* ndef _RESIDUAL_ */
+
diff --git a/c/src/lib/libbsp/powerpc/shared/start/Makefile.in b/c/src/lib/libbsp/powerpc/shared/start/Makefile.in
new file mode 100644
index 0000000000..08ba289e91
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/start/Makefile.in
@@ -0,0 +1,29 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/shared/start
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+H_FILES =
+
+SRCS = $(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+
+all: ${ARCH} $(SRCS)
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libbsp/powerpc/shared/start/start.S b/c/src/lib/libbsp/powerpc/shared/start/start.S
new file mode 100644
index 0000000000..cc2dabd2c7
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/start/start.S
@@ -0,0 +1,131 @@
+/*
+ * start.S : RTEMS entry point
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ *
+ */
+
+#include <libcpu/cpu.h>
+#include <libcpu/io.h>
+#include <rtems/score/targopts.h>
+#include <rtems/score/cpu.h>
+#include "asm.h"
+
+#define SYNC \
+ sync; \
+ isync
+
+#define KERNELBASE 0x0
+
+#define MONITOR_ENTER \
+ mfmsr r10 ; \
+ ori r10,r10,MSR_IP ; \
+ mtmsr r10 ; \
+ li r10,0x63 ; \
+ sc
+
+ .text
+ .globl __rtems_entry_point
+ .type __rtems_entry_point,@function
+__rtems_entry_point:
+#ifdef DEBUG_EARLY_START
+ MONITOR_ENTER
+#endif
+
+/*
+ * PREP
+ * This is jumped to on prep systems right after the kernel is relocated
+ * to its proper place in memory by the boot loader. The expected layout
+ * of the regs is:
+ * r3: ptr to residual data
+ * r4: initrd_start or if no initrd then 0
+ * r5: initrd_end - unused if r4 is 0
+ * r6: Start of command line string
+ * r7: End of command line string
+ *
+ */
+
+ mr r31,r3 /* save parameters */
+ mr r30,r4
+ mr r29,r5
+ mr r28,r6
+ mr r27,r7
+/*
+ * Use the first pair of BAT registers to map the 1st 64MB
+ * of RAM to KERNELBASE.
+ */
+ lis r11,KERNELBASE@h
+ ori r11,r11,0x7fe /* set up BAT registers for 604 */
+ li r8,2 /* R/W access */
+ mtspr DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
+ mtspr DBAT0U,r11 /* bit in upper BAT register */
+ mtspr IBAT0L,r8
+ mtspr IBAT0U,r11
+ isync
+
+/*
+ * we now have the 1st 64M of ram mapped with the bats.
+ */
+
+enter_C_code:
+ bl MMUon
+ /*
+ * stack = &__rtems_end + 4096
+ */
+ addis r9,r0, __rtems_end+(4096-CPU_MINIMUM_STACK_FRAME_SIZE)@ha
+ addi r9,r9, __rtems_end+(4096-CPU_MINIMUM_STACK_FRAME_SIZE)@l
+ mr r1, r9
+ bl zero_bss
+ /*
+ * restore prep boot params
+ */
+ mr r3,r31
+ mr r4,r30
+ mr r5,r29
+ mr r6,r28
+ mr r7,r27
+ bl save_boot_params
+ bl boot_card
+ bl _return_to_ppcbug
+
+ .globl MMUon
+ .type MMUon,@function
+MMUon:
+ mfmsr r0
+ ori r0,r0, MSR_IP | MSR_RI | MSR_IR | MSR_DR | MSR_EE | MSR_FE0 | MSR_FE1
+ xori r0, r0, MSR_EE | MSR_IP | MSR_FP
+ mflr r11
+ mtsrr0 r11
+ mtsrr1 r0
+ SYNC
+ rfi
+
+ .globl MMUoff
+ .type MMUoff,@function
+MMUoff:
+ mfmsr r0
+ ori r0,r0,MSR_IR| MSR_DR | MSR_IP
+ mflr r11
+ xori r0,r0,MSR_IR|MSR_DR
+ mtsrr0 r11
+ mtsrr1 r0
+ SYNC
+ rfi
+
+ .globl _return_to_ppcbug
+ .type _return_to_ppcbug,@function
+
+
+_return_to_ppcbug:
+ mflr r30
+ bl MMUoff
+ MONITOR_ENTER
+ bl MMUon
+ mtctr r30
+ bctr
diff --git a/c/src/lib/libbsp/powerpc/shared/startup/Makefile.in b/c/src/lib/libbsp/powerpc/shared/startup/Makefile.in
new file mode 100644
index 0000000000..984d5da57a
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/startup/Makefile.in
@@ -0,0 +1,35 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/shared/startup
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+H_FILES =
+
+SRCS = $(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
+
+INSTALLDIRS = $(PROJECT_RELEASE)/lib
+
+$(INSTALLDIRS):
+ @$(mkinstalldirs) $(INSTALLDIRS)
+
+all: ${ARCH} $(SRCS)
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libbsp/powerpc/shared/startup/bspstart.c b/c/src/lib/libbsp/powerpc/shared/startup/bspstart.c
new file mode 100644
index 0000000000..63181b603d
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/startup/bspstart.c
@@ -0,0 +1,338 @@
+/*
+ * This routine starts the application. It includes application,
+ * board, and monitor specific initialization and configuration.
+ * The generic CPU dependent initialization has been performed
+ * before this routine is invoked.
+ *
+ * COPYRIGHT (c) 1989-1998.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * Modified to support the MCP750.
+ * Modifications Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * $Id$
+ */
+
+#include <bsp.h>
+#include <rtems/libio.h>
+#include <libcsupport.h>
+#include <string.h>
+#include <bsp/consoleIo.h>
+#include <libcpu/spr.h>
+#include <bsp/residual.h>
+#include <bsp/pci.h>
+#include <bsp/openpic.h>
+#include <bsp/irq.h>
+#include <bsp.h>
+#include <libcpu/bat.h>
+#include <bsp/vectors.h>
+#include <bsp/motorola.h>
+
+extern void _return_to_ppcbug();
+extern unsigned long __rtems_end;
+extern unsigned long _end;
+extern unsigned long __bss_start;
+extern void L1_caches_enables();
+extern unsigned get_L2CR();
+extern void set_L2CR(unsigned);
+extern void bsp_cleanup(void);
+/*
+ * Copy of residuals passed by firmware
+ */
+RESIDUAL residualCopy;
+/*
+ * Copy of the additional boot parameters passed by the boot loader
+ */
+#define MAX_LOADER_ADD_PARM 80
+char loaderParam[MAX_LOADER_ADD_PARM];
+/*
+ * Vital board data obtained from the residual data
+ */
+/*
+ * Total memory, from the residual data
+ */
+unsigned int BSP_mem_size;
+/*
+ * PCI Bus Frequency
+ */
+unsigned int BSP_bus_frequency;
+/*
+ * processor clock frequency
+ */
+unsigned int BSP_processor_frequency;
+/*
+ * Time base divisor (how many ticks for 1 second).
+ */
+unsigned int BSP_time_base_divisor;
+/*
+ * system init stack and software IRQ stack sizes
+ */
+#define INIT_STACK_SIZE 0x1000
+#define INTR_STACK_SIZE 0x4000
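+/*
+ * Memory layout implied by the code below: the initial stack sits right
+ * after __rtems_end (INIT_STACK_SIZE bytes), followed by the software IRQ
+ * stack (INTR_STACK_SIZE bytes), then the heap; the RTEMS workspace is
+ * placed at the top of RAM.
+ */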
+
+void BSP_panic(char *s)
+{
+ printk("RTEMS 4.x PANIC %s\n", s);
+ _return_to_ppcbug();
+}
+
+void _BSP_Fatal_error(unsigned int v)
+{
+ printk("RTEMS 4.x PANIC ERROR %x\n", v);
+ _return_to_ppcbug();
+}
+
+/*
+ * The original table from the application and our copy of it with
+ * some changes.
+ */
+
+extern rtems_configuration_table Configuration;
+
+rtems_configuration_table BSP_Configuration;
+
+rtems_cpu_table Cpu_table;
+
+char *rtems_progname;
+
+/*
+ * Use the shared implementations of the following routines
+ */
+
+void bsp_postdriver_hook(void);
+void bsp_libc_init( void *, unsigned32, int );
+
+/*
+ * Function: bsp_pretasking_hook
+ * Created: 95/03/10
+ *
+ * Description:
+ * BSP pretasking hook. Called just before drivers are initialized.
+ * Used to setup libc and install any BSP extensions.
+ *
+ * NOTES:
+ * Must not use libc (to do io) from here, since drivers are
+ * not yet initialized.
+ *
+ */
+
+void bsp_pretasking_hook(void)
+{
+ rtems_unsigned32 heap_start;
+ rtems_unsigned32 heap_size;
+
+ heap_start = ((rtems_unsigned32) &__rtems_end) +INIT_STACK_SIZE + INTR_STACK_SIZE;
+ if (heap_start & (CPU_ALIGNMENT-1))
+ heap_start = (heap_start + CPU_ALIGNMENT) & ~(CPU_ALIGNMENT-1);
+
+ heap_size = (BSP_mem_size - heap_start) - BSP_Configuration.work_space_size;
+
+#ifdef SHOW_MORE_INIT_SETTINGS
+ printk(" HEAP start %x size %x\n", heap_start, heap_size);
+#endif
+ bsp_libc_init((void *) heap_start, heap_size, 0);
+
+#ifdef RTEMS_DEBUG
+ rtems_debug_enable( RTEMS_DEBUG_ALL_MASK );
+#endif
+}
+
+void zero_bss()
+{
+ memset(&__bss_start, 0, ((unsigned) (&__rtems_end)) - ((unsigned) &__bss_start));
+}
+
+void save_boot_params(RESIDUAL* r3, void *r4, void* r5, char *additional_boot_options)
+{
+
+ residualCopy = *r3;
+ strncpy(loaderParam, additional_boot_options, MAX_LOADER_ADD_PARM);
+ loaderParam[MAX_LOADER_ADD_PARM - 1] ='\0';
+}
+
+/*
+ * bsp_start
+ *
+ * This routine does the bulk of the system initialization.
+ */
+
+void bsp_start( void )
+{
+ int err;
+ unsigned char *stack;
+ unsigned l2cr;
+ register unsigned char* intrStack;
+ register unsigned int intrNestingLevel = 0;
+ unsigned char *work_space_start;
+ ppc_cpu_id_t myCpu;
+ ppc_cpu_revision_t myCpuRevision;
+ prep_t boardManufacturer;
+ motorolaBoard myBoard;
+ /*
+ * Get CPU identification dynamically. Note that the get_ppc_cpu_type() function
+   * stores the result in global variables so that it can be used later...
+ */
+ myCpu = get_ppc_cpu_type();
+ myCpuRevision = get_ppc_cpu_revision();
+ /*
+   * Enable the L1 caches. Note that the L1_caches_enables() code checks for
+   * the relevant CPU type, which is why myCpu is not used here...
+ */
+ L1_caches_enables();
+ /*
+   * Enable the L2 cache. Note that the set_L2CR(L2CR) code checks for
+   * the relevant CPU type (mpc750)...
+ */
+ l2cr = get_L2CR();
+#ifdef SHOW_LCR2_REGISTER
+ printk("Initial L2CR value = %x\n", l2cr);
+#endif
+ if ( (! (l2cr & 0x80000000)) && ((int) l2cr == -1))
+ set_L2CR(0xb9A14000);
+ /*
+   * The initial stack has already been set to this value in start.S,
+   * so there is no need to set it in r1 again... It is just for information
+   * so that it can be printed without accessing R1.
+ */
+ stack = ((unsigned char*) &__rtems_end) + INIT_STACK_SIZE - CPU_MINIMUM_STACK_FRAME_SIZE;
+ /*
+ * Initialize the interrupt related settings
+ * SPRG0 = interrupt nesting level count
+ * SPRG1 = software managed IRQ stack
+ *
+   * This could be done later (e.g. in IRQ_INIT) but it helps to understand
+ * some settings below...
+ */
+ intrStack = ((unsigned char*) &__rtems_end) + INIT_STACK_SIZE + INTR_STACK_SIZE - CPU_MINIMUM_STACK_FRAME_SIZE;
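+  /* SPRG1 is SPR 273 (software IRQ stack), SPRG0 is SPR 272 (nesting level) */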
+ asm volatile ("mtspr 273, %0" : "=r" (intrStack) : "0" (intrStack));
+ asm volatile ("mtspr 272, %0" : "=r" (intrNestingLevel) : "0" (intrNestingLevel));
+ /*
+   * Initialize default raw exception handlers. See vectors/vectors_init.c
+ */
+ initialize_exceptions();
+ /*
+ * Init MMU block address translation to enable hardware
+ * access
+ */
+ /*
+ * PC legacy IO space used for inb/outb and all PC
+ * compatible hardware
+ */
+ setdbat(1, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
+ /*
+ * PCI devices memory area. Needed to access OPENPIC features
+ * provided by the RAVEN
+ */
+ setdbat(2, 0xc0000000, 0xc0000000, 0x08000000, IO_PAGE);
+ /*
+   * Must have access to the open pic PCI ACK registers
+ * provided by the RAVEN
+ */
+ setdbat(3, 0xfeff0000, 0xfeff0000, 0x10000, IO_PAGE);
+
+ select_console(CONSOLE_LOG);
+
+ /* We check that the keyboard is present and immediately
+ * select the serial console if not.
+ */
+ err = kbdreset();
+ if (err) select_console(CONSOLE_SERIAL);
+
+ boardManufacturer = checkPrepBoardType(&residualCopy);
+ if (boardManufacturer != PREP_Motorola) {
+ printk("Unsupported hardware vendor\n");
+ while (1);
+ }
+ myBoard = getMotorolaBoard();
+
+ printk("-----------------------------------------\n");
+ printk("Welcome to %s on %s\n", _RTEMS_version, motorolaBoardToString(myBoard));
+ printk("-----------------------------------------\n");
+#ifdef SHOW_MORE_INIT_SETTINGS
+ printk("Residuals are located at %x\n", (unsigned) &residualCopy);
+ printk("Additionnal boot options are %s\n", loaderParam);
+ printk("Initial system stack at %x\n",stack);
+ printk("Software IRQ stack at %x\n",intrStack);
+ printk("-----------------------------------------\n");
+#endif
+
+#ifdef TEST_RETURN_TO_PPCBUG
+ printk("Hit <Enter> to return to PPCBUG monitor\n");
+ printk("When Finished hit GO. It should print <Back from monitor>\n");
+ debug_getc();
+ _return_to_ppcbug();
+ printk("Back from monitor\n");
+ _return_to_ppcbug();
+#endif /* TEST_RETURN_TO_PPCBUG */
+
+#ifdef SHOW_MORE_INIT_SETTINGS
+ printk("Going to start PCI buses scanning and initialization\n");
+#endif
+ InitializePCI();
+#ifdef SHOW_MORE_INIT_SETTINGS
+ printk("Number of PCI buses found is : %d\n", BusCountPCI());
+#endif
+#ifdef TEST_RAW_EXCEPTION_CODE
+ printk("Testing exception handling Part 1\n");
+ /*
+ * Cause a software exception
+ */
+ __asm__ __volatile ("sc");
+ /*
+   * Check we can still catch exceptions and return correctly.
+ */
+ printk("Testing exception handling Part 2\n");
+ __asm__ __volatile ("sc");
+#endif
+
+
+ BSP_mem_size = residualCopy.TotalMemory;
+ BSP_bus_frequency = residualCopy.VitalProductData.ProcessorBusHz;
+ BSP_processor_frequency = residualCopy.VitalProductData.ProcessorHz;
+ BSP_time_base_divisor = (residualCopy.VitalProductData.TimeBaseDivisor?
+ residualCopy.VitalProductData.TimeBaseDivisor : 4000);
+
+ /*
+ * Set up our hooks
+ * Make sure libc_init is done before drivers initialized so that
+ * they can use atexit()
+ */
+
+ Cpu_table.pretasking_hook = bsp_pretasking_hook; /* init libc, etc. */
+ Cpu_table.postdriver_hook = bsp_postdriver_hook;
+ Cpu_table.do_zero_of_workspace = TRUE;
+ Cpu_table.interrupt_stack_size = INTR_STACK_SIZE;
+ Cpu_table.clicks_per_usec = BSP_processor_frequency/(BSP_time_base_divisor * 1000);
+ Cpu_table.exceptions_in_RAM = TRUE;
+
+#ifdef SHOW_MORE_INIT_SETTINGS
+ printk("BSP_Configuration.work_space_size = %x\n", BSP_Configuration.work_space_size);
+#endif
+ work_space_start =
+ (unsigned char *)BSP_mem_size - BSP_Configuration.work_space_size;
+
+ if ( work_space_start <= ((unsigned char *)&__rtems_end) + INIT_STACK_SIZE + INTR_STACK_SIZE) {
+ printk( "bspstart: Not enough RAM!!!\n" );
+ bsp_cleanup();
+ }
+
+ BSP_Configuration.work_space_start = work_space_start;
+
+ /*
+ * Account for the console's resources
+ */
+
+ console_reserve_resources( &BSP_Configuration );
+ /*
+   * Initialize the RTEMS IRQ system
+ */
+ BSP_rtems_irq_mng_init(0);
+#ifdef SHOW_MORE_INIT_SETTINGS
+ printk("Exit from bspstart\n");
+#endif
+}
diff --git a/c/src/lib/libbsp/powerpc/shared/startup/linkcmds b/c/src/lib/libbsp/powerpc/shared/startup/linkcmds
new file mode 100644
index 0000000000..0eb6628de9
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/startup/linkcmds
@@ -0,0 +1,147 @@
+OUTPUT_FORMAT("elf32-powerpc", "elf32-powerpc",
+ "elf32-powerpc")
+OUTPUT_ARCH(powerpc)
+ENTRY(_start)
+/* Do we need any of these for elf?
+ __DYNAMIC = 0; */
+PROVIDE (__stack = 0);
+MEMORY {
+ VECTORS : ORIGIN = 0x0 , LENGTH = 0x3000
+ CODE : ORIGIN = 0x3000 , LENGTH = 0x100000
+}
+SECTIONS
+{
+ .entry_point_section :
+ {
+ *(.entry_point_section)
+ } > VECTORS
+
+ /*
+ * This section is used only if NO_DYNAMIC_EXCEPTION_VECTOR_INSTALL
+ * is defined in vectors/vectors.S
+ */
+ .vectors :
+ {
+ *(.vectors)
+ } > VECTORS
+
+ /* Read-only sections, merged into text segment: */
+ .interp : { *(.interp) } > CODE
+ .hash : { *(.hash) } > CODE
+ .dynsym : { *(.dynsym) } > CODE
+ .dynstr : { *(.dynstr) } > CODE
+ .gnu.version : { *(.gnu.version) } > CODE
+ .gnu.version_d : { *(.gnu.version_d) } > CODE
+ .gnu.version_r : { *(.gnu.version_r) } > CODE
+ .rela.text :
+ { *(.rela.text) *(.rela.gnu.linkonce.t*) } > CODE
+ .rela.data :
+ { *(.rela.data) *(.rela.gnu.linkonce.d*) } > CODE
+ .rela.rodata :
+ { *(.rela.rodata) *(.rela.gnu.linkonce.r*) } > CODE
+ .rela.got : { *(.rela.got) } > CODE
+ .rela.got1 : { *(.rela.got1) } > CODE
+ .rela.got2 : { *(.rela.got2) } > CODE
+ .rela.ctors : { *(.rela.ctors) } > CODE
+ .rela.dtors : { *(.rela.dtors) } > CODE
+ .rela.init : { *(.rela.init) } > CODE
+ .rela.fini : { *(.rela.fini) } > CODE
+ .rela.bss : { *(.rela.bss) } > CODE
+ .rela.plt : { *(.rela.plt) } > CODE
+ .rela.sdata : { *(.rela.sdata) } > CODE
+ .rela.sbss : { *(.rela.sbss) } > CODE
+ .rela.sdata2 : { *(.rela.sdata2) } > CODE
+ .rela.sbss2 : { *(.rela.sbss2) } > CODE
+ .text :
+ {
+ *(.text)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.gnu.linkonce.t*)
+ } > CODE
+ .init : { *(.init) } > CODE
+ .fini : { *(.fini) } > CODE
+ .rodata : { *(.rodata) *(.gnu.linkonce.r*) } > CODE
+ .rodata1 : { *(.rodata1) } > CODE
+ _etext = .;
+ PROVIDE (etext = .);
+ .sdata2 : { *(.sdata2) } > CODE
+ .sbss2 : { *(.sbss2) } > CODE
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. It would
+ be more correct to do this:
+ . = ALIGN(0x40000) + (ALIGN(8) & (0x40000 - 1));
+ The current expression does not correctly handle the case of a
+ text segment ending precisely at the end of a page; it causes the
+ data segment to skip a page. The above expression does not have
+ this problem, but it will currently (2/95) cause BFD to allocate
+ a single segment, combining both text and data, for this case.
+ This will prevent the text segment from being shared among
+ multiple executions of the program; I think that is more
+ important than losing a page of the virtual address space (note
+ that no actual memory is lost; the page which is skipped can not
+ be referenced). */
+ . = ALIGN(0x1000);
+ .data :
+ {
+ *(.data)
+ *(.gnu.linkonce.d*)
+ CONSTRUCTORS
+ } > CODE
+ .data1 : { *(.data1) } > CODE
+ PROVIDE (__EXCEPT_START__ = .);
+ .gcc_except_table : { *(.gcc_except_table) } > CODE
+ PROVIDE (__EXCEPT_END__ = .);
+ .got1 : { *(.got1) } > CODE
+ .dynamic : { *(.dynamic) } > CODE
+ /* Put .ctors and .dtors next to the .got2 section, so that the pointers
+ get relocated with -mrelocatable. Also put in the .fixup pointers.
+ The current compiler no longer needs this, but keep it around for 2.7.2 */
+ PROVIDE (_GOT2_START_ = .);
+ .got2 : { *(.got2) } > CODE
+ PROVIDE (__CTOR_LIST__ = .);
+ .ctors : { *(.ctors) } > CODE
+ PROVIDE (__CTOR_END__ = .);
+ PROVIDE (__DTOR_LIST__ = .);
+ .dtors : { *(.dtors) } > CODE
+ PROVIDE (__DTOR_END__ = .);
+ PROVIDE (_FIXUP_START_ = .);
+ .fixup : { *(.fixup) } > CODE
+ PROVIDE (_FIXUP_END_ = .);
+ PROVIDE (_GOT2_END_ = .);
+ PROVIDE (_GOT_START_ = .);
+ .got : { *(.got) } > CODE
+ .got.plt : { *(.got.plt) } > CODE
+ PROVIDE (_GOT_END_ = .);
+ /* We want the small data sections together, so single-instruction offsets
+ can access them all, and initialized data all before uninitialized, so
+ we can shorten the on-disk segment size. */
+ .sdata : { *(.sdata) } > CODE
+ _edata = .;
+ PROVIDE (edata = .);
+ .sbss :
+ {
+ PROVIDE (__sbss_start = .);
+ *(.sbss)
+ *(.scommon)
+ *(.dynsbss)
+ PROVIDE (__sbss_end = .);
+ } > CODE
+ .plt : { *(.plt) } > CODE
+ .bss :
+ {
+ PROVIDE (__bss_start = .);
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ . = ALIGN(16);
+ } > CODE
+ . = ALIGN(16);
+ _end = . ;
+ __rtems_end = . ;
+ PROVIDE (end = .);
+ /DISCARD/ :
+ {
+ *(.comment)
+ }
+}
diff --git a/c/src/lib/libbsp/powerpc/shared/vectors/Makefile.in b/c/src/lib/libbsp/powerpc/shared/vectors/Makefile.in
new file mode 100644
index 0000000000..43c28cb86a
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/vectors/Makefile.in
@@ -0,0 +1,39 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/shared/vectors
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+H_FILES = $(srcdir)/vectors.h
+
+SRCS = $(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
+
+INSTALLDIRS = $(PROJECT_INCLUDE)/bsp
+
+$(INSTALLDIRS):
+ @$(mkinstalldirs) $(INSTALLDIRS)
+
+preinstall:
+ @$(mkinstalldirs) $(PROJECT_INCLUDE)/bsp
+ @$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/bsp
+
+all: ${ARCH} $(SRCS) preinstall
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libbsp/powerpc/shared/vectors/vectors.S b/c/src/lib/libbsp/powerpc/shared/vectors/vectors.S
new file mode 100644
index 0000000000..fca0cbfac1
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/vectors/vectors.S
@@ -0,0 +1,154 @@
+/*
+ * (c) 1999, Eric Valette valette@crf.canon.fr
+ *
+ *
+ * This file contains the assembly code for the PowerPC
+ * exception veneers for RTEMS.
+ *
+ * $Id$
+ */
+
+
+
+#include <bsp/vectors.h>
+#include <libcpu/cpu.h>
+#include <rtems/score/targopts.h>
+#include "asm.h"
+
+
+#define SYNC \
+ sync; \
+ isync
+
+ PUBLIC_VAR (__rtems_start)
+ .section .entry_point_section,"awx",@progbits
+/*
+ * Entry point information used by bootloader code
+ */
+SYM (__rtems_start):
+ .long __rtems_entry_point
+
+ /*
+ * end of special Entry point section
+ */
+ .text
+ .p2align 5
+
+PUBLIC_VAR(default_exception_vector_code_prolog)
+SYM (default_exception_vector_code_prolog):
+ /*
+	 * leave room for the exception frame
+ */
+ stwu r1, - (EXCEPTION_FRAME_END)(r1)
+ stw r3, GPR3_OFFSET(r1)
+ stw r2, GPR2_OFFSET(r1)
+ mflr r2
+ stw r2, EXC_LR_OFFSET(r1)
+ bl 0f
+0: /*
+ * r3 = exception vector entry point
+ * (256 * vector number) + few instructions
+ */
+ mflr r3
+ /*
+ * r3 = r3 >> 8 = vector
+ */
+ srwi r3,r3,8
+ ba push_normalized_frame
+
+ PUBLIC_VAR (default_exception_vector_code_prolog_size)
+
+ default_exception_vector_code_prolog_size= . - default_exception_vector_code_prolog
+
+ .p2align 5
+PUBLIC_VAR (push_normalized_frame)
+SYM (push_normalized_frame):
+ stw r3, EXCEPTION_NUMBER_OFFSET(r1)
+ stw r0, GPR0_OFFSET(r1)
+ mfsrr0 r2
+ stw r2, SRR0_FRAME_OFFSET(r1)
+ mfsrr1 r3
+ stw r3, SRR1_FRAME_OFFSET(r1)
+ /*
+ * Save general purpose registers
+ * Already saved in prolog : R1, R2, R3, LR.
+ * Saved a few line above : R0
+ *
+ * Manual says that "stmw" instruction may be slower than
+ * series of individual "stw" but who cares about performance
+ * for the DEFAULT exception handler?
+ */
+ stmw r4, GPR4_OFFSET(r1) /* save R4->R31 */
+
+ mfcr r31
+ stw r31, EXC_CR_OFFSET(r1)
+ mfctr r30
+ stw r30, EXC_CTR_OFFSET(r1)
+ mfxer r28
+ stw r28, EXC_XER_OFFSET(r1)
+ /*
+ * compute SP at exception entry
+ */
+ addi r2, r1, EXCEPTION_FRAME_END
+ /*
+ * store it at the right place
+ */
+ stw r2, GPR1_OFFSET(r1)
+ /*
+ * Enable data and instruction address translation, exception nesting
+ */
+ mfmsr r3
+ ori r3,r3, MSR_RI | MSR_IR | MSR_DR
+ mtmsr r3
+ SYNC
+
+ /*
+ * Call C exception handler
+ */
+ /*
+	 * store the exception frame address in r3 (first param)
+ */
+ addi r3, r1, 0x8
+ /*
+ * globalExceptHdl(r3)
+ */
+ addis r4, 0, globalExceptHdl@ha
+ lwz r5, globalExceptHdl@l(r4)
+ mtlr r5
+ blrl
+ /*
+ * Restore registers status
+ */
+ lwz r31, EXC_CR_OFFSET(r1)
+ mtcr r31
+ lwz r30, EXC_CTR_OFFSET(r1)
+ mtctr r30
+ lwz r29, EXC_LR_OFFSET(r1)
+ mtlr r29
+ lwz r28, EXC_XER_OFFSET(r1)
+ mtxer r28
+
+ lmw r4, GPR4_OFFSET(r1)
+ lwz r2, GPR2_OFFSET(r1)
+ lwz r0, GPR0_OFFSET(r1)
+
+ /*
+ * Disable data and instruction translation. Make path non recoverable...
+ */
+ mfmsr r3
+ xori r3, r3, MSR_RI | MSR_IR | MSR_DR
+ mtmsr r3
+ SYNC
+ /*
+ * Restore rfi related settings
+ */
+
+ lwz r3, SRR1_FRAME_OFFSET(r1)
+ mtsrr1 r3
+ lwz r3, SRR0_FRAME_OFFSET(r1)
+ mtsrr0 r3
+
+ lwz r3, GPR3_OFFSET(r1)
+ addi r1,r1, EXCEPTION_FRAME_END
+ SYNC
+ rfi
diff --git a/c/src/lib/libbsp/powerpc/shared/vectors/vectors.h b/c/src/lib/libbsp/powerpc/shared/vectors/vectors.h
new file mode 100644
index 0000000000..101f46f121
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/vectors/vectors.h
@@ -0,0 +1,144 @@
+/*
+ * vectors.h Exception frame related constants and API.
+ *
+ * This include file describes the data structures and the functions
+ * implemented by RTEMS to handle exceptions.
+ *
+ * CopyRight (C) 1999 valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+#ifndef LIBBSP_POWERPC_MCP750_VECTORS_H
+#define LIBBSP_POWERPC_MCP750_VECTORS_H
+
+/*
+ * The callee (high level exception code written in C)
+ * will store the link register (return address) at entry r1 + 4.
+ * So leave room for it.
+ */
+#define LINK_REGISTER_CALLEE_UPDATE_ROOM 4
+#define SRR0_FRAME_OFFSET 8
+#define SRR1_FRAME_OFFSET 12
+#define EXCEPTION_NUMBER_OFFSET 16
+#define GPR0_OFFSET 20
+#define GPR1_OFFSET 24
+#define GPR2_OFFSET 28
+#define GPR3_OFFSET 32
+#define GPR4_OFFSET 36
+#define GPR5_OFFSET 40
+#define GPR6_OFFSET 44
+#define GPR7_OFFSET 48
+#define GPR8_OFFSET 52
+#define GPR9_OFFSET 56
+#define GPR10_OFFSET 60
+#define GPR11_OFFSET 64
+#define GPR12_OFFSET 68
+#define GPR13_OFFSET 72
+#define GPR14_OFFSET 76
+#define GPR15_OFFSET 80
+#define GPR16_OFFSET 84
+#define GPR17_OFFSET 88
+#define GPR18_OFFSET 92
+#define GPR19_OFFSET 96
+#define GPR20_OFFSET 100
+#define GPR21_OFFSET 104
+#define GPR22_OFFSET 108
+#define GPR23_OFFSET 112
+#define GPR24_OFFSET 116
+#define GPR25_OFFSET 120
+#define GPR26_OFFSET 124
+#define GPR27_OFFSET 128
+#define GPR28_OFFSET 132
+#define GPR29_OFFSET 136
+#define GPR30_OFFSET 140
+#define GPR31_OFFSET 144
+#define EXC_CR_OFFSET 148
+#define EXC_CTR_OFFSET 152
+#define EXC_XER_OFFSET 156
+#define EXC_LR_OFFSET 160
+#define EXC_DAR_OFFSET 164
+/*
+ * Maintain the EABI requested 8 byte alignment.
+ * As the SVR4 ABI requires 16, make it 16 (as some
+ * exceptions may need more registers to be processed...)
+ */
+#define EXCEPTION_FRAME_END 176
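+/*
+ * Note: the GPRn_OFFSET values above follow the regular pattern
+ * GPRn_OFFSET == GPR0_OFFSET + 4 * n; the stmw/lmw instructions in
+ * vectors.S rely on the r4..r31 save slots being contiguous 4 byte words.
+ */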
+
+#ifndef ASM
+/*
+ * default raw exception handlers
+ */
+
+extern void default_exception_vector_code_prolog();
+extern int default_exception_vector_code_prolog_size;
+
+/* codemove is like memmove, but it also gets the cache line size
+ * as 4th parameter to synchronize them. If this last parameter is
+ * zero, it performs more or less like memmove. No copy is performed if
+ * source and destination addresses are equal. However the caches
+ * are synchronized. Note that the size is always rounded up to the
+ * next multiple of 4.
+ */
+extern void * codemove(void *, const void *, unsigned int, unsigned long);
+extern void initialize_exceptions();
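+/*
+ * Illustrative sketch only (not an API defined here): installing a raw
+ * exception prolog at its vector address could look roughly like
+ *
+ *   codemove((void *) (vector << 8),
+ *            default_exception_vector_code_prolog,
+ *            (unsigned) &default_exception_vector_code_prolog_size,
+ *            cache_line_size);
+ *
+ * where cache_line_size is a hypothetical variable holding the data cache
+ * line size in bytes.
+ */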
+
+typedef struct {
+ unsigned EXC_SRR0;
+ unsigned EXC_SRR1;
+ unsigned _EXC_number;
+ unsigned GPR0;
+ unsigned GPR1;
+ unsigned GPR2;
+ unsigned GPR3;
+ unsigned GPR4;
+ unsigned GPR5;
+ unsigned GPR6;
+ unsigned GPR7;
+ unsigned GPR8;
+ unsigned GPR9;
+ unsigned GPR10;
+ unsigned GPR11;
+ unsigned GPR12;
+ unsigned GPR13;
+ unsigned GPR14;
+ unsigned GPR15;
+ unsigned GPR16;
+ unsigned GPR17;
+ unsigned GPR18;
+ unsigned GPR19;
+ unsigned GPR20;
+ unsigned GPR21;
+ unsigned GPR22;
+ unsigned GPR23;
+ unsigned GPR24;
+ unsigned GPR25;
+ unsigned GPR26;
+ unsigned GPR27;
+ unsigned GPR28;
+ unsigned GPR29;
+ unsigned GPR30;
+ unsigned GPR31;
+ unsigned EXC_CR;
+ unsigned EXC_CTR;
+ unsigned EXC_XER;
+ unsigned EXC_LR;
+ unsigned EXC_MSR;
+ unsigned EXC_DAR;
+}BSP_Exception_frame;
+
+
+typedef void (*exception_handler_t) (BSP_Exception_frame* excPtr);
+extern exception_handler_t globalExceptHdl;
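+/*
+ * Sketch (an assumption, not an API defined by this header): since the code
+ * in vectors.S simply loads and calls this pointer, an application could
+ * install its own handler with something like
+ *
+ *   void my_handler(BSP_Exception_frame *f)
+ *   {
+ *     printk("exception %d at 0x%x\n", f->_EXC_number, f->EXC_SRR0);
+ *   }
+ *   ...
+ *   globalExceptHdl = my_handler;
+ *
+ * my_handler is a hypothetical name used only for illustration.
+ */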
+/*
+ * Compatibility with pc386
+ */
+typedef BSP_Exception_frame CPU_Exception_frame;
+typedef exception_handler_t cpuExcHandlerType;
+
+#endif /* ASM */
+
+#endif /* LIBBSP_POWERPC_MCP750_VECTORS_H */
diff --git a/c/src/lib/libbsp/powerpc/shared/vectors/vectors_init.c b/c/src/lib/libbsp/powerpc/shared/vectors/vectors_init.c
new file mode 100644
index 0000000000..7310fbd2c5
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/shared/vectors/vectors_init.c
@@ -0,0 +1,122 @@
+/*
+ * vectors_init.c Exception handling initialisation (and generic handler).
+ *
+ * This file implements the exception handling initialisation performed by
+ * RTEMS and the default (generic) exception handler.
+ *
+ * CopyRight (C) 1999 valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+#include <bsp/vectors.h>
+#include <libcpu/raw_exception.h>
+#include <bsp.h>
+
+static rtems_raw_except_global_settings exception_config;
+static rtems_raw_except_connect_data exception_table[LAST_VALID_EXC + 1];
+
+exception_handler_t globalExceptHdl;
+
+void C_exception_handler(BSP_Exception_frame* excPtr)
+{
+ int recoverable = 0;
+
+ printk("exception handler called for exception %d\n", excPtr->_EXC_number);
+ printk("\t Next PC or Address of fault = %x\n", excPtr->EXC_SRR0);
+ printk("\t Saved MSR = %x\n", excPtr->EXC_SRR1);
+ printk("\t R0 = %x\n", excPtr->GPR0);
+ printk("\t R1 = %x\n", excPtr->GPR1);
+ printk("\t R2 = %x\n", excPtr->GPR2);
+ printk("\t R3 = %x\n", excPtr->GPR3);
+ printk("\t R4 = %x\n", excPtr->GPR4);
+ printk("\t R5 = %x\n", excPtr->GPR5);
+ printk("\t R6 = %x\n", excPtr->GPR6);
+ printk("\t R7 = %x\n", excPtr->GPR7);
+ printk("\t R8 = %x\n", excPtr->GPR8);
+ printk("\t R9 = %x\n", excPtr->GPR9);
+ printk("\t R10 = %x\n", excPtr->GPR10);
+ printk("\t R11 = %x\n", excPtr->GPR11);
+ printk("\t R12 = %x\n", excPtr->GPR12);
+ printk("\t R13 = %x\n", excPtr->GPR13);
+ printk("\t R14 = %x\n", excPtr->GPR14);
+ printk("\t R15 = %x\n", excPtr->GPR15);
+ printk("\t R16 = %x\n", excPtr->GPR16);
+ printk("\t R17 = %x\n", excPtr->GPR17);
+ printk("\t R18 = %x\n", excPtr->GPR18);
+ printk("\t R19 = %x\n", excPtr->GPR19);
+ printk("\t R20 = %x\n", excPtr->GPR20);
+ printk("\t R21 = %x\n", excPtr->GPR21);
+ printk("\t R22 = %x\n", excPtr->GPR22);
+ printk("\t R23 = %x\n", excPtr->GPR23);
+ printk("\t R24 = %x\n", excPtr->GPR24);
+ printk("\t R25 = %x\n", excPtr->GPR25);
+ printk("\t R26 = %x\n", excPtr->GPR26);
+ printk("\t R27 = %x\n", excPtr->GPR27);
+ printk("\t R28 = %x\n", excPtr->GPR28);
+ printk("\t R29 = %x\n", excPtr->GPR29);
+ printk("\t R30 = %x\n", excPtr->GPR30);
+ printk("\t R31 = %x\n", excPtr->GPR31);
+ printk("\t CR = %x\n", excPtr->EXC_CR);
+ printk("\t CTR = %x\n", excPtr->EXC_CTR);
+ printk("\t XER = %x\n", excPtr->EXC_XER);
+ printk("\t LR = %x\n", excPtr->EXC_LR);
+ printk("\t MSR = %x\n", excPtr->EXC_MSR);
+ if ( (excPtr->_EXC_number == ASM_DEC_VECTOR) ||
+ (excPtr->_EXC_number == ASM_SYS_VECTOR)
+ )
+ recoverable = 1;
+ if (!recoverable) BSP_panic("unrecoverable exception!!! Push reset button\n");
+}
+
+void nop_except_enable(const rtems_raw_except_connect_data* ptr)
+{
+}
+int except_always_enabled(const rtems_raw_except_connect_data* ptr)
+{
+ return 1;
+}
+
+void initialize_exceptions()
+{
+ int i;
+
+ /*
+   * Initialize the pointer used by the low level exception handling
+ */
+ globalExceptHdl = C_exception_handler;
+ /*
+ * Put default_exception_vector_code_prolog at relevant exception
+ * code entry addresses
+ */
+ exception_config.exceptSize = LAST_VALID_EXC + 1;
+ exception_config.rawExceptHdlTbl = &exception_table[0];
+ exception_config.defaultRawEntry.exceptIndex = 0;
+ exception_config.defaultRawEntry.hdl.vector = 0;
+ exception_config.defaultRawEntry.hdl.raw_hdl = default_exception_vector_code_prolog;
+ /*
+   * Note that on the next line the '&' before default_exception_vector_code_prolog_size
+   * is not a bug, as it is defined by a .set directly in asm...
+ */
+ exception_config.defaultRawEntry.hdl.raw_hdl_size = (unsigned) &default_exception_vector_code_prolog_size;
+ for (i=0; i <= exception_config.exceptSize; i++) {
+ if (!mpc750_vector_is_valid (i)) {
+ continue;
+ }
+ exception_table[i].exceptIndex = i;
+ exception_table[i].hdl = exception_config.defaultRawEntry.hdl;
+ exception_table[i].hdl.vector = i;
+ exception_table[i].on = nop_except_enable;
+ exception_table[i].off = nop_except_enable;
+ exception_table[i].isOn = except_always_enabled;
+ }
+ if (!mpc60x_init_exceptions(&exception_config)) {
+ BSP_panic("Exception handling initialization failed\n");
+ }
+ else {
+ printk("Exception handling initialization done\n");
+ }
+}
diff --git a/c/src/lib/libbsp/powerpc/support/new_exception_processing/c_isr.inl b/c/src/lib/libbsp/powerpc/support/new_exception_processing/c_isr.inl
new file mode 100644
index 0000000000..68f8116fe9
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/new_exception_processing/c_isr.inl
@@ -0,0 +1,9 @@
+RTEMS_INLINE_ROUTINE boolean _ISR_Is_in_progress( void )
+{
+ register unsigned int isr_nesting_level;
+ /*
+ * Move from special purpose register 0 (mfspr SPRG0, r3)
+ */
+ asm volatile ("mfspr %0, 272" : "=r" (isr_nesting_level));
+ return isr_nesting_level;
+}
diff --git a/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.c b/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.c
new file mode 100644
index 0000000000..e1c6eac4fd
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.c
@@ -0,0 +1,116 @@
+/*
+ * PowerPC CPU Dependent Source
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/context.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/interr.h>
+
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ *    thread_dispatch - address of dispatching routine
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+ _CPU_Table = *cpu_table;
+}
+
+/*PAGE
+ *
+ * _CPU_Context_Initialize
+ */
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ unsigned32 *stack_base,
+ unsigned32 size,
+ unsigned32 new_level,
+ void *entry_point,
+ boolean is_fp
+)
+{
+ unsigned32 msr_value;
+ unsigned32 sp;
+
+ sp = (unsigned32)stack_base + size - CPU_MINIMUM_STACK_FRAME_SIZE;
+ *((unsigned32 *)sp) = 0;
+ the_context->gpr1 = sp;
+
+ _CPU_MSR_GET( msr_value );
+
+ if (!(new_level & CPU_MODES_INTERRUPT_MASK)) {
+ msr_value |= MSR_EE;
+ }
+ else {
+ msr_value &= ~MSR_EE;
+ }
+
+ the_context->msr = msr_value;
+
+ /*
+ * The FP bit of the MSR should only be enabled if this is a floating
+ * point task. Unfortunately, the vfprintf_r routine in newlib
+ * ends up pushing a floating point register regardless of whether or
+ * not a floating point number is being printed. Serious restructuring
+ * of vfprintf.c will be required to avoid this behavior. At this
+ * time (7 July 1997), this restructuring is not being done.
+ */
+
+ /*if ( is_fp ) */
+ the_context->msr |= PPC_MSR_FP;
+
+ the_context->pc = (unsigned32)entry_point;
+}
+
+
+
+/*PAGE
+ *
+ * _CPU_Install_interrupt_stack
+ */
+
+void _CPU_Install_interrupt_stack( void )
+{
+}
+
+
+
+
diff --git a/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.h b/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.h
new file mode 100644
index 0000000000..145e2924eb
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.h
@@ -0,0 +1,979 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the PowerPC
+ * processor.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.h:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rtems/score/ppc.h> /* pick up machine definitions */
+#include <libcpu/cpu.h>
+
+#ifndef ASM
+#include <rtems/score/ppctypes.h>
+#endif
+
+/* conditional compilation parameters */
+
+/*
+ * Should the calls to _Thread_Enable_dispatch be inlined?
+ *
+ * If TRUE, then they are inlined.
+ * If FALSE, then a subroutine call is made.
+ *
+ * Basically this is an example of the classic trade-off of size
+ * versus speed. Inlining the call (TRUE) typically increases the
+ * size of RTEMS while speeding up the enabling of dispatching.
+ * [NOTE: In general, the _Thread_Dispatch_disable_level will
+ * only be 0 or 1 unless you are in an interrupt handler and that
+ * interrupt handler invokes the executive.] When not inlined
+ *  something calls _Thread_Enable_dispatch which in turn calls
+ *  _Thread_Dispatch.  If the enable dispatch is inlined, then
+ *  one subroutine call is avoided entirely.
+ */
+
+#define CPU_INLINE_ENABLE_DISPATCH FALSE
+
+/*
+ * Should the body of the search loops in _Thread_queue_Enqueue_priority
+ *  be unrolled one time?  If unrolled, each iteration of the loop examines
+ * two "nodes" on the chain being searched. Otherwise, only one node
+ * is examined per iteration.
+ *
+ * If TRUE, then the loops are unrolled.
+ * If FALSE, then the loops are not unrolled.
+ *
+ * The primary factor in making this decision is the cost of disabling
+ * and enabling interrupts (_ISR_Flash) versus the cost of rest of the
+ * body of the loop. On some CPUs, the flash is more expensive than
+ * one iteration of the loop body. In this case, it might be desirable
+ * to unroll the loop. It is important to note that on some CPUs, this
+ * code is the longest interrupt disable period in RTEMS. So it is
+ * necessary to strike a balance when setting this parameter.
+ */
+
+#define CPU_UNROLL_ENQUEUE_PRIORITY FALSE
+
+/*
+ * Does RTEMS manage a dedicated interrupt stack in software?
+ *
+ * If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
+ * If FALSE, nothing is done.
+ *
+ * If the CPU supports a dedicated interrupt stack in hardware,
+ * then it is generally the responsibility of the BSP to allocate it
+ * and set it up.
+ *
+ * If the CPU does not support a dedicated interrupt stack, then
+ * the porter has two options: (1) execute interrupts on the
+ * stack of the interrupted task, and (2) have RTEMS manage a dedicated
+ * interrupt stack.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU. Although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
+
+/*
+ * Does this CPU have hardware support for a dedicated interrupt stack?
+ *
+ * If TRUE, then it must be installed during initialization.
+ * If FALSE, then no installation is performed.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU. Although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
+
+/*
+ * Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
+ *
+ * If TRUE, then the memory is allocated during initialization.
+ *  If FALSE, then no memory is allocated by RTEMS for this purpose.
+ *
+ *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
+ * or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
+ */
+
+#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
+
+/*
+ *  Does RTEMS invoke the user's ISR with the vector number and
+ * a pointer to the saved interrupt frame (1) or just the vector
+ * number (0)?
+ */
+
+#define CPU_ISR_PASSES_FRAME_POINTER 0
+
+/*
+ * Does the CPU have hardware floating point?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
+ *
+ * If there is a FP coprocessor such as the i387 or mc68881, then
+ * the answer is TRUE.
+ *
+ * The macro name "PPC_HAS_FPU" should be made CPU specific.
+ * It indicates whether or not this CPU model has FP support. For
+ * example, it would be possible to have an i386_nofp CPU model
+ * which set this to false to indicate that you have an i386 without
+ * an i387 and wish to leave floating point support out of RTEMS.
+ */
+
+#if ( PPC_HAS_FPU == 1 )
+#define CPU_HARDWARE_FP TRUE
+#else
+#define CPU_HARDWARE_FP FALSE
+#endif
+
+/*
+ * Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
+ *
+ * So far, the only CPU in which this option has been used is the
+ * HP PA-RISC. The HP C compiler and gcc both implicitly use the
+ * floating point registers to perform integer multiplies. If
+ *  a function which you would not expect to utilize the FP unit DOES,
+ * then one can not easily predict which tasks will use the FP hardware.
+ * In this case, this option should be TRUE.
+ *
+ * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
+ */
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+
+/*
+ * Should the IDLE task have a floating point context?
+ *
+ * If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
+ * and it has a floating point context which is switched in and out.
+ * If FALSE, then the IDLE task does not have a floating point context.
+ *
+ * Setting this to TRUE negatively impacts the time required to preempt
+ * the IDLE task from an interrupt because the floating point context
+ * must be saved as part of the preemption.
+ */
+
+#define CPU_IDLE_TASK_IS_FP FALSE
+
+/*
+ * Should the saving of the floating point registers be deferred
+ * until a context switch is made to another different floating point
+ * task?
+ *
+ * If TRUE, then the floating point context will not be stored until
+ * necessary. It will remain in the floating point registers and not
+ *  disturbed until another floating point task is switched to.
+ *
+ * If FALSE, then the floating point context is saved when a floating
+ * point task is switched out and restored when the next floating point
+ * task is restored. The state of the floating point registers between
+ * those two operations is not specified.
+ *
+ * If the floating point context does NOT have to be saved as part of
+ * interrupt dispatching, then it should be safe to set this to TRUE.
+ *
+ * Setting this flag to TRUE results in using a different algorithm
+ * for deciding when to save and restore the floating point context.
+ * The deferred FP switch algorithm minimizes the number of times
+ * the FP context is saved and restored. The FP context is not saved
+ * until a context switch is made to another, different FP task.
+ * Thus in a system with only one FP task, the FP context will never
+ * be saved or restored.
+ */
+/*
+ * ACB Note: This could make debugging tricky..
+ */
+
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+
+/*
+ * Does this port provide a CPU dependent IDLE task implementation?
+ *
+ * If TRUE, then the routine _CPU_Thread_Idle_body
+ * must be provided and is the default IDLE thread body instead of
+ *  _Thread_Idle_body.
+ *
+ * If FALSE, then use the generic IDLE thread body if the BSP does
+ * not provide one.
+ *
+ * This is intended to allow for supporting processors which have
+ * a low power or idle mode. When the IDLE thread is executed, then
+ * the CPU can be powered down.
+ *
+ * The order of precedence for selecting the IDLE thread body is:
+ *
+ * 1. BSP provided
+ * 2. CPU dependent (if provided)
+ * 3. generic (if no BSP and no CPU dependent)
+ */
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+
+/*
+ * Does the stack grow up (toward higher addresses) or down
+ * (toward lower addresses)?
+ *
+ *  If TRUE, then the stack grows upward.
+ *  If FALSE, then the stack grows toward smaller addresses.
+ */
+
+#define CPU_STACK_GROWS_UP FALSE
+
+/*
+ * The following is the variable attribute used to force alignment
+ * of critical RTEMS structures. On some processors it may make
+ * sense to have these aligned on tighter boundaries than
+ * the minimum requirements of the compiler in order to have as
+ * much of the critical data area as possible in a cache line.
+ *
+ * The placement of this macro in the declaration of the variables
+ *  is based on the syntactic requirements of the GNU C
+ *  "__attribute__" extension.  For example with GNU C, use
+ *  the following to force a structure to a 32 byte boundary.
+ *
+ * __attribute__ ((aligned (32)))
+ *
+ * NOTE: Currently only the Priority Bit Map table uses this feature.
+ * To benefit from using this, the data must be heavily
+ * used so it will stay in the cache and used frequently enough
+ * in the executive to justify turning this on.
+ */
+
+#define CPU_STRUCTURE_ALIGNMENT \
+ __attribute__ ((aligned (PPC_CACHE_ALIGNMENT)))
+
+/*
+ * Define what is required to specify how the network to host conversion
+ * routines are handled.
+ */
+
+#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES FALSE
+#define CPU_BIG_ENDIAN TRUE
+#define CPU_LITTLE_ENDIAN FALSE
+
+
+/*
+ * Processor defined structures
+ *
+ * Examples structures include the descriptor tables from the i386
+ * and the processor control structure on the i960ca.
+ */
+
+/* may need to put some structures here. */
+
+/*
+ * Contexts
+ *
+ * Generally there are 2 types of context to save.
+ * 1. Interrupt registers to save
+ * 2. Task level registers to save
+ *
+ * This means we have the following 3 context items:
+ * 1. task level context stuff:: Context_Control
+ * 2. floating point task stuff:: Context_Control_fp
+ * 3. special interrupt level context :: Context_Control_interrupt
+ *
+ * On some processors, it is cost-effective to save only the callee
+ * preserved registers during a task context switch. This means
+ * that the ISR code needs to save those registers which do not
+ * persist across function calls. It is not mandatory to make this
+ *  distinction between the caller/callee saved registers for the
+ * purpose of minimizing context saved during task switch and on interrupts.
+ * If the cost of saving extra registers is minimal, simplicity is the
+ * choice. Save the same context on interrupt entry as for tasks in
+ * this case.
+ *
+ * Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
+ * care should be used in designing the context area.
+ *
+ * On some CPUs with hardware floating point support, the Context_Control_fp
+ *  structure will not be used or it will simply consist of an array of a
+ * fixed number of bytes. This is done when the floating point context
+ * is dumped by a "FP save context" type instruction and the format
+ * is not really defined by the CPU. In this case, there is no need
+ * to figure out the exact format -- only the size. Of course, although
+ * this is enough information for RTEMS, it is probably not enough for
+ * a debugger such as gdb. But that is another problem.
+ */
+
+#ifndef ASM
+
+typedef struct {
+ unsigned32 gpr1; /* Stack pointer for all */
+ unsigned32 gpr2; /* TOC in PowerOpen, reserved SVR4, section ptr EABI + */
+ unsigned32 gpr13; /* First non volatile PowerOpen, section ptr SVR4/EABI */
+ unsigned32 gpr14; /* Non volatile for all */
+ unsigned32 gpr15; /* Non volatile for all */
+ unsigned32 gpr16; /* Non volatile for all */
+ unsigned32 gpr17; /* Non volatile for all */
+ unsigned32 gpr18; /* Non volatile for all */
+ unsigned32 gpr19; /* Non volatile for all */
+ unsigned32 gpr20; /* Non volatile for all */
+ unsigned32 gpr21; /* Non volatile for all */
+ unsigned32 gpr22; /* Non volatile for all */
+ unsigned32 gpr23; /* Non volatile for all */
+ unsigned32 gpr24; /* Non volatile for all */
+ unsigned32 gpr25; /* Non volatile for all */
+ unsigned32 gpr26; /* Non volatile for all */
+ unsigned32 gpr27; /* Non volatile for all */
+ unsigned32 gpr28; /* Non volatile for all */
+ unsigned32 gpr29; /* Non volatile for all */
+ unsigned32 gpr30; /* Non volatile for all */
+ unsigned32 gpr31; /* Non volatile for all */
+ unsigned32 cr; /* PART of the CR is non volatile for all */
+ unsigned32 pc; /* Program counter/Link register */
+ unsigned32 msr; /* Initial interrupt level */
+} Context_Control;
+
+typedef struct {
+ /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
+ * procedure calls. However, this would mean that the interrupt
+ * frame had to hold f0-f13, and the fpscr. And as the majority
+ * of tasks will not have an FP context, we will save the whole
+ * context here.
+ */
+#if (PPC_HAS_DOUBLE == 1)
+ double f[32];
+ double fpscr;
+#else
+ float f[32];
+ float fpscr;
+#endif
+} Context_Control_fp;
+
+typedef struct CPU_Interrupt_frame {
+ unsigned32 stacklink; /* Ensure this is a real frame (also reg1 save) */
+ unsigned32 calleeLr; /* link register used by callees: SVR4/EABI */
+ /* This is what is left out of the primary contexts */
+ unsigned32 gpr0;
+ unsigned32 gpr2; /* play safe */
+ unsigned32 gpr3;
+ unsigned32 gpr4;
+ unsigned32 gpr5;
+ unsigned32 gpr6;
+ unsigned32 gpr7;
+ unsigned32 gpr8;
+ unsigned32 gpr9;
+ unsigned32 gpr10;
+ unsigned32 gpr11;
+ unsigned32 gpr12;
+ unsigned32 gpr13; /* Play safe */
+ unsigned32 gpr28; /* For internal use by the IRQ handler */
+ unsigned32 gpr29; /* For internal use by the IRQ handler */
+ unsigned32 gpr30; /* For internal use by the IRQ handler */
+ unsigned32 gpr31; /* For internal use by the IRQ handler */
+ unsigned32 cr; /* Bits of this are volatile, so no-one may save */
+ unsigned32 ctr;
+ unsigned32 xer;
+ unsigned32 lr;
+ unsigned32 pc;
+ unsigned32 msr;
+ unsigned32 pad[3];
+} CPU_Interrupt_frame;
+
+/*
+ * The following table contains the information required to configure
+ * the PowerPC processor specific parameters.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+ boolean do_zero_of_workspace;
+ unsigned32 idle_task_stack_size;
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_mpci_receive_server_stack;
+ void * (*stack_allocate_hook)( unsigned32 );
+ void (*stack_free_hook)( void* );
+ /* end of fields required on all CPUs */
+
+ unsigned32 clicks_per_usec; /* Timer clicks per microsecond */
+ boolean exceptions_in_RAM; /* TRUE if in RAM */
+
+} rtems_cpu_table;
+
+/*
+ *  Macros to access required entries in the CPU Table are in
+ * the file rtems/system.h.
+ */
+
+/*
+ * Macros to access PowerPC MPC750 specific additions to the CPU Table
+ */
+
+#define rtems_cpu_configuration_get_clicks_per_usec() \
+ (_CPU_Table.clicks_per_usec)
+
+#define rtems_cpu_configuration_get_exceptions_in_ram() \
+ (_CPU_Table.exceptions_in_RAM)
+
+/*
+ * This variable is optional. It is used on CPUs on which it is difficult
+ * to generate an "uninitialized" FP context. It is filled in by
+ * _CPU_Initialize and copied into the task's FP context area during
+ * _CPU_Context_Initialize.
+ */
+
+/* EXTERN Context_Control_fp _CPU_Null_fp_context; */
+
+/*
+ * On some CPUs, RTEMS supports a software managed interrupt stack.
+ * This stack is allocated by the Interrupt Manager and the switch
+ * is performed in _ISR_Handler. These variables contain pointers
+ * to the lowest and highest addresses in the chunk of memory allocated
+ * for the interrupt stack. Since it is unknown whether the stack
+ *  grows up or down (in general), this gives the CPU dependent
+ * code the option of picking the version it wants to use.
+ *
+ * NOTE: These two variables are required if the macro
+ * CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
+ */
+
+SCORE_EXTERN void *_CPU_Interrupt_stack_low;
+SCORE_EXTERN void *_CPU_Interrupt_stack_high;
+
+#endif /* ndef ASM */
+
+/*
+ * This defines the number of levels and the mask used to pick those
+ * bits out of a thread mode.
+ */
+
+#define CPU_MODES_INTERRUPT_LEVEL 0x00000001 /* interrupt level in mode */
+#define CPU_MODES_INTERRUPT_MASK 0x00000001 /* interrupt level in mode */
+
+/*
+ * With some compilation systems, it is difficult if not impossible to
+ * call a high-level language routine from assembly language. This
+ * is especially true of commercial Ada compilers and name mangling
+ * C++ ones. This variable can be optionally defined by the CPU porter
+ * and contains the address of the routine _Thread_Dispatch. This
+ * can make it easier to invoke that routine at the end of the interrupt
+ * sequence (if a dispatch is necessary).
+ */
+
+/* EXTERN void (*_CPU_Thread_dispatch_pointer)(); */
+
+/*
+ * Nothing prevents the porter from declaring more CPU specific variables.
+ */
+
+#ifndef ASM
+
+SCORE_EXTERN struct {
+ unsigned32 *Disable_level;
+ void *Stack;
+ volatile boolean *Switch_necessary;
+ boolean *Signal;
+
+} _CPU_IRQ_info CPU_STRUCTURE_ALIGNMENT;
+
+#endif /* ndef ASM */
+
+/*
+ * The size of the floating point context area. On some CPUs this
+ * will not be a "sizeof" because the format of the floating point
+ * area is not defined -- only the size is. This is usually on
+ * CPUs with a "floating point save context" instruction.
+ */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+/*
+ * (Optional) # of bytes for libmisc/stackchk to check
+ *  If not specified, then it defaults to something reasonable
+ * for most architectures.
+ */
+
+#define CPU_STACK_CHECK_SIZE (128)
+
+/*
+ * Amount of extra stack (above minimum stack size) required by
+ * MPCI receive server thread. Remember that in a multiprocessor
+ * system this thread must exist and be able to process all directives.
+ */
+
+#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
+
+/*
+ * This defines the number of entries in the ISR_Vector_table managed
+ * by RTEMS.
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS (PPC_INTERRUPT_MAX)
+#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (PPC_INTERRUPT_MAX - 1)
+
+/*
+ *  Should be large enough to run all RTEMS tests.  This ensures
+ *  that a "reasonably" small application should not have any problems.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE (1024*8)
+
+/*
+ * CPU's worst alignment requirement for data types on a byte boundary. This
+ * alignment does not take into account the requirements for the stack.
+ */
+
+#define CPU_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * heap handler. This alignment requirement may be stricter than that
+ * for the data types alignment specified by CPU_ALIGNMENT. It is
+ * common for the heap to follow the same alignment requirement as
+ * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap,
+ * then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ *         be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_HEAP_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for memory
+ * buffers allocated by the partition manager. This alignment requirement
+ * may be stricter than that for the data types alignment specified by
+ * CPU_ALIGNMENT. It is common for the partition to follow the same
+ * alignment requirement as CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict
+ * enough for the partition, then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ *         be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_PARTITION_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * stack. This alignment requirement may be stricter than that for the
+ * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT
+ * is strict enough for the stack, then this should be set to 0.
+ *
+ *  NOTE:  This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
+ */
+
+#define CPU_STACK_ALIGNMENT (PPC_STACK_ALIGNMENT)
+
+/*
+ * Needed for Interrupt stack
+ */
+#define CPU_MINIMUM_STACK_FRAME_SIZE 8
+
+
+/* ISR handler macros */
+
+/*
+ * Disable all interrupts for an RTEMS critical section. The previous
+ * level is returned in _isr_cookie.
+ */
+
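+/*
+ * loc_string(a,b) pastes the string literal a together with " (", the
+ * stringized token b, and ")\n"; presumably used to build debug printk
+ * format strings elsewhere in this port.
+ */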
+#define loc_string(a,b) a " (" #b ")\n"
+
+#ifndef ASM
+
+static inline unsigned32 _CPU_ISR_Get_level( void )
+{
+ register unsigned int msr;
+ _CPU_MSR_GET(msr);
+ if (msr & MSR_EE) return 0;
+ else return 1;
+}
+
+static inline void _CPU_ISR_Set_level( unsigned32 level )
+{
+ register unsigned int msr;
+ _CPU_MSR_GET(msr);
+ if (!(level & CPU_MODES_INTERRUPT_MASK)) {
+ msr |= MSR_EE;
+ }
+ else {
+ msr &= ~MSR_EE;
+ }
+ _CPU_MSR_SET(msr);
+}
+
+#define _CPU_ISR_install_vector(irq, new, old) {BSP_panic("_CPU_ISR_install_vector called\n");}
+
+/* Context handler macros */
+
+/*
+ * Initialize the context to a state suitable for starting a
+ * task after a context restore operation. Generally, this
+ * involves:
+ *
+ * - setting a starting address
+ * - preparing the stack
+ * - preparing the stack and frame pointers
+ * - setting the proper interrupt level in the context
+ * - initializing the floating point context
+ *
+ * This routine generally does not set any unnecessary register
+ * in the context. The state of the "general data" registers is
+ * undefined at task start time.
+ *
+ * NOTE: Implemented as a subroutine for the SPARC port.
+ */
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ unsigned32 *stack_base,
+ unsigned32 size,
+ unsigned32 new_level,
+ void *entry_point,
+ boolean is_fp
+);
+
+/*
+ * This routine is responsible for somehow restarting the currently
+ * executing task. If you are lucky, then all that is necessary
+ * is restoring the context. Otherwise, there will need to be
+ * a special assembly routine which does something special in this
+ * case. Context_Restore should work most of the time. It will
+ * not work if restarting self conflicts with the stack frame
+ * assumptions of restoring a context.
+ */
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ _CPU_Context_restore( (_the_context) );
+
+/*
+ * The purpose of this macro is to allow the initial pointer into
+ * a floating point context area (used to save the floating point
+ * context) to be at an arbitrary place in the floating point
+ * context area.
+ *
+ * This is necessary because some FP units are designed to have
+ * their context saved as a stack which grows into lower addresses.
+ * Other FP units can be saved by simply moving registers into offsets
+ * from the base of the context area. Finally some FP units provide
+ * a "dump context" instruction which could fill in from high to low
+ * or low to high based on the whim of the CPU designers.
+ */
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
+
+/*
+ * This routine initializes the FP context area passed to it.
+ * There are a few standard ways in which to initialize the
+ * floating point context. The code included for this macro assumes
+ * that this is a CPU in which an "initial" FP context was saved into
+ * _CPU_Null_fp_context and it simply copies it to the destination
+ * context passed to it.
+ *
+ * Other models include (1) not doing anything, and (2) putting
+ * a "null FP status word" in the correct place in the FP context.
+ */
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+ { \
+ ((Context_Control_fp *) *((void **) _destination))->fpscr = PPC_INIT_FPSCR; \
+ }
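+
+/*
+ * Of the models listed above, this port effectively uses (2): the
+ * macro simply places an initial status word (PPC_INIT_FPSCR) in the
+ * context rather than copying a previously saved "null" FP context.
+ */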
+
+/* end of Context handler macros */
+
+/* Fatal Error manager macros */
+
+/*
+ * This routine copies _error into a known place -- typically a stack
+ * location or a register, optionally disables interrupts, and
+ * halts/stops the CPU.
+ */
+
+#define _CPU_Fatal_halt( _error ) \
+ _BSP_Fatal_error(_error)
+
+/* end of Fatal Error manager macros */
+
+/* Bitfield handler macros */
+
+/*
+ * This routine sets _output to the bit number of the first bit
+ * set in _value. _value is of CPU dependent type Priority_Bit_map_control.
+ * This type may be either 16 or 32 bits wide although only the 16
+ * least significant bits will be used.
+ *
+ * There are a number of variations in the behavior of "find first bit"
+ * type instructions:
+ *
+ * (1) What happens when run on a value of zero?
+ * (2) Bits may be numbered from MSB to LSB or vice-versa.
+ * (3) The numbering may be zero or one based.
+ * (4) The "find first bit" instruction may search from MSB or LSB.
+ *
+ * RTEMS guarantees that (1) will never happen so it is not a concern.
+ * (2), (3), and (4) are handled by the macros _CPU_Priority_Mask() and
+ * _CPU_Priority_bits_index().  These three form a set of routines
+ * which must logically operate together.  Bits in the _value are
+ * set and cleared based on masks built by _CPU_Priority_Mask().
+ * The basic major and minor values calculated by _Priority_Major()
+ * and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
+ * to properly range between the values returned by the "find first bit"
+ * instruction. This makes it possible for _Priority_Get_highest() to
+ * calculate the major and directly index into the minor table.
+ * This mapping is necessary to ensure that 0 (a high priority major/minor)
+ * is the first bit found.
+ *
+ * This entire "find first bit" and mapping process depends heavily
+ * on the manner in which a priority is broken into a major and minor
+ * components with the major being the 4 MSB of a priority and minor
+ * the 4 LSB. Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
+ * priority. And (15 << 4) + 14 corresponds to priority 254 -- the next
+ * to the lowest priority.
+ *
+ * If your CPU does not have a "find first bit" instruction, then
+ * there are ways to make do without it. Here are a handful of ways
+ * to implement this in software:
+ *
+ * - a series of 16 bit test instructions
+ * - a "binary search using if's"
+ * - _number = 0
+ * if _value > 0x00ff
+ * _value >>=8
+ * _number = 8;
+ *
+ *       if _value > 0x000f
+ *         _value >>= 4
+ *         _number += 4
+ *
+ * _number += bit_set_table[ _value ]
+ *
+ * where bit_set_table[ 16 ] has values which indicate the first
+ * bit set
+ */
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ { \
+ asm volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
+ "1" ((_value))); \
+ }
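+
+/*
+ * The following is only an illustrative, portable fallback for CPUs
+ * without a count-leading-zeros style instruction; it is not used by
+ * this port (the cntlzw based macro above is).  The function name is
+ * hypothetical.
+ */
+
+static inline unsigned32 _CPU_Count_leading_zeros_sw( unsigned32 value )
+{
+  unsigned32 bit  = 0;
+  unsigned32 mask = 0x80000000;
+
+  /* scan from the MSB, matching the numbering produced by cntlzw */
+  while ( mask != 0 && (value & mask) == 0 ) {
+    bit++;
+    mask >>= 1;
+  }
+
+  return bit;   /* 32 if value is zero; RTEMS never passes zero */
+}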
+
+/* end of Bitfield handler macros */
+
+/*
+ * This routine builds the mask which corresponds to the bit fields
+ * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 0x80000000 >> (_bit_number) )
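+
+/*
+ * For example, _CPU_Priority_Mask( 0 ) is 0x80000000 and
+ * _CPU_Priority_Mask( 31 ) is 0x00000001, i.e. bit numbering starts at
+ * the most significant bit, matching the numbering produced by the
+ * cntlzw based macro above.
+ */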
+
+/*
+ * This routine translates the bit numbers returned by
+ * _CPU_Bitfield_Find_first_bit() into something suitable for use as
+ * a major or minor component of a priority. See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_bits_index( _priority ) \
+ (_priority)
+
+/* end of Priority handler macros */
+
+/* variables */
+
+extern const unsigned32 _CPU_msrs[4];
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)
+);
+
+
+/*
+ * _CPU_Install_interrupt_stack
+ *
+ * This routine installs the hardware interrupt stack pointer.
+ *
+ * NOTE: It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
+ * is TRUE.
+ */
+
+void _CPU_Install_interrupt_stack( void );
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+);
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine saves the floating point context passed to it.
+ */
+
+void _CPU_Context_save_fp(
+ void **fp_context_ptr
+);
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine restores the floating point context passed to it.
+ */
+
+void _CPU_Context_restore_fp(
+ void **fp_context_ptr
+);
+
+void _CPU_Fatal_error(
+ unsigned32 _error
+);
+
+/* The following routine swaps the endian format of an unsigned int.
+ * It must be static because it is referenced indirectly.
+ *
+ * This version will work on any processor, but if there is a better
+ * way for your CPU PLEASE use it. The most common way to do this is to:
+ *
+ * swap least significant two bytes with 16-bit rotate
+ * swap upper and lower 16-bits
+ * swap most significant two bytes with 16-bit rotate
+ *
+ * Some CPUs have special instructions which swap a 32-bit quantity in
+ * a single instruction (e.g. i486). It is probably best to avoid
+ * an "endian swapping control bit" in the CPU. One good reason is
+ * that interrupts would probably have to be disabled to ensure that
+ * an interrupt does not try to access the same "chunk" with the wrong
+ * endian.  Another good reason is that on some CPUs, the endian bit
+ * changes the endianness for ALL fetches -- both code and data -- so the
+ * code will be fetched incorrectly.
+ */
+
+static inline unsigned int CPU_swap_u32(
+ unsigned int value
+)
+{
+ unsigned32 swapped;
+
+ asm volatile("rlwimi %0,%1,8,24,31;"
+ "rlwimi %0,%1,24,16,23;"
+ "rlwimi %0,%1,8,8,15;"
+ "rlwimi %0,%1,24,0,7;" :
+ "=&r" ((swapped)) : "r" ((value)));
+
+ return( swapped );
+}
+
+#define CPU_swap_u16( value ) \
+ (((value&0xff) << 8) | ((value >> 8)&0xff))
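+
+/*
+ * Illustrative sketch only: a portable shift-and-mask equivalent of
+ * CPU_swap_u32 for reference (the rlwimi version above is what this
+ * port actually uses).  The function name is hypothetical.  Note also
+ * that CPU_swap_u16 evaluates its argument twice, so it should not be
+ * passed an expression with side effects.
+ */
+
+static inline unsigned int CPU_swap_u32_portable(
+  unsigned int value
+)
+{
+  return ((value & 0x000000ff) << 24) |
+         ((value & 0x0000ff00) <<  8) |
+         ((value & 0x00ff0000) >>  8) |
+         ((value & 0xff000000) >> 24);
+}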
+
+/*
+ * Routines to access the decrementer register
+ */
+
+#define PPC_Set_decrementer( _clicks ) \
+ do { \
+    asm volatile( "mtdec %0" : "=r" ((_clicks)) : "0" ((_clicks)) ); \
+ } while (0)
+
+/*
+ * Routines to access the time base register
+ */
+
+static inline unsigned64 PPC_Get_timebase_register( void )
+{
+ unsigned32 tbr_low;
+ unsigned32 tbr_high;
+ unsigned32 tbr_high_old;
+ unsigned64 tbr;
+
+ do {
+ asm volatile( "mftbu %0" : "=r" (tbr_high_old));
+ asm volatile( "mftb %0" : "=r" (tbr_low));
+ asm volatile( "mftbu %0" : "=r" (tbr_high));
+ } while ( tbr_high_old != tbr_high );
+
+ tbr = tbr_high;
+ tbr <<= 32;
+ tbr |= tbr_low;
+ return tbr;
+}
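+
+/*
+ * Illustrative sketch only (hypothetical variable names): the two
+ * routines above are typically used together by a clock driver, e.g.
+ *
+ *    unsigned64 start, elapsed;
+ *
+ *    PPC_Set_decrementer( clicks_per_tick );   -- arm the next tick
+ *    start   = PPC_Get_timebase_register();
+ *    ...
+ *    elapsed = PPC_Get_timebase_register() - start;  -- in timebase ticks
+ */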
+
+#endif /* ndef ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu_asm.S b/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu_asm.S
new file mode 100644
index 0000000000..213e094fa6
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu_asm.S
@@ -0,0 +1,396 @@
+
+/* cpu_asm.s 1.1 - 95/12/04
+ *
+ * This file contains the assembly code for the PowerPC implementation
+ * of RTEMS.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu_asm.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+/*
+ * Offsets for various Contexts
+ */
+ .set GP_1, 0
+ .set GP_2, (GP_1 + 4)
+ .set GP_13, (GP_2 + 4)
+ .set GP_14, (GP_13 + 4)
+
+ .set GP_15, (GP_14 + 4)
+ .set GP_16, (GP_15 + 4)
+ .set GP_17, (GP_16 + 4)
+ .set GP_18, (GP_17 + 4)
+
+ .set GP_19, (GP_18 + 4)
+ .set GP_20, (GP_19 + 4)
+ .set GP_21, (GP_20 + 4)
+ .set GP_22, (GP_21 + 4)
+
+ .set GP_23, (GP_22 + 4)
+ .set GP_24, (GP_23 + 4)
+ .set GP_25, (GP_24 + 4)
+ .set GP_26, (GP_25 + 4)
+
+ .set GP_27, (GP_26 + 4)
+ .set GP_28, (GP_27 + 4)
+ .set GP_29, (GP_28 + 4)
+ .set GP_30, (GP_29 + 4)
+
+ .set GP_31, (GP_30 + 4)
+ .set GP_CR, (GP_31 + 4)
+ .set GP_PC, (GP_CR + 4)
+ .set GP_MSR, (GP_PC + 4)
+
+ .set FP_0, 0
+ .set FP_1, (FP_0 + 4)
+ .set FP_2, (FP_1 + 4)
+ .set FP_3, (FP_2 + 4)
+ .set FP_4, (FP_3 + 4)
+ .set FP_5, (FP_4 + 4)
+ .set FP_6, (FP_5 + 4)
+ .set FP_7, (FP_6 + 4)
+ .set FP_8, (FP_7 + 4)
+ .set FP_9, (FP_8 + 4)
+ .set FP_10, (FP_9 + 4)
+ .set FP_11, (FP_10 + 4)
+ .set FP_12, (FP_11 + 4)
+ .set FP_13, (FP_12 + 4)
+ .set FP_14, (FP_13 + 4)
+ .set FP_15, (FP_14 + 4)
+ .set FP_16, (FP_15 + 4)
+ .set FP_17, (FP_16 + 4)
+ .set FP_18, (FP_17 + 4)
+ .set FP_19, (FP_18 + 4)
+ .set FP_20, (FP_19 + 4)
+ .set FP_21, (FP_20 + 4)
+ .set FP_22, (FP_21 + 4)
+ .set FP_23, (FP_22 + 4)
+ .set FP_24, (FP_23 + 4)
+ .set FP_25, (FP_24 + 4)
+ .set FP_26, (FP_25 + 4)
+ .set FP_27, (FP_26 + 4)
+ .set FP_28, (FP_27 + 4)
+ .set FP_29, (FP_28 + 4)
+ .set FP_30, (FP_29 + 4)
+ .set FP_31, (FP_30 + 4)
+ .set FP_FPSCR, (FP_31 + 4)
+
+ .set IP_LINK, 0
+ .set IP_0, (IP_LINK + 8)
+ .set IP_2, (IP_0 + 4)
+
+ .set IP_3, (IP_2 + 4)
+ .set IP_4, (IP_3 + 4)
+ .set IP_5, (IP_4 + 4)
+ .set IP_6, (IP_5 + 4)
+
+ .set IP_7, (IP_6 + 4)
+ .set IP_8, (IP_7 + 4)
+ .set IP_9, (IP_8 + 4)
+ .set IP_10, (IP_9 + 4)
+
+ .set IP_11, (IP_10 + 4)
+ .set IP_12, (IP_11 + 4)
+ .set IP_13, (IP_12 + 4)
+ .set IP_28, (IP_13 + 4)
+
+ .set IP_29, (IP_28 + 4)
+ .set IP_30, (IP_29 + 4)
+ .set IP_31, (IP_30 + 4)
+ .set IP_CR, (IP_31 + 4)
+
+ .set IP_CTR, (IP_CR + 4)
+ .set IP_XER, (IP_CTR + 4)
+ .set IP_LR, (IP_XER + 4)
+ .set IP_PC, (IP_LR + 4)
+
+ .set IP_MSR, (IP_PC + 4)
+ .set IP_END, (IP_MSR + 16)
+
+ BEGIN_CODE
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine is responsible for saving the FP context
+ *  at *fp_context_ptr.  If the location to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_save_fp)
+PROC (_CPU_Context_save_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+ stfs f0, FP_0(r3)
+ stfs f1, FP_1(r3)
+ stfs f2, FP_2(r3)
+ stfs f3, FP_3(r3)
+ stfs f4, FP_4(r3)
+ stfs f5, FP_5(r3)
+ stfs f6, FP_6(r3)
+ stfs f7, FP_7(r3)
+ stfs f8, FP_8(r3)
+ stfs f9, FP_9(r3)
+ stfs f10, FP_10(r3)
+ stfs f11, FP_11(r3)
+ stfs f12, FP_12(r3)
+ stfs f13, FP_13(r3)
+ stfs f14, FP_14(r3)
+ stfs f15, FP_15(r3)
+ stfs f16, FP_16(r3)
+ stfs f17, FP_17(r3)
+ stfs f18, FP_18(r3)
+ stfs f19, FP_19(r3)
+ stfs f20, FP_20(r3)
+ stfs f21, FP_21(r3)
+ stfs f22, FP_22(r3)
+ stfs f23, FP_23(r3)
+ stfs f24, FP_24(r3)
+ stfs f25, FP_25(r3)
+ stfs f26, FP_26(r3)
+ stfs f27, FP_27(r3)
+ stfs f28, FP_28(r3)
+ stfs f29, FP_29(r3)
+ stfs f30, FP_30(r3)
+ stfs f31, FP_31(r3)
+ mffs f2
+ stfs f2, FP_FPSCR(r3)
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine is responsible for restoring the FP context
+ *  at *fp_context_ptr.  If the location to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore_fp)
+PROC (_CPU_Context_restore_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+ lfs f2, FP_FPSCR(r3)
+ mtfsf 255, f2
+ lfs f0, FP_0(r3)
+ lfs f1, FP_1(r3)
+ lfs f2, FP_2(r3)
+ lfs f3, FP_3(r3)
+ lfs f4, FP_4(r3)
+ lfs f5, FP_5(r3)
+ lfs f6, FP_6(r3)
+ lfs f7, FP_7(r3)
+ lfs f8, FP_8(r3)
+ lfs f9, FP_9(r3)
+ lfs f10, FP_10(r3)
+ lfs f11, FP_11(r3)
+ lfs f12, FP_12(r3)
+ lfs f13, FP_13(r3)
+ lfs f14, FP_14(r3)
+ lfs f15, FP_15(r3)
+ lfs f16, FP_16(r3)
+ lfs f17, FP_17(r3)
+ lfs f18, FP_18(r3)
+ lfs f19, FP_19(r3)
+ lfs f20, FP_20(r3)
+ lfs f21, FP_21(r3)
+ lfs f22, FP_22(r3)
+ lfs f23, FP_23(r3)
+ lfs f24, FP_24(r3)
+ lfs f25, FP_25(r3)
+ lfs f26, FP_26(r3)
+ lfs f27, FP_27(r3)
+ lfs f28, FP_28(r3)
+ lfs f29, FP_29(r3)
+ lfs f30, FP_30(r3)
+ lfs f31, FP_31(r3)
+#endif
+ blr
+
+
+/* _CPU_Context_switch
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_switch)
+PROC (_CPU_Context_switch):
+ sync
+ isync
+ /* This assumes that all the registers are in the given order */
+ li r5, 32
+ addi r3,r3,-4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_18+4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stmw r13, GP_13-GP_18(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stw r14, GP_14+4(r3)
+ stw r15, GP_15+4(r3)
+ stw r16, GP_16+4(r3)
+ stw r17, GP_17+4(r3)
+ stwu r18, GP_18+4(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stw r22, GP_22-GP_18(r3)
+ stw r23, GP_23-GP_18(r3)
+ stw r24, GP_24-GP_18(r3)
+ stw r25, GP_25-GP_18(r3)
+ stw r26, GP_26-GP_18(r3)
+ stw r27, GP_27-GP_18(r3)
+ stw r28, GP_28-GP_18(r3)
+ stw r29, GP_29-GP_18(r3)
+ stw r30, GP_30-GP_18(r3)
+ stw r31, GP_31-GP_18(r3)
+#endif
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r0, r4
+#endif
+ mfcr r6
+ stw r6, GP_CR-GP_18(r3)
+ mflr r7
+ stw r7, GP_PC-GP_18(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_18(r3)
+
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_19
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lmw r13, GP_13-GP_19(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwz r15, GP_15(r4)
+ lwz r16, GP_16(r4)
+ lwz r17, GP_17(r4)
+ lwz r18, GP_18(r4)
+ lwzu r19, GP_19(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwz r23, GP_23-GP_19(r4)
+ lwz r24, GP_24-GP_19(r4)
+ lwz r25, GP_25-GP_19(r4)
+ lwz r26, GP_26-GP_19(r4)
+ lwz r27, GP_27-GP_19(r4)
+ lwz r28, GP_28-GP_19(r4)
+ lwz r29, GP_29-GP_19(r4)
+ lwz r30, GP_30-GP_19(r4)
+ lwz r31, GP_31-GP_19(r4)
+#endif
+ lwz r6, GP_CR-GP_19(r4)
+ lwz r7, GP_PC-GP_19(r4)
+ lwz r8, GP_MSR-GP_19(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+
+ blr
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+/*
+ * ACB: Don't worry about cache optimisation here - this is not THAT critical.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore)
+PROC (_CPU_Context_restore):
+ lwz r5, GP_CR(r3)
+ lwz r6, GP_PC(r3)
+ lwz r7, GP_MSR(r3)
+ mtcrf 255, r5
+ mtlr r6
+ mtmsr r7
+ lwz r1, GP_1(r3)
+ lwz r2, GP_2(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ lmw r13, GP_13(r3)
+#else
+ lwz r13, GP_13(r3)
+ lwz r14, GP_14(r3)
+ lwz r15, GP_15(r3)
+ lwz r16, GP_16(r3)
+ lwz r17, GP_17(r3)
+ lwz r18, GP_18(r3)
+ lwz r19, GP_19(r3)
+ lwz r20, GP_20(r3)
+ lwz r21, GP_21(r3)
+ lwz r22, GP_22(r3)
+ lwz r23, GP_23(r3)
+ lwz r24, GP_24(r3)
+ lwz r25, GP_25(r3)
+ lwz r26, GP_26(r3)
+ lwz r27, GP_27(r3)
+ lwz r28, GP_28(r3)
+ lwz r29, GP_29(r3)
+ lwz r30, GP_30(r3)
+ lwz r31, GP_31(r3)
+#endif
+
+ blr
+
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/README b/c/src/lib/libbsp/powerpc/support/old_exception_processing/README
new file mode 100644
index 0000000000..c72bebfe0c
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/README
@@ -0,0 +1,80 @@
+#
+# $Id$
+#
+
+There are various issues regarding this port:
+
+
+
+1) Legal
+
+This port is written by Andrew Bray <andy@i-cubed.co.uk>, and
+is copyright 1995 i-cubed ltd.
+
+This port was later updated by Joel Sherrill <joel@OARcorp.com>
+to test the support for the PPC603, PPC603e, and MPC604. This
+was tested on the PowerPC simulator PSIM and a VMEbus single board
+computer.
+
+2) CPU support.
+
+This release fully supports the PPC403GA, PPC403GB, PPC603, PPC603e,
+MPC604, MPC750, and numerous MPC8xx processors. A good faith attempt
+has been made to include support for other models based upon available
+documentation including the MPC5xx. There are two interrupt structures
+supported by the PowerPC port. The newer structure is supported by
+all the MPC750 and MPC604 BSPs. This structure is required to use
+the RDBG remote debugging support.
+
+This port was originally written and tested on the PPC403GA (using
+software floating point). Current ports are tested primarily on
+60x CPUs using the PowerPC simulator PSIM.
+
+Andrew Bray received assistance during the initial porting effort
+from IBM and Blue Micro and we would like to gratefully acknowledge
+that help.
+
+The support for the PPC602 processor is incomplete as only sketchy
+data is currently available. Perhaps this model has been dropped.
+
+3) Application Binary Interface
+
+In the context of RTEMS, the ABI is of interest for the following
+aspects:
+
+a) Register usage. Which registers are used to provide static variable
+ linkage, stack pointer etc.
+
+b) Function calling convention. How parameters are passed, how functions
+   should be invoked, how values are returned, etc.
+
+c) Stack frame layout.
+
+I am aware of a number of ABIs for the PowerPC:
+
+a) The PowerOpen ABI. This is the original Power ABI used on the RS/6000.
+ This is the only ABI supported by versions of GCC before 2.7.0.
+
+b) The SVR4 ABI. This is the ABI defined by SunSoft for the Solaris port
+ to the PowerPC.
+
+c) The Embedded ABI. This is an embedded ABI for PowerPC use, which has no
+ operating system interface defined. It is promoted by SunSoft, Motorola,
+ and Cygnus Support. Cygnus are porting the GNU toolchain to this ABI.
+
+d) GCC 2.7.0. This compiler is partway along the road to supporting the EABI,
+ but is currently halfway in between.
+
+This port was built and tested using the PowerOpen ABI, with the following
+caveat: we used an ELF assembler and linker. So some attention may be
+required on the assembler files to get them through a traditional (XCOFF)
+PowerOpen assembler.
+
+This port contains support for the other ABIs, but this may prove to be
+incomplete as it is untested.
+
+The RTEMS PowerPC port supports EABI as the primary ABI. The powerpc-rtems
+GNU toolset configuration is EABI.
+
+Andrew Bray, 4 December 1995
+Joel Sherrill, 16 July 1997
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/TODO b/c/src/lib/libbsp/powerpc/support/old_exception_processing/TODO
new file mode 100644
index 0000000000..64c96cb14c
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/TODO
@@ -0,0 +1,8 @@
+#
+# $Id$
+#
+
+Todo list:
+
+Maybe decode external interrupts like the HPPA does.
+ See c/src/lib/libcpu/powerpc/ppc403/ictrl/* for implementation on ppc403
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/c_isr.inl b/c/src/lib/libbsp/powerpc/support/old_exception_processing/c_isr.inl
new file mode 100644
index 0000000000..706d4f7e4f
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/c_isr.inl
@@ -0,0 +1,4 @@
+RTEMS_INLINE_ROUTINE boolean _ISR_Is_in_progress( void )
+{
+ return (_ISR_Nest_level != 0);
+}
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.c b/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.c
new file mode 100644
index 0000000000..7d6824cb26
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.c
@@ -0,0 +1,853 @@
+/*
+ * PowerPC CPU Dependent Source
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/context.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/interr.h>
+
+/*
+ * These are for testing purposes.
+ */
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ *    thread_dispatch - address of dispatching routine
+ */
+
+static void ppc_spurious(int, CPU_Interrupt_frame *);
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+ proc_ptr handler = (proc_ptr)ppc_spurious;
+ int i;
+#if (PPC_ABI != PPC_ABI_POWEROPEN)
+ register unsigned32 r2 = 0;
+#if (PPC_ABI != PPC_ABI_GCC27)
+ register unsigned32 r13 = 0;
+
+ asm ("mr %0,13" : "=r" ((r13)) : "0" ((r13)));
+ _CPU_IRQ_info.Default_r13 = r13;
+#endif
+
+ asm ("mr %0,2" : "=r" ((r2)) : "0" ((r2)));
+ _CPU_IRQ_info.Default_r2 = r2;
+#endif
+
+ _CPU_IRQ_info.Nest_level = &_ISR_Nest_level;
+ _CPU_IRQ_info.Disable_level = &_Thread_Dispatch_disable_level;
+ _CPU_IRQ_info.Vector_table = _ISR_Vector_table;
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ _CPU_IRQ_info.Dispatch_r2 = ((unsigned32 *)_Thread_Dispatch)[1];
+#endif
+ _CPU_IRQ_info.Switch_necessary = &_Context_Switch_necessary;
+ _CPU_IRQ_info.Signal = &_ISR_Signals_to_thread_executing;
+
+#if (PPC_USE_SPRG)
+ i = (int)&_CPU_IRQ_info;
+ asm volatile("mtspr 0x113, %0" : "=r" (i) : "0" (i)); /* SPRG 3 */
+#endif
+
+ /*
+ * Store Msr Value in the IRQ info structure.
+ */
+ _CPU_MSR_Value(_CPU_IRQ_info.msr_initial);
+
+#if (PPC_USE_SPRG)
+ i = _CPU_IRQ_info.msr_initial;
+ asm volatile("mtspr 0x112, %0" : "=r" (i) : "0" (i)); /* SPRG 2 */
+#endif
+
+ if ( cpu_table->spurious_handler )
+ handler = (proc_ptr)cpu_table->spurious_handler;
+
+ for (i = 0; i < PPC_INTERRUPT_MAX; i++)
+ _ISR_Vector_table[i] = handler;
+
+ _CPU_Table = *cpu_table;
+}
+
+/*PAGE
+ *
+ * _CPU_ISR_Calculate_level
+ *
+ * The PowerPC puts its interrupt enable status in the MSR register
+ * which also contains things like endianness control. To be more
+ *  which also contains things like endianness control.  To make matters
+ *  more awkward, the layout varies from processor to processor.  This
+ *  is why it was necessary to adopt a scheme which allows the user
+ *  to specify exactly which interrupt sources are enabled.
+
+unsigned32 _CPU_ISR_Calculate_level(
+ unsigned32 new_level
+)
+{
+ register unsigned32 new_msr = 0;
+
+ /*
+ * Set the critical interrupt enable bit
+ */
+
+#if (PPC_HAS_RFCI)
+ if ( !(new_level & PPC_INTERRUPT_LEVEL_CE) )
+ new_msr |= PPC_MSR_CE;
+#endif
+
+ if ( !(new_level & PPC_INTERRUPT_LEVEL_ME) )
+ new_msr |= PPC_MSR_ME;
+
+ if ( !(new_level & PPC_INTERRUPT_LEVEL_EE) )
+ new_msr |= PPC_MSR_EE;
+
+ return new_msr;
+}
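+
+/*
+ * For example (reading the code above): a new_level of 0 produces an
+ * MSR image with every supported enable bit set (CE where present, ME
+ * and EE), while a new_level of PPC_INTERRUPT_LEVEL_EE leaves only the
+ * external exception enable (EE) bit clear, i.e. external interrupts
+ * disabled.
+ */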
+
+/*PAGE
+ *
+ * _CPU_ISR_Set_level
+ *
+ * This routine sets the requested level in the MSR.
+ */
+
+void _CPU_ISR_Set_level(
+ unsigned32 new_level
+)
+{
+ register unsigned32 tmp = 0;
+ register unsigned32 new_msr;
+
+ new_msr = _CPU_ISR_Calculate_level( new_level );
+
+ asm volatile (
+ "mfmsr %0; andc %0,%0,%1; and %2, %2, %1; or %0, %0, %2; mtmsr %0" :
+ "=&r" ((tmp)) :
+ "r" ((PPC_MSR_DISABLE_MASK)), "r" ((new_msr)), "0" ((tmp))
+ );
+}
+
+/*PAGE
+ *
+ * _CPU_ISR_Get_level
+ *
+ * This routine gets the current interrupt level from the MSR and
+ * converts it to an RTEMS interrupt level.
+ */
+
+unsigned32 _CPU_ISR_Get_level( void )
+{
+ unsigned32 level = 0;
+ unsigned32 msr;
+
+ asm volatile("mfmsr %0" : "=r" ((msr)));
+
+ msr &= PPC_MSR_DISABLE_MASK;
+
+ /*
+ * Set the critical interrupt enable bit
+ */
+
+#if (PPC_HAS_RFCI)
+ if ( !(msr & PPC_MSR_CE) )
+ level |= PPC_INTERRUPT_LEVEL_CE;
+#endif
+
+ if ( !(msr & PPC_MSR_ME) )
+ level |= PPC_INTERRUPT_LEVEL_ME;
+
+ if ( !(msr & PPC_MSR_EE) )
+ level |= PPC_INTERRUPT_LEVEL_EE;
+
+ return level;
+}
+
+/*PAGE
+ *
+ * _CPU_Context_Initialize
+ */
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+#define CPU_MINIMUM_STACK_FRAME_SIZE 56
+#else /* PPC_ABI_SVR4 or PPC_ABI_EABI */
+#define CPU_MINIMUM_STACK_FRAME_SIZE 8
+#endif
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ unsigned32 *stack_base,
+ unsigned32 size,
+ unsigned32 new_level,
+ void *entry_point,
+ boolean is_fp
+)
+{
+ unsigned32 msr_value;
+ unsigned32 sp;
+
+ sp = (unsigned32)stack_base + size - CPU_MINIMUM_STACK_FRAME_SIZE;
+ *((unsigned32 *)sp) = 0;
+ the_context->gpr1 = sp;
+
+ the_context->msr = _CPU_ISR_Calculate_level( new_level );
+
+ /*
+ * The FP bit of the MSR should only be enabled if this is a floating
+ * point task. Unfortunately, the vfprintf_r routine in newlib
+ * ends up pushing a floating point register regardless of whether or
+ * not a floating point number is being printed. Serious restructuring
+ * of vfprintf.c will be required to avoid this behavior. At this
+ * time (7 July 1997), this restructuring is not being done.
+ */
+
+ /*if ( is_fp ) */
+ the_context->msr |= PPC_MSR_FP;
+
+ /*
+ * Calculate the task's MSR value:
+ *
+ * + Set the exception prefix bit to point to the exception table
+ * + Force the RI bit
+ * + Use the DR and IR bits
+ */
+ _CPU_MSR_Value( msr_value );
+ the_context->msr |= (msr_value & PPC_MSR_EP);
+ the_context->msr |= PPC_MSR_RI;
+ the_context->msr |= msr_value & (PPC_MSR_DR|PPC_MSR_IR);
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ { unsigned32 *desc = (unsigned32 *)entry_point;
+
+ the_context->pc = desc[0];
+ the_context->gpr2 = desc[1];
+ }
+#endif
+
+#if (PPC_ABI == PPC_ABI_SVR4)
+ { unsigned r13 = 0;
+ asm volatile ("mr %0, 13" : "=r" ((r13)));
+
+ the_context->pc = (unsigned32)entry_point;
+ the_context->gpr13 = r13;
+ }
+#endif
+
+#if (PPC_ABI == PPC_ABI_EABI)
+ { unsigned32 r2 = 0;
+ unsigned r13 = 0;
+ asm volatile ("mr %0,2; mr %1,13" : "=r" ((r2)), "=r" ((r13)));
+
+ the_context->pc = (unsigned32)entry_point;
+ the_context->gpr2 = r2;
+ the_context->gpr13 = r13;
+ }
+#endif
+}
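+
+/*
+ * Worked example (hypothetical addresses): for a 4 KB stack based at
+ * 0x00100000 under the SVR4/EABI frame size of 8 bytes, gpr1 is set to
+ * 0x00100FF8 and the back chain word stored there is zeroed; under
+ * PowerOpen (56 byte frame) gpr1 would be 0x00100FC8.
+ */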
+
+
+/* _CPU_ISR_install_vector
+ *
+ * This kernel routine installs the RTEMS handler for the
+ * specified vector.
+ *
+ * Input parameters:
+ * vector - interrupt vector number
+ * old_handler - former ISR for this vector number
+ * new_handler - replacement ISR for this vector number
+ *
+ * Output parameters: NONE
+ *
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ proc_ptr ignored;
+ *old_handler = _ISR_Vector_table[ vector ];
+
+ /*
+ * If the interrupt vector table is a table of pointer to isr entry
+ * points, then we need to install the appropriate RTEMS interrupt
+ * handler for this vector number.
+ */
+
+ /*
+ * Install the wrapper so this ISR can be invoked properly.
+ */
+ if (_CPU_Table.exceptions_in_RAM)
+ _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );
+
+ /*
+ * We put the actual user ISR address in '_ISR_vector_table'. This will
+ * be used by the _ISR_Handler so the user gets control.
+ */
+
+ _ISR_Vector_table[ vector ] = new_handler ? (ISR_Handler_entry)new_handler :
+ _CPU_Table.spurious_handler ?
+ (ISR_Handler_entry)_CPU_Table.spurious_handler :
+ (ISR_Handler_entry)ppc_spurious;
+}
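+
+/*
+ * Illustrative sketch only (the handler name is hypothetical): a BSP or
+ * driver would typically hook a decrementer tick handler with
+ *
+ *    proc_ptr old_handler;
+ *
+ *    _CPU_ISR_install_vector( PPC_IRQ_DECREMENTER,
+ *                             (proc_ptr) my_clock_isr,
+ *                             &old_handler );
+ */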
+
+/*PAGE
+ *
+ * _CPU_Install_interrupt_stack
+ */
+
+void _CPU_Install_interrupt_stack( void )
+{
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ _CPU_IRQ_info.Stack = _CPU_Interrupt_stack_high - 56;
+#else
+ _CPU_IRQ_info.Stack = _CPU_Interrupt_stack_high - 8;
+#endif
+}
+
+/* Handle a spurious interrupt */
+static void ppc_spurious(int v, CPU_Interrupt_frame *i)
+{
+#if 0
+ printf("Spurious interrupt on vector %d from %08.8x\n",
+ v, i->pc);
+#endif
+#ifdef ppc403
+ if (v == PPC_IRQ_EXTERNAL)
+ {
+ register int r = 0;
+
+ asm volatile("mtdcr 0x42, %0" :
+ "=&r" ((r)) : "0" ((r))); /* EXIER */
+ }
+ else if (v == PPC_IRQ_PIT)
+ {
+ register int r = 0x08000000;
+
+ asm volatile("mtspr 0x3d8, %0" :
+ "=&r" ((r)) : "0" ((r))); /* TSR */
+ }
+ else if (v == PPC_IRQ_FIT)
+ {
+ register int r = 0x04000000;
+
+ asm volatile("mtspr 0x3d8, %0" :
+ "=&r" ((r)) : "0" ((r))); /* TSR */
+ }
+#endif
+}
+
+void _CPU_Fatal_error(unsigned32 _error)
+{
+ asm volatile ("mr 3, %0" : : "r" ((_error)));
+ asm volatile ("tweq 5,5");
+ asm volatile ("li 0,0; mtmsr 0");
+ while (1) ;
+}
+
+#define PPC_SYNCHRONOUS_TRAP_BIT_MASK 0x100
+#define PPC_ASYNCHRONOUS_TRAP( _trap ) (_trap)
+#define PPC_SYNCHRONOUS_TRAP( _trap )    ((_trap)+PPC_SYNCHRONOUS_TRAP_BIT_MASK)
+#define PPC_REAL_TRAP_NUMBER( _trap )    ((_trap)%PPC_SYNCHRONOUS_TRAP_BIT_MASK)
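+
+/*
+ * For example, PPC_SYNCHRONOUS_TRAP( 7 ) yields 0x107 and
+ * PPC_REAL_TRAP_NUMBER( 0x107 ) recovers 7, since the bit mask simply
+ * tags a vector number as synchronous without losing its value.
+ */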
+
+
+const CPU_Trap_table_entry _CPU_Trap_slot_template = {
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+#error " Vector install not tested."
+#if (PPC_HAS_FPU)
+#error " Vector install not tested."
+ 0x9421feb0, /* stwu r1, -(20*4 + 18*8 + IP_END)(r1) */
+#else
+#error " Vector install not tested."
+ 0x9421ff40, /* stwu r1, -(20*4 + IP_END)(r1) */
+#endif
+#else
+ 0x9421ff90, /* stwu r1, -(IP_END)(r1) */
+#endif
+
+ 0x90010008, /* stw %r0, IP_0(%r1) */
+ 0x38000000, /* li %r0, PPC_IRQ */
+ 0x48000002 /* ba PROC (_ISR_Handler) */
+};
+
+#if defined(mpc860) || defined(mpc821)
+const CPU_Trap_table_entry _CPU_Trap_slot_template_m860 = {
+ 0x7c0803ac, /* mtlr %r0 */
+ 0x81210028, /* lwz %r9, IP_9(%r1) */
+ 0x38000000, /* li %r0, PPC_IRQ */
+ 0x48000002 /* b PROC (_ISR_Handler) */
+};
+#endif /* mpc860 */
+
+unsigned32 ppc_exception_vector_addr(
+ unsigned32 vector
+);
+
+
+/*PAGE
+ *
+ * _CPU_ISR_install_raw_handler
+ *
+ * This routine installs the specified handler as a "raw" non-executive
+ * supported trap handler (a.k.a. interrupt service routine).
+ *
+ * Input Parameters:
+ * vector - trap table entry number plus synchronous
+ * vs. asynchronous information
+ * new_handler - address of the handler to be installed
+ * old_handler - pointer to an address of the handler previously installed
+ *
+ *  Output Parameters:
+ *    *old_handler - address of the handler previously installed
+ *
+ * NOTE:
+ *
+ * This routine is based on the SPARC routine _CPU_ISR_install_raw_handler.
+ *  If a software trap handler is installed as an executive interrupt handler
+ *  (which is desirable since RTEMS takes care of window and register issues),
+ *  then the executive needs to know that the return address is to the trap
+ *  rather than the instruction following the trap.
+ *
+ */
+
+void _CPU_ISR_install_raw_handler(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ unsigned32 real_vector;
+ CPU_Trap_table_entry *slot;
+ unsigned32 u32_handler=0;
+
+ /*
+ * Get the "real" trap number for this vector ignoring the synchronous
+ * versus asynchronous indicator included with our vector numbers.
+ */
+
+ real_vector = vector;
+
+ /*
+ * Get the current base address of the trap table and calculate a pointer
+ * to the slot we are interested in.
+ */
+ slot = (CPU_Trap_table_entry *)ppc_exception_vector_addr( real_vector );
+
+ /*
+ * Get the address of the old_handler from the trap table.
+ *
+ * NOTE: The old_handler returned will be bogus if it does not follow
+ * the RTEMS model.
+ */
+
+#define HIGH_BITS_MASK 0xFFFFFC00
+#define HIGH_BITS_SHIFT 10
+#define LOW_BITS_MASK 0x000003FF
+
+ if (slot->stwu_r1 == _CPU_Trap_slot_template.stwu_r1) {
+ /*
+ * Set u32_handler = to target address
+ */
+ u32_handler = slot->b_Handler & 0x03fffffc;
+
+ /* IMD FIX: sign extend address fragment... */
+ if (u32_handler & 0x02000000) {
+ u32_handler |= 0xfc000000;
+ }
+
+ *old_handler = (proc_ptr) u32_handler;
+ } else
+/* There are two kinds of handlers for the MPC860. One is the 'standard'
+ * one like above. The other is for the cascaded interrupts from the SIU
+ * and CPM. Therefore we must check for the alternate one if the standard
+ * one is not present
+ */
+#if defined(mpc860) || defined(mpc821)
+ if (slot->stwu_r1 == _CPU_Trap_slot_template_m860.stwu_r1) {
+ /*
+ * Set u32_handler = to target address
+ */
+ u32_handler = slot->b_Handler & 0x03fffffc;
+ *old_handler = (proc_ptr) u32_handler;
+ } else
+#endif /* mpc860 */
+
+ *old_handler = 0;
+
+ /*
+ * Copy the template to the slot and then fix it.
+ */
+#if defined(mpc860) || defined(mpc821)
+ if (vector >= PPC_IRQ_IRQ0)
+ *slot = _CPU_Trap_slot_template_m860;
+ else
+#endif /* mpc860 */
+ *slot = _CPU_Trap_slot_template;
+
+ u32_handler = (unsigned32) new_handler;
+
+ /*
+ * IMD FIX: insert address fragment only (bits 6..29)
+ * therefore check for proper address range
+ * and remove unwanted bits
+ */
+ if ((u32_handler & 0xfc000000) == 0xfc000000) {
+ u32_handler &= ~0xfc000000;
+ }
+ else if ((u32_handler & 0xfc000000) != 0x00000000) {
+ _Internal_error_Occurred(INTERNAL_ERROR_CORE,
+ TRUE,
+ u32_handler);
+ }
+
+ slot->b_Handler |= u32_handler;
+
+ slot->li_r0_IRQ |= vector;
+
+ _CPU_Data_Cache_Block_Flush( slot );
+}
+
+unsigned32 ppc_exception_vector_addr(
+ unsigned32 vector
+)
+{
+#if (!PPC_HAS_EVPR)
+ unsigned32 Msr;
+#endif
+ unsigned32 Top = 0;
+ unsigned32 Offset = 0x000;
+
+#if (PPC_HAS_EXCEPTION_PREFIX)
+ _CPU_MSR_Value ( Msr );
+ if ( ( Msr & PPC_MSR_EP) != 0 ) /* Vectors at FFFx_xxxx */
+ Top = 0xfff00000;
+#elif (PPC_HAS_EVPR)
+ asm volatile( "mfspr %0,0x3d6" : "=r" (Top)); /* EVPR */
+ Top = Top & 0xffff0000;
+#endif
+
+ switch ( vector ) {
+ case PPC_IRQ_SYSTEM_RESET: /* on 40x aka PPC_IRQ_CRIT */
+ Offset = 0x00100;
+ break;
+ case PPC_IRQ_MCHECK:
+ Offset = 0x00200;
+ break;
+ case PPC_IRQ_PROTECT:
+ Offset = 0x00300;
+ break;
+ case PPC_IRQ_ISI:
+ Offset = 0x00400;
+ break;
+ case PPC_IRQ_EXTERNAL:
+ Offset = 0x00500;
+ break;
+ case PPC_IRQ_ALIGNMENT:
+ Offset = 0x00600;
+ break;
+ case PPC_IRQ_PROGRAM:
+ Offset = 0x00700;
+ break;
+ case PPC_IRQ_NOFP:
+ Offset = 0x00800;
+ break;
+ case PPC_IRQ_DECREMENTER:
+ Offset = 0x00900;
+ break;
+ case PPC_IRQ_RESERVED_A:
+ Offset = 0x00a00;
+ break;
+ case PPC_IRQ_RESERVED_B:
+ Offset = 0x00b00;
+ break;
+ case PPC_IRQ_SCALL:
+ Offset = 0x00c00;
+ break;
+ case PPC_IRQ_TRACE:
+ Offset = 0x00d00;
+ break;
+ case PPC_IRQ_FP_ASST:
+ Offset = 0x00e00;
+ break;
+
+#if defined(ppc403)
+
+/* PPC_IRQ_CRIT is the same vector as PPC_IRQ_RESET
+ case PPC_IRQ_CRIT:
+ Offset = 0x00100;
+ break;
+*/
+ case PPC_IRQ_PIT:
+ Offset = 0x01000;
+ break;
+ case PPC_IRQ_FIT:
+ Offset = 0x01010;
+ break;
+ case PPC_IRQ_WATCHDOG:
+ Offset = 0x01020;
+ break;
+ case PPC_IRQ_DEBUG:
+ Offset = 0x02000;
+ break;
+
+#elif defined(ppc601)
+ case PPC_IRQ_TRACE:
+ Offset = 0x02000;
+ break;
+
+#elif defined(ppc603)
+ case PPC_IRQ_TRANS_MISS:
+ Offset = 0x1000;
+ break;
+ case PPC_IRQ_DATA_LOAD:
+ Offset = 0x1100;
+ break;
+ case PPC_IRQ_DATA_STORE:
+ Offset = 0x1200;
+ break;
+ case PPC_IRQ_ADDR_BRK:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_SYS_MGT:
+ Offset = 0x1400;
+ break;
+
+#elif defined(ppc603e)
+ case PPC_TLB_INST_MISS:
+ Offset = 0x1000;
+ break;
+ case PPC_TLB_LOAD_MISS:
+ Offset = 0x1100;
+ break;
+ case PPC_TLB_STORE_MISS:
+ Offset = 0x1200;
+ break;
+ case PPC_IRQ_ADDRBRK:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_SYS_MGT:
+ Offset = 0x1400;
+ break;
+
+#elif defined(mpc604)
+ case PPC_IRQ_ADDR_BRK:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_SYS_MGT:
+ Offset = 0x1400;
+ break;
+
+#elif defined(mpc860) || defined(mpc821)
+ case PPC_IRQ_EMULATE:
+ Offset = 0x1000;
+ break;
+ case PPC_IRQ_INST_MISS:
+ Offset = 0x1100;
+ break;
+ case PPC_IRQ_DATA_MISS:
+ Offset = 0x1200;
+ break;
+ case PPC_IRQ_INST_ERR:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_DATA_ERR:
+ Offset = 0x1400;
+ break;
+ case PPC_IRQ_DATA_BPNT:
+ Offset = 0x1c00;
+ break;
+ case PPC_IRQ_INST_BPNT:
+ Offset = 0x1d00;
+ break;
+ case PPC_IRQ_IO_BPNT:
+ Offset = 0x1e00;
+ break;
+ case PPC_IRQ_DEV_PORT:
+ Offset = 0x1f00;
+ break;
+ case PPC_IRQ_IRQ0:
+ Offset = 0x2000;
+ break;
+ case PPC_IRQ_LVL0:
+ Offset = 0x2040;
+ break;
+ case PPC_IRQ_IRQ1:
+ Offset = 0x2080;
+ break;
+ case PPC_IRQ_LVL1:
+ Offset = 0x20c0;
+ break;
+ case PPC_IRQ_IRQ2:
+ Offset = 0x2100;
+ break;
+ case PPC_IRQ_LVL2:
+ Offset = 0x2140;
+ break;
+ case PPC_IRQ_IRQ3:
+ Offset = 0x2180;
+ break;
+ case PPC_IRQ_LVL3:
+ Offset = 0x21c0;
+ break;
+ case PPC_IRQ_IRQ4:
+ Offset = 0x2200;
+ break;
+ case PPC_IRQ_LVL4:
+ Offset = 0x2240;
+ break;
+ case PPC_IRQ_IRQ5:
+ Offset = 0x2280;
+ break;
+ case PPC_IRQ_LVL5:
+ Offset = 0x22c0;
+ break;
+ case PPC_IRQ_IRQ6:
+ Offset = 0x2300;
+ break;
+ case PPC_IRQ_LVL6:
+ Offset = 0x2340;
+ break;
+ case PPC_IRQ_IRQ7:
+ Offset = 0x2380;
+ break;
+ case PPC_IRQ_LVL7:
+ Offset = 0x23c0;
+ break;
+ case PPC_IRQ_CPM_RESERVED_0:
+ Offset = 0x2400;
+ break;
+ case PPC_IRQ_CPM_PC4:
+ Offset = 0x2410;
+ break;
+ case PPC_IRQ_CPM_PC5:
+ Offset = 0x2420;
+ break;
+ case PPC_IRQ_CPM_SMC2:
+ Offset = 0x2430;
+ break;
+ case PPC_IRQ_CPM_SMC1:
+ Offset = 0x2440;
+ break;
+ case PPC_IRQ_CPM_SPI:
+ Offset = 0x2450;
+ break;
+ case PPC_IRQ_CPM_PC6:
+ Offset = 0x2460;
+ break;
+ case PPC_IRQ_CPM_TIMER4:
+ Offset = 0x2470;
+ break;
+ case PPC_IRQ_CPM_RESERVED_8:
+ Offset = 0x2480;
+ break;
+ case PPC_IRQ_CPM_PC7:
+ Offset = 0x2490;
+ break;
+ case PPC_IRQ_CPM_PC8:
+ Offset = 0x24a0;
+ break;
+ case PPC_IRQ_CPM_PC9:
+ Offset = 0x24b0;
+ break;
+ case PPC_IRQ_CPM_TIMER3:
+ Offset = 0x24c0;
+ break;
+ case PPC_IRQ_CPM_RESERVED_D:
+ Offset = 0x24d0;
+ break;
+ case PPC_IRQ_CPM_PC10:
+ Offset = 0x24e0;
+ break;
+ case PPC_IRQ_CPM_PC11:
+ Offset = 0x24f0;
+ break;
+ case PPC_IRQ_CPM_I2C:
+ Offset = 0x2500;
+ break;
+ case PPC_IRQ_CPM_RISC_TIMER:
+ Offset = 0x2510;
+ break;
+ case PPC_IRQ_CPM_TIMER2:
+ Offset = 0x2520;
+ break;
+ case PPC_IRQ_CPM_RESERVED_13:
+ Offset = 0x2530;
+ break;
+ case PPC_IRQ_CPM_IDMA2:
+ Offset = 0x2540;
+ break;
+ case PPC_IRQ_CPM_IDMA1:
+ Offset = 0x2550;
+ break;
+ case PPC_IRQ_CPM_SDMA_ERROR:
+ Offset = 0x2560;
+ break;
+ case PPC_IRQ_CPM_PC12:
+ Offset = 0x2570;
+ break;
+ case PPC_IRQ_CPM_PC13:
+ Offset = 0x2580;
+ break;
+ case PPC_IRQ_CPM_TIMER1:
+ Offset = 0x2590;
+ break;
+ case PPC_IRQ_CPM_PC14:
+ Offset = 0x25a0;
+ break;
+ case PPC_IRQ_CPM_SCC4:
+ Offset = 0x25b0;
+ break;
+ case PPC_IRQ_CPM_SCC3:
+ Offset = 0x25c0;
+ break;
+ case PPC_IRQ_CPM_SCC2:
+ Offset = 0x25d0;
+ break;
+ case PPC_IRQ_CPM_SCC1:
+ Offset = 0x25e0;
+ break;
+ case PPC_IRQ_CPM_PC15:
+ Offset = 0x25f0;
+ break;
+#endif
+
+ }
+ Top += Offset;
+ return Top;
+}
+
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.h b/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.h
new file mode 100644
index 0000000000..2a502d0745
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.h
@@ -0,0 +1,1200 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the PowerPC
+ * processor.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.h:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ *  The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rtems/score/ppc.h> /* pick up machine definitions */
+#ifndef ASM
+struct CPU_Interrupt_frame;
+typedef void ( *ppc_isr_entry )( int, struct CPU_Interrupt_frame * );
+
+#include <rtems/score/ppctypes.h>
+#endif
+
+/* conditional compilation parameters */
+
+/*
+ * Should the calls to _Thread_Enable_dispatch be inlined?
+ *
+ * If TRUE, then they are inlined.
+ * If FALSE, then a subroutine call is made.
+ *
+ * Basically this is an example of the classic trade-off of size
+ * versus speed. Inlining the call (TRUE) typically increases the
+ * size of RTEMS while speeding up the enabling of dispatching.
+ * [NOTE: In general, the _Thread_Dispatch_disable_level will
+ * only be 0 or 1 unless you are in an interrupt handler and that
+ * interrupt handler invokes the executive.] When not inlined
+ *  something calls _Thread_Enable_dispatch which in turn calls
+ *  _Thread_Dispatch.  If the enable dispatch is inlined, then
+ *  one subroutine call is avoided entirely.
+ */
+
+#define CPU_INLINE_ENABLE_DISPATCH FALSE
+
+/*
+ * Should the body of the search loops in _Thread_queue_Enqueue_priority
+ *  be unrolled one time?  If unrolled, each iteration of the loop examines
+ * two "nodes" on the chain being searched. Otherwise, only one node
+ * is examined per iteration.
+ *
+ * If TRUE, then the loops are unrolled.
+ * If FALSE, then the loops are not unrolled.
+ *
+ * The primary factor in making this decision is the cost of disabling
+ *  and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
+ * body of the loop. On some CPUs, the flash is more expensive than
+ * one iteration of the loop body. In this case, it might be desirable
+ * to unroll the loop. It is important to note that on some CPUs, this
+ * code is the longest interrupt disable period in RTEMS. So it is
+ * necessary to strike a balance when setting this parameter.
+ */
+
+#define CPU_UNROLL_ENQUEUE_PRIORITY FALSE
+
+/*
+ * Does RTEMS manage a dedicated interrupt stack in software?
+ *
+ * If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
+ * If FALSE, nothing is done.
+ *
+ * If the CPU supports a dedicated interrupt stack in hardware,
+ * then it is generally the responsibility of the BSP to allocate it
+ * and set it up.
+ *
+ * If the CPU does not support a dedicated interrupt stack, then
+ * the porter has two options: (1) execute interrupts on the
+ * stack of the interrupted task, and (2) have RTEMS manage a dedicated
+ * interrupt stack.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ *  possible that both are FALSE for a particular CPU, although it
+ *  is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
+
+/*
+ * Does this CPU have hardware support for a dedicated interrupt stack?
+ *
+ * If TRUE, then it must be installed during initialization.
+ * If FALSE, then no installation is performed.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ *  possible that both are FALSE for a particular CPU, although it
+ *  is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+/*
+ * ACB: This is a lie, but it gets us a handle on a call to set up
+ * a variable derived from the top of the interrupt stack.
+ */
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE
+
+/*
+ * Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
+ *
+ * If TRUE, then the memory is allocated during initialization.
+ *  If FALSE, then the memory is not allocated by RTEMS.
+ *
+ *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
+ * or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
+ */
+
+#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
+
+/*
+ *  Does RTEMS invoke the user's ISR with the vector number and
+ * a pointer to the saved interrupt frame (1) or just the vector
+ * number (0)?
+ */
+
+#define CPU_ISR_PASSES_FRAME_POINTER 1
+
+/*
+ * Does the CPU have hardware floating point?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
+ *
+ * If there is a FP coprocessor such as the i387 or mc68881, then
+ * the answer is TRUE.
+ *
+ * The macro name "PPC_HAS_FPU" should be made CPU specific.
+ * It indicates whether or not this CPU model has FP support. For
+ * example, it would be possible to have an i386_nofp CPU model
+ * which set this to false to indicate that you have an i386 without
+ * an i387 and wish to leave floating point support out of RTEMS.
+ */
+
+#if ( PPC_HAS_FPU == 1 )
+#define CPU_HARDWARE_FP TRUE
+#else
+#define CPU_HARDWARE_FP FALSE
+#endif
+
+/*
+ * Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
+ *
+ * So far, the only CPU in which this option has been used is the
+ * HP PA-RISC. The HP C compiler and gcc both implicitly use the
+ * floating point registers to perform integer multiplies. If
+ *  a function which you would not expect to utilize the FP unit DOES,
+ * then one can not easily predict which tasks will use the FP hardware.
+ * In this case, this option should be TRUE.
+ *
+ * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
+ */
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+
+/*
+ * Should the IDLE task have a floating point context?
+ *
+ * If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
+ * and it has a floating point context which is switched in and out.
+ * If FALSE, then the IDLE task does not have a floating point context.
+ *
+ * Setting this to TRUE negatively impacts the time required to preempt
+ * the IDLE task from an interrupt because the floating point context
+ * must be saved as part of the preemption.
+ */
+
+#define CPU_IDLE_TASK_IS_FP FALSE
+
+/*
+ * Should the saving of the floating point registers be deferred
+ * until a context switch is made to another different floating point
+ * task?
+ *
+ * If TRUE, then the floating point context will not be stored until
+ * necessary. It will remain in the floating point registers and not
+ * disturned until another floating point task is switched to.
+ *  disturbed until another floating point task is switched to.
+ * If FALSE, then the floating point context is saved when a floating
+ * point task is switched out and restored when the next floating point
+ * task is restored. The state of the floating point registers between
+ * those two operations is not specified.
+ *
+ * If the floating point context does NOT have to be saved as part of
+ * interrupt dispatching, then it should be safe to set this to TRUE.
+ *
+ * Setting this flag to TRUE results in using a different algorithm
+ * for deciding when to save and restore the floating point context.
+ * The deferred FP switch algorithm minimizes the number of times
+ * the FP context is saved and restored. The FP context is not saved
+ * until a context switch is made to another, different FP task.
+ * Thus in a system with only one FP task, the FP context will never
+ * be saved or restored.
+ */
+/*
+ * ACB Note: This could make debugging tricky..
+ */
+
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+
+/*
+ * Does this port provide a CPU dependent IDLE task implementation?
+ *
+ * If TRUE, then the routine _CPU_Thread_Idle_body
+ * must be provided and is the default IDLE thread body instead of
+ *  _Thread_Idle_body.
+ *
+ * If FALSE, then use the generic IDLE thread body if the BSP does
+ * not provide one.
+ *
+ * This is intended to allow for supporting processors which have
+ * a low power or idle mode. When the IDLE thread is executed, then
+ * the CPU can be powered down.
+ *
+ * The order of precedence for selecting the IDLE thread body is:
+ *
+ * 1. BSP provided
+ * 2. CPU dependent (if provided)
+ * 3. generic (if no BSP and no CPU dependent)
+ */
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+
+/*
+ * Does the stack grow up (toward higher addresses) or down
+ * (toward lower addresses)?
+ *
+ *  If TRUE, then the stack grows upward.
+ *  If FALSE, then the stack grows toward smaller addresses.
+ */
+
+#define CPU_STACK_GROWS_UP FALSE
+
+/*
+ * The following is the variable attribute used to force alignment
+ * of critical RTEMS structures. On some processors it may make
+ * sense to have these aligned on tighter boundaries than
+ * the minimum requirements of the compiler in order to have as
+ * much of the critical data area as possible in a cache line.
+ *
+ * The placement of this macro in the declaration of the variables
+ *  is based on the syntactic requirements of the GNU C
+ *  "__attribute__" extension.  For example with GNU C, use
+ *  the following to force a structure to a 32 byte boundary.
+ *
+ * __attribute__ ((aligned (32)))
+ *
+ * NOTE: Currently only the Priority Bit Map table uses this feature.
+ * To benefit from using this, the data must be heavily
+ * used so it will stay in the cache and used frequently enough
+ * in the executive to justify turning this on.
+ */
+
+#define CPU_STRUCTURE_ALIGNMENT \
+ __attribute__ ((aligned (PPC_CACHE_ALIGNMENT)))
+
+/*
+ * Define what is required to specify how the network to host conversion
+ * routines are handled.
+ */
+
+#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES FALSE
+#define CPU_BIG_ENDIAN TRUE
+#define CPU_LITTLE_ENDIAN FALSE
+
+/*
+ * The following defines the number of bits actually used in the
+ * interrupt field of the task mode. How those bits map to the
+ * CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
+ *
+ * The interrupt level is bit mapped for the PowerPC family. The
+ *  bits are set to 0 to indicate that a particular exception source is
+ *  enabled and to 1 if it is disabled.  This keeps with the RTEMS convention
+ * that interrupt level 0 means all sources are enabled.
+ *
+ * The bits are assigned to correspond to enable bits in the MSR.
+ */
+
+#define PPC_INTERRUPT_LEVEL_ME 0x01
+#define PPC_INTERRUPT_LEVEL_EE 0x02
+#define PPC_INTERRUPT_LEVEL_CE 0x04
+
+/* XXX should these be maskable? */
+#if 0
+#define PPC_INTERRUPT_LEVEL_DE 0x08
+#define PPC_INTERRUPT_LEVEL_BE 0x10
+#define PPC_INTERRUPT_LEVEL_SE 0x20
+#endif
+
+#define CPU_MODES_INTERRUPT_MASK 0x00000007
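+
+/*
+ * For example, a task mode interrupt level of
+ * (PPC_INTERRUPT_LEVEL_EE | PPC_INTERRUPT_LEVEL_ME), i.e. 0x03,
+ * disables both external and machine check exceptions while leaving
+ * the critical exception (where supported) enabled.
+ */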
+
+/*
+ * Processor defined structures
+ *
+ *  Example structures include the descriptor tables from the i386
+ * and the processor control structure on the i960ca.
+ */
+
+/* may need to put some structures here. */
+
+/*
+ * Contexts
+ *
+ * Generally there are 2 types of context to save.
+ * 1. Interrupt registers to save
+ * 2. Task level registers to save
+ *
+ * This means we have the following 3 context items:
+ * 1. task level context stuff:: Context_Control
+ * 2. floating point task stuff:: Context_Control_fp
+ * 3. special interrupt level context :: Context_Control_interrupt
+ *
+ * On some processors, it is cost-effective to save only the callee
+ * preserved registers during a task context switch. This means
+ * that the ISR code needs to save those registers which do not
+ *  persist across function calls.  It is not mandatory to make this
+ *  distinction between the caller/callee saved registers for the
+ * purpose of minimizing context saved during task switch and on interrupts.
+ * If the cost of saving extra registers is minimal, simplicity is the
+ * choice. Save the same context on interrupt entry as for tasks in
+ * this case.
+ *
+ * Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
+ * care should be used in designing the context area.
+ *
+ * On some CPUs with hardware floating point support, the Context_Control_fp
+ *  structure will not be used, or it will simply consist of an array of a
+ * fixed number of bytes. This is done when the floating point context
+ * is dumped by a "FP save context" type instruction and the format
+ * is not really defined by the CPU. In this case, there is no need
+ * to figure out the exact format -- only the size. Of course, although
+ * this is enough information for RTEMS, it is probably not enough for
+ * a debugger such as gdb. But that is another problem.
+ */
+
+typedef struct {
+ unsigned32 gpr1; /* Stack pointer for all */
+ unsigned32 gpr2; /* TOC in PowerOpen, reserved SVR4, section ptr EABI + */
+ unsigned32 gpr13; /* First non volatile PowerOpen, section ptr SVR4/EABI */
+ unsigned32 gpr14; /* Non volatile for all */
+ unsigned32 gpr15; /* Non volatile for all */
+ unsigned32 gpr16; /* Non volatile for all */
+ unsigned32 gpr17; /* Non volatile for all */
+ unsigned32 gpr18; /* Non volatile for all */
+ unsigned32 gpr19; /* Non volatile for all */
+ unsigned32 gpr20; /* Non volatile for all */
+ unsigned32 gpr21; /* Non volatile for all */
+ unsigned32 gpr22; /* Non volatile for all */
+ unsigned32 gpr23; /* Non volatile for all */
+ unsigned32 gpr24; /* Non volatile for all */
+ unsigned32 gpr25; /* Non volatile for all */
+ unsigned32 gpr26; /* Non volatile for all */
+ unsigned32 gpr27; /* Non volatile for all */
+ unsigned32 gpr28; /* Non volatile for all */
+ unsigned32 gpr29; /* Non volatile for all */
+ unsigned32 gpr30; /* Non volatile for all */
+ unsigned32 gpr31; /* Non volatile for all */
+ unsigned32 cr; /* PART of the CR is non volatile for all */
+ unsigned32 pc; /* Program counter/Link register */
+ unsigned32 msr; /* Initial interrupt level */
+} Context_Control;
+
+typedef struct {
+ /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
+ * procedure calls. However, this would mean that the interrupt
+ * frame had to hold f0-f13, and the fpscr. And as the majority
+ * of tasks will not have an FP context, we will save the whole
+ * context here.
+ */
+#if (PPC_HAS_DOUBLE == 1)
+ double f[32];
+ double fpscr;
+#else
+ float f[32];
+ float fpscr;
+#endif
+} Context_Control_fp;
+
+typedef struct CPU_Interrupt_frame {
+ unsigned32 stacklink; /* Ensure this is a real frame (also reg1 save) */
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ unsigned32 dummy[13]; /* Used by callees: PowerOpen ABI */
+#else
+ unsigned32 dummy[1]; /* Used by callees: SVR4/EABI */
+#endif
+ /* This is what is left out of the primary contexts */
+ unsigned32 gpr0;
+ unsigned32 gpr2; /* play safe */
+ unsigned32 gpr3;
+ unsigned32 gpr4;
+ unsigned32 gpr5;
+ unsigned32 gpr6;
+ unsigned32 gpr7;
+ unsigned32 gpr8;
+ unsigned32 gpr9;
+ unsigned32 gpr10;
+ unsigned32 gpr11;
+ unsigned32 gpr12;
+ unsigned32 gpr13; /* Play safe */
+ unsigned32 gpr28; /* For internal use by the IRQ handler */
+ unsigned32 gpr29; /* For internal use by the IRQ handler */
+ unsigned32 gpr30; /* For internal use by the IRQ handler */
+ unsigned32 gpr31; /* For internal use by the IRQ handler */
+ unsigned32 cr; /* Bits of this are volatile, so no-one may save */
+ unsigned32 ctr;
+ unsigned32 xer;
+ unsigned32 lr;
+ unsigned32 pc;
+ unsigned32 msr;
+ unsigned32 pad[3];
+} CPU_Interrupt_frame;
+
+
+/*
+ * The following table contains the information required to configure
+ * the PowerPC processor specific parameters.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+ boolean do_zero_of_workspace;
+ unsigned32 idle_task_stack_size;
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_mpci_receive_server_stack;
+ void * (*stack_allocate_hook)( unsigned32 );
+ void (*stack_free_hook)( void* );
+ /* end of fields required on all CPUs */
+
+ unsigned32 clicks_per_usec; /* Timer clicks per microsecond */
+ void (*spurious_handler)(unsigned32 vector, CPU_Interrupt_frame *);
+ boolean exceptions_in_RAM; /* TRUE if in RAM */
+
+#if (defined(ppc403) || defined(mpc860) || defined(mpc821))
+ unsigned32 serial_per_sec; /* Serial clocks per second */
+ boolean serial_external_clock;
+ boolean serial_xon_xoff;
+ boolean serial_cts_rts;
+ unsigned32 serial_rate;
+ unsigned32 timer_average_overhead; /* Average overhead of timer in ticks */
+ unsigned32 timer_least_valid; /* Least valid number from timer */
+ boolean timer_internal_clock; /* TRUE, when timer runs with CPU clk */
+#endif
+
+#if (defined(mpc860) || defined(mpc821))
+ unsigned32 clock_speed; /* Speed of CPU in Hz */
+#endif
+} rtems_cpu_table;
+
+/*
+ *  Macros to access required entries in the CPU Table are in
+ * the file rtems/system.h.
+ */
+
+/*
+ * Macros to access PowerPC specific additions to the CPU Table
+ */
+
+#define rtems_cpu_configuration_get_clicks_per_usec() \
+ (_CPU_Table.clicks_per_usec)
+
+#define rtems_cpu_configuration_get_spurious_handler() \
+ (_CPU_Table.spurious_handler)
+
+#define rtems_cpu_configuration_get_exceptions_in_ram() \
+ (_CPU_Table.exceptions_in_RAM)
+
+#if (defined(ppc403) || defined(mpc860) || defined(mpc821))
+
+#define rtems_cpu_configuration_get_serial_per_sec() \
+ (_CPU_Table.serial_per_sec)
+
+#define rtems_cpu_configuration_get_serial_external_clock() \
+ (_CPU_Table.serial_external_clock)
+
+#define rtems_cpu_configuration_get_serial_xon_xoff() \
+ (_CPU_Table.serial_xon_xoff)
+
+#define rtems_cpu_configuration_get_serial_cts_rts() \
+ (_CPU_Table.serial_cts_rts)
+
+#define rtems_cpu_configuration_get_serial_rate() \
+ (_CPU_Table.serial_rate)
+
+#define rtems_cpu_configuration_get_timer_average_overhead() \
+ (_CPU_Table.timer_average_overhead)
+
+#define rtems_cpu_configuration_get_timer_least_valid() \
+ (_CPU_Table.timer_least_valid)
+
+#define rtems_cpu_configuration_get_timer_internal_clock() \
+ (_CPU_Table.timer_internal_clock)
+
+#endif
+
+#if (defined(mpc860) || defined(mpc821))
+#define rtems_cpu_configuration_get_clock_speed() \
+ (_CPU_Table.clock_speed)
+#endif
+
+
+/*
+ * The following type defines an entry in the PPC's trap table.
+ *
+ * NOTE: The instructions chosen are RTEMS dependent although one is
+ * obligated to use two of the four instructions to perform a
+ * long jump. The other instructions load one register with the
+ * trap type (a.k.a. vector) and another with the psr.
+ */
+
+typedef struct {
+ unsigned32 stwu_r1; /* stwu %r1, -(??+IP_END)(%1)*/
+ unsigned32 stw_r0; /* stw %r0, IP_0(%r1) */
+ unsigned32 li_r0_IRQ; /* li %r0, _IRQ */
+ unsigned32 b_Handler; /* b PROC (_ISR_Handler) */
+} CPU_Trap_table_entry;
+
+/*
+ * This variable is optional. It is used on CPUs on which it is difficult
+ * to generate an "uninitialized" FP context. It is filled in by
+ * _CPU_Initialize and copied into the task's FP context area during
+ * _CPU_Context_Initialize.
+ */
+
+/* EXTERN Context_Control_fp _CPU_Null_fp_context; */
+
+/*
+ * On some CPUs, RTEMS supports a software managed interrupt stack.
+ * This stack is allocated by the Interrupt Manager and the switch
+ * is performed in _ISR_Handler. These variables contain pointers
+ * to the lowest and highest addresses in the chunk of memory allocated
+ * for the interrupt stack. Since it is unknown whether the stack
+ *  grows up or down (in general), this gives the CPU dependent
+ * code the option of picking the version it wants to use.
+ *
+ * NOTE: These two variables are required if the macro
+ * CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
+ */
+
+SCORE_EXTERN void *_CPU_Interrupt_stack_low;
+SCORE_EXTERN void *_CPU_Interrupt_stack_high;
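+
+/*
+ *  Illustrative sketch only (not this port's implementation): a port with a
+ *  downward-growing stack could derive its initial interrupt stack pointer
+ *  from the variables above, for example:
+ *
+ *    void _CPU_Install_interrupt_stack( void )
+ *    {
+ *      _CPU_Interrupt_stack_pointer =            (hypothetical port variable)
+ *        (void *)((unsigned32) _CPU_Interrupt_stack_high &
+ *                 ~(CPU_STACK_ALIGNMENT - 1));
+ *    }
+ *
+ *  On this PowerPC port the software interrupt stack switch is actually
+ *  performed in irq_stub.S using the Stack field of _CPU_IRQ_info.
+ */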
+
+/*
+ * With some compilation systems, it is difficult if not impossible to
+ * call a high-level language routine from assembly language. This
+ *  is especially true of commercial Ada compilers and name-mangling
+ *  C++ compilers.  This variable can be optionally defined by the CPU porter
+ * and contains the address of the routine _Thread_Dispatch. This
+ * can make it easier to invoke that routine at the end of the interrupt
+ * sequence (if a dispatch is necessary).
+ */
+
+/* EXTERN void (*_CPU_Thread_dispatch_pointer)(); */
+
+/*
+ * Nothing prevents the porter from declaring more CPU specific variables.
+ */
+
+
+SCORE_EXTERN struct {
+ unsigned32 *Nest_level;
+ unsigned32 *Disable_level;
+ void *Vector_table;
+ void *Stack;
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ unsigned32 Dispatch_r2;
+#else
+ unsigned32 Default_r2;
+#if (PPC_ABI != PPC_ABI_GCC27)
+ unsigned32 Default_r13;
+#endif
+#endif
+ volatile boolean *Switch_necessary;
+ boolean *Signal;
+
+ unsigned32 msr_initial;
+} _CPU_IRQ_info CPU_STRUCTURE_ALIGNMENT;
+
+/*
+ * The size of the floating point context area. On some CPUs this
+ * will not be a "sizeof" because the format of the floating point
+ * area is not defined -- only the size is. This is usually on
+ * CPUs with a "floating point save context" instruction.
+ */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+/*
+ * (Optional) # of bytes for libmisc/stackchk to check
+ *  If not specified, then it defaults to something reasonable
+ * for most architectures.
+ */
+
+#define CPU_STACK_CHECK_SIZE (128)
+
+/*
+ * Amount of extra stack (above minimum stack size) required by
+ * MPCI receive server thread. Remember that in a multiprocessor
+ * system this thread must exist and be able to process all directives.
+ */
+
+#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
+
+/*
+ * This defines the number of entries in the ISR_Vector_table managed
+ * by RTEMS.
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS (PPC_INTERRUPT_MAX)
+#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (PPC_INTERRUPT_MAX - 1)
+
+/*
+ *  Should be large enough to run all RTEMS tests.  This ensures
+ *  that a reasonably small application should not have any problems.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE (1024*8)
+
+/*
+ * CPU's worst alignment requirement for data types on a byte boundary. This
+ * alignment does not take into account the requirements for the stack.
+ */
+
+#define CPU_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * heap handler. This alignment requirement may be stricter than that
+ * for the data types alignment specified by CPU_ALIGNMENT. It is
+ * common for the heap to follow the same alignment requirement as
+ * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap,
+ * then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ *        be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_HEAP_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for memory
+ * buffers allocated by the partition manager. This alignment requirement
+ * may be stricter than that for the data types alignment specified by
+ * CPU_ALIGNMENT. It is common for the partition to follow the same
+ * alignment requirement as CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict
+ * enough for the partition, then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ *        be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_PARTITION_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * stack. This alignment requirement may be stricter than that for the
+ * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT
+ * is strict enough for the stack, then this should be set to 0.
+ *
+ * NOTE: This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
+ */
+
+#define CPU_STACK_ALIGNMENT (PPC_STACK_ALIGNMENT)
+
+/* ISR handler macros */
+
+/*
+ * Disable all interrupts for an RTEMS critical section. The previous
+ * level is returned in _isr_cookie.
+ */
+
+#define loc_string(a,b) a " (" #b ")\n"
+
+#define _CPU_MSR_Value( _msr_value ) \
+ do { \
+ _msr_value = 0; \
+ asm volatile ("mfmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); \
+ } while (0)
+
+#define _CPU_MSR_SET( _msr_value ) \
+{ asm volatile ("mtmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); }
+
+#if 0
+#define _CPU_ISR_Disable( _isr_cookie ) \
+ { register unsigned int _disable_mask = PPC_MSR_DISABLE_MASK; \
+ _isr_cookie = 0; \
+ asm volatile (
+ "mfmsr %0" : \
+ "=r" ((_isr_cookie)) : \
+ "0" ((_isr_cookie)) \
+ ); \
+ asm volatile (
+ "andc %1,%0,%1" : \
+ "=r" ((_isr_cookie)), "=&r" ((_disable_mask)) : \
+ "0" ((_isr_cookie)), "1" ((_disable_mask)) \
+ ); \
+ asm volatile (
+ "mtmsr %1" : \
+ "=r" ((_disable_mask)) : \
+ "0" ((_disable_mask)) \
+ ); \
+ }
+#endif
+
+#define _CPU_ISR_Disable( _isr_cookie ) \
+ { register unsigned int _disable_mask = PPC_MSR_DISABLE_MASK; \
+ _isr_cookie = 0; \
+ asm volatile ( \
+ "mfmsr %0; andc %1,%0,%1; mtmsr %1" : \
+ "=&r" ((_isr_cookie)), "=&r" ((_disable_mask)) : \
+ "0" ((_isr_cookie)), "1" ((_disable_mask)) \
+ ); \
+ }
+
+
+#define _CPU_Data_Cache_Block_Flush( _address ) \
+ do { register void *__address = (_address); \
+ register unsigned32 _zero = 0; \
+ asm volatile ( "dcbf %0,%1" : \
+ "=r" (_zero), "=r" (__address) : \
+ "0" (_zero), "1" (__address) \
+ ); \
+ } while (0)
+
+
+/*
+ * Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
+ * This indicates the end of an RTEMS critical section. The parameter
+ * _isr_cookie is not modified.
+ */
+
+#define _CPU_ISR_Enable( _isr_cookie ) \
+ { \
+ asm volatile ( "mtmsr %0" : \
+ "=r" ((_isr_cookie)) : \
+ "0" ((_isr_cookie))); \
+ }
+
+/*
+ *  This temporarily restores interrupts to the level in _isr_cookie before immediately
+ * disabling them again. This is used to divide long RTEMS critical
+ * sections into two or more parts. The parameter _isr_cookie is not
+ * modified.
+ *
+ * NOTE: The version being used is not very optimized but it does
+ * not trip a problem in gcc where the disable mask does not
+ *        get loaded.  Check this for future (post 10/97) gcc versions.
+ */
+
+#define _CPU_ISR_Flash( _isr_cookie ) \
+ { register unsigned int _disable_mask = PPC_MSR_DISABLE_MASK; \
+ asm volatile ( \
+ "mtmsr %0; andc %1,%0,%1; mtmsr %1" : \
+ "=r" ((_isr_cookie)), "=r" ((_disable_mask)) : \
+ "0" ((_isr_cookie)), "1" ((_disable_mask)) \
+ ); \
+ }
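+
+/*
+ *  Typical usage of the ISR disable/flash/enable macros above (illustrative
+ *  only; the local variable name "level" is arbitrary):
+ *
+ *    unsigned32 level;
+ *
+ *    _CPU_ISR_Disable( level );   -- save MSR, clear PPC_MSR_DISABLE_MASK bits
+ *    ...update data shared with interrupt handlers...
+ *    _CPU_ISR_Flash( level );     -- briefly let pending interrupts in
+ *    ...remainder of the critical section...
+ *    _CPU_ISR_Enable( level );    -- restore the saved MSR value
+ */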
+
+/*
+ * Map interrupt level in task mode onto the hardware that the CPU
+ * actually provides. Currently, interrupt levels which do not
+ * map onto the CPU in a generic fashion are undefined. Someday,
+ * it would be nice if these were "mapped" by the application
+ * via a callout. For example, m68k has 8 levels 0 - 7, levels
+ * 8 - 255 would be available for bsp/application specific meaning.
+ * This could be used to manage a programmable interrupt controller
+ * via the rtems_task_mode directive.
+ */
+
+unsigned32 _CPU_ISR_Calculate_level(
+ unsigned32 new_level
+);
+
+void _CPU_ISR_Set_level(
+ unsigned32 new_level
+);
+
+unsigned32 _CPU_ISR_Get_level( void );
+
+void _CPU_ISR_install_raw_handler(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/* end of ISR handler macros */
+
+/*
+ * Simple spin delay in microsecond units for device drivers.
+ * This is very dependent on the clock speed of the target.
+ */
+
+#define CPU_Get_timebase_low( _value ) \
+ asm volatile( "mftb %0" : "=r" (_value) )
+
+#define delay( _microseconds ) \
+ do { \
+ unsigned32 start, ticks, now; \
+ CPU_Get_timebase_low( start ) ; \
+ ticks = (_microseconds) * _CPU_Table.clicks_per_usec; \
+ do \
+ CPU_Get_timebase_low( now ) ; \
+ while (now - start < ticks); \
+ } while (0)
+
+#define delay_in_bus_cycles( _cycles ) \
+ do { \
+ unsigned32 start, now; \
+ CPU_Get_timebase_low( start ); \
+ do \
+ CPU_Get_timebase_low( now ); \
+ while (now - start < (_cycles)); \
+ } while (0)
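+
+/*
+ *  Example (illustrative): assuming the BSP sets clicks_per_usec to the
+ *  number of time base ticks per microsecond, delay( 100 ) busy-waits for
+ *  roughly 100 microseconds, while delay_in_bus_cycles( 400 ) waits until
+ *  the lower time base register has advanced by 400 ticks.
+ */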
+
+
+
+/* Context handler macros */
+
+/*
+ * Initialize the context to a state suitable for starting a
+ * task after a context restore operation. Generally, this
+ * involves:
+ *
+ * - setting a starting address
+ * - preparing the stack
+ * - preparing the stack and frame pointers
+ * - setting the proper interrupt level in the context
+ * - initializing the floating point context
+ *
+ * This routine generally does not set any unnecessary register
+ * in the context. The state of the "general data" registers is
+ * undefined at task start time.
+ *
+ * NOTE: Implemented as a subroutine for the SPARC port.
+ */
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ unsigned32 *stack_base,
+ unsigned32 size,
+ unsigned32 new_level,
+ void *entry_point,
+ boolean is_fp
+);
+
+/*
+ * This routine is responsible for somehow restarting the currently
+ * executing task. If you are lucky, then all that is necessary
+ * is restoring the context. Otherwise, there will need to be
+ * a special assembly routine which does something special in this
+ * case. Context_Restore should work most of the time. It will
+ * not work if restarting self conflicts with the stack frame
+ * assumptions of restoring a context.
+ */
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ _CPU_Context_restore( (_the_context) );
+
+/*
+ * The purpose of this macro is to allow the initial pointer into
+ * a floating point context area (used to save the floating point
+ * context) to be at an arbitrary place in the floating point
+ * context area.
+ *
+ * This is necessary because some FP units are designed to have
+ * their context saved as a stack which grows into lower addresses.
+ * Other FP units can be saved by simply moving registers into offsets
+ * from the base of the context area. Finally some FP units provide
+ * a "dump context" instruction which could fill in from high to low
+ * or low to high based on the whim of the CPU designers.
+ */
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
+
+/*
+ *  This routine initializes the FP context area passed to it.
+ *  There are a few standard ways in which to initialize the
+ *  floating point context.  One model copies an "initial" FP context
+ *  that was saved into _CPU_Null_fp_context by _CPU_Initialize into the
+ *  destination context.  Other models include (1) not doing anything, and
+ *  (2) putting a "null FP status word" in the correct place in the FP
+ *  context.  This port takes the latter approach: the macro below simply
+ *  stores PPC_INIT_FPSCR into the fpscr field of the destination context.
+ */
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+ { \
+ ((Context_Control_fp *) *((void **) _destination))->fpscr = PPC_INIT_FPSCR; \
+ }
+
+/* end of Context handler macros */
+
+/* Fatal Error manager macros */
+
+/*
+ * This routine copies _error into a known place -- typically a stack
+ * location or a register, optionally disables interrupts, and
+ * halts/stops the CPU.
+ */
+
+#define _CPU_Fatal_halt( _error ) \
+ _CPU_Fatal_error(_error)
+
+/* end of Fatal Error manager macros */
+
+/* Bitfield handler macros */
+
+/*
+ * This routine sets _output to the bit number of the first bit
+ * set in _value. _value is of CPU dependent type Priority_Bit_map_control.
+ * This type may be either 16 or 32 bits wide although only the 16
+ * least significant bits will be used.
+ *
+ *  There are a number of variables to consider when using a "find first bit" type
+ * instruction.
+ *
+ * (1) What happens when run on a value of zero?
+ * (2) Bits may be numbered from MSB to LSB or vice-versa.
+ * (3) The numbering may be zero or one based.
+ * (4) The "find first bit" instruction may search from MSB or LSB.
+ *
+ * RTEMS guarantees that (1) will never happen so it is not a concern.
+ * (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
+ * _CPU_Priority_Bits_index(). These three form a set of routines
+ * which must logically operate together. Bits in the _value are
+ * set and cleared based on masks built by _CPU_Priority_mask().
+ * The basic major and minor values calculated by _Priority_Major()
+ * and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
+ * to properly range between the values returned by the "find first bit"
+ * instruction. This makes it possible for _Priority_Get_highest() to
+ * calculate the major and directly index into the minor table.
+ * This mapping is necessary to ensure that 0 (a high priority major/minor)
+ * is the first bit found.
+ *
+ * This entire "find first bit" and mapping process depends heavily
+ * on the manner in which a priority is broken into a major and minor
+ * components with the major being the 4 MSB of a priority and minor
+ * the 4 LSB. Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
+ * priority. And (15 << 4) + 14 corresponds to priority 254 -- the next
+ * to the lowest priority.
+ *
+ * If your CPU does not have a "find first bit" instruction, then
+ * there are ways to make do without it. Here are a handful of ways
+ * to implement this in software:
+ *
+ * - a series of 16 bit test instructions
+ * - a "binary search using if's"
+ * - _number = 0
+ * if _value > 0x00ff
+ * _value >>=8
+ * _number = 8;
+ *
+ *        if _value > 0x000f
+ *          _value >>=4
+ * _number += 4
+ *
+ * _number += bit_set_table[ _value ]
+ *
+ * where bit_set_table[ 16 ] has values which indicate the first
+ * bit set
+ */
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ { \
+ asm volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
+ "1" ((_value))); \
+ }
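+
+/*
+ *  The comment above sketches a binary search plus table lookup for CPUs
+ *  without a count-leading-zeros style instruction.  A purely branch-based
+ *  C variant of the same idea (illustrative only -- this port always has
+ *  cntlzw, and the helper name is hypothetical) is:
+ *
+ *    static int software_cntlzw( unsigned32 value )
+ *    {
+ *      int n = 0;
+ *
+ *      if ( (value & 0xffff0000) == 0 ) { n += 16; value <<= 16; }
+ *      if ( (value & 0xff000000) == 0 ) { n +=  8; value <<=  8; }
+ *      if ( (value & 0xf0000000) == 0 ) { n +=  4; value <<=  4; }
+ *      if ( (value & 0xc0000000) == 0 ) { n +=  2; value <<=  2; }
+ *      if ( (value & 0x80000000) == 0 ) { n +=  1; }
+ *      return n;
+ *    }
+ *
+ *  For a non-zero argument this returns the same bit number (0 == MSB)
+ *  that the cntlzw based macro produces.
+ */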
+
+/* end of Bitfield handler macros */
+
+/*
+ * This routine builds the mask which corresponds to the bit fields
+ * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 0x80000000 >> (_bit_number) )
+
+/*
+ * This routine translates the bit numbers returned by
+ * _CPU_Bitfield_Find_first_bit() into something suitable for use as
+ * a major or minor component of a priority. See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_bits_index( _priority ) \
+ (_priority)
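+
+/*
+ *  Worked example (illustrative): for a bit map value of 0x4000,
+ *  _CPU_Bitfield_Find_first_bit() yields 17 (cntlzw numbering, 0 == MSB),
+ *  _CPU_Priority_Mask( 17 ) rebuilds 0x00004000, and
+ *  _CPU_Priority_bits_index( 17 ) returns 17 unchanged since this port
+ *  needs no renumbering.
+ */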
+
+/* end of Priority handler macros */
+
+/* variables */
+
+extern const unsigned32 _CPU_msrs[4];
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)
+);
+
+/*
+ * _CPU_ISR_install_vector
+ *
+ * This routine installs an interrupt vector.
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/*
+ * _CPU_Install_interrupt_stack
+ *
+ * This routine installs the hardware interrupt stack pointer.
+ *
+ * NOTE: It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
+ * is TRUE.
+ */
+
+void _CPU_Install_interrupt_stack( void );
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ *  This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+);
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine saves the floating point context passed to it.
+ */
+
+void _CPU_Context_save_fp(
+ void **fp_context_ptr
+);
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine restores the floating point context passed to it.
+ */
+
+void _CPU_Context_restore_fp(
+ void **fp_context_ptr
+);
+
+void _CPU_Fatal_error(
+ unsigned32 _error
+);
+
+/* The following routine swaps the endian format of an unsigned int.
+ * It must be static because it is referenced indirectly.
+ *
+ * This version will work on any processor, but if there is a better
+ * way for your CPU PLEASE use it. The most common way to do this is to:
+ *
+ * swap least significant two bytes with 16-bit rotate
+ * swap upper and lower 16-bits
+ * swap most significant two bytes with 16-bit rotate
+ *
+ * Some CPUs have special instructions which swap a 32-bit quantity in
+ * a single instruction (e.g. i486). It is probably best to avoid
+ * an "endian swapping control bit" in the CPU. One good reason is
+ *  that interrupts would probably have to be disabled to ensure that
+ *  an interrupt does not try to access the same "chunk" with the wrong
+ *  endianness.  Another good reason is that on some CPUs, the endian bit
+ *  changes the endianness for ALL fetches -- both code and data -- so the
+ *  code would be fetched incorrectly.
+ */
+
+static inline unsigned int CPU_swap_u32(
+ unsigned int value
+)
+{
+ unsigned32 swapped;
+
+ asm volatile("rlwimi %0,%1,8,24,31;"
+ "rlwimi %0,%1,24,16,23;"
+ "rlwimi %0,%1,8,8,15;"
+ "rlwimi %0,%1,24,0,7;" :
+ "=&r" ((swapped)) : "r" ((value)));
+
+ return( swapped );
+}
+
+#define CPU_swap_u16( value ) \
+ (((value&0xff) << 8) | ((value >> 8)&0xff))
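+
+/*
+ *  A portable C equivalent of CPU_swap_u32() (illustrative only -- the
+ *  rlwimi sequence above is what this port actually uses; the name
+ *  CPU_swap_u32_portable is hypothetical):
+ *
+ *    static inline unsigned int CPU_swap_u32_portable( unsigned int value )
+ *    {
+ *      unsigned32 byte1, byte2, byte3, byte4;
+ *
+ *      byte4 = (value >> 24) & 0xff;
+ *      byte3 = (value >> 16) & 0xff;
+ *      byte2 = (value >>  8) & 0xff;
+ *      byte1 =  value        & 0xff;
+ *
+ *      return (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
+ *    }
+ */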
+
+/*
+ * Routines to access the decrementer register
+ */
+
+#define PPC_Set_decrementer( _clicks ) \
+ do { \
+ asm volatile( "mtdec %0" : "=r" ((_clicks)) : "r" ((_clicks)) ); \
+ } while (0)
+
+/*
+ * Routines to access the time base register
+ */
+
+static inline unsigned64 PPC_Get_timebase_register( void )
+{
+ unsigned32 tbr_low;
+ unsigned32 tbr_high;
+ unsigned32 tbr_high_old;
+ unsigned64 tbr;
+
+ do {
+ asm volatile( "mftbu %0" : "=r" (tbr_high_old));
+ asm volatile( "mftb %0" : "=r" (tbr_low));
+ asm volatile( "mftbu %0" : "=r" (tbr_high));
+ } while ( tbr_high_old != tbr_high );
+
+ tbr = tbr_high;
+ tbr <<= 32;
+ tbr |= tbr_low;
+ return tbr;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu_asm.S b/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu_asm.S
new file mode 100644
index 0000000000..a377fa5d2a
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu_asm.S
@@ -0,0 +1,809 @@
+
+/* cpu_asm.s 1.1 - 95/12/04
+ *
+ * This file contains the assembly code for the PowerPC implementation
+ * of RTEMS.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu_asm.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ *  The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+/*
+ * Offsets for various Contexts
+ */
+ .set GP_1, 0
+ .set GP_2, (GP_1 + 4)
+ .set GP_13, (GP_2 + 4)
+ .set GP_14, (GP_13 + 4)
+
+ .set GP_15, (GP_14 + 4)
+ .set GP_16, (GP_15 + 4)
+ .set GP_17, (GP_16 + 4)
+ .set GP_18, (GP_17 + 4)
+
+ .set GP_19, (GP_18 + 4)
+ .set GP_20, (GP_19 + 4)
+ .set GP_21, (GP_20 + 4)
+ .set GP_22, (GP_21 + 4)
+
+ .set GP_23, (GP_22 + 4)
+ .set GP_24, (GP_23 + 4)
+ .set GP_25, (GP_24 + 4)
+ .set GP_26, (GP_25 + 4)
+
+ .set GP_27, (GP_26 + 4)
+ .set GP_28, (GP_27 + 4)
+ .set GP_29, (GP_28 + 4)
+ .set GP_30, (GP_29 + 4)
+
+ .set GP_31, (GP_30 + 4)
+ .set GP_CR, (GP_31 + 4)
+ .set GP_PC, (GP_CR + 4)
+ .set GP_MSR, (GP_PC + 4)
+
+#if (PPC_HAS_DOUBLE == 1)
+ .set FP_0, 0
+ .set FP_1, (FP_0 + 8)
+ .set FP_2, (FP_1 + 8)
+ .set FP_3, (FP_2 + 8)
+ .set FP_4, (FP_3 + 8)
+ .set FP_5, (FP_4 + 8)
+ .set FP_6, (FP_5 + 8)
+ .set FP_7, (FP_6 + 8)
+ .set FP_8, (FP_7 + 8)
+ .set FP_9, (FP_8 + 8)
+ .set FP_10, (FP_9 + 8)
+ .set FP_11, (FP_10 + 8)
+ .set FP_12, (FP_11 + 8)
+ .set FP_13, (FP_12 + 8)
+ .set FP_14, (FP_13 + 8)
+ .set FP_15, (FP_14 + 8)
+ .set FP_16, (FP_15 + 8)
+ .set FP_17, (FP_16 + 8)
+ .set FP_18, (FP_17 + 8)
+ .set FP_19, (FP_18 + 8)
+ .set FP_20, (FP_19 + 8)
+ .set FP_21, (FP_20 + 8)
+ .set FP_22, (FP_21 + 8)
+ .set FP_23, (FP_22 + 8)
+ .set FP_24, (FP_23 + 8)
+ .set FP_25, (FP_24 + 8)
+ .set FP_26, (FP_25 + 8)
+ .set FP_27, (FP_26 + 8)
+ .set FP_28, (FP_27 + 8)
+ .set FP_29, (FP_28 + 8)
+ .set FP_30, (FP_29 + 8)
+ .set FP_31, (FP_30 + 8)
+ .set FP_FPSCR, (FP_31 + 8)
+#else
+ .set FP_0, 0
+ .set FP_1, (FP_0 + 4)
+ .set FP_2, (FP_1 + 4)
+ .set FP_3, (FP_2 + 4)
+ .set FP_4, (FP_3 + 4)
+ .set FP_5, (FP_4 + 4)
+ .set FP_6, (FP_5 + 4)
+ .set FP_7, (FP_6 + 4)
+ .set FP_8, (FP_7 + 4)
+ .set FP_9, (FP_8 + 4)
+ .set FP_10, (FP_9 + 4)
+ .set FP_11, (FP_10 + 4)
+ .set FP_12, (FP_11 + 4)
+ .set FP_13, (FP_12 + 4)
+ .set FP_14, (FP_13 + 4)
+ .set FP_15, (FP_14 + 4)
+ .set FP_16, (FP_15 + 4)
+ .set FP_17, (FP_16 + 4)
+ .set FP_18, (FP_17 + 4)
+ .set FP_19, (FP_18 + 4)
+ .set FP_20, (FP_19 + 4)
+ .set FP_21, (FP_20 + 4)
+ .set FP_22, (FP_21 + 4)
+ .set FP_23, (FP_22 + 4)
+ .set FP_24, (FP_23 + 4)
+ .set FP_25, (FP_24 + 4)
+ .set FP_26, (FP_25 + 4)
+ .set FP_27, (FP_26 + 4)
+ .set FP_28, (FP_27 + 4)
+ .set FP_29, (FP_28 + 4)
+ .set FP_30, (FP_29 + 4)
+ .set FP_31, (FP_30 + 4)
+ .set FP_FPSCR, (FP_31 + 4)
+#endif
+
+ .set IP_LINK, 0
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ .set IP_0, (IP_LINK + 56)
+#else
+ .set IP_0, (IP_LINK + 8)
+#endif
+ .set IP_2, (IP_0 + 4)
+
+ .set IP_3, (IP_2 + 4)
+ .set IP_4, (IP_3 + 4)
+ .set IP_5, (IP_4 + 4)
+ .set IP_6, (IP_5 + 4)
+
+ .set IP_7, (IP_6 + 4)
+ .set IP_8, (IP_7 + 4)
+ .set IP_9, (IP_8 + 4)
+ .set IP_10, (IP_9 + 4)
+
+ .set IP_11, (IP_10 + 4)
+ .set IP_12, (IP_11 + 4)
+ .set IP_13, (IP_12 + 4)
+ .set IP_28, (IP_13 + 4)
+
+ .set IP_29, (IP_28 + 4)
+ .set IP_30, (IP_29 + 4)
+ .set IP_31, (IP_30 + 4)
+ .set IP_CR, (IP_31 + 4)
+
+ .set IP_CTR, (IP_CR + 4)
+ .set IP_XER, (IP_CTR + 4)
+ .set IP_LR, (IP_XER + 4)
+ .set IP_PC, (IP_LR + 4)
+
+ .set IP_MSR, (IP_PC + 4)
+ .set IP_END, (IP_MSR + 16)
+
+ /* _CPU_IRQ_info offsets */
+
+ /* These must be in this order */
+ .set Nest_level, 0
+ .set Disable_level, 4
+ .set Vector_table, 8
+ .set Stack, 12
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ .set Dispatch_r2, 16
+ .set Switch_necessary, 20
+#else
+ .set Default_r2, 16
+#if (PPC_ABI != PPC_ABI_GCC27)
+ .set Default_r13, 20
+ .set Switch_necessary, 24
+#else
+ .set Switch_necessary, 20
+#endif
+#endif
+ .set Signal, Switch_necessary + 4
+ .set msr_initial, Signal + 4
+
+ BEGIN_CODE
+/*
+ * _CPU_Context_save_fp_context
+ *
+ * This routine is responsible for saving the FP context
+ *  at *fp_context_ptr.  If the pointer to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_save_fp)
+PROC (_CPU_Context_save_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+#if (PPC_HAS_DOUBLE == 1)
+ stfd f0, FP_0(r3)
+ stfd f1, FP_1(r3)
+ stfd f2, FP_2(r3)
+ stfd f3, FP_3(r3)
+ stfd f4, FP_4(r3)
+ stfd f5, FP_5(r3)
+ stfd f6, FP_6(r3)
+ stfd f7, FP_7(r3)
+ stfd f8, FP_8(r3)
+ stfd f9, FP_9(r3)
+ stfd f10, FP_10(r3)
+ stfd f11, FP_11(r3)
+ stfd f12, FP_12(r3)
+ stfd f13, FP_13(r3)
+ stfd f14, FP_14(r3)
+ stfd f15, FP_15(r3)
+ stfd f16, FP_16(r3)
+ stfd f17, FP_17(r3)
+ stfd f18, FP_18(r3)
+ stfd f19, FP_19(r3)
+ stfd f20, FP_20(r3)
+ stfd f21, FP_21(r3)
+ stfd f22, FP_22(r3)
+ stfd f23, FP_23(r3)
+ stfd f24, FP_24(r3)
+ stfd f25, FP_25(r3)
+ stfd f26, FP_26(r3)
+ stfd f27, FP_27(r3)
+ stfd f28, FP_28(r3)
+ stfd f29, FP_29(r3)
+ stfd f30, FP_30(r3)
+ stfd f31, FP_31(r3)
+ mffs f2
+ stfd f2, FP_FPSCR(r3)
+#else
+ stfs f0, FP_0(r3)
+ stfs f1, FP_1(r3)
+ stfs f2, FP_2(r3)
+ stfs f3, FP_3(r3)
+ stfs f4, FP_4(r3)
+ stfs f5, FP_5(r3)
+ stfs f6, FP_6(r3)
+ stfs f7, FP_7(r3)
+ stfs f8, FP_8(r3)
+ stfs f9, FP_9(r3)
+ stfs f10, FP_10(r3)
+ stfs f11, FP_11(r3)
+ stfs f12, FP_12(r3)
+ stfs f13, FP_13(r3)
+ stfs f14, FP_14(r3)
+ stfs f15, FP_15(r3)
+ stfs f16, FP_16(r3)
+ stfs f17, FP_17(r3)
+ stfs f18, FP_18(r3)
+ stfs f19, FP_19(r3)
+ stfs f20, FP_20(r3)
+ stfs f21, FP_21(r3)
+ stfs f22, FP_22(r3)
+ stfs f23, FP_23(r3)
+ stfs f24, FP_24(r3)
+ stfs f25, FP_25(r3)
+ stfs f26, FP_26(r3)
+ stfs f27, FP_27(r3)
+ stfs f28, FP_28(r3)
+ stfs f29, FP_29(r3)
+ stfs f30, FP_30(r3)
+ stfs f31, FP_31(r3)
+ mffs f2
+ stfs f2, FP_FPSCR(r3)
+#endif
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore_fp_context
+ *
+ * This routine is responsible for restoring the FP context
+ *  at *fp_context_ptr.  If the pointer to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore_fp)
+PROC (_CPU_Context_restore_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+#if (PPC_HAS_DOUBLE == 1)
+ lfd f2, FP_FPSCR(r3)
+ mtfsf 255, f2
+ lfd f0, FP_0(r3)
+ lfd f1, FP_1(r3)
+ lfd f2, FP_2(r3)
+ lfd f3, FP_3(r3)
+ lfd f4, FP_4(r3)
+ lfd f5, FP_5(r3)
+ lfd f6, FP_6(r3)
+ lfd f7, FP_7(r3)
+ lfd f8, FP_8(r3)
+ lfd f9, FP_9(r3)
+ lfd f10, FP_10(r3)
+ lfd f11, FP_11(r3)
+ lfd f12, FP_12(r3)
+ lfd f13, FP_13(r3)
+ lfd f14, FP_14(r3)
+ lfd f15, FP_15(r3)
+ lfd f16, FP_16(r3)
+ lfd f17, FP_17(r3)
+ lfd f18, FP_18(r3)
+ lfd f19, FP_19(r3)
+ lfd f20, FP_20(r3)
+ lfd f21, FP_21(r3)
+ lfd f22, FP_22(r3)
+ lfd f23, FP_23(r3)
+ lfd f24, FP_24(r3)
+ lfd f25, FP_25(r3)
+ lfd f26, FP_26(r3)
+ lfd f27, FP_27(r3)
+ lfd f28, FP_28(r3)
+ lfd f29, FP_29(r3)
+ lfd f30, FP_30(r3)
+ lfd f31, FP_31(r3)
+#else
+ lfs f2, FP_FPSCR(r3)
+ mtfsf 255, f2
+ lfs f0, FP_0(r3)
+ lfs f1, FP_1(r3)
+ lfs f2, FP_2(r3)
+ lfs f3, FP_3(r3)
+ lfs f4, FP_4(r3)
+ lfs f5, FP_5(r3)
+ lfs f6, FP_6(r3)
+ lfs f7, FP_7(r3)
+ lfs f8, FP_8(r3)
+ lfs f9, FP_9(r3)
+ lfs f10, FP_10(r3)
+ lfs f11, FP_11(r3)
+ lfs f12, FP_12(r3)
+ lfs f13, FP_13(r3)
+ lfs f14, FP_14(r3)
+ lfs f15, FP_15(r3)
+ lfs f16, FP_16(r3)
+ lfs f17, FP_17(r3)
+ lfs f18, FP_18(r3)
+ lfs f19, FP_19(r3)
+ lfs f20, FP_20(r3)
+ lfs f21, FP_21(r3)
+ lfs f22, FP_22(r3)
+ lfs f23, FP_23(r3)
+ lfs f24, FP_24(r3)
+ lfs f25, FP_25(r3)
+ lfs f26, FP_26(r3)
+ lfs f27, FP_27(r3)
+ lfs f28, FP_28(r3)
+ lfs f29, FP_29(r3)
+ lfs f30, FP_30(r3)
+ lfs f31, FP_31(r3)
+#endif
+#endif
+ blr
+
+
+/* _CPU_Context_switch
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_switch)
+PROC (_CPU_Context_switch):
+ sync
+ isync
+#if (PPC_CACHE_ALIGNMENT == 4) /* No cache */
+ stw r1, GP_1(r3)
+ lwz r1, GP_1(r4)
+ stw r2, GP_2(r3)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ stmw r13, GP_13(r3)
+ lmw r13, GP_13(r4)
+#else
+ stw r13, GP_13(r3)
+ lwz r13, GP_13(r4)
+ stw r14, GP_14(r3)
+ lwz r14, GP_14(r4)
+ stw r15, GP_15(r3)
+ lwz r15, GP_15(r4)
+ stw r16, GP_16(r3)
+ lwz r16, GP_16(r4)
+ stw r17, GP_17(r3)
+ lwz r17, GP_17(r4)
+ stw r18, GP_18(r3)
+ lwz r18, GP_18(r4)
+ stw r19, GP_19(r3)
+ lwz r19, GP_19(r4)
+ stw r20, GP_20(r3)
+ lwz r20, GP_20(r4)
+ stw r21, GP_21(r3)
+ lwz r21, GP_21(r4)
+ stw r22, GP_22(r3)
+ lwz r22, GP_22(r4)
+ stw r23, GP_23(r3)
+ lwz r23, GP_23(r4)
+ stw r24, GP_24(r3)
+ lwz r24, GP_24(r4)
+ stw r25, GP_25(r3)
+ lwz r25, GP_25(r4)
+ stw r26, GP_26(r3)
+ lwz r26, GP_26(r4)
+ stw r27, GP_27(r3)
+ lwz r27, GP_27(r4)
+ stw r28, GP_28(r3)
+ lwz r28, GP_28(r4)
+ stw r29, GP_29(r3)
+ lwz r29, GP_29(r4)
+ stw r30, GP_30(r3)
+ lwz r30, GP_30(r4)
+ stw r31, GP_31(r3)
+ lwz r31, GP_31(r4)
+#endif
+ mfcr r5
+ stw r5, GP_CR(r3)
+ lwz r5, GP_CR(r4)
+ mflr r6
+ mtcrf 255, r5
+ stw r6, GP_PC(r3)
+ lwz r6, GP_PC(r4)
+ mfmsr r7
+ mtlr r6
+ stw r7, GP_MSR(r3)
+ lwz r7, GP_MSR(r4)
+ mtmsr r7
+#endif
+#if (PPC_CACHE_ALIGNMENT == 16)
+ /* This assumes that all the registers are in the given order */
+ li r5, 16
+ addi r3,r3,-4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_14+4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+
+ addi r3, r3, GP_18-GP_14
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ addi r3, r3, GP_22-GP_18
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ addi r3, r3, GP_26-GP_22
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stmw r13, GP_13-GP_26(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stwu r14, GP_14+4(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r15, GP_15-GP_14(r3)
+ stw r16, GP_16-GP_14(r3)
+ stw r17, GP_17-GP_14(r3)
+ stwu r18, GP_18-GP_14(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stwu r22, GP_22-GP_18(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r23, GP_23-GP_22(r3)
+ stw r24, GP_24-GP_22(r3)
+ stw r25, GP_25-GP_22(r3)
+ stwu r26, GP_26-GP_22(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r27, GP_27-GP_26(r3)
+ stw r28, GP_28-GP_26(r3)
+ stw r29, GP_29-GP_26(r3)
+ stw r30, GP_30-GP_26(r3)
+ stw r31, GP_31-GP_26(r3)
+#endif
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r0, r4
+#endif
+ mfcr r6
+ stw r6, GP_CR-GP_26(r3)
+ mflr r7
+ stw r7, GP_PC-GP_26(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_26(r3)
+
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_15
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ addi r4, r4, GP_19-GP_15
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ addi r4, r4, GP_23-GP_19
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ addi r4, r4, GP_27-GP_23
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lmw r13, GP_13-GP_27(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwzu r15, GP_15(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r16, GP_16-GP_15(r4)
+ lwz r17, GP_17-GP_15(r4)
+ lwz r18, GP_18-GP_15(r4)
+ lwzu r19, GP_19-GP_15(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwzu r23, GP_23-GP_19(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r24, GP_24-GP_23(r4)
+ lwz r25, GP_25-GP_23(r4)
+ lwz r26, GP_26-GP_23(r4)
+ lwzu r27, GP_27-GP_23(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r28, GP_28-GP_27(r4)
+ lwz r29, GP_29-GP_27(r4)
+ lwz r30, GP_30-GP_27(r4)
+ lwz r31, GP_31-GP_27(r4)
+#endif
+ lwz r6, GP_CR-GP_27(r4)
+ lwz r7, GP_PC-GP_27(r4)
+ lwz r8, GP_MSR-GP_27(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+#endif
+#if (PPC_CACHE_ALIGNMENT == 32)
+ /* This assumes that all the registers are in the given order */
+ li r5, 32
+ addi r3,r3,-4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_18+4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stmw r13, GP_13-GP_18(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stw r14, GP_14+4(r3)
+ stw r15, GP_15+4(r3)
+ stw r16, GP_16+4(r3)
+ stw r17, GP_17+4(r3)
+ stwu r18, GP_18+4(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stw r22, GP_22-GP_18(r3)
+ stw r23, GP_23-GP_18(r3)
+ stw r24, GP_24-GP_18(r3)
+ stw r25, GP_25-GP_18(r3)
+ stw r26, GP_26-GP_18(r3)
+ stw r27, GP_27-GP_18(r3)
+ stw r28, GP_28-GP_18(r3)
+ stw r29, GP_29-GP_18(r3)
+ stw r30, GP_30-GP_18(r3)
+ stw r31, GP_31-GP_18(r3)
+#endif
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r0, r4
+#endif
+ mfcr r6
+ stw r6, GP_CR-GP_18(r3)
+ mflr r7
+ stw r7, GP_PC-GP_18(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_18(r3)
+
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_19
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lmw r13, GP_13-GP_19(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwz r15, GP_15(r4)
+ lwz r16, GP_16(r4)
+ lwz r17, GP_17(r4)
+ lwz r18, GP_18(r4)
+ lwzu r19, GP_19(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwz r23, GP_23-GP_19(r4)
+ lwz r24, GP_24-GP_19(r4)
+ lwz r25, GP_25-GP_19(r4)
+ lwz r26, GP_26-GP_19(r4)
+ lwz r27, GP_27-GP_19(r4)
+ lwz r28, GP_28-GP_19(r4)
+ lwz r29, GP_29-GP_19(r4)
+ lwz r30, GP_30-GP_19(r4)
+ lwz r31, GP_31-GP_19(r4)
+#endif
+ lwz r6, GP_CR-GP_19(r4)
+ lwz r7, GP_PC-GP_19(r4)
+ lwz r8, GP_MSR-GP_19(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore
+ *
+ *  This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+/*
+ * ACB: Don't worry about cache optimisation here - this is not THAT critical.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore)
+PROC (_CPU_Context_restore):
+ lwz r5, GP_CR(r3)
+ lwz r6, GP_PC(r3)
+ lwz r7, GP_MSR(r3)
+ mtcrf 255, r5
+ mtlr r6
+ mtmsr r7
+ lwz r1, GP_1(r3)
+ lwz r2, GP_2(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ lmw r13, GP_13(r3)
+#else
+ lwz r13, GP_13(r3)
+ lwz r14, GP_14(r3)
+ lwz r15, GP_15(r3)
+ lwz r16, GP_16(r3)
+ lwz r17, GP_17(r3)
+ lwz r18, GP_18(r3)
+ lwz r19, GP_19(r3)
+ lwz r20, GP_20(r3)
+ lwz r21, GP_21(r3)
+ lwz r22, GP_22(r3)
+ lwz r23, GP_23(r3)
+ lwz r24, GP_24(r3)
+ lwz r25, GP_25(r3)
+ lwz r26, GP_26(r3)
+ lwz r27, GP_27(r3)
+ lwz r28, GP_28(r3)
+ lwz r29, GP_29(r3)
+ lwz r30, GP_30(r3)
+ lwz r31, GP_31(r3)
+#endif
+
+ blr
+
+/* Individual interrupt prologues look like this:
+ * #if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ * #if (PPC_HAS_FPU)
+ * stwu r1, -(20*4 + 18*8 + IP_END)(r1)
+ * #else
+ * stwu r1, -(20*4 + IP_END)(r1)
+ * #endif
+ * #else
+ * stwu r1, -(IP_END)(r1)
+ * #endif
+ * stw r0, IP_0(r1)
+ *
+ * li r0, vectornum
+ * b PROC (_ISR_Handler{,C})
+ */
+
+/* void __ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ * The vector number is in r0. R0 has already been stacked.
+ *
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_ISR_Handler)
+PROC (_ISR_Handler):
+#define LABEL(x) x
+/* XXX ??
+#define MTSAVE(x) mtspr sprg0, x
+#define MFSAVE(x) mfspr x, sprg0
+*/
+#define MTPC(x) mtspr srr0, x
+#define MFPC(x) mfspr x, srr0
+#define MTMSR(x) mtspr srr1, x
+#define MFMSR(x) mfspr x, srr1
+
+ #include "irq_stub.S"
+ rfi
+
+#if (PPC_HAS_RFCI == 1)
+/* void __ISR_HandlerC()
+ *
+ * This routine provides the RTEMS interrupt management.
+ * For critical interrupts
+ *
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_ISR_HandlerC)
+PROC (_ISR_HandlerC):
+#undef LABEL
+#undef MTSAVE
+#undef MFSAVE
+#undef MTPC
+#undef MFPC
+#undef MTMSR
+#undef MFMSR
+#define LABEL(x) x##_C
+/* XXX??
+#define MTSAVE(x) mtspr sprg1, x
+#define MFSAVE(x) mfspr x, sprg1
+*/
+#define MTPC(x) mtspr srr2, x
+#define MFPC(x) mfspr x, srr2
+#define MTMSR(x) mtspr srr3, x
+#define MFMSR(x) mfspr x, srr3
+ #include "irq_stub.S"
+ rfci
+#endif
+
+/* PowerOpen descriptors for indirect function calls.
+ */
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ DESCRIPTOR (_CPU_Context_save_fp)
+ DESCRIPTOR (_CPU_Context_restore_fp)
+ DESCRIPTOR (_CPU_Context_switch)
+ DESCRIPTOR (_CPU_Context_restore)
+ DESCRIPTOR (_ISR_Handler)
+#if (PPC_HAS_RFCI == 1)
+ DESCRIPTOR (_ISR_HandlerC)
+#endif
+#endif
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/irq_stub.S b/c/src/lib/libbsp/powerpc/support/old_exception_processing/irq_stub.S
new file mode 100644
index 0000000000..76c8927305
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/irq_stub.S
@@ -0,0 +1,268 @@
+/*
+ * This file contains the interrupt handler assembly code for the PowerPC
+ * implementation of RTEMS. It is #included from cpu_asm.s.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * $Id$
+ */
+
+/* void __ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ * The vector number is in r0. R0 has already been stacked.
+ *
+ */
+ PUBLIC_VAR (_CPU_IRQ_info )
+
+ /* Finish off the interrupt frame */
+ stw r2, IP_2(r1)
+ stw r3, IP_3(r1)
+ stw r4, IP_4(r1)
+ stw r5, IP_5(r1)
+ stw r6, IP_6(r1)
+ stw r7, IP_7(r1)
+ stw r8, IP_8(r1)
+ stw r9, IP_9(r1)
+ stw r10, IP_10(r1)
+ stw r11, IP_11(r1)
+ stw r12, IP_12(r1)
+ stw r13, IP_13(r1)
+ stmw r28, IP_28(r1)
+ mfcr r5
+ mfctr r6
+ mfxer r7
+ mflr r8
+ MFPC (r9)
+ MFMSR (r10)
+ /* Establish addressing */
+#if (PPC_USE_SPRG)
+ mfspr r11, sprg3
+#else
+ lis r11,_CPU_IRQ_info@ha
+ addi r11,r11,_CPU_IRQ_info@l
+#endif
+ dcbt r0, r11
+ stw r5, IP_CR(r1)
+ stw r6, IP_CTR(r1)
+ stw r7, IP_XER(r1)
+ stw r8, IP_LR(r1)
+ stw r9, IP_PC(r1)
+ stw r10, IP_MSR(r1)
+
+ lwz r30, Vector_table(r11)
+ slwi r4,r0,2
+ lwz r28, Nest_level(r11)
+ add r4, r4, r30
+
+ lwz r30, 0(r28)
+ mr r3, r0
+ lwz r31, Stack(r11)
+ /*
+ * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
+ * if ( _ISR_Nest_level == 0 )
+ * switch to software interrupt stack
+ * #endif
+ */
+ /* Switch stacks, here we must prevent ALL interrupts */
+#if (PPC_USE_SPRG)
+ mfmsr r5
+ mfspr r6, sprg2
+#else
+ lwz r6,msr_initial(r11)
+ lis r5,~PPC_MSR_DISABLE_MASK@ha
+ ori r5,r5,~PPC_MSR_DISABLE_MASK@l
+ and r6,r6,r5
+ mfmsr r5
+#endif
+ mtmsr r6
+ cmpwi r30, 0
+ lwz r29, Disable_level(r11)
+ subf r31,r1,r31
+ bne LABEL (nested)
+ stwux r1,r1,r31
+LABEL (nested):
+ /*
+ * _ISR_Nest_level++;
+ */
+ lwz r31, 0(r29)
+ addi r30,r30,1
+ stw r30,0(r28)
+	/* From here on out, interrupts could be re-enabled, although
+	 * RTEMS convention is to leave them disabled.
+ */
+ lwz r4,0(r4)
+ /*
+ * _Thread_Dispatch_disable_level++;
+ */
+ addi r31,r31,1
+ stw r31, 0(r29)
+/* SCE 980217
+ *
+ * We need address translation ON when we call our ISR routine
+
+ mtmsr r5
+
+ */
+
+ /*
+ * (*_ISR_Vector_table[ vector ])( vector );
+ */
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ lwz r6,0(r4)
+ lwz r2,4(r4)
+ mtlr r6
+ lwz r11,8(r4)
+#endif
+#if (PPC_ABI == PPC_ABI_GCC27)
+ lwz r2, Default_r2(r11)
+ mtlr r4
+ #lwz r2, 0(r2)
+#endif
+#if (PPC_ABI == PPC_ABI_SVR4 || PPC_ABI == PPC_ABI_EABI)
+ mtlr r4
+ lwz r2, Default_r2(r11)
+ lwz r13, Default_r13(r11)
+ #lwz r2, 0(r2)
+ #lwz r13, 0(r13)
+#endif
+ mr r4,r1
+ blrl
+ /* NOP marker for debuggers */
+ or r6,r6,r6
+
+ /* We must re-disable the interrupts */
+#if (PPC_USE_SPRG)
+ mfspr r11, sprg3
+ mfspr r0, sprg2
+#else
+ lis r11,_CPU_IRQ_info@ha
+ addi r11,r11,_CPU_IRQ_info@l
+ lwz r0,msr_initial(r11)
+ lis r30,~PPC_MSR_DISABLE_MASK@ha
+ ori r30,r30,~PPC_MSR_DISABLE_MASK@l
+ and r0,r0,r30
+#endif
+ mtmsr r0
+ lwz r30, 0(r28)
+ lwz r31, 0(r29)
+
+ /*
+ * if (--Thread_Dispatch_disable,--_ISR_Nest_level)
+ * goto easy_exit;
+ */
+ addi r30, r30, -1
+ cmpwi r30, 0
+ addi r31, r31, -1
+ stw r30, 0(r28)
+ stw r31, 0(r29)
+ bne LABEL (easy_exit)
+ cmpwi r31, 0
+
+ lwz r30, Switch_necessary(r11)
+
+ /*
+ * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
+ * restore stack
+ * #endif
+ */
+ lwz r1,0(r1)
+ bne LABEL (easy_exit)
+ lwz r30, 0(r30)
+ lwz r31, Signal(r11)
+
+ /*
+ * if ( _Context_Switch_necessary )
+ * goto switch
+ */
+ cmpwi r30, 0
+ lwz r28, 0(r31)
+ li r6,0
+ bne LABEL (switch)
+ /*
+ * if ( !_ISR_Signals_to_thread_executing )
+ * goto easy_exit
+ * _ISR_Signals_to_thread_executing = 0;
+ */
+ cmpwi r28, 0
+ beq LABEL (easy_exit)
+
+ /*
+ * switch:
+ * call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
+ */
+LABEL (switch):
+ stw r6, 0(r31)
+ /* Re-enable interrupts */
+ lwz r0, IP_MSR(r1)
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ lwz r2, Dispatch_r2(r11)
+#else
+ /* R2 and R13 still hold their values from the last call */
+#endif
+ mtmsr r0
+ bl SYM (_Thread_Dispatch)
+ /* NOP marker for debuggers */
+ or r6,r6,r6
+ /*
+ * prepare to get out of interrupt
+ */
+ /* Re-disable IRQs */
+#if (PPC_USE_SPRG)
+ mfspr r0, sprg2
+#else
+ lis r11,_CPU_IRQ_info@ha
+ addi r11,r11,_CPU_IRQ_info@l
+ lwz r0,msr_initial(r11)
+ lis r5,~PPC_MSR_DISABLE_MASK@ha
+ ori r5,r5,~PPC_MSR_DISABLE_MASK@l
+ and r0,r0,r5
+#endif
+ mtmsr r0
+
+ /*
+ * easy_exit:
+ * prepare to get out of interrupt
+ * return from interrupt
+ */
+LABEL (easy_exit):
+ lwz r5, IP_CR(r1)
+ lwz r6, IP_CTR(r1)
+ lwz r7, IP_XER(r1)
+ lwz r8, IP_LR(r1)
+ lwz r9, IP_PC(r1)
+ lwz r10, IP_MSR(r1)
+ mtcrf 255,r5
+ mtctr r6
+ mtxer r7
+ mtlr r8
+ MTPC (r9)
+ MTMSR (r10)
+ lwz r0, IP_0(r1)
+ lwz r2, IP_2(r1)
+ lwz r3, IP_3(r1)
+ lwz r4, IP_4(r1)
+ lwz r5, IP_5(r1)
+ lwz r6, IP_6(r1)
+ lwz r7, IP_7(r1)
+ lwz r8, IP_8(r1)
+ lwz r9, IP_9(r1)
+ lwz r10, IP_10(r1)
+ lwz r11, IP_11(r1)
+ lwz r12, IP_12(r1)
+ lwz r13, IP_13(r1)
+ lmw r28, IP_28(r1)
+ lwz r1, 0(r1)
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/ppccache.c b/c/src/lib/libbsp/powerpc/support/old_exception_processing/ppccache.c
new file mode 100644
index 0000000000..ecfb4b96ca
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/ppccache.c
@@ -0,0 +1,61 @@
+/*
+ * PowerPC Cache enable routines
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+
+#define PPC_Get_HID0( _value ) \
+ do { \
+ _value = 0; /* to avoid warnings */ \
+ asm volatile( \
+ "mfspr %0, 0x3f0;" /* get HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+#define PPC_Set_HID0( _value ) \
+ do { \
+ asm volatile( \
+ "isync;" \
+ "mtspr 0x3f0, %0;" /* load HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+
+void powerpc_instruction_cache_enable ()
+{
+ unsigned32 value;
+
+ /*
+ * Enable the instruction cache
+ */
+
+ PPC_Get_HID0( value );
+
+ value |= 0x00008000; /* Set ICE bit */
+
+ PPC_Set_HID0( value );
+}
+
+void powerpc_data_cache_enable ()
+{
+ unsigned32 value;
+
+ /*
+ * enable data cache
+ */
+
+ PPC_Get_HID0( value );
+
+ value |= 0x00004000; /* set DCE bit */
+
+ PPC_Set_HID0( value );
+}
+
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/rtems.S b/c/src/lib/libbsp/powerpc/support/old_exception_processing/rtems.S
new file mode 100644
index 0000000000..b653152411
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/rtems.S
@@ -0,0 +1,132 @@
+/* rtems.s
+ *
+ * This file contains the single entry point code for
+ * the PowerPC implementation of RTEMS.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/rtems.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ *  The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+ BEGIN_CODE
+/*
+ * RTEMS
+ *
+ * This routine jumps to the directive indicated in r11.
+ * This routine is used when RTEMS is linked by itself and placed
+ * in ROM. This routine is the first address in the ROM space for
+ * RTEMS. The user "calls" this address with the directive arguments
+ * in the normal place.
+ * This routine then jumps indirectly to the correct directive
+ * preserving the arguments. The directive should not realize
+ * it has been "wrapped" in this way. The table "_Entry_points"
+ * is used to look up the directive.
+ */
+
+ ALIGN (4, 2)
+ PUBLIC_PROC (RTEMS)
+PROC (RTEMS):
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ mflr r0
+ stw r0, 8(r1)
+ stwu r1, -64(r1)
+
+ /* Establish addressing */
+ bl base
+base:
+ mflr r12
+ addi r12, r12, tabaddr - base
+
+ lwz r12, Entry_points-abase(r12)
+ slwi r11, r11, 2
+ lwzx r12, r12, r11
+
+ stw r2, 56(r1)
+ lwz r0, 0(r12)
+ mtlr r0
+ lwz r2, 4(r12)
+ lwz r11, 8(r12)
+ blrl
+ lwz r2, 56(r1)
+ addi r1, r1, 64
+ lwz r0, 8(r1)
+ mtlr r0
+#else
+ mflr r0
+ stw r0, 4(r1)
+ stwu r1, -16(r1)
+
+ /* Establish addressing */
+ bl base
+base:
+ mflr r12
+ addi r12, r12, tabaddr - base
+
+ lwz r12, Entry_points-abase(r12)
+ slwi r11, r11, 2
+ lwzx r11, r12, r11
+
+ stw r2, 8(r1)
+#if (PPC_ABI != PPC_ABI_GCC27)
+ stw r13, 12(r1)
+#endif
+ mtlr r11
+ lwz r11, irqinfo-abase(r12)
+ lwz r2, 0(r11)
+#if (PPC_ABI != PPC_ABI_GCC27)
+ lwz r13, 4(r11)
+#endif
+ blrl
+ lwz r2, 8(r1)
+#if (PPC_ABI != PPC_ABI_GCC27)
+ lwz r13, 12(r1)
+#endif
+ addi r1, r1, 16
+ lwz r0, 4(r1)
+ mtlr r0
+#endif
+ blr
+
+
+ /* Addressability stuff */
+tabaddr:
+abase:
+ EXTERN_VAR (_Entry_points)
+Entry_points:
+ EXT_SYM_REF (_Entry_points)
+#if (PPC_ABI != PPC_ABI_POWEROPEN)
+ EXTERN_VAR (_CPU_IRQ_info)
+irqinfo:
+ EXT_SYM_REF (_CPU_IRQ_info)
+#endif
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ DESCRIPTOR (RTEMS)
+#endif
+
+
diff --git a/c/src/lib/libcpu/powerpc/Makefile.in b/c/src/lib/libcpu/powerpc/Makefile.in
index 755737150c..1a661a5b64 100644
--- a/c/src/lib/libcpu/powerpc/Makefile.in
+++ b/c/src/lib/libcpu/powerpc/Makefile.in
@@ -18,9 +18,12 @@ include $(RTEMS_ROOT)/make/directory.cfg
INSTALL_CHANGE = @INSTALL_CHANGE@
-ifeq ($(wildcard $(RTEMS_CPU_MODEL)),$(RTEMS_CPU_MODEL))
-SHARED_LIB = shared
-endif
+ifeq ($(RTEMS_CPU_MODEL),mpc750)
+SHARED_LIB = shared mpc6xx
+endif
+ifeq ($(RTEMS_CPU_MODEL),mpc604)
+SHARED_LIB = shared mpc6xx
+endif
SUBDIRS = $(SHARED_LIB) $(wildcard $(RTEMS_CPU_MODEL)) wrapup
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/Makefile.in b/c/src/lib/libcpu/powerpc/mpc6xx/Makefile.in
new file mode 100644
index 0000000000..27fc592154
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/Makefile.in
@@ -0,0 +1,25 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../..
+subdir = powerpc/mpc6xx
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/directory.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+
+SUBDIRS = exceptions mmu clock wrapup
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/clock/Makefile.in b/c/src/lib/libcpu/powerpc/mpc6xx/clock/Makefile.in
new file mode 100644
index 0000000000..fc945cd304
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/clock/Makefile.in
@@ -0,0 +1,68 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/mpc6xx/clock
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+# C source names, if any, go here -- minus the .c
+C_PIECES = c_clock
+C_FILES = $(C_PIECES:%=%.c)
+C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
+
+H_FILES = $(srcdir)/c_clock.h
+
+# Assembly source names, if any, go here -- minus the .S
+S_PIECES =
+S_FILES = $(S_PIECES:%=%.S)
+S_O_FILES = $(S_FILES:%.S=${ARCH}/%.o)
+
+SRCS = $(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES)
+OBJS = $(C_O_FILES) $(CC_O_FILES) $(S_O_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+
+#
+# (OPTIONAL) Add local stuff here using +=
+#
+
+DEFINES +=
+CPPFLAGS +=
+CFLAGS +=
+
+LD_PATHS +=
+LD_LIBS +=
+LDFLAGS +=
+
+#
+# Add your list of files to delete here. The config files
+# already know how to delete some stuff, so you may want
+# to just run 'make clean' first to see what gets missed.
+# 'make clobber' already includes 'make clean'
+#
+
+CLEAN_ADDITIONS +=
+CLOBBER_ADDITIONS +=
+
+all: ${ARCH} $(SRCS) preinstall $(OBJS)
+
+preinstall: $(INSTALLDIRS) $(H_FILES)
+ @$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/libcpu
+
+# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
+install: all
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.c b/c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.c
new file mode 100644
index 0000000000..b3d93f8f34
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.c
@@ -0,0 +1,208 @@
+/*
+ * Clock Tick Device Driver
+ *
+ * This routine utilizes the Decrementer Register common to the PPC family.
+ *
+ * The tick frequency is directly programmed to the configured number of
+ * microseconds per tick.
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ *  The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * Modified to support the MPC750.
+ * Modifications Copyright (c) 1999 Eric Valette valette@crf.canon.fr
+ *
+ * $Id$
+ */
+
+#include <rtems.h>
+#include <rtems/libio.h>
+#include <stdlib.h> /* for atexit() */
+#include <assert.h>
+#include <libcpu/cpu.h>
+#include <libcpu/c_clock.h>
+
+/*
+ * Clock ticks since initialization
+ */
+
+volatile rtems_unsigned32 Clock_driver_ticks;
+
+/*
+ * This is the value programmed into the count down timer.
+ */
+
+rtems_unsigned32 Clock_Decrementer_value;
+
+/*
+ * These are set by clock driver during its init
+ */
+
+rtems_device_major_number rtems_clock_major = ~0;
+rtems_device_minor_number rtems_clock_minor;
+
+void clockOff(void* unused)
+{
+ if (BSP_Configuration.ticks_per_timeslice) {
+ /*
+   *  Nothing to do: the decrementer interrupt is enabled by MSR_EE,
+   *  so it cannot be masked without disabling all external interrupts
+ */
+ }
+}
+void clockOn(void* unused)
+{
+ PPC_Set_decrementer( Clock_Decrementer_value );
+}
+
+/*
+ * Clock_isr
+ *
+ * This is the clock tick interrupt handler.
+ *
+ * Input parameters:
+ * vector - vector number
+ *
+ * Output parameters: NONE
+ *
+ * Return values: NONE
+ *
+ */
+void clockIsr()
+{
+ /*
+ * The driver has seen another tick.
+ */
+
+ PPC_Set_decrementer( Clock_Decrementer_value );
+
+ Clock_driver_ticks += 1;
+
+ /*
+   * The decrementer was reloaded above; announce the tick to RTEMS.
+ */
+
+ rtems_clock_tick();
+}
+
+int clockIsOn(void* unused)
+{
+ unsigned32 msr_value;
+
+ _CPU_MSR_GET( msr_value );
+ if (msr_value & MSR_EE) return 1;
+ return 0;
+}
+
+
+/*
+ * Clock_exit
+ *
+ * This routine allows the clock driver to exit by masking the interrupt and
+ * disabling the clock's counter.
+ *
+ * Input parameters: NONE
+ *
+ * Output parameters: NONE
+ *
+ * Return values: NONE
+ *
+ */
+
+void Clock_exit( void )
+{
+ if ( BSP_Configuration.ticks_per_timeslice ) {
+ (void) BSP_disconnect_clock_handler ();
+ }
+}
+
+/*
+ * Clock_initialize
+ *
+ * This routine initializes the clock driver.
+ *
+ * Input parameters:
+ * major - clock device major number
+ * minor - clock device minor number
+ * parg - pointer to optional device driver arguments
+ *
+ * Output parameters: NONE
+ *
+ * Return values:
+ * rtems_device_driver status code
+ */
+
+rtems_device_driver Clock_initialize(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void *pargp
+)
+{
+ Clock_Decrementer_value = (BSP_bus_frequency/4000)*
+ (BSP_Configuration.microseconds_per_tick/1000);
+
+ if (!BSP_connect_clock_handler ()) {
+ printk("Unable to initialize system clock\n");
+ rtems_fatal_error_occurred(1);
+ }
+ /* make major/minor avail to others such as shared memory driver */
+
+ rtems_clock_major = major;
+ rtems_clock_minor = minor;
+
+ return RTEMS_SUCCESSFUL;
+} /* Clock_initialize */
+
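+/*
+ * Worked example (illustrative numbers, not a measured board value): the
+ * decrementer counts down at one quarter of the bus clock, so
+ * BSP_bus_frequency/4000 is the number of decrementer ticks per
+ * millisecond.  Assuming a 66 MHz bus (BSP_bus_frequency = 66666666) and
+ * a 10 ms tick (microseconds_per_tick = 10000):
+ *
+ *   Clock_Decrementer_value = (66666666/4000) * (10000/1000)
+ *                           = 16666 * 10
+ *                           = 166660 decrementer ticks per RTEMS tick
+ */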
+/*
+ * Clock_control
+ *
+ * This routine is the clock device driver control entry point.
+ *
+ * Input parameters:
+ * major - clock device major number
+ * minor - clock device minor number
+ * parg - pointer to optional device driver arguments
+ *
+ * Output parameters: NONE
+ *
+ * Return values:
+ * rtems_device_driver status code
+ */
+
+rtems_device_driver Clock_control(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void *pargp
+)
+{
+ rtems_libio_ioctl_args_t *args = pargp;
+
+ if (args == 0)
+ goto done;
+
+ Clock_Decrementer_value = (BSP_bus_frequency/4000)*
+ (BSP_Configuration.microseconds_per_tick/1000);
+
+ if (args->command == rtems_build_name('I', 'S', 'R', ' '))
+ clockIsr();
+ else if (args->command == rtems_build_name('N', 'E', 'W', ' '))
+ {
+ if (!BSP_connect_clock_handler ()) {
+ printk("Error installing clock interrupt handler!\n");
+ rtems_fatal_error_occurred(1);
+ }
+ }
+done:
+ return RTEMS_SUCCESSFUL;
+}
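+
+/*
+ * Usage sketch (not part of the driver): once registered in the driver
+ * table, the control entry above can be reached through the classic I/O
+ * manager.  The argument layout follows rtems_libio_ioctl_args_t as used
+ * by Clock_control; all other setup is assumed and not shown here.
+ *
+ *   rtems_libio_ioctl_args_t args;
+ *
+ *   args.command = rtems_build_name('N', 'E', 'W', ' ');
+ *   (void) rtems_io_control( rtems_clock_major, rtems_clock_minor, &args );
+ *
+ * The 'N','E','W' command recomputes Clock_Decrementer_value and
+ * reconnects the tick handler via BSP_connect_clock_handler().
+ */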
+
+
+
+
+
+
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.h b/c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.h
new file mode 100644
index 0000000000..237273f6f9
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.h
@@ -0,0 +1,42 @@
+/*
+ * Clock Tick Device Driver
+ *
+ * This routine utilizes the Decrementer Register common to the PPC family.
+ *
+ * The tick frequency is directly programmed to the configured number of
+ * microseconds per tick.
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * Modified to support the MPC750.
+ * Modifications Copyright (c) 1999 Eric Valette valette@crf.canon.fr
+ *
+ * $Id$
+ */
+
+#ifndef _LIB_LIBCPU_C_CLOCK_H
+#define _LIB_LIBCPU_C_CLOCK_H
+
+#include <rtems.h>
+#include <bsp.h>
+
+/*
+ * These functions and variables represent the API exported by the CPU to the BSP
+ */
+extern void clockOff (void* unused);
+extern void clockOn (void* unused);
+extern void clockIsr (void);
+extern int clockIsOn (void* unused);
+
+#endif
+
+
+
+
+
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/Makefile.in b/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/Makefile.in
new file mode 100644
index 0000000000..866432d3e9
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/Makefile.in
@@ -0,0 +1,79 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/mpc6xx/exceptions
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+PGM = ${ARCH}/exceptions.rel
+
+# C source names, if any, go here -- minus the .c
+C_PIECES = raw_exception
+C_FILES = $(C_PIECES:%=%.c)
+C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
+
+H_FILES = $(srcdir)/raw_exception.h
+
+# Assembly source names, if any, go here -- minus the .S
+S_PIECES = asm_utils
+S_FILES = $(S_PIECES:%=%.S)
+S_O_FILES = $(S_FILES:%.S=${ARCH}/%.o)
+
+SRCS = $(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES)
+OBJS = $(C_O_FILES) $(CC_O_FILES) $(S_O_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
+
+INSTALLDIRS = $(PROJECT_INCLUDE)/libcpu
+
+$(INSTALLDIRS):
+ @$(mkinstalldirs) $(INSTALLDIRS)
+
+#
+# (OPTIONAL) Add local stuff here using +=
+#
+
+DEFINES +=
+CPPFLAGS +=
+CFLAGS +=
+
+LD_PATHS +=
+LD_LIBS +=
+LDFLAGS +=
+
+#
+# Add your list of files to delete here. The config files
+# already know how to delete some stuff, so you may want
+# to just run 'make clean' first to see what gets missed.
+# 'make clobber' already includes 'make clean'
+#
+
+CLEAN_ADDITIONS +=
+CLOBBER_ADDITIONS +=
+
+${PGM}: ${OBJS}
+ $(make-rel)
+
+preinstall: $(INSTALLDIRS) $(H_FILES)
+ @$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/libcpu
+
+all: ${ARCH} $(SRCS) preinstall $(PGM)
+
+# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
+install: all
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/asm_utils.S b/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/asm_utils.S
new file mode 100644
index 0000000000..f046915404
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/asm_utils.S
@@ -0,0 +1,65 @@
+/*
+ * asm_utils.s
+ *
+ * $Id$
+ *
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ *
+ * This file contains the low-level support for moving exception
+ * handling code to the appropriate location.
+ *
+ */
+
+#include <libcpu/cpu.h>
+#include <libcpu/io.h>
+#include <rtems/score/targopts.h>
+#include "asm.h"
+
+ .globl codemove
+codemove:
+ .type codemove,@function
+/* r3 dest, r4 src, r5 length in bytes, r6 cachelinesize */
+ cmplw cr1,r3,r4
+ addi r0,r5,3
+ srwi. r0,r0,2
+ beq cr1,4f /* In place copy is not necessary */
+ beq 7f /* Protect against 0 count */
+ mtctr r0
+ bge cr1,2f
+
+ la r8,-4(r4)
+ la r7,-4(r3)
+1: lwzu r0,4(r8)
+ stwu r0,4(r7)
+ bdnz 1b
+ b 4f
+
+2: slwi r0,r0,2
+ add r8,r4,r0
+ add r7,r3,r0
+3: lwzu r0,-4(r8)
+ stwu r0,-4(r7)
+ bdnz 3b
+
+/* Now flush the cache: note that we must start from a cache aligned
+ * address. Otherwise we might miss one cache line.
+ */
+4: cmpwi r6,0
+ add r5,r3,r5
+ beq 7f /* Always flush prefetch queue in any case */
+ subi r0,r6,1
+ andc r3,r3,r0
+ mr r4,r3
+5: cmplw r4,r5
+ dcbst 0,r4
+ add r4,r4,r6
+ blt 5b
+ sync /* Wait for all dcbst to complete on bus */
+ mr r4,r3
+6: cmplw r4,r5
+ icbi 0,r4
+ add r4,r4,r6
+ blt 6b
+7: sync /* Wait for all icbi to complete on bus */
+ isync
+ blr
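+
+/*
+ * Seen from C, codemove behaves roughly as the hypothetical prototype
+ * below (the actual C declaration, if any, is not in this file):
+ *
+ *   void codemove(void *dest, const void *src,
+ *                 unsigned int size, unsigned int cache_line_size);
+ *
+ * For instance, raw_exception.c copies an exception prologue down to its
+ * vector (handler_code and handler_size are placeholders here):
+ *
+ *   codemove((void *)0x0500, handler_code, handler_size, PPC_CACHE_ALIGNMENT);
+ *
+ * The destination range is flushed from the data cache (dcbst) and
+ * invalidated in the instruction cache (icbi) so the copied code can be
+ * fetched safely.
+ */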
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.c b/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.c
new file mode 100644
index 0000000000..cd8274e2e1
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.c
@@ -0,0 +1,195 @@
+/*
+ * raw_exception.c - This file contains the implementation of C functions to
+ * instantiate 60x ppc primary exception entries.
+ * More detailed information can be found on Motorola's
+ * website, and more precisely in the following book:
+ *
+ * MPC750
+ * RISC Microprocessor User's Manual
+ * Motorola REF : MPC750UM/AD 8/97
+ *
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+#include <rtems/score/targopts.h>
+#include <rtems/score/ppc.h>
+#include <rtems/system.h>
+#include <rtems/score/cpu.h>
+#include <libcpu/raw_exception.h>
+#include <libcpu/cpu.h>
+
+static rtems_raw_except_connect_data* raw_except_table;
+static rtems_raw_except_connect_data default_raw_except_entry;
+static rtems_raw_except_global_settings* local_settings;
+
+int mpc750_vector_is_valid(rtems_vector vector)
+{
+ switch(vector) {
+ case ASM_RESET_VECTOR: /* fall through */
+ case ASM_MACH_VECTOR:
+ case ASM_PROT_VECTOR:
+ case ASM_ISI_VECTOR:
+ case ASM_EXT_VECTOR:
+ case ASM_ALIGN_VECTOR:
+ case ASM_PROG_VECTOR:
+ case ASM_FLOAT_VECTOR:
+ case ASM_DEC_VECTOR:
+ case ASM_SYS_VECTOR:
+ case ASM_TRACE_VECTOR:
+ case ASM_ADDR_VECTOR:
+ case ASM_SYSMGMT_VECTOR:
+ case ASM_ITM_VECTOR:
+ return 1;
+ default: return 0;
+ }
+}
+
+int mpc604_vector_is_valid(rtems_vector vector)
+{
+ /*
+ * Please fill this for MVME2307
+ */
+ printk("Please complete libcpu/powerpc/XXX/raw_exception.c\n");
+ return 0;
+}
+
+int mpc60x_set_exception (const rtems_raw_except_connect_data* except)
+{
+ unsigned int level;
+
+ if (current_ppc_cpu == PPC_750) {
+ if (!mpc750_vector_is_valid(except->exceptIndex)){
+ return 0;
+ }
+ goto exception_ok;
+ }
+ if (current_ppc_cpu == PPC_604) {
+ if (!mpc604_vector_is_valid(except->exceptIndex)){
+ return 0;
+ }
+ goto exception_ok;
+ }
+ printk("Please complete libcpu/powerpc/XXX/raw_exception.c\n");
+ return 0;
+
+exception_ok:
+ /*
+ * Check if default handler is actually connected. If not issue an error.
+ * You must first get the current handler via mpc60x_get_current_exception
+ * and then disconnect it using mpc60x_delete_exception.
+ * RATIONALE : to always have the same transition by forcing the user
+ * to get the previous handler before accepting to disconnect.
+ */
+  if (memcmp(mpc60x_get_vector_addr(except->exceptIndex),
+             (void*)default_raw_except_entry.hdl.raw_hdl,
+             default_raw_except_entry.hdl.raw_hdl_size)) {
+ return 0;
+ }
+
+ _CPU_ISR_Disable(level);
+
+ raw_except_table [except->exceptIndex] = *except;
+ codemove((void*)mpc60x_get_vector_addr(except->exceptIndex),
+ except->hdl.raw_hdl,
+ except->hdl.raw_hdl_size,
+ PPC_CACHE_ALIGNMENT);
+ except->on(except);
+
+ _CPU_ISR_Enable(level);
+ return 1;
+}
+
+int mpc60x_get_current_exception (rtems_raw_except_connect_data* except)
+{
+ if (!mpc750_vector_is_valid(except->exceptIndex)){
+ return 0;
+ }
+
+ *except = raw_except_table [except->exceptIndex];
+
+ return 1;
+}
+
+int mpc60x_delete_exception (const rtems_raw_except_connect_data* except)
+{
+ unsigned int level;
+
+ if (!mpc750_vector_is_valid(except->exceptIndex)){
+ return 0;
+ }
+ /*
+ * Check if handler passed is actually connected. If not issue an error.
+ * You must first get the current handler via mpc60x_get_current_exception
+ * and then disconnect it using mpc60x_delete_exception.
+ * RATIONALE : to always have the same transition by forcing the user
+ * to get the previous handler before accepting to disconnect.
+ */
+ if (memcmp(mpc60x_get_vector_addr(except->exceptIndex),
+ (void*)except->hdl.raw_hdl,
+ except->hdl.raw_hdl_size)) {
+ return 0;
+ }
+ _CPU_ISR_Disable(level);
+
+ except->off(except);
+ codemove((void*)mpc60x_get_vector_addr(except->exceptIndex),
+ default_raw_except_entry.hdl.raw_hdl,
+ default_raw_except_entry.hdl.raw_hdl_size,
+ PPC_CACHE_ALIGNMENT);
+
+
+ raw_except_table[except->exceptIndex] = default_raw_except_entry;
+ raw_except_table[except->exceptIndex].exceptIndex = except->exceptIndex;
+
+ _CPU_ISR_Enable(level);
+
+ return 1;
+}
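+
+/*
+ * Illustrative replacement sequence (a sketch only; my_prologue and
+ * my_prologue_size are placeholders, and error handling is elided):
+ *
+ *   rtems_raw_except_connect_data previous, mine;
+ *
+ *   previous.exceptIndex = ASM_DEC_VECTOR;
+ *   (void) mpc60x_get_current_exception(&previous);
+ *   (void) mpc60x_delete_exception(&previous);
+ *
+ *   mine = previous;
+ *   mine.hdl.raw_hdl      = my_prologue;
+ *   mine.hdl.raw_hdl_size = my_prologue_size;
+ *   (void) mpc60x_set_exception(&mine);
+ *
+ * This mirrors the rationale above: mpc60x_set_exception() only copies a
+ * new handler in while the default handler is installed, so the current
+ * one must be fetched and deleted first.
+ */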
+
+/*
+ * Exception global init.
+ */
+int mpc60x_init_exceptions (rtems_raw_except_global_settings* config)
+{
+ unsigned i;
+ unsigned int level;
+
+ /*
+ * store various accelerators
+ */
+ raw_except_table = config->rawExceptHdlTbl;
+ local_settings = config;
+ default_raw_except_entry = config->defaultRawEntry;
+
+ _CPU_ISR_Disable(level);
+
+ for (i=0; i <= LAST_VALID_EXC; i++) {
+ if (!mpc750_vector_is_valid(i)){
+ continue;
+ }
+ codemove((void*)mpc60x_get_vector_addr(i),
+ raw_except_table[i].hdl.raw_hdl,
+ raw_except_table[i].hdl.raw_hdl_size,
+ PPC_CACHE_ALIGNMENT);
+ if (raw_except_table[i].hdl.raw_hdl != default_raw_except_entry.hdl.raw_hdl) {
+ raw_except_table[i].on(&raw_except_table[i]);
+ }
+ else {
+ raw_except_table[i].off(&raw_except_table[i]);
+ }
+ }
+ _CPU_ISR_Enable(level);
+
+ return 1;
+}
+
+int mpc60x_get_exception_config (rtems_raw_except_global_settings** config)
+{
+ *config = local_settings;
+ return 1;
+}
+
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.h b/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.h
new file mode 100644
index 0000000000..f6542b9dfe
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.h
@@ -0,0 +1,168 @@
+/*
+ * raw_exception.h
+ *
+ * This file contains the C interface used to
+ * instantiate 60x ppc primary exception entries.
+ * More detailed information can be found on Motorola's
+ * website, and more precisely in the following book:
+ *
+ * MPC750
+ * RISC Microprocessor User's Manual
+ * Motorola REF : MPC750UM/AD 8/97
+ *
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#ifndef _LIBCPU_MCP750_EXCEPTION_RAW_EXCEPTION_H
+#define _LIBCPU_MCP750_EXCEPTION_RAW_EXCEPTION_H
+
+/*
+ * Exception Vectors as defined in the MCP750 manual
+ */
+
+#define ASM_RESET_VECTOR 0x01
+#define ASM_MACH_VECTOR 0x02
+#define ASM_PROT_VECTOR 0x03
+#define ASM_ISI_VECTOR 0x04
+#define ASM_EXT_VECTOR 0x05
+#define ASM_ALIGN_VECTOR 0x06
+#define ASM_PROG_VECTOR 0x07
+#define ASM_FLOAT_VECTOR 0x08
+#define ASM_DEC_VECTOR 0x09
+#define ASM_SYS_VECTOR 0x0C
+#define ASM_TRACE_VECTOR 0x0D
+#define ASM_ADDR_VECTOR 0x13
+#define ASM_SYSMGMT_VECTOR 0x14
+#define ASM_ITM_VECTOR 0x17
+#define LAST_VALID_EXC ASM_ITM_VECTOR
+
+/*
+ * Vector offsets as defined in the MCP750 manual
+ */
+
+#define ASM_RESET_VECTOR_OFFSET (ASM_RESET_VECTOR << 8)
+#define ASM_MACH_VECTOR_OFFSET (ASM_MACH_VECTOR << 8)
+#define ASM_PROT_VECTOR_OFFSET (ASM_PROT_VECTOR << 8)
+#define ASM_ISI_VECTOR_OFFSET (ASM_ISI_VECTOR << 8)
+#define ASM_EXT_VECTOR_OFFSET (ASM_EXT_VECTOR << 8)
+#define ASM_ALIGN_VECTOR_OFFSET (ASM_ALIGN_VECTOR << 8)
+#define ASM_PROG_VECTOR_OFFSET (ASM_PROG_VECTOR << 8)
+#define ASM_FLOAT_VECTOR_OFFSET (ASM_FLOAT_VECTOR << 8)
+#define ASM_DEC_VECTOR_OFFSET (ASM_DEC_VECTOR << 8)
+#define ASM_SYS_VECTOR_OFFSET (ASM_SYS_VECTOR << 8)
+#define ASM_TRACE_VECTOR_OFFSET (ASM_TRACE_VECTOR << 8)
+#define ASM_ADDR_VECTOR_OFFSET (ASM_ADDR_VECTOR << 8)
+#define ASM_SYSMGMT_VECTOR_OFFSET (ASM_SYSMGMT_VECTOR << 8)
+#define ASM_ITM_VECTOR_OFFSET (ASM_ITM_VECTOR << 8)
+
+
+#ifndef ASM
+
+/*
+ * Type definition for raw exceptions.
+ */
+
+typedef unsigned char rtems_vector;
+struct __rtems_raw_except_connect_data__;
+typedef void (*rtems_raw_except_func) (void);
+typedef unsigned char rtems_raw_except_hdl_size;
+
+typedef struct {
+ rtems_vector vector;
+ rtems_raw_except_func raw_hdl;
+ rtems_raw_except_hdl_size raw_hdl_size;
+}rtems_raw_except_hdl;
+
+typedef void (*rtems_raw_except_enable) (const struct __rtems_raw_except_connect_data__*);
+typedef void (*rtems_raw_except_disable) (const struct __rtems_raw_except_connect_data__*);
+typedef int (*rtems_raw_except_is_enabled) (const struct __rtems_raw_except_connect_data__*);
+
+typedef struct __rtems_raw_except_connect_data__{
+ /*
+ * Exception vector (As defined in the manual)
+ */
+ rtems_vector exceptIndex;
+ /*
+ * Exception raw handler. See comment on handler properties below in function prototype.
+ */
+ rtems_raw_except_hdl hdl;
+ /*
+   * Function for enabling raw exceptions. In order to be consistent
+   * with the fact that the raw connection can be defined in the
+   * libcpu library, this library should have no knowledge of
+   * board specific hardware to manage exceptions; thus the
+   * "on" routine must enable the exception at processor level only.
+ *
+ */
+ rtems_raw_except_enable on;
+ /*
+   * Function for disabling raw exceptions. In order to be consistent
+   * with the fact that the raw connection can be defined in the
+   * libcpu library, this library should have no knowledge of
+   * board specific hardware to manage exceptions; thus the
+   * "off" routine must disable the exception both at device and PIC level.
+ *
+ */
+ rtems_raw_except_disable off;
+ /*
+   * Function to check whether the exception is currently enabled
+ */
+ rtems_raw_except_is_enabled isOn;
+}rtems_raw_except_connect_data;
+
+typedef struct {
+ /*
+ * size of all the table fields (*Tbl) described below.
+ */
+ unsigned int exceptSize;
+ /*
+ * Default handler used when disconnecting exceptions.
+ */
+ rtems_raw_except_connect_data defaultRawEntry;
+ /*
+   * Table containing the initial/current values.
+ */
+ rtems_raw_except_connect_data* rawExceptHdlTbl;
+}rtems_raw_except_global_settings;
+
+/*
+ * C callable function to set up one raw exception entry
+ */
+extern int mpc60x_set_exception (const rtems_raw_except_connect_data*);
+
+/*
+ * C callable function to get the current raw exception entry
+ */
+extern int mpc60x_get_current_exception (rtems_raw_except_connect_data*);
+
+/*
+ * C callable function to remove one raw exception entry
+ */
+extern int mpc60x_delete_exception (const rtems_raw_except_connect_data*);
+
+/*
+ * C callable function to check whether a vector is valid
+ */
+ */
+extern int mpc750_vector_is_valid(rtems_vector vector);
+
+inline static void* mpc60x_get_vector_addr(rtems_vector vector)
+{
+ return ((void*) (((unsigned) vector) << 8));
+}
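+/*
+ * For example, mpc60x_get_vector_addr(ASM_EXT_VECTOR) yields
+ * (void *) 0x00000500, the external interrupt vector, since each
+ * primary exception entry sits at (vector << 8).
+ */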
+/*
+ * Exception global init.
+ */
+extern int mpc60x_init_exceptions (rtems_raw_except_global_settings* config);
+extern int mpc60x_get_exception_config (rtems_raw_except_global_settings** config);
+
+# endif /* ASM */
+
+#endif
+
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/Makefile.in b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/Makefile.in
new file mode 100644
index 0000000000..44278afbbc
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/Makefile.in
@@ -0,0 +1,79 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/mpc6xx/mmu
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+PGM = ${ARCH}/mmu.rel
+
+# C source names, if any, go here -- minus the .c
+C_PIECES = bat
+C_FILES = $(C_PIECES:%=%.c)
+C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
+
+H_FILES = $(srcdir)/bat.h
+
+# Assembly source names, if any, go here -- minus the .S
+S_PIECES = mmuAsm
+S_FILES = $(S_PIECES:%=%.S)
+S_O_FILES = $(S_FILES:%.S=${ARCH}/%.o)
+
+SRCS = $(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES)
+OBJS = $(C_O_FILES) $(CC_O_FILES) $(S_O_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
+
+INSTALLDIRS = $(PROJECT_INCLUDE)/libcpu
+
+$(INSTALLDIRS):
+ @$(mkinstalldirs) $(INSTALLDIRS)
+
+#
+# (OPTIONAL) Add local stuff here using +=
+#
+
+DEFINES +=
+CPPFLAGS +=
+CFLAGS +=
+
+LD_PATHS +=
+LD_LIBS +=
+LDFLAGS +=
+
+#
+# Add your list of files to delete here. The config files
+# already know how to delete some stuff, so you may want
+# to just run 'make clean' first to see what gets missed.
+# 'make clobber' already includes 'make clean'
+#
+
+CLEAN_ADDITIONS +=
+CLOBBER_ADDITIONS +=
+
+${PGM}: ${OBJS}
+ $(make-rel)
+
+preinstall: $(INSTALLDIRS) $(H_FILES)
+ @$(INSTALL_CHANGE) -m 644 $(H_FILES) $(PROJECT_INCLUDE)/libcpu
+
+all: ${ARCH} $(SRCS) preinstall $(PGM)
+
+# the .rel file built here will be put into libbsp.a by ../wrapup/Makefile
+install: all
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c
new file mode 100644
index 0000000000..e39ab96ec5
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c
@@ -0,0 +1,64 @@
+/*
+ * bat.c
+ *
+ * This file contains the implementation of C functions to
+ * instantiate 60x/7xx ppc Block Address Translation (BAT) registers.
+ * More detailed information can be found on Motorola's
+ * website, and more precisely in the following book:
+ *
+ * MPC750
+ * RISC Microprocessor User's Manual
+ * Motorola REF : MPC750UM/AD 8/97
+ *
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <libcpu/bat.h>
+
+typedef union { /* BAT register values to be loaded */
+ BAT bat;
+ unsigned int word[2];
+}ubat;
+
+typedef struct batrange { /* stores address ranges mapped by BATs */
+ unsigned long start;
+ unsigned long limit;
+ unsigned long phys;
+}batrange;
+
+batrange bat_addrs[4];
+
+void setdbat(int bat_index, unsigned long virt, unsigned long phys,
+ unsigned int size, int flags)
+{
+ unsigned int bl;
+ int wimgxpp;
+ ubat bat;
+
+ bl = (size >> 17) - 1;
+ /* 603, 604, etc. */
+ wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
+ | _PAGE_COHERENT | _PAGE_GUARDED);
+ wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
+ bat.word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
+ bat.word[1] = phys | wimgxpp;
+ if (flags & _PAGE_USER)
+ bat.bat.batu.vp = 1;
+ bat_addrs[bat_index].start = virt;
+ bat_addrs[bat_index].limit = virt + ((bl + 1) << 17) - 1;
+ bat_addrs[bat_index].phys = phys;
+ switch (bat_index) {
+ case 1 : asm_setdbat1(bat.word[0], bat.word[1]); break;
+ case 2 : asm_setdbat2(bat.word[0], bat.word[1]); break;
+ case 3 : asm_setdbat3(bat.word[0], bat.word[1]); break;
+ default: printk("bat.c : invalid BAT bat_index\n");
+ }
+}
+
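+/*
+ * Usage sketch (addresses and size below are examples, not a board
+ * requirement): map 256MB of PCI/ISA I/O space 1:1 through DBAT1,
+ * cache-inhibited, guarded and read/write:
+ *
+ *   setdbat(1, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
+ *
+ * size must be a power of two between 128KB and 256MB; the bl field is
+ * derived from it above as (size >> 17) - 1.
+ */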
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.h b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.h
new file mode 100644
index 0000000000..616f6182a4
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.h
@@ -0,0 +1,40 @@
+/*
+ * bat.h
+ *
+ * This file contains the declarations of C functions to
+ * instantiate 60x/7xx ppc Block Address Translation (BAT) registers.
+ * More detailed information can be found on Motorola's
+ * website, and more precisely in the following book:
+ *
+ * MPC750
+ * RISC Microprocessor User's Manual
+ * Motorola REF : MPC750UM/AD 8/97
+ *
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#ifndef LIBCPU_MCP750_MMU_BAT_H
+#define LIBCPU_MCP750_MMU_BAT_H
+
+#include <libcpu/mmu.h>
+#include <libcpu/pgtable.h>
+#include <bsp/consoleIo.h>
+
+#define IO_PAGE (_PAGE_NO_CACHE | _PAGE_GUARDED | _PAGE_RW)
+
+extern void setdbat(int bat_index, unsigned long virt, unsigned long phys,
+ unsigned int size, int flags);
+
+extern void asm_setdbat1(unsigned int upperPart, unsigned int lowerPart);
+extern void asm_setdbat2(unsigned int upperPart, unsigned int lowerPart);
+extern void asm_setdbat3(unsigned int upperPart, unsigned int lowerPart);
+extern void asm_setdbat4(unsigned int upperPart, unsigned int lowerPart);
+
+#endif /* LIBCPU_MCP750_MMU_BAT_H */
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S
new file mode 100644
index 0000000000..a0f298e5c3
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S
@@ -0,0 +1,224 @@
+/*
+ * mmuAsm.S
+ *
+ * $Id$
+ *
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ *
+ * This file contains the low-level support for various MMU
+ * features.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ */
+
+#include <libcpu/cpu.h>
+#include <libcpu/io.h>
+#include <rtems/score/targopts.h>
+#include "asm.h"
+
+ .globl asm_setdbat1
+ .type asm_setdbat1,@function
+asm_setdbat1:
+ mtspr DBAT1U, r3
+ mtspr DBAT1L, r4
+ SYNC
+ blr
+
+ .globl asm_setdbat2
+ .type asm_setdbat2,@function
+asm_setdbat2:
+ mtspr DBAT2U, r3
+ mtspr DBAT2L, r4
+ SYNC
+ blr
+
+ .globl asm_setdbat3
+ .type asm_setdbat3,@function
+asm_setdbat3:
+ mtspr DBAT3U, r3
+ mtspr DBAT3L, r4
+ SYNC
+ blr
+
+ .globl L1_caches_enables
+ .type L1_caches_enables, @function
+
+L1_caches_enables:
+ /*
+ * Enable caches and 604-specific features if necessary.
+ */
+ mfspr r9,PVR
+ rlwinm r9,r9,16,16,31
+ cmpi 0,r9,1
+ beq 4f /* not needed for 601 */
+ mfspr r11,HID0
+ andi. r0,r11,HID0_DCE
+ ori r11,r11,HID0_ICE|HID0_DCE
+ ori r8,r11,HID0_ICFI
+ bne 3f /* don't invalidate the D-cache */
+ ori r8,r8,HID0_DCI /* unless it wasn't enabled */
+3:
+ sync
+ mtspr HID0,r8 /* enable and invalidate caches */
+ sync
+ mtspr HID0,r11 /* enable caches */
+ sync
+ isync
+ cmpi 0,r9,4 /* check for 604 */
+ cmpi 1,r9,9 /* or 604e */
+ cmpi 2,r9,10 /* or mach5 */
+ cror 2,2,6
+ cror 2,2,10
+ bne 4f
+ ori r11,r11,HID0_SIED|HID0_BHTE /* for 604[e], enable */
+ bne 2,5f
+ ori r11,r11,HID0_BTCD
+5: mtspr HID0,r11 /* superscalar exec & br history tbl */
+4:
+ blr
+
+ .globl get_L2CR
+ .type get_L2CR, @function
+get_L2CR:
+ /* Make sure this is a 750 chip */
+ mfspr r3,PVR
+ rlwinm r3,r3,16,16,31
+ cmplwi r3,0x0008
+ li r3,0
+ bnelr
+
+ /* Return the L2CR contents */
+ mfspr r3,L2CR
+ blr
+
+ .globl set_L2CR
+ .type set_L2CR, @function
+set_L2CR:
+ /* Usage:
+ * When setting the L2CR register, you must do a few special things.
+ * If you are enabling the cache, you must perform a global invalidate.
+ * If you are disabling the cache, you must flush the cache contents first.
+ * This routine takes care of doing these things. When first
+ * enabling the cache, make sure you pass in the L2CR you want, as well as
+ * passing in the global invalidate bit set. A global invalidate will
+ * only be performed if the L2I bit is set in applyThis. When enabling
+ * the cache, you should also set the L2E bit in applyThis. If you
+ * want to modify the L2CR contents after the cache has been enabled,
+ * the recommended procedure is to first call set_L2CR(0) to disable
+ * the cache and then call it again with the new values for L2CR. Examples:
+ *
+ *   set_L2CR(0) - disables the cache
+ *   set_L2CR(0xb9A14000) - enables my G3 MCP750 card:
+ * - L2E set to turn on the cache
+ * - L2SIZ set to 1MB
+ * - L2CLK set to %2
+ * - L2RAM set to pipelined synchronous late-write
+ * - L2I set to perform a global invalidation
+ * - L2OH set to 1 nS
+ *
+ * A similar call should work for your card. You need to know the correct
+ * settings for your card and then place them in the fields I have outlined
+ * above. Other fields support optional features, such as L2DO which caches
+ * only data, or L2TS which causes cache pushes from the L1 cache to go to
+ * the L2 cache instead of to main memory.
+ */
+
+ /* Make sure this is a 750 chip */
+ mfspr r4,PVR
+ rlwinm r4,r4,16,16,31
+ cmplwi r4,0x0008
+ beq thisIs750
+ li r3,-1
+ blr
+
+thisIs750:
+ /* Get the current enable bit of the L2CR into r4 */
+ mfspr r4,L2CR
+ rlwinm r4,r4,0,0,0
+
+ /* See if we want to perform a global inval this time. */
+ rlwinm r6,r3,0,10,10 /* r6 contains the new invalidate bit */
+ rlwinm. r5,r3,0,0,0 /* r5 contains the new enable bit */
+ rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */
+ rlwinm r3,r3,0,1,31 /* Turn off the enable bit */
+ or r3,r3,r4 /* Keep the enable bit the same as it was for now. */
+ bne dontDisableCache /* Only disable the cache if L2CRApply has the enable bit off */
+
+disableCache:
+ /* Disable the cache. First, we turn off data relocation. */
+ mfmsr r7
+ rlwinm r4,r7,0,28,26 /* Turn off DR bit */
+ rlwinm r4,r4,0,17,15 /* Turn off EE bit - an external exception while we are flushing
+ the cache is fatal (comment this line and see!) */
+ sync
+ mtmsr r4
+ sync
+
+ /*
+ Now, read the first 2MB of memory to put new data in the cache.
+ (Actually we only need the size of the L2 cache plus
+ the size of the L1 cache, but 2MB will cover everything just to be safe).
+ */
+ lis r4,0x0001
+ mtctr r4
+ li r4,0
+loadLoop:
+ lwzx r0,r0,r4
+ addi r4,r4,0x0020 /* Go to start of next cache line */
+ bdnz loadLoop
+
+ /* Now, flush the first 2MB of memory */
+ lis r4,0x0001
+ mtctr r4
+ li r4,0
+ sync
+flushLoop:
+ dcbf r0,r4
+ addi r4,r4,0x0020 /* Go to start of next cache line */
+ bdnz flushLoop
+
+ /* Turn off the L2CR enable bit. */
+ rlwinm r3,r3,0,1,31
+
+ /* Reenable data relocation. */
+ sync
+ mtmsr r7
+ sync
+
+dontDisableCache:
+ /* Set up the L2CR configuration bits */
+ sync
+ mtspr L2CR,r3
+ sync
+ cmplwi r6,0
+ beq noInval
+
+ /* Perform a global invalidation */
+ oris r3,r3,0x0020
+ sync
+ mtspr 1017,r3
+ sync
+invalCompleteLoop: /* Wait for the invalidation to complete */
+ mfspr r3,1017
+ rlwinm. r4,r3,0,31,31
+ bne invalCompleteLoop
+
+ rlwinm r3,r3,0,11,9; /* Turn off the L2I bit */
+ sync
+ mtspr L2CR,r3
+ sync
+
+noInval:
+ /* See if we need to enable the cache */
+ cmplwi r5,0
+ beqlr
+
+enableCache:
+ /* Enable the cache */
+ oris r3,r3,0x8000
+ mtspr L2CR,r3
+ sync
+ blr
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/wrapup/Makefile.in b/c/src/lib/libcpu/powerpc/mpc6xx/wrapup/Makefile.in
new file mode 100644
index 0000000000..577d6b09a6
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/wrapup/Makefile.in
@@ -0,0 +1,62 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../../..
+subdir = powerpc/mpc6xx/wrapup
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+# PROC_SPECIFIC_O_PIECES = exceptions mmu clock
+PROC_SPECIFIC_O_PIECES = exceptions mmu
+GENERIC_PIECES =
+
+# bummer; have to use $foreach since % pattern subst rules only replace 1x
+OBJS = $(foreach piece, $(PROC_SPECIFIC_O_PIECES), \
+ ../../mpc6xx/$(piece)/$(ARCH)/*.o) $(foreach piece, \
+ $(GENERIC_PIECES), ../../mpc6xx/$(piece)/$(ARCH)/*.o)
+LIB = $(ARCH)/libcpuspec.a
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/lib.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+
+#
+# (OPTIONAL) Add local stuff here using +=
+#
+
+DEFINES +=
+CPPFLAGS +=
+CFLAGS +=
+
+LD_PATHS +=
+LD_LIBS +=
+LDFLAGS +=
+
+#
+# Add your list of files to delete here. The config files
+# already know how to delete some stuff, so you may want
+# to just run 'make clean' first to see what gets missed.
+# 'make clobber' already includes 'make clean'
+#
+
+CLEAN_ADDITIONS +=
+CLOBBER_ADDITIONS +=
+
+$(LIB): ${OBJS}
+ echo $(OBJ)
+ $(make-library)
+ cp $(LIB) ..
+
+all: ${ARCH} $(SRCS) $(LIB)
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c b/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c
new file mode 100644
index 0000000000..e1c6eac4fd
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c
@@ -0,0 +1,116 @@
+/*
+ * PowerPC CPU Dependent Source
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/context.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/interr.h>
+
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ *    thread_dispatch - address of dispatching routine
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+ _CPU_Table = *cpu_table;
+}
+
+/*PAGE
+ *
+ * _CPU_Context_Initialize
+ */
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ unsigned32 *stack_base,
+ unsigned32 size,
+ unsigned32 new_level,
+ void *entry_point,
+ boolean is_fp
+)
+{
+ unsigned32 msr_value;
+ unsigned32 sp;
+
+ sp = (unsigned32)stack_base + size - CPU_MINIMUM_STACK_FRAME_SIZE;
+ *((unsigned32 *)sp) = 0;
+ the_context->gpr1 = sp;
+
+ _CPU_MSR_GET( msr_value );
+
+ if (!(new_level & CPU_MODES_INTERRUPT_MASK)) {
+ msr_value |= MSR_EE;
+ }
+ else {
+ msr_value &= ~MSR_EE;
+ }
+
+ the_context->msr = msr_value;
+
+ /*
+ * The FP bit of the MSR should only be enabled if this is a floating
+ * point task. Unfortunately, the vfprintf_r routine in newlib
+ * ends up pushing a floating point register regardless of whether or
+ * not a floating point number is being printed. Serious restructuring
+ * of vfprintf.c will be required to avoid this behavior. At this
+ * time (7 July 1997), this restructuring is not being done.
+ */
+
+ /*if ( is_fp ) */
+ the_context->msr |= PPC_MSR_FP;
+
+ the_context->pc = (unsigned32)entry_point;
+}
+
+
+
+/*PAGE
+ *
+ * _CPU_Install_interrupt_stack
+ */
+
+void _CPU_Install_interrupt_stack( void )
+{
+}
+
+
+
+
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S b/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
new file mode 100644
index 0000000000..213e094fa6
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
@@ -0,0 +1,396 @@
+
+/* cpu_asm.s 1.1 - 95/12/04
+ *
+ * This file contains the assembly code for the PowerPC implementation
+ * of RTEMS.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu_asm.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+/*
+ * Offsets for various Contexts
+ */
+ .set GP_1, 0
+ .set GP_2, (GP_1 + 4)
+ .set GP_13, (GP_2 + 4)
+ .set GP_14, (GP_13 + 4)
+
+ .set GP_15, (GP_14 + 4)
+ .set GP_16, (GP_15 + 4)
+ .set GP_17, (GP_16 + 4)
+ .set GP_18, (GP_17 + 4)
+
+ .set GP_19, (GP_18 + 4)
+ .set GP_20, (GP_19 + 4)
+ .set GP_21, (GP_20 + 4)
+ .set GP_22, (GP_21 + 4)
+
+ .set GP_23, (GP_22 + 4)
+ .set GP_24, (GP_23 + 4)
+ .set GP_25, (GP_24 + 4)
+ .set GP_26, (GP_25 + 4)
+
+ .set GP_27, (GP_26 + 4)
+ .set GP_28, (GP_27 + 4)
+ .set GP_29, (GP_28 + 4)
+ .set GP_30, (GP_29 + 4)
+
+ .set GP_31, (GP_30 + 4)
+ .set GP_CR, (GP_31 + 4)
+ .set GP_PC, (GP_CR + 4)
+ .set GP_MSR, (GP_PC + 4)
+
+ .set FP_0, 0
+ .set FP_1, (FP_0 + 4)
+ .set FP_2, (FP_1 + 4)
+ .set FP_3, (FP_2 + 4)
+ .set FP_4, (FP_3 + 4)
+ .set FP_5, (FP_4 + 4)
+ .set FP_6, (FP_5 + 4)
+ .set FP_7, (FP_6 + 4)
+ .set FP_8, (FP_7 + 4)
+ .set FP_9, (FP_8 + 4)
+ .set FP_10, (FP_9 + 4)
+ .set FP_11, (FP_10 + 4)
+ .set FP_12, (FP_11 + 4)
+ .set FP_13, (FP_12 + 4)
+ .set FP_14, (FP_13 + 4)
+ .set FP_15, (FP_14 + 4)
+ .set FP_16, (FP_15 + 4)
+ .set FP_17, (FP_16 + 4)
+ .set FP_18, (FP_17 + 4)
+ .set FP_19, (FP_18 + 4)
+ .set FP_20, (FP_19 + 4)
+ .set FP_21, (FP_20 + 4)
+ .set FP_22, (FP_21 + 4)
+ .set FP_23, (FP_22 + 4)
+ .set FP_24, (FP_23 + 4)
+ .set FP_25, (FP_24 + 4)
+ .set FP_26, (FP_25 + 4)
+ .set FP_27, (FP_26 + 4)
+ .set FP_28, (FP_27 + 4)
+ .set FP_29, (FP_28 + 4)
+ .set FP_30, (FP_29 + 4)
+ .set FP_31, (FP_30 + 4)
+ .set FP_FPSCR, (FP_31 + 4)
+
+ .set IP_LINK, 0
+ .set IP_0, (IP_LINK + 8)
+ .set IP_2, (IP_0 + 4)
+
+ .set IP_3, (IP_2 + 4)
+ .set IP_4, (IP_3 + 4)
+ .set IP_5, (IP_4 + 4)
+ .set IP_6, (IP_5 + 4)
+
+ .set IP_7, (IP_6 + 4)
+ .set IP_8, (IP_7 + 4)
+ .set IP_9, (IP_8 + 4)
+ .set IP_10, (IP_9 + 4)
+
+ .set IP_11, (IP_10 + 4)
+ .set IP_12, (IP_11 + 4)
+ .set IP_13, (IP_12 + 4)
+ .set IP_28, (IP_13 + 4)
+
+ .set IP_29, (IP_28 + 4)
+ .set IP_30, (IP_29 + 4)
+ .set IP_31, (IP_30 + 4)
+ .set IP_CR, (IP_31 + 4)
+
+ .set IP_CTR, (IP_CR + 4)
+ .set IP_XER, (IP_CTR + 4)
+ .set IP_LR, (IP_XER + 4)
+ .set IP_PC, (IP_LR + 4)
+
+ .set IP_MSR, (IP_PC + 4)
+ .set IP_END, (IP_MSR + 16)
+
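+/*
+ * The GP_ offsets above are intended to mirror the integer
+ * Context_Control laid out in the new exception processing cpu.h; a
+ * sketch of the assumed ordering (field names are indicative only, see
+ * cpu.h for the authoritative definition):
+ *
+ *   typedef struct {
+ *     unsigned32 gpr1;                 GP_1
+ *     unsigned32 gpr2;                 GP_2
+ *     unsigned32 gpr13;                GP_13
+ *     ...                              one word per register up to
+ *     unsigned32 gpr31;                GP_31
+ *     unsigned32 cr;                   GP_CR
+ *     unsigned32 pc;                   GP_PC
+ *     unsigned32 msr;                  GP_MSR
+ *   } Context_Control;
+ */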
+ BEGIN_CODE
+/*
+ * _CPU_Context_save_fp_context
+ *
+ * This routine is responsible for saving the FP context
+ * at *fp_context_ptr. If the pointer to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_save_fp)
+PROC (_CPU_Context_save_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+ stfs f0, FP_0(r3)
+ stfs f1, FP_1(r3)
+ stfs f2, FP_2(r3)
+ stfs f3, FP_3(r3)
+ stfs f4, FP_4(r3)
+ stfs f5, FP_5(r3)
+ stfs f6, FP_6(r3)
+ stfs f7, FP_7(r3)
+ stfs f8, FP_8(r3)
+ stfs f9, FP_9(r3)
+ stfs f10, FP_10(r3)
+ stfs f11, FP_11(r3)
+ stfs f12, FP_12(r3)
+ stfs f13, FP_13(r3)
+ stfs f14, FP_14(r3)
+ stfs f15, FP_15(r3)
+ stfs f16, FP_16(r3)
+ stfs f17, FP_17(r3)
+ stfs f18, FP_18(r3)
+ stfs f19, FP_19(r3)
+ stfs f20, FP_20(r3)
+ stfs f21, FP_21(r3)
+ stfs f22, FP_22(r3)
+ stfs f23, FP_23(r3)
+ stfs f24, FP_24(r3)
+ stfs f25, FP_25(r3)
+ stfs f26, FP_26(r3)
+ stfs f27, FP_27(r3)
+ stfs f28, FP_28(r3)
+ stfs f29, FP_29(r3)
+ stfs f30, FP_30(r3)
+ stfs f31, FP_31(r3)
+ mffs f2
+ stfs f2, FP_FPSCR(r3)
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore_fp_context
+ *
+ * This routine is responsible for restoring the FP context
+ * at *fp_context_ptr. If the pointer to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore_fp)
+PROC (_CPU_Context_restore_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+ lfs f2, FP_FPSCR(r3)
+ mtfsf 255, f2
+ lfs f0, FP_0(r3)
+ lfs f1, FP_1(r3)
+ lfs f2, FP_2(r3)
+ lfs f3, FP_3(r3)
+ lfs f4, FP_4(r3)
+ lfs f5, FP_5(r3)
+ lfs f6, FP_6(r3)
+ lfs f7, FP_7(r3)
+ lfs f8, FP_8(r3)
+ lfs f9, FP_9(r3)
+ lfs f10, FP_10(r3)
+ lfs f11, FP_11(r3)
+ lfs f12, FP_12(r3)
+ lfs f13, FP_13(r3)
+ lfs f14, FP_14(r3)
+ lfs f15, FP_15(r3)
+ lfs f16, FP_16(r3)
+ lfs f17, FP_17(r3)
+ lfs f18, FP_18(r3)
+ lfs f19, FP_19(r3)
+ lfs f20, FP_20(r3)
+ lfs f21, FP_21(r3)
+ lfs f22, FP_22(r3)
+ lfs f23, FP_23(r3)
+ lfs f24, FP_24(r3)
+ lfs f25, FP_25(r3)
+ lfs f26, FP_26(r3)
+ lfs f27, FP_27(r3)
+ lfs f28, FP_28(r3)
+ lfs f29, FP_29(r3)
+ lfs f30, FP_30(r3)
+ lfs f31, FP_31(r3)
+#endif
+ blr
+
+
+/* _CPU_Context_switch
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_switch)
+PROC (_CPU_Context_switch):
+ sync
+ isync
+ /* This assumes that all the registers are in the given order */
+ li r5, 32
+ addi r3,r3,-4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_18+4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stmw r13, GP_13-GP_18(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stw r14, GP_14+4(r3)
+ stw r15, GP_15+4(r3)
+ stw r16, GP_16+4(r3)
+ stw r17, GP_17+4(r3)
+ stwu r18, GP_18+4(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stw r22, GP_22-GP_18(r3)
+ stw r23, GP_23-GP_18(r3)
+ stw r24, GP_24-GP_18(r3)
+ stw r25, GP_25-GP_18(r3)
+ stw r26, GP_26-GP_18(r3)
+ stw r27, GP_27-GP_18(r3)
+ stw r28, GP_28-GP_18(r3)
+ stw r29, GP_29-GP_18(r3)
+ stw r30, GP_30-GP_18(r3)
+ stw r31, GP_31-GP_18(r3)
+#endif
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r0, r4
+#endif
+ mfcr r6
+ stw r6, GP_CR-GP_18(r3)
+ mflr r7
+ stw r7, GP_PC-GP_18(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_18(r3)
+
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_19
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lmw r13, GP_13-GP_19(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwz r15, GP_15(r4)
+ lwz r16, GP_16(r4)
+ lwz r17, GP_17(r4)
+ lwz r18, GP_18(r4)
+ lwzu r19, GP_19(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwz r23, GP_23-GP_19(r4)
+ lwz r24, GP_24-GP_19(r4)
+ lwz r25, GP_25-GP_19(r4)
+ lwz r26, GP_26-GP_19(r4)
+ lwz r27, GP_27-GP_19(r4)
+ lwz r28, GP_28-GP_19(r4)
+ lwz r29, GP_29-GP_19(r4)
+ lwz r30, GP_30-GP_19(r4)
+ lwz r31, GP_31-GP_19(r4)
+#endif
+ lwz r6, GP_CR-GP_19(r4)
+ lwz r7, GP_PC-GP_19(r4)
+ lwz r8, GP_MSR-GP_19(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+
+ blr
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+/*
+ * ACB: Don't worry about cache optimisation here - this is not THAT critical.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore)
+PROC (_CPU_Context_restore):
+ lwz r5, GP_CR(r3)
+ lwz r6, GP_PC(r3)
+ lwz r7, GP_MSR(r3)
+ mtcrf 255, r5
+ mtlr r6
+ mtmsr r7
+ lwz r1, GP_1(r3)
+ lwz r2, GP_2(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ lmw r13, GP_13(r3)
+#else
+ lwz r13, GP_13(r3)
+ lwz r14, GP_14(r3)
+ lwz r15, GP_15(r3)
+ lwz r16, GP_16(r3)
+ lwz r17, GP_17(r3)
+ lwz r18, GP_18(r3)
+ lwz r19, GP_19(r3)
+ lwz r20, GP_20(r3)
+ lwz r21, GP_21(r3)
+ lwz r22, GP_22(r3)
+ lwz r23, GP_23(r3)
+ lwz r24, GP_24(r3)
+ lwz r25, GP_25(r3)
+ lwz r26, GP_26(r3)
+ lwz r27, GP_27(r3)
+ lwz r28, GP_28(r3)
+ lwz r29, GP_29(r3)
+ lwz r30, GP_30(r3)
+ lwz r31, GP_31(r3)
+#endif
+
+ blr
+
diff --git a/c/src/lib/libcpu/powerpc/new_exception_processing/Makefile.in b/c/src/lib/libcpu/powerpc/new_exception_processing/Makefile.in
new file mode 100644
index 0000000000..19f78a0a4b
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/new_exception_processing/Makefile.in
@@ -0,0 +1,90 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../..
+subdir = powerpc/new_exception_processing
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+RELS = ../$(ARCH)/rtems-cpu.rel
+
+# C source names, if any, go here -- minus the .c
+C_PIECES = cpu
+C_FILES = $(C_PIECES:%=%.c)
+C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
+
+ROOT_H_PIECES =
+ROOT_H_FILES = $(ROOT_H_PIECES:%=$(srcdir)/%)
+RTEMS_SCORE_H_PIECES = cpu.h
+RTEMS_SCORE_H_FILES = $(RTEMS_SCORE_H_PIECES:%=$(srcdir)/%)
+H_PIECES = $(ROOT_H_PIECES) $(RTEMS_SCORE_H_PIECES)
+H_FILES = $(H_PIECES:%=$(srcdir)/%)
+I_PIECES = c_isr
+I_FILES = $(I_PIECES:%=$(srcdir)/%.inl)
+
+# Assembly source names, if any, go here -- minus the .S
+S_PIECES = cpu_asm
+S_FILES = $(S_PIECES:%=%.S)
+S_O_FILES = $(S_FILES:%.S=${ARCH}/%.o)
+
+SRCS = $(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES) $(EXTERNAL_H_FILES) \
+ $(I_FILES)
+OBJS = $(C_O_FILES) $(CC_O_FILES) $(S_O_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
+
+INSTALLDIRS = $(PROJECT_INCLUDE)/rtems/score $(PROJECT_INCLUDE)
+
+$(INSTALLDIRS):
+ @$(mkinstalldirs) $(INSTALLDIRS)
+
+#
+# (OPTIONAL) Add local stuff here using +=
+#
+
+DEFINES +=
+CPPFLAGS +=
+CFLAGS += $(CFLAGS_OS_V)
+
+LD_PATHS +=
+LD_LIBS +=
+LDFLAGS +=
+
+#
+# Add your list of files to delete here. The config files
+# already know how to delete some stuff, so you may want
+# to just run 'make clean' first to see what gets missed.
+# 'make clobber' already includes 'make clean'
+#
+
+CLEAN_ADDITIONS +=
+CLOBBER_ADDITIONS +=
+
+../$(ARCH)/rtems-cpu.rel: $(OBJS)
+ test -d ../$(ARCH) || mkdir ../$(ARCH)
+ $(make-rel)
+
+all: ${ARCH} $(SRCS) preinstall $(OBJS) $(RELS)
+
+# Install the program(s), appending _g or _p as appropriate.
+# for include files, just use $(INSTALL_CHANGE)
+install: all
+
+preinstall: ${ARCH}
+ @$(INSTALL_CHANGE) -m 644 $(RTEMS_SCORE_H_FILES) $(I_FILES) $(PROJECT_INCLUDE)/rtems/score
+ @$(INSTALL_CHANGE) -m 644 $(ROOT_H_FILES) $(PROJECT_INCLUDE)
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libcpu/powerpc/new_exception_processing/c_isr.inl b/c/src/lib/libcpu/powerpc/new_exception_processing/c_isr.inl
new file mode 100644
index 0000000000..68f8116fe9
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/new_exception_processing/c_isr.inl
@@ -0,0 +1,9 @@
+RTEMS_INLINE_ROUTINE boolean _ISR_Is_in_progress( void )
+{
+ register unsigned int isr_nesting_level;
+ /*
+   * Read the ISR nesting level from SPRG0 (special purpose register 272)
+ */
+ asm volatile ("mfspr %0, 272" : "=r" (isr_nesting_level));
+ return isr_nesting_level;
+}
diff --git a/c/src/lib/libcpu/powerpc/new_exception_processing/cpu.c b/c/src/lib/libcpu/powerpc/new_exception_processing/cpu.c
new file mode 100644
index 0000000000..e1c6eac4fd
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/new_exception_processing/cpu.c
@@ -0,0 +1,116 @@
+/*
+ * PowerPC CPU Dependent Source
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/context.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/interr.h>
+
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ *    thread_dispatch - address of dispatching routine
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+ _CPU_Table = *cpu_table;
+}
+
+/*PAGE
+ *
+ * _CPU_Context_Initialize
+ */
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ unsigned32 *stack_base,
+ unsigned32 size,
+ unsigned32 new_level,
+ void *entry_point,
+ boolean is_fp
+)
+{
+ unsigned32 msr_value;
+ unsigned32 sp;
+
+ sp = (unsigned32)stack_base + size - CPU_MINIMUM_STACK_FRAME_SIZE;
+ *((unsigned32 *)sp) = 0;
+ the_context->gpr1 = sp;
+
+ _CPU_MSR_GET( msr_value );
+
+ if (!(new_level & CPU_MODES_INTERRUPT_MASK)) {
+ msr_value |= MSR_EE;
+ }
+ else {
+ msr_value &= ~MSR_EE;
+ }
+
+ the_context->msr = msr_value;
+
+ /*
+ * The FP bit of the MSR should only be enabled if this is a floating
+ * point task. Unfortunately, the vfprintf_r routine in newlib
+ * ends up pushing a floating point register regardless of whether or
+ * not a floating point number is being printed. Serious restructuring
+ * of vfprintf.c will be required to avoid this behavior. At this
+ * time (7 July 1997), this restructuring is not being done.
+ */
+
+ /*if ( is_fp ) */
+ the_context->msr |= PPC_MSR_FP;
+
+ the_context->pc = (unsigned32)entry_point;
+}
+
+
+
+/*PAGE
+ *
+ * _CPU_Install_interrupt_stack
+ */
+
+void _CPU_Install_interrupt_stack( void )
+{
+}
+
+
+
+
diff --git a/c/src/lib/libcpu/powerpc/new_exception_processing/cpu.h b/c/src/lib/libcpu/powerpc/new_exception_processing/cpu.h
new file mode 100644
index 0000000000..145e2924eb
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/new_exception_processing/cpu.h
@@ -0,0 +1,979 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the PowerPC
+ * processor.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.h:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rtems/score/ppc.h> /* pick up machine definitions */
+#include <libcpu/cpu.h>
+
+#ifndef ASM
+#include <rtems/score/ppctypes.h>
+#endif
+
+/* conditional compilation parameters */
+
+/*
+ * Should the calls to _Thread_Enable_dispatch be inlined?
+ *
+ * If TRUE, then they are inlined.
+ * If FALSE, then a subroutine call is made.
+ *
+ * Basically this is an example of the classic trade-off of size
+ * versus speed. Inlining the call (TRUE) typically increases the
+ * size of RTEMS while speeding up the enabling of dispatching.
+ * [NOTE: In general, the _Thread_Dispatch_disable_level will
+ * only be 0 or 1 unless you are in an interrupt handler and that
+ * interrupt handler invokes the executive.] When not inlined
+ * something calls _Thread_Enable_dispatch which in turns calls
+ * _Thread_Dispatch. If the enable dispatch is inlined, then
+ * one subroutine call is avoided entirely.]
+ */
+
+#define CPU_INLINE_ENABLE_DISPATCH FALSE
+
+/*
+ * Should the body of the search loops in _Thread_queue_Enqueue_priority
+ * be unrolled one time? In unrolled each iteration of the loop examines
+ * two "nodes" on the chain being searched. Otherwise, only one node
+ * is examined per iteration.
+ *
+ * If TRUE, then the loops are unrolled.
+ * If FALSE, then the loops are not unrolled.
+ *
+ * The primary factor in making this decision is the cost of disabling
+ * and enabling interrupts (_ISR_Flash) versus the cost of rest of the
+ * body of the loop. On some CPUs, the flash is more expensive than
+ * one iteration of the loop body. In this case, it might be desirable
+ * to unroll the loop. It is important to note that on some CPUs, this
+ * code is the longest interrupt disable period in RTEMS. So it is
+ * necessary to strike a balance when setting this parameter.
+ */
+
+#define CPU_UNROLL_ENQUEUE_PRIORITY FALSE
+
+/*
+ * Does RTEMS manage a dedicated interrupt stack in software?
+ *
+ * If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
+ * If FALSE, nothing is done.
+ *
+ * If the CPU supports a dedicated interrupt stack in hardware,
+ * then it is generally the responsibility of the BSP to allocate it
+ * and set it up.
+ *
+ * If the CPU does not support a dedicated interrupt stack, then
+ * the porter has two options: (1) execute interrupts on the
+ * stack of the interrupted task, and (2) have RTEMS manage a dedicated
+ * interrupt stack.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU. Although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
+
+/*
+ * Does this CPU have hardware support for a dedicated interrupt stack?
+ *
+ * If TRUE, then it must be installed during initialization.
+ * If FALSE, then no installation is performed.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU. Although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
+
+/*
+ * Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
+ *
+ * If TRUE, then the memory is allocated during initialization.
+ * If FALSE, then no memory is allocated during initialization.
+ *
+ * This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
+ * or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
+ */
+
+#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
+
+/*
+ * Does the RTEMS invoke the user's ISR with the vector number and
+ * a pointer to the saved interrupt frame (1) or just the vector
+ * number (0)?
+ */
+
+#define CPU_ISR_PASSES_FRAME_POINTER 0
+
+/*
+ * Does the CPU have hardware floating point?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
+ *
+ * If there is a FP coprocessor such as the i387 or mc68881, then
+ * the answer is TRUE.
+ *
+ * The macro name "PPC_HAS_FPU" should be made CPU specific.
+ * It indicates whether or not this CPU model has FP support. For
+ * example, it would be possible to have an i386_nofp CPU model
+ * which set this to false to indicate that you have an i386 without
+ * an i387 and wish to leave floating point support out of RTEMS.
+ */
+
+#if ( PPC_HAS_FPU == 1 )
+#define CPU_HARDWARE_FP TRUE
+#else
+#define CPU_HARDWARE_FP FALSE
+#endif
+
+/*
+ * Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
+ *
+ * So far, the only CPU in which this option has been used is the
+ * HP PA-RISC. The HP C compiler and gcc both implicitly use the
+ * floating point registers to perform integer multiplies. If
+ * a function which you would not expect to use the FP unit DOES,
+ * then one cannot easily predict which tasks will use the FP hardware.
+ * In this case, this option should be TRUE.
+ *
+ * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
+ */
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+
+/*
+ * Should the IDLE task have a floating point context?
+ *
+ * If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
+ * and it has a floating point context which is switched in and out.
+ * If FALSE, then the IDLE task does not have a floating point context.
+ *
+ * Setting this to TRUE negatively impacts the time required to preempt
+ * the IDLE task from an interrupt because the floating point context
+ * must be saved as part of the preemption.
+ */
+
+#define CPU_IDLE_TASK_IS_FP FALSE
+
+/*
+ * Should the saving of the floating point registers be deferred
+ * until a context switch is made to another different floating point
+ * task?
+ *
+ * If TRUE, then the floating point context will not be stored until
+ * necessary. It will remain in the floating point registers and not
+ * be disturbed until another floating point task is switched to.
+ *
+ * If FALSE, then the floating point context is saved when a floating
+ * point task is switched out and restored when the next floating point
+ * task is restored. The state of the floating point registers between
+ * those two operations is not specified.
+ *
+ * If the floating point context does NOT have to be saved as part of
+ * interrupt dispatching, then it should be safe to set this to TRUE.
+ *
+ * Setting this flag to TRUE results in using a different algorithm
+ * for deciding when to save and restore the floating point context.
+ * The deferred FP switch algorithm minimizes the number of times
+ * the FP context is saved and restored. The FP context is not saved
+ * until a context switch is made to another, different FP task.
+ * Thus in a system with only one FP task, the FP context will never
+ * be saved or restored.
+ */
+/*
+ * ACB Note: This could make debugging tricky..
+ */
+
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+
+/*
+ * Does this port provide a CPU dependent IDLE task implementation?
+ *
+ * If TRUE, then the routine _CPU_Thread_Idle_body
+ * must be provided and is the default IDLE thread body instead of
+ * _Thread_Idle_body.
+ *
+ * If FALSE, then use the generic IDLE thread body if the BSP does
+ * not provide one.
+ *
+ * This is intended to allow for supporting processors which have
+ * a low power or idle mode. When the IDLE thread is executed, then
+ * the CPU can be powered down.
+ *
+ * The order of precedence for selecting the IDLE thread body is:
+ *
+ * 1. BSP provided
+ * 2. CPU dependent (if provided)
+ * 3. generic (if no BSP and no CPU dependent)
+ */
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+
+/*
+ * Does the stack grow up (toward higher addresses) or down
+ * (toward lower addresses)?
+ *
+ * If TRUE, then the stack grows upward.
+ * If FALSE, then the stack grows toward smaller addresses.
+ */
+
+#define CPU_STACK_GROWS_UP FALSE
+
+/*
+ * The following is the variable attribute used to force alignment
+ * of critical RTEMS structures. On some processors it may make
+ * sense to have these aligned on tighter boundaries than
+ * the minimum requirements of the compiler in order to have as
+ * much of the critical data area as possible in a cache line.
+ *
+ * The placement of this macro in the declaration of the variables
+ * is based on the syntactic requirements of the GNU C
+ * "__attribute__" extension. For example, with GNU C use
+ * the following to force a structure to a 32 byte boundary.
+ *
+ * __attribute__ ((aligned (32)))
+ *
+ * NOTE: Currently only the Priority Bit Map table uses this feature.
+ * To benefit from using this, the data must be heavily
+ * used so it will stay in the cache and used frequently enough
+ * in the executive to justify turning this on.
+ */
+
+#define CPU_STRUCTURE_ALIGNMENT \
+ __attribute__ ((aligned (PPC_CACHE_ALIGNMENT)))
+
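
A brief hypothetical illustration (the variable name below is made up for the
example and is not defined anywhere in this change): the macro is placed after
the declarator, so GNU C pads and aligns the object to a PPC_CACHE_ALIGNMENT
boundary.

    /* Hypothetical declaration: keep a frequently accessed table on a
       cache line boundary so lookups stay within as few lines as possible. */
    unsigned32 _Example_hot_table[ 16 ] CPU_STRUCTURE_ALIGNMENT;
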
+/*
+ * Define what is required to specify how the network to host conversion
+ * routines are handled.
+ */
+
+#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES FALSE
+#define CPU_BIG_ENDIAN TRUE
+#define CPU_LITTLE_ENDIAN FALSE
+
+
+/*
+ * Processor defined structures
+ *
+ * Example structures include the descriptor tables from the i386
+ * and the processor control structure on the i960ca.
+ */
+
+/* may need to put some structures here. */
+
+/*
+ * Contexts
+ *
+ * Generally there are 2 types of context to save.
+ * 1. Interrupt registers to save
+ * 2. Task level registers to save
+ *
+ * This means we have the following 3 context items:
+ * 1. task level context stuff:: Context_Control
+ * 2. floating point task stuff:: Context_Control_fp
+ * 3. special interrupt level context :: Context_Control_interrupt
+ *
+ * On some processors, it is cost-effective to save only the callee
+ * preserved registers during a task context switch. This means
+ * that the ISR code needs to save those registers which do not
+ * persist across function calls. It is not mandatory to make this
+ * distinction between the caller/callee saved registers for the
+ * purpose of minimizing context saved during task switch and on interrupts.
+ * If the cost of saving extra registers is minimal, simplicity is the
+ * choice. Save the same context on interrupt entry as for tasks in
+ * this case.
+ *
+ * Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
+ * care should be used in designing the context area.
+ *
+ * On some CPUs with hardware floating point support, the Context_Control_fp
+ * structure will not be used or it will simply consist of an array of a
+ * fixed number of bytes. This is done when the floating point context
+ * is dumped by a "FP save context" type instruction and the format
+ * is not really defined by the CPU. In this case, there is no need
+ * to figure out the exact format -- only the size. Of course, although
+ * this is enough information for RTEMS, it is probably not enough for
+ * a debugger such as gdb. But that is another problem.
+ */
+
+#ifndef ASM
+
+typedef struct {
+ unsigned32 gpr1; /* Stack pointer for all */
+ unsigned32 gpr2; /* TOC in PowerOpen, reserved SVR4, section ptr EABI + */
+ unsigned32 gpr13; /* First non volatile PowerOpen, section ptr SVR4/EABI */
+ unsigned32 gpr14; /* Non volatile for all */
+ unsigned32 gpr15; /* Non volatile for all */
+ unsigned32 gpr16; /* Non volatile for all */
+ unsigned32 gpr17; /* Non volatile for all */
+ unsigned32 gpr18; /* Non volatile for all */
+ unsigned32 gpr19; /* Non volatile for all */
+ unsigned32 gpr20; /* Non volatile for all */
+ unsigned32 gpr21; /* Non volatile for all */
+ unsigned32 gpr22; /* Non volatile for all */
+ unsigned32 gpr23; /* Non volatile for all */
+ unsigned32 gpr24; /* Non volatile for all */
+ unsigned32 gpr25; /* Non volatile for all */
+ unsigned32 gpr26; /* Non volatile for all */
+ unsigned32 gpr27; /* Non volatile for all */
+ unsigned32 gpr28; /* Non volatile for all */
+ unsigned32 gpr29; /* Non volatile for all */
+ unsigned32 gpr30; /* Non volatile for all */
+ unsigned32 gpr31; /* Non volatile for all */
+ unsigned32 cr; /* PART of the CR is non volatile for all */
+ unsigned32 pc; /* Program counter/Link register */
+ unsigned32 msr; /* Initial interrupt level */
+} Context_Control;
+
+typedef struct {
+ /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
+ * procedure calls. However, this would mean that the interrupt
+ * frame had to hold f0-f13, and the fpscr. And as the majority
+ * of tasks will not have an FP context, we will save the whole
+ * context here.
+ */
+#if (PPC_HAS_DOUBLE == 1)
+ double f[32];
+ double fpscr;
+#else
+ float f[32];
+ float fpscr;
+#endif
+} Context_Control_fp;
+
+typedef struct CPU_Interrupt_frame {
+ unsigned32 stacklink; /* Ensure this is a real frame (also reg1 save) */
+ unsigned32 calleeLr; /* link register used by callees: SVR4/EABI */
+ /* This is what is left out of the primary contexts */
+ unsigned32 gpr0;
+ unsigned32 gpr2; /* play safe */
+ unsigned32 gpr3;
+ unsigned32 gpr4;
+ unsigned32 gpr5;
+ unsigned32 gpr6;
+ unsigned32 gpr7;
+ unsigned32 gpr8;
+ unsigned32 gpr9;
+ unsigned32 gpr10;
+ unsigned32 gpr11;
+ unsigned32 gpr12;
+ unsigned32 gpr13; /* Play safe */
+ unsigned32 gpr28; /* For internal use by the IRQ handler */
+ unsigned32 gpr29; /* For internal use by the IRQ handler */
+ unsigned32 gpr30; /* For internal use by the IRQ handler */
+ unsigned32 gpr31; /* For internal use by the IRQ handler */
+ unsigned32 cr; /* Bits of this are volatile, so no-one may save */
+ unsigned32 ctr;
+ unsigned32 xer;
+ unsigned32 lr;
+ unsigned32 pc;
+ unsigned32 msr;
+ unsigned32 pad[3];
+} CPU_Interrupt_frame;
+
+/*
+ * The following table contains the information required to configure
+ * the PowerPC processor specific parameters.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+ boolean do_zero_of_workspace;
+ unsigned32 idle_task_stack_size;
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_mpci_receive_server_stack;
+ void * (*stack_allocate_hook)( unsigned32 );
+ void (*stack_free_hook)( void* );
+ /* end of fields required on all CPUs */
+
+ unsigned32 clicks_per_usec; /* Timer clicks per microsecond */
+ boolean exceptions_in_RAM; /* TRUE if in RAM */
+
+} rtems_cpu_table;
+
+/*
+ * Macros to access required entries in the CPU Table are in
+ * the file rtems/system.h.
+ */
+
+/*
+ * Macros to access PowerPC MPC750 specific additions to the CPU Table
+ */
+
+#define rtems_cpu_configuration_get_clicks_per_usec() \
+ (_CPU_Table.clicks_per_usec)
+
+#define rtems_cpu_configuration_get_exceptions_in_ram() \
+ (_CPU_Table.exceptions_in_RAM)
+
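
As a sketch of how a clock or benchmark timer driver might consume these
accessors, the helper below converts microseconds into decrementer clicks;
the helper name and the conversion itself are assumptions made for
illustration and are not part of this change.

    /* Sketch: translate a period in microseconds into decrementer clicks
       using the clicks_per_usec value supplied through the CPU table. */
    unsigned32 example_usec_to_clicks( unsigned32 usec )
    {
      return usec * rtems_cpu_configuration_get_clicks_per_usec();
    }
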
+/*
+ * This variable is optional. It is used on CPUs on which it is difficult
+ * to generate an "uninitialized" FP context. It is filled in by
+ * _CPU_Initialize and copied into the task's FP context area during
+ * _CPU_Context_Initialize.
+ */
+
+/* EXTERN Context_Control_fp _CPU_Null_fp_context; */
+
+/*
+ * On some CPUs, RTEMS supports a software managed interrupt stack.
+ * This stack is allocated by the Interrupt Manager and the switch
+ * is performed in _ISR_Handler. These variables contain pointers
+ * to the lowest and highest addresses in the chunk of memory allocated
+ * for the interrupt stack. Since it is unknown whether the stack
+ * grows up or down (in general), this gives the CPU dependent
+ * code the option of picking the version it wants to use.
+ *
+ * NOTE: These two variables are required if the macro
+ * CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
+ */
+
+SCORE_EXTERN void *_CPU_Interrupt_stack_low;
+SCORE_EXTERN void *_CPU_Interrupt_stack_high;
+
+#endif /* ndef ASM */
+
+/*
+ * This defines the number of levels and the mask used to pick those
+ * bits out of a thread mode.
+ */
+
+#define CPU_MODES_INTERRUPT_LEVEL 0x00000001 /* interrupt level in mode */
+#define CPU_MODES_INTERRUPT_MASK 0x00000001 /* interrupt level in mode */
+
+/*
+ * With some compilation systems, it is difficult if not impossible to
+ * call a high-level language routine from assembly language. This
+ * is especially true of commercial Ada compilers and name mangling
+ * C++ ones. This variable can be optionally defined by the CPU porter
+ * and contains the address of the routine _Thread_Dispatch. This
+ * can make it easier to invoke that routine at the end of the interrupt
+ * sequence (if a dispatch is necessary).
+ */
+
+/* EXTERN void (*_CPU_Thread_dispatch_pointer)(); */
+
+/*
+ * Nothing prevents the porter from declaring more CPU specific variables.
+ */
+
+#ifndef ASM
+
+SCORE_EXTERN struct {
+ unsigned32 *Disable_level;
+ void *Stack;
+ volatile boolean *Switch_necessary;
+ boolean *Signal;
+
+} _CPU_IRQ_info CPU_STRUCTURE_ALIGNMENT;
+
+#endif /* ndef ASM */
+
+/*
+ * The size of the floating point context area. On some CPUs this
+ * will not be a "sizeof" because the format of the floating point
+ * area is not defined -- only the size is. This is usually on
+ * CPUs with a "floating point save context" instruction.
+ */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+/*
+ * (Optional) # of bytes for libmisc/stackchk to check
+ * If not specified, then it defaults to something reasonable
+ * for most architectures.
+ */
+
+#define CPU_STACK_CHECK_SIZE (128)
+
+/*
+ * Amount of extra stack (above minimum stack size) required by
+ * MPCI receive server thread. Remember that in a multiprocessor
+ * system this thread must exist and be able to process all directives.
+ */
+
+#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
+
+/*
+ * This defines the number of entries in the ISR_Vector_table managed
+ * by RTEMS.
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS (PPC_INTERRUPT_MAX)
+#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (PPC_INTERRUPT_MAX - 1)
+
+/*
+ * Should be large enough to run all RTEMS tests. This ensures
+ * that a "reasonable" small application should not have any problems.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE (1024*8)
+
+/*
+ * CPU's worst alignment requirement for data types on a byte boundary. This
+ * alignment does not take into account the requirements for the stack.
+ */
+
+#define CPU_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * heap handler. This alignment requirement may be stricter than that
+ * for the data types alignment specified by CPU_ALIGNMENT. It is
+ * common for the heap to follow the same alignment requirement as
+ * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap,
+ * then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_HEAP_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for memory
+ * buffers allocated by the partition manager. This alignment requirement
+ * may be stricter than that for the data types alignment specified by
+ * CPU_ALIGNMENT. It is common for the partition to follow the same
+ * alignment requirement as CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict
+ * enough for the partition, then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_PARTITION_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * stack. This alignment requirement may be stricter than that for the
+ * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT
+ * is strict enough for the stack, then this should be set to 0.
+ *
+ * NOTE: This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
+ */
+
+#define CPU_STACK_ALIGNMENT (PPC_STACK_ALIGNMENT)
+
+/*
+ * Needed for Interrupt stack
+ */
+#define CPU_MINIMUM_STACK_FRAME_SIZE 8
+
+
+/* ISR handler macros */
+
+/*
+ * Disable all interrupts for an RTEMS critical section. The previous
+ * level is returned in _isr_cookie.
+ */
+
+#define loc_string(a,b) a " (" #b ")\n"
+
+#ifndef ASM
+
+static inline unsigned32 _CPU_ISR_Get_level( void )
+{
+ register unsigned int msr;
+ _CPU_MSR_GET(msr);
+ if (msr & MSR_EE) return 0;
+ else return 1;
+}
+
+static inline void _CPU_ISR_Set_level( unsigned32 level )
+{
+ register unsigned int msr;
+ _CPU_MSR_GET(msr);
+ if (!(level & CPU_MODES_INTERRUPT_MASK)) {
+ msr |= MSR_EE;
+ }
+ else {
+ msr &= ~MSR_EE;
+ }
+ _CPU_MSR_SET(msr);
+}
+
+#define _CPU_ISR_install_vector(irq, new, old) {BSP_panic("_CPU_ISR_install_vector called\n");}
+
+/* Context handler macros */
+
+/*
+ * Initialize the context to a state suitable for starting a
+ * task after a context restore operation. Generally, this
+ * involves:
+ *
+ * - setting a starting address
+ * - preparing the stack
+ * - preparing the stack and frame pointers
+ * - setting the proper interrupt level in the context
+ * - initializing the floating point context
+ *
+ * This routine generally does not set any unnecessary register
+ * in the context. The state of the "general data" registers is
+ * undefined at task start time.
+ *
+ * NOTE: Implemented as a subroutine for the SPARC port.
+ */
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ unsigned32 *stack_base,
+ unsigned32 size,
+ unsigned32 new_level,
+ void *entry_point,
+ boolean is_fp
+);
+
+/*
+ * This routine is responsible for somehow restarting the currently
+ * executing task. If you are lucky, then all that is necessary
+ * is restoring the context. Otherwise, there will need to be
+ * a special assembly routine which does something special in this
+ * case. Context_Restore should work most of the time. It will
+ * not work if restarting self conflicts with the stack frame
+ * assumptions of restoring a context.
+ */
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ _CPU_Context_restore( (_the_context) );
+
+/*
+ * The purpose of this macro is to allow the initial pointer into
+ * a floating point context area (used to save the floating point
+ * context) to be at an arbitrary place in the floating point
+ * context area.
+ *
+ * This is necessary because some FP units are designed to have
+ * their context saved as a stack which grows into lower addresses.
+ * Other FP units can be saved by simply moving registers into offsets
+ * from the base of the context area. Finally some FP units provide
+ * a "dump context" instruction which could fill in from high to low
+ * or low to high based on the whim of the CPU designers.
+ */
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
+
+/*
+ * This routine initializes the FP context area passed to it.
+ * There are a few standard ways in which to initialize the
+ * floating point context. The code included for this macro assumes
+ * that this is a CPU in which an "initial" FP context was saved into
+ * _CPU_Null_fp_context and it simply copies it to the destination
+ * context passed to it.
+ *
+ * Other models include (1) not doing anything, and (2) putting
+ * a "null FP status word" in the correct place in the FP context.
+ */
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+ { \
+ ((Context_Control_fp *) *((void **) _destination))->fpscr = PPC_INIT_FPSCR; \
+ }
+
+/* end of Context handler macros */
+
+/* Fatal Error manager macros */
+
+/*
+ * This routine copies _error into a known place -- typically a stack
+ * location or a register, optionally disables interrupts, and
+ * halts/stops the CPU.
+ */
+
+#define _CPU_Fatal_halt( _error ) \
+ _BSP_Fatal_error(_error)
+
+/* end of Fatal Error manager macros */
+
+/* Bitfield handler macros */
+
+/*
+ * This routine sets _output to the bit number of the first bit
+ * set in _value. _value is of CPU dependent type Priority_Bit_map_control.
+ * This type may be either 16 or 32 bits wide although only the 16
+ * least significant bits will be used.
+ *
+ * There are a number of variations in using a "find first bit" type
+ * instruction.
+ *
+ * (1) What happens when run on a value of zero?
+ * (2) Bits may be numbered from MSB to LSB or vice-versa.
+ * (3) The numbering may be zero or one based.
+ * (4) The "find first bit" instruction may search from MSB or LSB.
+ *
+ * RTEMS guarantees that (1) will never happen so it is not a concern.
+ * (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
+ * _CPU_Priority_Bits_index(). These three form a set of routines
+ * which must logically operate together. Bits in the _value are
+ * set and cleared based on masks built by _CPU_Priority_mask().
+ * The basic major and minor values calculated by _Priority_Major()
+ * and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
+ * to properly range between the values returned by the "find first bit"
+ * instruction. This makes it possible for _Priority_Get_highest() to
+ * calculate the major and directly index into the minor table.
+ * This mapping is necessary to ensure that 0 (a high priority major/minor)
+ * is the first bit found.
+ *
+ * This entire "find first bit" and mapping process depends heavily
+ * on the manner in which a priority is broken into a major and minor
+ * components with the major being the 4 MSB of a priority and minor
+ * the 4 LSB. Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
+ * priority. And (15 << 4) + 14 corresponds to priority 254 -- the next
+ * to the lowest priority.
+ *
+ * If your CPU does not have a "find first bit" instruction, then
+ * there are ways to make do without it. Here are a handful of ways
+ * to implement this in software:
+ *
+ * - a series of 16 bit test instructions
+ * - a "binary search using if's"
+ * - _number = 0
+ * if _value > 0x00ff
+ * _value >>=8
+ * _number = 8;
+ *
+ *       if _value > 0x000f
+ *         _value >>= 4
+ * _number += 4
+ *
+ * _number += bit_set_table[ _value ]
+ *
+ * where bit_set_table[ 16 ] has values which indicate the first
+ * bit set
+ */
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ { \
+ asm volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
+ "1" ((_value))); \
+ }
+
+/* end of Bitfield handler macros */
+
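
For a CPU without a count-leading-zeros style instruction, the binary-search
fallback outlined in the comment above could be written out roughly as shown
below. This is only a sketch: the 16-bit input and the nibble lookup table
mirror the pseudocode, and the bit numbering convention (LSB = 0 here) would
still have to be reconciled with _CPU_Priority_Mask() and
_CPU_Priority_bits_index() on a real port.

    /* Maps a non-zero 4-bit value to the index of its most significant
       set bit, with bit 0 being the least significant bit. */
    static const unsigned char example_bit_set_table[ 16 ] = {
      0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
    };

    /* Software "find first bit" for a non-zero 16-bit value. */
    static inline unsigned int example_find_first_bit( unsigned int value )
    {
      unsigned int number = 0;

      if ( value > 0x00ff ) {   /* set bit is in the upper byte */
        value >>= 8;
        number = 8;
      }
      if ( value > 0x000f ) {   /* set bit is in the upper nibble */
        value >>= 4;
        number += 4;
      }
      return number + example_bit_set_table[ value ];
    }
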
+/*
+ * This routine builds the mask which corresponds to the bit fields
+ * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 0x80000000 >> (_bit_number) )
+
+/*
+ * This routine translates the bit numbers returned by
+ * _CPU_Bitfield_Find_first_bit() into something suitable for use as
+ * a major or minor component of a priority. See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_bits_index( _priority ) \
+ (_priority)
+
+/* end of Priority handler macros */
+
+/* variables */
+
+extern const unsigned32 _CPU_msrs[4];
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)
+);
+
+
+/*
+ * _CPU_Install_interrupt_stack
+ *
+ * This routine installs the hardware interrupt stack pointer.
+ *
+ * NOTE: It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
+ * is TRUE.
+ */
+
+void _CPU_Install_interrupt_stack( void );
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+);
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine saves the floating point context passed to it.
+ */
+
+void _CPU_Context_save_fp(
+ void **fp_context_ptr
+);
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine restores the floating point context passed to it.
+ */
+
+void _CPU_Context_restore_fp(
+ void **fp_context_ptr
+);
+
+void _CPU_Fatal_error(
+ unsigned32 _error
+);
+
+/* The following routine swaps the endian format of an unsigned int.
+ * It must be static because it is referenced indirectly.
+ *
+ * This version will work on any processor, but if there is a better
+ * way for your CPU PLEASE use it. The most common way to do this is to:
+ *
+ * swap least significant two bytes with 16-bit rotate
+ * swap upper and lower 16-bits
+ * swap most significant two bytes with 16-bit rotate
+ *
+ * Some CPUs have special instructions which swap a 32-bit quantity in
+ * a single instruction (e.g. i486). It is probably best to avoid
+ * an "endian swapping control bit" in the CPU. One good reason is
+ * that interrupts would probably have to be disabled to insure that
+ * an interrupt does not try to access the same "chunk" with the wrong
+ * endian. Another good reason is that on some CPUs, the endian bit
+ * controls the endianness for ALL fetches -- both code and data -- so the code
+ * will be fetched incorrectly.
+ */
+
+static inline unsigned int CPU_swap_u32(
+ unsigned int value
+)
+{
+ unsigned32 swapped;
+
+ asm volatile("rlwimi %0,%1,8,24,31;"
+ "rlwimi %0,%1,24,16,23;"
+ "rlwimi %0,%1,8,8,15;"
+ "rlwimi %0,%1,24,0,7;" :
+ "=&r" ((swapped)) : "r" ((value)));
+
+ return( swapped );
+}
+
+#define CPU_swap_u16( value ) \
+ (((value&0xff) << 8) | ((value >> 8)&0xff))
+
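
On a processor without rlwimi-style rotate-and-insert instructions, the same
32-bit swap can be done with plain shifts and masks; the routine below is a
portable sketch for comparison and is not part of this port.

    /* Portable 32-bit byte swap using only shifts and masks. */
    static inline unsigned int example_swap_u32( unsigned int value )
    {
      return ((value & 0x000000ff) << 24) |
             ((value & 0x0000ff00) <<  8) |
             ((value & 0x00ff0000) >>  8) |
             ((value & 0xff000000) >> 24);
    }
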
+/*
+ * Routines to access the decrementer register
+ */
+
+#define PPC_Set_decrementer( _clicks ) \
+ do { \
+ asm volatile( "mtdec %0" : "=r" ((_clicks)) : "r" ((_clicks)) ); \
+ } while (0)
+
+/*
+ * Routines to access the time base register
+ */
+
+static inline unsigned64 PPC_Get_timebase_register( void )
+{
+ unsigned32 tbr_low;
+ unsigned32 tbr_high;
+ unsigned32 tbr_high_old;
+ unsigned64 tbr;
+
+ do {
+ asm volatile( "mftbu %0" : "=r" (tbr_high_old));
+ asm volatile( "mftb %0" : "=r" (tbr_low));
+ asm volatile( "mftbu %0" : "=r" (tbr_high));
+ } while ( tbr_high_old != tbr_high );
+
+ tbr = tbr_high;
+ tbr <<= 32;
+ tbr |= tbr_low;
+ return tbr;
+}
+
+#endif /* ndef ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/c/src/lib/libcpu/powerpc/new_exception_processing/cpu_asm.S b/c/src/lib/libcpu/powerpc/new_exception_processing/cpu_asm.S
new file mode 100644
index 0000000000..213e094fa6
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/new_exception_processing/cpu_asm.S
@@ -0,0 +1,396 @@
+
+/* cpu_asm.s 1.1 - 95/12/04
+ *
+ * This file contains the assembly code for the PowerPC implementation
+ * of RTEMS.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu_asm.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+/*
+ * Offsets for various Contexts
+ */
+ .set GP_1, 0
+ .set GP_2, (GP_1 + 4)
+ .set GP_13, (GP_2 + 4)
+ .set GP_14, (GP_13 + 4)
+
+ .set GP_15, (GP_14 + 4)
+ .set GP_16, (GP_15 + 4)
+ .set GP_17, (GP_16 + 4)
+ .set GP_18, (GP_17 + 4)
+
+ .set GP_19, (GP_18 + 4)
+ .set GP_20, (GP_19 + 4)
+ .set GP_21, (GP_20 + 4)
+ .set GP_22, (GP_21 + 4)
+
+ .set GP_23, (GP_22 + 4)
+ .set GP_24, (GP_23 + 4)
+ .set GP_25, (GP_24 + 4)
+ .set GP_26, (GP_25 + 4)
+
+ .set GP_27, (GP_26 + 4)
+ .set GP_28, (GP_27 + 4)
+ .set GP_29, (GP_28 + 4)
+ .set GP_30, (GP_29 + 4)
+
+ .set GP_31, (GP_30 + 4)
+ .set GP_CR, (GP_31 + 4)
+ .set GP_PC, (GP_CR + 4)
+ .set GP_MSR, (GP_PC + 4)
+
+ .set FP_0, 0
+ .set FP_1, (FP_0 + 4)
+ .set FP_2, (FP_1 + 4)
+ .set FP_3, (FP_2 + 4)
+ .set FP_4, (FP_3 + 4)
+ .set FP_5, (FP_4 + 4)
+ .set FP_6, (FP_5 + 4)
+ .set FP_7, (FP_6 + 4)
+ .set FP_8, (FP_7 + 4)
+ .set FP_9, (FP_8 + 4)
+ .set FP_10, (FP_9 + 4)
+ .set FP_11, (FP_10 + 4)
+ .set FP_12, (FP_11 + 4)
+ .set FP_13, (FP_12 + 4)
+ .set FP_14, (FP_13 + 4)
+ .set FP_15, (FP_14 + 4)
+ .set FP_16, (FP_15 + 4)
+ .set FP_17, (FP_16 + 4)
+ .set FP_18, (FP_17 + 4)
+ .set FP_19, (FP_18 + 4)
+ .set FP_20, (FP_19 + 4)
+ .set FP_21, (FP_20 + 4)
+ .set FP_22, (FP_21 + 4)
+ .set FP_23, (FP_22 + 4)
+ .set FP_24, (FP_23 + 4)
+ .set FP_25, (FP_24 + 4)
+ .set FP_26, (FP_25 + 4)
+ .set FP_27, (FP_26 + 4)
+ .set FP_28, (FP_27 + 4)
+ .set FP_29, (FP_28 + 4)
+ .set FP_30, (FP_29 + 4)
+ .set FP_31, (FP_30 + 4)
+ .set FP_FPSCR, (FP_31 + 4)
+
+ .set IP_LINK, 0
+ .set IP_0, (IP_LINK + 8)
+ .set IP_2, (IP_0 + 4)
+
+ .set IP_3, (IP_2 + 4)
+ .set IP_4, (IP_3 + 4)
+ .set IP_5, (IP_4 + 4)
+ .set IP_6, (IP_5 + 4)
+
+ .set IP_7, (IP_6 + 4)
+ .set IP_8, (IP_7 + 4)
+ .set IP_9, (IP_8 + 4)
+ .set IP_10, (IP_9 + 4)
+
+ .set IP_11, (IP_10 + 4)
+ .set IP_12, (IP_11 + 4)
+ .set IP_13, (IP_12 + 4)
+ .set IP_28, (IP_13 + 4)
+
+ .set IP_29, (IP_28 + 4)
+ .set IP_30, (IP_29 + 4)
+ .set IP_31, (IP_30 + 4)
+ .set IP_CR, (IP_31 + 4)
+
+ .set IP_CTR, (IP_CR + 4)
+ .set IP_XER, (IP_CTR + 4)
+ .set IP_LR, (IP_XER + 4)
+ .set IP_PC, (IP_LR + 4)
+
+ .set IP_MSR, (IP_PC + 4)
+ .set IP_END, (IP_MSR + 16)
+
+ BEGIN_CODE
+/*
+ * _CPU_Context_save_fp_context
+ *
+ * This routine is responsible for saving the FP context
+ * at *fp_context_ptr. If the pointer to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_save_fp)
+PROC (_CPU_Context_save_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+ stfs f0, FP_0(r3)
+ stfs f1, FP_1(r3)
+ stfs f2, FP_2(r3)
+ stfs f3, FP_3(r3)
+ stfs f4, FP_4(r3)
+ stfs f5, FP_5(r3)
+ stfs f6, FP_6(r3)
+ stfs f7, FP_7(r3)
+ stfs f8, FP_8(r3)
+ stfs f9, FP_9(r3)
+ stfs f10, FP_10(r3)
+ stfs f11, FP_11(r3)
+ stfs f12, FP_12(r3)
+ stfs f13, FP_13(r3)
+ stfs f14, FP_14(r3)
+ stfs f15, FP_15(r3)
+ stfs f16, FP_16(r3)
+ stfs f17, FP_17(r3)
+ stfs f18, FP_18(r3)
+ stfs f19, FP_19(r3)
+ stfs f20, FP_20(r3)
+ stfs f21, FP_21(r3)
+ stfs f22, FP_22(r3)
+ stfs f23, FP_23(r3)
+ stfs f24, FP_24(r3)
+ stfs f25, FP_25(r3)
+ stfs f26, FP_26(r3)
+ stfs f27, FP_27(r3)
+ stfs f28, FP_28(r3)
+ stfs f29, FP_29(r3)
+ stfs f30, FP_30(r3)
+ stfs f31, FP_31(r3)
+ mffs f2
+ stfs f2, FP_FPSCR(r3)
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore_fp_context
+ *
+ * This routine is responsible for restoring the FP context
+ * at *fp_context_ptr. If the pointer to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore_fp)
+PROC (_CPU_Context_restore_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+ lfs f2, FP_FPSCR(r3)
+ mtfsf 255, f2
+ lfs f0, FP_0(r3)
+ lfs f1, FP_1(r3)
+ lfs f2, FP_2(r3)
+ lfs f3, FP_3(r3)
+ lfs f4, FP_4(r3)
+ lfs f5, FP_5(r3)
+ lfs f6, FP_6(r3)
+ lfs f7, FP_7(r3)
+ lfs f8, FP_8(r3)
+ lfs f9, FP_9(r3)
+ lfs f10, FP_10(r3)
+ lfs f11, FP_11(r3)
+ lfs f12, FP_12(r3)
+ lfs f13, FP_13(r3)
+ lfs f14, FP_14(r3)
+ lfs f15, FP_15(r3)
+ lfs f16, FP_16(r3)
+ lfs f17, FP_17(r3)
+ lfs f18, FP_18(r3)
+ lfs f19, FP_19(r3)
+ lfs f20, FP_20(r3)
+ lfs f21, FP_21(r3)
+ lfs f22, FP_22(r3)
+ lfs f23, FP_23(r3)
+ lfs f24, FP_24(r3)
+ lfs f25, FP_25(r3)
+ lfs f26, FP_26(r3)
+ lfs f27, FP_27(r3)
+ lfs f28, FP_28(r3)
+ lfs f29, FP_29(r3)
+ lfs f30, FP_30(r3)
+ lfs f31, FP_31(r3)
+#endif
+ blr
+
+
+/* _CPU_Context_switch
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_switch)
+PROC (_CPU_Context_switch):
+ sync
+ isync
+ /* This assumes that all the registers are in the given order */
+ li r5, 32
+ addi r3,r3,-4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_18+4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stmw r13, GP_13-GP_18(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stw r14, GP_14+4(r3)
+ stw r15, GP_15+4(r3)
+ stw r16, GP_16+4(r3)
+ stw r17, GP_17+4(r3)
+ stwu r18, GP_18+4(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stw r22, GP_22-GP_18(r3)
+ stw r23, GP_23-GP_18(r3)
+ stw r24, GP_24-GP_18(r3)
+ stw r25, GP_25-GP_18(r3)
+ stw r26, GP_26-GP_18(r3)
+ stw r27, GP_27-GP_18(r3)
+ stw r28, GP_28-GP_18(r3)
+ stw r29, GP_29-GP_18(r3)
+ stw r30, GP_30-GP_18(r3)
+ stw r31, GP_31-GP_18(r3)
+#endif
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r0, r4
+#endif
+ mfcr r6
+ stw r6, GP_CR-GP_18(r3)
+ mflr r7
+ stw r7, GP_PC-GP_18(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_18(r3)
+
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_19
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lmw r13, GP_13-GP_19(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwz r15, GP_15(r4)
+ lwz r16, GP_16(r4)
+ lwz r17, GP_17(r4)
+ lwz r18, GP_18(r4)
+ lwzu r19, GP_19(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwz r23, GP_23-GP_19(r4)
+ lwz r24, GP_24-GP_19(r4)
+ lwz r25, GP_25-GP_19(r4)
+ lwz r26, GP_26-GP_19(r4)
+ lwz r27, GP_27-GP_19(r4)
+ lwz r28, GP_28-GP_19(r4)
+ lwz r29, GP_29-GP_19(r4)
+ lwz r30, GP_30-GP_19(r4)
+ lwz r31, GP_31-GP_19(r4)
+#endif
+ lwz r6, GP_CR-GP_19(r4)
+ lwz r7, GP_PC-GP_19(r4)
+ lwz r8, GP_MSR-GP_19(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+
+ blr
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+/*
+ * ACB: Don't worry about cache optimisation here - this is not THAT critical.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore)
+PROC (_CPU_Context_restore):
+ lwz r5, GP_CR(r3)
+ lwz r6, GP_PC(r3)
+ lwz r7, GP_MSR(r3)
+ mtcrf 255, r5
+ mtlr r6
+ mtmsr r7
+ lwz r1, GP_1(r3)
+ lwz r2, GP_2(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ lmw r13, GP_13(r3)
+#else
+ lwz r13, GP_13(r3)
+ lwz r14, GP_14(r3)
+ lwz r15, GP_15(r3)
+ lwz r16, GP_16(r3)
+ lwz r17, GP_17(r3)
+ lwz r18, GP_18(r3)
+ lwz r19, GP_19(r3)
+ lwz r20, GP_20(r3)
+ lwz r21, GP_21(r3)
+ lwz r22, GP_22(r3)
+ lwz r23, GP_23(r3)
+ lwz r24, GP_24(r3)
+ lwz r25, GP_25(r3)
+ lwz r26, GP_26(r3)
+ lwz r27, GP_27(r3)
+ lwz r28, GP_28(r3)
+ lwz r29, GP_29(r3)
+ lwz r30, GP_30(r3)
+ lwz r31, GP_31(r3)
+#endif
+
+ blr
+
diff --git a/c/src/lib/libcpu/powerpc/old-exceptions/README b/c/src/lib/libcpu/powerpc/old-exceptions/README
new file mode 100644
index 0000000000..c72bebfe0c
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old-exceptions/README
@@ -0,0 +1,80 @@
+#
+# $Id$
+#
+
+There are various issues regarding this port:
+
+
+
+1) Legal
+
+This port is written by Andrew Bray <andy@i-cubed.co.uk>, and
+is copyright 1995 i-cubed ltd.
+
+This port was later updated by Joel Sherrill <joel@OARcorp.com>
+to test the support for the PPC603, PPC603e, and MPC604. This
+was tested on the PowerPC simulator PSIM and a VMEbus single board
+computer.
+
+2) CPU support.
+
+This release fully supports the PPC403GA, PPC403GB, PPC603, PPC603e,
+MPC604, MPC750, and numerous MPC8xx processors. A good faith attempt
+has been made to include support for other models based upon available
+documentation including the MPC5xx. There are two interrupt structures
+supported by the PowerPC port. The newer structure is supported by
+all the MPC750 and MPC604 BSPs. This structure is required to use
+the RDBG remote debugging support.
+
+This port was originally written and tested on the PPC403GA (using
+software floating point). Current ports are tested primarily on
+60x CPUs using the PowerPC simulator PSIM.
+
+Andrew Bray received assistance during the initial porting effort
+from IBM and Blue Micro and we would like to gratefully acknowledge
+that help.
+
+The support for the PPC602 processor is incomplete as only sketchy
+data is currently available. Perhaps this model has been dropped.
+
+3) Application Binary Interface
+
+In the context of RTEMS, the ABI is of interest for the following
+aspects:
+
+a) Register usage. Which registers are used to provide static variable
+ linkage, stack pointer etc.
+
+b) Function calling convention. How parameters are passed, how function
+ variables should be invoked, how values are returned, etc.
+
+c) Stack frame layout.
+
+I am aware of a number of ABIs for the PowerPC:
+
+a) The PowerOpen ABI. This is the original Power ABI used on the RS/6000.
+ This is the only ABI supported by versions of GCC before 2.7.0.
+
+b) The SVR4 ABI. This is the ABI defined by SunSoft for the Solaris port
+ to the PowerPC.
+
+c) The Embedded ABI. This is an embedded ABI for PowerPC use, which has no
+ operating system interface defined. It is promoted by SunSoft, Motorola,
+ and Cygnus Support. Cygnus are porting the GNU toolchain to this ABI.
+
+d) GCC 2.7.0. This compiler is partway along the road to supporting the EABI,
+ but is currently halfway in between.
+
+This port was built and tested using the PowerOpen ABI, with the following
+caveat: we used an ELF assembler and linker. So some attention may be
+required on the assembler files to get them through a traditional (XCOFF)
+PowerOpen assembler.
+
+This port contains support for the other ABIs, but this may prove to be
+incomplete as it is untested.
+
+The RTEMS PowerPC port supports EABI as the primary ABI. The powerpc-rtems
+GNU toolset configuration is EABI.
+
+Andrew Bray, 4 December 1995
+Joel Sherrill, 16 July 1997
diff --git a/c/src/lib/libcpu/powerpc/old-exceptions/TODO b/c/src/lib/libcpu/powerpc/old-exceptions/TODO
new file mode 100644
index 0000000000..64c96cb14c
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old-exceptions/TODO
@@ -0,0 +1,8 @@
+#
+# $Id$
+#
+
+Todo list:
+
+Maybe decode external interrupts like the HPPA does.
+ See c/src/lib/libcpu/powerpc/ppc403/ictrl/* for implementation on ppc403
diff --git a/c/src/lib/libcpu/powerpc/old-exceptions/cpu.c b/c/src/lib/libcpu/powerpc/old-exceptions/cpu.c
new file mode 100644
index 0000000000..7d6824cb26
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old-exceptions/cpu.c
@@ -0,0 +1,853 @@
+/*
+ * PowerPC CPU Dependent Source
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/context.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/interr.h>
+
+/*
+ * These are for testing purposes.
+ */
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ * thread_dispatch - address of dispatching routine
+ */
+
+static void ppc_spurious(int, CPU_Interrupt_frame *);
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+ proc_ptr handler = (proc_ptr)ppc_spurious;
+ int i;
+#if (PPC_ABI != PPC_ABI_POWEROPEN)
+ register unsigned32 r2 = 0;
+#if (PPC_ABI != PPC_ABI_GCC27)
+ register unsigned32 r13 = 0;
+
+ asm ("mr %0,13" : "=r" ((r13)) : "0" ((r13)));
+ _CPU_IRQ_info.Default_r13 = r13;
+#endif
+
+ asm ("mr %0,2" : "=r" ((r2)) : "0" ((r2)));
+ _CPU_IRQ_info.Default_r2 = r2;
+#endif
+
+ _CPU_IRQ_info.Nest_level = &_ISR_Nest_level;
+ _CPU_IRQ_info.Disable_level = &_Thread_Dispatch_disable_level;
+ _CPU_IRQ_info.Vector_table = _ISR_Vector_table;
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ _CPU_IRQ_info.Dispatch_r2 = ((unsigned32 *)_Thread_Dispatch)[1];
+#endif
+ _CPU_IRQ_info.Switch_necessary = &_Context_Switch_necessary;
+ _CPU_IRQ_info.Signal = &_ISR_Signals_to_thread_executing;
+
+#if (PPC_USE_SPRG)
+ i = (int)&_CPU_IRQ_info;
+ asm volatile("mtspr 0x113, %0" : "=r" (i) : "0" (i)); /* SPRG 3 */
+#endif
+
+ /*
+ * Store Msr Value in the IRQ info structure.
+ */
+ _CPU_MSR_Value(_CPU_IRQ_info.msr_initial);
+
+#if (PPC_USE_SPRG)
+ i = _CPU_IRQ_info.msr_initial;
+ asm volatile("mtspr 0x112, %0" : "=r" (i) : "0" (i)); /* SPRG 2 */
+#endif
+
+ if ( cpu_table->spurious_handler )
+ handler = (proc_ptr)cpu_table->spurious_handler;
+
+ for (i = 0; i < PPC_INTERRUPT_MAX; i++)
+ _ISR_Vector_table[i] = handler;
+
+ _CPU_Table = *cpu_table;
+}
+
+/*PAGE
+ *
+ * _CPU_ISR_Calculate_level
+ *
+ * The PowerPC puts its interrupt enable status in the MSR register
+ * which also contains things like endianness control. To be more
+ * which also contains things like endianness control. To make matters more
+ * is why it was necessary to adopt a scheme which allowed the user
+ * to specify specifically which interrupt sources were enabled.
+ */
+
+unsigned32 _CPU_ISR_Calculate_level(
+ unsigned32 new_level
+)
+{
+ register unsigned32 new_msr = 0;
+
+ /*
+ * Set the critical interrupt enable bit
+ */
+
+#if (PPC_HAS_RFCI)
+ if ( !(new_level & PPC_INTERRUPT_LEVEL_CE) )
+ new_msr |= PPC_MSR_CE;
+#endif
+
+ if ( !(new_level & PPC_INTERRUPT_LEVEL_ME) )
+ new_msr |= PPC_MSR_ME;
+
+ if ( !(new_level & PPC_INTERRUPT_LEVEL_EE) )
+ new_msr |= PPC_MSR_EE;
+
+ return new_msr;
+}
+
+/*PAGE
+ *
+ * _CPU_ISR_Set_level
+ *
+ * This routine sets the requested level in the MSR.
+ */
+
+void _CPU_ISR_Set_level(
+ unsigned32 new_level
+)
+{
+ register unsigned32 tmp = 0;
+ register unsigned32 new_msr;
+
+ new_msr = _CPU_ISR_Calculate_level( new_level );
+
+ asm volatile (
+ "mfmsr %0; andc %0,%0,%1; and %2, %2, %1; or %0, %0, %2; mtmsr %0" :
+ "=&r" ((tmp)) :
+ "r" ((PPC_MSR_DISABLE_MASK)), "r" ((new_msr)), "0" ((tmp))
+ );
+}
+
+/*PAGE
+ *
+ * _CPU_ISR_Get_level
+ *
+ * This routine gets the current interrupt level from the MSR and
+ * converts it to an RTEMS interrupt level.
+ */
+
+unsigned32 _CPU_ISR_Get_level( void )
+{
+ unsigned32 level = 0;
+ unsigned32 msr;
+
+ asm volatile("mfmsr %0" : "=r" ((msr)));
+
+ msr &= PPC_MSR_DISABLE_MASK;
+
+ /*
+ * Set the critical interrupt enable bit
+ */
+
+#if (PPC_HAS_RFCI)
+ if ( !(msr & PPC_MSR_CE) )
+ level |= PPC_INTERRUPT_LEVEL_CE;
+#endif
+
+ if ( !(msr & PPC_MSR_ME) )
+ level |= PPC_INTERRUPT_LEVEL_ME;
+
+ if ( !(msr & PPC_MSR_EE) )
+ level |= PPC_INTERRUPT_LEVEL_EE;
+
+ return level;
+}
+
+/*PAGE
+ *
+ * _CPU_Context_Initialize
+ */
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+#define CPU_MINIMUM_STACK_FRAME_SIZE 56
+#else /* PPC_ABI_SVR4 or PPC_ABI_EABI */
+#define CPU_MINIMUM_STACK_FRAME_SIZE 8
+#endif
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ unsigned32 *stack_base,
+ unsigned32 size,
+ unsigned32 new_level,
+ void *entry_point,
+ boolean is_fp
+)
+{
+ unsigned32 msr_value;
+ unsigned32 sp;
+
+ sp = (unsigned32)stack_base + size - CPU_MINIMUM_STACK_FRAME_SIZE;
+ *((unsigned32 *)sp) = 0;
+ the_context->gpr1 = sp;
+
+ the_context->msr = _CPU_ISR_Calculate_level( new_level );
+
+ /*
+ * The FP bit of the MSR should only be enabled if this is a floating
+ * point task. Unfortunately, the vfprintf_r routine in newlib
+ * ends up pushing a floating point register regardless of whether or
+ * not a floating point number is being printed. Serious restructuring
+ * of vfprintf.c will be required to avoid this behavior. At this
+ * time (7 July 1997), this restructuring is not being done.
+ */
+
+ /*if ( is_fp ) */
+ the_context->msr |= PPC_MSR_FP;
+
+ /*
+ * Calculate the task's MSR value:
+ *
+ * + Set the exception prefix bit to point to the exception table
+ * + Force the RI bit
+ * + Use the DR and IR bits
+ */
+ _CPU_MSR_Value( msr_value );
+ the_context->msr |= (msr_value & PPC_MSR_EP);
+ the_context->msr |= PPC_MSR_RI;
+ the_context->msr |= msr_value & (PPC_MSR_DR|PPC_MSR_IR);
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ { unsigned32 *desc = (unsigned32 *)entry_point;
+
+ the_context->pc = desc[0];
+ the_context->gpr2 = desc[1];
+ }
+#endif
+
+#if (PPC_ABI == PPC_ABI_SVR4)
+ { unsigned r13 = 0;
+ asm volatile ("mr %0, 13" : "=r" ((r13)));
+
+ the_context->pc = (unsigned32)entry_point;
+ the_context->gpr13 = r13;
+ }
+#endif
+
+#if (PPC_ABI == PPC_ABI_EABI)
+ { unsigned32 r2 = 0;
+ unsigned r13 = 0;
+ asm volatile ("mr %0,2; mr %1,13" : "=r" ((r2)), "=r" ((r13)));
+
+ the_context->pc = (unsigned32)entry_point;
+ the_context->gpr2 = r2;
+ the_context->gpr13 = r13;
+ }
+#endif
+}
+
+
+/* _CPU_ISR_install_vector
+ *
+ * This kernel routine installs the RTEMS handler for the
+ * specified vector.
+ *
+ * Input parameters:
+ * vector - interrupt vector number
+ * old_handler - former ISR for this vector number
+ * new_handler - replacement ISR for this vector number
+ *
+ * Output parameters: NONE
+ *
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ proc_ptr ignored;
+ *old_handler = _ISR_Vector_table[ vector ];
+
+ /*
+ * If the interrupt vector table is a table of pointer to isr entry
+ * points, then we need to install the appropriate RTEMS interrupt
+ * handler for this vector number.
+ */
+
+ /*
+ * Install the wrapper so this ISR can be invoked properly.
+ */
+ if (_CPU_Table.exceptions_in_RAM)
+ _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );
+
+ /*
+ * We put the actual user ISR address in '_ISR_vector_table'. This will
+ * be used by the _ISR_Handler so the user gets control.
+ */
+
+ _ISR_Vector_table[ vector ] = new_handler ? (ISR_Handler_entry)new_handler :
+ _CPU_Table.spurious_handler ?
+ (ISR_Handler_entry)_CPU_Table.spurious_handler :
+ (ISR_Handler_entry)ppc_spurious;
+}
+
+/*PAGE
+ *
+ * _CPU_Install_interrupt_stack
+ */
+
+void _CPU_Install_interrupt_stack( void )
+{
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ _CPU_IRQ_info.Stack = _CPU_Interrupt_stack_high - 56;
+#else
+ _CPU_IRQ_info.Stack = _CPU_Interrupt_stack_high - 8;
+#endif
+}
+
+/* Handle a spurious interrupt */
+static void ppc_spurious(int v, CPU_Interrupt_frame *i)
+{
+#if 0
+ printf("Spurious interrupt on vector %d from %08.8x\n",
+ v, i->pc);
+#endif
+#ifdef ppc403
+ if (v == PPC_IRQ_EXTERNAL)
+ {
+ register int r = 0;
+
+ asm volatile("mtdcr 0x42, %0" :
+ "=&r" ((r)) : "0" ((r))); /* EXIER */
+ }
+ else if (v == PPC_IRQ_PIT)
+ {
+ register int r = 0x08000000;
+
+ asm volatile("mtspr 0x3d8, %0" :
+ "=&r" ((r)) : "0" ((r))); /* TSR */
+ }
+ else if (v == PPC_IRQ_FIT)
+ {
+ register int r = 0x04000000;
+
+ asm volatile("mtspr 0x3d8, %0" :
+ "=&r" ((r)) : "0" ((r))); /* TSR */
+ }
+#endif
+}
+
+void _CPU_Fatal_error(unsigned32 _error)
+{
+ asm volatile ("mr 3, %0" : : "r" ((_error)));
+ asm volatile ("tweq 5,5");
+ asm volatile ("li 0,0; mtmsr 0");
+ while (1) ;
+}
+
+#define PPC_SYNCHRONOUS_TRAP_BIT_MASK 0x100
+#define PPC_ASYNCHRONOUS_TRAP( _trap ) (_trap)
+#define PPC_SYNCHRONOUS_TRAP( _trap )  ((_trap)+PPC_SYNCHRONOUS_TRAP_BIT_MASK)
+#define PPC_REAL_TRAP_NUMBER( _trap )  ((_trap)%PPC_SYNCHRONOUS_TRAP_BIT_MASK)
+
+
+const CPU_Trap_table_entry _CPU_Trap_slot_template = {
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+#error " Vector install not tested."
+#if (PPC_HAS_FPU)
+#error " Vector install not tested."
+ 0x9421feb0, /* stwu r1, -(20*4 + 18*8 + IP_END)(r1) */
+#else
+#error " Vector install not tested."
+ 0x9421ff40, /* stwu r1, -(20*4 + IP_END)(r1) */
+#endif
+#else
+ 0x9421ff90, /* stwu r1, -(IP_END)(r1) */
+#endif
+
+ 0x90010008, /* stw %r0, IP_0(%r1) */
+ 0x38000000, /* li %r0, PPC_IRQ */
+ 0x48000002 /* ba PROC (_ISR_Handler) */
+};
+
+#if defined(mpc860) || defined(mpc821)
+const CPU_Trap_table_entry _CPU_Trap_slot_template_m860 = {
+ 0x7c0803ac, /* mtlr %r0 */
+ 0x81210028, /* lwz %r9, IP_9(%r1) */
+ 0x38000000, /* li %r0, PPC_IRQ */
+ 0x48000002 /* b PROC (_ISR_Handler) */
+};
+#endif /* mpc860 */
+
+unsigned32 ppc_exception_vector_addr(
+ unsigned32 vector
+);
+
+
+/*PAGE
+ *
+ * _CPU_ISR_install_raw_handler
+ *
+ * This routine installs the specified handler as a "raw" non-executive
+ * supported trap handler (a.k.a. interrupt service routine).
+ *
+ * Input Parameters:
+ * vector - trap table entry number plus synchronous
+ * vs. asynchronous information
+ * new_handler - address of the handler to be installed
+ * old_handler - pointer to an address of the handler previously installed
+ *
+ * Output Parameters: NONE
+ * *new_handler - address of the handler previously installed
+ *
+ * NOTE:
+ *
+ * This routine is based on the SPARC routine _CPU_ISR_install_raw_handler.
+ * If a software trap handler is installed as an executive interrupt handler
+ * (which is desirable since RTEMS takes care of window and register issues),
+ * then the executive needs to know that the return address is to the trap
+ * rather than the instruction following the trap.
+ *
+ */
+
+void _CPU_ISR_install_raw_handler(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ unsigned32 real_vector;
+ CPU_Trap_table_entry *slot;
+ unsigned32 u32_handler=0;
+
+ /*
+ * Get the "real" trap number for this vector ignoring the synchronous
+ * versus asynchronous indicator included with our vector numbers.
+ */
+
+ real_vector = vector;
+
+ /*
+ * Get the current base address of the trap table and calculate a pointer
+ * to the slot we are interested in.
+ */
+ slot = (CPU_Trap_table_entry *)ppc_exception_vector_addr( real_vector );
+
+ /*
+ * Get the address of the old_handler from the trap table.
+ *
+ * NOTE: The old_handler returned will be bogus if it does not follow
+ * the RTEMS model.
+ */
+
+#define HIGH_BITS_MASK 0xFFFFFC00
+#define HIGH_BITS_SHIFT 10
+#define LOW_BITS_MASK 0x000003FF
+
+ if (slot->stwu_r1 == _CPU_Trap_slot_template.stwu_r1) {
+ /*
+ * Set u32_handler = to target address
+ */
+ u32_handler = slot->b_Handler & 0x03fffffc;
+
+ /* IMD FIX: sign extend address fragment... */
+ if (u32_handler & 0x02000000) {
+ u32_handler |= 0xfc000000;
+ }
+
+ *old_handler = (proc_ptr) u32_handler;
+ } else
+/* There are two kinds of handlers for the MPC860. One is the 'standard'
+ * one like above. The other is for the cascaded interrupts from the SIU
+ * and CPM. Therefore we must check for the alternate one if the standard
+ * one is not present
+ */
+#if defined(mpc860) || defined(mpc821)
+ if (slot->stwu_r1 == _CPU_Trap_slot_template_m860.stwu_r1) {
+ /*
+ * Set u32_handler = to target address
+ */
+ u32_handler = slot->b_Handler & 0x03fffffc;
+ *old_handler = (proc_ptr) u32_handler;
+ } else
+#endif /* mpc860 */
+
+ *old_handler = 0;
+
+ /*
+ * Copy the template to the slot and then fix it.
+ */
+#if defined(mpc860) || defined(mpc821)
+ if (vector >= PPC_IRQ_IRQ0)
+ *slot = _CPU_Trap_slot_template_m860;
+ else
+#endif /* mpc860 */
+ *slot = _CPU_Trap_slot_template;
+
+ u32_handler = (unsigned32) new_handler;
+
+ /*
+ * IMD FIX: insert address fragment only (bits 6..29)
+ * therefore check for proper address range
+ * and remove unwanted bits
+ */
+ if ((u32_handler & 0xfc000000) == 0xfc000000) {
+ u32_handler &= ~0xfc000000;
+ }
+ else if ((u32_handler & 0xfc000000) != 0x00000000) {
+ _Internal_error_Occurred(INTERNAL_ERROR_CORE,
+ TRUE,
+ u32_handler);
+ }
+
+ slot->b_Handler |= u32_handler;
+
+ slot->li_r0_IRQ |= vector;
+
+ _CPU_Data_Cache_Block_Flush( slot );
+}
+
+unsigned32 ppc_exception_vector_addr(
+ unsigned32 vector
+)
+{
+#if (!PPC_HAS_EVPR)
+ unsigned32 Msr;
+#endif
+ unsigned32 Top = 0;
+ unsigned32 Offset = 0x000;
+
+#if (PPC_HAS_EXCEPTION_PREFIX)
+ _CPU_MSR_Value ( Msr );
+ if ( ( Msr & PPC_MSR_EP) != 0 ) /* Vectors at FFFx_xxxx */
+ Top = 0xfff00000;
+#elif (PPC_HAS_EVPR)
+ asm volatile( "mfspr %0,0x3d6" : "=r" (Top)); /* EVPR */
+ Top = Top & 0xffff0000;
+#endif
+
+ switch ( vector ) {
+ case PPC_IRQ_SYSTEM_RESET: /* on 40x aka PPC_IRQ_CRIT */
+ Offset = 0x00100;
+ break;
+ case PPC_IRQ_MCHECK:
+ Offset = 0x00200;
+ break;
+ case PPC_IRQ_PROTECT:
+ Offset = 0x00300;
+ break;
+ case PPC_IRQ_ISI:
+ Offset = 0x00400;
+ break;
+ case PPC_IRQ_EXTERNAL:
+ Offset = 0x00500;
+ break;
+ case PPC_IRQ_ALIGNMENT:
+ Offset = 0x00600;
+ break;
+ case PPC_IRQ_PROGRAM:
+ Offset = 0x00700;
+ break;
+ case PPC_IRQ_NOFP:
+ Offset = 0x00800;
+ break;
+ case PPC_IRQ_DECREMENTER:
+ Offset = 0x00900;
+ break;
+ case PPC_IRQ_RESERVED_A:
+ Offset = 0x00a00;
+ break;
+ case PPC_IRQ_RESERVED_B:
+ Offset = 0x00b00;
+ break;
+ case PPC_IRQ_SCALL:
+ Offset = 0x00c00;
+ break;
+ case PPC_IRQ_TRACE:
+ Offset = 0x00d00;
+ break;
+ case PPC_IRQ_FP_ASST:
+ Offset = 0x00e00;
+ break;
+
+#if defined(ppc403)
+
+/* PPC_IRQ_CRIT is the same vector as PPC_IRQ_RESET
+ case PPC_IRQ_CRIT:
+ Offset = 0x00100;
+ break;
+*/
+ case PPC_IRQ_PIT:
+ Offset = 0x01000;
+ break;
+ case PPC_IRQ_FIT:
+ Offset = 0x01010;
+ break;
+ case PPC_IRQ_WATCHDOG:
+ Offset = 0x01020;
+ break;
+ case PPC_IRQ_DEBUG:
+ Offset = 0x02000;
+ break;
+
+#elif defined(ppc601)
+ case PPC_IRQ_TRACE:
+ Offset = 0x02000;
+ break;
+
+#elif defined(ppc603)
+ case PPC_IRQ_TRANS_MISS:
+ Offset = 0x1000;
+ break;
+ case PPC_IRQ_DATA_LOAD:
+ Offset = 0x1100;
+ break;
+ case PPC_IRQ_DATA_STORE:
+ Offset = 0x1200;
+ break;
+ case PPC_IRQ_ADDR_BRK:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_SYS_MGT:
+ Offset = 0x1400;
+ break;
+
+#elif defined(ppc603e)
+ case PPC_TLB_INST_MISS:
+ Offset = 0x1000;
+ break;
+ case PPC_TLB_LOAD_MISS:
+ Offset = 0x1100;
+ break;
+ case PPC_TLB_STORE_MISS:
+ Offset = 0x1200;
+ break;
+ case PPC_IRQ_ADDRBRK:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_SYS_MGT:
+ Offset = 0x1400;
+ break;
+
+#elif defined(mpc604)
+ case PPC_IRQ_ADDR_BRK:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_SYS_MGT:
+ Offset = 0x1400;
+ break;
+
+#elif defined(mpc860) || defined(mpc821)
+ case PPC_IRQ_EMULATE:
+ Offset = 0x1000;
+ break;
+ case PPC_IRQ_INST_MISS:
+ Offset = 0x1100;
+ break;
+ case PPC_IRQ_DATA_MISS:
+ Offset = 0x1200;
+ break;
+ case PPC_IRQ_INST_ERR:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_DATA_ERR:
+ Offset = 0x1400;
+ break;
+ case PPC_IRQ_DATA_BPNT:
+ Offset = 0x1c00;
+ break;
+ case PPC_IRQ_INST_BPNT:
+ Offset = 0x1d00;
+ break;
+ case PPC_IRQ_IO_BPNT:
+ Offset = 0x1e00;
+ break;
+ case PPC_IRQ_DEV_PORT:
+ Offset = 0x1f00;
+ break;
+ case PPC_IRQ_IRQ0:
+ Offset = 0x2000;
+ break;
+ case PPC_IRQ_LVL0:
+ Offset = 0x2040;
+ break;
+ case PPC_IRQ_IRQ1:
+ Offset = 0x2080;
+ break;
+ case PPC_IRQ_LVL1:
+ Offset = 0x20c0;
+ break;
+ case PPC_IRQ_IRQ2:
+ Offset = 0x2100;
+ break;
+ case PPC_IRQ_LVL2:
+ Offset = 0x2140;
+ break;
+ case PPC_IRQ_IRQ3:
+ Offset = 0x2180;
+ break;
+ case PPC_IRQ_LVL3:
+ Offset = 0x21c0;
+ break;
+ case PPC_IRQ_IRQ4:
+ Offset = 0x2200;
+ break;
+ case PPC_IRQ_LVL4:
+ Offset = 0x2240;
+ break;
+ case PPC_IRQ_IRQ5:
+ Offset = 0x2280;
+ break;
+ case PPC_IRQ_LVL5:
+ Offset = 0x22c0;
+ break;
+ case PPC_IRQ_IRQ6:
+ Offset = 0x2300;
+ break;
+ case PPC_IRQ_LVL6:
+ Offset = 0x2340;
+ break;
+ case PPC_IRQ_IRQ7:
+ Offset = 0x2380;
+ break;
+ case PPC_IRQ_LVL7:
+ Offset = 0x23c0;
+ break;
+ case PPC_IRQ_CPM_RESERVED_0:
+ Offset = 0x2400;
+ break;
+ case PPC_IRQ_CPM_PC4:
+ Offset = 0x2410;
+ break;
+ case PPC_IRQ_CPM_PC5:
+ Offset = 0x2420;
+ break;
+ case PPC_IRQ_CPM_SMC2:
+ Offset = 0x2430;
+ break;
+ case PPC_IRQ_CPM_SMC1:
+ Offset = 0x2440;
+ break;
+ case PPC_IRQ_CPM_SPI:
+ Offset = 0x2450;
+ break;
+ case PPC_IRQ_CPM_PC6:
+ Offset = 0x2460;
+ break;
+ case PPC_IRQ_CPM_TIMER4:
+ Offset = 0x2470;
+ break;
+ case PPC_IRQ_CPM_RESERVED_8:
+ Offset = 0x2480;
+ break;
+ case PPC_IRQ_CPM_PC7:
+ Offset = 0x2490;
+ break;
+ case PPC_IRQ_CPM_PC8:
+ Offset = 0x24a0;
+ break;
+ case PPC_IRQ_CPM_PC9:
+ Offset = 0x24b0;
+ break;
+ case PPC_IRQ_CPM_TIMER3:
+ Offset = 0x24c0;
+ break;
+ case PPC_IRQ_CPM_RESERVED_D:
+ Offset = 0x24d0;
+ break;
+ case PPC_IRQ_CPM_PC10:
+ Offset = 0x24e0;
+ break;
+ case PPC_IRQ_CPM_PC11:
+ Offset = 0x24f0;
+ break;
+ case PPC_IRQ_CPM_I2C:
+ Offset = 0x2500;
+ break;
+ case PPC_IRQ_CPM_RISC_TIMER:
+ Offset = 0x2510;
+ break;
+ case PPC_IRQ_CPM_TIMER2:
+ Offset = 0x2520;
+ break;
+ case PPC_IRQ_CPM_RESERVED_13:
+ Offset = 0x2530;
+ break;
+ case PPC_IRQ_CPM_IDMA2:
+ Offset = 0x2540;
+ break;
+ case PPC_IRQ_CPM_IDMA1:
+ Offset = 0x2550;
+ break;
+ case PPC_IRQ_CPM_SDMA_ERROR:
+ Offset = 0x2560;
+ break;
+ case PPC_IRQ_CPM_PC12:
+ Offset = 0x2570;
+ break;
+ case PPC_IRQ_CPM_PC13:
+ Offset = 0x2580;
+ break;
+ case PPC_IRQ_CPM_TIMER1:
+ Offset = 0x2590;
+ break;
+ case PPC_IRQ_CPM_PC14:
+ Offset = 0x25a0;
+ break;
+ case PPC_IRQ_CPM_SCC4:
+ Offset = 0x25b0;
+ break;
+ case PPC_IRQ_CPM_SCC3:
+ Offset = 0x25c0;
+ break;
+ case PPC_IRQ_CPM_SCC2:
+ Offset = 0x25d0;
+ break;
+ case PPC_IRQ_CPM_SCC1:
+ Offset = 0x25e0;
+ break;
+ case PPC_IRQ_CPM_PC15:
+ Offset = 0x25f0;
+ break;
+#endif
+
+ }
+ Top += Offset;
+ return Top;
+}
+
diff --git a/c/src/lib/libcpu/powerpc/old-exceptions/cpu_asm.S b/c/src/lib/libcpu/powerpc/old-exceptions/cpu_asm.S
new file mode 100644
index 0000000000..a377fa5d2a
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old-exceptions/cpu_asm.S
@@ -0,0 +1,809 @@
+
+/* cpu_asm.s 1.1 - 95/12/04
+ *
+ * This file contains the assembly code for the PowerPC implementation
+ * of RTEMS.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu_asm.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+/*
+ * Offsets for various Contexts
+ */
+ .set GP_1, 0
+ .set GP_2, (GP_1 + 4)
+ .set GP_13, (GP_2 + 4)
+ .set GP_14, (GP_13 + 4)
+
+ .set GP_15, (GP_14 + 4)
+ .set GP_16, (GP_15 + 4)
+ .set GP_17, (GP_16 + 4)
+ .set GP_18, (GP_17 + 4)
+
+ .set GP_19, (GP_18 + 4)
+ .set GP_20, (GP_19 + 4)
+ .set GP_21, (GP_20 + 4)
+ .set GP_22, (GP_21 + 4)
+
+ .set GP_23, (GP_22 + 4)
+ .set GP_24, (GP_23 + 4)
+ .set GP_25, (GP_24 + 4)
+ .set GP_26, (GP_25 + 4)
+
+ .set GP_27, (GP_26 + 4)
+ .set GP_28, (GP_27 + 4)
+ .set GP_29, (GP_28 + 4)
+ .set GP_30, (GP_29 + 4)
+
+ .set GP_31, (GP_30 + 4)
+ .set GP_CR, (GP_31 + 4)
+ .set GP_PC, (GP_CR + 4)
+ .set GP_MSR, (GP_PC + 4)
+
+#if (PPC_HAS_DOUBLE == 1)
+ .set FP_0, 0
+ .set FP_1, (FP_0 + 8)
+ .set FP_2, (FP_1 + 8)
+ .set FP_3, (FP_2 + 8)
+ .set FP_4, (FP_3 + 8)
+ .set FP_5, (FP_4 + 8)
+ .set FP_6, (FP_5 + 8)
+ .set FP_7, (FP_6 + 8)
+ .set FP_8, (FP_7 + 8)
+ .set FP_9, (FP_8 + 8)
+ .set FP_10, (FP_9 + 8)
+ .set FP_11, (FP_10 + 8)
+ .set FP_12, (FP_11 + 8)
+ .set FP_13, (FP_12 + 8)
+ .set FP_14, (FP_13 + 8)
+ .set FP_15, (FP_14 + 8)
+ .set FP_16, (FP_15 + 8)
+ .set FP_17, (FP_16 + 8)
+ .set FP_18, (FP_17 + 8)
+ .set FP_19, (FP_18 + 8)
+ .set FP_20, (FP_19 + 8)
+ .set FP_21, (FP_20 + 8)
+ .set FP_22, (FP_21 + 8)
+ .set FP_23, (FP_22 + 8)
+ .set FP_24, (FP_23 + 8)
+ .set FP_25, (FP_24 + 8)
+ .set FP_26, (FP_25 + 8)
+ .set FP_27, (FP_26 + 8)
+ .set FP_28, (FP_27 + 8)
+ .set FP_29, (FP_28 + 8)
+ .set FP_30, (FP_29 + 8)
+ .set FP_31, (FP_30 + 8)
+ .set FP_FPSCR, (FP_31 + 8)
+#else
+ .set FP_0, 0
+ .set FP_1, (FP_0 + 4)
+ .set FP_2, (FP_1 + 4)
+ .set FP_3, (FP_2 + 4)
+ .set FP_4, (FP_3 + 4)
+ .set FP_5, (FP_4 + 4)
+ .set FP_6, (FP_5 + 4)
+ .set FP_7, (FP_6 + 4)
+ .set FP_8, (FP_7 + 4)
+ .set FP_9, (FP_8 + 4)
+ .set FP_10, (FP_9 + 4)
+ .set FP_11, (FP_10 + 4)
+ .set FP_12, (FP_11 + 4)
+ .set FP_13, (FP_12 + 4)
+ .set FP_14, (FP_13 + 4)
+ .set FP_15, (FP_14 + 4)
+ .set FP_16, (FP_15 + 4)
+ .set FP_17, (FP_16 + 4)
+ .set FP_18, (FP_17 + 4)
+ .set FP_19, (FP_18 + 4)
+ .set FP_20, (FP_19 + 4)
+ .set FP_21, (FP_20 + 4)
+ .set FP_22, (FP_21 + 4)
+ .set FP_23, (FP_22 + 4)
+ .set FP_24, (FP_23 + 4)
+ .set FP_25, (FP_24 + 4)
+ .set FP_26, (FP_25 + 4)
+ .set FP_27, (FP_26 + 4)
+ .set FP_28, (FP_27 + 4)
+ .set FP_29, (FP_28 + 4)
+ .set FP_30, (FP_29 + 4)
+ .set FP_31, (FP_30 + 4)
+ .set FP_FPSCR, (FP_31 + 4)
+#endif
+
+ .set IP_LINK, 0
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ .set IP_0, (IP_LINK + 56)
+#else
+ .set IP_0, (IP_LINK + 8)
+#endif
+ .set IP_2, (IP_0 + 4)
+
+ .set IP_3, (IP_2 + 4)
+ .set IP_4, (IP_3 + 4)
+ .set IP_5, (IP_4 + 4)
+ .set IP_6, (IP_5 + 4)
+
+ .set IP_7, (IP_6 + 4)
+ .set IP_8, (IP_7 + 4)
+ .set IP_9, (IP_8 + 4)
+ .set IP_10, (IP_9 + 4)
+
+ .set IP_11, (IP_10 + 4)
+ .set IP_12, (IP_11 + 4)
+ .set IP_13, (IP_12 + 4)
+ .set IP_28, (IP_13 + 4)
+
+ .set IP_29, (IP_28 + 4)
+ .set IP_30, (IP_29 + 4)
+ .set IP_31, (IP_30 + 4)
+ .set IP_CR, (IP_31 + 4)
+
+ .set IP_CTR, (IP_CR + 4)
+ .set IP_XER, (IP_CTR + 4)
+ .set IP_LR, (IP_XER + 4)
+ .set IP_PC, (IP_LR + 4)
+
+ .set IP_MSR, (IP_PC + 4)
+ .set IP_END, (IP_MSR + 16)
+
+ /* _CPU_IRQ_info offsets */
+
+ /* These must be in this order */
+ .set Nest_level, 0
+ .set Disable_level, 4
+ .set Vector_table, 8
+ .set Stack, 12
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ .set Dispatch_r2, 16
+ .set Switch_necessary, 20
+#else
+ .set Default_r2, 16
+#if (PPC_ABI != PPC_ABI_GCC27)
+ .set Default_r13, 20
+ .set Switch_necessary, 24
+#else
+ .set Switch_necessary, 20
+#endif
+#endif
+ .set Signal, Switch_necessary + 4
+ .set msr_initial, Signal + 4
+
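+/*
+ * These offsets mirror the members of the structure behind _CPU_IRQ_info,
+ * which cpu.c initializes.  As a sketch of the SVR4/EABI layout, one
+ * 32-bit word per member (the structure tag shown here is only
+ * illustrative; the real declaration lives in cpu.h):
+ *
+ *   struct cpu_irq_info {
+ *     unsigned32 Nest_level;
+ *     unsigned32 Disable_level;
+ *     unsigned32 Vector_table;
+ *     unsigned32 Stack;
+ *     unsigned32 Default_r2;
+ *     unsigned32 Default_r13;
+ *     unsigned32 Switch_necessary;
+ *     unsigned32 Signal;
+ *     unsigned32 msr_initial;
+ *   };
+ */
+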
+ BEGIN_CODE
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine is responsible for saving the FP context
+ * at *fp_context_ptr. If the location to save the FP context
+ * to is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
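+/*
+ * In C terms the routine below behaves roughly like (a sketch only; the
+ * real prototype is declared in cpu.h):
+ *
+ *   void _CPU_Context_save_fp( void **fp_context_ptr );
+ *
+ * The first load dereferences the handle so that r3 points at the
+ * Context_Control_fp area itself.
+ */
+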
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_save_fp)
+PROC (_CPU_Context_save_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+#if (PPC_HAS_DOUBLE == 1)
+ stfd f0, FP_0(r3)
+ stfd f1, FP_1(r3)
+ stfd f2, FP_2(r3)
+ stfd f3, FP_3(r3)
+ stfd f4, FP_4(r3)
+ stfd f5, FP_5(r3)
+ stfd f6, FP_6(r3)
+ stfd f7, FP_7(r3)
+ stfd f8, FP_8(r3)
+ stfd f9, FP_9(r3)
+ stfd f10, FP_10(r3)
+ stfd f11, FP_11(r3)
+ stfd f12, FP_12(r3)
+ stfd f13, FP_13(r3)
+ stfd f14, FP_14(r3)
+ stfd f15, FP_15(r3)
+ stfd f16, FP_16(r3)
+ stfd f17, FP_17(r3)
+ stfd f18, FP_18(r3)
+ stfd f19, FP_19(r3)
+ stfd f20, FP_20(r3)
+ stfd f21, FP_21(r3)
+ stfd f22, FP_22(r3)
+ stfd f23, FP_23(r3)
+ stfd f24, FP_24(r3)
+ stfd f25, FP_25(r3)
+ stfd f26, FP_26(r3)
+ stfd f27, FP_27(r3)
+ stfd f28, FP_28(r3)
+ stfd f29, FP_29(r3)
+ stfd f30, FP_30(r3)
+ stfd f31, FP_31(r3)
+ mffs f2
+ stfd f2, FP_FPSCR(r3)
+#else
+ stfs f0, FP_0(r3)
+ stfs f1, FP_1(r3)
+ stfs f2, FP_2(r3)
+ stfs f3, FP_3(r3)
+ stfs f4, FP_4(r3)
+ stfs f5, FP_5(r3)
+ stfs f6, FP_6(r3)
+ stfs f7, FP_7(r3)
+ stfs f8, FP_8(r3)
+ stfs f9, FP_9(r3)
+ stfs f10, FP_10(r3)
+ stfs f11, FP_11(r3)
+ stfs f12, FP_12(r3)
+ stfs f13, FP_13(r3)
+ stfs f14, FP_14(r3)
+ stfs f15, FP_15(r3)
+ stfs f16, FP_16(r3)
+ stfs f17, FP_17(r3)
+ stfs f18, FP_18(r3)
+ stfs f19, FP_19(r3)
+ stfs f20, FP_20(r3)
+ stfs f21, FP_21(r3)
+ stfs f22, FP_22(r3)
+ stfs f23, FP_23(r3)
+ stfs f24, FP_24(r3)
+ stfs f25, FP_25(r3)
+ stfs f26, FP_26(r3)
+ stfs f27, FP_27(r3)
+ stfs f28, FP_28(r3)
+ stfs f29, FP_29(r3)
+ stfs f30, FP_30(r3)
+ stfs f31, FP_31(r3)
+ mffs f2
+ stfs f2, FP_FPSCR(r3)
+#endif
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine is responsible for restoring the FP context
+ * at *fp_context_ptr. If the location to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore_fp)
+PROC (_CPU_Context_restore_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+#if (PPC_HAS_DOUBLE == 1)
+ lfd f2, FP_FPSCR(r3)
+ mtfsf 255, f2
+ lfd f0, FP_0(r3)
+ lfd f1, FP_1(r3)
+ lfd f2, FP_2(r3)
+ lfd f3, FP_3(r3)
+ lfd f4, FP_4(r3)
+ lfd f5, FP_5(r3)
+ lfd f6, FP_6(r3)
+ lfd f7, FP_7(r3)
+ lfd f8, FP_8(r3)
+ lfd f9, FP_9(r3)
+ lfd f10, FP_10(r3)
+ lfd f11, FP_11(r3)
+ lfd f12, FP_12(r3)
+ lfd f13, FP_13(r3)
+ lfd f14, FP_14(r3)
+ lfd f15, FP_15(r3)
+ lfd f16, FP_16(r3)
+ lfd f17, FP_17(r3)
+ lfd f18, FP_18(r3)
+ lfd f19, FP_19(r3)
+ lfd f20, FP_20(r3)
+ lfd f21, FP_21(r3)
+ lfd f22, FP_22(r3)
+ lfd f23, FP_23(r3)
+ lfd f24, FP_24(r3)
+ lfd f25, FP_25(r3)
+ lfd f26, FP_26(r3)
+ lfd f27, FP_27(r3)
+ lfd f28, FP_28(r3)
+ lfd f29, FP_29(r3)
+ lfd f30, FP_30(r3)
+ lfd f31, FP_31(r3)
+#else
+ lfs f2, FP_FPSCR(r3)
+ mtfsf 255, f2
+ lfs f0, FP_0(r3)
+ lfs f1, FP_1(r3)
+ lfs f2, FP_2(r3)
+ lfs f3, FP_3(r3)
+ lfs f4, FP_4(r3)
+ lfs f5, FP_5(r3)
+ lfs f6, FP_6(r3)
+ lfs f7, FP_7(r3)
+ lfs f8, FP_8(r3)
+ lfs f9, FP_9(r3)
+ lfs f10, FP_10(r3)
+ lfs f11, FP_11(r3)
+ lfs f12, FP_12(r3)
+ lfs f13, FP_13(r3)
+ lfs f14, FP_14(r3)
+ lfs f15, FP_15(r3)
+ lfs f16, FP_16(r3)
+ lfs f17, FP_17(r3)
+ lfs f18, FP_18(r3)
+ lfs f19, FP_19(r3)
+ lfs f20, FP_20(r3)
+ lfs f21, FP_21(r3)
+ lfs f22, FP_22(r3)
+ lfs f23, FP_23(r3)
+ lfs f24, FP_24(r3)
+ lfs f25, FP_25(r3)
+ lfs f26, FP_26(r3)
+ lfs f27, FP_27(r3)
+ lfs f28, FP_28(r3)
+ lfs f29, FP_29(r3)
+ lfs f30, FP_30(r3)
+ lfs f31, FP_31(r3)
+#endif
+#endif
+ blr
+
+
+/* _CPU_Context_switch
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_switch)
+PROC (_CPU_Context_switch):
+ sync
+ isync
+#if (PPC_CACHE_ALIGNMENT == 4) /* No cache */
+ stw r1, GP_1(r3)
+ lwz r1, GP_1(r4)
+ stw r2, GP_2(r3)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ stmw r13, GP_13(r3)
+ lmw r13, GP_13(r4)
+#else
+ stw r13, GP_13(r3)
+ lwz r13, GP_13(r4)
+ stw r14, GP_14(r3)
+ lwz r14, GP_14(r4)
+ stw r15, GP_15(r3)
+ lwz r15, GP_15(r4)
+ stw r16, GP_16(r3)
+ lwz r16, GP_16(r4)
+ stw r17, GP_17(r3)
+ lwz r17, GP_17(r4)
+ stw r18, GP_18(r3)
+ lwz r18, GP_18(r4)
+ stw r19, GP_19(r3)
+ lwz r19, GP_19(r4)
+ stw r20, GP_20(r3)
+ lwz r20, GP_20(r4)
+ stw r21, GP_21(r3)
+ lwz r21, GP_21(r4)
+ stw r22, GP_22(r3)
+ lwz r22, GP_22(r4)
+ stw r23, GP_23(r3)
+ lwz r23, GP_23(r4)
+ stw r24, GP_24(r3)
+ lwz r24, GP_24(r4)
+ stw r25, GP_25(r3)
+ lwz r25, GP_25(r4)
+ stw r26, GP_26(r3)
+ lwz r26, GP_26(r4)
+ stw r27, GP_27(r3)
+ lwz r27, GP_27(r4)
+ stw r28, GP_28(r3)
+ lwz r28, GP_28(r4)
+ stw r29, GP_29(r3)
+ lwz r29, GP_29(r4)
+ stw r30, GP_30(r3)
+ lwz r30, GP_30(r4)
+ stw r31, GP_31(r3)
+ lwz r31, GP_31(r4)
+#endif
+ mfcr r5
+ stw r5, GP_CR(r3)
+ lwz r5, GP_CR(r4)
+ mflr r6
+ mtcrf 255, r5
+ stw r6, GP_PC(r3)
+ lwz r6, GP_PC(r4)
+ mfmsr r7
+ mtlr r6
+ stw r7, GP_MSR(r3)
+ lwz r7, GP_MSR(r4)
+ mtmsr r7
+#endif
+#if (PPC_CACHE_ALIGNMENT == 16)
+ /* This assumes that all the registers are in the given order */
+ li r5, 16
+ addi r3,r3,-4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_14+4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+
+ addi r3, r3, GP_18-GP_14
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ addi r3, r3, GP_22-GP_18
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ addi r3, r3, GP_26-GP_22
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stmw r13, GP_13-GP_26(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stwu r14, GP_14+4(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r15, GP_15-GP_14(r3)
+ stw r16, GP_16-GP_14(r3)
+ stw r17, GP_17-GP_14(r3)
+ stwu r18, GP_18-GP_14(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stwu r22, GP_22-GP_18(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r23, GP_23-GP_22(r3)
+ stw r24, GP_24-GP_22(r3)
+ stw r25, GP_25-GP_22(r3)
+ stwu r26, GP_26-GP_22(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r27, GP_27-GP_26(r3)
+ stw r28, GP_28-GP_26(r3)
+ stw r29, GP_29-GP_26(r3)
+ stw r30, GP_30-GP_26(r3)
+ stw r31, GP_31-GP_26(r3)
+#endif
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r0, r4
+#endif
+ mfcr r6
+ stw r6, GP_CR-GP_26(r3)
+ mflr r7
+ stw r7, GP_PC-GP_26(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_26(r3)
+
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_15
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ addi r4, r4, GP_19-GP_15
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ addi r4, r4, GP_23-GP_19
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ addi r4, r4, GP_27-GP_23
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lmw r13, GP_13-GP_27(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwzu r15, GP_15(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r16, GP_16-GP_15(r4)
+ lwz r17, GP_17-GP_15(r4)
+ lwz r18, GP_18-GP_15(r4)
+ lwzu r19, GP_19-GP_15(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwzu r23, GP_23-GP_19(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r24, GP_24-GP_23(r4)
+ lwz r25, GP_25-GP_23(r4)
+ lwz r26, GP_26-GP_23(r4)
+ lwzu r27, GP_27-GP_23(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r28, GP_28-GP_27(r4)
+ lwz r29, GP_29-GP_27(r4)
+ lwz r30, GP_30-GP_27(r4)
+ lwz r31, GP_31-GP_27(r4)
+#endif
+ lwz r6, GP_CR-GP_27(r4)
+ lwz r7, GP_PC-GP_27(r4)
+ lwz r8, GP_MSR-GP_27(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+#endif
+#if (PPC_CACHE_ALIGNMENT == 32)
+ /* This assumes that all the registers are in the given order */
+ li r5, 32
+ addi r3,r3,-4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_18+4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stmw r13, GP_13-GP_18(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stw r14, GP_14+4(r3)
+ stw r15, GP_15+4(r3)
+ stw r16, GP_16+4(r3)
+ stw r17, GP_17+4(r3)
+ stwu r18, GP_18+4(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stw r22, GP_22-GP_18(r3)
+ stw r23, GP_23-GP_18(r3)
+ stw r24, GP_24-GP_18(r3)
+ stw r25, GP_25-GP_18(r3)
+ stw r26, GP_26-GP_18(r3)
+ stw r27, GP_27-GP_18(r3)
+ stw r28, GP_28-GP_18(r3)
+ stw r29, GP_29-GP_18(r3)
+ stw r30, GP_30-GP_18(r3)
+ stw r31, GP_31-GP_18(r3)
+#endif
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r0, r4
+#endif
+ mfcr r6
+ stw r6, GP_CR-GP_18(r3)
+ mflr r7
+ stw r7, GP_PC-GP_18(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_18(r3)
+
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_19
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lmw r13, GP_13-GP_19(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwz r15, GP_15(r4)
+ lwz r16, GP_16(r4)
+ lwz r17, GP_17(r4)
+ lwz r18, GP_18(r4)
+ lwzu r19, GP_19(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwz r23, GP_23-GP_19(r4)
+ lwz r24, GP_24-GP_19(r4)
+ lwz r25, GP_25-GP_19(r4)
+ lwz r26, GP_26-GP_19(r4)
+ lwz r27, GP_27-GP_19(r4)
+ lwz r28, GP_28-GP_19(r4)
+ lwz r29, GP_29-GP_19(r4)
+ lwz r30, GP_30-GP_19(r4)
+ lwz r31, GP_31-GP_19(r4)
+#endif
+ lwz r6, GP_CR-GP_19(r4)
+ lwz r7, GP_PC-GP_19(r4)
+ lwz r8, GP_MSR-GP_19(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+/*
+ * ACB: Don't worry about cache optimisation here - this is not THAT critical.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore)
+PROC (_CPU_Context_restore):
+ lwz r5, GP_CR(r3)
+ lwz r6, GP_PC(r3)
+ lwz r7, GP_MSR(r3)
+ mtcrf 255, r5
+ mtlr r6
+ mtmsr r7
+ lwz r1, GP_1(r3)
+ lwz r2, GP_2(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ lmw r13, GP_13(r3)
+#else
+ lwz r13, GP_13(r3)
+ lwz r14, GP_14(r3)
+ lwz r15, GP_15(r3)
+ lwz r16, GP_16(r3)
+ lwz r17, GP_17(r3)
+ lwz r18, GP_18(r3)
+ lwz r19, GP_19(r3)
+ lwz r20, GP_20(r3)
+ lwz r21, GP_21(r3)
+ lwz r22, GP_22(r3)
+ lwz r23, GP_23(r3)
+ lwz r24, GP_24(r3)
+ lwz r25, GP_25(r3)
+ lwz r26, GP_26(r3)
+ lwz r27, GP_27(r3)
+ lwz r28, GP_28(r3)
+ lwz r29, GP_29(r3)
+ lwz r30, GP_30(r3)
+ lwz r31, GP_31(r3)
+#endif
+
+ blr
+
+/* Individual interrupt prologues look like this:
+ * #if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ * #if (PPC_HAS_FPU)
+ * stwu r1, -(20*4 + 18*8 + IP_END)(r1)
+ * #else
+ * stwu r1, -(20*4 + IP_END)(r1)
+ * #endif
+ * #else
+ * stwu r1, -(IP_END)(r1)
+ * #endif
+ * stw r0, IP_0(r1)
+ *
+ * li r0, vectornum
+ * b PROC (_ISR_Handler{,C})
+ */
+
+/* void _ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ * The vector number is in r0. R0 has already been stacked.
+ *
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_ISR_Handler)
+PROC (_ISR_Handler):
+#define LABEL(x) x
+/* XXX ??
+#define MTSAVE(x) mtspr sprg0, x
+#define MFSAVE(x) mfspr x, sprg0
+*/
+#define MTPC(x) mtspr srr0, x
+#define MFPC(x) mfspr x, srr0
+#define MTMSR(x) mtspr srr1, x
+#define MFMSR(x) mfspr x, srr1
+
+ #include "irq_stub.S"
+ rfi
+
+#if (PPC_HAS_RFCI == 1)
+/* void _ISR_HandlerC()
+ *
+ * This routine provides the RTEMS interrupt management.
+ * For critical interrupts
+ *
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_ISR_HandlerC)
+PROC (_ISR_HandlerC):
+#undef LABEL
+#undef MTSAVE
+#undef MFSAVE
+#undef MTPC
+#undef MFPC
+#undef MTMSR
+#undef MFMSR
+#define LABEL(x) x##_C
+/* XXX??
+#define MTSAVE(x) mtspr sprg1, x
+#define MFSAVE(x) mfspr x, sprg1
+*/
+#define MTPC(x) mtspr srr2, x
+#define MFPC(x) mfspr x, srr2
+#define MTMSR(x) mtspr srr3, x
+#define MFMSR(x) mfspr x, srr3
+ #include "irq_stub.S"
+ rfci
+#endif
+
+/* PowerOpen descriptors for indirect function calls.
+ */
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ DESCRIPTOR (_CPU_Context_save_fp)
+ DESCRIPTOR (_CPU_Context_restore_fp)
+ DESCRIPTOR (_CPU_Context_switch)
+ DESCRIPTOR (_CPU_Context_restore)
+ DESCRIPTOR (_ISR_Handler)
+#if (PPC_HAS_RFCI == 1)
+ DESCRIPTOR (_ISR_HandlerC)
+#endif
+#endif
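+
+/*
+ * Under the PowerOpen ABI a function "address" is really the address of a
+ * descriptor, roughly (a sketch only; the member names are illustrative):
+ *
+ *   typedef struct {
+ *     unsigned32 entry_point;
+ *     unsigned32 toc_value;
+ *     unsigned32 environment;
+ *   } poweropen_descriptor;
+ *
+ * This is why the interrupt stub loads the handler address, r2 and r11
+ * from three consecutive words, and why cpu.c reads desc[0] and desc[1]
+ * when a PowerOpen entry point is installed in a task context.
+ */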
diff --git a/c/src/lib/libcpu/powerpc/old-exceptions/irq_stub.S b/c/src/lib/libcpu/powerpc/old-exceptions/irq_stub.S
new file mode 100644
index 0000000000..76c8927305
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old-exceptions/irq_stub.S
@@ -0,0 +1,268 @@
+/*
+ * This file contains the interrupt handler assembly code for the PowerPC
+ * implementation of RTEMS. It is #included from cpu_asm.s.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * $Id$
+ */
+
+/* void _ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ * The vector number is in r0. R0 has already been stacked.
+ *
+ */
+ PUBLIC_VAR (_CPU_IRQ_info )
+
+ /* Finish off the interrupt frame */
+ stw r2, IP_2(r1)
+ stw r3, IP_3(r1)
+ stw r4, IP_4(r1)
+ stw r5, IP_5(r1)
+ stw r6, IP_6(r1)
+ stw r7, IP_7(r1)
+ stw r8, IP_8(r1)
+ stw r9, IP_9(r1)
+ stw r10, IP_10(r1)
+ stw r11, IP_11(r1)
+ stw r12, IP_12(r1)
+ stw r13, IP_13(r1)
+ stmw r28, IP_28(r1)
+ mfcr r5
+ mfctr r6
+ mfxer r7
+ mflr r8
+ MFPC (r9)
+ MFMSR (r10)
+ /* Establish addressing */
+#if (PPC_USE_SPRG)
+ mfspr r11, sprg3
+#else
+ lis r11,_CPU_IRQ_info@ha
+ addi r11,r11,_CPU_IRQ_info@l
+#endif
+ dcbt r0, r11
+ stw r5, IP_CR(r1)
+ stw r6, IP_CTR(r1)
+ stw r7, IP_XER(r1)
+ stw r8, IP_LR(r1)
+ stw r9, IP_PC(r1)
+ stw r10, IP_MSR(r1)
+
+ lwz r30, Vector_table(r11)
+ slwi r4,r0,2
+ lwz r28, Nest_level(r11)
+ add r4, r4, r30
+
+ lwz r30, 0(r28)
+ mr r3, r0
+ lwz r31, Stack(r11)
+ /*
+ * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
+ * if ( _ISR_Nest_level == 0 )
+ * switch to software interrupt stack
+ * #endif
+ */
+ /* Switch stacks, here we must prevent ALL interrupts */
+#if (PPC_USE_SPRG)
+ mfmsr r5
+ mfspr r6, sprg2
+#else
+ lwz r6,msr_initial(r11)
+ lis r5,~PPC_MSR_DISABLE_MASK@ha
+ ori r5,r5,~PPC_MSR_DISABLE_MASK@l
+ and r6,r6,r5
+ mfmsr r5
+#endif
+ mtmsr r6
+ cmpwi r30, 0
+ lwz r29, Disable_level(r11)
+ subf r31,r1,r31
+ bne LABEL (nested)
+ stwux r1,r1,r31
+LABEL (nested):
+ /*
+ * _ISR_Nest_level++;
+ */
+ lwz r31, 0(r29)
+ addi r30,r30,1
+ stw r30,0(r28)
+ /* From here on out, interrupts can be re-enabled. RTEMS
+ * convention says not.
+ */
+ lwz r4,0(r4)
+ /*
+ * _Thread_Dispatch_disable_level++;
+ */
+ addi r31,r31,1
+ stw r31, 0(r29)
+/* SCE 980217
+ *
+ * We need address translation ON when we call our ISR routine
+
+ mtmsr r5
+
+ */
+
+ /*
+ * (*_ISR_Vector_table[ vector ])( vector );
+ */
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ lwz r6,0(r4)
+ lwz r2,4(r4)
+ mtlr r6
+ lwz r11,8(r4)
+#endif
+#if (PPC_ABI == PPC_ABI_GCC27)
+ lwz r2, Default_r2(r11)
+ mtlr r4
+ #lwz r2, 0(r2)
+#endif
+#if (PPC_ABI == PPC_ABI_SVR4 || PPC_ABI == PPC_ABI_EABI)
+ mtlr r4
+ lwz r2, Default_r2(r11)
+ lwz r13, Default_r13(r11)
+ #lwz r2, 0(r2)
+ #lwz r13, 0(r13)
+#endif
+ mr r4,r1
+ blrl
+ /* NOP marker for debuggers */
+ or r6,r6,r6
+
+ /* We must re-disable the interrupts */
+#if (PPC_USE_SPRG)
+ mfspr r11, sprg3
+ mfspr r0, sprg2
+#else
+ lis r11,_CPU_IRQ_info@ha
+ addi r11,r11,_CPU_IRQ_info@l
+ lwz r0,msr_initial(r11)
+ lis r30,~PPC_MSR_DISABLE_MASK@ha
+ ori r30,r30,~PPC_MSR_DISABLE_MASK@l
+ and r0,r0,r30
+#endif
+ mtmsr r0
+ lwz r30, 0(r28)
+ lwz r31, 0(r29)
+
+ /*
+ * if (--Thread_Dispatch_disable,--_ISR_Nest_level)
+ * goto easy_exit;
+ */
+ addi r30, r30, -1
+ cmpwi r30, 0
+ addi r31, r31, -1
+ stw r30, 0(r28)
+ stw r31, 0(r29)
+ bne LABEL (easy_exit)
+ cmpwi r31, 0
+
+ lwz r30, Switch_necessary(r11)
+
+ /*
+ * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
+ * restore stack
+ * #endif
+ */
+ lwz r1,0(r1)
+ bne LABEL (easy_exit)
+ lwz r30, 0(r30)
+ lwz r31, Signal(r11)
+
+ /*
+ * if ( _Context_Switch_necessary )
+ * goto switch
+ */
+ cmpwi r30, 0
+ lwz r28, 0(r31)
+ li r6,0
+ bne LABEL (switch)
+ /*
+ * if ( !_ISR_Signals_to_thread_executing )
+ * goto easy_exit
+ * _ISR_Signals_to_thread_executing = 0;
+ */
+ cmpwi r28, 0
+ beq LABEL (easy_exit)
+
+ /*
+ * switch:
+ * call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
+ */
+LABEL (switch):
+ stw r6, 0(r31)
+ /* Re-enable interrupts */
+ lwz r0, IP_MSR(r1)
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ lwz r2, Dispatch_r2(r11)
+#else
+ /* R2 and R13 still hold their values from the last call */
+#endif
+ mtmsr r0
+ bl SYM (_Thread_Dispatch)
+ /* NOP marker for debuggers */
+ or r6,r6,r6
+ /*
+ * prepare to get out of interrupt
+ */
+ /* Re-disable IRQs */
+#if (PPC_USE_SPRG)
+ mfspr r0, sprg2
+#else
+ lis r11,_CPU_IRQ_info@ha
+ addi r11,r11,_CPU_IRQ_info@l
+ lwz r0,msr_initial(r11)
+ lis r5,~PPC_MSR_DISABLE_MASK@ha
+ ori r5,r5,~PPC_MSR_DISABLE_MASK@l
+ and r0,r0,r5
+#endif
+ mtmsr r0
+
+ /*
+ * easy_exit:
+ * prepare to get out of interrupt
+ * return from interrupt
+ */
+LABEL (easy_exit):
+ lwz r5, IP_CR(r1)
+ lwz r6, IP_CTR(r1)
+ lwz r7, IP_XER(r1)
+ lwz r8, IP_LR(r1)
+ lwz r9, IP_PC(r1)
+ lwz r10, IP_MSR(r1)
+ mtcrf 255,r5
+ mtctr r6
+ mtxer r7
+ mtlr r8
+ MTPC (r9)
+ MTMSR (r10)
+ lwz r0, IP_0(r1)
+ lwz r2, IP_2(r1)
+ lwz r3, IP_3(r1)
+ lwz r4, IP_4(r1)
+ lwz r5, IP_5(r1)
+ lwz r6, IP_6(r1)
+ lwz r7, IP_7(r1)
+ lwz r8, IP_8(r1)
+ lwz r9, IP_9(r1)
+ lwz r10, IP_10(r1)
+ lwz r11, IP_11(r1)
+ lwz r12, IP_12(r1)
+ lwz r13, IP_13(r1)
+ lmw r28, IP_28(r1)
+ lwz r1, 0(r1)
diff --git a/c/src/lib/libcpu/powerpc/old-exceptions/ppccache.c b/c/src/lib/libcpu/powerpc/old-exceptions/ppccache.c
new file mode 100644
index 0000000000..ecfb4b96ca
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old-exceptions/ppccache.c
@@ -0,0 +1,61 @@
+/*
+ * PowerPC Cache enable routines
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+
+#define PPC_Get_HID0( _value ) \
+ do { \
+ _value = 0; /* to avoid warnings */ \
+ asm volatile( \
+ "mfspr %0, 0x3f0;" /* get HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+#define PPC_Set_HID0( _value ) \
+ do { \
+ asm volatile( \
+ "isync;" \
+ "mtspr 0x3f0, %0;" /* load HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+
+void powerpc_instruction_cache_enable ()
+{
+ unsigned32 value;
+
+ /*
+ * Enable the instruction cache
+ */
+
+ PPC_Get_HID0( value );
+
+ value |= 0x00008000; /* Set ICE bit */
+
+ PPC_Set_HID0( value );
+}
+
+void powerpc_data_cache_enable ()
+{
+ unsigned32 value;
+
+ /*
+ * enable data cache
+ */
+
+ PPC_Get_HID0( value );
+
+ value |= 0x00004000; /* set DCE bit */
+
+ PPC_Set_HID0( value );
+}
+
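+/*
+ * A BSP's early startup code would typically just call both routines,
+ * e.g. (a sketch; the function name is only a placeholder):
+ *
+ *   void bsp_enable_caches( void )
+ *   {
+ *     powerpc_instruction_cache_enable();
+ *     powerpc_data_cache_enable();
+ *   }
+ */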
diff --git a/c/src/lib/libcpu/powerpc/old_exception_processing/Makefile.in b/c/src/lib/libcpu/powerpc/old_exception_processing/Makefile.in
new file mode 100644
index 0000000000..252b424d51
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old_exception_processing/Makefile.in
@@ -0,0 +1,90 @@
+#
+# $Id$
+#
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+top_builddir = ../..
+subdir = powerpc/old_exception_processing
+
+RTEMS_ROOT = @RTEMS_ROOT@
+PROJECT_ROOT = @PROJECT_ROOT@
+
+VPATH = @srcdir@
+
+RELS = ../$(ARCH)/rtems-cpu.rel
+
+# C source names, if any, go here -- minus the .c
+C_PIECES = cpu ppccache
+C_FILES = $(C_PIECES:%=%.c)
+C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
+
+ROOT_H_PIECES =
+ROOT_H_FILES = $(ROOT_H_PIECES:%=$(srcdir)/%)
+RTEMS_SCORE_H_PIECES = cpu.h
+RTEMS_SCORE_H_FILES = $(RTEMS_SCORE_H_PIECES:%=$(srcdir)/%)
+H_PIECES = $(ROOT_H_PIECES) $(RTEMS_SCORE_H_PIECES)
+H_FILES = $(H_PIECES:%=$(srcdir)/%)
+I_PIECES = c_isr
+I_FILES = $(I_PIECES:%=$(srcdir)/%.inl)
+
+# Assembly source names, if any, go here -- minus the .S
+S_PIECES = cpu_asm rtems # irq_stub
+S_FILES = $(S_PIECES:%=%.S)
+S_O_FILES = $(S_FILES:%.S=${ARCH}/%.o)
+
+SRCS = $(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES) $(EXTERNAL_H_FILES) \
+ $(I_FILES)
+OBJS = $(C_O_FILES) $(CC_O_FILES) $(S_O_FILES)
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(RTEMS_ROOT)/make/leaf.cfg
+
+INSTALL_CHANGE = @INSTALL_CHANGE@
+mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
+
+INSTALLDIRS = $(PROJECT_INCLUDE)/rtems/score $(PROJECT_INCLUDE)
+
+$(INSTALLDIRS):
+ @$(mkinstalldirs) $(INSTALLDIRS)
+
+#
+# (OPTIONAL) Add local stuff here using +=
+#
+
+DEFINES +=
+CPPFLAGS +=
+CFLAGS += $(CFLAGS_OS_V)
+
+LD_PATHS +=
+LD_LIBS +=
+LDFLAGS +=
+
+#
+# Add your list of files to delete here. The config files
+# already know how to delete some stuff, so you may want
+# to just run 'make clean' first to see what gets missed.
+# 'make clobber' already includes 'make clean'
+#
+
+CLEAN_ADDITIONS +=
+CLOBBER_ADDITIONS +=
+
+../$(ARCH)/rtems-cpu.rel: $(OBJS)
+ test -d ../$(ARCH) || mkdir ../$(ARCH)
+ $(make-rel)
+
+all: ${ARCH} $(SRCS) preinstall $(OBJS) $(RELS)
+
+# Install the program(s), appending _g or _p as appropriate.
+# for include files, just use $(INSTALL_CHANGE)
+install: all
+
+preinstall: ${ARCH}
+ @$(INSTALL_CHANGE) -m 644 $(RTEMS_SCORE_H_FILES) $(I_FILES) $(PROJECT_INCLUDE)/rtems/score
+ @$(INSTALL_CHANGE) -m 644 $(ROOT_H_FILES) $(PROJECT_INCLUDE)
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
diff --git a/c/src/lib/libcpu/powerpc/old_exception_processing/README b/c/src/lib/libcpu/powerpc/old_exception_processing/README
new file mode 100644
index 0000000000..c72bebfe0c
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old_exception_processing/README
@@ -0,0 +1,80 @@
+#
+# $Id$
+#
+
+There are various issues regarding this port:
+
+
+
+1) Legal
+
+This port is written by Andrew Bray <andy@i-cubed.co.uk>, and
+is copyright 1995 i-cubed ltd.
+
+This port was later updated by Joel Sherrill <joel@OARcorp.com>
+to test the support for the PPC603, PPC603e, and MPC604. This
+was tested on the PowerPC simulator PSIM and a VMEbus single board
+computer.
+
+2) CPU support.
+
+This release fully supports the PPC403GA, PPC403GB, PPC603, PPC603e,
+MPC604, MPC750, and numerous MPC8xx processors. A good faith attempt
+has been made to include support for other models based upon available
+documentation including the MPC5xx. There are two interrupt structures
+supported by the PowerPC port. The newer structure is supported by
+all the MPC750 and MPC604 BSPs. This structure is required to use
+the RDBG remote debugging support.
+
+This port was originally written and tested on the PPC403GA (using
+software floating point). Current ports are tested primarily on
+60x CPUs using the PowerPC simulator PSIM.
+
+Andrew Bray received assistance during the initial porting effort
+from IBM and Blue Micro and we would like to gratefully acknowledge
+that help.
+
+The support for the PPC602 processor is incomplete as only sketchy
+data is currently available. Perhaps this model has been dropped.
+
+3) Application Binary Interface
+
+In the context of RTEMS, the ABI is of interest for the following
+aspects:
+
+a) Register usage. Which registers are used to provide static variable
+ linkage, stack pointer etc.
+
+b) Function calling convention. How parameters are passed, how functions
+   should be invoked, how values are returned, etc.
+
+c) Stack frame layout.
+
+I am aware of a number of ABIs for the PowerPC:
+
+a) The PowerOpen ABI. This is the original Power ABI used on the RS/6000.
+ This is the only ABI supported by versions of GCC before 2.7.0.
+
+b) The SVR4 ABI. This is the ABI defined by SunSoft for the Solaris port
+ to the PowerPC.
+
+c) The Embedded ABI. This is an embedded ABI for PowerPC use, which has no
+ operating system interface defined. It is promoted by SunSoft, Motorola,
+ and Cygnus Support. Cygnus are porting the GNU toolchain to this ABI.
+
+d) GCC 2.7.0. This compiler is partway along the road to supporting the EABI,
+ but is currently halfway in between.
+
+This port was built and tested using the PowerOpen ABI, with the following
+caveat: we used an ELF assembler and linker. So some attention may be
+required on the assembler files to get them through a traditional (XCOFF)
+PowerOpen assembler.
+
+This port contains support for the other ABIs, but this may prove to be
+incomplete as it is untested.
+
+The RTEMS PowerPC port supports EABI as the primary ABI. The powerpc-rtems
+GNU toolset configuration is EABI.
+
+Andrew Bray, 4 December 1995
+Joel Sherrill, 16 July 1997
diff --git a/c/src/lib/libcpu/powerpc/old_exception_processing/TODO b/c/src/lib/libcpu/powerpc/old_exception_processing/TODO
new file mode 100644
index 0000000000..64c96cb14c
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old_exception_processing/TODO
@@ -0,0 +1,8 @@
+#
+# $Id$
+#
+
+Todo list:
+
+Maybe decode external interrupts like the HPPA does.
+ See c/src/lib/libcpu/powerpc/ppc403/ictrl/* for implementation on ppc403
diff --git a/c/src/lib/libcpu/powerpc/old_exception_processing/c_isr.inl b/c/src/lib/libcpu/powerpc/old_exception_processing/c_isr.inl
new file mode 100644
index 0000000000..706d4f7e4f
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old_exception_processing/c_isr.inl
@@ -0,0 +1,4 @@
+RTEMS_INLINE_ROUTINE boolean _ISR_Is_in_progress( void )
+{
+ return (_ISR_Nest_level != 0);
+}
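+
+/*
+ * Device drivers can use this to refuse operations that must not run from
+ * interrupt context, e.g. (a sketch):
+ *
+ *   if ( _ISR_Is_in_progress() )
+ *     return RTEMS_CALLED_FROM_ISR;
+ */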
diff --git a/c/src/lib/libcpu/powerpc/old_exception_processing/cpu.c b/c/src/lib/libcpu/powerpc/old_exception_processing/cpu.c
new file mode 100644
index 0000000000..7d6824cb26
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old_exception_processing/cpu.c
@@ -0,0 +1,853 @@
+/*
+ * PowerPC CPU Dependent Source
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/context.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/interr.h>
+
+/*
+ * These are for testing purposes.
+ */
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ *    thread_dispatch - address of dispatching routine
+ */
+
+static void ppc_spurious(int, CPU_Interrupt_frame *);
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+ proc_ptr handler = (proc_ptr)ppc_spurious;
+ int i;
+#if (PPC_ABI != PPC_ABI_POWEROPEN)
+ register unsigned32 r2 = 0;
+#if (PPC_ABI != PPC_ABI_GCC27)
+ register unsigned32 r13 = 0;
+
+ asm ("mr %0,13" : "=r" ((r13)) : "0" ((r13)));
+ _CPU_IRQ_info.Default_r13 = r13;
+#endif
+
+ asm ("mr %0,2" : "=r" ((r2)) : "0" ((r2)));
+ _CPU_IRQ_info.Default_r2 = r2;
+#endif
+
+ _CPU_IRQ_info.Nest_level = &_ISR_Nest_level;
+ _CPU_IRQ_info.Disable_level = &_Thread_Dispatch_disable_level;
+ _CPU_IRQ_info.Vector_table = _ISR_Vector_table;
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ _CPU_IRQ_info.Dispatch_r2 = ((unsigned32 *)_Thread_Dispatch)[1];
+#endif
+ _CPU_IRQ_info.Switch_necessary = &_Context_Switch_necessary;
+ _CPU_IRQ_info.Signal = &_ISR_Signals_to_thread_executing;
+
+#if (PPC_USE_SPRG)
+ i = (int)&_CPU_IRQ_info;
+ asm volatile("mtspr 0x113, %0" : "=r" (i) : "0" (i)); /* SPRG 3 */
+#endif
+
+ /*
+ * Store Msr Value in the IRQ info structure.
+ */
+ _CPU_MSR_Value(_CPU_IRQ_info.msr_initial);
+
+#if (PPC_USE_SPRG)
+ i = _CPU_IRQ_info.msr_initial;
+ asm volatile("mtspr 0x112, %0" : "=r" (i) : "0" (i)); /* SPRG 2 */
+#endif
+
+ if ( cpu_table->spurious_handler )
+ handler = (proc_ptr)cpu_table->spurious_handler;
+
+ for (i = 0; i < PPC_INTERRUPT_MAX; i++)
+ _ISR_Vector_table[i] = handler;
+
+ _CPU_Table = *cpu_table;
+}
+
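+/*
+ * Every vector starts out pointing at ppc_spurious() unless the BSP placed
+ * its own handler in the CPU table before initialization, e.g. (a sketch;
+ * both names below are whatever the BSP actually uses):
+ *
+ *   Cpu_table.spurious_handler = bsp_spurious_handler;
+ */
+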
+/*PAGE
+ *
+ * _CPU_ISR_Calculate_level
+ *
+ * The PowerPC puts its interrupt enable status in the MSR register
+ * which also contains things like endianness control. To make matters
+ * more awkward, the layout varies from processor to processor. This
+ * is why it was necessary to adopt a scheme which allowed the user
+ * to specify exactly which interrupt sources were enabled.
+ */
+
+unsigned32 _CPU_ISR_Calculate_level(
+ unsigned32 new_level
+)
+{
+ register unsigned32 new_msr = 0;
+
+ /*
+ * Set the critical interrupt enable bit
+ */
+
+#if (PPC_HAS_RFCI)
+ if ( !(new_level & PPC_INTERRUPT_LEVEL_CE) )
+ new_msr |= PPC_MSR_CE;
+#endif
+
+ if ( !(new_level & PPC_INTERRUPT_LEVEL_ME) )
+ new_msr |= PPC_MSR_ME;
+
+ if ( !(new_level & PPC_INTERRUPT_LEVEL_EE) )
+ new_msr |= PPC_MSR_EE;
+
+ return new_msr;
+}
+
+/*PAGE
+ *
+ * _CPU_ISR_Set_level
+ *
+ * This routine sets the requested level in the MSR.
+ */
+
+void _CPU_ISR_Set_level(
+ unsigned32 new_level
+)
+{
+ register unsigned32 tmp = 0;
+ register unsigned32 new_msr;
+
+ new_msr = _CPU_ISR_Calculate_level( new_level );
+
+ asm volatile (
+ "mfmsr %0; andc %0,%0,%1; and %2, %2, %1; or %0, %0, %2; mtmsr %0" :
+ "=&r" ((tmp)) :
+ "r" ((PPC_MSR_DISABLE_MASK)), "r" ((new_msr)), "0" ((tmp))
+ );
+}
+
+/*PAGE
+ *
+ * _CPU_ISR_Get_level
+ *
+ * This routine gets the current interrupt level from the MSR and
+ * converts it to an RTEMS interrupt level.
+ */
+
+unsigned32 _CPU_ISR_Get_level( void )
+{
+ unsigned32 level = 0;
+ unsigned32 msr;
+
+ asm volatile("mfmsr %0" : "=r" ((msr)));
+
+ msr &= PPC_MSR_DISABLE_MASK;
+
+ /*
+ * Set the critical interrupt enable bit
+ */
+
+#if (PPC_HAS_RFCI)
+ if ( !(msr & PPC_MSR_CE) )
+ level |= PPC_INTERRUPT_LEVEL_CE;
+#endif
+
+ if ( !(msr & PPC_MSR_ME) )
+ level |= PPC_INTERRUPT_LEVEL_ME;
+
+ if ( !(msr & PPC_MSR_EE) )
+ level |= PPC_INTERRUPT_LEVEL_EE;
+
+ return level;
+}
+
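+/*
+ * _CPU_ISR_Get_level and _CPU_ISR_Set_level are normally used as a pair
+ * to bracket a critical section; ORing in PPC_INTERRUPT_LEVEL_EE masks
+ * external interrupts for its duration (a sketch):
+ *
+ *   unsigned32 old_level;
+ *
+ *   old_level = _CPU_ISR_Get_level();
+ *   _CPU_ISR_Set_level( old_level | PPC_INTERRUPT_LEVEL_EE );
+ *   ...
+ *   _CPU_ISR_Set_level( old_level );
+ */
+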
+/*PAGE
+ *
+ * _CPU_Context_Initialize
+ */
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+#define CPU_MINIMUM_STACK_FRAME_SIZE 56
+#else /* PPC_ABI_SVR4 or PPC_ABI_EABI */
+#define CPU_MINIMUM_STACK_FRAME_SIZE 8
+#endif
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ unsigned32 *stack_base,
+ unsigned32 size,
+ unsigned32 new_level,
+ void *entry_point,
+ boolean is_fp
+)
+{
+ unsigned32 msr_value;
+ unsigned32 sp;
+
+ sp = (unsigned32)stack_base + size - CPU_MINIMUM_STACK_FRAME_SIZE;
+ *((unsigned32 *)sp) = 0;
+ the_context->gpr1 = sp;
+
+ the_context->msr = _CPU_ISR_Calculate_level( new_level );
+
+ /*
+ * The FP bit of the MSR should only be enabled if this is a floating
+ * point task. Unfortunately, the vfprintf_r routine in newlib
+ * ends up pushing a floating point register regardless of whether or
+ * not a floating point number is being printed. Serious restructuring
+ * of vfprintf.c will be required to avoid this behavior. At this
+ * time (7 July 1997), this restructuring is not being done.
+ */
+
+ /*if ( is_fp ) */
+ the_context->msr |= PPC_MSR_FP;
+
+ /*
+ * Calculate the task's MSR value:
+ *
+ * + Set the exception prefix bit to point to the exception table
+ * + Force the RI bit
+ * + Use the DR and IR bits
+ */
+ _CPU_MSR_Value( msr_value );
+ the_context->msr |= (msr_value & PPC_MSR_EP);
+ the_context->msr |= PPC_MSR_RI;
+ the_context->msr |= msr_value & (PPC_MSR_DR|PPC_MSR_IR);
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ { unsigned32 *desc = (unsigned32 *)entry_point;
+
+ the_context->pc = desc[0];
+ the_context->gpr2 = desc[1];
+ }
+#endif
+
+#if (PPC_ABI == PPC_ABI_SVR4)
+ { unsigned r13 = 0;
+ asm volatile ("mr %0, 13" : "=r" ((r13)));
+
+ the_context->pc = (unsigned32)entry_point;
+ the_context->gpr13 = r13;
+ }
+#endif
+
+#if (PPC_ABI == PPC_ABI_EABI)
+ { unsigned32 r2 = 0;
+ unsigned r13 = 0;
+ asm volatile ("mr %0,2; mr %1,13" : "=r" ((r2)), "=r" ((r13)));
+
+ the_context->pc = (unsigned32)entry_point;
+ the_context->gpr2 = r2;
+ the_context->gpr13 = r13;
+ }
+#endif
+}
+
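+/*
+ * For example, preparing a non-floating-point task context with every
+ * interrupt source enabled looks roughly like this (a sketch; the stack
+ * and entry point names are placeholders):
+ *
+ *   _CPU_Context_Initialize( &the_context, stack_area, stack_size,
+ *                            0, (void *) task_entry, FALSE );
+ */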
+
+/* _CPU_ISR_install_vector
+ *
+ * This kernel routine installs the RTEMS handler for the
+ * specified vector.
+ *
+ * Input parameters:
+ * vector - interrupt vector number
+ * old_handler - former ISR for this vector number
+ * new_handler - replacement ISR for this vector number
+ *
+ * Output parameters: NONE
+ *
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ proc_ptr ignored;
+ *old_handler = _ISR_Vector_table[ vector ];
+
+ /*
+ * If the interrupt vector table is a table of pointer to isr entry
+ * points, then we need to install the appropriate RTEMS interrupt
+ * handler for this vector number.
+ */
+
+ /*
+ * Install the wrapper so this ISR can be invoked properly.
+ */
+ if (_CPU_Table.exceptions_in_RAM)
+ _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );
+
+ /*
+ * We put the actual user ISR address in '_ISR_vector_table'. This will
+ * be used by the _ISR_Handler so the user gets control.
+ */
+
+ _ISR_Vector_table[ vector ] = new_handler ? (ISR_Handler_entry)new_handler :
+ _CPU_Table.spurious_handler ?
+ (ISR_Handler_entry)_CPU_Table.spurious_handler :
+ (ISR_Handler_entry)ppc_spurious;
+}
+
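+/*
+ * A clock driver, for instance, would hook the decrementer like this
+ * (a sketch; Clock_isr is only a placeholder name):
+ *
+ *   proc_ptr old_handler;
+ *
+ *   _CPU_ISR_install_vector( PPC_IRQ_DECREMENTER,
+ *                            (proc_ptr) Clock_isr,
+ *                            &old_handler );
+ */
+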
+/*PAGE
+ *
+ * _CPU_Install_interrupt_stack
+ */
+
+void _CPU_Install_interrupt_stack( void )
+{
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ _CPU_IRQ_info.Stack = _CPU_Interrupt_stack_high - 56;
+#else
+ _CPU_IRQ_info.Stack = _CPU_Interrupt_stack_high - 8;
+#endif
+}
+
+/* Handle a spurious interrupt */
+static void ppc_spurious(int v, CPU_Interrupt_frame *i)
+{
+#if 0
+ printf("Spurious interrupt on vector %d from %08.8x\n",
+ v, i->pc);
+#endif
+#ifdef ppc403
+ if (v == PPC_IRQ_EXTERNAL)
+ {
+ register int r = 0;
+
+ asm volatile("mtdcr 0x42, %0" :
+ "=&r" ((r)) : "0" ((r))); /* EXIER */
+ }
+ else if (v == PPC_IRQ_PIT)
+ {
+ register int r = 0x08000000;
+
+ asm volatile("mtspr 0x3d8, %0" :
+ "=&r" ((r)) : "0" ((r))); /* TSR */
+ }
+ else if (v == PPC_IRQ_FIT)
+ {
+ register int r = 0x04000000;
+
+ asm volatile("mtspr 0x3d8, %0" :
+ "=&r" ((r)) : "0" ((r))); /* TSR */
+ }
+#endif
+}
+
+void _CPU_Fatal_error(unsigned32 _error)
+{
+ asm volatile ("mr 3, %0" : : "r" ((_error)));
+ asm volatile ("tweq 5,5");
+ asm volatile ("li 0,0; mtmsr 0");
+ while (1) ;
+}
+
+#define PPC_SYNCHRONOUS_TRAP_BIT_MASK 0x100
+#define PPC_ASYNCHRONOUS_TRAP( _trap ) (_trap)
+#define PPC_SYNCHRONOUS_TRAP( _trap )  ((_trap)+PPC_SYNCHRONOUS_TRAP_BIT_MASK)
+#define PPC_REAL_TRAP_NUMBER( _trap )  ((_trap)%PPC_SYNCHRONOUS_TRAP_BIT_MASK)
+
+
+const CPU_Trap_table_entry _CPU_Trap_slot_template = {
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+#error " Vector install not tested."
+#if (PPC_HAS_FPU)
+#error " Vector install not tested."
+ 0x9421feb0, /* stwu r1, -(20*4 + 18*8 + IP_END)(r1) */
+#else
+#error " Vector install not tested."
+ 0x9421ff40, /* stwu r1, -(20*4 + IP_END)(r1) */
+#endif
+#else
+ 0x9421ff90, /* stwu r1, -(IP_END)(r1) */
+#endif
+
+ 0x90010008, /* stw %r0, IP_0(%r1) */
+ 0x38000000, /* li %r0, PPC_IRQ */
+ 0x48000002 /* ba PROC (_ISR_Handler) */
+};
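+
+/*
+ * Each vector slot is patched as four 32-bit instruction words.  The
+ * corresponding structure in cpu.h looks roughly like this (a sketch;
+ * only the members referenced below are certain, the second name is a
+ * guess):
+ *
+ *   typedef struct {
+ *     unsigned32 stwu_r1;
+ *     unsigned32 stw_r0;
+ *     unsigned32 li_r0_IRQ;
+ *     unsigned32 b_Handler;
+ *   } CPU_Trap_table_entry;
+ *
+ * _CPU_ISR_install_raw_handler() ORs the vector number into li_r0_IRQ and
+ * the branch target into b_Handler, then flushes the slot from the cache.
+ */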
+
+#if defined(mpc860) || defined(mpc821)
+const CPU_Trap_table_entry _CPU_Trap_slot_template_m860 = {
+ 0x7c0803ac, /* mtlr %r0 */
+ 0x81210028, /* lwz %r9, IP_9(%r1) */
+ 0x38000000, /* li %r0, PPC_IRQ */
+ 0x48000002 /* b PROC (_ISR_Handler) */
+};
+#endif /* mpc860 */
+
+unsigned32 ppc_exception_vector_addr(
+ unsigned32 vector
+);
+
+
+/*PAGE
+ *
+ * _CPU_ISR_install_raw_handler
+ *
+ * This routine installs the specified handler as a "raw" non-executive
+ * supported trap handler (a.k.a. interrupt service routine).
+ *
+ * Input Parameters:
+ * vector - trap table entry number plus synchronous
+ * vs. asynchronous information
+ * new_handler - address of the handler to be installed
+ * old_handler - pointer to an address of the handler previously installed
+ *
+ * Output Parameters: NONE
+ * *new_handler - address of the handler previously installed
+ *
+ * NOTE:
+ *
+ * This routine is based on the SPARC routine _CPU_ISR_install_raw_handler.
+ * If you install a software trap handler as an executive interrupt handler
+ * (which is desirable since RTEMS takes care of window and register issues),
+ * then the executive needs to know that the return address is to the trap
+ * rather than the instruction following the trap.
+ *
+ */
+
+void _CPU_ISR_install_raw_handler(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ unsigned32 real_vector;
+ CPU_Trap_table_entry *slot;
+ unsigned32 u32_handler=0;
+
+ /*
+ * Get the "real" trap number for this vector ignoring the synchronous
+ * versus asynchronous indicator included with our vector numbers.
+ */
+
+ real_vector = vector;
+
+ /*
+ * Get the current base address of the trap table and calculate a pointer
+ * to the slot we are interested in.
+ */
+ slot = (CPU_Trap_table_entry *)ppc_exception_vector_addr( real_vector );
+
+ /*
+ * Get the address of the old_handler from the trap table.
+ *
+ * NOTE: The old_handler returned will be bogus if it does not follow
+ * the RTEMS model.
+ */
+
+#define HIGH_BITS_MASK 0xFFFFFC00
+#define HIGH_BITS_SHIFT 10
+#define LOW_BITS_MASK 0x000003FF
+
+ if (slot->stwu_r1 == _CPU_Trap_slot_template.stwu_r1) {
+ /*
+ * Set u32_handler = to target address
+ */
+ u32_handler = slot->b_Handler & 0x03fffffc;
+
+ /* IMD FIX: sign extend address fragment... */
+ if (u32_handler & 0x02000000) {
+ u32_handler |= 0xfc000000;
+ }
+
+ *old_handler = (proc_ptr) u32_handler;
+ } else
+/* There are two kinds of handlers for the MPC860. One is the 'standard'
+ * one like above. The other is for the cascaded interrupts from the SIU
+ * and CPM. Therefore we must check for the alternate one if the standard
+ * one is not present
+ */
+#if defined(mpc860) || defined(mpc821)
+ if (slot->stwu_r1 == _CPU_Trap_slot_template_m860.stwu_r1) {
+ /*
+ * Set u32_handler = to target address
+ */
+ u32_handler = slot->b_Handler & 0x03fffffc;
+ *old_handler = (proc_ptr) u32_handler;
+ } else
+#endif /* mpc860 */
+
+ *old_handler = 0;
+
+ /*
+ * Copy the template to the slot and then fix it.
+ */
+#if defined(mpc860) || defined(mpc821)
+ if (vector >= PPC_IRQ_IRQ0)
+ *slot = _CPU_Trap_slot_template_m860;
+ else
+#endif /* mpc860 */
+ *slot = _CPU_Trap_slot_template;
+
+ u32_handler = (unsigned32) new_handler;
+
+ /*
+ * IMD FIX: insert address fragment only (bits 6..29)
+ * therefore check for proper address range
+ * and remove unwanted bits
+ */
+ if ((u32_handler & 0xfc000000) == 0xfc000000) {
+ u32_handler &= ~0xfc000000;
+ }
+ else if ((u32_handler & 0xfc000000) != 0x00000000) {
+ _Internal_error_Occurred(INTERNAL_ERROR_CORE,
+ TRUE,
+ u32_handler);
+ }
+
+ slot->b_Handler |= u32_handler;
+
+ slot->li_r0_IRQ |= vector;
+
+ _CPU_Data_Cache_Block_Flush( slot );
+}
+
+unsigned32 ppc_exception_vector_addr(
+ unsigned32 vector
+)
+{
+#if (!PPC_HAS_EVPR)
+ unsigned32 Msr;
+#endif
+ unsigned32 Top = 0;
+ unsigned32 Offset = 0x000;
+
+#if (PPC_HAS_EXCEPTION_PREFIX)
+ _CPU_MSR_Value ( Msr );
+ if ( ( Msr & PPC_MSR_EP) != 0 ) /* Vectors at FFFx_xxxx */
+ Top = 0xfff00000;
+#elif (PPC_HAS_EVPR)
+ asm volatile( "mfspr %0,0x3d6" : "=r" (Top)); /* EVPR */
+ Top = Top & 0xffff0000;
+#endif
+
+ switch ( vector ) {
+ case PPC_IRQ_SYSTEM_RESET: /* on 40x aka PPC_IRQ_CRIT */
+ Offset = 0x00100;
+ break;
+ case PPC_IRQ_MCHECK:
+ Offset = 0x00200;
+ break;
+ case PPC_IRQ_PROTECT:
+ Offset = 0x00300;
+ break;
+ case PPC_IRQ_ISI:
+ Offset = 0x00400;
+ break;
+ case PPC_IRQ_EXTERNAL:
+ Offset = 0x00500;
+ break;
+ case PPC_IRQ_ALIGNMENT:
+ Offset = 0x00600;
+ break;
+ case PPC_IRQ_PROGRAM:
+ Offset = 0x00700;
+ break;
+ case PPC_IRQ_NOFP:
+ Offset = 0x00800;
+ break;
+ case PPC_IRQ_DECREMENTER:
+ Offset = 0x00900;
+ break;
+ case PPC_IRQ_RESERVED_A:
+ Offset = 0x00a00;
+ break;
+ case PPC_IRQ_RESERVED_B:
+ Offset = 0x00b00;
+ break;
+ case PPC_IRQ_SCALL:
+ Offset = 0x00c00;
+ break;
+ case PPC_IRQ_TRACE:
+ Offset = 0x00d00;
+ break;
+ case PPC_IRQ_FP_ASST:
+ Offset = 0x00e00;
+ break;
+
+#if defined(ppc403)
+
+/* PPC_IRQ_CRIT is the same vector as PPC_IRQ_RESET
+ case PPC_IRQ_CRIT:
+ Offset = 0x00100;
+ break;
+*/
+ case PPC_IRQ_PIT:
+ Offset = 0x01000;
+ break;
+ case PPC_IRQ_FIT:
+ Offset = 0x01010;
+ break;
+ case PPC_IRQ_WATCHDOG:
+ Offset = 0x01020;
+ break;
+ case PPC_IRQ_DEBUG:
+ Offset = 0x02000;
+ break;
+
+#elif defined(ppc601)
+ case PPC_IRQ_TRACE:
+ Offset = 0x02000;
+ break;
+
+#elif defined(ppc603)
+ case PPC_IRQ_TRANS_MISS:
+ Offset = 0x1000;
+ break;
+ case PPC_IRQ_DATA_LOAD:
+ Offset = 0x1100;
+ break;
+ case PPC_IRQ_DATA_STORE:
+ Offset = 0x1200;
+ break;
+ case PPC_IRQ_ADDR_BRK:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_SYS_MGT:
+ Offset = 0x1400;
+ break;
+
+#elif defined(ppc603e)
+ case PPC_TLB_INST_MISS:
+ Offset = 0x1000;
+ break;
+ case PPC_TLB_LOAD_MISS:
+ Offset = 0x1100;
+ break;
+ case PPC_TLB_STORE_MISS:
+ Offset = 0x1200;
+ break;
+ case PPC_IRQ_ADDRBRK:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_SYS_MGT:
+ Offset = 0x1400;
+ break;
+
+#elif defined(mpc604)
+ case PPC_IRQ_ADDR_BRK:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_SYS_MGT:
+ Offset = 0x1400;
+ break;
+
+#elif defined(mpc860) || defined(mpc821)
+ case PPC_IRQ_EMULATE:
+ Offset = 0x1000;
+ break;
+ case PPC_IRQ_INST_MISS:
+ Offset = 0x1100;
+ break;
+ case PPC_IRQ_DATA_MISS:
+ Offset = 0x1200;
+ break;
+ case PPC_IRQ_INST_ERR:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_DATA_ERR:
+ Offset = 0x1400;
+ break;
+ case PPC_IRQ_DATA_BPNT:
+ Offset = 0x1c00;
+ break;
+ case PPC_IRQ_INST_BPNT:
+ Offset = 0x1d00;
+ break;
+ case PPC_IRQ_IO_BPNT:
+ Offset = 0x1e00;
+ break;
+ case PPC_IRQ_DEV_PORT:
+ Offset = 0x1f00;
+ break;
+ case PPC_IRQ_IRQ0:
+ Offset = 0x2000;
+ break;
+ case PPC_IRQ_LVL0:
+ Offset = 0x2040;
+ break;
+ case PPC_IRQ_IRQ1:
+ Offset = 0x2080;
+ break;
+ case PPC_IRQ_LVL1:
+ Offset = 0x20c0;
+ break;
+ case PPC_IRQ_IRQ2:
+ Offset = 0x2100;
+ break;
+ case PPC_IRQ_LVL2:
+ Offset = 0x2140;
+ break;
+ case PPC_IRQ_IRQ3:
+ Offset = 0x2180;
+ break;
+ case PPC_IRQ_LVL3:
+ Offset = 0x21c0;
+ break;
+ case PPC_IRQ_IRQ4:
+ Offset = 0x2200;
+ break;
+ case PPC_IRQ_LVL4:
+ Offset = 0x2240;
+ break;
+ case PPC_IRQ_IRQ5:
+ Offset = 0x2280;
+ break;
+ case PPC_IRQ_LVL5:
+ Offset = 0x22c0;
+ break;
+ case PPC_IRQ_IRQ6:
+ Offset = 0x2300;
+ break;
+ case PPC_IRQ_LVL6:
+ Offset = 0x2340;
+ break;
+ case PPC_IRQ_IRQ7:
+ Offset = 0x2380;
+ break;
+ case PPC_IRQ_LVL7:
+ Offset = 0x23c0;
+ break;
+ case PPC_IRQ_CPM_RESERVED_0:
+ Offset = 0x2400;
+ break;
+ case PPC_IRQ_CPM_PC4:
+ Offset = 0x2410;
+ break;
+ case PPC_IRQ_CPM_PC5:
+ Offset = 0x2420;
+ break;
+ case PPC_IRQ_CPM_SMC2:
+ Offset = 0x2430;
+ break;
+ case PPC_IRQ_CPM_SMC1:
+ Offset = 0x2440;
+ break;
+ case PPC_IRQ_CPM_SPI:
+ Offset = 0x2450;
+ break;
+ case PPC_IRQ_CPM_PC6:
+ Offset = 0x2460;
+ break;
+ case PPC_IRQ_CPM_TIMER4:
+ Offset = 0x2470;
+ break;
+ case PPC_IRQ_CPM_RESERVED_8:
+ Offset = 0x2480;
+ break;
+ case PPC_IRQ_CPM_PC7:
+ Offset = 0x2490;
+ break;
+ case PPC_IRQ_CPM_PC8:
+ Offset = 0x24a0;
+ break;
+ case PPC_IRQ_CPM_PC9:
+ Offset = 0x24b0;
+ break;
+ case PPC_IRQ_CPM_TIMER3:
+ Offset = 0x24c0;
+ break;
+ case PPC_IRQ_CPM_RESERVED_D:
+ Offset = 0x24d0;
+ break;
+ case PPC_IRQ_CPM_PC10:
+ Offset = 0x24e0;
+ break;
+ case PPC_IRQ_CPM_PC11:
+ Offset = 0x24f0;
+ break;
+ case PPC_IRQ_CPM_I2C:
+ Offset = 0x2500;
+ break;
+ case PPC_IRQ_CPM_RISC_TIMER:
+ Offset = 0x2510;
+ break;
+ case PPC_IRQ_CPM_TIMER2:
+ Offset = 0x2520;
+ break;
+ case PPC_IRQ_CPM_RESERVED_13:
+ Offset = 0x2530;
+ break;
+ case PPC_IRQ_CPM_IDMA2:
+ Offset = 0x2540;
+ break;
+ case PPC_IRQ_CPM_IDMA1:
+ Offset = 0x2550;
+ break;
+ case PPC_IRQ_CPM_SDMA_ERROR:
+ Offset = 0x2560;
+ break;
+ case PPC_IRQ_CPM_PC12:
+ Offset = 0x2570;
+ break;
+ case PPC_IRQ_CPM_PC13:
+ Offset = 0x2580;
+ break;
+ case PPC_IRQ_CPM_TIMER1:
+ Offset = 0x2590;
+ break;
+ case PPC_IRQ_CPM_PC14:
+ Offset = 0x25a0;
+ break;
+ case PPC_IRQ_CPM_SCC4:
+ Offset = 0x25b0;
+ break;
+ case PPC_IRQ_CPM_SCC3:
+ Offset = 0x25c0;
+ break;
+ case PPC_IRQ_CPM_SCC2:
+ Offset = 0x25d0;
+ break;
+ case PPC_IRQ_CPM_SCC1:
+ Offset = 0x25e0;
+ break;
+ case PPC_IRQ_CPM_PC15:
+ Offset = 0x25f0;
+ break;
+#endif
+
+ }
+ Top += Offset;
+ return Top;
+}
+
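+/*
+ * _CPU_ISR_install_raw_handler() above uses this routine to locate the
+ * slot it patches; a caller poking at the vector table directly could do
+ * the same (a sketch):
+ *
+ *   CPU_Trap_table_entry *slot = (CPU_Trap_table_entry *)
+ *     ppc_exception_vector_addr( PPC_IRQ_EXTERNAL );
+ */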
diff --git a/c/src/lib/libcpu/powerpc/old_exception_processing/cpu.h b/c/src/lib/libcpu/powerpc/old_exception_processing/cpu.h
new file mode 100644
index 0000000000..2a502d0745
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old_exception_processing/cpu.h
@@ -0,0 +1,1200 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the PowerPC
+ * processor.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.h:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rtems/score/ppc.h> /* pick up machine definitions */
+#ifndef ASM
+struct CPU_Interrupt_frame;
+typedef void ( *ppc_isr_entry )( int, struct CPU_Interrupt_frame * );
+
+#include <rtems/score/ppctypes.h>
+#endif
+
+/* conditional compilation parameters */
+
+/*
+ * Should the calls to _Thread_Enable_dispatch be inlined?
+ *
+ * If TRUE, then they are inlined.
+ * If FALSE, then a subroutine call is made.
+ *
+ * Basically this is an example of the classic trade-off of size
+ * versus speed. Inlining the call (TRUE) typically increases the
+ * size of RTEMS while speeding up the enabling of dispatching.
+ * [NOTE: In general, the _Thread_Dispatch_disable_level will
+ * only be 0 or 1 unless you are in an interrupt handler and that
+ * interrupt handler invokes the executive.] When not inlined
+ * something calls _Thread_Enable_dispatch which in turn calls
+ * _Thread_Dispatch. If the enable dispatch is inlined, then
+ * one subroutine call is avoided entirely.
+ */
+
+#define CPU_INLINE_ENABLE_DISPATCH FALSE
+
+/*
+ * Should the body of the search loops in _Thread_queue_Enqueue_priority
+ * be unrolled one time? If unrolled, each iteration of the loop examines
+ * two "nodes" on the chain being searched. Otherwise, only one node
+ * is examined per iteration.
+ *
+ * If TRUE, then the loops are unrolled.
+ * If FALSE, then the loops are not unrolled.
+ *
+ * The primary factor in making this decision is the cost of disabling
+ * and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
+ * body of the loop. On some CPUs, the flash is more expensive than
+ * one iteration of the loop body. In this case, it might be desirable
+ * to unroll the loop. It is important to note that on some CPUs, this
+ * code is the longest interrupt disable period in RTEMS. So it is
+ * necessary to strike a balance when setting this parameter.
+ */
+
+#define CPU_UNROLL_ENQUEUE_PRIORITY FALSE
+
+/*
+ * Does RTEMS manage a dedicated interrupt stack in software?
+ *
+ * If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
+ * If FALSE, nothing is done.
+ *
+ * If the CPU supports a dedicated interrupt stack in hardware,
+ * then it is generally the responsibility of the BSP to allocate it
+ * and set it up.
+ *
+ * If the CPU does not support a dedicated interrupt stack, then
+ * the porter has two options: (1) execute interrupts on the
+ * stack of the interrupted task, and (2) have RTEMS manage a dedicated
+ * interrupt stack.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU, although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
+
+/*
+ * Does this CPU have hardware support for a dedicated interrupt stack?
+ *
+ * If TRUE, then it must be installed during initialization.
+ * If FALSE, then no installation is performed.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU, although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+/*
+ * ACB: This is a lie, but it gets us a handle on a call to set up
+ * a variable derived from the top of the interrupt stack.
+ */
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE
+
+/*
+ * Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
+ *
+ * If TRUE, then the memory is allocated during initialization.
+ * If FALSE, then no memory is allocated for this purpose.
+ *
+ * This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
+ * or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
+ */
+
+#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
+
+/*
+ * Does RTEMS invoke the user's ISR with the vector number and
+ * a pointer to the saved interrupt frame (1) or just the vector
+ * number (0)?
+ */
+
+#define CPU_ISR_PASSES_FRAME_POINTER 1
+
+/*
+ * Does the CPU have hardware floating point?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
+ *
+ * If there is a FP coprocessor such as the i387 or mc68881, then
+ * the answer is TRUE.
+ *
+ * The macro name "PPC_HAS_FPU" should be made CPU specific.
+ * It indicates whether or not this CPU model has FP support. For
+ * example, it would be possible to have an i386_nofp CPU model
+ * which set this to false to indicate that you have an i386 without
+ * an i387 and wish to leave floating point support out of RTEMS.
+ */
+
+#if ( PPC_HAS_FPU == 1 )
+#define CPU_HARDWARE_FP TRUE
+#else
+#define CPU_HARDWARE_FP FALSE
+#endif
+
+/*
+ * Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
+ *
+ * So far, the only CPU in which this option has been used is the
+ * HP PA-RISC. The HP C compiler and gcc both implicitly use the
+ * floating point registers to perform integer multiplies. If
+ * a function which you would not expect to utilize the FP unit DOES,
+ * then one cannot easily predict which tasks will use the FP hardware.
+ * In this case, this option should be TRUE.
+ *
+ * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
+ */
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+
+/*
+ * Should the IDLE task have a floating point context?
+ *
+ * If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
+ * and it has a floating point context which is switched in and out.
+ * If FALSE, then the IDLE task does not have a floating point context.
+ *
+ * Setting this to TRUE negatively impacts the time required to preempt
+ * the IDLE task from an interrupt because the floating point context
+ * must be saved as part of the preemption.
+ */
+
+#define CPU_IDLE_TASK_IS_FP FALSE
+
+/*
+ * Should the saving of the floating point registers be deferred
+ * until a context switch is made to another different floating point
+ * task?
+ *
+ * If TRUE, then the floating point context will not be stored until
+ * necessary. It will remain in the floating point registers and not
+ * disturbed until another floating point task is switched to.
+ *
+ * If FALSE, then the floating point context is saved when a floating
+ * point task is switched out and restored when the next floating point
+ * task is restored. The state of the floating point registers between
+ * those two operations is not specified.
+ *
+ * If the floating point context does NOT have to be saved as part of
+ * interrupt dispatching, then it should be safe to set this to TRUE.
+ *
+ * Setting this flag to TRUE results in using a different algorithm
+ * for deciding when to save and restore the floating point context.
+ * The deferred FP switch algorithm minimizes the number of times
+ * the FP context is saved and restored. The FP context is not saved
+ * until a context switch is made to another, different FP task.
+ * Thus in a system with only one FP task, the FP context will never
+ * be saved or restored.
+ */
+/*
+ * ACB Note: This could make debugging tricky..
+ */
+
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+
+/*
+ * Does this port provide a CPU dependent IDLE task implementation?
+ *
+ * If TRUE, then the routine _CPU_Thread_Idle_body
+ * must be provided and is the default IDLE thread body instead of
+ * the generic _Thread_Idle_body.
+ *
+ * If FALSE, then use the generic IDLE thread body if the BSP does
+ * not provide one.
+ *
+ * This is intended to allow for supporting processors which have
+ * a low power or idle mode. When the IDLE thread is executed, then
+ * the CPU can be powered down.
+ *
+ * The order of precedence for selecting the IDLE thread body is:
+ *
+ * 1. BSP provided
+ * 2. CPU dependent (if provided)
+ * 3. generic (if no BSP and no CPU dependent)
+ */
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+
+/*
+ * Does the stack grow up (toward higher addresses) or down
+ * (toward lower addresses)?
+ *
+ * If TRUE, then the stack grows upward.
+ * If FALSE, then the stack grows toward smaller addresses.
+ */
+
+#define CPU_STACK_GROWS_UP FALSE
+
+/*
+ * The following is the variable attribute used to force alignment
+ * of critical RTEMS structures. On some processors it may make
+ * sense to have these aligned on tighter boundaries than
+ * the minimum requirements of the compiler in order to have as
+ * much of the critical data area as possible in a cache line.
+ *
+ * The placement of this macro in the declaration of the variables
+ * is based on the syntactic requirements of the GNU C
+ * "__attribute__" extension. For example with GNU C, use
+ * the following to force a structure onto a 32 byte boundary.
+ *
+ * __attribute__ ((aligned (32)))
+ *
+ * NOTE: Currently only the Priority Bit Map table uses this feature.
+ * To benefit from using this, the data must be heavily
+ * used so it will stay in the cache and used frequently enough
+ * in the executive to justify turning this on.
+ */
+
+#define CPU_STRUCTURE_ALIGNMENT \
+ __attribute__ ((aligned (PPC_CACHE_ALIGNMENT)))
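+
+/*
+ * Illustrative use (hypothetical, not part of the original port): the
+ * attribute is placed after the variable name in the declaration, as the
+ * GNU C syntax requires, e.g. for a heavily used table:
+ *
+ *     volatile unsigned32 _A_hot_table[ 16 ] CPU_STRUCTURE_ALIGNMENT;
+ */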
+
+/*
+ * Define what is required to specify how the network to host conversion
+ * routines are handled.
+ */
+
+#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES FALSE
+#define CPU_BIG_ENDIAN TRUE
+#define CPU_LITTLE_ENDIAN FALSE
+
+/*
+ * The following defines the number of bits actually used in the
+ * interrupt field of the task mode. How those bits map to the
+ * CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
+ *
+ * The interrupt level is bit mapped for the PowerPC family. The
+ * bits are set to 0 to indicate that a particular exception source is
+ * enabled and 1 if it is disabled. This is consistent with the RTEMS convention
+ * that interrupt level 0 means all sources are enabled.
+ *
+ * The bits are assigned to correspond to enable bits in the MSR.
+ */
+
+#define PPC_INTERRUPT_LEVEL_ME 0x01
+#define PPC_INTERRUPT_LEVEL_EE 0x02
+#define PPC_INTERRUPT_LEVEL_CE 0x04
+
+/* XXX should these be maskable? */
+#if 0
+#define PPC_INTERRUPT_LEVEL_DE 0x08
+#define PPC_INTERRUPT_LEVEL_BE 0x10
+#define PPC_INTERRUPT_LEVEL_SE 0x20
+#endif
+
+#define CPU_MODES_INTERRUPT_MASK 0x00000007
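+
+/*
+ * For example (illustrative only, previous_mode being a local rtems_mode
+ * variable): a task mode interrupt level of 0 leaves both MSR[ME] and
+ * MSR[EE] enabled, while a level with PPC_INTERRUPT_LEVEL_EE set runs the
+ * task with external exceptions disabled:
+ *
+ *     rtems_task_mode(
+ *       RTEMS_INTERRUPT_LEVEL( PPC_INTERRUPT_LEVEL_EE ),
+ *       RTEMS_INTERRUPT_MASK,
+ *       &previous_mode
+ *     );
+ */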
+
+/*
+ * Processor defined structures
+ *
+ * Example structures include the descriptor tables from the i386
+ * and the processor control structure on the i960ca.
+ */
+
+/* may need to put some structures here. */
+
+/*
+ * Contexts
+ *
+ * Generally there are 2 types of context to save.
+ * 1. Interrupt registers to save
+ * 2. Task level registers to save
+ *
+ * This means we have the following 3 context items:
+ * 1. task level context stuff:: Context_Control
+ * 2. floating point task stuff:: Context_Control_fp
+ * 3. special interrupt level context :: Context_Control_interrupt
+ *
+ * On some processors, it is cost-effective to save only the callee
+ * preserved registers during a task context switch. This means
+ * that the ISR code needs to save those registers which do not
+ * persist across function calls. It is not mandatory to make this
+ * distinction between the caller/callee saved registers for the
+ * purpose of minimizing context saved during task switch and on interrupts.
+ * If the cost of saving extra registers is minimal, simplicity is the
+ * choice. Save the same context on interrupt entry as for tasks in
+ * this case.
+ *
+ * Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
+ * care should be used in designing the context area.
+ *
+ * On some CPUs with hardware floating point support, the Context_Control_fp
+ * structure will not be used or it simply consists of an array of a
+ * fixed number of bytes. This is done when the floating point context
+ * is dumped by a "FP save context" type instruction and the format
+ * is not really defined by the CPU. In this case, there is no need
+ * to figure out the exact format -- only the size. Of course, although
+ * this is enough information for RTEMS, it is probably not enough for
+ * a debugger such as gdb. But that is another problem.
+ */
+
+typedef struct {
+ unsigned32 gpr1; /* Stack pointer for all */
+ unsigned32 gpr2; /* TOC in PowerOpen, reserved SVR4, section ptr EABI + */
+ unsigned32 gpr13; /* First non volatile PowerOpen, section ptr SVR4/EABI */
+ unsigned32 gpr14; /* Non volatile for all */
+ unsigned32 gpr15; /* Non volatile for all */
+ unsigned32 gpr16; /* Non volatile for all */
+ unsigned32 gpr17; /* Non volatile for all */
+ unsigned32 gpr18; /* Non volatile for all */
+ unsigned32 gpr19; /* Non volatile for all */
+ unsigned32 gpr20; /* Non volatile for all */
+ unsigned32 gpr21; /* Non volatile for all */
+ unsigned32 gpr22; /* Non volatile for all */
+ unsigned32 gpr23; /* Non volatile for all */
+ unsigned32 gpr24; /* Non volatile for all */
+ unsigned32 gpr25; /* Non volatile for all */
+ unsigned32 gpr26; /* Non volatile for all */
+ unsigned32 gpr27; /* Non volatile for all */
+ unsigned32 gpr28; /* Non volatile for all */
+ unsigned32 gpr29; /* Non volatile for all */
+ unsigned32 gpr30; /* Non volatile for all */
+ unsigned32 gpr31; /* Non volatile for all */
+ unsigned32 cr; /* PART of the CR is non volatile for all */
+ unsigned32 pc; /* Program counter/Link register */
+ unsigned32 msr; /* Initial interrupt level */
+} Context_Control;
+
+typedef struct {
+ /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
+ * procedure calls. However, this would mean that the interrupt
+ * frame had to hold f0-f13, and the fpscr. And as the majority
+ * of tasks will not have an FP context, we will save the whole
+ * context here.
+ */
+#if (PPC_HAS_DOUBLE == 1)
+ double f[32];
+ double fpscr;
+#else
+ float f[32];
+ float fpscr;
+#endif
+} Context_Control_fp;
+
+typedef struct CPU_Interrupt_frame {
+ unsigned32 stacklink; /* Ensure this is a real frame (also reg1 save) */
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ unsigned32 dummy[13]; /* Used by callees: PowerOpen ABI */
+#else
+ unsigned32 dummy[1]; /* Used by callees: SVR4/EABI */
+#endif
+ /* This is what is left out of the primary contexts */
+ unsigned32 gpr0;
+ unsigned32 gpr2; /* play safe */
+ unsigned32 gpr3;
+ unsigned32 gpr4;
+ unsigned32 gpr5;
+ unsigned32 gpr6;
+ unsigned32 gpr7;
+ unsigned32 gpr8;
+ unsigned32 gpr9;
+ unsigned32 gpr10;
+ unsigned32 gpr11;
+ unsigned32 gpr12;
+ unsigned32 gpr13; /* Play safe */
+ unsigned32 gpr28; /* For internal use by the IRQ handler */
+ unsigned32 gpr29; /* For internal use by the IRQ handler */
+ unsigned32 gpr30; /* For internal use by the IRQ handler */
+ unsigned32 gpr31; /* For internal use by the IRQ handler */
+ unsigned32 cr; /* Bits of this are volatile, so no-one may save */
+ unsigned32 ctr;
+ unsigned32 xer;
+ unsigned32 lr;
+ unsigned32 pc;
+ unsigned32 msr;
+ unsigned32 pad[3];
+} CPU_Interrupt_frame;
+
+
+/*
+ * The following table contains the information required to configure
+ * the PowerPC processor specific parameters.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+ boolean do_zero_of_workspace;
+ unsigned32 idle_task_stack_size;
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_mpci_receive_server_stack;
+ void * (*stack_allocate_hook)( unsigned32 );
+ void (*stack_free_hook)( void* );
+ /* end of fields required on all CPUs */
+
+ unsigned32 clicks_per_usec; /* Timer clicks per microsecond */
+ void (*spurious_handler)(unsigned32 vector, CPU_Interrupt_frame *);
+ boolean exceptions_in_RAM; /* TRUE if in RAM */
+
+#if (defined(ppc403) || defined(mpc860) || defined(mpc821))
+ unsigned32 serial_per_sec; /* Serial clocks per second */
+ boolean serial_external_clock;
+ boolean serial_xon_xoff;
+ boolean serial_cts_rts;
+ unsigned32 serial_rate;
+ unsigned32 timer_average_overhead; /* Average overhead of timer in ticks */
+ unsigned32 timer_least_valid; /* Least valid number from timer */
+ boolean timer_internal_clock; /* TRUE, when timer runs with CPU clk */
+#endif
+
+#if (defined(mpc860) || defined(mpc821))
+ unsigned32 clock_speed; /* Speed of CPU in Hz */
+#endif
+} rtems_cpu_table;
+
+/*
+ * Macros to access required entries in the CPU Table are in
+ * the file rtems/system.h.
+ */
+
+/*
+ * Macros to access PowerPC specific additions to the CPU Table
+ */
+
+#define rtems_cpu_configuration_get_clicks_per_usec() \
+ (_CPU_Table.clicks_per_usec)
+
+#define rtems_cpu_configuration_get_spurious_handler() \
+ (_CPU_Table.spurious_handler)
+
+#define rtems_cpu_configuration_get_exceptions_in_ram() \
+ (_CPU_Table.exceptions_in_RAM)
+
+#if (defined(ppc403) || defined(mpc860) || defined(mpc821))
+
+#define rtems_cpu_configuration_get_serial_per_sec() \
+ (_CPU_Table.serial_per_sec)
+
+#define rtems_cpu_configuration_get_serial_external_clock() \
+ (_CPU_Table.serial_external_clock)
+
+#define rtems_cpu_configuration_get_serial_xon_xoff() \
+ (_CPU_Table.serial_xon_xoff)
+
+#define rtems_cpu_configuration_get_serial_cts_rts() \
+ (_CPU_Table.serial_cts_rts)
+
+#define rtems_cpu_configuration_get_serial_rate() \
+ (_CPU_Table.serial_rate)
+
+#define rtems_cpu_configuration_get_timer_average_overhead() \
+ (_CPU_Table.timer_average_overhead)
+
+#define rtems_cpu_configuration_get_timer_least_valid() \
+ (_CPU_Table.timer_least_valid)
+
+#define rtems_cpu_configuration_get_timer_internal_clock() \
+ (_CPU_Table.timer_internal_clock)
+
+#endif
+
+#if (defined(mpc860) || defined(mpc821))
+#define rtems_cpu_configuration_get_clock_speed() \
+ (_CPU_Table.clock_speed)
+#endif
+
+
+/*
+ * The following type defines an entry in the PPC's trap table.
+ *
+ * NOTE: The instructions chosen are RTEMS dependent although one is
+ * obligated to use two of the four instructions to perform a
+ * long jump. The other instructions load one register with the
+ * trap type (a.k.a. vector) and another with the psr.
+ */
+
+typedef struct {
+ unsigned32 stwu_r1; /* stwu %r1, -(??+IP_END)(%1)*/
+ unsigned32 stw_r0; /* stw %r0, IP_0(%r1) */
+ unsigned32 li_r0_IRQ; /* li %r0, _IRQ */
+ unsigned32 b_Handler; /* b PROC (_ISR_Handler) */
+} CPU_Trap_table_entry;
+
+/*
+ * This variable is optional. It is used on CPUs on which it is difficult
+ * to generate an "uninitialized" FP context. It is filled in by
+ * _CPU_Initialize and copied into the task's FP context area during
+ * _CPU_Context_Initialize.
+ */
+
+/* EXTERN Context_Control_fp _CPU_Null_fp_context; */
+
+/*
+ * On some CPUs, RTEMS supports a software managed interrupt stack.
+ * This stack is allocated by the Interrupt Manager and the switch
+ * is performed in _ISR_Handler. These variables contain pointers
+ * to the lowest and highest addresses in the chunk of memory allocated
+ * for the interrupt stack. Since it is unknown whether the stack
+ * grows up or down (in general), this gives the CPU dependent
+ * code the option of picking the version it wants to use.
+ *
+ * NOTE: These two variables are required if the macro
+ * CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
+ */
+
+SCORE_EXTERN void *_CPU_Interrupt_stack_low;
+SCORE_EXTERN void *_CPU_Interrupt_stack_high;
+
+/*
+ * With some compilation systems, it is difficult if not impossible to
+ * call a high-level language routine from assembly language. This
+ * is especially true of commercial Ada compilers and name mangling
+ * C++ ones. This variable can be optionally defined by the CPU porter
+ * and contains the address of the routine _Thread_Dispatch. This
+ * can make it easier to invoke that routine at the end of the interrupt
+ * sequence (if a dispatch is necessary).
+ */
+
+/* EXTERN void (*_CPU_Thread_dispatch_pointer)(); */
+
+/*
+ * Nothing prevents the porter from declaring more CPU specific variables.
+ */
+
+
+SCORE_EXTERN struct {
+ unsigned32 *Nest_level;
+ unsigned32 *Disable_level;
+ void *Vector_table;
+ void *Stack;
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ unsigned32 Dispatch_r2;
+#else
+ unsigned32 Default_r2;
+#if (PPC_ABI != PPC_ABI_GCC27)
+ unsigned32 Default_r13;
+#endif
+#endif
+ volatile boolean *Switch_necessary;
+ boolean *Signal;
+
+ unsigned32 msr_initial;
+} _CPU_IRQ_info CPU_STRUCTURE_ALIGNMENT;
+
+/*
+ * The size of the floating point context area. On some CPUs this
+ * will not be a "sizeof" because the format of the floating point
+ * area is not defined -- only the size is. This is usually on
+ * CPUs with a "floating point save context" instruction.
+ */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+/*
+ * (Optional) # of bytes for libmisc/stackchk to check
+ * If not specified, then it defaults to something reasonable
+ * for most architectures.
+ */
+
+#define CPU_STACK_CHECK_SIZE (128)
+
+/*
+ * Amount of extra stack (above minimum stack size) required by
+ * MPCI receive server thread. Remember that in a multiprocessor
+ * system this thread must exist and be able to process all directives.
+ */
+
+#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
+
+/*
+ * This defines the number of entries in the ISR_Vector_table managed
+ * by RTEMS.
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS (PPC_INTERRUPT_MAX)
+#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (PPC_INTERRUPT_MAX - 1)
+
+/*
+ * Should be large enough to run all RTEMS tests. This ensures
+ * that a "reasonably" small application should not have any problems.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE (1024*8)
+
+/*
+ * CPU's worst alignment requirement for data types on a byte boundary. This
+ * alignment does not take into account the requirements for the stack.
+ */
+
+#define CPU_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * heap handler. This alignment requirement may be stricter than that
+ * for the data types alignment specified by CPU_ALIGNMENT. It is
+ * common for the heap to follow the same alignment requirement as
+ * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap,
+ * then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_HEAP_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for memory
+ * buffers allocated by the partition manager. This alignment requirement
+ * may be stricter than that for the data types alignment specified by
+ * CPU_ALIGNMENT. It is common for the partition to follow the same
+ * alignment requirement as CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict
+ * enough for the partition, then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_PARTITION_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * stack. This alignment requirement may be stricter than that for the
+ * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT
+ * is strict enough for the stack, then this should be set to 0.
+ *
+ * NOTE: This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
+ */
+
+#define CPU_STACK_ALIGNMENT (PPC_STACK_ALIGNMENT)
+
+/* ISR handler macros */
+
+/*
+ * Disable all interrupts for an RTEMS critical section. The previous
+ * level is returned in _isr_cookie.
+ */
+
+#define loc_string(a,b) a " (" #b ")\n"
+
+#define _CPU_MSR_Value( _msr_value ) \
+ do { \
+ _msr_value = 0; \
+ asm volatile ("mfmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); \
+ } while (0)
+
+#define _CPU_MSR_SET( _msr_value ) \
+{ asm volatile ("mtmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); }
+
+#if 0
+#define _CPU_ISR_Disable( _isr_cookie ) \
+ { register unsigned int _disable_mask = PPC_MSR_DISABLE_MASK; \
+ _isr_cookie = 0; \
+ asm volatile ( \
+ "mfmsr %0" : \
+ "=r" ((_isr_cookie)) : \
+ "0" ((_isr_cookie)) \
+ ); \
+ asm volatile ( \
+ "andc %1,%0,%1" : \
+ "=r" ((_isr_cookie)), "=&r" ((_disable_mask)) : \
+ "0" ((_isr_cookie)), "1" ((_disable_mask)) \
+ ); \
+ asm volatile ( \
+ "mtmsr %1" : \
+ "=r" ((_disable_mask)) : \
+ "0" ((_disable_mask)) \
+ ); \
+ }
+#endif
+
+#define _CPU_ISR_Disable( _isr_cookie ) \
+ { register unsigned int _disable_mask = PPC_MSR_DISABLE_MASK; \
+ _isr_cookie = 0; \
+ asm volatile ( \
+ "mfmsr %0; andc %1,%0,%1; mtmsr %1" : \
+ "=&r" ((_isr_cookie)), "=&r" ((_disable_mask)) : \
+ "0" ((_isr_cookie)), "1" ((_disable_mask)) \
+ ); \
+ }
+
+
+#define _CPU_Data_Cache_Block_Flush( _address ) \
+ do { register void *__address = (_address); \
+ register unsigned32 _zero = 0; \
+ asm volatile ( "dcbf %0,%1" : \
+ "=r" (_zero), "=r" (__address) : \
+ "0" (_zero), "1" (__address) \
+ ); \
+ } while (0)
+
+
+/*
+ * Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
+ * This indicates the end of an RTEMS critical section. The parameter
+ * _isr_cookie is not modified.
+ */
+
+#define _CPU_ISR_Enable( _isr_cookie ) \
+ { \
+ asm volatile ( "mtmsr %0" : \
+ "=r" ((_isr_cookie)) : \
+ "0" ((_isr_cookie))); \
+ }
+
+/*
+ * This temporarily restores interrupts to the level in _isr_cookie before
+ * immediately disabling them again. This is used to divide long RTEMS critical
+ * sections into two or more parts. The parameter _isr_cookie is not
+ * modified.
+ *
+ * NOTE: The version being used is not very optimized but it does
+ * not trip a problem in gcc where the disable mask does not
+ * get loaded. Check this for future (post 10/97) gcc versions.
+ */
+
+#define _CPU_ISR_Flash( _isr_cookie ) \
+ { register unsigned int _disable_mask = PPC_MSR_DISABLE_MASK; \
+ asm volatile ( \
+ "mtmsr %0; andc %1,%0,%1; mtmsr %1" : \
+ "=r" ((_isr_cookie)), "=r" ((_disable_mask)) : \
+ "0" ((_isr_cookie)), "1" ((_disable_mask)) \
+ ); \
+ }
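+
+/*
+ * A minimal usage sketch (hypothetical, not from the original source) of
+ * the three macros above in a driver critical section:
+ *
+ *     unsigned32 level;
+ *
+ *     _CPU_ISR_Disable( level );    -- old MSR saved in level, interrupts off
+ *     -- first part of the critical section --
+ *     _CPU_ISR_Flash( level );      -- briefly allow pending interrupts
+ *     -- second part of the critical section --
+ *     _CPU_ISR_Enable( level );     -- restore the saved MSR value
+ */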
+
+/*
+ * Map interrupt level in task mode onto the hardware that the CPU
+ * actually provides. Currently, interrupt levels which do not
+ * map onto the CPU in a generic fashion are undefined. Someday,
+ * it would be nice if these were "mapped" by the application
+ * via a callout. For example, m68k has 8 levels 0 - 7, levels
+ * 8 - 255 would be available for bsp/application specific meaning.
+ * This could be used to manage a programmable interrupt controller
+ * via the rtems_task_mode directive.
+ */
+
+unsigned32 _CPU_ISR_Calculate_level(
+ unsigned32 new_level
+);
+
+void _CPU_ISR_Set_level(
+ unsigned32 new_level
+);
+
+unsigned32 _CPU_ISR_Get_level( void );
+
+void _CPU_ISR_install_raw_handler(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/* end of ISR handler macros */
+
+/*
+ * Simple spin delay in microsecond units for device drivers.
+ * This is very dependent on the clock speed of the target.
+ */
+
+#define CPU_Get_timebase_low( _value ) \
+ asm volatile( "mftb %0" : "=r" (_value) )
+
+#define delay( _microseconds ) \
+ do { \
+ unsigned32 start, ticks, now; \
+ CPU_Get_timebase_low( start ) ; \
+ ticks = (_microseconds) * _CPU_Table.clicks_per_usec; \
+ do \
+ CPU_Get_timebase_low( now ) ; \
+ while (now - start < ticks); \
+ } while (0)
+
+#define delay_in_bus_cycles( _cycles ) \
+ do { \
+ unsigned32 start, now; \
+ CPU_Get_timebase_low( start ); \
+ do \
+ CPU_Get_timebase_low( now ); \
+ while (now - start < (_cycles)); \
+ } while (0)
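+
+/*
+ * Illustrative use (hypothetical driver code, write_device_register is an
+ * assumed helper): waiting roughly ten microseconds for a device register
+ * write to settle:
+ *
+ *     write_device_register( port, value );
+ *     delay( 10 );
+ */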
+
+
+
+/* Context handler macros */
+
+/*
+ * Initialize the context to a state suitable for starting a
+ * task after a context restore operation. Generally, this
+ * involves:
+ *
+ * - setting a starting address
+ * - preparing the stack
+ * - preparing the stack and frame pointers
+ * - setting the proper interrupt level in the context
+ * - initializing the floating point context
+ *
+ * This routine generally does not set any unnecessary register
+ * in the context. The state of the "general data" registers is
+ * undefined at task start time.
+ *
+ * NOTE: Implemented as a subroutine for the SPARC port.
+ */
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ unsigned32 *stack_base,
+ unsigned32 size,
+ unsigned32 new_level,
+ void *entry_point,
+ boolean is_fp
+);
+
+/*
+ * This routine is responsible for somehow restarting the currently
+ * executing task. If you are lucky, then all that is necessary
+ * is restoring the context. Otherwise, there will need to be
+ * a special assembly routine which does something special in this
+ * case. Context_Restore should work most of the time. It will
+ * not work if restarting self conflicts with the stack frame
+ * assumptions of restoring a context.
+ */
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ _CPU_Context_restore( (_the_context) );
+
+/*
+ * The purpose of this macro is to allow the initial pointer into
+ * a floating point context area (used to save the floating point
+ * context) to be at an arbitrary place in the floating point
+ * context area.
+ *
+ * This is necessary because some FP units are designed to have
+ * their context saved as a stack which grows into lower addresses.
+ * Other FP units can be saved by simply moving registers into offsets
+ * from the base of the context area. Finally some FP units provide
+ * a "dump context" instruction which could fill in from high to low
+ * or low to high based on the whim of the CPU designers.
+ */
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
+
+/*
+ * This routine initializes the FP context area passed to it.
+ * There are a few standard ways in which to initialize the
+ * floating point context. The code included for this macro assumes
+ * that this is a CPU in which an "initial" FP context was saved into
+ * _CPU_Null_fp_context and it simply copies it to the destination
+ * context passed to it.
+ *
+ * Other models include (1) not doing anything, and (2) putting
+ * a "null FP status word" in the correct place in the FP context.
+ */
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+ { \
+ ((Context_Control_fp *) *((void **) _destination))->fpscr = PPC_INIT_FPSCR; \
+ }
+
+/* end of Context handler macros */
+
+/* Fatal Error manager macros */
+
+/*
+ * This routine copies _error into a known place -- typically a stack
+ * location or a register, optionally disables interrupts, and
+ * halts/stops the CPU.
+ */
+
+#define _CPU_Fatal_halt( _error ) \
+ _CPU_Fatal_error(_error)
+
+/* end of Fatal Error manager macros */
+
+/* Bitfield handler macros */
+
+/*
+ * This routine sets _output to the bit number of the first bit
+ * set in _value. _value is of CPU dependent type Priority_Bit_map_control.
+ * This type may be either 16 or 32 bits wide although only the 16
+ * least significant bits will be used.
+ *
+ * There are a number of variables in using a "find first bit" type
+ * instruction.
+ *
+ * (1) What happens when run on a value of zero?
+ * (2) Bits may be numbered from MSB to LSB or vice-versa.
+ * (3) The numbering may be zero or one based.
+ * (4) The "find first bit" instruction may search from MSB or LSB.
+ *
+ * RTEMS guarantees that (1) will never happen so it is not a concern.
+ * (2), (3), and (4) are handled by the macros _CPU_Priority_Mask() and
+ * _CPU_Priority_bits_index(). These three form a set of routines
+ * which must logically operate together. Bits in the _value are
+ * set and cleared based on masks built by _CPU_Priority_Mask().
+ * The basic major and minor values calculated by _Priority_Major()
+ * and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
+ * to properly range between the values returned by the "find first bit"
+ * instruction. This makes it possible for _Priority_Get_highest() to
+ * calculate the major and directly index into the minor table.
+ * This mapping is necessary to ensure that 0 (a high priority major/minor)
+ * is the first bit found.
+ *
+ * This entire "find first bit" and mapping process depends heavily
+ * on the manner in which a priority is broken into a major and minor
+ * components with the major being the 4 MSB of a priority and minor
+ * the 4 LSB. Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
+ * priority. And (15 << 4) + 14 corresponds to priority 254 -- the next
+ * to the lowest priority.
+ *
+ * If your CPU does not have a "find first bit" instruction, then
+ * there are ways to make do without it. Here are a handful of ways
+ * to implement this in software:
+ *
+ * - a series of 16 bit test instructions
+ * - a "binary search using if's"
+ * - _number = 0
+ * if _value > 0x00ff
+ * _value >>= 8
+ * _number = 8;
+ *
+ * if _value > 0x000f
+ * _value >>= 4
+ * _number += 4
+ *
+ * _number += bit_set_table[ _value ]
+ *
+ * where bit_set_table[ 16 ] has values which indicate the first
+ * bit set (a C sketch of this approach appears after the macro below)
+ */
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ { \
+ asm volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
+ "1" ((_value))); \
+ }
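+
+/*
+ * A hypothetical C rendering of the table approach sketched above, assuming
+ * a bit_set_table that returns the most significant set bit of a nibble
+ * numbered from the LSB (a port using it would have to define
+ * _CPU_Priority_Mask() and _CPU_Priority_bits_index() consistently with
+ * that numbering):
+ *
+ *     static const unsigned char bit_set_table[ 16 ] =
+ *       { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 };
+ *
+ *     _number = 0;
+ *     if ( _value > 0x00ff ) { _value >>= 8; _number = 8; }
+ *     if ( _value > 0x000f ) { _value >>= 4; _number += 4; }
+ *     _number += bit_set_table[ _value ];
+ */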
+
+/* end of Bitfield handler macros */
+
+/*
+ * This routine builds the mask which corresponds to the bit fields
+ * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 0x80000000 >> (_bit_number) )
+
+/*
+ * This routine translates the bit numbers returned by
+ * _CPU_Bitfield_Find_first_bit() into something suitable for use as
+ * a major or minor component of a priority. See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_bits_index( _priority ) \
+ (_priority)
+
+/* end of Priority handler macros */
+
+/* variables */
+
+extern const unsigned32 _CPU_msrs[4];
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)
+);
+
+/*
+ * _CPU_ISR_install_vector
+ *
+ * This routine installs an interrupt vector.
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/*
+ * _CPU_Install_interrupt_stack
+ *
+ * This routine installs the hardware interrupt stack pointer.
+ *
+ * NOTE: It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
+ * is TRUE.
+ */
+
+void _CPU_Install_interrupt_stack( void );
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+);
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine saves the floating point context passed to it.
+ */
+
+void _CPU_Context_save_fp(
+ void **fp_context_ptr
+);
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine restores the floating point context passed to it.
+ */
+
+void _CPU_Context_restore_fp(
+ void **fp_context_ptr
+);
+
+void _CPU_Fatal_error(
+ unsigned32 _error
+);
+
+/* The following routine swaps the endian format of an unsigned int.
+ * It must be static because it is referenced indirectly.
+ *
+ * This version will work on any processor, but if there is a better
+ * way for your CPU PLEASE use it. The most common way to do this is to:
+ *
+ * swap least significant two bytes with 16-bit rotate
+ * swap upper and lower 16-bits
+ * swap most significant two bytes with 16-bit rotate
+ *
+ * Some CPUs have special instructions which swap a 32-bit quantity in
+ * a single instruction (e.g. i486). It is probably best to avoid
+ * an "endian swapping control bit" in the CPU. One good reason is
+ * that interrupts would probably have to be disabled to ensure that
+ * an interrupt does not try to access the same "chunk" with the wrong
+ * endian. Another good reason is that on some CPUs, the endian bit
+ * changes the endianness for ALL fetches -- both code and data -- so the code
+ * will be fetched incorrectly.
+ */
+
+static inline unsigned int CPU_swap_u32(
+ unsigned int value
+)
+{
+ unsigned32 swapped;
+
+ asm volatile("rlwimi %0,%1,8,24,31;"
+ "rlwimi %0,%1,24,16,23;"
+ "rlwimi %0,%1,8,8,15;"
+ "rlwimi %0,%1,24,0,7;" :
+ "=&r" ((swapped)) : "r" ((value)));
+
+ return( swapped );
+}
+
+#define CPU_swap_u16( value ) \
+ (((value&0xff) << 8) | ((value >> 8)&0xff))
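+
+/*
+ * Illustrative use (hypothetical): converting a 32-bit quantity read from a
+ * little endian device register into host (big endian) order:
+ *
+ *     unsigned32 host_value = CPU_swap_u32( raw_le_value );
+ */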
+
+/*
+ * Routines to access the decrementer register
+ */
+
+#define PPC_Set_decrementer( _clicks ) \
+ do { \
+ asm volatile( "mtdec %0" : "=r" ((_clicks)) : "r" ((_clicks)) ); \
+ } while (0)
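+
+/*
+ * For example (hypothetical clock driver code, microseconds_per_tick being
+ * an assumed configuration value): programming the decrementer to expire
+ * after one clock tick:
+ *
+ *     PPC_Set_decrementer( _CPU_Table.clicks_per_usec * microseconds_per_tick );
+ */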
+
+/*
+ * Routines to access the time base register
+ */
+
+static inline unsigned64 PPC_Get_timebase_register( void )
+{
+ unsigned32 tbr_low;
+ unsigned32 tbr_high;
+ unsigned32 tbr_high_old;
+ unsigned64 tbr;
+
+ do {
+ asm volatile( "mftbu %0" : "=r" (tbr_high_old));
+ asm volatile( "mftb %0" : "=r" (tbr_low));
+ asm volatile( "mftbu %0" : "=r" (tbr_high));
+ } while ( tbr_high_old != tbr_high );
+
+ tbr = tbr_high;
+ tbr <<= 32;
+ tbr |= tbr_low;
+ return tbr;
+}
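+
+/*
+ * A minimal usage sketch (not from the original source) measuring the number
+ * of time base ticks an operation takes:
+ *
+ *     unsigned64 start, elapsed;
+ *
+ *     start = PPC_Get_timebase_register();
+ *     -- operation being timed --
+ *     elapsed = PPC_Get_timebase_register() - start;
+ */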
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/c/src/lib/libcpu/powerpc/old_exception_processing/cpu_asm.S b/c/src/lib/libcpu/powerpc/old_exception_processing/cpu_asm.S
new file mode 100644
index 0000000000..a377fa5d2a
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old_exception_processing/cpu_asm.S
@@ -0,0 +1,809 @@
+
+/* cpu_asm.s 1.1 - 95/12/04
+ *
+ * This file contains the assembly code for the PowerPC implementation
+ * of RTEMS.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu_asm.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+/*
+ * Offsets for various Contexts
+ */
+ .set GP_1, 0
+ .set GP_2, (GP_1 + 4)
+ .set GP_13, (GP_2 + 4)
+ .set GP_14, (GP_13 + 4)
+
+ .set GP_15, (GP_14 + 4)
+ .set GP_16, (GP_15 + 4)
+ .set GP_17, (GP_16 + 4)
+ .set GP_18, (GP_17 + 4)
+
+ .set GP_19, (GP_18 + 4)
+ .set GP_20, (GP_19 + 4)
+ .set GP_21, (GP_20 + 4)
+ .set GP_22, (GP_21 + 4)
+
+ .set GP_23, (GP_22 + 4)
+ .set GP_24, (GP_23 + 4)
+ .set GP_25, (GP_24 + 4)
+ .set GP_26, (GP_25 + 4)
+
+ .set GP_27, (GP_26 + 4)
+ .set GP_28, (GP_27 + 4)
+ .set GP_29, (GP_28 + 4)
+ .set GP_30, (GP_29 + 4)
+
+ .set GP_31, (GP_30 + 4)
+ .set GP_CR, (GP_31 + 4)
+ .set GP_PC, (GP_CR + 4)
+ .set GP_MSR, (GP_PC + 4)
+
+#if (PPC_HAS_DOUBLE == 1)
+ .set FP_0, 0
+ .set FP_1, (FP_0 + 8)
+ .set FP_2, (FP_1 + 8)
+ .set FP_3, (FP_2 + 8)
+ .set FP_4, (FP_3 + 8)
+ .set FP_5, (FP_4 + 8)
+ .set FP_6, (FP_5 + 8)
+ .set FP_7, (FP_6 + 8)
+ .set FP_8, (FP_7 + 8)
+ .set FP_9, (FP_8 + 8)
+ .set FP_10, (FP_9 + 8)
+ .set FP_11, (FP_10 + 8)
+ .set FP_12, (FP_11 + 8)
+ .set FP_13, (FP_12 + 8)
+ .set FP_14, (FP_13 + 8)
+ .set FP_15, (FP_14 + 8)
+ .set FP_16, (FP_15 + 8)
+ .set FP_17, (FP_16 + 8)
+ .set FP_18, (FP_17 + 8)
+ .set FP_19, (FP_18 + 8)
+ .set FP_20, (FP_19 + 8)
+ .set FP_21, (FP_20 + 8)
+ .set FP_22, (FP_21 + 8)
+ .set FP_23, (FP_22 + 8)
+ .set FP_24, (FP_23 + 8)
+ .set FP_25, (FP_24 + 8)
+ .set FP_26, (FP_25 + 8)
+ .set FP_27, (FP_26 + 8)
+ .set FP_28, (FP_27 + 8)
+ .set FP_29, (FP_28 + 8)
+ .set FP_30, (FP_29 + 8)
+ .set FP_31, (FP_30 + 8)
+ .set FP_FPSCR, (FP_31 + 8)
+#else
+ .set FP_0, 0
+ .set FP_1, (FP_0 + 4)
+ .set FP_2, (FP_1 + 4)
+ .set FP_3, (FP_2 + 4)
+ .set FP_4, (FP_3 + 4)
+ .set FP_5, (FP_4 + 4)
+ .set FP_6, (FP_5 + 4)
+ .set FP_7, (FP_6 + 4)
+ .set FP_8, (FP_7 + 4)
+ .set FP_9, (FP_8 + 4)
+ .set FP_10, (FP_9 + 4)
+ .set FP_11, (FP_10 + 4)
+ .set FP_12, (FP_11 + 4)
+ .set FP_13, (FP_12 + 4)
+ .set FP_14, (FP_13 + 4)
+ .set FP_15, (FP_14 + 4)
+ .set FP_16, (FP_15 + 4)
+ .set FP_17, (FP_16 + 4)
+ .set FP_18, (FP_17 + 4)
+ .set FP_19, (FP_18 + 4)
+ .set FP_20, (FP_19 + 4)
+ .set FP_21, (FP_20 + 4)
+ .set FP_22, (FP_21 + 4)
+ .set FP_23, (FP_22 + 4)
+ .set FP_24, (FP_23 + 4)
+ .set FP_25, (FP_24 + 4)
+ .set FP_26, (FP_25 + 4)
+ .set FP_27, (FP_26 + 4)
+ .set FP_28, (FP_27 + 4)
+ .set FP_29, (FP_28 + 4)
+ .set FP_30, (FP_29 + 4)
+ .set FP_31, (FP_30 + 4)
+ .set FP_FPSCR, (FP_31 + 4)
+#endif
+
+ .set IP_LINK, 0
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ .set IP_0, (IP_LINK + 56)
+#else
+ .set IP_0, (IP_LINK + 8)
+#endif
+ .set IP_2, (IP_0 + 4)
+
+ .set IP_3, (IP_2 + 4)
+ .set IP_4, (IP_3 + 4)
+ .set IP_5, (IP_4 + 4)
+ .set IP_6, (IP_5 + 4)
+
+ .set IP_7, (IP_6 + 4)
+ .set IP_8, (IP_7 + 4)
+ .set IP_9, (IP_8 + 4)
+ .set IP_10, (IP_9 + 4)
+
+ .set IP_11, (IP_10 + 4)
+ .set IP_12, (IP_11 + 4)
+ .set IP_13, (IP_12 + 4)
+ .set IP_28, (IP_13 + 4)
+
+ .set IP_29, (IP_28 + 4)
+ .set IP_30, (IP_29 + 4)
+ .set IP_31, (IP_30 + 4)
+ .set IP_CR, (IP_31 + 4)
+
+ .set IP_CTR, (IP_CR + 4)
+ .set IP_XER, (IP_CTR + 4)
+ .set IP_LR, (IP_XER + 4)
+ .set IP_PC, (IP_LR + 4)
+
+ .set IP_MSR, (IP_PC + 4)
+ .set IP_END, (IP_MSR + 16)
+
+ /* _CPU_IRQ_info offsets */
+
+ /* These must be in this order */
+ .set Nest_level, 0
+ .set Disable_level, 4
+ .set Vector_table, 8
+ .set Stack, 12
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ .set Dispatch_r2, 16
+ .set Switch_necessary, 20
+#else
+ .set Default_r2, 16
+#if (PPC_ABI != PPC_ABI_GCC27)
+ .set Default_r13, 20
+ .set Switch_necessary, 24
+#else
+ .set Switch_necessary, 20
+#endif
+#endif
+ .set Signal, Switch_necessary + 4
+ .set msr_initial, Signal + 4
+
+ BEGIN_CODE
+/*
+ * _CPU_Context_save_fp_context
+ *
+ * This routine is responsible for saving the FP context
+ * at *fp_context_ptr. If the point to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_save_fp)
+PROC (_CPU_Context_save_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+#if (PPC_HAS_DOUBLE == 1)
+ stfd f0, FP_0(r3)
+ stfd f1, FP_1(r3)
+ stfd f2, FP_2(r3)
+ stfd f3, FP_3(r3)
+ stfd f4, FP_4(r3)
+ stfd f5, FP_5(r3)
+ stfd f6, FP_6(r3)
+ stfd f7, FP_7(r3)
+ stfd f8, FP_8(r3)
+ stfd f9, FP_9(r3)
+ stfd f10, FP_10(r3)
+ stfd f11, FP_11(r3)
+ stfd f12, FP_12(r3)
+ stfd f13, FP_13(r3)
+ stfd f14, FP_14(r3)
+ stfd f15, FP_15(r3)
+ stfd f16, FP_16(r3)
+ stfd f17, FP_17(r3)
+ stfd f18, FP_18(r3)
+ stfd f19, FP_19(r3)
+ stfd f20, FP_20(r3)
+ stfd f21, FP_21(r3)
+ stfd f22, FP_22(r3)
+ stfd f23, FP_23(r3)
+ stfd f24, FP_24(r3)
+ stfd f25, FP_25(r3)
+ stfd f26, FP_26(r3)
+ stfd f27, FP_27(r3)
+ stfd f28, FP_28(r3)
+ stfd f29, FP_29(r3)
+ stfd f30, FP_30(r3)
+ stfd f31, FP_31(r3)
+ mffs f2
+ stfd f2, FP_FPSCR(r3)
+#else
+ stfs f0, FP_0(r3)
+ stfs f1, FP_1(r3)
+ stfs f2, FP_2(r3)
+ stfs f3, FP_3(r3)
+ stfs f4, FP_4(r3)
+ stfs f5, FP_5(r3)
+ stfs f6, FP_6(r3)
+ stfs f7, FP_7(r3)
+ stfs f8, FP_8(r3)
+ stfs f9, FP_9(r3)
+ stfs f10, FP_10(r3)
+ stfs f11, FP_11(r3)
+ stfs f12, FP_12(r3)
+ stfs f13, FP_13(r3)
+ stfs f14, FP_14(r3)
+ stfs f15, FP_15(r3)
+ stfs f16, FP_16(r3)
+ stfs f17, FP_17(r3)
+ stfs f18, FP_18(r3)
+ stfs f19, FP_19(r3)
+ stfs f20, FP_20(r3)
+ stfs f21, FP_21(r3)
+ stfs f22, FP_22(r3)
+ stfs f23, FP_23(r3)
+ stfs f24, FP_24(r3)
+ stfs f25, FP_25(r3)
+ stfs f26, FP_26(r3)
+ stfs f27, FP_27(r3)
+ stfs f28, FP_28(r3)
+ stfs f29, FP_29(r3)
+ stfs f30, FP_30(r3)
+ stfs f31, FP_31(r3)
+ mffs f2
+ stfs f2, FP_FPSCR(r3)
+#endif
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore_fp_context
+ *
+ * This routine is responsible for restoring the FP context
+ * at *fp_context_ptr. If the pointer to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore_fp)
+PROC (_CPU_Context_restore_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+#if (PPC_HAS_DOUBLE == 1)
+ lfd f2, FP_FPSCR(r3)
+ mtfsf 255, f2
+ lfd f0, FP_0(r3)
+ lfd f1, FP_1(r3)
+ lfd f2, FP_2(r3)
+ lfd f3, FP_3(r3)
+ lfd f4, FP_4(r3)
+ lfd f5, FP_5(r3)
+ lfd f6, FP_6(r3)
+ lfd f7, FP_7(r3)
+ lfd f8, FP_8(r3)
+ lfd f9, FP_9(r3)
+ lfd f10, FP_10(r3)
+ lfd f11, FP_11(r3)
+ lfd f12, FP_12(r3)
+ lfd f13, FP_13(r3)
+ lfd f14, FP_14(r3)
+ lfd f15, FP_15(r3)
+ lfd f16, FP_16(r3)
+ lfd f17, FP_17(r3)
+ lfd f18, FP_18(r3)
+ lfd f19, FP_19(r3)
+ lfd f20, FP_20(r3)
+ lfd f21, FP_21(r3)
+ lfd f22, FP_22(r3)
+ lfd f23, FP_23(r3)
+ lfd f24, FP_24(r3)
+ lfd f25, FP_25(r3)
+ lfd f26, FP_26(r3)
+ lfd f27, FP_27(r3)
+ lfd f28, FP_28(r3)
+ lfd f29, FP_29(r3)
+ lfd f30, FP_30(r3)
+ lfd f31, FP_31(r3)
+#else
+ lfs f2, FP_FPSCR(r3)
+ mtfsf 255, f2
+ lfs f0, FP_0(r3)
+ lfs f1, FP_1(r3)
+ lfs f2, FP_2(r3)
+ lfs f3, FP_3(r3)
+ lfs f4, FP_4(r3)
+ lfs f5, FP_5(r3)
+ lfs f6, FP_6(r3)
+ lfs f7, FP_7(r3)
+ lfs f8, FP_8(r3)
+ lfs f9, FP_9(r3)
+ lfs f10, FP_10(r3)
+ lfs f11, FP_11(r3)
+ lfs f12, FP_12(r3)
+ lfs f13, FP_13(r3)
+ lfs f14, FP_14(r3)
+ lfs f15, FP_15(r3)
+ lfs f16, FP_16(r3)
+ lfs f17, FP_17(r3)
+ lfs f18, FP_18(r3)
+ lfs f19, FP_19(r3)
+ lfs f20, FP_20(r3)
+ lfs f21, FP_21(r3)
+ lfs f22, FP_22(r3)
+ lfs f23, FP_23(r3)
+ lfs f24, FP_24(r3)
+ lfs f25, FP_25(r3)
+ lfs f26, FP_26(r3)
+ lfs f27, FP_27(r3)
+ lfs f28, FP_28(r3)
+ lfs f29, FP_29(r3)
+ lfs f30, FP_30(r3)
+ lfs f31, FP_31(r3)
+#endif
+#endif
+ blr
+
+
+/* _CPU_Context_switch
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_switch)
+PROC (_CPU_Context_switch):
+ sync
+ isync
+#if (PPC_CACHE_ALIGNMENT == 4) /* No cache */
+ stw r1, GP_1(r3)
+ lwz r1, GP_1(r4)
+ stw r2, GP_2(r3)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ stmw r13, GP_13(r3)
+ lmw r13, GP_13(r4)
+#else
+ stw r13, GP_13(r3)
+ lwz r13, GP_13(r4)
+ stw r14, GP_14(r3)
+ lwz r14, GP_14(r4)
+ stw r15, GP_15(r3)
+ lwz r15, GP_15(r4)
+ stw r16, GP_16(r3)
+ lwz r16, GP_16(r4)
+ stw r17, GP_17(r3)
+ lwz r17, GP_17(r4)
+ stw r18, GP_18(r3)
+ lwz r18, GP_18(r4)
+ stw r19, GP_19(r3)
+ lwz r19, GP_19(r4)
+ stw r20, GP_20(r3)
+ lwz r20, GP_20(r4)
+ stw r21, GP_21(r3)
+ lwz r21, GP_21(r4)
+ stw r22, GP_22(r3)
+ lwz r22, GP_22(r4)
+ stw r23, GP_23(r3)
+ lwz r23, GP_23(r4)
+ stw r24, GP_24(r3)
+ lwz r24, GP_24(r4)
+ stw r25, GP_25(r3)
+ lwz r25, GP_25(r4)
+ stw r26, GP_26(r3)
+ lwz r26, GP_26(r4)
+ stw r27, GP_27(r3)
+ lwz r27, GP_27(r4)
+ stw r28, GP_28(r3)
+ lwz r28, GP_28(r4)
+ stw r29, GP_29(r3)
+ lwz r29, GP_29(r4)
+ stw r30, GP_30(r3)
+ lwz r30, GP_30(r4)
+ stw r31, GP_31(r3)
+ lwz r31, GP_31(r4)
+#endif
+ mfcr r5
+ stw r5, GP_CR(r3)
+ lwz r5, GP_CR(r4)
+ mflr r6
+ mtcrf 255, r5
+ stw r6, GP_PC(r3)
+ lwz r6, GP_PC(r4)
+ mfmsr r7
+ mtlr r6
+ stw r7, GP_MSR(r3)
+ lwz r7, GP_MSR(r4)
+ mtmsr r7
+#endif
+#if (PPC_CACHE_ALIGNMENT == 16)
+ /* This assumes that all the registers are in the given order */
+ li r5, 16
+ addi r3,r3,-4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_14+4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+
+ addi r3, r3, GP_18-GP_14
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ addi r3, r3, GP_22-GP_18
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ addi r3, r3, GP_26-GP_22
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stmw r13, GP_13-GP_26(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stwu r14, GP_14+4(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r15, GP_15-GP_14(r3)
+ stw r16, GP_16-GP_14(r3)
+ stw r17, GP_17-GP_14(r3)
+ stwu r18, GP_18-GP_14(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stwu r22, GP_22-GP_18(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r23, GP_23-GP_22(r3)
+ stw r24, GP_24-GP_22(r3)
+ stw r25, GP_25-GP_22(r3)
+ stwu r26, GP_26-GP_22(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r27, GP_27-GP_26(r3)
+ stw r28, GP_28-GP_26(r3)
+ stw r29, GP_29-GP_26(r3)
+ stw r30, GP_30-GP_26(r3)
+ stw r31, GP_31-GP_26(r3)
+#endif
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r0, r4
+#endif
+ mfcr r6
+ stw r6, GP_CR-GP_26(r3)
+ mflr r7
+ stw r7, GP_PC-GP_26(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_26(r3)
+
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_15
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ addi r4, r4, GP_19-GP_15
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ addi r4, r4, GP_23-GP_19
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ addi r4, r4, GP_27-GP_23
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lmw r13, GP_13-GP_27(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwzu r15, GP_15(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r16, GP_16-GP_15(r4)
+ lwz r17, GP_17-GP_15(r4)
+ lwz r18, GP_18-GP_15(r4)
+ lwzu r19, GP_19-GP_15(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwzu r23, GP_23-GP_19(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r24, GP_24-GP_23(r4)
+ lwz r25, GP_25-GP_23(r4)
+ lwz r26, GP_26-GP_23(r4)
+ lwzu r27, GP_27-GP_23(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r28, GP_28-GP_27(r4)
+ lwz r29, GP_29-GP_27(r4)
+ lwz r30, GP_30-GP_27(r4)
+ lwz r31, GP_31-GP_27(r4)
+#endif
+ lwz r6, GP_CR-GP_27(r4)
+ lwz r7, GP_PC-GP_27(r4)
+ lwz r8, GP_MSR-GP_27(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+#endif
+#if (PPC_CACHE_ALIGNMENT == 32)
+ /* This assumes that all the registers are in the given order */
+ li r5, 32
+ addi r3,r3,-4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_18+4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stmw r13, GP_13-GP_18(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stw r14, GP_14+4(r3)
+ stw r15, GP_15+4(r3)
+ stw r16, GP_16+4(r3)
+ stw r17, GP_17+4(r3)
+ stwu r18, GP_18+4(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stw r22, GP_22-GP_18(r3)
+ stw r23, GP_23-GP_18(r3)
+ stw r24, GP_24-GP_18(r3)
+ stw r25, GP_25-GP_18(r3)
+ stw r26, GP_26-GP_18(r3)
+ stw r27, GP_27-GP_18(r3)
+ stw r28, GP_28-GP_18(r3)
+ stw r29, GP_29-GP_18(r3)
+ stw r30, GP_30-GP_18(r3)
+ stw r31, GP_31-GP_18(r3)
+#endif
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r0, r4
+#endif
+ mfcr r6
+ stw r6, GP_CR-GP_18(r3)
+ mflr r7
+ stw r7, GP_PC-GP_18(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_18(r3)
+
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_19
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lmw r13, GP_13-GP_19(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwz r15, GP_15(r4)
+ lwz r16, GP_16(r4)
+ lwz r17, GP_17(r4)
+ lwz r18, GP_18(r4)
+ lwzu r19, GP_19(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwz r23, GP_23-GP_19(r4)
+ lwz r24, GP_24-GP_19(r4)
+ lwz r25, GP_25-GP_19(r4)
+ lwz r26, GP_26-GP_19(r4)
+ lwz r27, GP_27-GP_19(r4)
+ lwz r28, GP_28-GP_19(r4)
+ lwz r29, GP_29-GP_19(r4)
+ lwz r30, GP_30-GP_19(r4)
+ lwz r31, GP_31-GP_19(r4)
+#endif
+ lwz r6, GP_CR-GP_19(r4)
+ lwz r7, GP_PC-GP_19(r4)
+ lwz r8, GP_MSR-GP_19(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+/*
+ * ACB: Don't worry about cache optimisation here - this is not THAT critical.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore)
+PROC (_CPU_Context_restore):
+ lwz r5, GP_CR(r3)
+ lwz r6, GP_PC(r3)
+ lwz r7, GP_MSR(r3)
+ mtcrf 255, r5
+ mtlr r6
+ mtmsr r7
+ lwz r1, GP_1(r3)
+ lwz r2, GP_2(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ lmw r13, GP_13(r3)
+#else
+ lwz r13, GP_13(r3)
+ lwz r14, GP_14(r3)
+ lwz r15, GP_15(r3)
+ lwz r16, GP_16(r3)
+ lwz r17, GP_17(r3)
+ lwz r18, GP_18(r3)
+ lwz r19, GP_19(r3)
+ lwz r20, GP_20(r3)
+ lwz r21, GP_21(r3)
+ lwz r22, GP_22(r3)
+ lwz r23, GP_23(r3)
+ lwz r24, GP_24(r3)
+ lwz r25, GP_25(r3)
+ lwz r26, GP_26(r3)
+ lwz r27, GP_27(r3)
+ lwz r28, GP_28(r3)
+ lwz r29, GP_29(r3)
+ lwz r30, GP_30(r3)
+ lwz r31, GP_31(r3)
+#endif
+
+ blr
+
+/* Individual interrupt prologues look like this:
+ * #if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ * #if (PPC_HAS_FPU)
+ * stwu r1, -(20*4 + 18*8 + IP_END)(r1)
+ * #else
+ * stwu r1, -(20*4 + IP_END)(r1)
+ * #endif
+ * #else
+ * stwu r1, -(IP_END)(r1)
+ * #endif
+ * stw r0, IP_0(r1)
+ *
+ * li r0, vectornum
+ * b PROC (_ISR_Handler{,C})
+ */
+
+/* void __ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ * The vector number is in r0. R0 has already been stacked.
+ *
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_ISR_Handler)
+PROC (_ISR_Handler):
+#define LABEL(x) x
+/* XXX ??
+#define MTSAVE(x) mtspr sprg0, x
+#define MFSAVE(x) mfspr x, sprg0
+*/
+#define MTPC(x) mtspr srr0, x
+#define MFPC(x) mfspr x, srr0
+#define MTMSR(x) mtspr srr1, x
+#define MFMSR(x) mfspr x, srr1
+
+ #include "irq_stub.S"
+ rfi
+
+#if (PPC_HAS_RFCI == 1)
+/* void __ISR_HandlerC()
+ *
+ * This routine provides the RTEMS interrupt management.
+ * For critical interrupts
+ *
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_ISR_HandlerC)
+PROC (_ISR_HandlerC):
+#undef LABEL
+#undef MTSAVE
+#undef MFSAVE
+#undef MTPC
+#undef MFPC
+#undef MTMSR
+#undef MFMSR
+#define LABEL(x) x##_C
+/* XXX??
+#define MTSAVE(x) mtspr sprg1, x
+#define MFSAVE(x) mfspr x, sprg1
+*/
+#define MTPC(x) mtspr srr2, x
+#define MFPC(x) mfspr x, srr2
+#define MTMSR(x) mtspr srr3, x
+#define MFMSR(x) mfspr x, srr3
+ #include "irq_stub.S"
+ rfci
+#endif
+
+/* PowerOpen descriptors for indirect function calls.
+ */
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ DESCRIPTOR (_CPU_Context_save_fp)
+ DESCRIPTOR (_CPU_Context_restore_fp)
+ DESCRIPTOR (_CPU_Context_switch)
+ DESCRIPTOR (_CPU_Context_restore)
+ DESCRIPTOR (_ISR_Handler)
+#if (PPC_HAS_RFCI == 1)
+ DESCRIPTOR (_ISR_HandlerC)
+#endif
+#endif
diff --git a/c/src/lib/libcpu/powerpc/old_exception_processing/irq_stub.S b/c/src/lib/libcpu/powerpc/old_exception_processing/irq_stub.S
new file mode 100644
index 0000000000..76c8927305
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old_exception_processing/irq_stub.S
@@ -0,0 +1,268 @@
+/*
+ * This file contains the interrupt handler assembly code for the PowerPC
+ * implementation of RTEMS. It is #included from cpu_asm.s.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * $Id$
+ */
+
+/* void __ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ * The vector number is in r0. R0 has already been stacked.
+ *
+ */
+ PUBLIC_VAR (_CPU_IRQ_info )
+
+ /* Finish off the interrupt frame */
+ stw r2, IP_2(r1)
+ stw r3, IP_3(r1)
+ stw r4, IP_4(r1)
+ stw r5, IP_5(r1)
+ stw r6, IP_6(r1)
+ stw r7, IP_7(r1)
+ stw r8, IP_8(r1)
+ stw r9, IP_9(r1)
+ stw r10, IP_10(r1)
+ stw r11, IP_11(r1)
+ stw r12, IP_12(r1)
+ stw r13, IP_13(r1)
+ stmw r28, IP_28(r1)
+ mfcr r5
+ mfctr r6
+ mfxer r7
+ mflr r8
+ MFPC (r9)
+ MFMSR (r10)
+ /* Establish addressing */
+#if (PPC_USE_SPRG)
+ mfspr r11, sprg3
+#else
+ lis r11,_CPU_IRQ_info@ha
+ addi r11,r11,_CPU_IRQ_info@l
+#endif
+ dcbt r0, r11
+ stw r5, IP_CR(r1)
+ stw r6, IP_CTR(r1)
+ stw r7, IP_XER(r1)
+ stw r8, IP_LR(r1)
+ stw r9, IP_PC(r1)
+ stw r10, IP_MSR(r1)
+
+ lwz r30, Vector_table(r11)
+ slwi r4,r0,2
+ lwz r28, Nest_level(r11)
+ add r4, r4, r30
+
+ lwz r30, 0(r28)
+ mr r3, r0
+ lwz r31, Stack(r11)
+ /*
+ * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
+ * if ( _ISR_Nest_level == 0 )
+ * switch to software interrupt stack
+ * #endif
+ */
+ /* Switch stacks, here we must prevent ALL interrupts */
+#if (PPC_USE_SPRG)
+ mfmsr r5
+ mfspr r6, sprg2
+#else
+ lwz r6,msr_initial(r11)
+ lis r5,~PPC_MSR_DISABLE_MASK@ha
+ ori r5,r5,~PPC_MSR_DISABLE_MASK@l
+ and r6,r6,r5
+ mfmsr r5
+#endif
+ mtmsr r6
+ cmpwi r30, 0
+ lwz r29, Disable_level(r11)
+ subf r31,r1,r31
+ bne LABEL (nested)
+ stwux r1,r1,r31
+LABEL (nested):
+ /*
+ * _ISR_Nest_level++;
+ */
+ lwz r31, 0(r29)
+ addi r30,r30,1
+ stw r30,0(r28)
+ /* From here on out, interrupts can be re-enabled. RTEMS
+ * convention says not.
+ */
+ lwz r4,0(r4)
+ /*
+ * _Thread_Dispatch_disable_level++;
+ */
+ addi r31,r31,1
+ stw r31, 0(r29)
+/* SCE 980217
+ *
+ * We need address translation ON when we call our ISR routine
+
+ mtmsr r5
+
+ */
+
+ /*
+ * (*_ISR_Vector_table[ vector ])( vector );
+ */
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ lwz r6,0(r4)
+ lwz r2,4(r4)
+ mtlr r6
+ lwz r11,8(r4)
+#endif
+#if (PPC_ABI == PPC_ABI_GCC27)
+ lwz r2, Default_r2(r11)
+ mtlr r4
+ #lwz r2, 0(r2)
+#endif
+#if (PPC_ABI == PPC_ABI_SVR4 || PPC_ABI == PPC_ABI_EABI)
+ mtlr r4
+ lwz r2, Default_r2(r11)
+ lwz r13, Default_r13(r11)
+ #lwz r2, 0(r2)
+ #lwz r13, 0(r13)
+#endif
+ mr r4,r1
+ blrl
+ /* NOP marker for debuggers */
+ or r6,r6,r6
+
+ /* We must re-disable the interrupts */
+#if (PPC_USE_SPRG)
+ mfspr r11, sprg3
+ mfspr r0, sprg2
+#else
+ lis r11,_CPU_IRQ_info@ha
+ addi r11,r11,_CPU_IRQ_info@l
+ lwz r0,msr_initial(r11)
+ lis r30,~PPC_MSR_DISABLE_MASK@ha
+ ori r30,r30,~PPC_MSR_DISABLE_MASK@l
+ and r0,r0,r30
+#endif
+ mtmsr r0
+ lwz r30, 0(r28)
+ lwz r31, 0(r29)
+
+ /*
+ * if (--Thread_Dispatch_disable,--_ISR_Nest_level)
+ * goto easy_exit;
+ */
+ addi r30, r30, -1
+ cmpwi r30, 0
+ addi r31, r31, -1
+ stw r30, 0(r28)
+ stw r31, 0(r29)
+ bne LABEL (easy_exit)
+ cmpwi r31, 0
+
+ lwz r30, Switch_necessary(r11)
+
+ /*
+ * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
+ * restore stack
+ * #endif
+ */
+ lwz r1,0(r1)
+ bne LABEL (easy_exit)
+ lwz r30, 0(r30)
+ lwz r31, Signal(r11)
+
+ /*
+ * if ( _Context_Switch_necessary )
+ * goto switch
+ */
+ cmpwi r30, 0
+ lwz r28, 0(r31)
+ li r6,0
+ bne LABEL (switch)
+ /*
+ * if ( !_ISR_Signals_to_thread_executing )
+ * goto easy_exit
+ * _ISR_Signals_to_thread_executing = 0;
+ */
+ cmpwi r28, 0
+ beq LABEL (easy_exit)
+
+ /*
+ * switch:
+ * call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
+ */
+LABEL (switch):
+ stw r6, 0(r31)
+ /* Re-enable interrupts */
+ lwz r0, IP_MSR(r1)
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ lwz r2, Dispatch_r2(r11)
+#else
+ /* R2 and R13 still hold their values from the last call */
+#endif
+ mtmsr r0
+ bl SYM (_Thread_Dispatch)
+ /* NOP marker for debuggers */
+ or r6,r6,r6
+ /*
+ * prepare to get out of interrupt
+ */
+ /* Re-disable IRQs */
+#if (PPC_USE_SPRG)
+ mfspr r0, sprg2
+#else
+ lis r11,_CPU_IRQ_info@ha
+ addi r11,r11,_CPU_IRQ_info@l
+ lwz r0,msr_initial(r11)
+ lis r5,~PPC_MSR_DISABLE_MASK@ha
+ ori r5,r5,~PPC_MSR_DISABLE_MASK@l
+ and r0,r0,r5
+#endif
+ mtmsr r0
+
+ /*
+ * easy_exit:
+ * prepare to get out of interrupt
+ * return from interrupt
+ */
+LABEL (easy_exit):
+ lwz r5, IP_CR(r1)
+ lwz r6, IP_CTR(r1)
+ lwz r7, IP_XER(r1)
+ lwz r8, IP_LR(r1)
+ lwz r9, IP_PC(r1)
+ lwz r10, IP_MSR(r1)
+ mtcrf 255,r5
+ mtctr r6
+ mtxer r7
+ mtlr r8
+ MTPC (r9)
+ MTMSR (r10)
+ lwz r0, IP_0(r1)
+ lwz r2, IP_2(r1)
+ lwz r3, IP_3(r1)
+ lwz r4, IP_4(r1)
+ lwz r5, IP_5(r1)
+ lwz r6, IP_6(r1)
+ lwz r7, IP_7(r1)
+ lwz r8, IP_8(r1)
+ lwz r9, IP_9(r1)
+ lwz r10, IP_10(r1)
+ lwz r11, IP_11(r1)
+ lwz r12, IP_12(r1)
+ lwz r13, IP_13(r1)
+ lmw r28, IP_28(r1)
+ lwz r1, 0(r1)
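
The control flow of __ISR_Handler is easier to follow in C than in the scheduled
assembly above. The sketch below is an editorial illustration only, not part of
the patch: the wrapper name __ISR_Handler_sketch is invented, and the extern
declarations only approximate the RTEMS internals named in the comments
(_ISR_Nest_level, _Thread_Dispatch_disable_level, _ISR_Vector_table,
_Context_Switch_necessary, _ISR_Signals_to_thread_executing).

    /* Editorial sketch of the dispatch logic; assumes the RTEMS globals below. */
    typedef void (*isr_entry)( unsigned int vector );

    extern volatile unsigned int _ISR_Nest_level;
    extern volatile unsigned int _Thread_Dispatch_disable_level;
    extern volatile unsigned int _Context_Switch_necessary;
    extern volatile unsigned int _ISR_Signals_to_thread_executing;
    extern isr_entry _ISR_Vector_table[];
    extern void _Thread_Dispatch( void );

    void __ISR_Handler_sketch( unsigned int vector )   /* hypothetical wrapper */
    {
      _ISR_Nest_level++;                      /* the assembly also switches to  */
      _Thread_Dispatch_disable_level++;       /* the interrupt stack at nest 0  */

      (*_ISR_Vector_table[ vector ])( vector );

      _ISR_Nest_level--;
      _Thread_Dispatch_disable_level--;
      if ( _ISR_Nest_level != 0 || _Thread_Dispatch_disable_level != 0 )
        return;                               /* the "easy_exit" path */

      if ( _Context_Switch_necessary || _ISR_Signals_to_thread_executing ) {
        _ISR_Signals_to_thread_executing = 0;
        _Thread_Dispatch();                   /* the "switch" path */
      }
    }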
diff --git a/c/src/lib/libcpu/powerpc/old_exception_processing/ppccache.c b/c/src/lib/libcpu/powerpc/old_exception_processing/ppccache.c
new file mode 100644
index 0000000000..ecfb4b96ca
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old_exception_processing/ppccache.c
@@ -0,0 +1,61 @@
+/*
+ * PowerPC Cache enable routines
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+
+#define PPC_Get_HID0( _value ) \
+ do { \
+ _value = 0; /* to avoid warnings */ \
+ asm volatile( \
+ "mfspr %0, 0x3f0;" /* get HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+#define PPC_Set_HID0( _value ) \
+ do { \
+ asm volatile( \
+ "isync;" \
+ "mtspr 0x3f0, %0;" /* load HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+
+void powerpc_instruction_cache_enable ()
+{
+ unsigned32 value;
+
+ /*
+ * Enable the instruction cache
+ */
+
+ PPC_Get_HID0( value );
+
+ value |= 0x00008000; /* Set ICE bit */
+
+ PPC_Set_HID0( value );
+}
+
+void powerpc_data_cache_enable ()
+{
+ unsigned32 value;
+
+ /*
+ * enable data cache
+ */
+
+ PPC_Get_HID0( value );
+
+ value |= 0x00004000; /* set DCE bit */
+
+ PPC_Set_HID0( value );
+}
+
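
The two routines above only set the instruction- and data-cache enable bits in
HID0 (ICE = 0x00008000, DCE = 0x00004000). A typical call site, shown purely as
an illustrative sketch (the function bsp_enable_caches below is invented and is
not part of this patch), enables both caches early during BSP startup:

    extern void powerpc_instruction_cache_enable( void );
    extern void powerpc_data_cache_enable( void );

    void bsp_enable_caches( void )   /* hypothetical BSP startup helper */
    {
      powerpc_instruction_cache_enable();   /* sets HID0[ICE] */
      powerpc_data_cache_enable();          /* sets HID0[DCE] */
    }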
diff --git a/c/src/lib/libcpu/powerpc/old_exception_processing/rtems.S b/c/src/lib/libcpu/powerpc/old_exception_processing/rtems.S
new file mode 100644
index 0000000000..b653152411
--- /dev/null
+++ b/c/src/lib/libcpu/powerpc/old_exception_processing/rtems.S
@@ -0,0 +1,132 @@
+/* rtems.S
+ *
+ * This file contains the single entry point code for
+ * the PowerPC implementation of RTEMS.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/rtems.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+ BEGIN_CODE
+/*
+ * RTEMS
+ *
+ * This routine jumps to the directive indicated in r11.
+ * This routine is used when RTEMS is linked by itself and placed
+ * in ROM. This routine is the first address in the ROM space for
+ * RTEMS. The user "calls" this address with the directive arguments
+ * in the normal place.
+ * This routine then jumps indirectly to the correct directive
+ * preserving the arguments. The directive should not realize
+ * it has been "wrapped" in this way. The table "_Entry_points"
+ * is used to look up the directive.
+ */
+
+ ALIGN (4, 2)
+ PUBLIC_PROC (RTEMS)
+PROC (RTEMS):
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ mflr r0
+ stw r0, 8(r1)
+ stwu r1, -64(r1)
+
+ /* Establish addressing */
+ bl base
+base:
+ mflr r12
+ addi r12, r12, tabaddr - base
+
+ lwz r12, Entry_points-abase(r12)
+ slwi r11, r11, 2
+ lwzx r12, r12, r11
+
+ stw r2, 56(r1)
+ lwz r0, 0(r12)
+ mtlr r0
+ lwz r2, 4(r12)
+ lwz r11, 8(r12)
+ blrl
+ lwz r2, 56(r1)
+ addi r1, r1, 64
+ lwz r0, 8(r1)
+ mtlr r0
+#else
+ mflr r0
+ stw r0, 4(r1)
+ stwu r1, -16(r1)
+
+ /* Establish addressing */
+ bl base
+base:
+ mflr r12
+ addi r12, r12, tabaddr - base
+
+ lwz r12, Entry_points-abase(r12)
+ slwi r11, r11, 2
+ lwzx r11, r12, r11
+
+ stw r2, 8(r1)
+#if (PPC_ABI != PPC_ABI_GCC27)
+ stw r13, 12(r1)
+#endif
+ mtlr r11
+ lwz r11, irqinfo-abase(r12)
+ lwz r2, 0(r11)
+#if (PPC_ABI != PPC_ABI_GCC27)
+ lwz r13, 4(r11)
+#endif
+ blrl
+ lwz r2, 8(r1)
+#if (PPC_ABI != PPC_ABI_GCC27)
+ lwz r13, 12(r1)
+#endif
+ addi r1, r1, 16
+ lwz r0, 4(r1)
+ mtlr r0
+#endif
+ blr
+
+
+ /* Addressability stuff */
+tabaddr:
+abase:
+ EXTERN_VAR (_Entry_points)
+Entry_points:
+ EXT_SYM_REF (_Entry_points)
+#if (PPC_ABI != PPC_ABI_POWEROPEN)
+ EXTERN_VAR (_CPU_IRQ_info)
+irqinfo:
+ EXT_SYM_REF (_CPU_IRQ_info)
+#endif
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ DESCRIPTOR (RTEMS)
+#endif
+
+
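
Stripped of the ABI-specific r2/r13 bookkeeping, the entry point above is just
an indexed, indirect call through _Entry_points that leaves the caller's
arguments where they already are. The fragment below is an editorial sketch,
not part of the patch: the typedef and the name RTEMS_sketch are invented, and
real directive signatures differ per call.

    typedef unsigned int (*rtems_directive_entry)();   /* hypothetical signature */

    extern rtems_directive_entry _Entry_points[];      /* built by the RTEMS core */

    unsigned int RTEMS_sketch( int directive_index )   /* index arrives in r11 */
    {
      return (*_Entry_points[ directive_index ])();    /* arguments pass through */
    }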
diff --git a/c/src/lib/libcpu/powerpc/shared/Makefile.in b/c/src/lib/libcpu/powerpc/shared/Makefile.in
index d752be4daa..d0078a7476 100644
--- a/c/src/lib/libcpu/powerpc/shared/Makefile.in
+++ b/c/src/lib/libcpu/powerpc/shared/Makefile.in
@@ -14,7 +14,7 @@ PROJECT_ROOT = @PROJECT_ROOT@
VPATH = @srcdir@
# C source names, if any, go here -- minus the .c
-C_PIECES =
+C_PIECES = cpuIdent
C_FILES = $(C_PIECES:%=%.c)
C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
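
Adding cpuIdent to C_PIECES builds the new CPU identification code into the
shared libcpu objects. The diff does not show cpuIdent.c itself, but the
declarations added to cpu.h below suggest it reads the Processor Version
Register (PVR, SPR 287), whose upper halfword is the version number the
ppc_cpu_id_t values correspond to and whose lower halfword is the revision. A
minimal sketch under that assumption (the helper names are invented):

    /* Read the PowerPC Processor Version Register (SPR 287). */
    static unsigned int read_pvr( void )
    {
      unsigned int pvr;
      asm volatile( "mfspr %0, 287" : "=r" (pvr) );
      return pvr;
    }

    unsigned int ppc_version_sketch( void )   /* hypothetical: PVR version field */
    {
      return read_pvr() >> 16;
    }

    unsigned int ppc_revision_sketch( void )  /* hypothetical: PVR revision field */
    {
      return read_pvr() & 0xffff;
    }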
diff --git a/c/src/lib/libcpu/powerpc/shared/cpu.h b/c/src/lib/libcpu/powerpc/shared/cpu.h
index 8b886ffc2d..f071683972 100644
--- a/c/src/lib/libcpu/powerpc/shared/cpu.h
+++ b/c/src/lib/libcpu/powerpc/shared/cpu.h
@@ -169,6 +169,30 @@ n:
#define SR14 14
#define SR15 15
+#ifndef ASM
+typedef enum {
+ PPC_601 = 0x1,
+ PPC_603 = 0x3,
+ PPC_604 = 0x4,
+ PPC_603e = 0x6,
+ PPC_603ev = 0x7,
+ PPC_750 = 0x8,
+ PPC_604e = 0x9,
+ PPC_604r = 0xA,
+ PPC_620 = 0x16,
+ PPC_860 = 0x50,
+ PPC_821 = PPC_860,
+ PPC_UNKNOWN = 0xff
+} ppc_cpu_id_t;
+
+typedef unsigned short ppc_cpu_revision_t;
+
+extern ppc_cpu_id_t get_ppc_cpu_type();
+extern ppc_cpu_id_t current_ppc_cpu;
+extern ppc_cpu_revision_t get_ppc_cpu_revision();
+extern ppc_cpu_revision_t current_ppc_revision;
+#endif
+
#define _CPU_MSR_GET( _msr_value ) \
do { \
_msr_value = 0; \
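
The declarations added above give BSP and libcpu code a portable way to branch
on the processor model at run time. A usage sketch, assuming the patched header
is reachable as <libcpu/cpu.h> (that include path, and the helper below, are
assumptions for illustration only):

    #include <libcpu/cpu.h>

    /* Return nonzero on 603/604/750 class CPUs, whose HID0 carries the
     * ICE/DCE cache-enable bits used by ppccache.c above.
     */
    int cpu_has_hid0_cache_enables( void )   /* hypothetical helper */
    {
      switch ( get_ppc_cpu_type() ) {
        case PPC_603:
        case PPC_603e:
        case PPC_603ev:
        case PPC_604:
        case PPC_604e:
        case PPC_604r:
        case PPC_750:
          return 1;
        default:
          return 0;
      }
    }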
diff --git a/c/src/lib/libcpu/powerpc/wrapup/Makefile.in b/c/src/lib/libcpu/powerpc/wrapup/Makefile.in
index b7521a2f93..5535579e1d 100644
--- a/c/src/lib/libcpu/powerpc/wrapup/Makefile.in
+++ b/c/src/lib/libcpu/powerpc/wrapup/Makefile.in
@@ -18,7 +18,7 @@ GENERIC_PIECES = shared
# Use two variables so that I can issue two ar commands.
# So far FAMILY_OBJS is empty and ar dislikes it...
CPU_SPECIFIC_OBJS = $(wildcard ../$(RTEMS_CPU_MODEL)/*/$(ARCH)/*.o)
-FAMILY_OBJS = $(wildcard ../shared/*/$(ARCH)/*.o)
+FAMILY_OBJS = $(wildcard ../shared/$(ARCH)/*.o ../shared/*/$(ARCH)/*.o ../mpc6xx/*/$(ARCH)/*.o)
LIB = $(ARCH)/libcpu.a