author    Ralf Corsepius <ralf.corsepius@rtems.org>  2004-04-21 10:43:04 +0000
committer Ralf Corsepius <ralf.corsepius@rtems.org>  2004-04-21 10:43:04 +0000
commit    6128a4aa5e791ed4e0a655bfd346a52d92da7883 (patch)
tree      af53ca3f67ce405b6fbc6c98399c8e0c87e01a9e /c/src/lib/libbsp/powerpc/shared/bootloader
parent    2004-04-20 Ralf Corsepius <ralf_corsepius@rtems.org> (diff)
download  rtems-6128a4aa5e791ed4e0a655bfd346a52d92da7883.tar.bz2
Remove stray white spaces.
Diffstat (limited to 'c/src/lib/libbsp/powerpc/shared/bootloader')
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/bootldr.h   |  52
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/em86.c      |  88
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/em86real.S  | 530
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/exception.S | 296
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/head.S      | 126
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/lib.c       |   4
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/misc.c      |  74
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/mm.c        | 154
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/pci.c       | 220
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/pci.h       |  24
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/zlib.c      |  36
-rw-r--r--  c/src/lib/libbsp/powerpc/shared/bootloader/zlib.h      |  10
12 files changed, 807 insertions(+), 807 deletions(-)
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/bootldr.h b/c/src/lib/libbsp/powerpc/shared/bootloader/bootldr.h
index 4cb72fc3c1..594737efea 100644
--- a/c/src/lib/libbsp/powerpc/shared/bootloader/bootldr.h
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/bootldr.h
@@ -51,16 +51,16 @@ typedef struct _ctxt {
/* The main structure which is pointed to permanently by r13. Things
* are not separated very well between parts because it would cause
* too much code bloat for such a simple program like the bootloader.
- * The code is designed to be compiled with the -m relocatable option and
- * tries to minimize the number of relocations/fixups and the number of
- * functions that have to access the .got2 sections (this increases the
+ * The code is designed to be compiled with the -m relocatable option and
+ * tries to minimize the number of relocations/fixups and the number of
+ * functions that have to access the .got2 sections (this increases the
* size of the prologue in every function).
*/
typedef struct _boot_data {
RESIDUAL *residual;
void *load_address;
void *of_entry;
- void *r6, *r7, *r8, *r9, *r10;
+ void *r6, *r7, *r8, *r9, *r10;
u_long cache_lsize;
void *image; /* Where to copy ourselves */
void *stack;
@@ -77,80 +77,80 @@ typedef struct _boot_data {
register boot_data *bd __asm__("r13");
extern inline int
-pcibios_read_config_byte(u_char bus, u_char dev_fn,
+pcibios_read_config_byte(u_char bus, u_char dev_fn,
u_char where, u_char * val) {
return bd->pci_functions->read_config_byte(bus, dev_fn, where, val);
}
extern inline int
-pcibios_read_config_word(u_char bus, u_char dev_fn,
+pcibios_read_config_word(u_char bus, u_char dev_fn,
u_char where, u_short * val) {
return bd->pci_functions->read_config_word(bus, dev_fn, where, val);
}
extern inline int
-pcibios_read_config_dword(u_char bus, u_char dev_fn,
+pcibios_read_config_dword(u_char bus, u_char dev_fn,
u_char where, u_int * val) {
return bd->pci_functions->read_config_dword(bus, dev_fn, where, val);
}
extern inline int
-pcibios_write_config_byte(u_char bus, u_char dev_fn,
+pcibios_write_config_byte(u_char bus, u_char dev_fn,
u_char where, u_char val) {
return bd->pci_functions->write_config_byte(bus, dev_fn, where, val);
}
extern inline int
-pcibios_write_config_word(u_char bus, u_char dev_fn,
+pcibios_write_config_word(u_char bus, u_char dev_fn,
u_char where, u_short val) {
return bd->pci_functions->write_config_word(bus, dev_fn, where, val);
}
extern inline int
-pcibios_write_config_dword(u_char bus, u_char dev_fn,
+pcibios_write_config_dword(u_char bus, u_char dev_fn,
u_char where, u_int val) {
return bd->pci_functions->write_config_dword(bus, dev_fn, where, val);
}
extern inline int
pci_read_config_byte(struct pci_dev *dev, u_char where, u_char * val) {
- return bd->pci_functions->read_config_byte(dev->bus->number,
- dev->devfn,
+ return bd->pci_functions->read_config_byte(dev->bus->number,
+ dev->devfn,
where, val);
}
extern inline int
pci_read_config_word(struct pci_dev *dev, u_char where, u_short * val) {
- return bd->pci_functions->read_config_word(dev->bus->number,
- dev->devfn,
+ return bd->pci_functions->read_config_word(dev->bus->number,
+ dev->devfn,
where, val);
}
extern inline int
pci_read_config_dword(struct pci_dev *dev, u_char where, u_int * val) {
- return bd->pci_functions->read_config_dword(dev->bus->number,
- dev->devfn,
+ return bd->pci_functions->read_config_dword(dev->bus->number,
+ dev->devfn,
where, val);
}
extern inline int
pci_write_config_byte(struct pci_dev *dev, u_char where, u_char val) {
- return bd->pci_functions->write_config_byte(dev->bus->number,
- dev->devfn,
+ return bd->pci_functions->write_config_byte(dev->bus->number,
+ dev->devfn,
where, val);
}
extern inline int
pci_write_config_word(struct pci_dev *dev, u_char where, u_short val) {
- return bd->pci_functions->write_config_word(dev->bus->number,
- dev->devfn,
+ return bd->pci_functions->write_config_word(dev->bus->number,
+ dev->devfn,
where, val);
}
extern inline int
pci_write_config_dword(struct pci_dev *dev, u_char where, u_int val) {
- return bd->pci_functions->write_config_dword(dev->bus->number,
- dev->devfn,
+ return bd->pci_functions->write_config_dword(dev->bus->number,
+ dev->devfn,
where, val);
}
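
[Illustration, not part of the patch: the wrappers above simply forward to the
function table kept in the boot_data structure. A minimal usage sketch; the
helper name and the use of the standard PCI config offsets 0x00 (vendor ID)
and 0x02 (device ID) are assumptions of this note.]

/* Hypothetical example: identify a device through the wrappers above. */
static void probe_ids(struct pci_dev *dev) {
	u_short vendor, device;
	pci_read_config_word(dev, 0x00, &vendor);   /* standard vendor ID offset */
	pci_read_config_word(dev, 0x02, &device);   /* standard device ID offset */
	printf("pci %02x:%02x -> %04x:%04x\n",
	       dev->bus->number, dev->devfn, vendor, device);
}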
@@ -159,12 +159,12 @@ pci_write_config_dword(struct pci_dev *dev, u_char where, u_int val) {
* zero, it performs more or less like memmove. No copy is performed if
* source and destination addresses are equal. However the caches
* are synchronized. Note that the size is always rounded up to the
- * next multiple of 4.
+ * next multiple of 4.
*/
extern void * codemove(void *, const void *, size_t, unsigned long);
 /* The physical memory allocator allows memory to be aligned by
- * powers of 2 given by the lower order bits of flags.
+ * powers of 2 given by the lower order bits of flags.
 * By default it allocates from higher addresses towards lower ones,
* setting PA_LOW reverses this behaviour.
*/
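
[Illustration, not part of the patch: codemove above takes (destination,
source, size, cache line size) and, per its comment, always synchronizes the
caches and rounds the size up to a multiple of 4. A minimal sketch; the
destination address is made up for the example.]

/* Hypothetical: relocate 64 KB of code while keeping I/D caches coherent. */
static void relocate_example(void) {
	void *dst = (void *)0x00100000;   /* illustrative destination only */
	codemove(dst, bd->load_address, 0x10000, bd->cache_lsize);
}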
@@ -212,11 +212,11 @@ int find_max_mem(struct pci_dev *);
#ifdef ASM
/* These definitions simplify the ugly declarations necessary for
- * GOT definitions.
+ * GOT definitions.
*/
#define GOT_ENTRY(NAME) .L_ ## NAME = . - .LCTOC1 ; .long NAME
-#define GOT(NAME) .L_ ## NAME (r30)
+#define GOT(NAME) .L_ ## NAME (r30)
#define START_GOT \
.section ".got2","aw"; \
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/em86.c b/c/src/lib/libbsp/powerpc/shared/bootloader/em86.c
index be444b2279..1ca667a82f 100644
--- a/c/src/lib/libbsp/powerpc/shared/bootloader/em86.c
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/em86.c
@@ -16,7 +16,7 @@
*/
/*****************************************************************************
-*
+*
* Code to interpret Video BIOS ROM routines.
*
*
@@ -33,7 +33,7 @@
#endif
-/* Code options, put them on the compiler command line */
+/* Code options, put them on the compiler command line */
/* #define EIP_STATS */ /* EIP based profiling */
/* #undef EIP_STATS */
@@ -61,10 +61,10 @@ typedef struct _x86 {
*esbase, *csbase, *ssbase, *dsbase, *fsbase, *gsbase;
volatile unsigned char *iobase;
unsigned char *ioperm;
- unsigned
+ unsigned
reason, nexteip, parm1, parm2, opcode, base;
unsigned *optable, opreg; /* no more used! */
- unsigned char* vbase;
+ unsigned char* vbase;
unsigned instructions;
#ifdef __BOOT__
u_char * ram;
@@ -80,7 +80,7 @@ x86 v86_private __attribute__((aligned(32)));
/* Emulator is in another source file */
-extern
+extern
void em86_enter(x86 * p);
#define EAX (p->_eax.e)
@@ -116,19 +116,19 @@ void em86_enter(x86 * p);
static void dump86(x86 * p){
unsigned char *s = p->csbase + p->eip;
printf("cs:eip=%04x:%08x, eax=%08x, ecx=%08x, edx=%08x, ebx=%08x\n",
- p->cs, p->eip, ld_le32(&EAX),
+ p->cs, p->eip, ld_le32(&EAX),
ld_le32(&ECX), ld_le32(&EDX), ld_le32(&EBX));
printf("ss:esp=%04x:%08x, ebp=%08x, esi=%08x, edi=%08x, efl=%08x\n",
- p->ss, ld_le32(&ESP), ld_le32(&EBP),
+ p->ss, ld_le32(&ESP), ld_le32(&EBP),
ld_le32(&ESI), ld_le32(&EDI), p->eflags);
printf("nip=%08x, ds=%04x, es=%04x, fs=%04x, gs=%04x, total=%d\n",
p->nexteip, p->ds, p->es, p->fs, p->gs, p->instructions);
- printf("code: %02x %02x %02x %02x %02x %02x "
+ printf("code: %02x %02x %02x %02x %02x %02x "
"%02x %02x %02x %02x %02x %02x\n",
- s[0], s[1], s[2], s[3], s[4], s[5],
+ s[0], s[1], s[2], s[3], s[4], s[5],
s[6], s[7], s[8], s[9], s[10], s[11]);
#ifndef __BOOT__
- printf("op1=%08x, op2=%08x, result=%08x, flags=%08x\n",
+ printf("op1=%08x, op2=%08x, result=%08x, flags=%08x\n",
p->filler[11], p->filler[12], p->filler[13], p->filler[14]);
#endif
}
@@ -139,10 +139,10 @@ static void dump86(x86 * p){
int bios86pci(x86 * p) {
unsigned reg=ld_le16(&DI);
reg_type2 tmp;
-
+
if (AL>=8 && AL<=13 && reg>0xff) {
AH = PCIBIOS_BAD_REGISTER_NUMBER;
- } else {
+ } else {
switch(AL) {
case 2: /* find_device */
/* Should be improved for BIOS able to handle
@@ -222,13 +222,13 @@ int int10h(x86 * p) { /* Process BIOS video interrupt */
#else
p->eflags = (p->eflags&0xfcff)|0x100; /* Set TF for debugging */
#endif
- /* p->eflags|=0x100; uncomment to force a trap */
+ /* p->eflags|=0x100; uncomment to force a trap */
return(0);
} else {
switch(AH) {
case 0x12:
switch(BL){
- case 0x32:
+ case 0x32:
p->eip=p->nexteip;
return(0);
break;
@@ -238,7 +238,7 @@ int int10h(x86 * p) { /* Process BIOS video interrupt */
default:
break;
}
- printf("unhandled soft interrupt 0x10: vector=%x\n", vector);
+ printf("unhandled soft interrupt 0x10: vector=%x\n", vector);
return(1);
}
}
@@ -261,11 +261,11 @@ int process_softint(x86 * p) {
}
dump86(p);
printf("Unhandled soft interrupt number 0x%04x, AX=0x%04x\n",
- p->parm1, ld_le16(&AX));
+ p->parm1, ld_le16(&AX));
return(1);
}
-/* The only function called back by the emulator is em86_trap, all
+/* The only function called back by the emulator is em86_trap, all
   instructions that may change the code segment are trapped here.
p->reason is one of the following codes. */
#define code_zerdiv 0
@@ -275,7 +275,7 @@ int process_softint(x86 * p) {
#define code_bound 5
#define code_ud 6
#define code_dna 7
-
+
#define code_iretw 256
#define code_iretl 257
#define code_lcallw 258
@@ -290,8 +290,8 @@ int process_softint(x86 * p) {
- The three LSB define the port size (1, 2 or 4)
- bit of weight 512 means out if set, in if clear
- bit of weight 256 means ins/outs if set, in/out if clear
- - bit of weight 128 means use esi/edi if set, si/di if clear
- (only used for ins/outs instructions, always clear for in/out)
+ - bit of weight 128 means use esi/edi if set, si/di if clear
+ (only used for ins/outs instructions, always clear for in/out)
*/
#define code_inb 1024+1
#define code_inw 1024+2
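
[Illustration, not part of the patch: the port-access reason codes above pack
size, direction, string form, and index width into one integer. Decode helpers,
with names of our choosing, make the layout concrete.]

/* Decoders for the code_in.. and code_out.. reason values listed above. */
static int io_width(unsigned reason)      { return reason & 7; }          /* 1, 2 or 4 bytes */
static int io_is_out(unsigned reason)     { return (reason & 512) != 0; }
static int io_is_string(unsigned reason)  { return (reason & 256) != 0; } /* ins/outs */
static int io_wide_index(unsigned reason) { return (reason & 128) != 0; } /* esi/edi vs si/di */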
@@ -327,7 +327,7 @@ int em86_trap(x86 *p) {
switch(p->reason) {
case code_int3:
#ifndef __BOOT__
- if(p->csbase+p->eip == bptaddr) {
+ if(p->csbase+p->eip == bptaddr) {
*bptaddr=bptopc;
bptaddr=NULL;
}
@@ -352,8 +352,8 @@ int em86_trap(x86 *p) {
if(bptaddr) *bptaddr=bptopc;
t=strtok(0," \n");
i=sscanf(t,"%x",&tmp);
- if(i==1) {
- bptaddr=p->vbase + tmp;
+ if(i==1) {
+ bptaddr=p->vbase + tmp;
bptopc=*bptaddr;
*bptaddr=0xcc;
} else bptaddr=NULL;
@@ -362,13 +362,13 @@ int em86_trap(x86 *p) {
case 'Q':
return 1;
break;
-
+
case 'g':
case 'G':
p->eflags &= ~0x100;
return 0;
break;
-
+
case 's':
case 'S': /* Print the 8 stack top words */
fp = (unsigned short *)(p->ssbase+ld_le16(&SP));
@@ -390,7 +390,7 @@ int em86_trap(x86 *p) {
break;
case code_ud:
printf("Attempt to execute an unimplemented"
- "or undefined opcode!\n");
+ "or undefined opcode!\n");
dump86(p);
return(1); /* exit interpreter */
break;
@@ -433,7 +433,7 @@ int em86_trap(x86 *p) {
for(i=p->parm1; i<p->parm1+(p->reason&7); i++) {
p->ioperm[i/8] &= ~(1<<i%8);
}
- printf("Access to ports %04x-%04x enabled.\n",
+ printf("Access to ports %04x-%04x enabled.\n",
p->parm1, p->parm1+(p->reason&7)-1);
return(0);
#endif
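
[Illustration, not part of the patch: the loop above clears one bit per enabled
port, so a set bit in the ioperm bitmap means "trap the access". A C
restatement of the test; the helper name is ours.]

/* Set bit => emulate/trap the access; clear bit => direct hardware I/O. */
static int port_trapped(const unsigned char *ioperm, unsigned port) {
	return (ioperm[port / 8] >> (port % 8)) & 1;
}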
@@ -451,7 +451,7 @@ int em86_trap(x86 *p) {
void cleanup_v86_mess(void) {
x86 *p = (x86 *) bd->v86_private;
-
+
/* This automatically removes the mappings ! */
vfree(p->vbase);
p->vbase = 0;
@@ -460,11 +460,11 @@ void cleanup_v86_mess(void) {
sfree(p->ioperm);
p->ioperm=0;
}
-
+
int init_v86(void) {
x86 *p = (x86 *) bd->v86_private;
-
+
/* p->vbase is non null when the v86 is properly set-up */
if (p->vbase) return 0;
@@ -485,7 +485,7 @@ int init_v86(void) {
/* These calls should never fail. */
vmap(p->vbase, (u_long)p->ram|PTE_RAM, 0xa0000);
vmap(p->vbase+0x100000, (u_long)p->ram|PTE_RAM, 0x10000);
- vmap(p->vbase+0xa0000,
+ vmap(p->vbase+0xa0000,
((u_long)ptr_mem_map->isa_mem_base+0xa0000)|PTE_IO, 0x20000);
return 0;
}
@@ -502,7 +502,7 @@ void em86_main(struct pci_dev *dev){
#define IOMASK 0
#endif
-
+
#ifndef __BOOT__
int i;
/* Allow or disable access to all ports */
@@ -523,21 +523,21 @@ void em86_main(struct pci_dev *dev){
AH=dev->bus->number;
AL=dev->devfn;
- /* All other registers are irrelevant except ES:DI which
+ /* All other registers are irrelevant except ES:DI which
* should point to a PnP installation check block. This
* is not yet implemented due to lack of references. */
/* Store a return address of 0xffff:0xffff as eyecatcher */
*(u_int *)(p->ssbase+ld_le16(&SP)) = UINT_MAX;
-
+
/* Interrupt for BIOS EGA services is 0xf000:0xf065 (int 0x10) */
st_le32((u_int *)p->vbase + 0x10, 0xf000f065);
-
+
/* Enable the ROM, read it and disable it immediately */
pci_read_config_dword(dev, PCI_ROM_ADDRESS, &saved_rom);
pci_write_config_dword(dev, PCI_ROM_ADDRESS, 0x000c0001);
- /* Check that there is an Intel ROM. Should we also check that
+ /* Check that there is an Intel ROM. Should we also check that
* the first instruction is a jump (0xe9 or 0xeb) ?
*/
signature = *(u_short *)(ptr_mem_map->isa_mem_base+0xc0000);
@@ -551,26 +551,26 @@ void em86_main(struct pci_dev *dev){
if (!p->rom) return;
- for(dst=(u_int *) p->rom,
+ for(dst=(u_int *) p->rom,
src=(volatile u_int *)(ptr_mem_map->isa_mem_base+0xc0000),
- left = length*512/sizeof(u_int);
- left--;
+ left = length*512/sizeof(u_int);
+ left--;
*dst++=*src++);
-
- /* Disable the ROM and map the copy in virtual address space, note
+
+ /* Disable the ROM and map the copy in virtual address space, note
* that the ROM has to be mapped as RAM since some BIOSes (at least
* Cirrus) perform write accesses to their own ROM. The reason seems
* to be that they check that they must execute from shadow RAM
- * because accessing the ROM prevents accessing the video RAM
+ * because accessing the ROM prevents accessing the video RAM
* according to comments in linux/arch/alpha/kernel/bios32.c.
*/
-
+
pci_write_config_dword(dev, PCI_ROM_ADDRESS, saved_rom);
vmap(p->vbase+0xc0000, (u_long)p->rom|PTE_RAM, length*512);
/* Now actually emulate the ROM init routine */
em86_enter(p);
-
+
/* Free the acquired resources */
vunmap(p->vbase+0xc0000);
pfree(p->rom);
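
[Illustration, not part of the patch: the signature test and the length*512
arithmetic in em86_main match the standard PC expansion-ROM header, sketched
here for reference; the helper is ours.]

/* Standard expansion ROM header at c000:0: bytes 0x55 0xAA, then the
 * ROM length in 512-byte units (the 'length' used in the copy loop). */
static int rom_present(const volatile unsigned char *rom) {
	return rom[0] == 0x55 && rom[1] == 0xaa;
}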
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/em86real.S b/c/src/lib/libbsp/powerpc/shared/bootloader/em86real.S
index 120b5c09ee..ad38fb24fb 100644
--- a/c/src/lib/libbsp/powerpc/shared/bootloader/em86real.S
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/em86real.S
@@ -16,7 +16,7 @@
*/
/* If the symbol __BOOT__ is defined, a slightly different version is
- * generated to be compiled with the -m relocatable option
+ * generated to be compiled with the -m relocatable option
*/
#ifdef __BOOT__
@@ -24,23 +24,23 @@
/* It is impossible to gather statistics in the boot version */
#undef EIP_STATS
#endif
-
+
/*
*
* Given the size of this code, it deserves a few comments on how it works,
- * and why it was implemented the way it is.
- *
+ * and why it was implemented the way it is.
+ *
* The goal is to have a real mode i486SX emulator to initialize hardware,
* mostly graphics boards, by interpreting ROM BIOSes. The choice of a 486SX
* is logical since this is the lowest processor that PCI ROM BIOSes must run
* on.
- *
+ *
* The goal of this emulator is not performance, but a small enough memory
* footprint to include it in a bootloader.
*
* It is actually likely to be comparable to a 25MHz 386DX on a 200MHz 603e !
- * This is not as serious as it seems since most of the BIOS code performs
- * a lot of accesses to I/O and non-cacheable memory spaces. For such
+ * This is not as serious as it seems since most of the BIOS code performs
+ * a lot of accesses to I/O and non-cacheable memory spaces. For such
* instructions, the execution time is often dominated by bus accesses.
 * Statistics of the code also show that it spends a large fraction of
* the time in loops waiting for vertical retrace or programs one of the
@@ -61,41 +61,41 @@
* (debug registers are impossible to implement at a reasonable cost)
*/
-/* Code options, put them on the compiler command line */
+/* Code options, put them on the compiler command line */
/* #define EIP_STATS */ /* EIP based profiling */
/* #undef EIP_STATS */
/*
* Implementation notes:
*
- * A) flags emulation.
- *
+ * A) flags emulation.
+ *
* The most important decisions when it comes to obtain a reasonable speed
* are related to how the EFLAGS register is emulated.
*
* Note: the code to set up flags is complex, but it is only seldom
- * executed since cmp and test instructions use much faster flag evaluation
- * paths. For example the overflow flag is almost only needed for pushf and
+ * executed since cmp and test instructions use much faster flag evaluation
+ * paths. For example the overflow flag is almost only needed for pushf and
* int. Comparison results only involve (SF^OF) or (SF^OF)+ZF and the
- * implementation is fast in this case.
+ * implementation is fast in this case.
*
* Rarely used flags: AC, NT and IOPL are kept in a memory EFLAGS image.
* All other flags are either kept explicitly in PPC cr (DF, IF, and TF) or
* lazily evaluated from the state of 4 registers called flags, result, op1,
- * op2, and sometimes the cr itself. The emulation has been designed for
- * minimal overhead for the common case where the flags are never used. With
- * few exceptions, all instructions that set flags leave the result of the
- * computation in a register called result, and operands are taken from op1
- * and op2 registers. However a few instructions like cmp, test and bit tests
+ * op2, and sometimes the cr itself. The emulation has been designed for
+ * minimal overhead for the common case where the flags are never used. With
+ * few exceptions, all instructions that set flags leave the result of the
+ * computation in a register called result, and operands are taken from op1
+ * and op2 registers. However a few instructions like cmp, test and bit tests
* (bt/btc/btr/bts/bsf/bsr) explicitly set cr bits to short circuit
* condition code evaluation of conditional instructions.
*
* As a very brief summary:
- *
- * - the result of the last flag setting operation is often either in the
- * result register or in op2 after increment or decrement instructions
+ *
+ * - the result of the last flag setting operation is often either in the
+ * result register or in op2 after increment or decrement instructions
* because result and op1 may be needed to compute the carry.
- *
+ *
* - compare instruction leave the result of the unsigned comparison
* in cr4 and of signed comparison in cr6. This means that:
* - cr4[0]=CF (short circuit for jc/jnc)
@@ -103,7 +103,7 @@
* - cr6[0]=(OF^SF) (short circuit for jl/jnl)
* - cr6[1]=~((SF^OF)+ZF) (short circuit for jg/jng)
* - cr6[2]=ZF (short circuit for jz/jnz)
- *
+ *
* - test instruction set flags in cr6 and clear overflow. This means that:
* - cr6[0]=SF=(SF^OF) (short circuit for jl/jnl/js/jns)
* - cr6[1]=~((SF^OF)+ZF) (short circuit for jg/jng)
@@ -111,16 +111,16 @@
*
* All flags may be lazily evaluated from several values kept in registers:
*
- * Flag: Depends upon:
+ * Flag: Depends upon:
* OF result, op1, op2, flags[INCDEC_FIELD,SUBTRACTING,OF_STATE_MASK]
* SF result, op2, flags[INCDEC_FIELD,RES_SIZE]
* ZF result, op2, cr6[2], flags[INCDEC_FIELD,RES_SIZE,ZF_PROTECT]
* AF op1, op2, flags[INCDEC_FIELD,SUBTRACTING,CF_IN]
* PF result, op2, flags[INCDEC_FIELD]
* CF result, op1, flags[CF_STATE_MASK, CF_IN]
- *
- * The order of the fields in the flags register has been chosen so that a
- * single rlwimi is necessary for common instruction that do not affect all
+ *
+ * The order of the fields in the flags register has been chosen so that a
+ * single rlwimi is necessary for common instruction that do not affect all
* flags. (See the code for inc/dec emulation).
*
*
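
[Reading aid, not part of the patch: the lazy evaluation described in the table
above can be modeled in C for the plain 32-bit add/sub case. This is a
simplification; the real code also tracks operand size, inc/dec, and the
explicit CF/OF states.]

typedef struct {
	unsigned result, op1, op2;
	int subtracting;   /* mirrors the SUBTRACTING flag bit */
} lazy_flags;
static int lazy_zf(const lazy_flags *f) { return f->result == 0; }
static int lazy_sf(const lazy_flags *f) { return (int)f->result < 0; }
static int lazy_cf(const lazy_flags *f) {
	/* add: carry out iff result < op1; sub/cmp: borrow iff op1 < op2 */
	return f->subtracting ? (f->op1 < f->op2) : (f->result < f->op1);
}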
@@ -129,8 +129,8 @@
* The register called opcode holds in its low order 8 bits the opcode
* (second byte if the first byte is 0x0f). More precisely it holds the
* last byte fetched before the modrm byte or the immediate operand(s)
- * of the instruction, if any. High order 24 bits are zero unless the
- * instruction has prefixes. These higher order bits have the following
+ * of the instruction, if any. High order 24 bits are zero unless the
+ * instruction has prefixes. These higher order bits have the following
* meaning:
* 0x80000000 segment override prefix
* 0x00001000 repnz prefix (0xf2)
@@ -139,8 +139,8 @@
* 0x00000200 operand size prefix (0x66)
* (bit 0x1000 and 0x800 cannot be set simultaneously)
*
- * Therefore if there is a segment override the value will be between very
- * negative (between 0x80000000 and 0x800016ff), if there is no segment
+ * Therefore if there is a segment override the value will be between very
+ * negative (between 0x80000000 and 0x800016ff), if there is no segment
* override, the value will be between 0 and 0x16ff. The reason for
* this choice will be understood in the next part.
*
@@ -149,7 +149,7 @@
* the encoding of the modrm bytes (especially in 16 bit mode) is quite
* complex. Hence a table, indexed by the five useful bits of the modrm
* byte is used to simplify decoding. Here is a description:
- *
+ *
* bit mask meaning
* 0x80000000 use ss as default segment register
* 0x00004000 means that this addressing mode needs a base register
@@ -164,8 +164,8 @@
* 10: 32 bit addressing mode
* 60: 16 bit addressing mode with %si as index
* 70: 16 bit addressing mode with %di as index
- *
- * This convention leads to the following special values used to check for
+ *
+ * This convention leads to the following special values used to check for
* sib present and displacement-only, which happen to the three lowest
* values in the table (unsigned):
* 0x00003090 sib follows (implies it is a 32 bit mode)
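
[For orientation, not part of the patch: the architecture-defined modrm split
that the table above linearizes.]

/* Generic x86 modrm fields (architecture-defined, not emulator-specific). */
static void split_modrm(unsigned char m, int *mod, int *reg, int *rm) {
	*mod = m >> 6;         /* 0-2: memory forms, 3: register operand */
	*reg = (m >> 3) & 7;   /* register number or opcode extension */
	*rm  = m & 7;          /* base register / addressing form */
}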
@@ -186,11 +186,11 @@
* instruction has no override prefix.
*
* D) BUGS
- *
+ *
* This software is obviously bug-free :-). Nevertheless, if you encounter
 * an interesting feature, mail me a note, if possible with a detailed
* instruction example showing where and how it fails.
- *
+ *
*/
@@ -205,7 +205,7 @@ is actually never checked (real mode is CPL 0 anyway). */
/* Actually NT and IOPL are kept in memory */
#define NT86 17
#define IOPL86 18 /* Actually 18 and 19 */
-#define OF86 20
+#define OF86 20
#define DF86 21
#define IF86 22
#define TF86 23
@@ -222,11 +222,11 @@ is actually never checked (real mode is CPL 0 anyway). */
#define TF 23 /* Single step flag: cr5[3] */
/* Now the flags which are frequently used */
-/*
+/*
* CF_IN is a copy of the input carry with PPC polarity,
* it is cleared for add, set for sub and cmp,
- * equal to the x86 carry for adc and to its complement for sbb.
- * it is used to evaluate AF and CF.
+ * equal to the x86 carry for adc and to its complement for sbb.
+ * it is used to evaluate AF and CF.
*/
#define CF_IN 0x80000000
@@ -237,10 +237,10 @@ is actually never checked (real mode is CPL 0 anyway). */
#define EVAL_CF andis. r3,flags,(CF_IN_CR)>>16; beql- _eval_cf
-/*
- * CF_STATE tells how to compute the carry bit.
- * NOTRESULT16 and NOTRESULT8 are never set explicitly,
- * but they may happen after a cmc instruction.
+/*
+ * CF_STATE tells how to compute the carry bit.
+ * NOTRESULT16 and NOTRESULT8 are never set explicitly,
+ * but they may happen after a cmc instruction.
*/
#define CF 16 /* cr4[0] */
#define CF_LOCATION 0x30000000
@@ -256,7 +256,7 @@ is actually never checked (real mode is CPL 0 anyway). */
#define CF_NOTRES16 0x28000000
#define CF_RES8 0x30000000
#define CF_NOTRES8 0x38000000
-
+
#define CF_ADDL CF_RES32
#define CF_SUBL CF_NOTRES32
#define CF_ADDW CF_RES16
@@ -269,11 +269,11 @@ is actually never checked (real mode is CPL 0 anyway). */
#define CF_POL_INSERT(dst,pos) \
rlwimi dst,flags,(36-pos)%32,pos,pos
#define RES2CF(dst) rlwinm dst,result,8,7,15
-
-/*
+
+/*
* OF_STATE tells how to compute the overflow bit. When the low order bit
* is set (OF_EXPLICIT), it means that OF is the exclusive or of the
- * two other bits. For the reason of this choice, see rotate instructions.
+ * two other bits. For the reason of this choice, see rotate instructions.
*/
#define OF 1 /* Only after EVAL_OF */
#define OF_STATE_MASK 0x07000000
@@ -289,11 +289,11 @@ is actually never checked (real mode is CPL 0 anyway). */
#define OF_ARITHB 0x04000000
#define EVAL_OF rlwinm. r3,flags,6,0,1; bngl+ _eval_of; andis. r3,flags,OF_VALUE>>16
-
+
/* See _eval_of to see how this can be used */
#define OF_ROTCNT(dst) rlwinm dst,flags,10,0x1c
-
-/*
+
+/*
* SIGNED_IN_CR means that cr6 is set as after a signed compare:
* - cr6[0] is SF^OF for jl/jnl/setl/setnl...
* - cr6[1] is ~((SF^OF)+ZF) for jg/jng/setg/setng...
@@ -305,7 +305,7 @@ is actually never checked (real mode is CPL 0 anyway). */
#define EVAL_SIGNED andis. r3,flags,SIGNED_IN_CR>>16; beql- _eval_signed
-/*
+/*
* Above in CR means that cr4 is set as after an unsigned compare:
* - cr4[0] is CF (CF_IN_CR is also set)
* - cr4[1] is ~(CF+ZF) (ZF_IN_CR is also set)
@@ -320,28 +320,28 @@ is actually never checked (real mode is CPL 0 anyway). */
#define SF_IN_CR 0x00200000
#define EVAL_SF andis. r3,flags,SF_IN_CR>>16; beql- _eval_sf_zf
-
+
/* ZF_IN_CR means cr6[2] is a copy of ZF. */
-#define ZF 26
+#define ZF 26
#define ZF_IN_CR 0x00100000
-
+
#define EVAL_ZF andis. r3,flags,ZF_IN_CR>>16; beql- _eval_sf_zf
#define ZF2ZF86(s,d) rlwimi d,s,ZF-ZF86,ZF86,ZF86
#define ZF862ZF(reg) rlwimi reg,reg,32+ZF86-ZF,ZF,ZF
-
-/*
+
+/*
* ZF_PROTECT means cr6[2] is the only valid value for ZF. This is necessary
- * because some infrequent instructions may leave SF and ZF in an apparently
+ * because some infrequent instructions may leave SF and ZF in an apparently
* inconsistent state (both set): sahf, popf and the few (not implemented)
* instructions that only affect ZF.
*/
#define ZF_PROTECT 0x00080000
-
+
/* The parity is always evaluated when it is needed */
#define PF 0 /* Only after EVAL_PF */
#define EVAL_PF bl _eval_pf
-/* This field gives the shift amount to use to evaluate SF
+/* This field gives the shift amount to use to evaluate SF
and ZF when ZF_PROTECT is not set */
#define RES_SIZE_MASK 0x00060000
#define RESL 0x00000000
@@ -355,12 +355,12 @@ is actually never checked (real mode is CPL 0 anyway). */
#define SUBTRACTING 0x00010000
#define GET_ADDSUB(dst) rlwinm dst,flags,16,0x01
-
+
/* rotate (rcl/rcr/rol/ror) affect CF and OF but not other flags */
#define ROTATE_MASK (CF_IN_CR|CF_STATE_MASK|ABOVE_IN_CR|OF_STATE_MASK|SIGNED_IN_CR)
#define ROTATE_FLAGS rlwimi flags,one,24,ROTATE_MASK
-/*
+/*
* INCDEC_FIELD has at most one bit set when the last flag setting instruction
* was either inc or dec (which do not affect the carry). When one of these
* bits is set, it affects the way OF, SF, ZF, AF, and PF are evaluated.
@@ -380,7 +380,7 @@ is actually never checked (real mode is CPL 0 anyway). */
/* Operations to perform to tell where the flags are after inc or dec */
#define INC_FLAGS(BWL) rlwimi flags,one,INC##BWL##_SHIFT,INCDEC_MASK
#define DEC_FLAGS(BWL) rlwimi flags,one,DEC##BWL##_SHIFT,INCDEC_MASK
-
+
/* How the flags are set after arithmetic operations */
#define FLAGS_ADD(BWL) (CF_ADD##BWL|OF_ARITH##BWL|RES##BWL)
#define FLAGS_SBB(BWL) (CF_SUB##BWL|OF_ARITH##BWL|RES##BWL|SUBTRACTING)
@@ -405,7 +405,7 @@ is actually never checked (real mode is CPL 0 anyway). */
/* How the flags are set after multiplies */
#define FLAGS_MUL (CF_EXPLICIT|OF_EXPLICIT)
-
+
#define SET_FLAGS(fl) lis flags,(fl)>>16
#define ADD_FLAGS(fl) addis flags,flags,(fl)>>16
@@ -413,14 +413,14 @@ is actually never checked (real mode is CPL 0 anyway). */
* We are always off by one when compared with Intel's eip, this shortens
* code by allowing to load next byte with lbzu x,1(eip). The register
* called eip actually contains csbase+eip, and thus should be called lip
- * for linear ip.
+ * for linear ip.
*/
-
-/*
- * Reason codes passed to the C part of the emulator, this includes all
- * instructions which may change the current code segment. These definitions
+
+/*
+ * Reason codes passed to the C part of the emulator, this includes all
+ * instructions which may change the current code segment. These definitions
* will soon go into a separate include file. Codes 0 to 255 correspond
- * directly to the interrupt/trap that has to be generated.
+ * directly to the interrupt/trap that has to be generated.
*/
#define code_divide_err 0
@@ -430,7 +430,7 @@ is actually never checked (real mode is CPL 0 anyway). */
#define code_bound 5
#define code_ud 6
#define code_dna 7 /* FPU not available */
-
+
#define code_iretw 256 /* Interrupt returns */
#define code_iretl 257
#define code_lcallw 258 /* Far calls and jumps */
@@ -446,7 +446,7 @@ is actually never checked (real mode is CPL 0 anyway). */
- bit of weight 512 means out if set, in if clear
- bit of weight 256 means ins/outs if set, in/out if clear
- bit of weight 128 means use 32 bit addresses if set, 16 bit if clear
- (only used for ins/outs instructions, always clear for in/out)
+ (only used for ins/outs instructions, always clear for in/out)
*/
#define code_inb 1024+1
#define code_inw 1024+2
@@ -468,13 +468,13 @@ is actually never checked (real mode is CPL 0 anyway). */
#define code_outsl_a32 1024+512+256+128+4
#define state 31
-/* r31 (state) is a pointer to a structure describing the emulated x86
+/* r31 (state) is a pointer to a structure describing the emulated x86
processor, its layout is the following:
first the general purpose registers, they are in little endian byte order
offset name
-
+
0 eax/ax/al
1 ah
4 ecx/cx/cl
@@ -509,10 +509,10 @@ offset name
#define DI 28
#define EDI 28
-/*
+/*
than the rest of the machine state, big endian !
-offset name
+offset name
32 essel segment register selectors (values)
36 cssel
@@ -541,7 +541,7 @@ offset name
128 vbase where the 1Mb memory is mapped
132 cntimg instruction counter
- 136 scratch
+ 136 scratch
192 eipstat array of 32k unsigned long pairs for eip stats
*/
@@ -575,18 +575,18 @@ offset name
#endif
/* Global registers */
-/* Some segment register bases are permanently kept in registers since they
+/* Some segment register bases are permanently kept in registers since they
are often used: these are csb, esb and ssb because they are
required for jumps, string instructions, and pushes/pops/calls/rets.
dsbase is not kept in a register but loaded from memory to allow somewhat
-more parallelism in the main emulation loop.
+more parallelism in the main emulation loop.
*/
#define one 30 /* Constant one, so pervasive */
#define ssb 29
#define csb 28
#define esb 27
-#define eip 26 /* That one is indeed csbase+(e)ip-1 */
+#define eip 26 /* That one is indeed csbase+(e)ip-1 */
#define result 25 /* For the use of result, op1, op2 */
#define op1 24 /* see the section on flag emulation */
#define op2 23
@@ -605,11 +605,11 @@ specified by the modrm byte */
#define adbase 16 /* addressing mode table */
/* Following registers are used only as dedicated temporaries during decoding,
they are free for use during emulation */
-/*
- * ceip (current eip) is only in use when we call the external emulator for
- * instructions that fault. Note that it is forbidden to change flags before
- * the check for the fault happens (divide by zero...) ! ceip is also used
- * when measuring timing.
+/*
+ * ceip (current eip) is only in use when we call the external emulator for
+ * instructions that fault. Note that it is forbidden to change flags before
+ * the check for the fault happens (divide by zero...) ! ceip is also used
+ * when measuring timing.
*/
#define ceip 15
@@ -641,7 +641,7 @@ they are free for use during emulation */
GOT_ENTRY(jtab_www)
GOT_ENTRY(adtable)
END_GOT
-#else
+#else
.text
#endif
.align 2
@@ -717,7 +717,7 @@ exit: lwz r0,100(r1)
mtcr r4
addi r1,r1,96
blr
-
+
trap: crmove 0,RF
crclr RF
bt- 0,resume
@@ -745,23 +745,23 @@ complex: addi eip,eip,1
cmpwi r3,0
bne exit
b restart
-
+
/* Main loop */
-/*
+/*
* The two LSB of each entry in the main table mean the following:
- * 00: indirect opcode: modrm follows and the three middle bits are an
+ * 00: indirect opcode: modrm follows and the three middle bits are an
* opcode extension. The entry points to another jump table.
* 01: direct instruction, branch directly to the routine.
* 10: modrm specifies byte size memory and register operands.
* 11: modrm specifies word/long memory and register operands.
- *
+ *
* The modrm byte, if present, is always loaded in r7.
*
* Note: most "mr x,y" instructions have been replaced by "addi x,y,0" since
- * the latter can be executed in the second integer unit on 603e.
+ * the latter can be executed in the second integer unit on 603e.
*/
-/*
+/*
* This code is very good example of absolutely unmaintainable code.
* It was actually much easier to write than it is to understand !
* If my computations are right, the maximum path length from fetching
@@ -769,7 +769,7 @@ complex: addi eip,eip,1
* 46 instructions (for non-prefixed, single byte opcode instructions).
*
*/
- .align 5
+ .align 5
#ifdef EIP_STATS
nop: NEXTBYTE(opcode)
gotopcode: slwi r3,opcode,2
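
[Illustration, not part of the patch: the two-LSB tagging of jump-table entries
described above decodes mechanically; names are of our choosing.]

enum entry_kind { EXT_TABLE = 0, DIRECT = 1, MODRM_BYTE = 2, MODRM_WORD = 3 };
static enum entry_kind entry_kind_of(unsigned long e) { return (enum entry_kind)(e & 3); }
static unsigned long entry_target(unsigned long e)    { return e & ~3ul; }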
@@ -838,9 +838,9 @@ _ds: NEXTBYTE(r7)
/* Lock (unimplemented) and repeat prefixes */
_lock: li r3,code_lock; b complex
-_repnz: NEXTBYTE(r7); rlwimi opcode,one,12,0x1800; b 2f
+_repnz: NEXTBYTE(r7); rlwimi opcode,one,12,0x1800; b 2f
_repz: NEXTBYTE(r7); rlwimi opcode,one,11,0x1800; b 2f
-
+
/* Operand and address size prefixes */
.align 4
_opsize: NEXTBYTE(r7); ori opcode,opcode,0x200
@@ -859,7 +859,7 @@ _twobytes: NEXTBYTE(r7); addi r3,r3,0x400
NEXTBYTE(r7) # modrm byte
cmpwi cr1,r7,192
rlwinm opreg,r7,31,0x1c
- beq- 6f
+ beq- 6f
/* modrm with middle 3 bits specifying a register (prefixed) */
rlwinm r0,r4,3,0x8
li r4,0x1c0d
@@ -874,7 +874,7 @@ _twobytes: NEXTBYTE(r7); addi r3,r3,0x400
rlwimi r3,r7,31,0x60
lwzx r4,r3,adbase
cmpwi cr1,r4,0x3090
- bnl+ cr1,10f
+ bnl+ cr1,10f
/* displacement only addressing modes */
4: cmpwi r4,0x2000
bne 5f
@@ -882,7 +882,7 @@ _twobytes: NEXTBYTE(r7); addi r3,r3,0x400
bctr
5: NEXTDWORD(offset)
bctr
-/* modrm with opcode extension (prefixed) */
+/* modrm with opcode extension (prefixed) */
6: lwzx r4,r4,opreg
mtctr r4
blt cr1,3b
@@ -914,7 +914,7 @@ _twobytes: NEXTBYTE(r7); addi r3,r3,0x400
rlwinm r3,r4,30,0x1c # 16bit/32bit/%si index/%di index
cmpwi cr1,r3,8 # set cr1 as early as possible
rlwinm r6,r4,26,0x1c # base register
- lwbrx offset,state,r6 # load the base register
+ lwbrx offset,state,r6 # load the base register
beq cr0,14f # no displacement
cmpw cr2,r4,opcode # check for ss as default base
bgt cr0,12f # byte offset
@@ -933,7 +933,7 @@ _twobytes: NEXTBYTE(r7); addi r3,r3,0x400
bgtctr cr2
addi base,ssb,0
bctr
-/* 8 bit displacement */
+/* 8 bit displacement */
12: NEXTBYTE(r5)
extsb r5,r5
bgt cr1,13f
@@ -953,7 +953,7 @@ _twobytes: NEXTBYTE(r7); addi r3,r3,0x400
bgtctr cr2
addi base,ssb,0
bctr
-/* no displacement: only indexed modes may use ss as default base */
+/* no displacement: only indexed modes may use ss as default base */
14: beqctr cr1 # 32 bit register indirect
clrlwi offset,offset,16
bltctr cr1 # 16 bit register indirect
@@ -970,7 +970,7 @@ _twobytes: NEXTBYTE(r7); addi r3,r3,0x400
rlwinm r3,r7,31,0x1c # index
rlwinm offset,r7,2,0x1c # base
cmpwi cr1,r3,ESP # has index ?
- bne cr0,18f # base+d8/d32
+ bne cr0,18f # base+d8/d32
cmpwi offset,EBP
beq 17f # d32(,index,scale)
xori r4,one,0xcc01 # build 0x0000cc00
@@ -1026,25 +1026,25 @@ _twobytes: NEXTBYTE(r7); addi r3,r3,0x400
/*
* Flag evaluation subroutines: they have not been written for performance
- * since they are not often used in practice. The rule of the game was to
+ * since they are not often used in practice. The rule of the game was to
* write them with as few branches as possible.
 * The first routines evaluate either one or two (ZF and SF simultaneously)
* flags and do not use r0 and r7.
* The more complex routines (_eval_above, _eval_signed and _eval_flags)
* call the former ones, using r0 as a return address save register and
- * r7 as a safe temporary.
+ * r7 as a safe temporary.
*/
-/*
+/*
* _eval_sf_zf evaluates simultaneously SF and ZF unless ZF is already valid
* and protected because it is possible, although it is exceptional, to have
- * SF and ZF set at the same time after a few instructions which may leave the
- * flags in this apparently inconsistent state: sahf, popf, iret and the few
- * (for now unimplemented) instructions which only affect ZF (lar, lsl, arpl,
- * cmpxchg8b). This also solves the obscure case of ZF set and PF clear.
+ * SF and ZF set at the same time after a few instructions which may leave the
+ * flags in this apparently inconsistent state: sahf, popf, iret and the few
+ * (for now unimplemented) instructions which only affect ZF (lar, lsl, arpl,
+ * cmpxchg8b). This also solves the obscure case of ZF set and PF clear.
* On return: SF=cr6[0], ZF=cr6[2].
*/
-
+
_eval_sf_zf: andis. r5,flags,ZF_PROTECT>>16
rlwinm r3,flags,0,INCDEC_FIELD
RES_SHIFT(r4)
@@ -1066,7 +1066,7 @@ _eval_sf_zf: andis. r5,flags,ZF_PROTECT>>16
crmove SF,0
blr
-/*
+/*
* _eval_cf may be called at any time, no other flag is affected.
* On return: CF=cr4[0], r3= CF ? 0x100:0 = CF<<8.
*/
@@ -1083,11 +1083,11 @@ _eval_cf: addc r3,flags,flags # CF_IN to xer[ca]
cmplw cr4,one,r3 # sets cr4[0]
blr
-/*
+/*
* eval_of returns the overflow flag in OF_STATE field, which will be
 * either 001 (OF clear) or 101 (OF set); it is only called when the two
- * low order bits of OF_STATE are not 01 (otherwise it will work but
- * it is an elaborate variant of a nop with a few registers destroyed)
+ * low order bits of OF_STATE are not 01 (otherwise it will work but
+ * it is an elaborate variant of a nop with a few registers destroyed)
* The code multiplexes several sources in a branchless way, was fun to write.
*/
_eval_of: GET_ADDSUB(r4) # 0(add)/1(sub)
@@ -1113,7 +1113,7 @@ _eval_of: GET_ADDSUB(r4) # 0(add)/1(sub)
rlwimi flags,r3,3,OF_VALUE # insert OF
blr
-/*
+/*
* _eval_pf will always be called when needed (complex but infrequent),
* there are a few quirks for a branchless solution.
* On return: PF=cr0[0], PF=MSB(r3)
@@ -1135,12 +1135,12 @@ _eval_pf: rlwinm r3,flags,0,INCDEC_FIELD
add. r3,r4,r5 # and test to simplify
blr # returns in r3 and cr0 set.
-/*
+/*
* _eval_af will always be called when needed (complex but infrequent):
* - if after inc, af is set when 4 low order bits of op1 are 0
* - if after dec, af is set when 4 low order bits of op1 are 1
* (or 0 after adding 1 as implemented here)
- * - if after add/sub/adc/sbb/cmp af is set from sum of 4 LSB of op1
+ * - if after add/sub/adc/sbb/cmp af is set from sum of 4 LSB of op1
* and 4 LSB of op2 (eventually complemented) plus carry in.
* - other instructions leave AF undefined so the returned value is irrelevant.
* Returned value must be masked with 0x10, since all other bits are undefined.
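
[Illustration, not part of the patch: a C restatement of the add/sub rule
above. carry_in is assumed to already have the PPC polarity described earlier
(cleared for add, set for sub/cmp); the helper name is ours.]

/* Returns AF in bit 4; callers mask with 0x10 as the comment requires. */
static unsigned af_addsub(unsigned op1, unsigned op2,
                          unsigned carry_in, int subtracting) {
	unsigned b = subtracting ? ~op2 : op2;   /* operand complemented for sub */
	return ((op1 & 0xf) + (b & 0xf) + (carry_in & 1)) & 0x10;
}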
@@ -1164,7 +1164,7 @@ _eval_af: rlwinm r3,flags,0,INCDEC_FIELD
or r3,r4,r5
blr
-/*
+/*
* _eval_above will only be called if ABOVE_IN_CR is not set.
* On return: ZF=cr6[2], CF=cr4[0], ABOVE=cr4[1]
*/
@@ -1218,7 +1218,7 @@ _eval_flags: mflr r0
/* Quite simple for real mode, input in r4, returns in r3. */
_segment_load: lwz r5,vbase(state)
- rlwinm r3,r4,4,0xffff0 # segment selector * 16
+ rlwinm r3,r4,4,0xffff0 # segment selector * 16
add r3,r3,r5
blr
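
[Illustration, not part of the patch: in C terms, _segment_load applies the
classic real-mode rule, offset into the emulator's mapped megabyte (vbase).]

/* linear = vbase + selector*16; instruction offsets are added later. */
static unsigned char *seg_base(unsigned char *vbase, unsigned short sel) {
	return vbase + ((unsigned)sel << 4);
}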
@@ -1234,10 +1234,10 @@ _check_port: lwz r5,ioperm(state)
and. r0,r0,r5
bne- complex
blr
-/*
+/*
* Instructions are in approximate functional order:
- * 1) move, exchange, lea, push/pop, pusha/popa
- * 2) cbw/cwde/cwd/cdq, zero/sign extending moves, in/out
+ * 1) move, exchange, lea, push/pop, pusha/popa
+ * 2) cbw/cwde/cwd/cdq, zero/sign extending moves, in/out
* 3) arithmetic: add/sub/adc/sbb/cmp/inc/dec/neg
* 4) logical: and/or/xor/test/not/bt/btc/btr/bts/bsf/bsr
* 5) jump, call, ret
@@ -1256,20 +1256,20 @@ _check_port: lwz r5,ioperm(state)
movb_imm_reg: rlwinm opreg,opcode,2,28,29; lbz r3,1(eip)
rlwimi opreg,opcode,30,31,31; lbzu opcode,2(eip)
stbx r3,REG; GOTNEXT
-
-movw_imm_reg: lhz r3,1(eip); clrlslwi opreg,opcode,29,2; lbzu opcode,3(eip)
- sthx r3,REG; GOTNEXT
-
-movl_imm_reg: lwz r3,1(eip); clrlslwi opreg,opcode,29,2; lbzu opcode,5(eip)
- stwx r3,REG; GOTNEXT
-
+
+movw_imm_reg: lhz r3,1(eip); clrlslwi opreg,opcode,29,2; lbzu opcode,3(eip)
+ sthx r3,REG; GOTNEXT
+
+movl_imm_reg: lwz r3,1(eip); clrlslwi opreg,opcode,29,2; lbzu opcode,5(eip)
+ stwx r3,REG; GOTNEXT
+
movb_imm_mem: lbz r0,1(eip); cmpwi opreg,0
lbzu opcode,2(eip); bne- ud
stbx r0,MEM; GOTNEXT
movw_imm_mem: lhz r0,1(eip); cmpwi opreg,0
lbzu opcode,3(eip); bne- ud
- sthx r0,MEM; GOTNEXT
+ sthx r0,MEM; GOTNEXT
movl_imm_mem: lwz r0,1(eip); cmpwi opreg,0
lbzu opcode,5(eip); bne- ud
@@ -1277,7 +1277,7 @@ movl_imm_mem: lwz r0,1(eip); cmpwi opreg,0
/* The special short form moves between memory and al/ax/eax */
movb_al_a32: lwbrx offset,eip,one; lbz r0,AL(state); lbzu opcode,5(eip)
- stbx r0,MEM; GOTNEXT
+ stbx r0,MEM; GOTNEXT
movb_al_a16: lhbrx offset,eip,one; lbz r0,AL(state); lbzu opcode,3(eip)
stbx r0,MEM; GOTNEXT
@@ -1298,13 +1298,13 @@ movb_a32_al: lwbrx offset,eip,one; lbzu opcode,5(eip); lbzx r0,MEM
stb r0,AL(state); GOTNEXT
movb_a16_al: lhbrx offset,eip,one; lbzu opcode,3(eip); lbzx r0,MEM
- stb r0,AL(state); GOTNEXT
+ stb r0,AL(state); GOTNEXT
movw_a32_ax: lwbrx offset,eip,one; lbzu opcode,5(eip); lhzx r0,MEM
sth r0,AX(state); GOTNEXT
movw_a16_ax: lhbrx offset,eip,one; lbzu opcode,3(eip); lhzx r0,MEM
- sth r0,AX(state); GOTNEXT
+ sth r0,AX(state); GOTNEXT
movl_a32_eax: lwbrx offset,eip,one; lbzu opcode,5(eip); lwzx r0,MEM
stw r0,EAX(state); GOTNEXT
@@ -1384,12 +1384,12 @@ leaw: cmpw base,state
beq- ud
sthbrx offset,REG
NEXT
-
+
leal: cmpw base,state
beq- ud
stwbrx offset,REG
NEXT
-
+
/* Short form pushes and pops */
pushw_sp_reg: li r3,SP
lhbrx r4,state,r3
@@ -1400,7 +1400,7 @@ pushw_sp_reg: li r3,SP
clrlwi r4,r4,16
sthx r0,ssb,r4
NEXT
-
+
pushl_sp_reg: li r3,SP
lhbrx r4,state,r3
clrlslwi opreg,opcode,29,2
@@ -1410,7 +1410,7 @@ pushl_sp_reg: li r3,SP
clrlwi r4,r4,16
stwx r0,ssb,r4
NEXT
-
+
popw_sp_reg: li r3,SP
lhbrx r4,state,r3
clrlslwi opreg,opcode,29,2
@@ -1419,7 +1419,7 @@ popw_sp_reg: li r3,SP
sthbrx r4,state,r3
sthx r0,REG
NEXT
-
+
popl_sp_reg: li r3,SP
lhbrx r4,state,r3
clrlslwi opreg,opcode,29,2
@@ -1437,9 +1437,9 @@ pushw_sp_imm: li r3,SP
sthbrx r4,state,r3
clrlwi r4,r4,16
lbzu opcode,3(eip)
- sthx r0,ssb,r4
+ sthx r0,ssb,r4
GOTNEXT
-
+
pushl_sp_imm: li r3,SP
lhbrx r4,state,r3
lwz r0,1(eip)
@@ -1447,7 +1447,7 @@ pushl_sp_imm: li r3,SP
sthbrx r4,state,r3
clrlwi r4,r4,16
lbzu opcode,5(eip)
- stwx r0,ssb,r4
+ stwx r0,ssb,r4
GOTNEXT
pushw_sp_imm8: li r3,SP
@@ -1458,9 +1458,9 @@ pushw_sp_imm8: li r3,SP
clrlwi r4,r4,16
lbzu opcode,2(eip)
extsb r0,r0
- sthx r0,ssb,r4
+ sthx r0,ssb,r4
GOTNEXT
-
+
pushl_sp_imm8: li r3,SP
lhbrx r4,state,r3
lhz r0,1(eip)
@@ -1469,9 +1469,9 @@ pushl_sp_imm8: li r3,SP
clrlwi r4,r4,16
lbzu opcode,2(eip)
extsb r0,r0
- stwx r0,ssb,r4
+ stwx r0,ssb,r4
GOTNEXT
-
+
/* General push/pop */
pushw_sp: lhbrx r0,MEM
li r3,SP
@@ -1481,7 +1481,7 @@ pushw_sp: lhbrx r0,MEM
clrlwi r4,r4,16
sthbrx r0,r4,ssb
NEXT
-
+
pushl_sp: lwbrx r0,MEM
li r3,SP
lhbrx r4,state,r3
@@ -1490,11 +1490,11 @@ pushl_sp: lwbrx r0,MEM
clrlwi r4,r4,16
stwbrx r0,r4,ssb
NEXT
-
+
/* pop is an exception with 32 bit addressing modes, it is possible
to calculate wrongly the address when esp is used as base. But 16 bit
addressing modes are safe */
-
+
popw_sp_a16: cmpw cr1,opreg,0 # first check the opcode
li r3,SP
lhbrx r4,state,r3
@@ -1504,7 +1504,7 @@ popw_sp_a16: cmpw cr1,opreg,0 # first check the opcode
sthx r0,MEM
sthbrx r4,state,r3
NEXT
-
+
popl_sp_a16: cmpw cr1,opreg,0
li r3,SP
lhbrx r4,state,r3
@@ -1558,7 +1558,7 @@ popaw_sp: li r3,SP
bdnz 1b
sthbrx r4,r3,state # updated sp
NEXT
-
+
popal_sp: li r3,SP
lis r0,0xef00 # mask to skip esp
lhbrx r4,state,r3
@@ -1577,12 +1577,12 @@ popal_sp: li r3,SP
2: sthbrx r4,state,r3 # updated sp
NEXT
-/* Moves with zero or sign extension: first the special cases */
+/* Moves with zero or sign extension: first the special cases */
cbw: lbz r3,AL(state)
extsb r3,r3
sthbrx r3,AX,state
NEXT
-
+
cwde: lhbrx r3,AX,state
extsh r3,r3
stwbrx r3,EAX,state
@@ -1618,12 +1618,12 @@ movsbl: lbzx r3,MEM
NEXT
.equ movsww, movw_mem_reg
-
+
movswl: lhbrx r3,MEM
extsh r3,r3
stwbrx r3,REG
NEXT
-
+
movzbw: lbzx r3,MEM
rlwimi opreg,opreg,4,0x10
rlwinm opreg,opreg,0,0x1c
@@ -1635,19 +1635,19 @@ movzbl: lbzx r3,MEM
rlwinm opreg,opreg,0,0x1c
stwbrx r3,REG
NEXT
-
+
.equ movzww, movw_mem_reg
movzwl: lhbrx r3,MEM
stwbrx r3,REG
NEXT
-/* Byte swapping */
+/* Byte swapping */
bswap: clrlslwi opreg,opcode,29,2 # extract reg from opcode
lwbrx r0,REG
stwx r0,REG
NEXT
-
+
/* Input/output */
inb_port_al: NEXTBYTE(r4)
b 1f
@@ -1659,8 +1659,8 @@ inb_dx_al: li r4,DX
lbzx r5,r4,r3
eieio
stb r5,AL(state)
- NEXT
-
+ NEXT
+
inw_port_ax: NEXTBYTE(r4)
b 1f
inw_dx_ax: li r4,DX
@@ -1671,8 +1671,8 @@ inw_dx_ax: li r4,DX
lhzx r5,r4,r3
eieio
sth r5,AX(state)
- NEXT
-
+ NEXT
+
inl_port_eax: NEXTBYTE(r4)
b 1f
inl_dx_eax: li r4,DX
@@ -1684,7 +1684,7 @@ inl_dx_eax: li r4,DX
eieio
stw r5,EAX(state)
NEXT
-
+
outb_al_port: NEXTBYTE(r4)
b 1f
outb_al_dx: li r4,DX
@@ -1695,8 +1695,8 @@ outb_al_dx: li r4,DX
lbz r5,AL(state)
stbx r5,r4,r3
eieio
- NEXT
-
+ NEXT
+
outw_ax_port: NEXTBYTE(r4)
b 1f
outw_ax_dx: li r4,DX
@@ -1707,8 +1707,8 @@ outw_ax_dx: li r4,DX
lhz r5,AX(state)
sthx r5,r4,r3
eieio
- NEXT
-
+ NEXT
+
outl_eax_port: NEXTBYTE(r4)
b 1f
outl_eax_dx: li r4,DX
@@ -1825,13 +1825,13 @@ carryforadc: addc r3,flags,flags # CF_IN to xer[ca]
blr
ARITH_WITH_CARRY(adc, FLAGS_ADD)
-
+
/* for sbb the input carry must be the complement of the x86 carry */
carryforsbb: addc r3,flags,flags # CF_IN to xer[ca]
RES2CF(r4) # 8/16 bit carry from result
subfe r3,result,op1
CF_ROTCNT(r5)
- addze r3,r4
+ addze r3,r4
CF_POL(r4,23)
rlwnm r3,r3,r5,0x100
eqv flags,r4,r3 # CF86 ? 0xfffffeff:0xffffffff
@@ -1934,7 +1934,7 @@ cmpw_imm8: lbz op2,1(eip)
sub result,op1,op2
cmplw cr4,op1,op2
GOTNEXT
-
+
cmpl_imm_eax: addi base,state,0
li offset,EAX
cmpl_imm: lwbrx op1,MEM
@@ -2082,7 +2082,7 @@ op##l_imm8: lbz op2,1(eip); SET_FLAGS(FLAGS_LOG(L)); lwbrx op1,MEM; \
extsb op2,op2; lbzu opcode,2(eip); \
op result,op1,op2; \
stwbrx result,MEM; GOTNEXT
-
+
LOGICAL(or)
LOGICAL(and)
@@ -2149,17 +2149,17 @@ notb: lbzx r3,MEM
xori r3,r3,255
stbx r3,MEM
NEXT
-
+
notw: lhzx r3,MEM
xori r3,r3,65535
sthx r3,MEM
NEXT
-
+
notl: lwzx r3,MEM
not r3,r3
stwx r3,MEM
NEXT
-
+
boundw: lhbrx r4,REG
li r3,code_bound
lhbrx r5,MEM
@@ -2173,7 +2173,7 @@ boundw: lhbrx r4,REG
cmpw r4,r6
ble+ nop
b complex
-
+
boundl: lwbrx r4,REG
li r3,code_bound
lwbrx r5,MEM
@@ -2186,10 +2186,10 @@ boundl: lwbrx r4,REG
b complex
/* Bit test and modify instructions */
-
-/* Common routine: bit index in op2, returns memory value in r3, mask in op2,
-and of mask and value in op1. CF flag is set as with 32 bit add when bit is
-non zero since result (which is cleared) will be less than op1, and in cr4,
+
+/* Common routine: bit index in op2, returns memory value in r3, mask in op2,
+and of mask and value in op1. CF flag is set as with 32 bit add when bit is
+non zero since result (which is cleared) will be less than op1, and in cr4,
all other flags are undefined from Intel doc. Here OF and SF are cleared
and ZF is set as a side effect of result being cleared. */
_setup_bitw: cmpw base,state
@@ -2205,7 +2205,7 @@ _setup_bitw: cmpw base,state
and op1,r3,op2 # if result<op1
cmplw cr4,result,op1 # sets CF in cr4
blr
-
+
_setup_bitl: cmpw base,state
SET_FLAGS(FLAGS_BTEST)
beq- 1f
@@ -2217,14 +2217,14 @@ _setup_bitl: cmpw base,state
and op1,r3,op2
cmplw cr4,result,op1
blr
-
+
/* Immediate forms bit tests are not frequent since logical are often faster */
btw_imm: NEXTBYTE(op2)
b 1f
btw_reg_mem: lhbrx op2,REG
1: bl _setup_bitw
NEXT
-
+
btl_imm: NEXTBYTE(op2)
b 1f
btl_reg_mem: lhbrx op2,REG
@@ -2238,7 +2238,7 @@ btcw_reg_mem: lhbrx op2,REG
xor r3,r3,op2
sthbrx r3,MEM
NEXT
-
+
btcl_imm: NEXTBYTE(op2)
b 1f
btcl_reg_mem: lhbrx op2,REG
@@ -2246,7 +2246,7 @@ btcl_reg_mem: lhbrx op2,REG
xor r3,r3,op2
stwbrx result,MEM
NEXT
-
+
btrw_imm: NEXTBYTE(op2)
b 1f
btrw_reg_mem: lhbrx op2,REG
@@ -2254,7 +2254,7 @@ btrw_reg_mem: lhbrx op2,REG
andc r3,r3,op2
sthbrx r3,MEM
NEXT
-
+
btrl_imm: NEXTBYTE(op2)
b 1f
btrl_reg_mem: lhbrx op2,REG
@@ -2262,7 +2262,7 @@ btrl_reg_mem: lhbrx op2,REG
andc r3,r3,op2
stwbrx r3,MEM
NEXT
-
+
btsw_imm: NEXTBYTE(op2)
b 1f
btsw_reg_mem: lhbrx op2,REG
@@ -2270,7 +2270,7 @@ btsw_reg_mem: lhbrx op2,REG
or r3,r3,op2
sthbrx r3,MEM
NEXT
-
+
btsl_imm: NEXTBYTE(op2)
b 1f
btsl_reg_mem: lhbrx op2,REG
@@ -2352,11 +2352,11 @@ sjmp_l: lbz r3,1(eip)
jmp_l: lwbrx r3,eip,one # Simple
addi eip,eip,5
lbzux opcode,eip,r3
- GOTNEXT
+ GOTNEXT
-/* The conditional jumps: although it should not happen,
+/* The conditional jumps: although it should not happen,
byte relative jumps (sjmp) may wrap around in 16 bit mode */
-
+
#define NOTTAKEN_S lbzu opcode,2(eip); GOTNEXT
#define NOTTAKEN_W lbzu opcode,3(eip); GOTNEXT
#define NOTTAKEN_L lbzu opcode,5(eip); GOTNEXT
@@ -2388,35 +2388,35 @@ jecxz_l: lwz r3,ECX(state); cmpwi r3,0; beq- sjmp_l; NOTTAKEN_S
/* Note that loop is somewhat strange, the data size attribute gives
the size of eip, and the address size whether the counter is cx or ecx.
This is the same for jcxz/jecxz. */
-
+
loopw_w: li opreg,CX
lhbrx r0,REG
sub. r0,r0,one
sthbrx r0,REG
bne+ sjmp_w
NOTTAKEN_S
-
+
loopl_w: li opreg,ECX
lwbrx r0,REG
sub. r0,r0,one
stwbrx r0,REG
bne+ sjmp_w
NOTTAKEN_S
-
+
loopw_l: li opreg,CX
lhbrx r0,REG
sub. r0,r0,one
sthbrx r0,REG
bne+ sjmp_l
NOTTAKEN_S
-
+
loopl_l: li opreg,ECX
lwbrx r0,REG
sub. r0,r0,one
stwbrx r0,REG
bne+ sjmp_l
NOTTAKEN_S
-
+
loopzw_w: li opreg,CX
lhbrx r0,REG
EVAL_ZF
@@ -2425,7 +2425,7 @@ loopzw_w: li opreg,CX
bf ZF,1f
bne+ sjmp_w
1: NOTTAKEN_S
-
+
loopzl_w: li opreg,ECX
lwbrx r0,REG
EVAL_ZF
@@ -2434,7 +2434,7 @@ loopzl_w: li opreg,ECX
bf ZF,1f
bne+ sjmp_w
1: NOTTAKEN_S
-
+
loopzw_l: li opreg,CX
lhbrx r0,REG
EVAL_ZF
@@ -2443,7 +2443,7 @@ loopzw_l: li opreg,CX
bf ZF,1f
bne+ sjmp_l
1: NOTTAKEN_S
-
+
loopzl_l: li opreg,ECX
lwbrx r0,REG
EVAL_ZF
@@ -2452,7 +2452,7 @@ loopzl_l: li opreg,ECX
bf ZF,1f
bne+ sjmp_l
1: NOTTAKEN_S
-
+
loopnzw_w: li opreg,CX
lhbrx r0,REG
EVAL_ZF
@@ -2461,7 +2461,7 @@ loopnzw_w: li opreg,CX
bt ZF,1f
bne+ sjmp_w
1: NOTTAKEN_S
-
+
loopnzl_w: li opreg,ECX
lwbrx r0,REG
EVAL_ZF
@@ -2470,7 +2470,7 @@ loopnzl_w: li opreg,ECX
bt ZF,1f
bne+ sjmp_w
1: NOTTAKEN_S
-
+
loopnzw_l: li opreg,CX
lhbrx r0,REG
EVAL_ZF
@@ -2479,7 +2479,7 @@ loopnzw_l: li opreg,CX
bt ZF,1f
bne+ sjmp_l
1: NOTTAKEN_S
-
+
loopnzl_l: li opreg,ECX
lwbrx r0,REG
EVAL_ZF
@@ -2489,7 +2489,7 @@ loopnzl_l: li opreg,ECX
bne+ sjmp_l
1: NOTTAKEN_S
-/* Memory indirect calls are rare enough to limit code duplication */
+/* Memory indirect calls are rare enough to limit code duplication */
callw_sp_mem: lhbrx r3,MEM
sub r4,eip,csb
addi r4,r4,1 # r4 is now return address
@@ -2522,7 +2522,7 @@ retw_sp_imm: li opreg,SP
GOTNEXT
.equ retl_sp_imm, unimpl
-
+
retw_sp: li opreg,SP
lhbrx r4,REG
addi r5,r4,2
@@ -2535,8 +2535,8 @@ retw_sp: li opreg,SP
/* Enter is a mess, and the description in Intel documents is actually wrong
* in most revisions (all PPro/PII I have but the old Pentium is Ok) !
- */
-
+ */
+
enterw_sp: lhbrx r0,eip,one # Stack space to allocate
li opreg,SP
lhbrx r3,REG # SP
@@ -2557,12 +2557,12 @@ enterw_sp: lhbrx r0,eip,one # Stack space to allocate
addi r3,r3,-2
clrlwi r3,r3,16
sthx r4,ssb,r3
-2: bdnz 1b
+2: bdnz 1b
addi r3,r3,-2 # save current frame pointer
clrlwi r3,r3,16
sthbrx r6,ssb,r3
3: sthbrx r6,state,r7 # New BP
- sub r3,r3,r0
+ sub r3,r3,r0
sthbrx r3,REG # Save new stack pointer
NEXT
@@ -2570,13 +2570,13 @@ enterw_sp: lhbrx r0,eip,one # Stack space to allocate
leavew_sp: li opreg,BP
lhbrx r3,REG # Stack = BP
- addi r4,r3,2 #
+ addi r4,r3,2 #
lhzx r3,ssb,r3
li opreg,SP
sthbrx r4,REG # New Stack
sth r3,BP(state) # Popped BP
NEXT
-
+
.equ leavel_sp, unimpl
/* String instructions: first a generic setup routine, which exits early
@@ -2596,11 +2596,11 @@ _setup_stringw: li offset,SI #
cmpwi r3,0
beq nop # early exit here !
1: mtctr r3 # ctr=CX or 1
- li r7,1 # stride
+ li r7,1 # stride
bflr+ DF
li r7,-1 # change stride sign
blr
-
+
/* Ending routine to update all changed registers (goes directly to NEXT) */
_finish_strw: li r4,SI
sthbrx offset,state,r4 # update si
@@ -2620,7 +2620,7 @@ lodsb_a16: bl _setup_stringw
bdnz 1b
stb r0,AL(state)
b _finish_strw
-
+
lodsw_a16: bl _setup_stringw
slwi r7,r7,1
1: lhzx r0,STRINGSRC # [rep] lodsw
@@ -2629,7 +2629,7 @@ lodsw_a16: bl _setup_stringw
bdnz 1b
sth r0,AX(state)
b _finish_strw
-
+
lodsl_a16: bl _setup_stringw
slwi r7,r7,2
1: lwzx r0,STRINGSRC # [rep] lodsl
@@ -2638,7 +2638,7 @@ lodsl_a16: bl _setup_stringw
bdnz 1b
stw r0,EAX(state)
b _finish_strw
-
+
stosb_a16: bl _setup_stringw
lbz r0,AL(state)
1: stbx r0,STRINGDST # [rep] stosb
@@ -2646,7 +2646,7 @@ stosb_a16: bl _setup_stringw
clrlwi opreg,opreg,16
bdnz 1b
b _finish_strw
-
+
stosw_a16: bl _setup_stringw
lhz r0,AX(state)
slwi r7,r7,1
@@ -2655,7 +2655,7 @@ stosw_a16: bl _setup_stringw
clrlwi opreg,opreg,16
bdnz 1b
b _finish_strw
-
+
stosl_a16: bl _setup_stringw
lwz r0,EAX(state)
slwi r7,r7,2
@@ -2664,7 +2664,7 @@ stosl_a16: bl _setup_stringw
clrlwi opreg,opreg,16
bdnz 1b
b _finish_strw
-
+
movsb_a16: bl _setup_stringw
1: lbzx r0,STRINGSRC # [rep] movsb
add offset,offset,r7
@@ -2674,7 +2674,7 @@ movsb_a16: bl _setup_stringw
clrlwi opreg,opreg,16
bdnz 1b
b _finish_strw
-
+
movsw_a16: bl _setup_stringw
slwi r7,r7,1
1: lhzx r0,STRINGSRC # [rep] movsw
@@ -2685,7 +2685,7 @@ movsw_a16: bl _setup_stringw
clrlwi opreg,opreg,16
bdnz 1b
b _finish_strw
-
+
movsl_a16: bl _setup_stringw
slwi r7,r7,2
1: lwzx r0,STRINGSRC # [rep] movsl
@@ -2696,14 +2696,14 @@ movsl_a16: bl _setup_stringw
clrlwi opreg,opreg,16
bdnz 1b
b _finish_strw
-
+
/* At least on a Pentium, repeated string I/O instructions check for
port access permission even if count is 0 ! So the order of the check is not
important. */
insb_a16: li r4,DX
li r3,code_insb_a16
lhbrx r4,state,r4
- bl _check_port
+ bl _check_port
bl _setup_stringw
lwz base,iobase(state)
1: lbzx r0,base,r4 # [rep] insb
@@ -2713,11 +2713,11 @@ insb_a16: li r4,DX
clrlwi opreg,opreg,16
bdnz 1b
b _finish_strw
-
+
insw_a16: li r4,DX
li r3,code_insw_a16
lhbrx r4,state,r4
- bl _check_port
+ bl _check_port
bl _setup_stringw
lwz base,iobase(state)
slwi r7,r7,1
@@ -2728,11 +2728,11 @@ insw_a16: li r4,DX
clrlwi opreg,opreg,16
bdnz 1b
b _finish_strw
-
+
insl_a16: li r4,DX
li r3,code_insl_a16
lhbrx r4,state,r4
- bl _check_port
+ bl _check_port
bl _setup_stringw
lwz base,iobase(state)
slwi r7,r7,2
@@ -2743,17 +2743,17 @@ insl_a16: li r4,DX
clrlwi opreg,opreg,16
bdnz 1b
b _finish_strw
-
+
outsb_a16: li r4,DX
li r3,code_outsb_a16
lhbrx r4,state,r4
- bl _check_port
+ bl _check_port
bl _setup_stringw
lwz r6,iobase(state)
1: lbzx r0,STRINGSRC # [rep] outsb
add offset,offset,r7
stbx r0,r6,r4
- clrlwi offset,offset,16
+ clrlwi offset,offset,16
eieio
bdnz 1b
b _finish_strw
@@ -2761,7 +2761,7 @@ outsb_a16: li r4,DX
outsw_a16: li r4,DX
li r3,code_outsw_a16
lhbrx r4,state,r4
- bl _check_port
+ bl _check_port
bl _setup_stringw
li r5,DX
lwz r6,iobase(state)
@@ -2769,7 +2769,7 @@ outsw_a16: li r4,DX
1: lhzx r0,STRINGSRC # [rep] outsw
add offset,offset,r7
sthx r0,r6,r4
- clrlwi offset,offset,16
+ clrlwi offset,offset,16
eieio
bdnz 1b
b _finish_strw
@@ -2777,14 +2777,14 @@ outsw_a16: li r4,DX
outsl_a16: li r4,DX
li r3,code_outsl_a16
lhbrx r4,state,r4
- bl _check_port
+ bl _check_port
bl _setup_stringw
lwz r6,iobase(state)
slwi r7,r7,2
1: lwzx r0,STRINGSRC # [rep] outsl
add offset,offset,r7
stwx r0,r6,r4
- clrlwi offset,offset,16
+ clrlwi offset,offset,16
eieio
bdnz 1b
b _finish_strw
@@ -2869,7 +2869,7 @@ cmpsl_a16: bl _setup_stringw
clrlwi opreg,opreg,16
bdnzf CF+2,3b
b 2b
-
+
scasb_a16: bl _setup_stringw
lbzx op1,AL,state # AL
SET_FLAGS(FLAGS_CMP(B))
@@ -2939,7 +2939,7 @@ scasl_a16: bl _setup_stringw
.equ lodsb_a32, unimpl
.equ lodsw_a32, unimpl
.equ lodsl_a32, unimpl
- .equ stosb_a32, unimpl
+ .equ stosb_a32, unimpl
.equ stosw_a32, unimpl
.equ stosl_a32, unimpl
.equ movsb_a32, unimpl
@@ -2964,22 +2964,22 @@ xlatb_a16: li offset,BX
add r3,r3,base
lbzx r3,r3,offset
stb r3,AL(state)
- NEXT
+ NEXT
.equ xlatb_a32, unimpl
-/*
+/*
* Shift and rotates: note the oddity that rotates do not affect SF/ZF/AF/PF
* but shifts do. Also testing has indicated that rotates with a count of zero
- * do not affect any flag. The documentation specifies this for shifts but
- * is more obscure for rotates. The overflow flag setting is only specified
+ * do not affect any flag. The documentation specifies this for shifts but
+ * is more obscure for rotates. The overflow flag setting is only specified
* when count is 1, otherwise OF is undefined which simplifies emulation.
*/
-/*
+/*
* The rotates through carry are among the most difficult instructions,
* they are implemented as a shift of 2*n+some bits depending on case.
- * First the left rotates through carry.
+ * First the left rotates through carry.
*/
/* Byte rcl is performed on 18 bits (17 actually used) in a single register */
@@ -3008,7 +3008,7 @@ rclb_1: li r3,1
rlwnm r0,r0,r3,0x000001ff # (23)0:NewCF:Result8
rlwimi flags,r0,19,CF_VALUE
stbx r0,MEM
- rlwimi flags,r0,18,OF_XOR
+ rlwimi flags,r0,18,OF_XOR
NEXT
/* Word rcl is performed on 33 bits (CF:data16:CF:(15 MSB of data16) */
@@ -3040,7 +3040,7 @@ rclw_1: li r3,1
add r0,r0,r4 # result
rlwimi flags,r0,11,CF_VALUE
sthbrx r0,MEM
- rlwimi flags,r0,10,OF_XOR
+ rlwimi flags,r0,10,OF_XOR
NEXT
/* Longword rcl only needs 64 bits because the maximum rotate count is 31 ! */
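The widened-rotate trick described in these comments (byte rcl on 18 bits, word rcl on 33, longword rcl on 64) amounts to rotating the carry flag together with the operand. A hedged C illustration of the byte case, not a transcription of the assembly:

```c
#include <stdint.h>

/* Emulate 8-bit RCL as a rotate of the 9-bit value CF:val.  Count 0
 * changes no flags; OF is architecturally defined only for count 1. */
static uint8_t rcl8(uint8_t val, unsigned count, unsigned *cf, unsigned *of)
{
    unsigned wide = (*cf << 8) | val;       /* 9-bit working value */
    count %= 9;                             /* rcl8 repeats every 9 steps */
    if (count == 0)
        return val;
    wide = ((wide << count) | (wide >> (9 - count))) & 0x1ff;
    *cf = (wide >> 8) & 1;                  /* bit rotated into CF */
    *of = *cf ^ ((wide >> 7) & 1);          /* new OF = CF xor new MSB */
    return (uint8_t)wide;
}
```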
@@ -3106,7 +3106,7 @@ rcrb_1: li r3,1
/* Word rcr is a 33 bit right shift with a quirk, because the 33rd bit
is only needed when the rotate count is 16 and rotating left or right
-by 16 a 32 bit quantity is the same ! */
+by 16 a 32 bit quantity is the same ! */
rcrw_imm: NEXTBYTE(r3)
b 1f
rcrw_cl: lbz r3,CL(state)
@@ -3179,7 +3179,7 @@ rolb_1: li r3,1
rlwimi r0,r0,24,0xff000000 # replicate for shift in
beq- nop # no flags changed if count 0
ROTATE_FLAGS
- rotlw r0,r0,r3
+ rotlw r0,r0,r3
rlwimi flags,r0,27,CF_VALUE # New CF
stbx r0,MEM
rlwimi flags,r0,26,OF_XOR # New OF (CF xor MSB)
@@ -3660,7 +3660,7 @@ divl: li opreg,EDX # Not yet fully implemented
stwbrx r5,EAX,state
stwbrx r4,REG
NEXT
-/*
+/*
* Divide r4:r5 by r3, quotient in r5, remainder in r4.
* The algorithm is stupid because it won't be used very often.
*/
@@ -3805,7 +3805,7 @@ movw_sr_mem: cmpwi opreg,20 # SREG 0 to 5 only
1: sthbrx r0,MEM
NEXT
-/* Now the instructions that modify the segment registers, note that
+/* Now the instructions that modify the segment registers, note that
move/pop to ss disable interrupts and traps for one instruction ! */
popl_sp_sr: li r6,4
b 1f
@@ -3826,7 +3826,7 @@ popw_sp_sr: li r6,2
lwz ssb,ssbase(state) # pop ss
crmove RF,TF # prevent traps
NEXT
-
+
movw_mem_sr: cmpwi opreg,20
addi r7,state,SELBASES
bgt- ud
@@ -3841,11 +3841,11 @@ movw_mem_sr: cmpwi opreg,20
bne+ nop
lwz ssb,ssbase(state)
crmove RF,TF # prevent traps
- NEXT
-
+ NEXT
+
.equ movl_mem_sr, movw_mem_sr
-/* The encoding of les/lss/lds/lfs/lgs is strange, opcode is c4/b2/c5/b4/b5
+/* The encoding of les/lss/lds/lfs/lgs is strange, opcode is c4/b2/c5/b4/b5
for es/ss/ds/fs/gs which are sreg 0/2/3/4/5. And obviously there is
no lcs instruction, it's called a far jump. */
@@ -3859,7 +3859,7 @@ ldlptrw: lhzux r7,MEM
bl 1f
sthx r7,REG
NEXT
-
+
1: cmpw base,state
lis r3,0xc011 # es/ss/ds/fs/gs
rlwinm r5,opcode,2,0x0c # 00/08/04/00/04
@@ -3879,7 +3879,7 @@ ldlptrw: lhzux r7,MEM
blr
-/* Instructions that may modify the current code segment: the next optimization
+/* Instructions that may modify the current code segment: the next optimization
* might be to avoid calling C code when the code segment does not change. But
* it's probably not worth the effort.
*/
@@ -3972,13 +3972,13 @@ stc: oris flags,flags,\
(CF_IN_CR|CF_LOCATION|CF_COMPLEMENT|ABOVE_IN_CR)>>16
xoris flags,flags,(CF_IN_CR|CF_LOCATION|ABOVE_IN_CR)>>16
NEXT
-
+
cld: crclr DF
NEXT
std: crset DF
NEXT
-
+
cli: crclr IF
NEXT
@@ -4029,7 +4029,7 @@ popfl_sp: li r4,SP
stw r3,eflags(state)
sthbrx r5,r4,state
b 1f
-
+
popfw_sp: li r4,SP
lhbrx r5,r4,state
lhbrx r3,ssb,r5
@@ -4066,7 +4066,7 @@ setnz: EVAL_ZF
#define SETCC(cond, eval, flag) \
set##cond: EVAL_##eval; bt flag,1b; b 0b; \
setn##cond: EVAL_##eval; bt flag,0b; b 1b
-
+
SETCC(c, CF, CF)
SETCC(a, ABOVE, ABOVE)
SETCC(s, SF, SF)
@@ -4134,7 +4134,7 @@ daa: lbz r0,AL(state)
stb result,AL(state)
rlwimi result,r3,2,0x100 # set CF if added
NEXT
-
+
das: lbz r0,AL(state)
bl _eval_af
rlwinm r7,r3,0,0x10
@@ -4153,7 +4153,7 @@ das: lbz r0,AL(state)
stb result,AL(state)
rlwimi result,r3,2,0x100 # set CF
NEXT
-
+
/* 486 specific instructions */
/* For cmpxchg, only the zero flag is important */
@@ -4226,7 +4226,7 @@ esc: li r3,code_dna # DNA interrupt
.equ invd, unimpl
-/* Undefined in real address mode */
+/* Undefined in real address mode */
.equ lar, ud
.equ lgdt, unimpl
@@ -4250,7 +4250,7 @@ esc: li r3,code_dna # DNA interrupt
.equ smsw, unimpl
.equ str, ud
-
+
ud: li r3,code_ud
li r4,0
b complex
@@ -4272,7 +4272,7 @@ em86_end:
.section .rodata
#define ENTRY(x,t) .long x+t
#endif
-
+
#define BOP(x) ENTRY(x,2) /* Byte operation with mod/rm byte */
#define WLOP(x) ENTRY(x,3) /* 16 or 32 bit operation with mod/rm byte */
#define EXTOP(x) ENTRY(x,0) /* Opcode with extension in mod/rm byte */
@@ -4488,7 +4488,7 @@ _jtables: jtable(w, a16, sp, ax, www) /* data16, addr16 */
jtable(l, a32, sp, eax, llw) /* data32, addr32 */
/* The other possible combinations are only required by protected mode
code using a big stack segment */
-/* Here are the auxiliary tables for opcode extensions, note that
+/* Here are the auxiliary tables for opcode extensions, note that
all entries get 2 or 3 added. */
#define grp1table(bwl,t,s8) \
grp1##bwl##_imm##s8:; \
@@ -4543,7 +4543,7 @@ grp5##wl##_##spesp: \
WLOP(inc##wl); WLOP(dec##wl); \
WLOP(call##wl##_##spesp##_mem); WLOP(lcall##wl##); \
WLOP(jmp##wl); WLOP(ljmp##wl); \
- WLOP(push##wl##_##spesp); OP(ud)
+ WLOP(push##wl##_##spesp); OP(ud)
grp5table(w,sp)
grp5table(l,sp)
@@ -4551,7 +4551,7 @@ grp5##wl##_##spesp: \
#define grp8table(wl) \
grp8##wl: OP(ud); OP(ud); OP(ud); OP(ud); \
WLOP(bt##wl##_imm); WLOP(bts##wl##_imm); \
- WLOP(btr##wl##_imm); WLOP(btc##wl##_imm)
+ WLOP(btr##wl##_imm); WLOP(btc##wl##_imm)
grp8table(w)
grp8table(l)
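The table macros above (BOP, WLOP, EXTOP and the grp*table expansions) tag each handler address with a small constant that tells the fetch loop how to decode the instruction. The same idea in C, purely as a sketch with invented names and assuming handlers are at least 4-byte aligned:

```c
#include <stdint.h>

typedef void (*handler_t)(void);

#define OP(h)    ((uintptr_t)(h) + 0)   /* no mod/rm byte follows */
#define BOP(h)   ((uintptr_t)(h) + 2)   /* byte op with mod/rm byte */
#define WLOP(h)  ((uintptr_t)(h) + 3)   /* 16/32-bit op with mod/rm byte */

static void dispatch(uintptr_t entry)
{
    unsigned tag = entry & 3;                   /* decode style */
    handler_t h = (handler_t)(entry & ~(uintptr_t)3);

    if (tag) {
        /* A mod/rm byte follows: fetch and decode it here.
         * Tag 2 means a byte operation, tag 3 a 16/32-bit one. */
    }
    h();
}
```

Packing the decode style into the low bits means one table load recovers both the handler and how to parse the rest of the instruction.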
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/exception.S b/c/src/lib/libbsp/powerpc/shared/bootloader/exception.S
index 059c62cd32..46f719c443 100644
--- a/c/src/lib/libbsp/powerpc/shared/bootloader/exception.S
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/exception.S
@@ -16,80 +16,80 @@
*/
/* This is an improved version of the TLB interrupt handling code from
- * the 603e users manual (603eUM.pdf) downloaded from the WWW. All the
- * visible bugs have been removed. Note that many have survived in the errata
- * to the 603 user manual (603UMer.pdf).
- *
+ * the 603e users manual (603eUM.pdf) downloaded from the WWW. All the
+ * visible bugs have been removed. Note that many have survived in the errata
+ * to the 603 user manual (603UMer.pdf).
+ *
* This code also pays particular attention to optimization, takes into
* account the differences between 603 and 603e, single/multiple processor
* systems and tries to order instructions for dual dispatch in many places.
- *
+ *
* The optimization has been performed along two lines:
* 1) to minimize the number of instruction cache lines needed for the most
* common execution paths (the ones that do not result in an exception).
- * 2) then to order the code to maximize the number of dual issue and
- * completion opportunities without increasing the number of cache lines
+ * 2) then to order the code to maximize the number of dual issue and
+ * completion opportunities without increasing the number of cache lines
* used in the same cases.
- *
+ *
* The last goal of this code is to fit inside the address range
* assigned to the interrupt vectors: 192 instructions with fixed
* entry points every 64 instructions.
- *
+ *
* Some typos have also been corrected and the Power l (lowercase L)
* instructions replaced by lwz without comment.
- *
+ *
* I have attempted to describe the reasons of the order and of the choice
* of the instructions but the comments may be hard to understand without
* the processor manual.
- *
+ *
* Note that the fact that the TLB are reloaded by software in theory
- * allows tremendous flexibility, for example we could avoid setting the
+ * allows tremendous flexibility, for example we could avoid setting the
 * reference bit of the PTE which could actually not be accessed because
- * of protection violation by changing a few lines of code. However,
+ * of protection violation by changing a few lines of code. However,
* this would significantly slow down most TLB reload operations, and
* this is the reason for which we try never to make checks which would be
* redundant with hardware and usually indicate a bug in a program.
- *
+ *
* There are some inconsistencies in the documentation concerning the
- * settings of SRR1 bit 15. All recent documentations say now that it is set
+ * settings of SRR1 bit 15. All recent documentations say now that it is set
* for stores and cleared for loads. Anyway this handler never uses this bit.
- *
+ *
* A final remark, the rfi instruction seems to implicitly clear the
* MSR<14> (tgpr)bit. The documentation claims that this bit is restored
* from SRR1 by rfi, but the corresponding bit in SRR1 is the LRU way bit.
* Anyway, the only exception which can occur while TGPR is set is a machine
* check which would indicate an unrecoverable problem. Recent documentation
- * now says in some place that rfi clears MSR<14>.
- *
- * TLB software load for 602/603/603e/603ev:
- * Specific Instructions:
- * tlbld - write the dtlb with the pte in rpa reg
- * tlbli - write the itlb with the pte in rpa reg
- * Specific SPRs:
- * dmiss - address of dstream miss
+ * now says in some place that rfi clears MSR<14>.
+ *
+ * TLB software load for 602/603/603e/603ev:
+ * Specific Instructions:
+ * tlbld - write the dtlb with the pte in rpa reg
+ * tlbli - write the itlb with the pte in rpa reg
+ * Specific SPRs:
+ * dmiss - address of dstream miss
* imiss - address of istream miss
- * hash1 - address primary hash PTEG address
- * hash2 - returns secondary hash PTEG address
- * iCmp - returns the primary istream compare value
- * dCmp - returns the primary dstream compare value
+ * hash1 - address primary hash PTEG address
+ * hash2 - returns secondary hash PTEG address
+ * iCmp - returns the primary istream compare value
+ * dCmp - returns the primary dstream compare value
* rpa - the second word of pte used by tlblx
- * Other specific resources:
+ * Other specific resources:
* cr0 saved in 4 high order bits of SRR1,
- * SRR1 bit 14 [WAY] selects TLB set to load from LRU algorithm
- * gprs r0..r3 shadowed by the setting of MSR bit 14 [TGPR]
+ * SRR1 bit 14 [WAY] selects TLB set to load from LRU algorithm
+ * gprs r0..r3 shadowed by the setting of MSR bit 14 [TGPR]
* other bits in SRR1 (unused by this handler but see earlier comments)
- *
+ *
* There are three basic flows corresponding to three vectors:
- * 0x1000: Instruction TLB miss,
+ * 0x1000: Instruction TLB miss,
* 0x1100: Data TLB miss on load,
- * 0x1200: Data TLB miss on store or not dirty page
+ * 0x1200: Data TLB miss on store or not dirty page
*/
-
+
/* define the following if code does not have to run on basic 603 */
/* #define USE_KEY_BIT */
-
+
/* define the following for safe multiprocessing */
-/* #define MULTIPROCESSING */
+/* #define MULTIPROCESSING */
/* define the following for mixed endian */
/* #define CHECK_MIXED_ENDIAN */
@@ -100,53 +100,53 @@
/* Some OS kernels may want to keep a single copy of the dirty bit in a per
* page table. In this case writable pages are always write-protected as long
* as they are clean, and the dirty bit set actually means that the page
- * is writable.
+ * is writable.
*/
-#define DIRTY_MEANS_WRITABLE
-
+#define DIRTY_MEANS_WRITABLE
+
#include <rtems/asm.h>
#include <rtems/score/cpu.h>
#include "bootldr.h"
-/*
- * Instruction TLB miss flow
- * Entry at 0x1000 with the following:
- * srr0 -> address of instruction that missed
- * srr1 -> 0:3=cr0, 13=1 (instruction), 14=lru way, 16:31=saved MSR
- * msr<tgpr> -> 1
- * iMiss -> ea that missed
- * iCmp -> the compare value for the va that missed
+/*
+ * Instruction TLB miss flow
+ * Entry at 0x1000 with the following:
+ * srr0 -> address of instruction that missed
+ * srr1 -> 0:3=cr0, 13=1 (instruction), 14=lru way, 16:31=saved MSR
+ * msr<tgpr> -> 1
+ * iMiss -> ea that missed
+ * iCmp -> the compare value for the va that missed
* hash1 -> pointer to first hash pteg
- * hash2 -> pointer to second hash pteg
+ * hash2 -> pointer to second hash pteg
*
- * Register usage:
- * r0 is limit address during search / scratch after
+ * Register usage:
+ * r0 is limit address during search / scratch after
* r1 is pte data / error code for ISI exception when search fails
- * r2 is pointer to pte
+ * r2 is pointer to pte
* r3 is compare value during search / scratch after
*/
/* Binutils or assembler bug ? Declaring the section executable and writable
* generates an error message on the @fixup entries.
*/
- .section .exception,"aw"
+ .section .exception,"aw"
# .org 0x1000 # instruction TLB miss entry point
.globl tlb_handlers
tlb_handlers:
.type tlb_handlers,@function
#define ISIVec tlb_handlers-0x1000+0x400
#define DSIVec tlb_handlers-0x1000+0x300
- mfspr r2,HASH1
+ mfspr r2,HASH1
lwz r1,0(r2) # Start memory access as soon as possible
- mfspr r3,ICMP # to load the cache.
+ mfspr r3,ICMP # to load the cache.
0: la r0,48(r2) # Use explicit loop to avoid using ctr
1: cmpw r1,r3 # In theory the loop is somewhat slower
beq- 2f # than documentation example
- cmpw r0,r2 # but we gain from starting cache load
- lwzu r1,8(r2) # earlier and using slots between load
- bne+ 1b # and comparison for other purposes.
+ cmpw r0,r2 # but we gain from starting cache load
+ lwzu r1,8(r2) # earlier and using slots between load
+ bne+ 1b # and comparison for other purposes.
cmpw r1,r3
bne- 4f # Secondary hash check
-2: lwz r1,4(r2) # Found: load second word of PTE
+2: lwz r1,4(r2) # Found: load second word of PTE
mfspr r0,IMISS # get miss address during load delay
#ifdef ASSUME_REF_SET
andi. r3,r1,8 # check for guarded memory
@@ -159,12 +159,12 @@ tlb_handlers:
# andi. r3,r1,8 # check for guarded memory
# bne- 5f
# andi. r3,r1,0x100 # check R bit ahead to help folding
-/* However there is a better solution: these last three instructions can be
-replaced by the following which should cause less pipeline stalls because
+/* However there is a better solution: these last three instructions can be
+replaced by the following which should cause less pipeline stalls because
both tests are combined and there is a single CR rename buffer */
extlwi r3,r1,6,23 # Keep only RCWIMG in 6 most significant bits.
- rlwinm. r3,r3,5,0,27 # Keep only G (in sign) and R and test.
- blt- 5f # Negative means guarded, zero R not set.
+ rlwinm. r3,r3,5,0,27 # Keep only G (in sign) and R and test.
+ blt- 5f # Negative means guarded, zero R not set.
mfsrr1 r3 # get saved cr0 bits now to dual issue
ori r1,r1,0x100
mtspr RPA,r1
@@ -174,7 +174,7 @@ writeback at a later time, and avoid even more bus traffic in
multiprocessing systems, when several processors access the same PTEGs.
We also hope that the reference bit will be already set. */
bne+ 3f
-#ifdef MULTIPROCESSING
+#ifdef MULTIPROCESSING
srwi r1,r1,8 # get byte 7 of pte
stb r1,+6(r2) # update page table
#else
@@ -183,7 +183,7 @@ We also hope that the reference bit will be already set. */
#endif
3: mtcrf 0x80,r3 # restore CR0
rfi # return to executing program
-
+
/* The preceding code is 20 to 25 instructions long, which occupies
3 or 4 cache lines. */
4: andi. r0,r3,0x0040 # see if we have done second hash
@@ -194,9 +194,9 @@ We also hope that the reference bit will be already set. */
lwz r1,0(r2) # load first entry
b 0b # and go back to main loop
/* We are now at 27 to 32 instructions, using 3 or 4 cache lines for all
-cases in which the TLB is successfully loaded. */
+cases in which the TLB is successfully loaded. */
-/* Guarded memory protection violation: synthesize an ISI exception. */
+/* Guarded memory protection violation: synthesize an ISI exception. */
5: lis r1,0x1000 # set srr1<3>=1 to flag guard violation
/* Entry Not Found branches here with r1 correctly set. */
6: mfsrr1 r3
@@ -209,41 +209,41 @@ a field of contiguous bits in a register by setting mask_begin>mask_end. */
mtcrf 0x80, r3 # restore CR0
mtmsr r0 # flip back to the native gprs
isync # Required from 602 doc!
- b ISIVec # go to instruction access exception
-/* Up to now there are 37 to 42 instructions so at least 20 could be
-inserted for complex cases or for statistics recording. */
+ b ISIVec # go to instruction access exception
+/* Up to now there are 37 to 42 instructions so at least 20 could be
+inserted for complex cases or for statistics recording. */
-/*
- Data TLB miss on load flow
- Entry at 0x1100 with the following:
- srr0 -> address of instruction that caused the miss
- srr1 -> 0:3=cr0, 13=0 (data), 14=lru way, 15=0, 16:31=saved MSR
- msr<tgpr> -> 1
- dMiss -> ea that missed
- dCmp -> the compare value for the va that missed
+/*
+ Data TLB miss on load flow
+ Entry at 0x1100 with the following:
+ srr0 -> address of instruction that caused the miss
+ srr1 -> 0:3=cr0, 13=0 (data), 14=lru way, 15=0, 16:31=saved MSR
+ msr<tgpr> -> 1
+ dMiss -> ea that missed
+ dCmp -> the compare value for the va that missed
hash1 -> pointer to first hash pteg
- hash2 -> pointer to second hash pteg
-
- Register usage:
- r0 is limit address during search / scratch after
+ hash2 -> pointer to second hash pteg
+
+ Register usage:
+ r0 is limit address during search / scratch after
r1 is pte data / error code for DSI exception when search fails
- r2 is pointer to pte
+ r2 is pointer to pte
r3 is compare value during search / scratch after
*/
- .org tlb_handlers+0x100
- mfspr r2,HASH1
+ .org tlb_handlers+0x100
+ mfspr r2,HASH1
lwz r1,0(r2) # Start memory access as soon as possible
mfspr r3,DCMP # to load the cache.
0: la r0,48(r2) # Use explicit loop to avoid using ctr
1: cmpw r1,r3 # In theory the loop is somewhat slower
beq- 2f # than documentation example
- cmpw r0,r2 # but we gain from starting cache load
- lwzu r1,8(r2) # earlier and using slots between load
- bne+ 1b # and comparison for other purposes.
+ cmpw r0,r2 # but we gain from starting cache load
+ lwzu r1,8(r2) # earlier and using slots between load
+ bne+ 1b # and comparison for other purposes.
cmpw r1,r3
bne- 4f # Secondary hash check
-2: lwz r1,4(r2) # Found: load second word of PTE
+2: lwz r1,4(r2) # Found: load second word of PTE
mfspr r0,DMISS # get miss address during load delay
#ifdef ASSUME_REF_SET
mtspr RPA,r1
@@ -260,7 +260,7 @@ writeback at a later time, and avoid even more bus traffic in
multiprocessing systems, when several processors access the same PTEGs.
We also hope that the reference bit will be already set. */
bne+ 3f
-#ifdef MULTIPROCESSING
+#ifdef MULTIPROCESSING
srwi r1,r1,8 # get byte 7 of pte
stb r1,+6(r2) # update page table
#else
@@ -269,7 +269,7 @@ We also hope that the reference bit will be already set. */
#endif
3: mtcrf 0x80,r3 # restore CR0
rfi # return to executing program
-
+
/* The preceding code is 18 to 23 instructions long, which occupies
3 cache lines. */
4: andi. r0,r3,0x0040 # see if we have done second hash
@@ -280,55 +280,55 @@ We also hope that the reference bit will be already set. */
lwz r1,0(r2) # load first entry asap
b 0b # and go back to main loop
/* We are now at 25 to 30 instructions, using 3 or 4 cache lines for all
-cases in which the TLB is successfully loaded. */
+cases in which the TLB is successfully loaded. */
-/*
- Data TLB miss on store or not dirty page flow
- Entry at 0x1200 with the following:
- srr0 -> address of instruction that caused the miss
- srr1 -> 0:3=cr0, 13=0 (data), 14=lru way, 15=1, 16:31=saved MSR
- msr<tgpr> -> 1
- dMiss -> ea that missed
- dCmp -> the compare value for the va that missed
+/*
+ Data TLB miss on store or not dirty page flow
+ Entry at 0x1200 with the following:
+ srr0 -> address of instruction that caused the miss
+ srr1 -> 0:3=cr0, 13=0 (data), 14=lru way, 15=1, 16:31=saved MSR
+ msr<tgpr> -> 1
+ dMiss -> ea that missed
+ dCmp -> the compare value for the va that missed
hash1 -> pointer to first hash pteg
- hash2 -> pointer to second hash pteg
-
- Register usage:
- r0 is limit address during search / scratch after
+ hash2 -> pointer to second hash pteg
+
+ Register usage:
+ r0 is limit address during search / scratch after
r1 is pte data / error code for DSI exception when search fails
- r2 is pointer to pte
+ r2 is pointer to pte
r3 is compare value during search / scratch after
-*/
+*/
.org tlb_handlers+0x200
- mfspr r2,HASH1
+ mfspr r2,HASH1
lwz r1,0(r2) # Start memory access as soon as possible
- mfspr r3,DCMP # to load the cache.
+ mfspr r3,DCMP # to load the cache.
0: la r0,48(r2) # Use explicit loop to avoid using ctr
1: cmpw r1,r3 # In theory the loop is somewhat slower
beq- 2f # than documentation example
- cmpw r0,r2 # but we gain from starting cache load
- lwzu r1,8(r2) # earlier and using slots between load
- bne+ 1b # and comparison for other purposes.
+ cmpw r0,r2 # but we gain from starting cache load
+ lwzu r1,8(r2) # earlier and using slots between load
+ bne+ 1b # and comparison for other purposes.
cmpw r1,r3
bne- 4f # Secondary hash check
-2: lwz r1,4(r2) # Found: load second word of PTE
+2: lwz r1,4(r2) # Found: load second word of PTE
mfspr r0,DMISS # get miss address during load delay
-/* We could simply set the C bit and then rely on hardware to flag protection
-violations. This raises the problem that a page which actually has not been
-modified may be marked as dirty and violates the OEA model for guaranteed
-bit settings (table 5-8 of 603eUM.pdf). This can have harmful consequences
-on operating system memory management routines, and play havoc with copy on
+/* We could simply set the C bit and then rely on hardware to flag protection
+violations. This raises the problem that a page which actually has not been
+modified may be marked as dirty and violates the OEA model for guaranteed
+bit settings (table 5-8 of 603eUM.pdf). This can have harmful consequences
+on operating system memory management routines, and play havoc with copy on
write schemes. So the protection check is ABSOLUTELY necessary. */
andi. r3,r1,0x80 # check C bit
- beq- 5f # if (C==0) go to check protection
-3: mfsrr1 r3 # get the saved cr0 bits
+ beq- 5f # if (C==0) go to check protection
+3: mfsrr1 r3 # get the saved cr0 bits
mtspr RPA,r1 # set the pte
- tlbld r0 # load the dtlb
- mtcrf 0x80,r3 # restore CR0
- rfi # return to executing program
+ tlbld r0 # load the dtlb
+ mtcrf 0x80,r3 # restore CR0
+ rfi # return to executing program
/* The preceding code is 20 instructions long, which occupy
-3 cache lines. */
+3 cache lines. */
4: andi. r0,r3,0x0040 # see if we have done second hash
lis r1,0x4200 # set up error code in case next branch taken
bne- 9f # speculatively issue the following
@@ -342,21 +342,21 @@ cases in which the TLB C bit is already set. */
#ifdef DIRTY_MEANS_WRITABLE
5: lis r1,0x0A00 # protection violation on store
#else
-/*
- Entry found and C==0: check protection before setting C:
- Register usage:
+/*
+ Entry found and C==0: check protection before setting C:
+ Register usage:
r0 is dMiss register
- r1 is PTE entry (to be copied to RPA if success)
- r2 is pointer to pte
- r3 is trashed
+ r1 is PTE entry (to be copied to RPA if success)
+ r2 is pointer to pte
+ r3 is trashed
For the 603e, the key bit in SRR1 helps to decide whether there is a
protection violation. However the way the check is done in the manual is
not very efficient. The code shown here works as well for 603 and 603e and
is much more efficient for the 603 and comparable to the manual example
- for 603e. This code however has quite a bad structure due to the fact it
- has been reordered to speed up the most common cases.
-*/
+ for 603e. This code however has quite a bad structure due to the fact it
+ has been reordered to speed up the most common cases.
+*/
/* The first of the following two instructions could be replaced by
andi. r3,r1,3 but it would compete with cmplwi for cr0 resource. */
5: clrlwi r3,r1,30 # Extract two low order bits
@@ -368,59 +368,59 @@ andi. r3,r1,3 but it would compete with cmplwi for cr0 resource. */
/* We are now at 33 instructions, using 5 cache lines. */
7: bgt- 8f # if PP=11 then DSI protection exception
/* This code only works if key bit is present (602/603e/603ev) */
-#ifdef USE_KEY_BIT
+#ifdef USE_KEY_BIT
mfsrr1 r3 # get the KEY bit and test it
andis. r3,r3,0x0008
beq 6b # default prediction taken, truly better ?
-#else
+#else
/* This code is for all 602 and 603 family models: */
mfsrr1 r3 # Here the trick is to use the MSR PR bit as a
mfsrin r0,r0 # shift count for an rlwnm. instruction which
extrwi r3,r3,1,17 # extracts and tests the correct key bit from
rlwnm. r3,r0,r3,1,1 # the segment register. RISC they said...
- mfspr r0,DMISS # Restore fault address to r0
+ mfspr r0,DMISS # Restore fault address to r0
beq 6b # if 0 load tlb else protection fault
#endif
/* We are now at 40 instructions, (37 if using key bit), using 5 cache
lines in all cases in which the C bit is successfully set */
8: lis r1,0x0A00 # protection violation on store
#endif /* DIRTY_IS_WRITABLE */
-/* PTE entry not found branch here with DSISR code in r1 */
+/* PTE entry not found branch here with DSISR code in r1 */
9: mfsrr1 r3
mtdsisr r1
- clrlwi r2,r3,16 # set up srr1 for DSI exception
+ clrlwi r2,r3,16 # set up srr1 for DSI exception
mfmsr r0
/* I have some doubts about the usefulness of the xori instruction in
mixed or pure little-endian environment. The address is in the same
doubleword, hence in the same protection domain and performing an exclusive
or with 7 is only valid for byte accesses. */
-#ifdef CHECK_MIXED_ENDIAN
+#ifdef CHECK_MIXED_ENDIAN
andi. r1,r2,1 # test LE bit ahead to help folding
#endif
mtsrr1 r2
- rlwinm r0,r0,0,15,13 # clear the msr<tgpr> bit
+ rlwinm r0,r0,0,15,13 # clear the msr<tgpr> bit
mfspr r1,DMISS # get miss address
#ifdef CHECK_MIXED_ENDIAN
- beq 1f # if little endian then:
- xori r1,r1,0x07 # de-mung the data address
+ beq 1f # if little endian then:
+ xori r1,r1,0x07 # de-mung the data address
1:
-#endif
- mtdar r1 # put in dar
- mtcrf 0x80,r3 # restore CR0
+#endif
+ mtdar r1 # put in dar
+ mtcrf 0x80,r3 # restore CR0
mtmsr r0 # flip back to the native gprs
- isync # required from 602 manual
+ isync # required from 602 manual
b DSIVec # branch to DSI exception
/* We are now between 50 and 56 instructions. Close to the limit
but should be sufficient in case bugs are found. */
-/* Altogether the three handlers occupy 128 instructions in the worst
+/* Altogether the three handlers occupy 128 instructions in the worst
case, 64 instructions could still be added (non contiguously). */
.org tlb_handlers+0x300
.globl _handler_glue
_handler_glue:
/* Entry code for exceptions: DSI (0x300), ISI(0x400), alignment(0x600) and
* traps(0x700). In theory it is not necessary to save and restore r13 and all
- * higher numbered registers, but it is done because it allowed to call the
- * firmware (PPCBug) for debugging in the very first stages when writing the
+ * higher numbered registers, but it is done because it allowed to call the
+ * firmware (PPCBug) for debugging in the very first stages when writing the
* bootloader.
*/
stwu r1,-160(r1)
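The store-miss flow above has to validate the PP bits (and, on 602/603e/603ev, the key bit) before it may set the Changed bit. A compressed C statement of that rule, assuming the standard OEA protection table rather than quoting this handler:

```c
/* pp is the two protection bits from the PTE; key is the Ks/Kp bit
 * selected by the current privilege level from the segment register. */
static int store_allowed(unsigned pp, unsigned key)
{
    if (pp == 3)
        return 0;        /* read-only regardless of key */
    if (key == 0)
        return 1;        /* key 0: pp 0, 1 and 2 are all writable */
    return pp == 2;      /* key 1: only pp 2 permits stores */
}
```

With DIRTY_MEANS_WRITABLE the check collapses further, since a clean page is simply kept write-protected until the first legal store faults in and sets C.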
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/head.S b/c/src/lib/libbsp/powerpc/shared/bootloader/head.S
index b0eeb0e550..cb6d9134fc 100644
--- a/c/src/lib/libbsp/powerpc/shared/bootloader/head.S
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/head.S
@@ -19,16 +19,16 @@
#include <rtems/score/cpu.h>
#include "bootldr.h"
-#define TEST_PPCBUG_CALLS
+#define TEST_PPCBUG_CALLS
#undef TEST_PPCBUG_CALLS
-
+
#define FRAME_SIZE 32
#define LOCK_CACHES (HID0_DLOCK | HID0_ILOCK)
#define INVL_CACHES (HID0_DCI | HID0_ICFI)
#define ENBL_CACHES (HID0_DCE | HID0_ICE)
#define USE_PPCBUG
-
+
#define PRINT_CHAR(c) \
addi r20,r3,0 ; \
li r3,c ; \
@@ -37,10 +37,10 @@
addi r3,r20,0 ; \
li r10,0x26 ; \
sc
-
-
-
-
+
+
+
+
#define MONITOR_ENTER \
mfmsr r10 ; \
ori r10,r10,MSR_IP ; \
@@ -48,8 +48,8 @@
li r10,0x63 ; \
sc
-
-
+
+
START_GOT
GOT_ENTRY(_GOT2_TABLE_)
GOT_ENTRY(_FIXUP_TABLE_)
@@ -61,21 +61,21 @@
GOT_ENTRY(_binary_rtems_gz_start)
GOT_ENTRY(_binary_initrd_gz_start)
GOT_ENTRY(_binary_initrd_gz_end)
-#ifdef TEST_PPCBUG_CALLS
+#ifdef TEST_PPCBUG_CALLS
GOT_ENTRY(banner_start)
GOT_ENTRY(banner_end)
-#endif
+#endif
#ifdef USE_PPCBUG
GOT_ENTRY(nioc_reset_packet)
#endif
END_GOT
.globl start
.type start,@function
-
+
/* Point the stack into the PreP partition header in the x86 reserved
- * code area, so that simple C routines can be called.
+ * code area, so that simple C routines can be called.
*/
-start:
+start:
#if defined(USE_PPCBUG) && defined(DEBUG) && defined(REENTER_MONITOR)
MONITOR_ENTER
#endif
@@ -89,9 +89,9 @@ start:
ori r0,r28,MSR_EE
xori r0,r0,MSR_EE
mtmsr r0
-
+
/* Enable the caches, from now on cr2.eq set means processor is 601 */
-
+
mfpvr r0
mfspr r29,HID0
srwi r0,r0,16
@@ -103,7 +103,7 @@ start:
* commented out, 11/7/2002, gregm. This instruction sequence seems to
* be pathological on the 603e.
*
-
+
#ifndef USE_PPCBUG
ori r0,r29,ENBL_CACHES|INVL_CACHES|LOCK_CACHES
xori r0,r0,INVL_CACHES|LOCK_CACHES
@@ -112,10 +112,10 @@ start:
mtspr HID0,r0
#endif
*/
-
-
+
+
2: bl reloc
-
+
/* save all the parameters and the original msr/hid0/r31 */
lwz bd,GOT(__bd)
stw r3,0(bd)
@@ -135,21 +135,21 @@ start:
* corrupted by the IF DMAing data into its old buffers or
* by writing descriptors...
*/
- lwz r3,GOT(nioc_reset_packet)
+ lwz r3,GOT(nioc_reset_packet)
li r10, 0x1d /* .NETCTRL */
sc
#endif
-/* Call the routine to fill boot_data structure from residual data.
- * And to find where the code has to be moved.
+/* Call the routine to fill boot_data structure from residual data.
+ * And to find where the code has to be moved.
*/
lis r3,__size@sectoff@ha
addi r3,r3,__size@sectoff@l
bl early_setup
-/* Now we need to relocate ourselves, where we are told to. First put a
+/* Now we need to relocate ourselves, where we are told to. First put a
* copy of the codemove routine to some place in memory.
- * (which may be where the 0x41 partition was loaded, so size is critical).
+ * (which may be where the 0x41 partition was loaded, so size is critical).
*/
lwz r4,GOT(codemove)
li r5,_size_codemove
@@ -175,14 +175,14 @@ start:
mtlr r8 # for the return address
bctr # returns to the moved instruction
-
+
/* Establish the new top stack frame. */
moved: lwz r1,stack(bd)
li r0,0
stwu r0,-16(r1)
/* relocate again */
- bl reloc
+ bl reloc
/* Clear all of BSS */
lwz r10,GOT(.bss)
li r0,__bss_words@sectoff@l
@@ -213,7 +213,7 @@ moved: lwz r1,stack(bd)
/* Some firmware versions leave stale values in the BATs, it's time
* to invalidate them to avoid interferences with our own mappings.
* But the 601 valid bit is in the BATL (IBAT only) and others are in
- * the [ID]BATU. Bloat, bloat.. fortunately thrown away later.
+ * the [ID]BATU. Bloat, bloat.. fortunately thrown away later.
*/
#if defined(USE_PPCBUG) && defined(DEBUG)
PRINT_CHAR('T')
@@ -239,22 +239,22 @@ moved: lwz r1,stack(bd)
PRINT_CHAR('i')
#endif
bl mm_init
-
+
#if defined(USE_PPCBUG) && defined(DEBUG)
PRINT_CHAR('M')
#endif
bl MMUon
-
+
/* Now we are mapped and can perform I/O if we want */
-#ifdef TEST_PPCBUG_CALLS
+#ifdef TEST_PPCBUG_CALLS
/* Experience seems to show that PPCBug can only be called with the
* data cache disabled and with MMU disabled. Bummer.
- */
+ */
li r10,0x22 # .OUTLN
lwz r3,GOT(banner_start)
lwz r4,GOT(banner_end)
sc
-#endif
+#endif
#if defined(USE_PPCBUG) && defined(DEBUG)
PRINT_CHAR('H')
#endif
@@ -294,9 +294,9 @@ moved: lwz r1,stack(bd)
li r30,0
*/
dcbst 0,r30 /* Make sure it's in memory ! */
-
-/* We just flash invalidate and disable the dcache, unless it's a 601,
- * critical areas have been flushed and we don't care about the stack
+
+/* We just flash invalidate and disable the dcache, unless it's a 601,
+ * critical areas have been flushed and we don't care about the stack
* and other scratch areas.
*/
beq cr2,1f
@@ -306,20 +306,20 @@ moved: lwz r1,stack(bd)
mtspr HID0,r0
xori r0,r0,HID0_DCI|HID0_DCE
mtspr HID0,r0
-
+
/* Provisional return to FW, works for PPCBug */
#if 0 && defined(REENTER_MONITOR)
MONITOR_ENTER
#else
1: bctr
#endif
-
-
+
+
/* relocation function, r30 must point to got2+0x8000 */
-reloc:
+reloc:
/* Adjust got2 pointers, no need to check for 0, this code already puts
- * a few entries in the table.
+ * a few entries in the table.
*/
li r0,__got2_entries@sectoff@l
la r12,GOT(_GOT2_TABLE_)
@@ -331,10 +331,10 @@ reloc:
add r0,r0,r11
stw r0,0(r12)
bdnz 1b
-
+
/* Now adjust the fixups and the pointers to the fixups in case we need
- * to move ourselves again.
- */
+ * to move ourselves again.
+ */
2: li r0,__fixup_entries@sectoff@l
lwz r12,GOT(_FIXUP_TABLE_)
cmpwi r0,0
@@ -347,18 +347,18 @@ reloc:
stw r10,0(r12)
stw r0,0(r10)
bdnz 3b
- blr
+ blr
/* Set the MMU on and off: code is always mapped 1:1 and does not need MMU,
* but it does not cost so much to map it also and it catches calls through
- * NULL function pointers.
+ * NULL function pointers.
*/
.globl MMUon
.type MMUon,@function
MMUon: blr
nop
-/*
+/*
mfmsr r0
ori r0,r0,MSR_IR|MSR_DR|MSR_IP
mflr r11
@@ -371,7 +371,7 @@ MMUon: blr
.type MMUoff,@function
MMUoff: blr
nop
-
+
/*
mfmsr r0
ori r0,r0,MSR_IR|MSR_DR|MSR_IP
@@ -383,9 +383,9 @@ MMUoff: blr
*/
/* Due to the PPC architecture (and according to the specifications), a
- * series of tlbie which goes through a whole 256 MB segment always flushes
- * the whole TLB. This is obviously overkill and slow, but who cares ?
- * It takes about 1 ms on a 200 MHz 603e and works even if residual data
+ * series of tlbie which goes through a whole 256 MB segment always flushes
+ * the whole TLB. This is obviously overkill and slow, but who cares ?
+ * It takes about 1 ms on a 200 MHz 603e and works even if residual data
* get the number of TLB entries wrong.
*/
flush_tlb:
@@ -396,8 +396,8 @@ flush_tlb:
/* tlbsync is not implemented on 601, so use sync which seems to be a superset
* of tlbsync in all cases and do not bother with CPU dependant code
*/
- sync
- blr
+ sync
+ blr
.globl codemove
codemove:
@@ -410,11 +410,11 @@ codemove:
beq 7f /* Protect against 0 count */
mtctr r0
bge cr1,2f
-
+
la r8,-4(r4)
la r7,-4(r3)
1: lwzu r0,4(r8)
- stwu r0,4(r7)
+ stwu r0,4(r7)
bdnz 1b
b 4f
@@ -424,23 +424,23 @@ codemove:
3: lwzu r0,-4(r8)
stwu r0,-4(r7)
bdnz 3b
-
+
/* Now flush the cache: note that we must start from a cache aligned
- * address. Otherwise we might miss one cache line.
+ * address. Otherwise we might miss one cache line.
*/
4: cmpwi r6,0
add r5,r3,r5
- beq 7f /* Always flush prefetch queue in any case */
+ beq 7f /* Always flush prefetch queue in any case */
subi r0,r6,1
andc r3,r3,r0
mr r4,r3
-5: cmplw r4,r5
+5: cmplw r4,r5
dcbst 0,r4
add r4,r4,r6
blt 5b
sync /* Wait for all dcbst to complete on bus */
mr r4,r3
-6: cmplw r4,r5
+6: cmplw r4,r5
icbi 0,r4
add r4,r4,r6
blt 6b
@@ -467,8 +467,8 @@ nioc_reset_packet:
.long 0 /* Number of bytes */
.long 0 /* Status/Control Flags (unused for reset) */
#endif
-#ifdef TEST_PPCBUG_CALLS
-banner_start:
+#ifdef TEST_PPCBUG_CALLS
+banner_start:
.ascii "This message was printed by PPCBug with MMU enabled"
-banner_end:
+banner_end:
#endif
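The tail of codemove is the part worth internalizing: after the copy it must make the new instructions visible to instruction fetch. A hedged C equivalent of that flush sequence (gcc-style inline assembly, PowerPC only):

```c
static void flush_copied_code(char *dst, unsigned long len, unsigned long lsize)
{
    /* Round down to a cache-line boundary, or the first partial line
     * would be missed, exactly as the comment in codemove warns. */
    char *p   = (char *)((unsigned long)dst & ~(lsize - 1));
    char *end = dst + len;
    char *q;

    for (q = p; q < end; q += lsize)
        __asm__ volatile("dcbst 0,%0" : : "r"(q) : "memory"); /* push dcache */
    __asm__ volatile("sync");                       /* wait for all dcbst */
    for (q = p; q < end; q += lsize)
        __asm__ volatile("icbi 0,%0" : : "r"(q) : "memory");  /* kill icache */
    __asm__ volatile("sync; isync");                /* flush prefetch queue */
}
```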
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/lib.c b/c/src/lib/libbsp/powerpc/shared/bootloader/lib.c
index b988d968b4..ae6cf1fafc 100644
--- a/c/src/lib/libbsp/powerpc/shared/bootloader/lib.c
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/lib.c
@@ -27,11 +27,11 @@ void* memcpy(void *dst, const void * src, unsigned int n)
{
unsigned char *d=dst;
const unsigned char *s=src;
-
+
while(n-- > 0) *d++=*s++;
return dst;
}
-
+
char* strcat(char * dest, const char * src)
{
char *tmp = dest;
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/misc.c b/c/src/lib/libbsp/powerpc/shared/bootloader/misc.c
index 1ae0cf7a10..6b4718e7ea 100644
--- a/c/src/lib/libbsp/powerpc/shared/bootloader/misc.c
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/misc.c
@@ -57,13 +57,13 @@ extern struct console_io vacuum_console_functions;
extern opaque log_console_setup, serial_console_setup, vga_console_setup;
boot_data __bd = {0, 0, 0, 0, 0, 0, 0, 0,
- 32, 0, 0, 0, 0, 0, 0,
+ 32, 0, 0, 0, 0, 0, 0,
&mm_private,
NULL,
&pci_private,
NULL,
&v86_private,
- "root=/dev/hdc1"
+ "root=/dev/hdc1"
};
static void exit(void) __attribute__((noreturn));
@@ -80,7 +80,7 @@ void hang(const char *s, u_long x, ctxt *p) {
#ifdef DEBUG
print_all_maps("\nMemory mappings at exception time:\n");
#endif
- printk("%s %lx NIP: %p LR: %p\n"
+ printk("%s %lx NIP: %p LR: %p\n"
"Callback trace (stack:return address)\n",
s, x, (void *) p->nip, (void *) p->lr);
asm volatile("lwz %0,0(1); lwz %0,0(%0); lwz %0,0(%0)": "=b" (r1));
@@ -142,7 +142,7 @@ void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
printk("gunzip: ran out of data in header\n");
exit();
}
-
+
s.zalloc = zalloc;
s.zfree = zfree;
r = inflateInit2(&s, -MAX_WBITS);
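The -MAX_WBITS argument is the detail that matters here: a negative window-bits value asks zlib for a raw deflate stream, because gunzip() has already parsed and skipped the gzip header by hand. A self-contained sketch of the same setup against the stock zlib API (the bootloader supplies its own salloc-based zalloc/zfree; with stock zlib, leaving them null selects malloc):

```c
#include <zlib.h>

/* Inflate a raw deflate stream (no gzip/zlib header) into dst.
 * Returns the number of bytes produced, or -1 on error. */
static int raw_inflate(unsigned char *dst, unsigned dstlen,
                       unsigned char *src, unsigned srclen)
{
    z_stream s = {0};                    /* null zalloc/zfree => malloc */
    int r;

    if (inflateInit2(&s, -MAX_WBITS) != Z_OK)
        return -1;
    s.next_in  = src;  s.avail_in  = srclen;
    s.next_out = dst;  s.avail_out = dstlen;
    r = inflate(&s, Z_FINISH);
    inflateEnd(&s);
    return r == Z_STREAM_END ? (int)s.total_out : -1;
}
```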
@@ -163,9 +163,9 @@ void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
inflateEnd(&s);
}
-void decompress_kernel(int kernel_size, void * zimage_start, int len,
+void decompress_kernel(int kernel_size, void * zimage_start, int len,
void * initrd_start, int initrd_len ) {
- u_char *parea;
+ u_char *parea;
RESIDUAL* rescopy;
int zimage_size= len;
@@ -181,12 +181,12 @@ void decompress_kernel(int kernel_size, void * zimage_start, int len,
exit();
}
/* Note that this clears the bss as a side effect, so some code
- * with ugly special case for SMP could be removed from the kernel!
+ * with ugly special case for SMP could be removed from the kernel!
*/
memset(parea, 0, kernel_size);
printk("\nUncompressing the kernel...\n");
rescopy=salloc(sizeof(RESIDUAL));
- /* Let us hope that residual data is aligned on word boundary */
+ /* Let us hope that residual data is aligned on word boundary */
*rescopy = *bd->residual;
bd->residual = (void *)PAGE_ALIGN(kernel_size);
@@ -203,7 +203,7 @@ void decompress_kernel(int kernel_size, void * zimage_start, int len,
* DMA from the last pages of memory is slower because
* prefetching from PCI has to be disabled to avoid accessing
* non existing memory. So it is the ideal place to put the
- * hash table.
+ * hash table.
*/
unsigned tmp = rescopy->TotalMemory;
/* It's equivalent to tmp & (-tmp), but using the negation
@@ -227,7 +227,7 @@ void decompress_kernel(int kernel_size, void * zimage_start, int len,
printk("done\nNow booting...\n");
MMUoff(); /* We need to access address 0 ! */
codemove(0, parea, kernel_size, bd->cache_lsize);
- codemove(bd->residual, rescopy, sizeof(RESIDUAL), bd->cache_lsize);
+ codemove(bd->residual, rescopy, sizeof(RESIDUAL), bd->cache_lsize);
codemove(bd->r6, bd->cmd_line, sizeof(bd->cmd_line), bd->cache_lsize);
/* codemove checks for 0 length */
codemove(bd->load_address, initrd_start, initrd_len, bd->cache_lsize);
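The hash-table placement in the hunk above appears to lean on a classic bit trick that the truncated comment only alludes to: ANDing a value with its own two's-complement negation isolates the lowest set bit, which here yields the largest naturally aligned power-of-two block at the top of memory. As a sketch:

```c
/* Isolate the lowest set bit of x. */
static unsigned long lowest_set_bit(unsigned long x)
{
    return x & -x;   /* e.g. 96 MB = 0x6000000 -> 0x2000000 (32 MB) */
}
```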
@@ -248,7 +248,7 @@ boot_udelay(uint32_t _microseconds)
} while (now - start < ticks);
}
-void
+void
setup_hw(void)
{
char *cp, ch;
@@ -258,9 +258,9 @@ setup_hw(void)
int timer, err;
u_short default_vga_cmd;
static unsigned int indic;
-
+
indic = 0;
-
+
res=bd->residual;
default_vga=NULL;
default_vga_cmd = 0;
@@ -274,10 +274,10 @@ setup_hw(void)
ticks_per_ms = 16500; /* assume 66 MHz on bus */
}
}
-
+
select_console(CONSOLE_LOG);
- /* We check that the keyboard is present and immediately
+ /* We check that the keyboard is present and immediately
* select the serial console if not.
*/
err = kbdreset();
@@ -294,11 +294,11 @@ setup_hw(void)
(vpd.TimeBaseDivisor ? vpd.TimeBaseDivisor : 4000),
res->TotalMemory);
printk("Original MSR: %lx\nOriginal HID0: %lx\nOriginal R31: %lx\n",
- bd->o_msr, bd->o_hid0, bd->o_r31);
+ bd->o_msr, bd->o_hid0, bd->o_r31);
/* This reconfigures all the PCI subsystem */
pci_init();
-
+
/* The Motorola NT firmware does not set the correct mem size */
if ( vpd.FirmwareSupplier == 0x10000 ) {
int memsize;
@@ -311,7 +311,7 @@ setup_hw(void)
}
}
#define ENABLE_VGA_USAGE
-#undef ENABLE_VGA_USAGE
+#undef ENABLE_VGA_USAGE
#ifdef ENABLE_VGA_USAGE
/* Find the primary VGA device, choosing the first one found
* if none is enabled. The basic loop structure has been copied
@@ -338,7 +338,7 @@ setup_hw(void)
/* Disable the enabled VGA device, if any. */
if (default_vga)
- pci_write_config_word(default_vga, PCI_COMMAND,
+ pci_write_config_word(default_vga, PCI_COMMAND,
default_vga_cmd&
~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
init_v86();
@@ -349,23 +349,23 @@ setup_hw(void)
((p->class) >> 16 != PCI_BASE_CLASS_DISPLAY))
continue;
if (p->bus->number != 0) continue;
- pci_read_config_word(p, PCI_COMMAND, &cmd);
- pci_write_config_word(p, PCI_COMMAND,
+ pci_read_config_word(p, PCI_COMMAND, &cmd);
+ pci_write_config_word(p, PCI_COMMAND,
cmd|PCI_COMMAND_IO|PCI_COMMAND_MEMORY);
printk("Calling the emulator.\n");
em86_main(p);
pci_write_config_word(p, PCI_COMMAND, cmd);
- }
+ }
cleanup_v86_mess();
-#endif
+#endif
/* Reenable the primary VGA device */
if (default_vga) {
- pci_write_config_word(default_vga, PCI_COMMAND,
+ pci_write_config_word(default_vga, PCI_COMMAND,
default_vga_cmd|
(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
- if (err) {
- printk("Keyboard error %d, using serial console!\n",
+ if (err) {
+ printk("Keyboard error %d, using serial console!\n",
err);
} else {
select_console(CONSOLE_VGA);
@@ -386,14 +386,14 @@ setup_hw(void)
/* In the future we may use the NVRAM to store default
* kernel parameters.
*/
- nvram=residual_find_device(~0UL, NULL, SystemPeripheral, NVRAM,
+ nvram=residual_find_device(~0UL, NULL, SystemPeripheral, NVRAM,
~0UL, 0);
if (nvram) {
PnP_TAG_PACKET * pkt;
- switch (nvram->DevId.Interface) {
+ switch (nvram->DevId.Interface) {
case IndirectNVRAM:
pkt=PnP_find_packet(res->DevicePnpHeap
- +nvram->AllocatedOffset,
+ +nvram->AllocatedOffset,
)
}
}
@@ -426,7 +426,7 @@ setup_hw(void)
/* Functions to deal with the residual data */
static int same_DevID(unsigned short vendor,
unsigned short Number,
- char * str)
+ char * str)
{
static unsigned const char hexdigit[]="0123456789ABCDEF";
if (strlen(str)!=7) return 0;
@@ -473,11 +473,11 @@ PnP_TAG_PACKET *PnP_find_packet(unsigned char *p,
if (tag_type(packet_tag)) mask=0xff; else mask=0xF8;
masked_tag = packet_tag&mask;
for(; *p != END_TAG; p+=size) {
- if ((*p & mask) == masked_tag && !(n--))
+ if ((*p & mask) == masked_tag && !(n--))
return (PnP_TAG_PACKET *) p;
if (tag_type(*p))
size=ld_le16((unsigned short *)(p+1))+3;
- else
+ else
size=tag_small_count(*p)+1;
}
return 0; /* not found */
@@ -490,7 +490,7 @@ PnP_TAG_PACKET *PnP_find_small_vendor_packet(unsigned char *p,
int next=0;
while (p) {
p = (unsigned char *) PnP_find_packet(p, 0x70, next);
- if (p && p[1]==packet_type && !(n--))
+ if (p && p[1]==packet_type && !(n--))
return (PnP_TAG_PACKET *) p;
next = 1;
};
@@ -504,7 +504,7 @@ PnP_TAG_PACKET *PnP_find_large_vendor_packet(unsigned char *p,
int next=0;
while (p) {
p = (unsigned char *) PnP_find_packet(p, 0x84, next);
- if (p && p[3]==packet_type && !(n--))
+ if (p && p[3]==packet_type && !(n--))
return (PnP_TAG_PACKET *) p;
next = 1;
};
@@ -526,12 +526,12 @@ find_max_mem( struct pci_dev *dev )
(dev->device == PCI_DEVICE_ID_MOTOROLA_MPC105)) ||
((dev->vendor == PCI_VENDOR_ID_IBM) &&
(dev->device == 0x0037/*IBM 660 Bridge*/)) ) {
- pci_read_config_byte(dev, 0xa0, &banks);
+ pci_read_config_byte(dev, 0xa0, &banks);
for (i = 0; i < 8; i++) {
if ( banks & (1<<i) ) {
- pci_read_config_byte(dev, 0x90+i, &tmp);
+ pci_read_config_byte(dev, 0x90+i, &tmp);
top = tmp;
- pci_read_config_byte(dev, 0x98+i, &tmp);
+ pci_read_config_byte(dev, 0x98+i, &tmp);
top |= (tmp&3)<<8;
if ( top > max ) max = top;
}
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/mm.c b/c/src/lib/libbsp/powerpc/shared/bootloader/mm.c
index 38739fbb92..4371ae6a0d 100644
--- a/c/src/lib/libbsp/powerpc/shared/bootloader/mm.c
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/mm.c
@@ -25,16 +25,16 @@
* residual data. The holes between these areas can be virtually
* remapped to any of these, since for some functions it is very handy
* to have virtually contiguous but physically discontiguous memory.
- *
+ *
* Physical memory allocation is also very crude, since it's only
* designed to manage a small number of large chunks. For valloc/vfree
* and palloc/pfree, the unit of allocation is the 4kB page.
- *
+ *
* The salloc/sfree has been added after tracing gunzip and seeing
* how it performed a very large number of small allocations.
* For these the unit of allocation is 8 bytes (the s stands for
- * small or subpage). This memory is cleared when allocated.
- *
+ * small or subpage). This memory is cleared when allocated.
+ *
*/
#include <rtems/bspIo.h>
@@ -50,7 +50,7 @@
* we want to avoid potential clashes with kernel includes.
* Here a map maps contiguous areas from base to end,
* the firstpte entry corresponds to physical address and has the low
- * order bits set for caching and permission.
+ * order bits set for caching and permission.
*/
typedef struct _map {
@@ -82,7 +82,7 @@ typedef struct _map {
#define MAP_FREE_SUBS 6
#define MAP_USED_SUBS 7
-#define MAP_FREE 4
+#define MAP_FREE 4
#define MAP_FREE_PHYS 12
#define MAP_USED_PHYS 13
#define MAP_FREE_VIRT 20
@@ -114,7 +114,7 @@ struct _mm_private {
map *sallocused; /* Used maps for salloc */
map *sallocphys; /* Physical areas used by salloc */
u_int hashcnt; /* Used to cycle in PTEG when they overflow */
-} mm_private = {hashmask: 0xffc0,
+} mm_private = {hashmask: 0xffc0,
freemaps: free_maps+0};
/* A simplified hash table entry declaration */
@@ -125,7 +125,7 @@ typedef struct _hash_entry {
void print_maps(map *, const char *);
-/* The handler used for all exceptions although for now it is only
+/* The handler used for all exceptions although for now it is only
* designed to properly handle MMU interrupts to fill the hash table.
*/
@@ -149,7 +149,7 @@ void _handler(int vec, ctxt *p) {
printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
hang("Memory protection violation at ", vaddr, p);
}
-
+
for(area=mm->mappings; area; area=area->next) {
if(area->base<=vaddr && vaddr<=area->end) break;
}
@@ -158,13 +158,13 @@ void _handler(int vec, ctxt *p) {
u_long hash, vsid, rpn;
hash_entry volatile *hte, *_hte1;
u_int i, alt=0, flushva;
-
+
vsid = _read_SR((void *)vaddr);
rpn = (vaddr&PAGE_MASK)-area->base+area->firstpte;
hash = vsid<<6;
hash ^= (vaddr>>(PAGE_SHIFT-6))&0x3fffc0;
hash &= mm->hashmask;
- /* Find an empty entry in the PTEG, else
+ /* Find an empty entry in the PTEG, else
* replace a random one.
*/
hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);
@@ -174,14 +174,14 @@ void _handler(int vec, ctxt *p) {
hash ^= mm->hashmask;
alt = 0x40; _hte1 = hte;
hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);
-
+
for (i=0; i<8; i++) {
if (hte[i].key>=0) goto found;
}
alt = 0;
hte = _hte1;
/* Choose a victim entry and replace it. There might be
- * better policies to choose the victim, but in a boot
+ * better policies to choose the victim, but in a boot
* loader we want simplicity as long as it works.
*
* We would not need to invalidate the TLB entry since
@@ -211,7 +211,7 @@ void _handler(int vec, ctxt *p) {
}
} else {
MMUon();
- printk("\nPanic: vector=%x, dsisr=%lx, faultaddr =%lx, msr=%lx opcode=%lx\n", vec,
+ printk("\nPanic: vector=%x, dsisr=%lx, faultaddr =%lx, msr=%lx opcode=%lx\n", vec,
cause, p->nip, p->msr, * ((unsigned int*) p->nip) );
if (vec == 7) {
unsigned int* ptr = ((unsigned int*) p->nip) - 4 * 10;
@@ -308,13 +308,13 @@ map * alloc_map_page(void) {
if (!from) return NULL;
from->end -= PAGE_SIZE;
-
+
mm->freemaps = (map *) (from->end+1);
-
+
for(p=mm->freemaps; p<mm->freemaps+PAGE_SIZE/sizeof(map)-1; p++) {
p->next = p+1;
p->firstpte = MAP_FREE;
- }
+ }
(p-1)->next=0;
/* Take the last one as pointer to self and insert
@@ -324,12 +324,12 @@ map * alloc_map_page(void) {
p->firstpte = MAP_PERM_PHYS;
p->base=(u_long) mm->freemaps;
p->end = p->base+PAGE_SIZE-1;
-
+
insert_map(&mm->physperm, p);
-
- if (from->end+1 == from->base)
+
+ if (from->end+1 == from->base)
free_map(remove_map(&mm->physavail, from));
-
+
return mm->freemaps;
}
@@ -364,13 +364,13 @@ void coalesce_maps(map *p) {
/* These routines are used to find the free memory zones to avoid
* overlapping destructive copies when initializing.
- * They work from the top because of the way we want to boot.
+ * They work from the top because of the way we want to boot.
* In the following the term zone refers to the memory described
* by one or several contiguous so called segments in the
* residual data.
*/
#define STACK_PAGES 2
-static inline u_long
+static inline u_long
find_next_zone(RESIDUAL *res, u_long lowpage, u_long flags) {
u_long i, newmin=0, size=0;
for(i=0; i<res->ActualNumMemSegs; i++) {
@@ -384,14 +384,14 @@ find_next_zone(RESIDUAL *res, u_long lowpage, u_long flags) {
return newmin+size;
}
-static inline u_long
+static inline u_long
find_zone_start(RESIDUAL *res, u_long highpage, u_long flags) {
u_long i;
int progress;
do {
progress=0;
for (i=0; i<res->ActualNumMemSegs; i++) {
- if ( (res->Segs[i].BasePage+res->Segs[i].PageCount
+ if ( (res->Segs[i].BasePage+res->Segs[i].PageCount
== highpage)
&& res->Segs[i].Usage & flags) {
highpage=res->Segs[i].BasePage;
@@ -441,8 +441,8 @@ fix_residual( RESIDUAL *res )
res->Segs[i].BasePage = seg_fix[i].BasePage;
res->Segs[i].PageCount = seg_fix[i].PageCount;
}
- /* The following should be fixed in the current version of the
- * kernel and of the bootloader.
+ /* The following should be fixed in the current version of the
+ * kernel and of the bootloader.
*/
#if 0
/* PPCBug has this zero */
@@ -468,10 +468,10 @@ fix_residual( RESIDUAL *res )
/* This routine is the first C code called with very little stack space!
* Its goal is to find where the boot image can be moved. This will
- * be the highest address with enough room.
+ * be the highest address with enough room.
*/
int early_setup(u_long image_size) {
- register RESIDUAL *res = bd->residual;
+ register RESIDUAL *res = bd->residual;
u_long minpages = PAGE_ALIGN(image_size)>>PAGE_SHIFT;
/* Fix residual if we are loaded by Motorola NT firmware */
@@ -481,19 +481,19 @@ int early_setup(u_long image_size) {
/* FIXME: if OF we should do something different */
if( !bd->of_entry && res &&
res->ResidualLength <= sizeof(RESIDUAL) && res->Version == 0 ) {
- u_long lowpage=ULONG_MAX, highpage;
+ u_long lowpage=ULONG_MAX, highpage;
u_long imghigh=0, stkhigh=0;
- /* Find the highest and large enough contiguous zone
+ /* Find the highest and large enough contiguous zone
consisting of free and BootImage sections. */
- /* Find 3 free areas of memory, one for the main image, one
- * for the stack (STACK_PAGES), and page one to put the map
- * structures. They are allocated from the top of memory.
+ /* Find 3 free areas of memory, one for the main image, one
+ * for the stack (STACK_PAGES), and page one to put the map
+ * structures. They are allocated from the top of memory.
* In most cases the stack will be put just below the image.
*/
- while((highpage =
+ while((highpage =
find_next_zone(res, lowpage, BootImage|Free))) {
lowpage=find_zone_start(res, highpage, BootImage|Free);
- if ((highpage-lowpage)>minpages &&
+ if ((highpage-lowpage)>minpages &&
highpage>imghigh) {
imghigh=highpage;
highpage -=minpages;
@@ -510,14 +510,14 @@ int early_setup(u_long image_size) {
/* The code mover is put at the lowest possible place
* of free memory. If this corresponds to the loaded boot
- * partition image it does not matter because it overrides
- * the unused part of it (x86 code).
+ * partition image it does not matter because it overrides
+ * the unused part of it (x86 code).
*/
bd->mover=(void *) (lowpage<<PAGE_SHIFT);
- /* Let us flush the caches in all cases. After all it should
- * not harm even on 601 and we don't care about performance.
- * Right now it's easy since all processors have a line size
+ /* Let us flush the caches in all cases. After all it should
+ * not harm even on 601 and we don't care about performance.
+ * Right now it's easy since all processors have a line size
* of 32 bytes. Once again residual data has proved unreliable.
*/
bd->cache_lsize = 32;
@@ -548,14 +548,14 @@ void * valloc(u_long size) {
return (void *)q->base;
}
-static
+static
void vflush(map *virtmap) {
struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
u_long i, limit=(mm->hashmask>>3)+8;
hash_entry volatile *p=(hash_entry *) mm->sdr1;
/* PTE handling is simple since the processor never updates
- * the entries. Writable pages always have the C bit set and
+ * the entries. Writable pages always have the C bit set and
* all valid entries have the R bit set. From the processor
* point of view the hash table is read only.
*/
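Given that comment, vflush reduces to a linear sweep of the hash table. A rough, exposition-only rendition: the valid bit lives in the sign bit of the first PTE word, matching the key >= 0 "free" test used elsewhere in this file, and reconstructing each entry's effective address is abstracted behind a callback:

```c
#include <stdint.h>

typedef struct { int32_t key; uint32_t rpn; } hash_entry;  /* key < 0: valid */

static void invalidate_matching(hash_entry *ht, unsigned long nslots,
                                int (*in_range)(const hash_entry *))
{
    unsigned long i;

    for (i = 0; i < nslots; i++) {
        if (ht[i].key < 0 && in_range(&ht[i]))
            ht[i].key = 0;                    /* clear the valid bit */
    }
    __asm__ volatile("sync" : : : "memory");  /* settle before the TLB flush */
}
```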
@@ -578,7 +578,7 @@ void vflush(map *virtmap) {
void vfree(void *vaddr) {
map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
-
+
/* Flush memory queues */
asm volatile("sync": : : "memory");
@@ -588,7 +588,7 @@ void vfree(void *vaddr) {
/* Remove mappings corresponding to virtmap */
for (physmap=mm->mappings; physmap; ) {
map *nextmap=physmap->next;
- if (physmap->base>=virtmap->base
+ if (physmap->base>=virtmap->base
&& physmap->base<virtmap->end) {
free_map(remove_map(&mm->mappings, physmap));
}
@@ -598,22 +598,22 @@ void vfree(void *vaddr) {
vflush(virtmap);
virtmap->firstpte= MAP_FREE_VIRT;
- insert_map(&mm->virtavail, virtmap);
+ insert_map(&mm->virtavail, virtmap);
coalesce_maps(mm->virtavail);
}
void vunmap(void *vaddr) {
map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
-
+
/* Flush memory queues */
asm volatile("sync": : : "memory");
/* vaddr must be within one of the vm areas in use and
- * then must correspond to one of the physical areas
+ * then must correspond to one of the physical areas
*/
for (virtmap=mm->virtused; virtmap; virtmap=virtmap->next) {
- if (virtmap->base<=(u_long)vaddr &&
+ if (virtmap->base<=(u_long)vaddr &&
virtmap->end>=(u_long)vaddr) break;
}
if (!virtmap) return;
@@ -632,7 +632,7 @@ int vmap(void *vaddr, u_long p, u_long size) {
if(!size) return 1;
/* Check that the requested area fits in one vm image */
for (q=mm->virtused; q; q=q->next) {
- if ((q->base <= (u_long)vaddr) &&
+ if ((q->base <= (u_long)vaddr) &&
(q->end>=(u_long)vaddr+size -1)) break;
}
if (!q) return 1;
@@ -673,7 +673,7 @@ void add_free_map(u_long base, u_long end) {
q->base=base;
q->end=end-1;
q->firstpte=MAP_FREE_VIRT;
- insert_map(&mm->virtavail, q);
+ insert_map(&mm->virtavail, q);
}
static inline
@@ -691,10 +691,10 @@ void create_free_vm(void) {
}
/* Memory management initialization.
- * Set up the mapping lists.
+ * Set up the mapping lists.
*/
-static inline
+static inline
void add_perm_map(u_long start, u_long size) {
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
map *p=alloc_map();
@@ -704,7 +704,7 @@ void add_perm_map(u_long start, u_long size) {
insert_map(& mm->physperm , p);
}
-void mm_init(u_long image_size)
+void mm_init(u_long image_size)
{
u_long lowpage=ULONG_MAX, highpage;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
@@ -716,7 +716,7 @@ void mm_init(u_long image_size)
/* The checks are simplified by the fact that the image
* and stack area are always allocated at the upper end
- * of a free block.
+ * of a free block.
*/
while((highpage = find_next_zone(res, lowpage, BootImage|Free))) {
lowpage=find_zone_start(res, highpage, BootImage|Free);
@@ -727,7 +727,7 @@ void mm_init(u_long image_size)
}
if ( (( u_long)bd->stack>>PAGE_SHIFT) == highpage) {
highpage -= STACK_PAGES;
- add_perm_map(highpage<<PAGE_SHIFT,
+ add_perm_map(highpage<<PAGE_SHIFT,
STACK_PAGES*PAGE_SIZE);
}
/* Protect the interrupt handlers that we need ! */
@@ -751,8 +751,8 @@ void mm_init(u_long image_size)
/* Setup the segment registers as we want them */
for (i=0; i<16; i++) _write_SR(i, (void *)(i<<28));
/* Create the maps for the physical memory, FirmwareCode does not
- * seem to be necessary. ROM is mapped read-only to reduce the risk
- * of reprogramming it because it's often Flash and some are
+ * seem to be necessary. ROM is mapped read-only to reduce the risk
+ * of reprogramming it because it's often Flash and some are
* amazingly easy to overwrite.
*/
create_identity_mappings(BootImage|Free|FirmwareCode|FirmwareHeap|
@@ -762,14 +762,14 @@ void mm_init(u_long image_size)
PCIAddr|PCIConfig|ISAAddr, PTE_IO);
create_free_vm();
-
+
/* Install our own MMU and trap handlers. */
- codemove((void *) 0x300, _handler_glue, 0x100, bd->cache_lsize);
- codemove((void *) 0x400, _handler_glue, 0x100, bd->cache_lsize);
- codemove((void *) 0x600, _handler_glue, 0x100, bd->cache_lsize);
- codemove((void *) 0x700, _handler_glue, 0x100, bd->cache_lsize);
+ codemove((void *) 0x300, _handler_glue, 0x100, bd->cache_lsize);
+ codemove((void *) 0x400, _handler_glue, 0x100, bd->cache_lsize);
+ codemove((void *) 0x600, _handler_glue, 0x100, bd->cache_lsize);
+ codemove((void *) 0x700, _handler_glue, 0x100, bd->cache_lsize);
}
-
+
void * salloc(u_long size) {
map *p, *q;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
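
For reference, the four vectors patched by the codemove() calls above are the architectural PowerPC exception entry points; the following table is descriptive only, the offsets themselves are fixed by the architecture:

/* Vectors overwritten with _handler_glue by mm_init() above. */
static const struct {
    unsigned long vector;
    const char   *name;
} patched_vectors[] = {
    { 0x300, "data storage (DSI)" },        /* MMU fault on data access */
    { 0x400, "instruction storage (ISI)" }, /* MMU fault on ifetch      */
    { 0x600, "alignment" },                 /* misaligned access        */
    { 0x700, "program" },                   /* illegal insn, trap, ...  */
};
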
@@ -816,17 +816,17 @@ void sfree(void *p) {
}
/* first/last area fit, flags is a power of 2 indicating the required
- * alignment. The algorithms are stupid because we expect very little
+ * alignment. The algorithms are stupid because we expect very little
* fragmentation of the areas, if any. The unit of allocation is the page.
* The allocation is by default performed from higher addresses down,
- * unless flags&PA_LOW is true.
+ * unless flags&PA_LOW is true.
*/
-void * __palloc(u_long size, int flags)
+void * __palloc(u_long size, int flags)
{
u_long mask = ((1<<(flags&PA_ALIGN_MASK))-1);
map *newmap, *frommap, *p, *splitmap=0;
- map **queue;
+ map **queue;
u_long qflags;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
@@ -849,7 +849,7 @@ void * __palloc(u_long size, int flags)
}
/* We need to allocate that one now so no two allocations may attempt
* to take the same memory simultaneously. Alloc_map_page does
- * not call back here to avoid infinite recursion in alloc_map.
+ * not call back here to avoid infinite recursion in alloc_map.
*/
if (mask&PAGE_MASK) {
@@ -868,11 +868,11 @@ void * __palloc(u_long size, int flags)
if (!frommap) {
if (splitmap) free_map(splitmap);
- return NULL;
+ return NULL;
}
-
+
newmap=alloc_map();
-
+
if (flags&PA_LOW) {
newmap->base = (frommap->base+mask)&~mask;
} else {
@@ -883,7 +883,7 @@ void * __palloc(u_long size, int flags)
newmap->firstpte = qflags;
/* Add a fragment if we don't allocate until the end. */
-
+
if (splitmap) {
splitmap->base=newmap->base+size;
splitmap->end=frommap->end;
@@ -904,13 +904,13 @@ void * __palloc(u_long size, int flags)
if (splitmap->base == splitmap->end+1) {
free_map(remove_map(&mm->physavail, splitmap));
} else {
- insert_map(&mm->physavail, splitmap);
+ insert_map(&mm->physavail, splitmap);
}
}
insert_map(queue, newmap);
return (void *) newmap->base;
-
+
}
void pfree(void * p) {
@@ -923,13 +923,13 @@ void pfree(void * p) {
coalesce_maps(mm->physavail);
}
-#ifdef DEBUG
+#ifdef DEBUG
/* Debugging functions */
void print_maps(map *chain, const char *s) {
map *p;
printk("%s",s);
for(p=chain; p; p=p->next) {
- printk(" %08lx-%08lx: %08lx\n",
+ printk(" %08lx-%08lx: %08lx\n",
p->base, p->end, p->firstpte);
}
}
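
As an aside, the mask arithmetic behind __palloc's placement decisions is a standard rounding idiom; a minimal standalone sketch, with hypothetical helper names that are not part of the bootloader:

/* mask = alignment - 1, alignment a power of two. */
static unsigned long align_up(unsigned long addr, unsigned long mask)
{
    return (addr + mask) & ~mask;   /* lowest aligned address >= addr */
}

static unsigned long align_down(unsigned long addr, unsigned long mask)
{
    return addr & ~mask;            /* highest aligned address <= addr */
}

/* PA_LOW places the allocation at align_up(area_base, mask); the
 * default top-down policy places it at align_down(area_end + 1 - size,
 * mask), i.e. the last aligned slot whose 'size' bytes still fit. */
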
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/pci.c b/c/src/lib/libbsp/powerpc/shared/bootloader/pci.c
index 6bd25a86c9..8f17d06b46 100644
--- a/c/src/lib/libbsp/powerpc/shared/bootloader/pci.c
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/pci.c
@@ -45,7 +45,7 @@ typedef unsigned int u32;
typedef struct _pci_resource {
struct _pci_resource *next;
- struct pci_dev *dev;
+ struct pci_dev *dev;
 u_long base; /* will be 64 bits on 64-bit machines */
u_long size;
u_char type; /* 1 is I/O else low order 4 bits of the memory type */
@@ -80,9 +80,9 @@ struct _pci_private {
pci_area_head io, mem;
} pci_private = {
- config_addr: NULL,
- config_data: (volatile u_char *) 0x80800000,
- last_dev_p: NULL,
+ config_addr: NULL,
+ config_data: (volatile u_char *) 0x80800000,
+ last_dev_p: NULL,
resources: NULL,
io: {NULL, 0xfff, 0},
mem: {NULL, 0xfffff, 0}
@@ -100,33 +100,33 @@ struct _pci_private {
#endif
#if defined(PCI_DEBUG)
-static void
+static void
print_pci_resources(const char *s) {
pci_resource *p;
printk("%s", s);
for (p=pci->resources; p; p=p->next) {
/*
- printk(" %p:%p %06x %08lx %08lx %d\n",
+ printk(" %p:%p %06x %08lx %08lx %d\n",
p, p->next,
(p->dev->devfn<<8)+(p->dev->bus->number<<16)
+0x10+p->reg*4,
p->base,
p->size,
- p->type);
+ p->type);
*/
- printk(" %p:%p %d:%02x (%04x:%04x) %08lx %08lx %d\n",
+ printk(" %p:%p %d:%02x (%04x:%04x) %08lx %08lx %d\n",
p, p->next,
- p->dev->bus->number, PCI_SLOT(p->dev->devfn),
+ p->dev->bus->number, PCI_SLOT(p->dev->devfn),
p->dev->vendor, p->dev->device,
p->base,
p->size,
- p->type);
+ p->type);
}
}
-static void
+static void
print_pci_area(pci_area *p) {
for (; p; p=p->next) {
printk(" %p:%p %p %08lx %08lx\n",
@@ -134,7 +134,7 @@ print_pci_area(pci_area *p) {
}
}
-static void
+static void
print_pci_areas(const char *s) {
printk("%s PCI I/O areas:\n",s);
print_pci_area(pci->io.head);
@@ -142,7 +142,7 @@ print_pci_areas(const char *s) {
print_pci_area(pci->mem.head);
}
#else
-#define print_pci_areas(x)
+#define print_pci_areas(x)
#define print_pci_resources(x)
#endif
@@ -159,7 +159,7 @@ struct blacklist_entry {
};
#define BLACKLIST(vid, did, breg, actual_size) \
- {PCI_VENDOR_ID_##vid, PCI_DEVICE_ID_##vid##_##did, breg, actual_size}
+ {PCI_VENDOR_ID_##vid, PCI_DEVICE_ID_##vid##_##did, breg, actual_size}
static struct blacklist_entry blacklist[] = {
BLACKLIST(S3, TRIO, 0, 0x04000000),
@@ -168,7 +168,7 @@ static struct blacklist_entry blacklist[] = {
/* This function filters resources and then inserts them into a list of
- * configurable pci resources.
+ * configurable pci resources.
*/
@@ -180,7 +180,7 @@ static struct blacklist_entry blacklist[] = {
static int insert_before(pci_resource *e, pci_resource *t) {
- if (e->dev->bus->number != t->dev->bus->number)
+ if (e->dev->bus->number != t->dev->bus->number)
return e->dev->bus->number > t->dev->bus->number;
if (AREA(e) != AREA(t)) return AREA(e)<AREA(t);
return (e->size > t->size);
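
insert_before() above is a three-key comparator (bus number descending, then address space, then size descending) that insert_resource() walks the list with. A generic sketch of the same ordered-insert pattern, with hypothetical types standing in for pci_resource:

/* Ordered insert into a singly linked list, given a comparator that
 * returns nonzero when 'e' must precede 't'. */
struct node { struct node *next; int key; };

static int comes_before(struct node *e, struct node *t)
{
    return e->key > t->key;              /* e.g. decreasing key order */
}

static void sorted_insert(struct node **head, struct node *e)
{
    struct node **pp = head;
    while (*pp && !comes_before(e, *pp)) /* walk until e must precede *pp */
        pp = &(*pp)->next;
    e->next = *pp;                       /* splice e in front of *pp */
    *pp = e;
}
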
@@ -195,8 +195,8 @@ static void insert_resource(pci_resource *r) {
pci_resource *p;
if (!r) return;
- /* First fixup in case we have a blacklist entry. Note that this
- * may temporarily leave a resource in an inconsistent state: with
+ /* First fixup in case we have a blacklist entry. Note that this
+ * may temporarily leave a resource in an inconsistent state: with
* (base & (size-1)) !=0. This is harmless.
*/
for (b=blacklist; b->vendor!=0xffff; b++) {
@@ -207,13 +207,13 @@ static void insert_resource(pci_resource *r) {
break;
}
}
-
+
 /* Motorola NT firmware does not configure pci devices which are not
 * required for booting; others do. For now:
 * - devices allocated in the ISA range (64kB I/O, 16Mb memory)
 * with non-zero base registers are left as is;
- * - all other registers, whether already allocated or not, are
- * reallocated unless they require an inordinate amount of
+ * - all other registers, whether already allocated or not, are
+ * reallocated unless they require an inordinate amount of
 * resources (>256 Mb for memory, >64kB for I/O). These
 * devices with too-large mapping requirements are simply ignored
 * and their bases are set to 0. This should disable the
@@ -233,37 +233,37 @@ static void insert_resource(pci_resource *r) {
** the hardware, we are stuck with the kludge below. Note that
** everything is remapped on the CPCI backplane and any downstream
 ** hardware; it's just the builtin stuff we're tiptoeing around.
- **
+ **
** Gregm, 7/16/2003
*/
if( r->dev->bus->number <= 1 )
{
- if ((r->type==PCI_BASE_ADDRESS_SPACE_IO)
+ if ((r->type==PCI_BASE_ADDRESS_SPACE_IO)
? (r->base && r->base <0x10000)
: (r->base && r->base <0x1000000)) {
#ifdef PCI_DEBUG
- printk("freeing region; %p:%p %d:%02x (%04x:%04x) %08lx %08lx %d\n",
+ printk("freeing region; %p:%p %d:%02x (%04x:%04x) %08lx %08lx %d\n",
r, r->next,
- r->dev->bus->number, PCI_SLOT(r->dev->devfn),
+ r->dev->bus->number, PCI_SLOT(r->dev->devfn),
r->dev->vendor, r->dev->device,
r->base,
r->size,
- r->type);
+ r->type);
#endif
sfree(r);
return;
}
}
- if ((r->type==PCI_BASE_ADDRESS_SPACE_IO)
+ if ((r->type==PCI_BASE_ADDRESS_SPACE_IO)
? (r->size >= 0x10000)
: (r->size >= 0x10000000)) {
r->size = 0;
r->base = 0;
}
- /* Now insert into the list sorting by
+ /* Now insert into the list sorting by
* 1) decreasing bus number
* 2) space: prefetchable memory, non-prefetchable and finally I/O
* 3) decreasing size
@@ -284,21 +284,21 @@ static void insert_resource(pci_resource *r) {
-/* This version only works for bus 0. I don't have any P2P bridges to test
+/* This version only works for bus 0. I don't have any P2P bridges to test
 * a more sophisticated version, which has therefore not been implemented.
 * Prefetchable memory is not yet handled correctly either,
 * and several levels of PCI bridges even less so, since their windows
- * must be allocated together to set up the top bridge correctly.
+ * must be allocated together to set up the top bridge correctly.
*/
-static u_long find_range(u_char bus, u_char type,
+static u_long find_range(u_char bus, u_char type,
pci_resource **first,
pci_resource **past, u_int *flags) {
pci_resource *p;
u_long total=0;
u_int fl=0;
- for (p=pci->resources; p; p=p->next)
+ for (p=pci->resources; p; p=p->next)
{
if ((p->dev->bus->number == bus) &&
AREA(p)==type) break;
@@ -306,12 +306,12 @@ static u_long find_range(u_char bus, u_char type,
*first = p;
- for (; p; p=p->next)
+ for (; p; p=p->next)
{
if ((p->dev->bus->number != bus) ||
AREA(p)!=type || p->size == 0) break;
total = total+p->size;
- fl |= 1<<p->type;
+ fl |= 1<<p->type;
}
*past = p;
@@ -328,7 +328,7 @@ static u_long find_range(u_char bus, u_char type,
-static inline void init_free_area(pci_area_head *h, u_long start,
+static inline void init_free_area(pci_area_head *h, u_long start,
u_long end, u_int mask, int high) {
pci_area *p;
p = salloc(sizeof(pci_area));
@@ -376,12 +376,12 @@ static void insert_area(pci_area_head *h, pci_area *p) {
static
-void remove_area(pci_area_head *h, pci_area *p)
+void remove_area(pci_area_head *h, pci_area *p)
{
pci_area *q = h->head;
if (!p || !q) return;
- if (q==p)
+ if (q==p)
{
h->head = q->next;
return;
@@ -401,7 +401,7 @@ static pci_area * alloc_area(pci_area_head *h, struct pci_bus *bus,
pci_area *from, *split, *new;
required = (required+h->mask) & ~h->mask;
- for (p=h->head, from=NULL; p; p=p->next)
+ for (p=h->head, from=NULL; p; p=p->next)
{
u_long l1 = ((p->start+required+mask)&~mask)-1;
u_long l2 = ((p->start+mask)&~mask)+required-1;
@@ -417,41 +417,41 @@ static pci_area * alloc_area(pci_area_head *h, struct pci_bus *bus,
/* If allocation of new succeeds then allocation of split has
* also been successful (given the current mm algorithms) !
*/
- if (!new) {
- sfree(split);
- return NULL;
+ if (!new) {
+ sfree(split);
+ return NULL;
}
new->bus = bus;
new->flags = flags;
/* Now allocate pci_space taking alignment into account ! */
- if (h->high)
+ if (h->high)
{
u_long l1 = ((from->end+1)&~mask)-required;
- u_long l2 = (from->end+1-required)&~mask;
+ u_long l2 = (from->end+1-required)&~mask;
new->start = (l1>l2) ? l1 : l2;
split->end = from->end;
from->end = new->start-1;
split->start = new->start+required;
new->end = new->start+required-1;
- }
- else
+ }
+ else
{
u_long l1 = ((from->start+mask)&~mask)+required-1;
- u_long l2 = ((from->start+required+mask)&~mask)-1;
+ u_long l2 = ((from->start+required+mask)&~mask)-1;
new->end = (l1<l2) ? l1 : l2;
split->start = from->start;
from->start = new->end+1;
new->start = new->end+1-required;
split->end = new->start-1;
}
-
+
if (from->end+1 == from->start) remove_area(h, from);
- if (split->end+1 != split->start)
+ if (split->end+1 != split->start)
{
split->bus = NULL;
insert_area(h, split);
- }
- else
+ }
+ else
{
sfree(split);
}
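
The from/new/split bookkeeping in alloc_area() is a classic interval carve: taking a region out of a free area leaves at most two remainders, each discarded when empty. A reduced sketch with hypothetical types (modular arithmetic at the very edges of the address space is ignored here, as it is above):

/* Carving [start, start+len) out of the free interval [lo, hi]
 * leaves a low remainder [lo, start-1] and a high remainder
 * [start+len, hi]; a remainder with lo > hi is empty and dropped,
 * as alloc_area() drops 'from' and 'split'. */
struct ival { unsigned long lo, hi; };

static int carve(const struct ival *area, unsigned long start,
                 unsigned long len, struct ival *low_rem,
                 struct ival *high_rem)
{
    if (start < area->lo || start + len - 1 > area->hi)
        return 0;                 /* region does not fit */
    low_rem->lo  = area->lo;
    low_rem->hi  = start - 1;     /* empty when start == area->lo */
    high_rem->lo = start + len;
    high_rem->hi = area->hi;      /* empty when region ends at hi */
    return 1;
}
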
@@ -465,7 +465,7 @@ static pci_area * alloc_area(pci_area_head *h, struct pci_bus *bus,
static inline
-void alloc_space(pci_area *p, pci_resource *r)
+void alloc_space(pci_area *p, pci_resource *r)
{
if (p->start & (r->size-1)) {
r->base = p->end+1-r->size;
@@ -480,7 +480,7 @@ void alloc_space(pci_area *p, pci_resource *r)
-static void reconfigure_bus_space(u_char bus, u_char type, pci_area_head *h)
+static void reconfigure_bus_space(u_char bus, u_char type, pci_area_head *h)
{
pci_resource *first, *past, *r;
pci_area *area, tmp;
@@ -494,7 +494,7 @@ static void reconfigure_bus_space(u_char bus, u_char type, pci_area_head *h)
if (!area) return;
tmp = *area;
- for (r=first; r!=past; r=r->next)
+ for (r=first; r!=past; r=r->next)
{
alloc_space(&tmp, r);
}
@@ -537,8 +537,8 @@ static void reconfigure_pci(void) {
/* First reconfigure the I/O space, this will be more
- * complex when there is more than 1 bus. And 64-bit
- * devices are another kind of problem.
+ * complex when there is more than 1 bus. And 64-bit
+ * devices are another kind of problem.
*/
reconfigure_bus_space(0, PCI_AREA_IO, &pci->io);
reconfigure_bus_space(0, PCI_AREA_MEMORY, &pci->mem);
@@ -546,7 +546,7 @@ static void reconfigure_pci(void) {
/* Now we have to touch the configuration space of all
* the devices to remap them better than they are right now.
- * This is done in 3 steps:
+ * This is done in 3 steps:
* 1) first disable I/O and memory response of all devices
* 2) modify the base registers
* 3) restore the original PCI_COMMAND register.
@@ -562,12 +562,12 @@ static void reconfigure_pci(void) {
}
for (r=pci->resources; r; r= r->next) {
- pci_write_config_dword(r->dev,
+ pci_write_config_dword(r->dev,
PCI_BASE_ADDRESS_0+(r->reg<<2),
r->base);
if ((r->type&
(PCI_BASE_ADDRESS_SPACE|
- PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
+ PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
(PCI_BASE_ADDRESS_SPACE_MEMORY|
PCI_BASE_ADDRESS_MEM_TYPE_64)) {
pci_write_config_dword(r->dev,
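
The hunk above is step 2 of the sequence the comment describes. A condensed sketch of all three steps, using the pcibios_* accessors from bootldr.h and the pci.h command/base constants, but with a hypothetical device list and saved-state array:

struct dev_ref { unsigned char bus, devfn; unsigned short saved; };

static void remap_devices(struct dev_ref *devs, int n,
                          const unsigned int *new_base, int reg)
{
    int i;
    /* 1) disable I/O and memory response so nothing decodes a
     *    stale address while bases change */
    for (i = 0; i < n; i++) {
        pcibios_read_config_word(devs[i].bus, devs[i].devfn,
                                 PCI_COMMAND, &devs[i].saved);
        pcibios_write_config_word(devs[i].bus, devs[i].devfn, PCI_COMMAND,
            devs[i].saved & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
    }
    /* 2) write the newly allocated base addresses */
    for (i = 0; i < n; i++)
        pcibios_write_config_dword(devs[i].bus, devs[i].devfn,
            PCI_BASE_ADDRESS_0 + (reg << 2), new_base[i]);
    /* 3) restore the original command registers */
    for (i = 0; i < n; i++)
        pcibios_write_config_word(devs[i].bus, devs[i].devfn,
                                  PCI_COMMAND, devs[i].saved);
}
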
@@ -592,60 +592,60 @@ static void reconfigure_pci(void) {
static int
-indirect_pci_read_config_byte(unsigned char bus, unsigned char dev_fn,
+indirect_pci_read_config_byte(unsigned char bus, unsigned char dev_fn,
unsigned char offset, unsigned char *val) {
- out_be32(pci->config_addr,
+ out_be32(pci->config_addr,
0x80|(bus<<8)|(dev_fn<<16)|((offset&~3)<<24));
*val=in_8(pci->config_data + (offset&3));
return PCIBIOS_SUCCESSFUL;
}
static int
-indirect_pci_read_config_word(unsigned char bus, unsigned char dev_fn,
+indirect_pci_read_config_word(unsigned char bus, unsigned char dev_fn,
unsigned char offset, unsigned short *val) {
- *val = 0xffff;
+ *val = 0xffff;
if (offset&1) return PCIBIOS_BAD_REGISTER_NUMBER;
- out_be32(pci->config_addr,
+ out_be32(pci->config_addr,
0x80|(bus<<8)|(dev_fn<<16)|((offset&~3)<<24));
*val=in_le16((volatile u_short *)(pci->config_data + (offset&3)));
return PCIBIOS_SUCCESSFUL;
}
static int
-indirect_pci_read_config_dword(unsigned char bus, unsigned char dev_fn,
+indirect_pci_read_config_dword(unsigned char bus, unsigned char dev_fn,
unsigned char offset, unsigned int *val) {
- *val = 0xffffffff;
+ *val = 0xffffffff;
if (offset&3) return PCIBIOS_BAD_REGISTER_NUMBER;
- out_be32(pci->config_addr,
+ out_be32(pci->config_addr,
0x80|(bus<<8)|(dev_fn<<16)|(offset<<24));
*val=in_le32((volatile u_int *)pci->config_data);
return PCIBIOS_SUCCESSFUL;
}
static int
-indirect_pci_write_config_byte(unsigned char bus, unsigned char dev_fn,
+indirect_pci_write_config_byte(unsigned char bus, unsigned char dev_fn,
unsigned char offset, unsigned char val) {
- out_be32(pci->config_addr,
+ out_be32(pci->config_addr,
0x80|(bus<<8)|(dev_fn<<16)|((offset&~3)<<24));
out_8(pci->config_data + (offset&3), val);
return PCIBIOS_SUCCESSFUL;
}
static int
-indirect_pci_write_config_word(unsigned char bus, unsigned char dev_fn,
+indirect_pci_write_config_word(unsigned char bus, unsigned char dev_fn,
unsigned char offset, unsigned short val) {
if (offset&1) return PCIBIOS_BAD_REGISTER_NUMBER;
- out_be32(pci->config_addr,
+ out_be32(pci->config_addr,
0x80|(bus<<8)|(dev_fn<<16)|((offset&~3)<<24));
out_le16((volatile u_short *)(pci->config_data + (offset&3)), val);
return PCIBIOS_SUCCESSFUL;
}
static int
-indirect_pci_write_config_dword(unsigned char bus, unsigned char dev_fn,
+indirect_pci_write_config_dword(unsigned char bus, unsigned char dev_fn,
unsigned char offset, unsigned int val) {
if (offset&3) return PCIBIOS_BAD_REGISTER_NUMBER;
- out_be32(pci->config_addr,
+ out_be32(pci->config_addr,
0x80|(bus<<8)|(dev_fn<<16)|(offset<<24));
out_le32((volatile u_int *)pci->config_data, val);
return PCIBIOS_SUCCESSFUL;
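
The address word these indirect accessors write may look odd; it is simply the conventional CONFIG_ADDRESS encoding with its bytes pre-swapped, since out_be32() stores big-endian into a little-endian register. A small sanity-check sketch:

/* swap32(0x80000000 | (bus<<16) | (devfn<<8) | (offset & ~3))
 *   == 0x80 | (bus<<8) | (devfn<<16) | ((offset & ~3) << 24)
 * which is exactly the constant used by the functions above. */
static unsigned int swap32(unsigned int v)
{
    return (v << 24) | ((v & 0xff00U) << 8) |
           ((v >> 8) & 0xff00U) | (v >> 24);
}

static unsigned int config_address(unsigned char bus, unsigned char devfn,
                                   unsigned char offset)
{
    return swap32(0x80000000U | (bus << 16) | (devfn << 8) | (offset & ~3));
}
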
@@ -662,21 +662,21 @@ static const struct pci_config_access_functions indirect_functions = {
static int
-direct_pci_read_config_byte(unsigned char bus, unsigned char dev_fn,
+direct_pci_read_config_byte(unsigned char bus, unsigned char dev_fn,
unsigned char offset, unsigned char *val) {
if (bus != 0 || (1<<PCI_SLOT(dev_fn) & 0xff8007fe)) {
*val=0xff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
- *val=in_8(pci->config_data + ((1<<PCI_SLOT(dev_fn))&~1)
+ *val=in_8(pci->config_data + ((1<<PCI_SLOT(dev_fn))&~1)
+ (PCI_FUNC(dev_fn)<<8) + offset);
return PCIBIOS_SUCCESSFUL;
}
static int
-direct_pci_read_config_word(unsigned char bus, unsigned char dev_fn,
+direct_pci_read_config_word(unsigned char bus, unsigned char dev_fn,
unsigned char offset, unsigned short *val) {
- *val = 0xffff;
+ *val = 0xffff;
if (offset&1) return PCIBIOS_BAD_REGISTER_NUMBER;
if (bus != 0 || (1<<PCI_SLOT(dev_fn) & 0xff8007fe)) {
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -688,9 +688,9 @@ direct_pci_read_config_word(unsigned char bus, unsigned char dev_fn,
}
static int
-direct_pci_read_config_dword(unsigned char bus, unsigned char dev_fn,
+direct_pci_read_config_dword(unsigned char bus, unsigned char dev_fn,
unsigned char offset, unsigned int *val) {
- *val = 0xffffffff;
+ *val = 0xffffffff;
if (offset&3) return PCIBIOS_BAD_REGISTER_NUMBER;
if (bus != 0 || (1<<PCI_SLOT(dev_fn) & 0xff8007fe)) {
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -702,19 +702,19 @@ direct_pci_read_config_dword(unsigned char bus, unsigned char dev_fn,
}
static int
-direct_pci_write_config_byte(unsigned char bus, unsigned char dev_fn,
+direct_pci_write_config_byte(unsigned char bus, unsigned char dev_fn,
unsigned char offset, unsigned char val) {
if (bus != 0 || (1<<PCI_SLOT(dev_fn) & 0xff8007fe)) {
return PCIBIOS_DEVICE_NOT_FOUND;
}
- out_8(pci->config_data + ((1<<PCI_SLOT(dev_fn))&~1)
- + (PCI_FUNC(dev_fn)<<8) + offset,
+ out_8(pci->config_data + ((1<<PCI_SLOT(dev_fn))&~1)
+ + (PCI_FUNC(dev_fn)<<8) + offset,
val);
return PCIBIOS_SUCCESSFUL;
}
static int
-direct_pci_write_config_word(unsigned char bus, unsigned char dev_fn,
+direct_pci_write_config_word(unsigned char bus, unsigned char dev_fn,
unsigned char offset, unsigned short val) {
if (offset&1) return PCIBIOS_BAD_REGISTER_NUMBER;
if (bus != 0 || (1<<PCI_SLOT(dev_fn) & 0xff8007fe)) {
@@ -728,7 +728,7 @@ direct_pci_write_config_word(unsigned char bus, unsigned char dev_fn,
}
static int
-direct_pci_write_config_dword(unsigned char bus, unsigned char dev_fn,
+direct_pci_write_config_dword(unsigned char bus, unsigned char dev_fn,
unsigned char offset, unsigned int val) {
if (offset&3) return PCIBIOS_BAD_REGISTER_NUMBER;
if (bus != 0 || (1<<PCI_SLOT(dev_fn) & 0xff8007fe)) {
@@ -765,35 +765,35 @@ void pci_read_bases(struct pci_dev *dev, unsigned int howmany)
u32 l, ml;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
- for(reg=0; reg<howmany; reg=nextreg)
+ for(reg=0; reg<howmany; reg=nextreg)
{
pci_resource *r;
nextreg=reg+1;
pci_read_config_dword(dev, REG, &l);
#if 0
- if (l == 0xffffffff /*AJF || !l*/) continue;
+ if (l == 0xffffffff /*AJF || !l*/) continue;
#endif
- /* Note that disabling the memory response of a host bridge
- * would lose data if a DMA transfer were in progress. In a
- * bootloader we don't care however. Also we can't print any
+ /* Note that disabling the memory response of a host bridge
+ * would lose data if a DMA transfer were in progress. In a
+ * bootloader we don't care however. Also we can't print any
* message for a while since we might just disable the console.
*/
- pci_write_config_word(dev, PCI_COMMAND, cmd &
+ pci_write_config_word(dev, PCI_COMMAND, cmd &
~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
pci_write_config_dword(dev, REG, ~0);
pci_read_config_dword(dev, REG, &ml);
pci_write_config_dword(dev, REG, l);
- /* Reenable the device now that we've played with
- * base registers.
+ /* Reenable the device now that we've played with
+ * base registers.
*/
pci_write_config_word(dev, PCI_COMMAND, cmd);
 /* seems to be an unused entry; skip it */
if ( ml == 0 || ml == 0xffffffff ) continue;
- if ((l &
+ if ((l &
(PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK))
== (PCI_BASE_ADDRESS_MEM_TYPE_64
|PCI_BASE_ADDRESS_SPACE_MEMORY)) {
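
The probe value 'ml' read back after writing ~0 encodes the region size: since only 1 bits are decoded, the size falls out of the mask directly. A sketch of the usual decoding, assuming a plain 32-bit BAR and a non-zero probe value:

static unsigned long bar_size_from_probe(unsigned int ml)
{
    unsigned int mask = (ml & PCI_BASE_ADDRESS_SPACE_IO)
        ? ~0x3U     /* I/O BAR: low 2 bits are flag bits    */
        : ~0xfU;    /* memory BAR: low 4 bits are flag bits */
    return ~(ml & mask) + 1;    /* only 1 bits are decoded */
}
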
@@ -864,7 +864,7 @@ u_int pci_scan_bus(struct pci_bus *bus)
dev->vendor = l & 0xffff;
dev->device = (l >> 16) & 0xffff;
- pcibios_read_config_dword(bus->number, devfn,
+ pcibios_read_config_dword(bus->number, devfn,
PCI_CLASS_REVISION, &class);
class >>= 8; /* upper 3 bytes */
dev->class = class;
@@ -1030,14 +1030,14 @@ u_int pci_scan_bus(struct pci_bus *bus)
#if 0
void
-pci_fixup(void)
+pci_fixup(void)
{
struct pci_dev *p;
struct pci_bus *bus;
- for (bus = &pci_root; bus; bus=bus->next)
+ for (bus = &pci_root; bus; bus=bus->next)
{
- for (p=bus->devices; p; p=p->sibling)
+ for (p=bus->devices; p; p=p->sibling)
{
}
}
@@ -1059,7 +1059,7 @@ static void print_pci_info()
for(pb= &pci_root; pb; pb=pb->children )
{
printk(" number %d, primary %d, secondary %d, subordinate %d\n",
- pb->number,
+ pb->number,
pb->primary,
pb->secondary,
pb->subordinate );
@@ -1076,7 +1076,7 @@ static void print_pci_info()
pd->vendor,
pd->device,
pd->irq );
-
+
}
printk("\n");
}
@@ -1088,7 +1088,7 @@ static void print_pci_info()
for (r=pci->resources; r; r= r->next)
{
printk(" bus %d, vendor %04x, device %04x, base %08x, size %08x, type %d\n",
- r->dev->bus->number,
+ r->dev->bus->number,
r->dev->vendor,
r->dev->device,
r->base,
@@ -1198,9 +1198,9 @@ static void recursive_bus_reconfigure( struct pci_bus *pbus )
childbus->subordinate );
#endif
-
- /*
+
+ /*
 ** use the current values & the saved ones to figure out
** the address spaces for the bridge
*/
@@ -1269,7 +1269,7 @@ static void recursive_bus_reconfigure( struct pci_bus *pbus )
printk("pci: pf memory %04x, limit %04x\n", base16, limit16);
#endif
#ifdef WRITE_BRIDGE_PF
- pcibios_write_config_dword(pdev->bus->number, pdev->devfn, PCI_PREF_BASE_UPPER32, 0);
+ pcibios_write_config_dword(pdev->bus->number, pdev->devfn, PCI_PREF_BASE_UPPER32, 0);
pcibios_write_config_word(pdev->bus->number, pdev->devfn, PCI_PREF_MEMORY_BASE, base16 );
pcibios_write_config_dword(pdev->bus->number, pdev->devfn, PCI_PREF_LIMIT_UPPER32, 0);
pcibios_write_config_word(pdev->bus->number, pdev->devfn, PCI_PREF_MEMORY_LIMIT, limit16 );
@@ -1280,7 +1280,7 @@ static void recursive_bus_reconfigure( struct pci_bus *pbus )
pcibios_write_config_word(pdev->bus->number, pdev->devfn, PCI_BRIDGE_CONTROL, (uint16_t)( PCI_BRIDGE_CTL_PARITY |
PCI_BRIDGE_CTL_SERR ));
- pcibios_write_config_word(pdev->bus->number, pdev->devfn, PCI_COMMAND, (uint16_t)( PCI_COMMAND_IO |
+ pcibios_write_config_word(pdev->bus->number, pdev->devfn, PCI_COMMAND, (uint16_t)( PCI_COMMAND_IO |
PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER |
PCI_COMMAND_PARITY |
@@ -1351,7 +1351,7 @@ static void recursive_bus_reconfigure( struct pci_bus *pbus )
else
{
/* memory space */
-
+
/* shift base pointer up to an integer multiple of the size of the desired region */
if( astart.start_pcimem % r->size )
astart.start_pcimem = (((astart.start_pcimem / r->size) + 1) * r->size);
@@ -1379,7 +1379,7 @@ static void recursive_bus_reconfigure( struct pci_bus *pbus )
-void pci_init(void)
+void pci_init(void)
{
PPC_DEVICE *hostbridge;
@@ -1388,18 +1388,18 @@ void pci_init(void)
return;
}
pci->last_dev_p = &(bd->pci_devices);
- hostbridge=residual_find_device(PROCESSORDEVICE, NULL,
+ hostbridge=residual_find_device(PROCESSORDEVICE, NULL,
BridgeController,
PCIBridge, -1, 0);
if (hostbridge) {
if (hostbridge->DeviceId.Interface==PCIBridgeIndirect) {
bd->pci_functions=&indirect_functions;
- /* Should be extracted from residual data,
+ /* Should be extracted from residual data,
* indeed MPC106 in CHRP mode is different,
* but we should not use residual data in
- * this case anyway.
+ * this case anyway.
*/
- pci->config_addr = ((volatile u_int *)
+ pci->config_addr = ((volatile u_int *)
(ptr_mem_map->io_base+0xcf8));
pci->config_data = ptr_mem_map->io_base+0xcfc;
} else if(hostbridge->DeviceId.Interface==PCIBridgeDirect) {
@@ -1412,7 +1412,7 @@ void pci_init(void)
u_int id0;
bd->pci_functions = &direct_functions;
 /* On all direct bridges I know of, the host bridge itself
- * appears as device 0 function 0.
+ * appears as device 0 function 0.
*/
pcibios_read_config_dword(0, 0, PCI_VENDOR_ID, &id0);
if (id0==~0U) {
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/pci.h b/c/src/lib/libbsp/powerpc/shared/bootloader/pci.h
index caf0c3e12f..3884760c8b 100644
--- a/c/src/lib/libbsp/powerpc/shared/bootloader/pci.h
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/pci.h
@@ -42,7 +42,7 @@
#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */
#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */
-#define PCI_STATUS_DEVSEL_FAST 0x000
+#define PCI_STATUS_DEVSEL_FAST 0x000
#define PCI_STATUS_DEVSEL_MEDIUM 0x200
#define PCI_STATUS_DEVSEL_SLOW 0x400
#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */
@@ -71,8 +71,8 @@
/*
* Base addresses specify locations in memory or I/O space.
- * Decoded size can be determined by writing a value of
- * 0xffffffff to the register, and reading it back. Only
+ * Decoded size can be determined by writing a value of
+ * 0xffffffff to the register, and reading it back. Only
* 1 bits are decoded.
*/
#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
@@ -96,7 +96,7 @@
/* Header type 0 (normal devices) */
#define PCI_CARDBUS_CIS 0x28
#define PCI_SUBSYSTEM_VENDOR_ID 0x2c
-#define PCI_SUBSYSTEM_ID 0x2e
+#define PCI_SUBSYSTEM_ID 0x2e
#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */
#define PCI_ROM_ADDRESS_ENABLE 0x01
#define PCI_ROM_ADDRESS_MASK (~0x7ffUL)
@@ -455,8 +455,8 @@
#define PCI_DEVICE_ID_PCTECH_SAMURAI_1 0x3010
#define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020
-#define PCI_VENDOR_ID_DPT 0x1044
-#define PCI_DEVICE_ID_DPT 0xa400
+#define PCI_VENDOR_ID_DPT 0x1044
+#define PCI_DEVICE_ID_DPT 0xa400
#define PCI_VENDOR_ID_OPTI 0x1045
#define PCI_DEVICE_ID_OPTI_92C178 0xc178
@@ -1072,17 +1072,17 @@
/* Functions used to access pci configuration space */
struct pci_config_access_functions {
- int (*read_config_byte)(unsigned char, unsigned char,
+ int (*read_config_byte)(unsigned char, unsigned char,
unsigned char, unsigned char *);
- int (*read_config_word)(unsigned char, unsigned char,
+ int (*read_config_word)(unsigned char, unsigned char,
unsigned char, unsigned short *);
- int (*read_config_dword)(unsigned char, unsigned char,
+ int (*read_config_dword)(unsigned char, unsigned char,
unsigned char, unsigned int *);
- int (*write_config_byte)(unsigned char, unsigned char,
+ int (*write_config_byte)(unsigned char, unsigned char,
unsigned char, unsigned char);
- int (*write_config_word)(unsigned char, unsigned char,
+ int (*write_config_word)(unsigned char, unsigned char,
unsigned char, unsigned short);
- int (*write_config_dword)(unsigned char, unsigned char,
+ int (*write_config_dword)(unsigned char, unsigned char,
unsigned char, unsigned int);
};
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/zlib.c b/c/src/lib/libbsp/powerpc/shared/bootloader/zlib.c
index 78ba7867fc..70e97abb6e 100644
--- a/c/src/lib/libbsp/powerpc/shared/bootloader/zlib.c
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/zlib.c
@@ -119,7 +119,7 @@ typedef uLong (*check_func) OF((uLong check, Bytef *buf, uInt len));
/* deflate.h -- internal compression state
* Copyright (C) 1995 Jean-loup Gailly
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* WARNING: this file should *not* be used by applications. It is
@@ -130,7 +130,7 @@ typedef uLong (*check_func) OF((uLong check, Bytef *buf, uInt len));
/*+++++*/
/* infblock.h -- header to use infblock.c
* Copyright (C) 1995 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* WARNING: this file should *not* be used by applications. It is
@@ -171,7 +171,7 @@ local int inflate_packet_flush OF((
/*+++++*/
/* inftrees.h -- header to use inftrees.c
* Copyright (C) 1995 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* WARNING: this file should *not* be used by applications. It is
@@ -233,7 +233,7 @@ local int inflate_trees_free OF((
/*+++++*/
/* infcodes.h -- header to use infcodes.c
* Copyright (C) 1995 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* WARNING: this file should *not* be used by applications. It is
@@ -262,7 +262,7 @@ local void inflate_codes_free OF((
/*+++++*/
/* inflate.c -- zlib interface to inflate modules
* Copyright (C) 1995 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* inflate private state */
@@ -294,7 +294,7 @@ struct internal_state {
/* mode independent information */
int nowrap; /* flag for no wrapper */
uInt wbits; /* log2(window size) (8..15, defaults to 15) */
- inflate_blocks_statef
+ inflate_blocks_statef
*blocks; /* current inflate_blocks state */
};
@@ -569,7 +569,7 @@ z_stream *z;
/*+++++*/
/* infutil.h -- types and macros common to blocks and codes
* Copyright (C) 1995 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* WARNING: this file should *not* be used by applications. It is
@@ -607,7 +607,7 @@ struct inflate_blocks_state {
} trees; /* if DTREE, decoding info for trees */
struct {
inflate_huft *tl, *td; /* trees to free */
- inflate_codes_statef
+ inflate_codes_statef
*codes;
} decode; /* if CODES, current state */
} sub; /* submode */
@@ -665,7 +665,7 @@ local int inflate_flush OF((
/*+++++*/
/* inffast.h -- header to use inffast.c
* Copyright (C) 1995 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* WARNING: this file should *not* be used by applications. It is
@@ -685,7 +685,7 @@ local int inflate_fast OF((
/*+++++*/
/* infblock.c -- interpret and process block types to last block
* Copyright (C) 1995 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* Table for deflate from PKZIP's appnote.txt. */
@@ -1133,7 +1133,7 @@ local int inflate_packet_flush(s)
/*+++++*/
/* inftrees.c -- generate Huffman trees for efficient decoding
* Copyright (C) 1995 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* simplify the use of the inflate_huft type with some defines */
@@ -1226,7 +1226,7 @@ uIntf *b; /* code lengths in bits (all assumed <= BMAX) */
uInt n; /* number of codes (assumed <= N_MAX) */
uInt s; /* number of simple-valued codes (0..s-1) */
uIntf *d; /* list of base values for non-simple codes */
-uIntf *e; /* list of extra bits for non-simple codes */
+uIntf *e; /* list of extra bits for non-simple codes */
inflate_huft * FAR *t; /* result: starting table */
uIntf *m; /* maximum lookup bits, returns actual */
z_stream *zs; /* for zalloc function */
@@ -1596,14 +1596,14 @@ z_stream *z; /* for zfree function */
q = (--p)->next;
ZFREE(z, p, p->word.Nalloc * sizeof(inflate_huft));
p = q;
- }
+ }
return Z_OK;
}
/*+++++*/
/* infcodes.c -- process literals and length/distance pairs
* Copyright (C) 1995 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* simplify the use of the inflate_huft type with some defines */
@@ -1844,7 +1844,7 @@ z_stream *z;
/*+++++*/
/* inflate_util.c -- data and routines common to blocks and codes
* Copyright (C) 1995 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* copy as much as possible from the sliding window to the output area */
@@ -1917,7 +1917,7 @@ int r;
/*+++++*/
/* inffast.c -- process literals and length/distance pairs fast
* Copyright (C) 1995 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* simplify the use of the inflate_huft type with some defines */
@@ -2078,7 +2078,7 @@ z_stream *z;
/*+++++*/
/* zutil.c -- target dependent utility functions for the compression library
* Copyright (C) 1995 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* From: zutil.c,v 1.8 1995/05/03 17:27:12 jloup Exp */
@@ -2099,7 +2099,7 @@ char *z_errmsg[] = {
/*+++++*/
/* adler32.c -- compute the Adler-32 checksum of a data stream
* Copyright (C) 1995 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* From: adler32.c,v 1.6 1995/05/03 17:27:08 jloup Exp */
diff --git a/c/src/lib/libbsp/powerpc/shared/bootloader/zlib.h b/c/src/lib/libbsp/powerpc/shared/bootloader/zlib.h
index 31485f4632..11b040595c 100644
--- a/c/src/lib/libbsp/powerpc/shared/bootloader/zlib.h
+++ b/c/src/lib/libbsp/powerpc/shared/bootloader/zlib.h
@@ -52,7 +52,7 @@
/* zconf.h -- configuration of the zlib compression library
* Copyright (C) 1995 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* From: zconf.h,v 1.12 1995/05/03 17:27:12 jloup Exp */
@@ -145,7 +145,7 @@ typedef uLong FAR uLongf;
#define ZLIB_VERSION "0.95P"
-/*
+/*
The 'zlib' compression library provides in-memory compression and
decompression functions, including integrity checks of the uncompressed
data. This version of the library supports only one compression method
@@ -262,7 +262,7 @@ extern char *zlib_version;
/* basic functions */
extern int inflateInit OF((z_stream *strm));
-/*
+/*
Initializes the internal stream state for decompression. The fields
zalloc and zfree must be initialized before by the caller. If zalloc and
zfree are set to Z_NULL, inflateInit updates them to use default allocation
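
A minimal calling-sequence sketch for the above, assuming the conventional zlib API; this embedded 0.95P copy may differ from later releases in its flush constants, so the flush argument is left as plain 0 here. Real callers loop, refilling next_in/next_out as the buffers drain.

/* Hypothetical one-shot decompression of a fully buffered stream. */
static int inflate_buffer(Bytef *src, uInt srclen, Bytef *dst, uInt dstlen)
{
    z_stream s;
    int r;

    s.zalloc = Z_NULL;          /* let inflateInit install defaults */
    s.zfree  = Z_NULL;
    s.next_in  = src;  s.avail_in  = srclen;
    s.next_out = dst;  s.avail_out = dstlen;
    if ((r = inflateInit(&s)) != Z_OK)
        return r;
    r = inflate(&s, 0);         /* 0: no special flush */
    inflateEnd(&s);
    return r == Z_STREAM_END ? Z_OK : r;
}
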
@@ -341,7 +341,7 @@ extern int inflateEnd OF((z_stream *strm));
extern int inflateInit2 OF((z_stream *strm,
int windowBits));
-/*
+/*
This is another version of inflateInit with more compression options. The
fields next_out, zalloc and zfree must be initialized before by the caller.
@@ -373,7 +373,7 @@ extern int inflateInit2 OF((z_stream *strm,
*/
extern int inflateSync OF((z_stream *strm));
-/*
+/*
Skips invalid compressed data until the special marker (see deflate()
above) can be found, or until all available input is skipped. No output
is provided.
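
A sketch of the intended recovery pattern (assumed conventional usage; hypothetical wrapper):

/* On corrupt input, try to skip to the next sync marker and resume,
 * instead of abandoning the whole stream. */
static int inflate_with_resync(z_stream *s)
{
    int r = inflate(s, 0);      /* 0: no special flush */
    if (r == Z_DATA_ERROR && inflateSync(s) == Z_OK)
        r = inflate(s, 0);      /* retry from the marker onward */
    return r;
}
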