summary | refs | log | tree | commit | diff | stats
path: root/c/src/lib/libcpu/powerpc/mpc6xx
diff options
context:
space:
mode:
author: Ralf Corsepius <ralf.corsepius@rtems.org> 2011-02-11 09:46:53 +0000
committer: Ralf Corsepius <ralf.corsepius@rtems.org> 2011-02-11 09:46:53 +0000
commit: f9acc339fe66dce682d124d98956dd65af467676 (patch)
tree: 8af3a889cbb103d58797db6eff866511e4d36d4d /c/src/lib/libcpu/powerpc/mpc6xx
parent: 2011-02-11 Ralf Corsépius <ralf.corsepius@rtems.org> (diff)
download: rtems-f9acc339fe66dce682d124d98956dd65af467676.tar.bz2
2011-02-11 Ralf Corsépius <ralf.corsepius@rtems.org>
* e500/mmu/mmu.c, mpc505/ictrl/ictrl.c, mpc505/timer/timer.c, mpc5xx/ictrl/ictrl.c, mpc5xx/timer/timer.c, mpc6xx/altivec/vec_sup.c, mpc6xx/clock/c_clock.c, mpc6xx/mmu/bat.c, mpc6xx/mmu/bat.h, mpc6xx/mmu/pte121.c, mpc8260/timer/timer.c, mpc8xx/timer/timer.c, new-exceptions/cpu.c, new-exceptions/bspsupport/ppc_exc_initialize.c, ppc403/clock/clock.c, ppc403/console/console.c, ppc403/console/console.c.polled, ppc403/console/console405.c, ppc403/irq/ictrl.c, ppc403/tty_drv/tty_drv.c, rtems/powerpc/cache.h, shared/include/powerpc-utility.h, shared/src/cache.c: Use "__asm__" instead of "asm" for improved c99-compliance.
Diffstat (limited to 'c/src/lib/libcpu/powerpc/mpc6xx')
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup.c  |  4
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.c    |  2
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c          |  6
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.h          |  2
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c       | 18
5 files changed, 16 insertions, 16 deletions
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup.c b/c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup.c
index ae9a5097a2..4fa16e5aa8 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup.c
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup.c
@@ -141,8 +141,8 @@ rst_MSR_VE(uint32_t old)
*/
static void dummy(void) __attribute__((noinline));
-/* add (empty) asm statement to make sure this isn't optimized away */
-static void dummy(void) { asm volatile(""); }
+/* add (empty) __asm__ statement to make sure this isn't optimized away */
+static void dummy(void) { __asm__ volatile(""); }
static unsigned probe_r1(void) __attribute__((noinline));
static unsigned probe_r1(void)
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.c b/c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.c
index 0f7fe86ef0..9983a78fdb 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.c
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.c
@@ -133,7 +133,7 @@ int decr;
do {
register uint32_t flags;
rtems_interrupt_disable(flags);
- asm volatile (
+ __asm__ volatile (
"mfdec %0; add %0, %0, %1; mtdec %0"
: "=&r"(decr)
: "r"(Clock_Decrementer_value));
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c
index c52e1b6249..65dc92a754 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c
@@ -124,7 +124,7 @@ SPR_RO (HID0);
static void
set_hid0_sync (unsigned long val)
{
- asm volatile (
+ __asm__ volatile (
" sync \n"
" isync \n"
" mtspr %0, %1 \n"
@@ -221,7 +221,7 @@ do_dssall (void)
* rely on consistent compiler flags).
*/
#define DSSALL 0x7e00066c /* dssall opcode */
- asm volatile (" .long %0"::"i" (DSSALL));
+ __asm__ volatile (" .long %0"::"i" (DSSALL));
#undef DSSALL
}
}
@@ -287,7 +287,7 @@ check_bat_size (unsigned long size)
if (0xffffffff == size) {
bit = 32;
} else {
- asm volatile (" cntlzw %0, %1":"=r" (bit):"r" (size));
+ __asm__ volatile (" cntlzw %0, %1":"=r" (bit):"r" (size));
bit = 31 - bit;
if (1 << bit != size)
return -1;
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.h b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.h
index c7f68a97e8..28e1748a13 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.h
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.h
@@ -65,7 +65,7 @@ extern int getdbat(int bat_index, unsigned long *pu, unsigned long *pl);
/* Same as getdbat but reads IBAT */
extern int getibat(int bat_index, unsigned long *pu, unsigned long *pl);
-/* Do not use the asm routines; they are obsolete; use setdbat() instead */
+/* Do not use the __asm__ routines; they are obsolete; use setdbat() instead */
extern void asm_setdbat0(unsigned int uperPart, unsigned int lowerPart);
extern void asm_setdbat1(unsigned int uperPart, unsigned int lowerPart);
extern void asm_setdbat2(unsigned int uperPart, unsigned int lowerPart);
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c
index 4389d392e4..f11727f62e 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c
@@ -157,7 +157,7 @@
static uint32_t
seg2vsid (uint32_t ea)
{
- asm volatile ("mfsrin %0, %0":"=r" (ea):"0" (ea));
+ __asm__ volatile ("mfsrin %0, %0":"=r" (ea):"0" (ea));
return ea & ((1 << LD_VSID_SIZE) - 1);
}
#else
@@ -539,9 +539,9 @@ triv121PgTblMap (Triv121PgTbl pt,
uint32_t flags;
rtems_interrupt_disable (flags);
/* order setting 'v' after writing everything else */
- asm volatile ("eieio":::"memory");
+ __asm__ volatile ("eieio":::"memory");
pte->v = 1;
- asm volatile ("sync":::"memory");
+ __asm__ volatile ("sync":::"memory");
rtems_interrupt_enable (flags);
} else {
pte->v = 1;
@@ -869,7 +869,7 @@ triv121UnmapEa (unsigned long ea)
rtems_interrupt_disable (flags);
pte->v = 0;
do_dssall ();
- asm volatile (" sync \n\t"
+ __asm__ volatile (" sync \n\t"
" tlbie %0 \n\t"
" eieio \n\t"
" tlbsync \n\t"
@@ -916,7 +916,7 @@ do_dssall (void)
* rely on consistent compiler flags).
*/
#define DSSALL 0x7e00066c /* dssall opcode */
- asm volatile (" .long %0"::"i" (DSSALL));
+ __asm__ volatile (" .long %0"::"i" (DSSALL));
#undef DSSALL
}
}
@@ -946,21 +946,21 @@ triv121ChangeEaAttributes (unsigned long ea, int wimg, int pp)
if (wimg < 0 && pp < 0)
return pte;
- asm volatile ("mfmsr %0":"=r" (msr));
+ __asm__ volatile ("mfmsr %0":"=r" (msr));
/* switch MMU and IRQs off */
SYNC_LONGJMP (msr & ~(MSR_EE | MSR_DR | MSR_IR));
pte->v = 0;
do_dssall ();
- asm volatile ("sync":::"memory");
+ __asm__ volatile ("sync":::"memory");
if (wimg >= 0)
pte->wimg = wimg;
if (pp >= 0)
pte->pp = pp;
- asm volatile ("tlbie %0; eieio"::"r" (ea):"memory");
+ __asm__ volatile ("tlbie %0; eieio"::"r" (ea):"memory");
pte->v = 1;
- asm volatile ("tlbsync; sync":::"memory");
+ __asm__ volatile ("tlbsync; sync":::"memory");
/* restore, i.e., switch MMU and IRQs back on */
SYNC_LONGJMP (msr);