/**
 * @file
 *
 * @ingroup ppc_exc
 *
 * @brief PowerPC Exceptions implementation.
 */

/*
 * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
 *                    Canon Centre Recherche France.
 *
 * Copyright (C) 2007 Till Straumann <strauman@slac.stanford.edu>
 *
 * Copyright (C) 2009 embedded brains GmbH.
 *
 * Derived from file "libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c".
 * Derived from file "libcpu/powerpc/new-exceptions/e500_raw_exc_init.c".
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.com/license/LICENSE.
 *
 * $Id$
 */

#include <rtems.h>

#include <bsp/vectors.h>

uint32_t ppc_exc_cache_wb_check = 1;

#define MTIVPR(prefix) asm volatile ("mtivpr %0" : : "r" (prefix))
#define MTIVOR(x, vec) asm volatile ("mtivor"#x" %0" : : "r" (vec))
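
/*
 * Illustrative expansion (not part of the build): the stringification in
 * MTIVOR() pastes the IVOR number into the instruction mnemonic, so
 *
 *   MTIVOR(3, vec)
 *
 * expands to
 *
 *   asm volatile ("mtivor3 %0" : : "r" (vec));
 *
 * which moves vec into the IVOR3 special purpose register.
 */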

static void ppc_exc_initialize_e500(void)
{
  /* Interrupt vector prefix register */
  MTIVPR(ppc_exc_vector_base);
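
  /*
   * On Book E parts the effective handler address is composed from both
   * registers, roughly (IVPR & 0xffff0000) | (IVORn & 0xfff0): IVPR
   * supplies the upper address bits and each IVORn a 16-byte aligned
   * offset below that base.
   */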

  /* Interrupt vector offset registers */
  MTIVOR(0,  ppc_exc_vector_address(ASM_BOOKE_CRIT_VECTOR)); /* Critical input not (yet) supported; use reset vector */
  MTIVOR(1,  ppc_exc_vector_address(ASM_MACH_VECTOR));
  MTIVOR(2,  ppc_exc_vector_address(ASM_PROT_VECTOR));
  MTIVOR(3,  ppc_exc_vector_address(ASM_ISI_VECTOR));
  MTIVOR(4,  ppc_exc_vector_address(ASM_EXT_VECTOR));
  MTIVOR(5,  ppc_exc_vector_address(ASM_ALIGN_VECTOR));
  MTIVOR(6,  ppc_exc_vector_address(ASM_PROG_VECTOR));
  MTIVOR(7,  ppc_exc_vector_address(ASM_FLOAT_VECTOR));
  MTIVOR(8,  ppc_exc_vector_address(ASM_SYS_VECTOR));
  MTIVOR(9,  ppc_exc_vector_address(0x0b)); /* APU unavailable */
  MTIVOR(10, ppc_exc_vector_address(ASM_BOOKE_DEC_VECTOR));
  MTIVOR(11, ppc_exc_vector_address(ASM_BOOKE_FIT_VECTOR));
  MTIVOR(12, ppc_exc_vector_address(ASM_BOOKE_WDOG_VECTOR));
  MTIVOR(13, ppc_exc_vector_address(ASM_60X_DSMISS_VECTOR));
  MTIVOR(14, ppc_exc_vector_address(ASM_60X_DLMISS_VECTOR));
  MTIVOR(15, ppc_exc_vector_address(ASM_TRACE_VECTOR));
  MTIVOR(32, ppc_exc_vector_address(ASM_60X_VEC_VECTOR));
  MTIVOR(33, ppc_exc_vector_address(0x16)); /* SPE data exception */
  MTIVOR(34, ppc_exc_vector_address(0x15)); /* SPE round exception */
  MTIVOR(35, ppc_exc_vector_address(ASM_60X_PERFMON_VECTOR));
}

static void ppc_exc_initialize_e200(void)
{
  /* Interrupt vector prefix register */
  MTIVPR(ppc_exc_vector_base);

  /* Interrupt vector offset registers */
  MTIVOR(0,  0); /* Critical input */
  MTIVOR(1,  ppc_exc_vector_address(ASM_MACH_VECTOR));
  MTIVOR(2,  ppc_exc_vector_address(ASM_PROT_VECTOR));
  MTIVOR(3,  ppc_exc_vector_address(ASM_ISI_VECTOR));
  MTIVOR(4,  ppc_exc_vector_address(ASM_EXT_VECTOR));
  MTIVOR(5,  ppc_exc_vector_address(ASM_ALIGN_VECTOR));
  MTIVOR(6,  ppc_exc_vector_address(ASM_PROG_VECTOR));
  MTIVOR(7,  ppc_exc_vector_address(ASM_FLOAT_VECTOR));
  MTIVOR(8,  ppc_exc_vector_address(ASM_SYS_VECTOR));
  MTIVOR(9,  0); /* APU unavailable */
  MTIVOR(10, ppc_exc_vector_address(ASM_BOOKE_DEC_VECTOR));
  MTIVOR(11, ppc_exc_vector_address(ASM_BOOKE_FIT_VECTOR));
  MTIVOR(12, ppc_exc_vector_address(ASM_BOOKE_WDOG_VECTOR));
  MTIVOR(13, ppc_exc_vector_address(ASM_BOOKE_ITLBMISS_VECTOR));
  MTIVOR(14, ppc_exc_vector_address(ASM_BOOKE_DTLBMISS_VECTOR));
  MTIVOR(15, ppc_exc_vector_address(ASM_TRACE_VECTOR));
  MTIVOR(32, ppc_exc_vector_address(ASM_E200_SPE_UNAVAILABLE_VECTOR));
  MTIVOR(33, ppc_exc_vector_address(ASM_E200_SPE_DATA_VECTOR));
  MTIVOR(34, ppc_exc_vector_address(ASM_E200_SPE_ROUND_VECTOR));
}

rtems_status_code ppc_exc_initialize(
  uint32_t interrupt_disable_mask,
  uintptr_t interrupt_stack_begin,
  uintptr_t interrupt_stack_size
)
{
  rtems_status_code sc = RTEMS_SUCCESSFUL;
  const ppc_exc_categories *const categories = ppc_exc_current_categories();
  uintptr_t const interrupt_stack_end = interrupt_stack_begin + interrupt_stack_size;
  uintptr_t interrupt_stack_pointer = interrupt_stack_end - PPC_MINIMUM_STACK_FRAME_SIZE;
  unsigned vector = 0;
  uint32_t sda_base = 0;
  uint32_t r13 = 0;

  if (categories == NULL) {
    return RTEMS_NOT_IMPLEMENTED;
  }

  /* Assembly code needs SDA_BASE in r13 (SVR4 or EABI). Make sure
   * the early initialization code put it there.
   */
  asm volatile (
    "lis %0, _SDA_BASE_@h\n"
    "ori %0, %0, _SDA_BASE_@l\n"
    "mr  %1, 13\n"
    : "=r" (sda_base), "=r"(r13)
  );

  if (sda_base != r13) {
    return RTEMS_NOT_CONFIGURED;
  }
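
  /*
   * Note: _SDA_BASE_ is normally provided by the linker script and the
   * BSP start code is expected to have loaded it into r13 before this
   * routine runs.
   */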

  /* Ensure proper interrupt stack alignment */
  interrupt_stack_pointer &= ~((uint32_t) CPU_STACK_ALIGNMENT - 1);
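
  /*
   * For example, with CPU_STACK_ALIGNMENT == 16 the mask is ~0xf, so a
   * stack pointer value of 0x1234 would become 0x1230.
   */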

  /* Tag interrupt stack bottom: a zero back chain terminates stack traces */
  *(uint32_t *) interrupt_stack_pointer = 0;

  /* Move interrupt stack values to special purpose registers */
  PPC_SET_SPECIAL_PURPOSE_REGISTER(SPRG1, interrupt_stack_pointer);
  PPC_SET_SPECIAL_PURPOSE_REGISTER(SPRG2, interrupt_stack_begin);
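
  /*
   * The low-level exception prologue presumably uses these values to
   * switch to the interrupt stack: SPRG1 holds the initial interrupt
   * stack pointer and SPRG2 the stack area begin, so that exceptions
   * already running on the interrupt stack can be detected.
   */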

  ppc_interrupt_set_disable_mask(interrupt_disable_mask);

  /* Use current MMU / RI settings when running C exception handlers */
  ppc_exc_msr_bits = ppc_machine_state_register() & (MSR_DR | MSR_IR | MSR_RI);

  if (ppc_cpu_is(PPC_e200z6)) {
    ppc_exc_initialize_e200();
  } else if (ppc_cpu_is_bookE() == PPC_BOOKE_STD || ppc_cpu_is_bookE() == PPC_BOOKE_E500) {
    ppc_exc_initialize_e500();
  }

  for (vector = 0; vector <= LAST_VALID_EXC; ++vector) {
    ppc_exc_category category = ppc_exc_category_for_vector(categories, vector);

    if (category != PPC_EXC_INVALID) {
      void *const vector_address = ppc_exc_vector_address(vector);
      uint32_t prologue [16];
      size_t prologue_size = sizeof(prologue);

      sc = ppc_exc_make_prologue(vector, category, prologue, &prologue_size);
      if (sc != RTEMS_SUCCESSFUL) {
        return RTEMS_INTERNAL_ERROR;
      }

      /* Install the prologue; ppc_code_copy() also synchronizes the
       * caches so that the freshly written code is visible to
       * instruction fetches */
      ppc_code_copy(vector_address, prologue, prologue_size);
    }
  }

  /* If we are on a classic PPC with MSR_DR enabled then
   * assert that the mapping for at least this task's
   * stack is write-back-caching enabled (see README/CAVEATS).
   * Do this only if the cache is physically enabled.
   * Since it is not easy to figure that out in a
   * generic way we need help from the BSP: BSPs
   * which run entirely without the cache may set
   * ppc_exc_cache_wb_check to zero prior to calling
   * this routine.
   *
   * We run this check only after exception handling is
   * initialized so that we have some chance to get
   * information printed if it fails.
   *
   * Note that it is unsafe to ignore this issue; if
   * the check fails, do NOT disable it unless caches
   * are always physically disabled.
   */
  if (ppc_exc_cache_wb_check && (MSR_DR & ppc_exc_msr_bits)) {
    /* The size of 63 assumes cache lines are at most 32 bytes; it
     * guarantees that one fully contained, 32-byte aligned line fits
     * within the buffer */
    uint8_t dummy[63];
    uintptr_t p = (uintptr_t) dummy;
    /* If the dcbz instruction raises an alignment exception
     * then the stack is mapped as write-through or caching-disabled.
     * The low-level code is currently not capable of dealing
     * with this.
     */
    p = (p + 31U) & ~31U; /* round up to a 32-byte boundary */
    asm volatile ("dcbz 0, %0"::"b" (p));
    /* If we make it through here then things seem to be OK */
  }

  return RTEMS_SUCCESSFUL;
}
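
/*
 * Usage sketch (hypothetical BSP startup code; the mask macro and stack
 * symbols are illustrative and may differ per BSP):
 *
 *   rtems_status_code sc = ppc_exc_initialize(
 *     PPC_INTERRUPT_DISABLE_MASK_DEFAULT,
 *     (uintptr_t) interrupt_stack_start,
 *     (uintptr_t) interrupt_stack_size
 *   );
 *   if (sc != RTEMS_SUCCESSFUL) {
 *     rtems_fatal_error_occurred(sc);
 *   }
 */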