path: root/bsps/shared
Diffstat (limited to 'bsps/shared')
-rw-r--r--  bsps/shared/dev/clock/arm-generic-timer.c    19
-rw-r--r--  bsps/shared/dev/clock/bcm2835-system-timer.c    94
-rw-r--r--  bsps/shared/dev/clock/clockimpl.h    54
-rw-r--r--  bsps/shared/dev/clock/xil-ttc.c    214
-rw-r--r--  bsps/shared/dev/i2c/cadence-i2c.c    1
-rw-r--r--  bsps/shared/dev/irq/arm-gicv2-get-attributes.c    5
-rw-r--r--  bsps/shared/dev/irq/arm-gicv2-zynqmp.c    6
-rw-r--r--  bsps/shared/dev/irq/arm-gicv2.c    36
-rw-r--r--  bsps/shared/dev/irq/arm-gicv3.c    35
-rw-r--r--  bsps/shared/dev/nand/xnandpsu.c    103
-rw-r--r--  bsps/shared/dev/nand/xnandpsu_bbm.c    89
-rw-r--r--  bsps/shared/dev/rtc/mcp7940m.c    361
-rw-r--r--  bsps/shared/dev/serial/console-polled.c    2
-rw-r--r--  bsps/shared/dev/serial/legacy-console.c    6
-rw-r--r--  bsps/shared/dev/serial/zynq-uart-kernel-io.c    88
-rw-r--r--  bsps/shared/dev/serial/zynq-uart-polled.c    28
-rw-r--r--  bsps/shared/dev/serial/zynq-uart.c    19
-rw-r--r--  bsps/shared/dev/spi/xqspipsu-flash-helper.c    177
-rw-r--r--  bsps/shared/dev/spi/xqspipsu.c    38
-rw-r--r--  bsps/shared/grlib/btimer/gptimer.c    178
-rw-r--r--  bsps/shared/grlib/btimer/tlib_ckinit.c    4
-rw-r--r--  bsps/shared/grlib/drvmgr/ambapp_bus_grlib.c    17
-rw-r--r--  bsps/shared/grlib/uart/apbuart_cons.c    137
-rw-r--r--  bsps/shared/grlib/uart/apbuart_polled.c    52
-rw-r--r--  bsps/shared/grlib/uart/apbuart_termios.c    49
-rw-r--r--  bsps/shared/irq/irq-affinity.c    1
-rw-r--r--  bsps/shared/irq/irq-generic.c    9
-rw-r--r--  bsps/shared/rtems-version.c    9
-rw-r--r--  bsps/shared/start/bspgetworkarea-default.c    1
-rw-r--r--  bsps/shared/xil/arm/ARMv8/xil_cache.c (renamed from bsps/shared/xil/xil_cache.c)    0
-rw-r--r--  bsps/shared/xil/arm/cortexr5/xil_cache.c    561
-rw-r--r--  bsps/shared/xil/arm/cortexr5/xil_mpu.c    645
32 files changed, 2723 insertions, 315 deletions
diff --git a/bsps/shared/dev/clock/arm-generic-timer.c b/bsps/shared/dev/clock/arm-generic-timer.c
index 1188800170..ba159f6833 100644
--- a/bsps/shared/dev/clock/arm-generic-timer.c
+++ b/bsps/shared/dev/clock/arm-generic-timer.c
@@ -54,15 +54,12 @@ typedef struct {
static arm_gt_clock_context arm_gt_clock_instance;
-/* This is defined in dev/clock/clockimpl.h */
-void Clock_isr(rtems_irq_hdl_param arg);
-
-static void arm_gt_clock_at_tick(void)
+static void arm_gt_clock_at_tick(arm_gt_clock_context *ctx)
{
uint64_t cval;
uint32_t interval;
- interval = arm_gt_clock_instance.interval;
+ interval = ctx->interval;
cval = arm_gt_clock_get_compare_value();
cval += interval;
arm_gt_clock_set_compare_value(cval);
@@ -71,7 +68,7 @@ static void arm_gt_clock_at_tick(void)
#endif /* ARM_GENERIC_TIMER_UNMASK_AT_TICK */
}
-static void arm_gt_clock_handler_install(void)
+static void arm_gt_clock_handler_install(rtems_interrupt_handler handler)
{
rtems_status_code sc;
@@ -79,8 +76,8 @@ static void arm_gt_clock_handler_install(void)
arm_gt_clock_instance.irq,
"Clock",
RTEMS_INTERRUPT_UNIQUE,
- (rtems_interrupt_handler) Clock_isr,
- NULL
+ handler,
+ &arm_gt_clock_instance
);
if (sc != RTEMS_SUCCESSFUL) {
bsp_fatal(BSP_ARM_FATAL_GENERIC_TIMER_CLOCK_IRQ_INSTALL);
@@ -185,14 +182,14 @@ RTEMS_SYSINIT_ITEM(
RTEMS_SYSINIT_ORDER_FIRST
);
-#define Clock_driver_support_at_tick() \
- arm_gt_clock_at_tick()
+#define Clock_driver_support_at_tick(arg) \
+ arm_gt_clock_at_tick(arg)
#define Clock_driver_support_initialize_hardware() \
arm_gt_clock_initialize()
#define Clock_driver_support_install_isr(isr) \
- arm_gt_clock_handler_install()
+ arm_gt_clock_handler_install(isr)
/* Include shared source clock driver code */
#include "../../shared/dev/clock/clockimpl.h"
diff --git a/bsps/shared/dev/clock/bcm2835-system-timer.c b/bsps/shared/dev/clock/bcm2835-system-timer.c
new file mode 100644
index 0000000000..bb8490d03a
--- /dev/null
+++ b/bsps/shared/dev/clock/bcm2835-system-timer.c
@@ -0,0 +1,94 @@
+/**
+ * @file
+ *
+ * @ingroup RTEMSDriverClockImpl
+ *
+ * @brief This source file contains the implementation of the BCM2835 Clock
+ * Driver.
+ */
+
+/*
+ * Copyright (c) 2013 Alan Cudmore
+ * Copyright (c) 2016 Pavel Pisa
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ *
+ * http://www.rtems.org/license/LICENSE
+ *
+*/
+
+#include <rtems.h>
+#include <bsp.h>
+#include <bsp/irq.h>
+#include <bsp/irq-generic.h>
+#include <bsp/raspberrypi.h>
+#include <rtems/timecounter.h>
+
+static struct timecounter raspberrypi_tc;
+
+static uint32_t raspberrypi_clock_get_timecount(struct timecounter *tc)
+{
+ return BCM2835_REG(BCM2835_GPU_TIMER_CLO);
+}
+
+static void raspberrypi_clock_at_tick(void)
+{
+ uint32_t act_val;
+ uint32_t next_cmp = BCM2835_REG(BCM2835_GPU_TIMER_C3);
+ next_cmp += rtems_configuration_get_microseconds_per_tick();
+ BCM2835_REG(BCM2835_GPU_TIMER_C3) = next_cmp;
+ act_val = BCM2835_REG(BCM2835_GPU_TIMER_CLO);
+
+ /*
+ * Clear interrupt only if there is time left to the next tick.
+ * If time of the next tick has already passed then interrupt
+ * request stays active and fires immediately after current tick
+ * processing is finished.
+ */
+ if ((int32_t)(next_cmp - act_val) > 0)
+ BCM2835_REG(BCM2835_GPU_TIMER_CS) = BCM2835_GPU_TIMER_CS_M3;
+}
+
+static void raspberrypi_clock_handler_install_isr(
+ rtems_interrupt_handler clock_isr
+)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_interrupt_handler_install(
+ BCM2835_IRQ_ID_GPU_TIMER_M3,
+ "Clock",
+ RTEMS_INTERRUPT_UNIQUE,
+ clock_isr,
+ NULL
+ );
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ rtems_fatal_error_occurred(0xdeadbeef);
+ }
+}
+
+static void raspberrypi_clock_initialize_hardware(void)
+{
+ uint32_t next_cmp = BCM2835_REG(BCM2835_GPU_TIMER_CLO);
+ next_cmp += rtems_configuration_get_microseconds_per_tick();
+ BCM2835_REG(BCM2835_GPU_TIMER_C3) = next_cmp;
+ BCM2835_REG(BCM2835_GPU_TIMER_CS) = BCM2835_GPU_TIMER_CS_M3;
+
+ raspberrypi_tc.tc_get_timecount = raspberrypi_clock_get_timecount;
+ raspberrypi_tc.tc_counter_mask = 0xffffffff;
+ raspberrypi_tc.tc_frequency = 1000000; /* 1 MHz */
+ raspberrypi_tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER;
+ rtems_timecounter_install(&raspberrypi_tc);
+}
+
+#define Clock_driver_support_at_tick(arg) raspberrypi_clock_at_tick()
+
+#define Clock_driver_support_initialize_hardware() raspberrypi_clock_initialize_hardware()
+
+#define Clock_driver_support_install_isr(clock_isr) \
+ raspberrypi_clock_handler_install_isr(clock_isr)
+
+#define CLOCK_DRIVER_USE_ONLY_BOOT_PROCESSOR 1
+
+#include "../../../shared/dev/clock/clockimpl.h"
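A note on the interrupt acknowledge above: the test (int32_t)(next_cmp - act_val) > 0 relies on modular 32-bit arithmetic, so it stays correct even when the free-running system timer counter wraps around. A minimal standalone sketch of the same idiom, with illustrative names that are not part of the driver:

#include <stdbool.h>
#include <stdint.h>

/*
 * True if "deadline" is still in the future relative to "now".  Both values
 * are samples of the same free-running 32-bit counter, so the signed
 * difference is wrap-safe as long as the distance is less than 2^31 ticks.
 */
static bool deadline_is_pending(uint32_t now, uint32_t deadline)
{
  return (int32_t) (deadline - now) > 0;
}

For example, with now = 0xFFFFFFF0 and deadline = 0x00000010 the unsigned difference is 0x20, so the deadline is reported as pending even though the counter wrapped in between.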
diff --git a/bsps/shared/dev/clock/clockimpl.h b/bsps/shared/dev/clock/clockimpl.h
index e922c0b320..b27f7c15bc 100644
--- a/bsps/shared/dev/clock/clockimpl.h
+++ b/bsps/shared/dev/clock/clockimpl.h
@@ -42,6 +42,7 @@
#include <bsp.h>
#include <rtems/clockdrv.h>
#include <rtems/score/percpu.h>
+#include <rtems/score/processormaskimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/timecounter.h>
#include <rtems/score/thread.h>
@@ -63,6 +64,13 @@
#error "Fast Idle PLUS n ISRs per tick is not supported"
#endif
+#if defined(BSP_FEATURE_IRQ_EXTENSION) || \
+ (CPU_SIMPLE_VECTORED_INTERRUPTS != TRUE)
+typedef void * Clock_isr_argument;
+#else
+typedef rtems_vector_number Clock_isr_argument;
+#endif
+
/**
* @brief Do nothing by default.
*/
@@ -81,7 +89,7 @@
* @brief Do nothing by default.
*/
#ifndef Clock_driver_support_at_tick
- #define Clock_driver_support_at_tick()
+ #define Clock_driver_support_at_tick( arg ) do { (void) arg; } while (0)
#endif
/**
@@ -96,8 +104,9 @@
* instead of the default.
*/
#ifndef Clock_driver_timecounter_tick
-static void Clock_driver_timecounter_tick( void )
+static void Clock_driver_timecounter_tick( Clock_isr_argument arg )
{
+ (void) arg;
#if defined(CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER)
rtems_clock_tick();
#elif defined(RTEMS_SMP) && defined(CLOCK_DRIVER_USE_ONLY_BOOT_PROCESSOR)
@@ -139,25 +148,31 @@ volatile uint32_t Clock_driver_ticks;
#error "Clock_driver_support_shutdown_hardware() is no longer supported"
#endif
+#if CLOCK_DRIVER_USE_FAST_IDLE
+static bool _Clock_Has_watchdogs(const Per_CPU_Control *cpu)
+{
+ size_t i;
+
+ for (i = 0; i < RTEMS_ARRAY_SIZE(cpu->Watchdog.Header); ++i) {
+ if (_Watchdog_Header_first(&cpu->Watchdog.Header[i]) != NULL) {
+ return true;
+ }
+ }
+
+ return false;
+}
+#endif
+
/**
* @brief Clock_isr
*
* This is the clock tick interrupt handler.
*
- * @param vector Vector number.
+ * @param arg is the clock interrupt handler argument.
*/
-#if defined(BSP_FEATURE_IRQ_EXTENSION) || \
- (CPU_SIMPLE_VECTORED_INTERRUPTS != TRUE)
-void Clock_isr(void *arg);
-void Clock_isr(void *arg)
-{
-#else
-rtems_isr Clock_isr(rtems_vector_number vector);
-rtems_isr Clock_isr(
- rtems_vector_number vector
-)
+void Clock_isr( Clock_isr_argument arg );
+void Clock_isr( Clock_isr_argument arg )
{
-#endif
/*
* Accurate count of ISRs
*/
@@ -165,7 +180,7 @@ rtems_isr Clock_isr(
#if CLOCK_DRIVER_USE_FAST_IDLE
{
- Clock_driver_timecounter_tick();
+ Clock_driver_timecounter_tick( arg );
if (_SMP_Get_processor_maximum() == 1) {
struct timecounter *tc;
@@ -182,6 +197,7 @@ rtems_isr Clock_isr(
cpu_self->thread_dispatch_disable_level == cpu_self->isr_nest_level
&& cpu_self->heir == cpu_self->executing
&& cpu_self->executing->is_idle
+ && _Clock_Has_watchdogs(cpu_self)
) {
ISR_lock_Context lock_context;
@@ -194,7 +210,7 @@ rtems_isr Clock_isr(
}
}
- Clock_driver_support_at_tick();
+ Clock_driver_support_at_tick( arg );
}
#else
/*
@@ -202,14 +218,14 @@ rtems_isr Clock_isr(
*
* The counter/timer may or may not be set to automatically reload.
*/
- Clock_driver_support_at_tick();
+ Clock_driver_support_at_tick( arg );
#if CLOCK_DRIVER_ISRS_PER_TICK
/*
* The driver is multiple ISRs per clock tick.
*/
if ( !Clock_driver_isrs ) {
- Clock_driver_timecounter_tick();
+ Clock_driver_timecounter_tick( arg );
Clock_driver_isrs = CLOCK_DRIVER_ISRS_PER_TICK_VALUE;
}
@@ -218,7 +234,7 @@ rtems_isr Clock_isr(
/*
* The driver is one ISR per clock tick.
*/
- Clock_driver_timecounter_tick();
+ Clock_driver_timecounter_tick( arg );
#endif
#endif
}
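The clockimpl.h change above means that Clock_isr() now forwards its Clock_isr_argument to the Clock_driver_support_at_tick() hook, so a BSP clock driver can operate on a per-driver context instead of a file-scope global, exactly as the arm-generic-timer.c hunk does. A minimal sketch of that pattern for a hypothetical driver follows; only the Clock_driver_support_* macros and the clockimpl.h include are the real interface, every other name is illustrative:

typedef struct {
  uint32_t interval;
} my_clock_context;

static my_clock_context my_clock_instance;

static void my_clock_at_tick(my_clock_context *ctx)
{
  /* Reprogram the timer hardware using ctx->interval. */
}

static void my_clock_install_isr(rtems_interrupt_handler handler)
{
  /*
   * Install "handler" (this is Clock_isr) with &my_clock_instance as the
   * handler argument, for example via rtems_interrupt_handler_install().
   */
}

#define Clock_driver_support_at_tick(arg) my_clock_at_tick(arg)

#define Clock_driver_support_install_isr(isr) my_clock_install_isr(isr)

#include "../../shared/dev/clock/clockimpl.h"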
diff --git a/bsps/shared/dev/clock/xil-ttc.c b/bsps/shared/dev/clock/xil-ttc.c
new file mode 100644
index 0000000000..624845d71c
--- /dev/null
+++ b/bsps/shared/dev/clock/xil-ttc.c
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSDriverClockXilTTC
+ *
+ * @brief This source file contains a Clock Driver implementation using the
+ * Xilinx Triple Timer Counter (TTC).
+ */
+
+/*
+ * Copyright (C) 2024 embedded brains GmbH & Co. KG
+ * Copyright (C) 2023 Reflex Aerospace GmbH
+ *
+ * Written by Philip Kirkpatrick <p.kirkpatrick@reflexaerospace.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <bsp.h>
+#include <bsp/irq.h>
+#include <bsp/fatal.h>
+#include <dev/clock/xttcps_hw.h>
+#include <rtems/sysinit.h>
+#include <rtems/timecounter.h>
+
+#if XTTCPS_COUNT_VALUE_MASK != UINT32_MAX
+#error "unexpected XTTCPS_COUNT_VALUE_MASK value"
+#endif
+
+/**
+ * @defgroup RTEMSDriverClockXilTTC \
+ * Xilinx Triple Timer Counter (TTC) Clock Driver
+ *
+ * @ingroup RTEMSDriverClockImpl
+ *
+ * @brief This group contains the Xilinx Triple Timer Counter (TTC) Clock
+ * Driver implementation.
+ *
+ * @{
+ */
+
+uint32_t _CPU_Counter_frequency( void )
+{
+ return XIL_CLOCK_TTC_REFERENCE_CLOCK;
+}
+
+CPU_Counter_ticks _CPU_Counter_read(void)
+{
+ return XTtcPs_ReadReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_COUNT_VALUE_OFFSET);
+}
+
+static void xil_ttc_initialize(void)
+{
+ /* Do not use a prescaler to get a high resolution time source */
+ XTtcPs_WriteReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_CLK_CNTRL_OFFSET, 0);
+
+ /* Disable interrupts */
+ XTtcPs_WriteReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_IER_OFFSET, 0);
+
+ /*
+ * Enable the timer, do not enable waveform output, increment up, use
+ * overflow mode, enable match mode.
+ */
+ XTtcPs_WriteReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_CNT_CNTRL_OFFSET,
+ XTTCPS_CNT_CNTRL_EN_WAVE_MASK | XTTCPS_CNT_CNTRL_MATCH_MASK);
+}
+
+RTEMS_SYSINIT_ITEM(
+ xil_ttc_initialize,
+ RTEMS_SYSINIT_CPU_COUNTER,
+ RTEMS_SYSINIT_ORDER_MIDDLE
+);
+
+typedef struct {
+ struct timecounter base;
+ uint32_t irq_match_interval;
+} xil_ttc_timecounter;
+
+static xil_ttc_timecounter xil_ttc_clock_instance;
+
+static uint32_t xil_ttc_get_timecount(struct timecounter *tc)
+{
+ (void) tc;
+ return XTtcPs_ReadReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_COUNT_VALUE_OFFSET);
+}
+
+static void xil_ttc_clock_driver_support_initialize_hardware(void)
+{
+ xil_ttc_timecounter *tc;
+ uint64_t frequency;
+ uint32_t irq_match_interval;
+ uint32_t count;
+
+ tc = &xil_ttc_clock_instance;
+ frequency = XIL_CLOCK_TTC_REFERENCE_CLOCK;
+ irq_match_interval = (uint32_t)
+ ((frequency * rtems_configuration_get_microseconds_per_tick()) / 1000000);
+
+ /* Setup match register to generate clock interrupts */
+ count = XTtcPs_ReadReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_COUNT_VALUE_OFFSET);
+ XTtcPs_WriteReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_MATCH_0_OFFSET,
+ count + irq_match_interval);
+
+ /* Clear interrupts (clear on read) */
+ (void) XTtcPs_ReadReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_ISR_OFFSET);
+
+ /* Enable interrupt for match register */
+ XTtcPs_WriteReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_IER_OFFSET,
+ XTTCPS_IXR_MATCH_0_MASK);
+
+ /* Install timecounter */
+ tc->irq_match_interval = irq_match_interval;
+ tc->base.tc_counter_mask = UINT32_MAX;
+ tc->base.tc_frequency = XIL_CLOCK_TTC_REFERENCE_CLOCK;
+ tc->base.tc_get_timecount = xil_ttc_get_timecount;
+ tc->base.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER;
+ rtems_timecounter_install(&tc->base);
+}
+
+static void xil_ttc_clock_driver_support_at_tick(xil_ttc_timecounter *tc)
+{
+ uint32_t irq_match_interval;
+ uint32_t count;
+ uint32_t match;
+
+ irq_match_interval = tc->irq_match_interval;
+
+ /* Update match register */
+ match = XTtcPs_ReadReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_MATCH_0_OFFSET);
+ match += irq_match_interval;
+ XTtcPs_WriteReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_MATCH_0_OFFSET, match);
+
+ /* Clear interrupts (clear on read) */
+ (void) XTtcPs_ReadReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_ISR_OFFSET);
+
+ /* Check that the new match value is in the future */
+ count = XTtcPs_ReadReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_COUNT_VALUE_OFFSET);
+
+ while (RTEMS_PREDICT_FALSE(match - count > irq_match_interval)) {
+ /*
+ * Tick misses may happen if interrupts are disabled for an extremely long
+ * period or while debugging.
+ */
+ rtems_timecounter_tick();
+
+ /* Update match register */
+ match += irq_match_interval;
+ XTtcPs_WriteReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_MATCH_0_OFFSET, match);
+
+ /* Clear interrupts (clear on read) */
+ (void) XTtcPs_ReadReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_ISR_OFFSET);
+
+ /* Maybe the new match value is now in the future */
+ count = XTtcPs_ReadReg(XIL_CLOCK_TTC_BASE_ADDR, XTTCPS_COUNT_VALUE_OFFSET);
+ }
+}
+
+static rtems_interrupt_entry xil_ttc_interrupt_entry;
+
+static void xil_ttc_clock_driver_support_install_isr(
+ rtems_interrupt_handler handler
+)
+{
+ rtems_status_code sc;
+
+ rtems_interrupt_entry_initialize(
+ &xil_ttc_interrupt_entry,
+ handler,
+ &xil_ttc_clock_instance,
+ "Clock"
+ );
+ sc = rtems_interrupt_entry_install(
+ XIL_CLOCK_TTC_IRQ,
+ RTEMS_INTERRUPT_UNIQUE,
+ &xil_ttc_interrupt_entry
+ );
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ bsp_fatal(XIL_FATAL_TTC_IRQ_INSTALL);
+ }
+}
+
+#define Clock_driver_support_at_tick(arg) \
+ xil_ttc_clock_driver_support_at_tick(arg)
+
+#define Clock_driver_support_initialize_hardware \
+ xil_ttc_clock_driver_support_initialize_hardware
+
+#define Clock_driver_support_install_isr(isr) \
+ xil_ttc_clock_driver_support_install_isr(isr)
+
+/** @} */
+
+#include "../../../shared/dev/clock/clockimpl.h"
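As a worked example of the match interval computation in xil_ttc_clock_driver_support_initialize_hardware(): assuming, hypothetically, a 100 MHz TTC reference clock and the default 10000 microseconds per clock tick, irq_match_interval = (100000000 * 10000) / 1000000 = 1000000 counter ticks, so the match register advances by one million counts per tick; the 64-bit frequency variable keeps the intermediate product from overflowing. Because the catch-up loop compares match - count against irq_match_interval in unsigned 32-bit arithmetic, the scheme also survives wrap-around of the counter.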
diff --git a/bsps/shared/dev/i2c/cadence-i2c.c b/bsps/shared/dev/i2c/cadence-i2c.c
index 6704288a79..67dec0da46 100644
--- a/bsps/shared/dev/i2c/cadence-i2c.c
+++ b/bsps/shared/dev/i2c/cadence-i2c.c
@@ -28,7 +28,6 @@
#include <dev/i2c/cadence-i2c.h>
#include <dev/i2c/cadence-i2c-regs.h>
-#include <rtems/irq-extension.h>
#include <rtems/score/assert.h>
#include <dev/i2c/i2c.h>
diff --git a/bsps/shared/dev/irq/arm-gicv2-get-attributes.c b/bsps/shared/dev/irq/arm-gicv2-get-attributes.c
index 3280671ab6..613174bb3a 100644
--- a/bsps/shared/dev/irq/arm-gicv2-get-attributes.c
+++ b/bsps/shared/dev/irq/arm-gicv2-get-attributes.c
@@ -3,9 +3,10 @@
/**
* @file
*
- * @ingroup RTEMSBSPsShared
+ * @ingroup DevIRQGIC
*
- * @brief This source file contains the interrupt get attribute implementation.
+ * @brief This source file contains the implementation of
+ * bsp_interrupt_get_attributes() for the GICv2.
*/
/*
diff --git a/bsps/shared/dev/irq/arm-gicv2-zynqmp.c b/bsps/shared/dev/irq/arm-gicv2-zynqmp.c
index ee4479155a..bb9bfafb48 100644
--- a/bsps/shared/dev/irq/arm-gicv2-zynqmp.c
+++ b/bsps/shared/dev/irq/arm-gicv2-zynqmp.c
@@ -3,9 +3,11 @@
/**
* @file
*
- * @ingroup RTEMSBSPsShared
+ * @ingroup DevIRQGIC
*
- * @brief This source file contains the interrupt get attribute implementation.
+ * @brief This source file contains the implementation of
+ * bsp_interrupt_get_attributes() for the GICv2 of Xilinx Zynq UltraScale+
+ * MPSoC and RFSoC devices.
*/
/*
diff --git a/bsps/shared/dev/irq/arm-gicv2.c b/bsps/shared/dev/irq/arm-gicv2.c
index fcc3d0dfc8..263278148b 100644
--- a/bsps/shared/dev/irq/arm-gicv2.c
+++ b/bsps/shared/dev/irq/arm-gicv2.c
@@ -3,9 +3,10 @@
/**
* @file
*
- * @ingroup RTEMSBSPsARMShared
+ * @ingroup DevIRQGIC
*
- * @brief This source file contains the implementation of ARM GICv2 support.
+ * @brief This source file contains the implementation of the generic GICv2
+ * support.
*/
/*
@@ -36,9 +37,17 @@
#include <dev/irq/arm-gic.h>
#include <dev/irq/arm-gic-arch.h>
-#include <bsp/irq.h>
#include <bsp/irq-generic.h>
#include <bsp/start.h>
+#include <rtems/score/processormaskimpl.h>
+
+/*
+ * The GIC architecture reserves interrupt ID numbers 1020 to 1023 for special
+ * purposes.
+ */
+#if BSP_INTERRUPT_VECTOR_COUNT >= 1020
+#error "BSP_INTERRUPT_VECTOR_COUNT is too large"
+#endif
#define GIC_CPUIF ((volatile gic_cpuif *) BSP_ARM_GIC_CPUIF_BASE)
@@ -74,12 +83,19 @@
void bsp_interrupt_dispatch(void)
{
volatile gic_cpuif *cpuif = GIC_CPUIF;
- uint32_t icciar = cpuif->icciar;
- rtems_vector_number vector = GIC_CPUIF_ICCIAR_ACKINTID_GET(icciar);
- rtems_vector_number spurious = 1023;
- if (vector != spurious) {
- arm_interrupt_handler_dispatch(vector);
+ while (true) {
+ uint32_t icciar = cpuif->icciar;
+ rtems_vector_number vector = GIC_CPUIF_ICCIAR_ACKINTID_GET(icciar);
+ uint32_t status;
+
+ if (!bsp_interrupt_is_valid_vector(vector)) {
+ break;
+ }
+
+ status = arm_interrupt_enable_interrupts();
+ bsp_interrupt_handler_dispatch_unchecked(vector);
+ arm_interrupt_restore_interrupts(status);
cpuif->icceoir = icciar;
}
@@ -328,6 +344,7 @@ rtems_status_code arm_gic_irq_get_group(
return sc;
}
+#ifdef RTEMS_SMP
rtems_status_code bsp_interrupt_set_affinity(
rtems_vector_number vector,
const Processor_mask *affinity
@@ -387,6 +404,7 @@ rtems_status_code bsp_interrupt_get_affinity(
_Processor_mask_From_uint32_t(affinity, targets, 0);
return RTEMS_SUCCESSFUL;
}
+#endif
void arm_gic_trigger_sgi(rtems_vector_number vector, uint32_t targets)
{
@@ -400,9 +418,11 @@ void arm_gic_trigger_sgi(rtems_vector_number vector, uint32_t targets)
| GIC_DIST_ICDSGIR_SGIINTID(vector);
}
+#ifdef RTEMS_SMP
uint32_t arm_gic_irq_processor_count(void)
{
volatile gic_dist *dist = ARM_GIC_DIST;
return GIC_DIST_ICDICTR_CPU_NUMBER_GET(dist->icdictr) + 1;
}
+#endif
diff --git a/bsps/shared/dev/irq/arm-gicv3.c b/bsps/shared/dev/irq/arm-gicv3.c
index 4772ff5db4..958b1061bd 100644
--- a/bsps/shared/dev/irq/arm-gicv3.c
+++ b/bsps/shared/dev/irq/arm-gicv3.c
@@ -1,6 +1,15 @@
-/*
- * SPDX-License-Identifier: BSD-2-Clause
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup DevIRQGIC
*
+ * @brief This source file contains the implementation of the generic GICv3
+ * support.
+ */
+
+/*
* Copyright (C) 2019 On-Line Applications Research Corporation (OAR)
*
* Redistribution and use in source and binary forms, with or without
@@ -27,18 +36,24 @@
#include <dev/irq/arm-gicv3.h>
-#include <bsp/irq.h>
#include <bsp/irq-generic.h>
#include <bsp/start.h>
+#include <rtems/score/processormaskimpl.h>
void bsp_interrupt_dispatch(void)
{
- uint32_t icciar = READ_SR(ICC_IAR1);
- rtems_vector_number vector = GIC_CPUIF_ICCIAR_ACKINTID_GET(icciar);
- rtems_vector_number spurious = 1023;
+ while (true) {
+ uint32_t icciar = READ_SR(ICC_IAR1);
+ rtems_vector_number vector = GIC_CPUIF_ICCIAR_ACKINTID_GET(icciar);
+ uint32_t status;
- if (vector != spurious) {
- arm_interrupt_handler_dispatch(vector);
+ if (!bsp_interrupt_is_valid_vector(vector)) {
+ break;
+ }
+
+ status = arm_interrupt_enable_interrupts();
+ bsp_interrupt_handler_dispatch_unchecked(vector);
+ arm_interrupt_restore_interrupts(status);
WRITE_SR(ICC_EOIR1, icciar);
}
@@ -242,6 +257,7 @@ rtems_status_code arm_gic_irq_get_priority(
return sc;
}
+#ifdef RTEMS_SMP
rtems_status_code bsp_interrupt_set_affinity(
rtems_vector_number vector,
const Processor_mask *affinity
@@ -274,12 +290,14 @@ rtems_status_code bsp_interrupt_get_affinity(
_Processor_mask_From_uint32_t(affinity, targets, 0);
return RTEMS_SUCCESSFUL;
}
+#endif
void arm_gic_trigger_sgi(rtems_vector_number vector, uint32_t targets)
{
gicv3_trigger_sgi(vector, targets);
}
+#ifdef RTEMS_SMP
uint32_t arm_gic_irq_processor_count(void)
{
volatile gic_dist *dist = ARM_GIC_DIST;
@@ -306,3 +324,4 @@ uint32_t arm_gic_irq_processor_count(void)
return cpu_count;
}
+#endif
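Both GIC dispatch hunks above introduce the same loop structure. Stripped of the driver specifics it amounts to the sketch below; the two register accessors are illustrative placeholders, since the real code reads ICCIAR/ICCEOIR on GICv2 and ICC_IAR1/ICC_EOIR1 on GICv3:

void bsp_interrupt_dispatch(void)
{
  while (true) {
    /* Acknowledge the highest priority pending interrupt. */
    uint32_t icciar = read_interrupt_acknowledge_register();
    rtems_vector_number vector = GIC_CPUIF_ICCIAR_ACKINTID_GET(icciar);

    /* IDs 1020 to 1023 are special; 1023 means nothing is pending. */
    if (!bsp_interrupt_is_valid_vector(vector)) {
      break;
    }

    /* Allow nested interrupts while the handler runs. */
    uint32_t status = arm_interrupt_enable_interrupts();
    bsp_interrupt_handler_dispatch_unchecked(vector);
    arm_interrupt_restore_interrupts(status);

    /* Signal end of interrupt so the next pending one can be delivered. */
    write_end_of_interrupt_register(icciar);
  }
}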
diff --git a/bsps/shared/dev/nand/xnandpsu.c b/bsps/shared/dev/nand/xnandpsu.c
index 89451d19f8..79025f3c04 100644
--- a/bsps/shared/dev/nand/xnandpsu.c
+++ b/bsps/shared/dev/nand/xnandpsu.c
@@ -244,6 +244,11 @@ s32 XNandPsu_CfgInitialize(XNandPsu *InstancePtr, XNandPsu_Config *ConfigPtr,
InstancePtr->DmaMode = XNANDPSU_MDMA;
InstancePtr->IsReady = XIL_COMPONENT_IS_READY;
+#ifdef __rtems__
+ /* Set page cache to unavailable */
+ InstancePtr->PartialDataPageIndex = XNANDPSU_PAGE_CACHE_UNAVAILABLE;
+#endif
+
/* Initialize the NAND flash targets */
Status = XNandPsu_FlashInit(InstancePtr);
if (Status != XST_SUCCESS) {
@@ -278,6 +283,11 @@ s32 XNandPsu_CfgInitialize(XNandPsu *InstancePtr, XNandPsu_Config *ConfigPtr,
#endif
goto Out;
}
+
+#ifdef __rtems__
+ /* Set page cache to none */
+ InstancePtr->PartialDataPageIndex = XNANDPSU_PAGE_CACHE_NONE;
+#endif
Out:
return Status;
}
@@ -814,6 +824,21 @@ void XNandPsu_DisableEccMode(XNandPsu *InstancePtr)
InstancePtr->EccMode = XNANDPSU_NONE;
}
+#ifdef __rtems__
+#include <rtems/rtems/clock.h>
+static void udelay( void )
+{
+ uint64_t time = rtems_clock_get_uptime_nanoseconds() + 1000;
+ while (1) {
+ uint64_t newtime = rtems_clock_get_uptime_nanoseconds();
+ if (newtime > time) {
+ break;
+ }
+ }
+}
+#define usleep(x) udelay()
+#endif
+
/*****************************************************************************/
/**
*
@@ -1439,6 +1464,12 @@ s32 XNandPsu_Write(XNandPsu *InstancePtr, u64 Offset, u64 Length, u8 *SrcBuf)
goto Out;
}
+#ifdef __rtems__
+ if (InstancePtr->PartialDataPageIndex != XNANDPSU_PAGE_CACHE_UNAVAILABLE) {
+ /* All writes invalidate the page cache */
+ InstancePtr->PartialDataPageIndex = XNANDPSU_PAGE_CACHE_NONE;
+ }
+#endif
while (LengthVar > 0U) {
Block = (u32) (OffsetVar/InstancePtr->Geometry.BlockSize);
/*
@@ -1467,7 +1498,11 @@ s32 XNandPsu_Write(XNandPsu *InstancePtr, u64 Offset, u64 Length, u8 *SrcBuf)
}
Target = (u32) (OffsetVar/InstancePtr->Geometry.TargetSize);
+#ifdef __rtems__
+ {
+#else
if (Page > InstancePtr->Geometry.NumTargetPages) {
+#endif
Page %= InstancePtr->Geometry.NumTargetPages;
}
@@ -1582,7 +1617,11 @@ s32 XNandPsu_Read(XNandPsu *InstancePtr, u64 Offset, u64 Length, u8 *DestBuf)
}
Target = (u32) (OffsetVar/InstancePtr->Geometry.TargetSize);
+#ifdef __rtems__
+ {
+#else
if (Page > InstancePtr->Geometry.NumTargetPages) {
+#endif
Page %= InstancePtr->Geometry.NumTargetPages;
}
/* Check if partial read */
@@ -1596,14 +1635,45 @@ s32 XNandPsu_Read(XNandPsu *InstancePtr, u64 Offset, u64 Length, u8 *DestBuf)
InstancePtr->Geometry.BytesPerPage :
(u32)LengthVar;
}
+#ifdef __rtems__
+ if (Page == InstancePtr->PartialDataPageIndex) {
+ /*
+ * This is a whole page read for the currently cached
+ * page. It will not be taken care of below, so perform
+ * the copy here.
+ */
+ if (PartialBytes == 0U) {
+ (void)Xil_MemCpy(DestBufPtr,
+ &InstancePtr->PartialDataBuf[0],
+ NumBytes);
+ }
+ } else {
+#endif
/* Read page */
Status = XNandPsu_ReadPage(InstancePtr, Target, Page, 0U,
BufPtr);
+#ifdef __rtems__
+ if (PartialBytes > 0U &&
+ InstancePtr->PartialDataPageIndex != XNANDPSU_PAGE_CACHE_UNAVAILABLE) {
+ /*
+ * Partial read into page cache. Update the
+ * cached page index.
+ */
+ InstancePtr->PartialDataPageIndex = Page;
+ }
+ }
+#endif
if (Status != XST_SUCCESS) {
goto Out;
}
if (PartialBytes > 0U) {
(void)Xil_MemCpy(DestBufPtr, BufPtr + Col, NumBytes);
+#ifdef __rtems__
+ /* The destination buffer is touched by hardware, synchronize */
+ if (InstancePtr->Config.IsCacheCoherent == 0) {
+ Xil_DCacheFlushRange((INTPTR)(void *)DestBufPtr, NumBytes);
+ }
+#endif
}
DestBufPtr += NumBytes;
OffsetVar += NumBytes;
@@ -1693,13 +1763,21 @@ s32 XNandPsu_Erase(XNandPsu *InstancePtr, u64 Offset, u64 Length)
for (Block = StartBlock; Block < (StartBlock + NumBlocks); Block++) {
Target = Block/InstancePtr->Geometry.NumTargetBlocks;
+#ifdef __rtems__
+ u32 ModBlock = Block % InstancePtr->Geometry.NumTargetBlocks;
+#else
Block %= InstancePtr->Geometry.NumTargetBlocks;
+#endif
/* Don't erase bad block */
if (XNandPsu_IsBlockBad(InstancePtr, Block) ==
XST_SUCCESS)
continue;
/* Block Erase */
+#ifdef __rtems__
+ Status = XNandPsu_EraseBlock(InstancePtr, Target, ModBlock);
+#else
Status = XNandPsu_EraseBlock(InstancePtr, Target, Block);
+#endif
if (Status != XST_SUCCESS)
goto Out;
@@ -2002,6 +2080,14 @@ static s32 XNandPsu_ReadPage(XNandPsu *InstancePtr, u32 Target, u32 Page,
Status = XNandPsu_Data_ReadWrite(InstancePtr, Buf, PktCount, PktSize, 0, 1);
+#ifdef __rtems__
+ if (InstancePtr->DmaMode == XNANDPSU_MDMA) {
+ if (InstancePtr->Config.IsCacheCoherent == 0) {
+ Xil_DCacheInvalidateRange((INTPTR)(void *)Buf, (PktSize * PktCount));
+ }
+ }
+#endif
+
/* Check ECC Errors */
if (InstancePtr->EccMode == XNANDPSU_HWECC) {
/* Hamming Multi Bit Errors */
@@ -2115,6 +2201,14 @@ s32 XNandPsu_ReadSpareBytes(XNandPsu *InstancePtr, u32 Page, u8 *Buf)
Status = XNandPsu_Data_ReadWrite(InstancePtr, Buf, PktCount, PktSize, 0, 1);
+#ifdef __rtems__
+ if (InstancePtr->DmaMode == XNANDPSU_MDMA) {
+ if (InstancePtr->Config.IsCacheCoherent == 0) {
+ Xil_DCacheInvalidateRange((INTPTR)(void *)Buf, (PktSize * PktCount));
+ }
+ }
+#endif
+
return Status;
}
@@ -2140,7 +2234,11 @@ s32 XNandPsu_EraseBlock(XNandPsu *InstancePtr, u32 Target, u32 Block)
Xil_AssertNonvoid(InstancePtr != NULL);
Xil_AssertNonvoid(InstancePtr->IsReady == XIL_COMPONENT_IS_READY);
Xil_AssertNonvoid(Target < XNANDPSU_MAX_TARGETS);
+#ifdef __rtems__
+ Xil_AssertNonvoid(Block < InstancePtr->Geometry.NumTargetBlocks);
+#else
Xil_AssertNonvoid(Block < InstancePtr->Geometry.NumBlocks);
+#endif
s32 Status = XST_FAILURE;
u32 Page;
@@ -2557,6 +2655,11 @@ static s32 XNandPsu_ChangeWriteColumn(XNandPsu *InstancePtr, u32 Target,
if (InstancePtr->DmaMode == XNANDPSU_MDMA) {
RegVal = XNANDPSU_INTR_STS_EN_TRANS_COMP_STS_EN_MASK |
XNANDPSU_INTR_STS_EN_DMA_INT_STS_EN_MASK;
+#ifdef __rtems__
+ if (InstancePtr->Config.IsCacheCoherent == 0) {
+ Xil_DCacheFlushRange((INTPTR)(void *)Buf, (PktSize * PktCount));
+ }
+#endif
XNandPsu_Update_DmaAddr(InstancePtr, Buf);
} else {
RegVal = XNANDPSU_INTR_STS_EN_BUFF_WR_RDY_STS_EN_MASK;
diff --git a/bsps/shared/dev/nand/xnandpsu_bbm.c b/bsps/shared/dev/nand/xnandpsu_bbm.c
index dd59148536..40cf798965 100644
--- a/bsps/shared/dev/nand/xnandpsu_bbm.c
+++ b/bsps/shared/dev/nand/xnandpsu_bbm.c
@@ -62,7 +62,9 @@ static s32 XNandPsu_WriteBbt(XNandPsu *InstancePtr, XNandPsu_BbtDesc *Desc,
static s32 XNandPsu_MarkBbt(XNandPsu* InstancePtr, XNandPsu_BbtDesc *Desc,
u32 Target);
+#ifndef __rtems__
static s32 XNandPsu_UpdateBbt(XNandPsu *InstancePtr, u32 Target);
+#endif
/************************** Variable Definitions *****************************/
@@ -321,13 +323,23 @@ Out:
******************************************************************************/
static void XNandPsu_ConvertBbt(XNandPsu *InstancePtr, u8 *Buf, u32 Target)
{
+#ifndef __rtems__
u32 BlockOffset;
u8 BlockShift;
u32 Data;
u8 BlockType;
u32 BlockIndex;
+#endif
u32 BbtLen = InstancePtr->Geometry.NumTargetBlocks >>
XNANDPSU_BBT_BLOCK_SHIFT;
+#ifdef __rtems__
+ u32 BbtOffset = Target * InstancePtr->Geometry.NumTargetBlocks / XNANDPSU_BBT_ENTRY_NUM_BLOCKS;
+
+ for(u32 BbtIndex = 0; BbtIndex < BbtLen; BbtIndex++) {
+ /* Invert the byte to convert from in-flash BBT to in-memory BBT */
+ InstancePtr->Bbt[BbtIndex + BbtOffset] = ~Buf[BbtIndex];
+ }
+#else
u32 StartBlock = Target * InstancePtr->Geometry.NumTargetBlocks;
for(BlockOffset = StartBlock; BlockOffset < (StartBlock + BbtLen);
@@ -370,6 +382,7 @@ static void XNandPsu_ConvertBbt(XNandPsu *InstancePtr, u8 *Buf, u32 Target)
}
}
}
+#endif
}
/*****************************************************************************/
@@ -400,7 +413,11 @@ static s32 XNandPsu_ReadBbt(XNandPsu *InstancePtr, u32 Target)
XNandPsu_BbtDesc *Desc = &InstancePtr->BbtDesc;
XNandPsu_BbtDesc *MirrorDesc = &InstancePtr->BbtMirrorDesc;
+#ifdef __rtems__
+ BufLen = InstancePtr->Geometry.NumTargetBlocks >>
+#else
BufLen = InstancePtr->Geometry.NumBlocks >>
+#endif
XNANDPSU_BBT_BLOCK_SHIFT;
/* Search the Bad Block Table(BBT) in flash */
Status1 = XNandPsu_SearchBbt(InstancePtr, Desc, Target);
@@ -548,6 +565,9 @@ static s32 XNandPsu_SearchBbt(XNandPsu *InstancePtr, XNandPsu_BbtDesc *Desc,
VerOffset = Desc->VerOffset;
MaxBlocks = Desc->MaxBlocks;
SigLength = Desc->SigLength;
+#ifdef __rtems__
+ Desc->Valid = 0;
+#endif
/* Read the last 4 blocks for Bad Block Table(BBT) signature */
for(Block = 0U; Block < MaxBlocks; Block++) {
@@ -612,6 +632,7 @@ static s32 XNandPsu_WriteBbt(XNandPsu *InstancePtr, XNandPsu_BbtDesc *Desc,
u8 SpareBuf[XNANDPSU_MAX_SPARE_SIZE] __attribute__ ((aligned(64))) = {0U};
#endif
+#ifndef __rtems__
u8 Mask[4] = {0x00U, 0x01U, 0x02U, 0x03U};
u8 Data;
u32 BlockOffset;
@@ -621,11 +642,21 @@ static s32 XNandPsu_WriteBbt(XNandPsu *InstancePtr, XNandPsu_BbtDesc *Desc,
u32 Index;
u8 BlockType;
u32 BbtLen = InstancePtr->Geometry.NumBlocks >>
+#else
+ s32 Status;
+ u32 Index;
+ u32 BbtLen = InstancePtr->Geometry.NumTargetBlocks >>
+#endif
XNANDPSU_BBT_BLOCK_SHIFT;
/* Find a valid block to write the Bad Block Table(BBT) */
if ((!Desc->Valid) != 0U) {
for(Index = 0U; Index < Desc->MaxBlocks; Index++) {
Block = (EndBlock - Index);
+#ifdef __rtems__
+ if (XNandPsu_IsBlockBad(InstancePtr, Block) != XST_FAILURE) {
+ continue;
+ }
+#else
BlockOffset = Block >> XNANDPSU_BBT_BLOCK_SHIFT;
BlockShift = XNandPsu_BbtBlockShift(Block);
BlockType = (InstancePtr->Bbt[BlockOffset] >>
@@ -639,6 +670,7 @@ static s32 XNandPsu_WriteBbt(XNandPsu *InstancePtr, XNandPsu_BbtDesc *Desc,
/* Good Block */
break;
}
+#endif
Desc->PageOffset[Target] = Block *
InstancePtr->Geometry.PagesPerBlock;
if (Desc->PageOffset[Target] !=
@@ -666,6 +698,14 @@ static s32 XNandPsu_WriteBbt(XNandPsu *InstancePtr, XNandPsu_BbtDesc *Desc,
/* Convert the memory based BBT to flash based table */
(void)memset(Buf, 0xff, BbtLen);
+#ifdef __rtems__
+ u32 BbtTargetOffset = BbtLen * Target;
+ /* Loop through the BBT entries */
+ for(u32 BbtIndex = 0U; BbtIndex < BbtLen; BbtIndex++) {
+ /* Invert byte to convert from in-memory BBT to in-flash BBT */
+ Buf[BbtIndex] = ~InstancePtr->Bbt[BbtIndex + BbtTargetOffset];
+ }
+#else
/* Loop through the number of blocks */
for(BlockOffset = 0U; BlockOffset < BbtLen; BlockOffset++) {
Data = InstancePtr->Bbt[BlockOffset];
@@ -679,8 +719,14 @@ static s32 XNandPsu_WriteBbt(XNandPsu *InstancePtr, XNandPsu_BbtDesc *Desc,
Data >>= XNANDPSU_BBT_BLOCK_SHIFT;
}
}
+#endif
/* Write the Bad Block Table(BBT) to flash */
+#ifdef __rtems__
+ Status = XNandPsu_EraseBlock(InstancePtr, Target,
+ Block % InstancePtr->Geometry.NumTargetBlocks);
+#else
Status = XNandPsu_EraseBlock(InstancePtr, 0U, Block);
+#endif
if (Status != XST_SUCCESS) {
goto Out;
}
@@ -726,7 +772,11 @@ Out:
* - XST_FAILURE if fail.
*
******************************************************************************/
+#ifdef __rtems__
+s32 XNandPsu_UpdateBbt(XNandPsu *InstancePtr, u32 Target)
+#else
static s32 XNandPsu_UpdateBbt(XNandPsu *InstancePtr, u32 Target)
+#endif
{
s32 Status;
u8 Version;
@@ -786,7 +836,11 @@ static s32 XNandPsu_MarkBbt(XNandPsu* InstancePtr, XNandPsu_BbtDesc *Desc,
/* Mark the last four blocks as Reserved */
BlockIndex = ((Target + (u32)1) * InstancePtr->Geometry.NumTargetBlocks) -
+#ifdef __rtems__
+ Desc->MaxBlocks;
+#else
Desc->MaxBlocks - (u32)1;
+#endif
for(Index = 0U; Index < Desc->MaxBlocks; Index++) {
@@ -871,11 +925,22 @@ s32 XNandPsu_IsBlockBad(XNandPsu *InstancePtr, u32 Block)
*
******************************************************************************/
s32 XNandPsu_MarkBlockBad(XNandPsu *InstancePtr, u32 Block)
+#ifdef __rtems__
+{
+ return XNandPsu_MarkBlock(InstancePtr, Block, XNANDPSU_BLOCK_BAD );
+}
+
+s32 XNandPsu_MarkBlock(XNandPsu *InstancePtr, u32 Block, u8 BlockMark)
+#endif
{
Xil_AssertNonvoid(InstancePtr != NULL);
Xil_AssertNonvoid(InstancePtr->IsReady == XIL_COMPONENT_IS_READY)
Xil_AssertNonvoid(Block < InstancePtr->Geometry.NumBlocks);
+#ifdef __rtems__
+ BlockMark &= XNANDPSU_BLOCK_TYPE_MASK;
+#endif
+
u8 Data;
u8 BlockShift;
u32 BlockOffset;
@@ -893,7 +958,11 @@ s32 XNandPsu_MarkBlockBad(XNandPsu *InstancePtr, u32 Block)
/* Mark the block as bad in the RAM based Bad Block Table */
OldVal = Data;
Data &= ~(XNANDPSU_BLOCK_TYPE_MASK << BlockShift);
+#ifdef __rtems__
+ Data |= (BlockMark << BlockShift);
+#else
Data |= (XNANDPSU_BLOCK_BAD << BlockShift);
+#endif
NewVal = Data;
InstancePtr->Bbt[BlockOffset] = Data;
@@ -909,4 +978,24 @@ s32 XNandPsu_MarkBlockBad(XNandPsu *InstancePtr, u32 Block)
Out:
return Status;
}
+
+#ifdef __rtems__
+bool XNandPsu_StageBlockMark(XNandPsu *InstancePtr, u32 Block, u8 BlockMark)
+{
+ u8 BlockShift;
+ u32 BlockOffset;
+ u8 OldVal;
+
+ BlockMark &= XNANDPSU_BLOCK_TYPE_MASK;
+
+ BlockOffset = Block >> XNANDPSU_BBT_BLOCK_SHIFT;
+ BlockShift = XNandPsu_BbtBlockShift(Block);
+ OldVal = InstancePtr->Bbt[BlockOffset] >> BlockShift;
+ OldVal &= XNANDPSU_BLOCK_TYPE_MASK;
+ InstancePtr->Bbt[BlockOffset] &= ~(XNANDPSU_BLOCK_TYPE_MASK << BlockShift);
+ InstancePtr->Bbt[BlockOffset] |= (BlockMark << BlockShift);
+ return BlockMark != OldVal;
+}
+#endif
+
/** @} */
diff --git a/bsps/shared/dev/rtc/mcp7940m.c b/bsps/shared/dev/rtc/mcp7940m.c
new file mode 100644
index 0000000000..1abc5faaad
--- /dev/null
+++ b/bsps/shared/dev/rtc/mcp7940m.c
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/*
+ * Copyright (C) 2023 embedded brains GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Note: This driver implements only the basic RTC functionality of the
+ * MCP7940M. It tries not to touch any register fields except for the basic
+ * date/time fields in the get/set time functions. That way it should be
+ * possible to re-use the driver for similar RTCs by just replacing the
+ * initialization function. Suggested method for that: Add a field to the struct
+ * mcp7940m_rtc with a function pointer that points to the initialization
+ * function.
+ *
+ * All flags that are considered MCP7940M specific have a MCP7940M in the name.
+ *
+ * Only the 24 hour format is supported. If this driver is the only one that
+ * writes the RTC, that shouldn't be a problem.
+ *
+ * The weekday register is not used. It has a user-defined representation anyway
+ * and therefore doesn't really matter.
+ */
+
+#include <dev/i2c/i2c.h>
+#include <libchip/mcp7940m-rtc.h>
+#include <rtems/score/sysstate.h>
+#include <rtems/score/todimpl.h>
+
+#include <fcntl.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#define REG_RTCSEC 0x00u
+
+#define RTCSEC_SECBCD_SHIFT 0u
+#define RTCSEC_SECBCD_MASK (0x7fu << RTCSEC_SECBCD_SHIFT)
+#define RTCSEC_SECBCD(x) (((x) << RTCSEC_SECBCD_SHIFT) & RTCSEC_SECBCD_MASK)
+#define RTCSEC_SECBCD_GET(x) (((x) & RTCSEC_SECBCD_MASK) >> RTCSEC_SECBCD_SHIFT)
+
+#define MCP7940M_RTCSEC_ST (0x01u << 7)
+
+#define REG_RTCMIN 0x01
+
+#define RTCMIN_MINBCD_SHIFT 0u
+#define RTCMIN_MINBCD_MASK (0x7fu << RTCMIN_MINBCD_SHIFT)
+#define RTCMIN_MINBCD(x) (((x) << RTCMIN_MINBCD_SHIFT) & RTCMIN_MINBCD_MASK)
+#define RTCMIN_MINBCD_GET(x) (((x) & RTCMIN_MINBCD_MASK) >> RTCMIN_MINBCD_SHIFT)
+
+#define REG_RTCHOUR 0x02
+
+#define RTCHOUR_HRBCD12_SHIFT 0u
+#define RTCHOUR_HRBCD12_MASK (0x1fu << RTCHOUR_HRBCD12_SHIFT)
+#define RTCHOUR_HRBCD12(x) (((x) << RTCHOUR_HRBCD12_SHIFT) & RTCHOUR_HRBCD12_MASK)
+#define RTCHOUR_HRBCD12_GET(x) (((x) & RTCHOUR_HRBCD12_MASK) >> RTCHOUR_HRBCD12_SHIFT)
+
+#define RTCHOUR_HRBCD24_SHIFT 0u
+#define RTCHOUR_HRBCD24_MASK (0x3fu << RTCHOUR_HRBCD24_SHIFT)
+#define RTCHOUR_HRBCD24(x) (((x) << RTCHOUR_HRBCD24_SHIFT) & RTCHOUR_HRBCD24_MASK)
+#define RTCHOUR_HRBCD24_GET(x) (((x) & RTCHOUR_HRBCD24_MASK) >> RTCHOUR_HRBCD24_SHIFT)
+
+#define RTCHOUR_AMPM (0x01u << 5)
+#define RTCHOUR_1224 (0x01u << 6)
+
+#define REG_RTCWKDAY 0x03
+
+#define RTCWKDAY_WKDAY_SHIFT 0u
+#define RTCWKDAY_WKDAY_MASK (0x7u << RTCWKDAY_WKDAY_SHIFT)
+#define RTCWKDAY_WKDAY(x) (((x) << RTCWKDAY_WKDAY_SHIFT) & RTCWKDAY_WKDAY_MASK)
+#define RTCWKDAY_WKDAY_GET(x) (((x) & RTCWKDAY_WKDAY_MASK) >> RTCWKDAY_WKDAY_SHIFT)
+
+#define REG_RTCDATE 0x04
+
+#define RTCDATE_DATEBCD_SHIFT 0u
+#define RTCDATE_DATEBCD_MASK (0x3fu << RTCDATE_DATEBCD_SHIFT)
+#define RTCDATE_DATEBCD(x) (((x) << RTCDATE_DATEBCD_SHIFT) & RTCDATE_DATEBCD_MASK)
+#define RTCDATE_DATEBCD_GET(x) (((x) & RTCDATE_DATEBCD_MASK) >> RTCDATE_DATEBCD_SHIFT)
+
+#define REG_RTCMTH 0x05
+
+#define RTCMTH_MTHBCD_SHIFT 0u
+#define RTCMTH_MTHBCD_MASK (0x1fu << RTCMTH_MTHBCD_SHIFT)
+#define RTCMTH_MTHBCD(x) (((x) << RTCMTH_MTHBCD_SHIFT) & RTCMTH_MTHBCD_MASK)
+#define RTCMTH_MTHBCD_GET(x) (((x) & RTCMTH_MTHBCD_MASK) >> RTCMTH_MTHBCD_SHIFT)
+
+#define MCP7940M_RTCMTH_LPYR (0x01u << 5)
+
+#define REG_RTCYEAR 0x06
+
+#define RTCYEAR_YRBCD_SHIFT 0u
+#define RTCYEAR_YRBCD_MASK (0xffu << RTCYEAR_YRBCD_SHIFT)
+#define RTCYEAR_YRBCD(x) (((x) << RTCYEAR_YRBCD_SHIFT) & RTCYEAR_YRBCD_MASK)
+#define RTCYEAR_YRBCD_GET(x) (((x) & RTCYEAR_YRBCD_MASK) >> RTCYEAR_YRBCD_SHIFT)
+
+#define REG_MCP7940M_CONTROL 0x07
+
+#define MCP7940M_CONTROL_OUT (0x1u << 7)
+#define MCP7940M_CONTROL_SQWEN (0x1u << 6)
+#define MCP7940M_CONTROL_ALM1EN (0x1u << 5)
+#define MCP7940M_CONTROL_ALM0EN (0x1u << 4)
+#define MCP7940M_CONTROL_EXTOSC (0x1u << 3)
+#define MCP7940M_CONTROL_CRSTRIM (0x1u << 2)
+#define MCP7940M_CONTROL_SQWFS1 (0x1u << 1)
+#define MCP7940M_CONTROL_SQWFS0 (0x1u << 0)
+
+static inline uint8_t bcd_to_bin(uint8_t bcd)
+{
+ uint8_t bin;
+ bin = bcd & 0x0f;
+ bin += ((bcd >> 4) & 0x0f) * 10;
+ return bin;
+}
+
+static inline uint8_t bin_to_bcd(uint8_t bin)
+{
+ uint8_t bcd;
+ bcd = bin % 10;
+ bcd |= (bin / 10) << 4;
+ return bcd;
+}
+
+static struct mcp7940m_rtc *mcp7940m_get_context(int minor)
+{
+ return (struct mcp7940m_rtc *) RTC_Table[minor].pDeviceParams;
+}
+
+static int mcp7940m_i2c_read(
+ struct mcp7940m_rtc *ctx,
+ uint8_t addr,
+ uint8_t *buf,
+ size_t len
+)
+{
+ int fd;
+ int rv;
+ struct i2c_msg msgs[] = {{
+ .addr = ctx->i2c_addr,
+ .flags = 0,
+ .buf = &addr,
+ .len = 1,
+ }, {
+ .addr = ctx->i2c_addr,
+ .flags = I2C_M_RD,
+ .buf = buf,
+ .len = len,
+ }};
+ struct i2c_rdwr_ioctl_data payload = {
+ .msgs = msgs,
+ .nmsgs = sizeof(msgs)/sizeof(msgs[0]),
+ };
+
+ fd = open(ctx->i2c_bus_path, O_RDWR);
+ if (fd < 0) {
+ return fd;
+ }
+
+ rv = ioctl(fd, I2C_RDWR, &payload);
+
+ close(fd);
+
+ return rv;
+}
+
+static int mcp7940m_i2c_write(
+ struct mcp7940m_rtc *ctx,
+ uint8_t addr,
+ const uint8_t *buf,
+ size_t len
+)
+{
+ int fd;
+ int rv;
+ uint8_t writebuf[len + 1];
+ struct i2c_msg msgs[] = {{
+ .addr = ctx->i2c_addr,
+ .flags = 0,
+ .buf = writebuf,
+ .len = len + 1,
+ }};
+ struct i2c_rdwr_ioctl_data payload = {
+ .msgs = msgs,
+ .nmsgs = sizeof(msgs)/sizeof(msgs[0]),
+ };
+
+ writebuf[0] = addr;
+ memcpy(&writebuf[1], buf, len);
+
+ fd = open(ctx->i2c_bus_path, O_RDWR);
+ if (fd < 0) {
+ return fd;
+ }
+
+ rv = ioctl(fd, I2C_RDWR, &payload);
+
+ close(fd);
+
+ return rv;
+}
+
+static int mcp7940m_initialize_once(struct mcp7940m_rtc *ctx)
+{
+ uint8_t reg;
+ ssize_t rv;
+
+ if (ctx->initialized) {
+ return 0;
+ }
+
+ /*
+ * Make sure that all alarms and outputs are disabled. Enable or disable
+ * oscillator.
+ *
+ * This makes sure that we can start with an uninitialized device that has a
+ * random value in the control register.
+ */
+ reg = 0;
+ if (!ctx->crystal) {
+ reg |= MCP7940M_CONTROL_EXTOSC;
+ }
+ rv = mcp7940m_i2c_write(ctx, REG_MCP7940M_CONTROL, &reg, 1);
+
+ if (rv == 0 && ctx->crystal) {
+ rv = mcp7940m_i2c_read(ctx, REG_RTCSEC, &reg, 1);
+ if (rv == 0 && (reg & MCP7940M_RTCSEC_ST) == 0) {
+ reg |= MCP7940M_RTCSEC_ST;
+ rv = mcp7940m_i2c_write(ctx, REG_RTCSEC, &reg, 1);
+ }
+ }
+
+ ctx->initialized = true;
+
+ return rv;
+}
+
+static int mcp7940m_get_time(int minor, rtems_time_of_day *time)
+{
+ int rv = 0;
+ uint8_t buf[REG_RTCYEAR + 1];
+ struct mcp7940m_rtc *ctx = mcp7940m_get_context(minor);
+
+ if (!_System_state_Is_up(_System_state_Get())) {
+ return -1;
+ }
+
+ rtems_mutex_lock(&ctx->mutex);
+
+ rv = mcp7940m_initialize_once(ctx);
+
+ if (rv == 0) {
+ rv = mcp7940m_i2c_read(ctx, REG_RTCSEC, buf, sizeof(buf));
+ }
+
+ if (rv == 0) {
+ unsigned year = bcd_to_bin(RTCYEAR_YRBCD_GET(buf[REG_RTCYEAR])) +
+ (TOD_BASE_YEAR / 100 * 100);
+ if (year < TOD_BASE_YEAR) {
+ year += 100;
+ }
+ time->year = year;
+ time->month = bcd_to_bin(RTCMTH_MTHBCD_GET(buf[REG_RTCMTH]));
+ time->day = bcd_to_bin(RTCDATE_DATEBCD_GET(buf[REG_RTCDATE]));
+ time->hour = bcd_to_bin(RTCHOUR_HRBCD24_GET(buf[REG_RTCHOUR]));
+ time->minute = bcd_to_bin(RTCMIN_MINBCD_GET(buf[REG_RTCMIN]));
+ time->second = bcd_to_bin(RTCSEC_SECBCD_GET(buf[REG_RTCSEC]));
+ time->ticks = 0;
+ }
+
+ rtems_mutex_unlock(&ctx->mutex);
+
+ return rv;
+}
+
+static int mcp7940m_set_time(int minor, const rtems_time_of_day *time)
+{
+ int rv = 0;
+ uint8_t buf[REG_RTCYEAR + 1];
+ struct mcp7940m_rtc *ctx = mcp7940m_get_context(minor);
+
+ if (!_System_state_Is_up(_System_state_Get())) {
+ return -1;
+ }
+
+ rtems_mutex_lock(&ctx->mutex);
+
+ rv = mcp7940m_initialize_once(ctx);
+
+ if (rv == 0) {
+ rv = mcp7940m_i2c_read(ctx, REG_RTCSEC, buf, sizeof(buf));
+ }
+
+ if (rv == 0) {
+ /* Make sure weekday is not 0 (out of range). Otherwise it's not used. */
+ if (RTCWKDAY_WKDAY_GET(buf[REG_RTCWKDAY]) < 1) {
+ buf[REG_RTCWKDAY] &= ~RTCWKDAY_WKDAY_MASK;
+ buf[REG_RTCWKDAY] |= RTCWKDAY_WKDAY(1);
+ }
+
+ buf[REG_RTCYEAR] &= ~RTCYEAR_YRBCD_MASK;
+ buf[REG_RTCYEAR] |= RTCYEAR_YRBCD(bin_to_bcd(time->year % 100));
+
+ buf[REG_RTCMTH] &= ~RTCMTH_MTHBCD_MASK;
+ buf[REG_RTCMTH] |= RTCMTH_MTHBCD(bin_to_bcd(time->month));
+
+ buf[REG_RTCDATE] &= ~RTCDATE_DATEBCD_MASK;
+ buf[REG_RTCDATE] |= RTCDATE_DATEBCD(bin_to_bcd(time->day));
+
+ buf[REG_RTCHOUR] &= ~(RTCHOUR_HRBCD24_MASK | RTCHOUR_1224);
+ buf[REG_RTCHOUR] |= RTCHOUR_HRBCD24(bin_to_bcd(time->hour));
+
+ buf[REG_RTCMIN] &= ~RTCMIN_MINBCD_MASK;
+ buf[REG_RTCMIN] |= RTCMIN_MINBCD(bin_to_bcd(time->minute));
+
+ buf[REG_RTCSEC] &= ~RTCSEC_SECBCD_MASK;
+ buf[REG_RTCSEC] |= RTCSEC_SECBCD(bin_to_bcd(time->second));
+
+ rv = mcp7940m_i2c_write(ctx, REG_RTCSEC, buf, sizeof(buf));
+ }
+
+ rtems_mutex_unlock(&ctx->mutex);
+
+ return rv;
+}
+
+static void mcp7940m_init(int minor)
+{
+ (void) minor;
+}
+
+bool rtc_mcp7940m_probe(int minor)
+{
+ return true;
+}
+
+const rtc_fns rtc_mcp7940m_fns = {
+ .deviceInitialize = mcp7940m_init,
+ .deviceGetTime = mcp7940m_get_time,
+ .deviceSetTime = mcp7940m_set_time,
+};
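For context, this driver plugs into the libchip RTC framework through RTC_Table; mcp7940m_get_context() fetches its context from the pDeviceParams entry. The following is only a sketch of a BSP-side table entry: the mcp7940m_rtc field names are taken from the driver code above, the rtc_tbl layout follows the usual libchip convention, and the I2C bus path and device address are assumptions that have to match the actual board:

/* Assumes <libchip/rtc.h>, <libchip/mcp7940m-rtc.h> and <rtems/thread.h>. */

static struct mcp7940m_rtc mcp7940m_ctx = {
  .mutex = RTEMS_MUTEX_INITIALIZER("mcp7940m"),
  .i2c_bus_path = "/dev/i2c-1",  /* assumed bus device path */
  .i2c_addr = 0x6f,              /* typical MCP7940M address, check the board */
  .crystal = true                /* a crystal is fitted, so start the oscillator */
};

rtc_tbl RTC_Table[] = {
  {
    .sDeviceName = "/dev/rtc",
    .deviceType = RTC_CUSTOM,
    .pDeviceFns = &rtc_mcp7940m_fns,
    .deviceProbe = rtc_mcp7940m_probe,
    .pDeviceParams = &mcp7940m_ctx
  }
};

size_t RTC_Count = RTEMS_ARRAY_SIZE(RTC_Table);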
diff --git a/bsps/shared/dev/serial/console-polled.c b/bsps/shared/dev/serial/console-polled.c
index 0fc765cb64..37ad5b8f18 100644
--- a/bsps/shared/dev/serial/console-polled.c
+++ b/bsps/shared/dev/serial/console-polled.c
@@ -113,7 +113,7 @@ rtems_device_driver console_open(
NULL, /* setAttributes */
NULL, /* stopRemoteTx */
NULL, /* startRemoteTx */
- 0 /* outputUsesInterrupts */
+ TERMIOS_POLLED /* outputUsesInterrupts */
};
assert( minor == 0 );
diff --git a/bsps/shared/dev/serial/legacy-console.c b/bsps/shared/dev/serial/legacy-console.c
index 7da87276b0..698439b247 100644
--- a/bsps/shared/dev/serial/legacy-console.c
+++ b/bsps/shared/dev/serial/legacy-console.c
@@ -234,7 +234,11 @@ rtems_device_driver console_open(
Callbacks.stopRemoteTx = NULL;
Callbacks.startRemoteTx = NULL;
}
- Callbacks.outputUsesInterrupts = cptr->pDeviceFns->deviceOutputUsesInterrupts;
+ if (cptr->pDeviceFns->deviceOutputUsesInterrupts) {
+ Callbacks.outputUsesInterrupts = TERMIOS_IRQ_DRIVEN;
+ } else {
+ Callbacks.outputUsesInterrupts = TERMIOS_POLLED;
+ }
/* XXX what about
* Console_Port_Tbl[minor].ulMargin,
diff --git a/bsps/shared/dev/serial/zynq-uart-kernel-io.c b/bsps/shared/dev/serial/zynq-uart-kernel-io.c
new file mode 100644
index 0000000000..61b1043cd2
--- /dev/null
+++ b/bsps/shared/dev/serial/zynq-uart-kernel-io.c
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup zynq_uart
+ *
+ * @brief This source file contains the definition of ::BSP_output_char and
+ * ::BSP_poll_char.
+ */
+
+/*
+ * Copyright (C) 2013, 2024 embedded brains GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/bspIo.h>
+
+#include <bsp.h>
+#include <dev/serial/zynq-uart-regs.h>
+#include <rtems/sysinit.h>
+
+static void zynq_uart_kernel_output_char(char c)
+{
+ volatile zynq_uart *regs =
+ (volatile zynq_uart *) ZYNQ_UART_KERNEL_IO_BASE_ADDR;
+
+ zynq_uart_write_char_polled(regs, c);
+}
+
+static int zynq_uart_kernel_poll_char(void)
+{
+ volatile zynq_uart *regs =
+ (volatile zynq_uart *) ZYNQ_UART_KERNEL_IO_BASE_ADDR;
+
+ return zynq_uart_read_char_polled(regs);
+}
+
+static void zynq_uart_kernel_early_init(char c);
+
+static void zynq_uart_kernel_init(void)
+{
+ volatile zynq_uart *regs =
+ (volatile zynq_uart *) ZYNQ_UART_KERNEL_IO_BASE_ADDR;
+
+ if (BSP_output_char != zynq_uart_kernel_early_init) {
+ return;
+ }
+
+ zynq_uart_initialize(regs);
+ BSP_output_char = zynq_uart_kernel_output_char;
+}
+
+static void zynq_uart_kernel_early_init(char c)
+{
+ zynq_uart_kernel_init();
+ zynq_uart_kernel_output_char(c);
+}
+
+BSP_output_char_function_type BSP_output_char = zynq_uart_kernel_early_init;
+
+BSP_polling_getchar_function_type BSP_poll_char = zynq_uart_kernel_poll_char;
+
+RTEMS_SYSINIT_ITEM(
+ zynq_uart_kernel_init,
+ RTEMS_SYSINIT_BSP_START,
+ RTEMS_SYSINIT_ORDER_LAST_BUT_5
+);
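One consequence of the self-replacing handler above is that kernel character output works before the sysinit item runs: the first character printed goes through zynq_uart_kernel_early_init(), which initializes the UART on demand and then re-points BSP_output_char at the regular output routine. A trivial usage sketch, with an illustrative function name:

#include <rtems/bspIo.h>

void my_early_boot_trace(void)
{
  /* Safe very early in boot: printk() emits through BSP_output_char. */
  printk("early boot: Zynq UART kernel I/O ready\n");
}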
diff --git a/bsps/shared/dev/serial/zynq-uart-polled.c b/bsps/shared/dev/serial/zynq-uart-polled.c
index 7d5dd8ff1a..dbf75539f6 100644
--- a/bsps/shared/dev/serial/zynq-uart-polled.c
+++ b/bsps/shared/dev/serial/zynq-uart-polled.c
@@ -124,10 +124,8 @@ int zynq_cal_baud_rate(uint32_t baudrate,
return 0;
}
-void zynq_uart_initialize(rtems_termios_device_context *base)
+void zynq_uart_initialize(volatile zynq_uart *regs)
{
- zynq_uart_context *ctx = (zynq_uart_context *) base;
- volatile zynq_uart *regs = ctx->regs;
uint32_t brgr = 0x3e;
uint32_t bauddiv = 0x6;
uint32_t mode_clks = regs->mode & ZYNQ_UART_MODE_CLKS;
@@ -154,18 +152,15 @@ void zynq_uart_initialize(rtems_termios_device_context *base)
| ZYNQ_UART_MODE_CHRL(ZYNQ_UART_MODE_CHRL_8)
| mode_clks;
- while (zynq_uart_read_polled(base) >= 0) {
+ while (zynq_uart_read_char_polled(regs) >= 0) {
/* Drop */
}
- zynq_uart_reset_tx_flush(ctx);
+ zynq_uart_reset_tx_flush(regs);
}
-int zynq_uart_read_polled(rtems_termios_device_context *base)
+int zynq_uart_read_char_polled(volatile zynq_uart *regs)
{
- zynq_uart_context *ctx = (zynq_uart_context *) base;
- volatile zynq_uart *regs = ctx->regs;
-
if ((regs->channel_sts & ZYNQ_UART_CHANNEL_STS_REMPTY) != 0) {
return -1;
} else {
@@ -173,14 +168,8 @@ int zynq_uart_read_polled(rtems_termios_device_context *base)
}
}
-void zynq_uart_write_polled(
- rtems_termios_device_context *base,
- char c
-)
+void zynq_uart_write_char_polled(volatile zynq_uart *regs, char c)
{
- zynq_uart_context *ctx = (zynq_uart_context *) base;
- volatile zynq_uart *regs = ctx->regs;
-
while ((regs->channel_sts & ZYNQ_UART_CHANNEL_STS_TNFUL) != 0) {
/* Wait */
}
@@ -188,13 +177,12 @@ void zynq_uart_write_polled(
regs->tx_rx_fifo = ZYNQ_UART_TX_RX_FIFO_FIFO(c);
}
-void zynq_uart_reset_tx_flush(zynq_uart_context *ctx)
+void zynq_uart_reset_tx_flush(volatile zynq_uart *regs)
{
- volatile zynq_uart *regs = ctx->regs;
- int c = 4;
+ int c = 4;
while (c-- > 0)
- zynq_uart_write_polled(&ctx->base, '\r');
+ zynq_uart_write_char_polled(regs, '\r');
while ((regs->channel_sts & ZYNQ_UART_CHANNEL_STS_TEMPTY) == 0 ||
(regs->channel_sts & ZYNQ_UART_CHANNEL_STS_TACTIVE) != 0) {
diff --git a/bsps/shared/dev/serial/zynq-uart.c b/bsps/shared/dev/serial/zynq-uart.c
index 10e5d2acff..0489288271 100644
--- a/bsps/shared/dev/serial/zynq-uart.c
+++ b/bsps/shared/dev/serial/zynq-uart.c
@@ -28,6 +28,7 @@
#include <dev/serial/zynq-uart.h>
#include <dev/serial/zynq-uart-regs.h>
#include <bsp/irq.h>
+#include <rtems/termiostypes.h>
#include <bspopts.h>
@@ -66,14 +67,14 @@ static bool zynq_uart_first_open(
rtems_libio_open_close_args_t *args
)
{
-#ifdef ZYNQ_CONSOLE_USE_INTERRUPTS
zynq_uart_context *ctx = (zynq_uart_context *) base;
volatile zynq_uart *regs = ctx->regs;
+#ifdef ZYNQ_CONSOLE_USE_INTERRUPTS
rtems_status_code sc;
#endif
rtems_termios_set_initial_baud(tty, ZYNQ_UART_DEFAULT_BAUD);
- zynq_uart_initialize(base);
+ zynq_uart_initialize(regs);
#ifdef ZYNQ_CONSOLE_USE_INTERRUPTS
regs->rx_fifo_trg_lvl = 1;
@@ -108,15 +109,23 @@ static void zynq_uart_last_close(
}
#endif
+#ifndef ZYNQ_CONSOLE_USE_INTERRUPTS
+static int zynq_uart_read_polled(rtems_termios_device_context *base)
+{
+ zynq_uart_context *ctx = (zynq_uart_context *) base;
+ return zynq_uart_read_char_polled(ctx->regs);
+}
+#endif
+
static void zynq_uart_write_support(
rtems_termios_device_context *base,
const char *buf,
size_t len
)
{
-#ifdef ZYNQ_CONSOLE_USE_INTERRUPTS
zynq_uart_context *ctx = (zynq_uart_context *) base;
volatile zynq_uart *regs = ctx->regs;
+#ifdef ZYNQ_CONSOLE_USE_INTERRUPTS
regs->irq_dis = ZYNQ_UART_TEMPTY;
@@ -134,9 +143,9 @@ static void zynq_uart_write_support(
regs->irq_en = ZYNQ_UART_TEMPTY;
}
#else
- ssize_t i;
+ size_t i;
for (i = 0; i < len; ++i) {
- zynq_uart_write_polled(base, buf[i]);
+ zynq_uart_write_char_polled(regs, buf[i]);
}
#endif
}
diff --git a/bsps/shared/dev/spi/xqspipsu-flash-helper.c b/bsps/shared/dev/spi/xqspipsu-flash-helper.c
index 43dc700507..10e1066173 100644
--- a/bsps/shared/dev/spi/xqspipsu-flash-helper.c
+++ b/bsps/shared/dev/spi/xqspipsu-flash-helper.c
@@ -274,22 +274,57 @@ static void QspiPsuHandler(
}
}
-/*****************************************************************************/
-/**
- *
- * Reads the flash ID and identifies the flash in FCT table.
- *
- * @param QspiPsuPtr is a pointer to the QSPIPSU driver component to use.
- *
- * @return XST_SUCCESS if successful, else XST_FAILURE.
- *
- * @note None.
- *
- *****************************************************************************/
-static int FlashReadID(XQspiPsu *QspiPsuPtr)
+int QspiPsu_NOR_RDSFDP(
+ XQspiPsu *QspiPsuPtr,
+ u32 Address,
+ u32 ByteCount,
+ u8 **ReadBfrPtr
+)
+{
+ int Status;
+
+ *ReadBfrPtr = ReadBuffer;
+
+ CmdBfr[COMMAND_OFFSET] = READ_SFDP;
+ CmdBfr[ADDRESS_1_OFFSET] =
+ (u8)((Address & 0xFF0000) >> 16);
+ CmdBfr[ADDRESS_2_OFFSET] =
+ (u8)((Address & 0xFF00) >> 8);
+ CmdBfr[ADDRESS_3_OFFSET] =
+ (u8)(Address & 0xFF);
+
+ FlashMsg[0].BusWidth = XQSPIPSU_SELECT_MODE_SPI;
+ FlashMsg[0].TxBfrPtr = CmdBfr;
+ FlashMsg[0].RxBfrPtr = NULL;
+ FlashMsg[0].ByteCount = 4;
+ FlashMsg[0].Flags = XQSPIPSU_MSG_FLAG_TX;
+
+ FlashMsg[1].BusWidth = XQSPIPSU_SELECT_MODE_SPI;
+ FlashMsg[1].TxBfrPtr = NULL;
+ FlashMsg[1].RxBfrPtr = NULL;
+ FlashMsg[1].ByteCount = DUMMY_CLOCKS;
+ FlashMsg[1].Flags = 0;
+
+ FlashMsg[2].BusWidth = XQSPIPSU_SELECT_MODE_SPI;
+ FlashMsg[2].TxBfrPtr = NULL;
+ FlashMsg[2].RxBfrPtr = *ReadBfrPtr;
+ FlashMsg[2].ByteCount = ByteCount;
+ FlashMsg[2].Flags = XQSPIPSU_MSG_FLAG_RX;
+
+ TransferInProgress = TRUE;
+ Status = XQspiPsu_InterruptTransfer(QspiPsuPtr, FlashMsg, 3);
+ if (Status != XST_SUCCESS)
+ return XST_FAILURE;
+
+ while (TransferInProgress);
+
+ rtems_cache_invalidate_multiple_data_lines(ReadBuffer, ByteCount);
+ return 0;
+}
+
+int QspiPsu_NOR_RDID(XQspiPsu *QspiPsuPtr, u8 *ReadBfrPtr, u32 ReadLen)
{
int Status;
- u32 ReadId = 0;
/*
* Read ID
@@ -303,7 +338,7 @@ static int FlashReadID(XQspiPsu *QspiPsuPtr)
FlashMsg[1].TxBfrPtr = NULL;
FlashMsg[1].RxBfrPtr = ReadBfrPtr;
- FlashMsg[1].ByteCount = 3;
+ FlashMsg[1].ByteCount = ReadLen;
FlashMsg[1].BusWidth = XQSPIPSU_SELECT_MODE_SPI;
FlashMsg[1].Flags = XQSPIPSU_MSG_FLAG_RX;
@@ -314,6 +349,33 @@ static int FlashReadID(XQspiPsu *QspiPsuPtr)
}
while (TransferInProgress);
+ rtems_cache_invalidate_multiple_data_lines(ReadBfrPtr, ReadLen);
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ *
+ * Reads the flash ID and identifies the flash in FCT table.
+ *
+ * @param QspiPsuPtr is a pointer to the QSPIPSU driver component to use.
+ *
+ * @return XST_SUCCESS if successful, else XST_FAILURE.
+ *
+ * @note None.
+ *
+ *****************************************************************************/
+static int FlashReadID(XQspiPsu *QspiPsuPtr)
+{
+ u32 ReadId = 0;
+ u32 ReadLen = 3;
+ int Status;
+
+ Status = QspiPsu_NOR_RDID(QspiPsuPtr, ReadBfrPtr, ReadLen);
+ if (Status != XST_SUCCESS) {
+ return XST_FAILURE;
+ }
+
/* In case of dual, read both and ensure they are same make/size */
/*
@@ -333,7 +395,7 @@ static int FlashReadID(XQspiPsu *QspiPsuPtr)
return XST_SUCCESS;
}
-int QspiPsu_NOR_Write(
+int QspiPsu_NOR_Write_Page(
XQspiPsu *QspiPsuPtr,
u32 Address,
u32 ByteCount,
@@ -475,6 +537,51 @@ int QspiPsu_NOR_Write(
return 0;
}
+int QspiPsu_NOR_Write(
+ XQspiPsu *QspiPsuPtr,
+ u32 Address,
+ u32 ByteCount,
+ u8 *WriteBfrPtr
+)
+{
+  int Status = XST_SUCCESS;
+ size_t ByteCountRemaining = ByteCount;
+ unsigned char *WriteBfrPartial = WriteBfrPtr;
+ uint32_t AddressPartial = Address;
+ uint32_t PageSize = Flash_Config_Table[FCTIndex].PageSize;
+ if(QspiPsuPtr->Config.ConnectionMode == XQSPIPSU_CONNECTION_MODE_PARALLEL) {
+ PageSize *= 2;
+ }
+
+ while (ByteCountRemaining > 0) {
+ /* Get write boundary */
+ size_t WriteChunkLen = RTEMS_ALIGN_UP(AddressPartial + 1, PageSize);
+
+ /* Get offset to write boundary */
+ WriteChunkLen -= (size_t)AddressPartial;
+
+ /* Cap short writes */
+ if (WriteChunkLen > ByteCountRemaining) {
+ WriteChunkLen = ByteCountRemaining;
+ }
+
+ Status = QspiPsu_NOR_Write_Page(
+ QspiPsuPtr,
+ AddressPartial,
+ WriteChunkLen,
+ WriteBfrPartial
+ );
+ if ( Status != XST_SUCCESS ) {
+ return Status;
+ }
+
+ ByteCountRemaining -= WriteChunkLen;
+ AddressPartial += WriteChunkLen;
+ WriteBfrPartial += WriteChunkLen;
+ }
+ return Status;
+}
+
int QspiPsu_NOR_Erase(
XQspiPsu *QspiPsuPtr,
u32 Address,
@@ -489,22 +596,18 @@ int QspiPsu_NOR_Erase(
u32 NumSect;
int Status;
u32 SectSize;
- u32 SectMask;
WriteEnableCmd = WRITE_ENABLE_CMD;
if(QspiPsuPtr->Config.ConnectionMode == XQSPIPSU_CONNECTION_MODE_PARALLEL) {
- SectMask = (Flash_Config_Table[FCTIndex]).SectMask - (Flash_Config_Table[FCTIndex]).SectSize;
SectSize = (Flash_Config_Table[FCTIndex]).SectSize * 2;
NumSect = (Flash_Config_Table[FCTIndex]).NumSect;
} else if (QspiPsuPtr->Config.ConnectionMode == XQSPIPSU_CONNECTION_MODE_STACKED) {
NumSect = (Flash_Config_Table[FCTIndex]).NumSect * 2;
- SectMask = (Flash_Config_Table[FCTIndex]).SectMask;
SectSize = (Flash_Config_Table[FCTIndex]).SectSize;
} else {
SectSize = (Flash_Config_Table[FCTIndex]).SectSize;
NumSect = (Flash_Config_Table[FCTIndex]).NumSect;
- SectMask = (Flash_Config_Table[FCTIndex]).SectMask;
}
/*
@@ -569,18 +672,9 @@ int QspiPsu_NOR_Erase(
/*
* Calculate no. of sectors to erase based on byte count
*/
- NumSect = (ByteCount / SectSize) + 1;
-
- /*
- * If ByteCount to k sectors,
- * but the address range spans from N to N+k+1 sectors, then
- * increment no. of sectors to be erased
- */
-
- if (((Address + ByteCount) & SectMask) ==
- ((Address + (NumSect * SectSize)) & SectMask)) {
- NumSect++;
- }
+ u32 SectorStartBase = RTEMS_ALIGN_DOWN(Address, SectSize);
+ u32 SectorEndTop = RTEMS_ALIGN_UP(Address + ByteCount, SectSize);
+ NumSect = (SectorEndTop - SectorStartBase)/SectSize;
for (Sector = 0; Sector < NumSect; Sector++) {
@@ -828,6 +922,7 @@ int QspiPsu_NOR_Read(
while (TransferInProgress);
}
+ rtems_cache_invalidate_multiple_data_lines(ReadBuffer, ByteCount);
return 0;
}
@@ -1015,6 +1110,7 @@ static int MultiDieRead(
Address += data_len;
remain_len -= data_len;
}
+ rtems_cache_invalidate_multiple_data_lines(ReadBfrPtr, ByteCount);
return 0;
}
@@ -2226,3 +2322,20 @@ static int MultiDieReadEcc(
}
return 0;
}
+
+u32 QspiPsu_NOR_Get_Sector_Size(XQspiPsu *QspiPsuPtr)
+{
+ if(QspiPsuPtr->Config.ConnectionMode == XQSPIPSU_CONNECTION_MODE_PARALLEL) {
+ return Flash_Config_Table[FCTIndex].SectSize * 2;
+ }
+ return Flash_Config_Table[FCTIndex].SectSize;
+}
+
+u32 QspiPsu_NOR_Get_Device_Size(XQspiPsu *QspiPsuPtr)
+{
+ if(QspiPsuPtr->Config.ConnectionMode == XQSPIPSU_CONNECTION_MODE_STACKED
+ || QspiPsuPtr->Config.ConnectionMode == XQSPIPSU_CONNECTION_MODE_PARALLEL) {
+ return Flash_Config_Table[FCTIndex].FlashDeviceSize * 2;
+ }
+ return Flash_Config_Table[FCTIndex].FlashDeviceSize;
+}
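
The new QspiPsu_NOR_Write() splits a request into page-aligned chunks with RTEMS_ALIGN_UP(), and QspiPsu_NOR_Erase() now derives the sector count from aligned range boundaries instead of the old SectMask heuristic. The stand-alone sketch below reproduces that arithmetic with made-up page and sector sizes so the boundary handling can be checked in isolation.

#include <rtems.h>
#include <stdio.h>

int main(void)
{
  uint32_t PageSize = 256;           /* example page size */
  uint32_t SectSize = 64 * 1024;     /* example sector size */
  uint32_t Address = 0x1F0;          /* 16 bytes below a page boundary */
  uint32_t ByteCount = 100;

  /* First write chunk ends at the next page boundary: 0x200 - 0x1F0 = 16 */
  uint32_t ChunkLen = RTEMS_ALIGN_UP(Address + 1, PageSize) - Address;
  if (ChunkLen > ByteCount) {
    ChunkLen = ByteCount;
  }

  /* Erase covers every sector touched by [Address, Address + ByteCount) */
  uint32_t NumSect = (RTEMS_ALIGN_UP(Address + ByteCount, SectSize) -
                      RTEMS_ALIGN_DOWN(Address, SectSize)) / SectSize;

  printf("first chunk: %u bytes, sectors to erase: %u\n",
         (unsigned) ChunkLen, (unsigned) NumSect);
  return 0;
}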
diff --git a/bsps/shared/dev/spi/xqspipsu.c b/bsps/shared/dev/spi/xqspipsu.c
index 1286efd359..93d3fa4c98 100644
--- a/bsps/shared/dev/spi/xqspipsu.c
+++ b/bsps/shared/dev/spi/xqspipsu.c
@@ -84,6 +84,9 @@
#include "xqspipsu.h"
#include "xqspipsu_control.h"
#include "sleep.h"
+#ifdef __rtems__
+#include <rtems/rtems/cache.h>
+#endif
/************************** Constant Definitions *****************************/
#define MAX_DELAY_CNT 10000000U /**< Max delay count */
@@ -275,6 +278,12 @@ void XQspiPsu_Abort(XQspiPsu *InstancePtr)
u32 IntrStatus, ConfigReg, FifoStatus;
u32 DelayCount = 0U;
+#ifdef __rtems__
+ u32 FifoStatusMask = XQSPIPSU_ISR_RXEMPTY_MASK;
+ FifoStatusMask |= XQSPIPSU_ISR_TXEMPTY_MASK;
+ FifoStatusMask |= XQSPIPSU_ISR_GENFIFOEMPTY_MASK;
+#endif
+
Xil_AssertVoid(InstancePtr != NULL);
#ifdef DEBUG
xil_printf("\nXQspiPsu_Abort\r\n");
@@ -326,8 +335,13 @@ void XQspiPsu_Abort(XQspiPsu *InstancePtr)
*/
FifoStatus = XQspiPsu_ReadReg(InstancePtr->Config.BaseAddress,
+#ifdef __rtems__
+ XQSPIPSU_ISR_OFFSET) & FifoStatusMask;
+ while(FifoStatus != FifoStatusMask) {
+#else
XQSPIPSU_FIFO_CTRL_OFFSET);
while(FifoStatus != 0U) {
+#endif
if (DelayCount == MAX_DELAY_CNT) {
#ifdef DEBUG
xil_printf("Timeout error, FIFO reset failed.\r\n");
@@ -337,7 +351,11 @@ void XQspiPsu_Abort(XQspiPsu *InstancePtr)
usleep(1);
DelayCount++;
FifoStatus = XQspiPsu_ReadReg(InstancePtr->Config.BaseAddress,
+#ifdef __rtems__
+ XQSPIPSU_ISR_OFFSET) & FifoStatusMask;
+#else
XQSPIPSU_FIFO_CTRL_OFFSET);
+#endif
}
}
@@ -442,7 +460,16 @@ s32 XQspiPsu_PolledTransfer(XQspiPsu *InstancePtr, XQspiPsu_Msg *Msg,
for (Index = 0; Index < (s32)NumMsg; Index++) {
Xil_AssertNonvoid(Msg[Index].ByteCount > 0U);
+#ifdef __rtems__
+ if (Msg[Index].TxBfrPtr != NULL) {
+ rtems_cache_flush_multiple_data_lines(Msg[Index].TxBfrPtr, Msg[Index].ByteCount);
+ }
+#endif
}
+#ifdef __rtems__
+ rtems_cache_flush_multiple_data_lines(Msg, NumMsg * sizeof(*Msg));
+#endif
+
/*
* Check whether there is another transfer in progress.
* Not thread-safe
@@ -582,7 +609,18 @@ s32 XQspiPsu_InterruptTransfer(XQspiPsu *InstancePtr, XQspiPsu_Msg *Msg,
Xil_AssertNonvoid(InstancePtr->IsReady == XIL_COMPONENT_IS_READY);
for (Index = 0; Index < (s32)NumMsg; Index++)
+#ifdef __rtems__
+ {
+#endif
Xil_AssertNonvoid(Msg[Index].ByteCount > 0U);
+#ifdef __rtems__
+ if (Msg[Index].TxBfrPtr != NULL) {
+ rtems_cache_flush_multiple_data_lines(Msg[Index].TxBfrPtr, Msg[Index].ByteCount);
+ }
+ }
+ rtems_cache_flush_multiple_data_lines(Msg, NumMsg * sizeof(*Msg));
+#endif
+
/*
* Check whether there is another transfer in progress.
* Not thread-safe
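
The xqspipsu.c hunks add explicit cache maintenance around the DMA-capable QSPI transfers: TX buffers and the message descriptors are flushed before the transfer starts, and the flash helper invalidates RX buffers once the transfer completes. Detached from the Xilinx driver, the pattern looks roughly like this; do_dma_transfer() is a placeholder, not a real API.

#include <rtems/rtems/cache.h>
#include <stddef.h>
#include <stdint.h>

static void dma_transfer_sketch(uint8_t *tx, uint8_t *rx, size_t len)
{
  /* Write CPU-cached TX data back to memory so the DMA engine sees it */
  rtems_cache_flush_multiple_data_lines(tx, len);

  /* do_dma_transfer(tx, rx, len);  -- placeholder for the device transfer */

  /* Drop stale cache lines so the CPU reads the DMA-written RX data */
  rtems_cache_invalidate_multiple_data_lines(rx, len);
}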
diff --git a/bsps/shared/grlib/btimer/gptimer.c b/bsps/shared/grlib/btimer/gptimer.c
index f31b7c052f..cbf058ccef 100644
--- a/bsps/shared/grlib/btimer/gptimer.c
+++ b/bsps/shared/grlib/btimer/gptimer.c
@@ -50,18 +50,15 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <rtems.h>
-#include <bsp.h>
-#include <stdlib.h>
-#include <string.h>
#include <drvmgr/drvmgr.h>
#include <grlib/ambapp_bus.h>
-#include <grlib/grlib.h>
#include <grlib/gptimer.h>
+#include <grlib/gptimer-regs.h>
+#include <grlib/io.h>
#include <grlib/tlib.h>
#if defined(LEON3)
-#include <leon.h>
+#include <bsp/leon3.h>
#endif
#ifdef GPTIMER_INFO_AVAIL
@@ -75,49 +72,21 @@
#include <grlib/grlib_impl.h>
-/* GPTIMER Core Configuration Register (READ-ONLY) */
-#define GPTIMER_CFG_TIMERS_BIT 0
-#define GPTIMER_CFG_IRQ_BIT 3
-#define GPTIMER_CFG_SI_BIT 8
-#define GPTIMER_CFG_DF_BIT 9
-
-#define GPTIMER_CFG_TIMERS (0x7<<GPTIMER_CFG_TIMERS_BIT)
-#define GPTIMER_CFG_IRQ (0x1f<<GPTIMER_CFG_IRQ_BIT)
-#define GPTIMER_CFG_SI (1<<GPTIMER_CFG_SI_BIT)
-#define GPTIMER_CFG_DF (1<<GPTIMER_CFG_DF_BIT)
-
-/* GPTIMER Timer Control Register */
-#define GPTIMER_CTRL_EN_BIT 0
-#define GPTIMER_CTRL_RS_BIT 1
-#define GPTIMER_CTRL_LD_BIT 2
-#define GPTIMER_CTRL_IE_BIT 3
-#define GPTIMER_CTRL_IP_BIT 4
-#define GPTIMER_CTRL_CH_BIT 5
-#define GPTIMER_CTRL_DH_BIT 6
-
-#define GPTIMER_CTRL_EN (1<<GPTIMER_CTRL_EN_BIT)
-#define GPTIMER_CTRL_RS (1<<GPTIMER_CTRL_RS_BIT)
-#define GPTIMER_CTRL_LD (1<<GPTIMER_CTRL_LD_BIT)
-#define GPTIMER_CTRL_IE (1<<GPTIMER_CTRL_IE_BIT)
-#define GPTIMER_CTRL_IP (1<<GPTIMER_CTRL_IP_BIT)
-#define GPTIMER_CTRL_CH (1<<GPTIMER_CTRL_CH_BIT)
-#define GPTIMER_CTRL_DH (1<<GPTIMER_CTRL_DH_BIT)
-
#define DBG(x...)
/* GPTIMER timer private */
-struct gptimer_timer {
+struct gptimer_timer_priv {
struct tlib_dev tdev; /* Must be first in struct */
- struct gptimer_timer_regs *tregs;
+ gptimer_timer *tregs;
char index; /* Timer Index in this driver */
char tindex; /* Timer Index In Hardware */
- unsigned char irq_ack_mask;
+ uint32_t irq_ack_mask;
};
/* GPTIMER Core private */
struct gptimer_priv {
struct drvmgr_dev *dev;
- struct gptimer_regs *regs;
+ gptimer *regs;
unsigned int base_clk;
unsigned int base_freq;
unsigned int widthmask;
@@ -126,7 +95,7 @@ struct gptimer_priv {
/* Structure per Timer unit, the core supports up to 8 timers */
int timer_cnt;
- struct gptimer_timer timers[0];
+ struct gptimer_timer_priv timers[0];
};
void gptimer_isr(void *data);
@@ -200,14 +169,14 @@ void gptimer_register_drv (void)
int gptimer_init1(struct drvmgr_dev *dev)
{
struct gptimer_priv *priv;
- struct gptimer_regs *regs;
+ gptimer *regs;
struct amba_dev_info *ambadev;
struct ambapp_core *pnpinfo;
int timer_hw_cnt, timer_cnt, timer_start;
int i, size;
- struct gptimer_timer *timer;
+ struct gptimer_timer_priv *timer;
union drvmgr_key_value *value;
- unsigned char irq_ack_mask;
+ uint32_t irq_ack_mask;
/* Get device information from AMBA PnP information */
ambadev = (struct amba_dev_info *)dev->businfo;
@@ -215,12 +184,12 @@ int gptimer_init1(struct drvmgr_dev *dev)
return -1;
}
pnpinfo = &ambadev->info;
- regs = (struct gptimer_regs *)pnpinfo->apb_slv->start;
+ regs = (gptimer *)pnpinfo->apb_slv->start;
DBG("GPTIMER[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
/* Get number of Timers */
- timer_hw_cnt = regs->cfg & GPTIMER_CFG_TIMERS;
+ timer_hw_cnt = GPTIMER_CONFIG_TIMERS_GET(grlib_load_32(&regs->config));
	/* Let user select a range of timers to be used. In AMP systems
	 * it is sometimes necessary to leave timers for other CPU instances.
@@ -251,7 +220,7 @@ int gptimer_init1(struct drvmgr_dev *dev)
* are present.
*/
size = sizeof(struct gptimer_priv) +
- timer_cnt*sizeof(struct gptimer_timer);
+ timer_cnt*sizeof(struct gptimer_timer_priv);
priv = dev->priv = grlib_calloc(1, size);
if ( !priv )
return DRVMGR_NOMEM;
@@ -277,24 +246,24 @@ int gptimer_init1(struct drvmgr_dev *dev)
*/
value = drvmgr_dev_key_get(priv->dev, "prescaler", DRVMGR_KT_INT);
if ( value )
- regs->scaler_reload = value->i;
+ grlib_store_32(&regs->sreload, value->i);
/* Get Frequency that the timers are operating in (after prescaler) */
- priv->base_freq = priv->base_clk / (priv->regs->scaler_reload + 1);
+ priv->base_freq = priv->base_clk / (grlib_load_32(&regs->sreload) + 1);
/* Stop Timer and probe Pending bit. In newer hardware the
* timer has pending bit is cleared by writing a one to it,
* whereas older versions it is cleared with a zero.
*/
- priv->regs->timer[timer_start].ctrl = GPTIMER_CTRL_IP;
- if ((priv->regs->timer[timer_start].ctrl & GPTIMER_CTRL_IP) != 0)
- irq_ack_mask = ~GPTIMER_CTRL_IP;
+ grlib_store_32(&regs->timer[timer_start].tctrl, GPTIMER_TCTRL_IP);
+ if ((grlib_load_32(&regs->timer[timer_start].tctrl) & GPTIMER_TCTRL_IP) != 0)
+ irq_ack_mask = ~GPTIMER_TCTRL_IP;
else
- irq_ack_mask = ~0;
+ irq_ack_mask = ~0U;
/* Probe timer register width mask */
- priv->regs->timer[timer_start].value = 0xffffffff;
- priv->widthmask = priv->regs->timer[timer_start].value;
+ grlib_store_32(&regs->timer[timer_start].tcntval, 0xffffffff);
+ priv->widthmask = grlib_load_32(&regs->timer[timer_start].tcntval);
priv->timer_cnt = timer_cnt;
for (i=0; i<timer_cnt; i++) {
@@ -314,7 +283,7 @@ int gptimer_init1(struct drvmgr_dev *dev)
* B. Each Timer have an individual IRQ. The number is:
* BASE_IRQ + timer_index
*/
- priv->separate_interrupt = (regs->cfg & GPTIMER_CFG_SI) != 0;
+ priv->separate_interrupt = (grlib_load_32(&regs->config) & GPTIMER_CONFIG_SI) != 0;
return DRVMGR_OK;
}
@@ -326,7 +295,7 @@ static int gptimer_info(
void *p, int argc, char *argv[])
{
struct gptimer_priv *priv = dev->priv;
- struct gptimer_timer *timer;
+ struct gptimer_timer_priv *timer;
char buf[64];
int i;
@@ -337,7 +306,7 @@ static int gptimer_info(
print_line(p, buf);
sprintf(buf, "REGS: 0x%08x", (unsigned int)priv->regs);
print_line(p, buf);
- sprintf(buf, "BASE SCALER: %d", priv->regs->scaler_reload);
+ sprintf(buf, "BASE SCALER: %d", grlib_load_32(&priv->regs->sreload));
print_line(p, buf);
sprintf(buf, "BASE FREQ: %dkHz", priv->base_freq / 1000);
print_line(p, buf);
@@ -350,9 +319,9 @@ static int gptimer_info(
print_line(p, buf);
sprintf(buf, " TLIB Index: %d", timer->index);
print_line(p, buf);
- sprintf(buf, " RELOAD REG: %d", timer->tregs->reload);
+ sprintf(buf, " RELOAD REG: %d", grlib_load_32(&timer->tregs->trldval));
print_line(p, buf);
- sprintf(buf, " CTRL REG: %d", timer->tregs->ctrl);
+ sprintf(buf, " CTRL REG: %d", grlib_load_32(&timer->tregs->tctrl));
print_line(p, buf);
}
@@ -360,24 +329,28 @@ static int gptimer_info(
}
#endif
-static inline struct gptimer_priv *priv_from_timer(struct gptimer_timer *t)
+static inline struct gptimer_priv *priv_from_timer(struct gptimer_timer_priv *t)
{
return (struct gptimer_priv *)
((unsigned int)t -
sizeof(struct gptimer_priv) -
- t->index * sizeof(struct gptimer_timer));
+ t->index * sizeof(struct gptimer_timer_priv));
}
static int gptimer_tlib_int_pend(struct tlib_dev *hand, int ack)
{
- struct gptimer_timer *timer = (struct gptimer_timer *)hand;
- unsigned int ctrl = timer->tregs->ctrl;
+ struct gptimer_timer_priv *timer = (struct gptimer_timer_priv *)hand;
+ uint32_t tctrl;
+
+ tctrl = grlib_load_32(&timer->tregs->tctrl);
- if ((ctrl & (GPTIMER_CTRL_IP | GPTIMER_CTRL_IE)) ==
- (GPTIMER_CTRL_IP | GPTIMER_CTRL_IE)) {
+ if ((tctrl & (GPTIMER_TCTRL_IP | GPTIMER_TCTRL_IE)) ==
+ (GPTIMER_TCTRL_IP | GPTIMER_TCTRL_IE)) {
/* clear Pending IRQ ? */
- if (ack)
- timer->tregs->ctrl = ctrl & timer->irq_ack_mask;
+ if (ack) {
+ tctrl &= timer->irq_ack_mask;
+ grlib_store_32(&timer->tregs->tctrl, tctrl);
+ }
return 1; /* timer generated IRQ */
} else
return 0; /* was not timer causing IRQ */
@@ -406,12 +379,15 @@ void gptimer_isr(void *data)
static void gptimer_tlib_reset(struct tlib_dev *hand)
{
- struct gptimer_timer *timer = (struct gptimer_timer *)hand;
-
- timer->tregs->ctrl = (timer->tregs->ctrl & timer->irq_ack_mask) &
- GPTIMER_CTRL_IP;
- timer->tregs->reload = 0xffffffff;
- timer->tregs->ctrl = GPTIMER_CTRL_LD;
+ struct gptimer_timer_priv *timer = (struct gptimer_timer_priv *)hand;
+ uint32_t tctrl;
+
+ tctrl = grlib_load_32(&timer->tregs->tctrl);
+ tctrl &= timer->irq_ack_mask;
+ tctrl &= GPTIMER_TCTRL_IP;
+ grlib_store_32(&timer->tregs->tctrl, tctrl);
+ grlib_store_32(&timer->tregs->trldval, 0xffffffff);
+ grlib_store_32(&timer->tregs->tctrl, GPTIMER_TCTRL_LD);
}
static void gptimer_tlib_get_freq(
@@ -419,24 +395,24 @@ static void gptimer_tlib_get_freq(
unsigned int *basefreq,
unsigned int *tickrate)
{
- struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+ struct gptimer_timer_priv *timer = (struct gptimer_timer_priv *)hand;
struct gptimer_priv *priv = priv_from_timer(timer);
/* Calculate base frequency from Timer Clock and Prescaler */
if ( basefreq )
*basefreq = priv->base_freq;
if ( tickrate )
- *tickrate = timer->tregs->reload + 1;
+ *tickrate = grlib_load_32(&timer->tregs->trldval) + 1;
}
static int gptimer_tlib_set_freq(struct tlib_dev *hand, unsigned int tickrate)
{
- struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+ struct gptimer_timer_priv *timer = (struct gptimer_timer_priv *)hand;
- timer->tregs->reload = tickrate - 1;
+ grlib_store_32(&timer->tregs->trldval, tickrate - 1);
/*Check that value was allowed (Timer may not be as wide as expected)*/
- if ( timer->tregs->reload != (tickrate - 1) )
+ if (grlib_load_32(&timer->tregs->trldval) != (tickrate - 1))
return -1;
else
return 0;
@@ -444,8 +420,9 @@ static int gptimer_tlib_set_freq(struct tlib_dev *hand, unsigned int tickrate)
static void gptimer_tlib_irq_reg(struct tlib_dev *hand, tlib_isr_t func, void *data, int flags)
{
- struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+ struct gptimer_timer_priv *timer = (struct gptimer_timer_priv *)hand;
struct gptimer_priv *priv = priv_from_timer(timer);
+ uint32_t tctrl;
if ( priv->separate_interrupt ) {
drvmgr_interrupt_register(priv->dev, timer->tindex,
@@ -476,16 +453,21 @@ static void gptimer_tlib_irq_reg(struct tlib_dev *hand, tlib_isr_t func, void *d
}
#endif
- timer->tregs->ctrl |= GPTIMER_CTRL_IE;
+ tctrl = grlib_load_32(&timer->tregs->tctrl);
+ tctrl |= GPTIMER_TCTRL_IE;
+ grlib_store_32(&timer->tregs->tctrl, tctrl);
}
static void gptimer_tlib_irq_unreg(struct tlib_dev *hand, tlib_isr_t func, void *data)
{
- struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+ struct gptimer_timer_priv *timer = (struct gptimer_timer_priv *)hand;
struct gptimer_priv *priv = priv_from_timer(timer);
+ uint32_t tctrl;
/* Turn off IRQ at source, unregister IRQ handler */
- timer->tregs->ctrl &= ~GPTIMER_CTRL_IE;
+ tctrl = grlib_load_32(&timer->tregs->tctrl);
+ tctrl &= ~GPTIMER_TCTRL_IE;
+ grlib_store_32(&timer->tregs->tctrl, tctrl);
if ( priv->separate_interrupt ) {
drvmgr_interrupt_unregister(priv->dev, timer->tindex,
@@ -502,46 +484,54 @@ static void gptimer_tlib_irq_unreg(struct tlib_dev *hand, tlib_isr_t func, void
static void gptimer_tlib_start(struct tlib_dev *hand, int once)
{
- struct gptimer_timer *timer = (struct gptimer_timer *)hand;
- unsigned int ctrl;
+ struct gptimer_timer_priv *timer = (struct gptimer_timer_priv *)hand;
+ uint32_t tctrl;
/* Load the selected frequency before starting Frequency */
- ctrl = GPTIMER_CTRL_LD | GPTIMER_CTRL_EN;
+ tctrl = grlib_load_32(&timer->tregs->tctrl);
+ tctrl &= timer->irq_ack_mask;
+ tctrl &= ~GPTIMER_TCTRL_RS;
+ tctrl |= GPTIMER_TCTRL_LD | GPTIMER_TCTRL_EN;
if ( once == 0 )
- ctrl |= GPTIMER_CTRL_RS; /* Restart Timer */
- timer->tregs->ctrl = ctrl | (timer->tregs->ctrl & timer->irq_ack_mask &
- ~GPTIMER_CTRL_RS);
+ tctrl |= GPTIMER_TCTRL_RS; /* Restart Timer */
+ grlib_store_32(&timer->tregs->tctrl, tctrl);
}
static void gptimer_tlib_stop(struct tlib_dev *hand)
{
- struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+ struct gptimer_timer_priv *timer = (struct gptimer_timer_priv *)hand;
+ uint32_t tctrl;
/* Load the selected Frequency */
- timer->tregs->ctrl &= ~(GPTIMER_CTRL_EN|GPTIMER_CTRL_IP);
+ tctrl = grlib_load_32(&timer->tregs->tctrl);
+ tctrl &= ~(GPTIMER_TCTRL_EN|GPTIMER_TCTRL_IP);
+ grlib_store_32(&timer->tregs->tctrl, tctrl);
}
static void gptimer_tlib_restart(struct tlib_dev *hand)
{
- struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+ struct gptimer_timer_priv *timer = (struct gptimer_timer_priv *)hand;
+ uint32_t tctrl;
- timer->tregs->ctrl |= GPTIMER_CTRL_LD | GPTIMER_CTRL_EN;
+ tctrl = grlib_load_32(&timer->tregs->tctrl);
+ tctrl |= GPTIMER_TCTRL_LD | GPTIMER_TCTRL_EN;
+ grlib_store_32(&timer->tregs->tctrl, tctrl);
}
static void gptimer_tlib_get_counter(
struct tlib_dev *hand,
unsigned int *counter)
{
- struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+ struct gptimer_timer_priv *timer = (struct gptimer_timer_priv *)hand;
- *counter = timer->tregs->value;
+ *counter = grlib_load_32(&timer->tregs->tcntval);
}
static void gptimer_tlib_get_widthmask(
struct tlib_dev *hand,
unsigned int *widthmask)
{
- struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+ struct gptimer_timer_priv *timer = (struct gptimer_timer_priv *)hand;
struct gptimer_priv *priv = priv_from_timer(timer);
*widthmask = priv->widthmask;
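
The gptimer conversion replaces direct volatile register accesses with grlib_load_32()/grlib_store_32() and the GPTIMER_TCTRL_* constants from <grlib/gptimer-regs.h>. The read-modify-write idiom used throughout the converted driver is summarized below; the helper name is illustrative only.

#include <grlib/gptimer-regs.h>
#include <grlib/io.h>
#include <stdint.h>

static void gptimer_enable_irq_sketch(gptimer_timer *tregs)
{
  /* Load the control word once, adjust the bits, then store it back */
  uint32_t tctrl = grlib_load_32(&tregs->tctrl);
  tctrl |= GPTIMER_TCTRL_IE;
  grlib_store_32(&tregs->tctrl, tctrl);
}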
diff --git a/bsps/shared/grlib/btimer/tlib_ckinit.c b/bsps/shared/grlib/btimer/tlib_ckinit.c
index 8a5ee33842..e2179090fa 100644
--- a/bsps/shared/grlib/btimer/tlib_ckinit.c
+++ b/bsps/shared/grlib/btimer/tlib_ckinit.c
@@ -433,10 +433,10 @@ static const struct ops ops_irqamp = {
} \
} while (0)
-#define Clock_driver_timecounter_tick() \
+#define Clock_driver_timecounter_tick(arg) \
tlib_clock_timecounter_tick()
-#define Clock_driver_support_at_tick() \
+#define Clock_driver_support_at_tick(arg) \
do { \
rtems_device_driver ret; \
ret = tlib_clock_at_tick(); \
diff --git a/bsps/shared/grlib/drvmgr/ambapp_bus_grlib.c b/bsps/shared/grlib/drvmgr/ambapp_bus_grlib.c
index 96b77907a6..35d23c1858 100644
--- a/bsps/shared/grlib/drvmgr/ambapp_bus_grlib.c
+++ b/bsps/shared/grlib/drvmgr/ambapp_bus_grlib.c
@@ -41,7 +41,7 @@
#include <grlib/genirq.h>
#include <bsp.h>
-#include <bsp/irq.h>
+#include <bsp/irq-generic.h>
#include <grlib/grlib_impl.h>
@@ -227,7 +227,10 @@ static int ambapp_grlib_int_clear
struct drvmgr_dev *dev,
int irq)
{
- BSP_shared_interrupt_clear(irq);
+ if (rtems_interrupt_clear(irq) != RTEMS_SUCCESSFUL) {
+ return DRVMGR_FAIL;
+ }
+
return DRVMGR_OK;
}
@@ -237,7 +240,10 @@ static int ambapp_grlib_int_mask
int irq
)
{
- BSP_shared_interrupt_mask(irq);
+ if (rtems_interrupt_vector_disable(irq) != RTEMS_SUCCESSFUL) {
+ return DRVMGR_FAIL;
+ }
+
return DRVMGR_OK;
}
@@ -247,7 +253,10 @@ static int ambapp_grlib_int_unmask
int irq
)
{
- BSP_shared_interrupt_unmask(irq);
+ if (rtems_interrupt_vector_enable(irq) != RTEMS_SUCCESSFUL) {
+ return DRVMGR_FAIL;
+ }
+
return DRVMGR_OK;
}
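
The driver manager callbacks above now call the RTEMS interrupt manager directly and translate its status code into a driver manager return value. A hedged sketch of that mapping for the unmask case (function name is illustrative):

#include <bsp/irq-generic.h>
#include <drvmgr/drvmgr.h>

static int grlib_unmask_sketch(int irq)
{
  rtems_status_code sc;

  /* Enable the vector; any failure is reported as DRVMGR_FAIL */
  sc = rtems_interrupt_vector_enable((rtems_vector_number) irq);
  return (sc == RTEMS_SUCCESSFUL) ? DRVMGR_OK : DRVMGR_FAIL;
}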
diff --git a/bsps/shared/grlib/uart/apbuart_cons.c b/bsps/shared/grlib/uart/apbuart_cons.c
index 5d47b7f4a1..a8b4eaaf88 100644
--- a/bsps/shared/grlib/uart/apbuart_cons.c
+++ b/bsps/shared/grlib/uart/apbuart_cons.c
@@ -47,11 +47,15 @@
#include <grlib/ambapp_bus.h>
#include <grlib/apbuart.h>
#include <grlib/ambapp.h>
-#include <grlib/grlib.h>
+#include <grlib/io.h>
#include <grlib/cons.h>
#include <rtems/termiostypes.h>
#include <grlib/apbuart_cons.h>
+#ifdef LEON3
+#include <bsp/leon3.h>
+#endif
+
/*#define DEBUG 1 */
#ifdef DEBUG
@@ -60,12 +64,6 @@
#define DBG(x...)
#endif
-/* LEON3 Low level transmit/receive functions provided by debug-uart code */
-#ifdef LEON3
-#include <leon.h>
-extern struct apbuart_regs *leon3_debug_uart; /* The debug UART */
-#endif
-
/* Probed hardware capabilities */
enum {
CAP_FIFO = 0x01, /* FIFO available */
@@ -74,7 +72,7 @@ enum {
struct apbuart_priv {
struct console_dev condev;
struct drvmgr_dev *dev;
- struct apbuart_regs *regs;
+ apbuart *regs;
struct rtems_termios_tty *tty;
char devName[52];
volatile int sending;
@@ -213,18 +211,23 @@ static const rtems_termios_device_handler handler_polled = {
* can select appropriate routines for the hardware. probecap() return value
* is a CAP_ bitmask.
*/
-static int probecap(struct apbuart_regs *regs)
+static int probecap(apbuart *regs)
{
int cap = 0;
+ uint32_t ctrl;
/* Probe FIFO */
- if (regs->ctrl & APBUART_CTRL_FA) {
+ ctrl = grlib_load_32(&regs->ctrl);
+ if (ctrl & APBUART_CTRL_FA) {
cap |= CAP_FIFO;
/* Probe RX delayed interrupt */
- regs->ctrl |= APBUART_CTRL_DI;
- if (regs->ctrl & APBUART_CTRL_DI) {
- regs->ctrl &= ~APBUART_CTRL_DI;
+ ctrl |= APBUART_CTRL_DI;
+ grlib_store_32(&regs->ctrl, ctrl);
+ ctrl = grlib_load_32(&regs->ctrl);
+ if (ctrl & APBUART_CTRL_DI) {
+ ctrl &= ~APBUART_CTRL_DI;
+ grlib_store_32(&regs->ctrl, ctrl);
cap |= CAP_DI;
}
}
@@ -241,6 +244,7 @@ int apbuart_init1(struct drvmgr_dev *dev)
char prefix[32];
unsigned int db;
static int first_uart = 1;
+ uint32_t ctrl;
/* The default operation in AMP is to use APBUART[0] for CPU[0],
	 * APBUART[1] for CPU[1] and so on. The remaining UARTs are not used
@@ -269,10 +273,12 @@ int apbuart_init1(struct drvmgr_dev *dev)
if (ambadev == NULL)
return -1;
pnpinfo = &ambadev->info;
- priv->regs = (struct apbuart_regs *)pnpinfo->apb_slv->start;
+ priv->regs = (apbuart *)pnpinfo->apb_slv->start;
/* Clear HW regs, leave baudrate register as it is */
- priv->regs->status = 0;
+ grlib_store_32(&priv->regs->status, 0);
+
+ ctrl = grlib_load_32(&priv->regs->ctrl);
/* leave Transmitter/receiver if this is the RTEMS debug UART (assume
* it has been setup by boot loader).
@@ -280,10 +286,10 @@ int apbuart_init1(struct drvmgr_dev *dev)
db = 0;
#ifdef LEON3
if (priv->regs == leon3_debug_uart) {
- db = priv->regs->ctrl & (APBUART_CTRL_RE |
- APBUART_CTRL_TE |
- APBUART_CTRL_PE |
- APBUART_CTRL_PS);
+ db = ctrl & (APBUART_CTRL_RE |
+ APBUART_CTRL_TE |
+ APBUART_CTRL_PE |
+ APBUART_CTRL_PS);
}
#endif
/* Let UART debug tunnelling be untouched if Flow-control is set.
@@ -293,12 +299,12 @@ int apbuart_init1(struct drvmgr_dev *dev)
* guess that we are debugging if FL is already set, the debugger set
* either LB or DB depending on UART capabilities.
*/
- if (priv->regs->ctrl & APBUART_CTRL_FL) {
- db |= priv->regs->ctrl & (APBUART_CTRL_DB |
+ if (ctrl & APBUART_CTRL_FL) {
+ db |= ctrl & (APBUART_CTRL_DB |
APBUART_CTRL_LB | APBUART_CTRL_FL);
}
- priv->regs->ctrl = db;
+ grlib_store_32(&priv->regs->ctrl, db);
priv->cap = probecap(priv->regs);
@@ -387,12 +393,13 @@ static int apbuart_info(
sprintf(buf, "FS Name: %s", priv->condev.fsname);
print_line(p, buf);
}
- sprintf(buf, "STATUS REG: 0x%x", priv->regs->status);
+ sprintf(buf, "STATUS REG: 0x%x", grlib_load_32(&priv->regs->status));
print_line(p, buf);
- sprintf(buf, "CTRL REG: 0x%x", priv->regs->ctrl);
+ sprintf(buf, "CTRL REG: 0x%x", grlib_load_32(&priv->regs->ctrl));
print_line(p, buf);
sprintf(buf, "SCALER REG: 0x%x baud rate %d",
- priv->regs->scaler, apbuart_get_baud(priv));
+ grlib_load_32(&priv->regs->scaler),
+ apbuart_get_baud(priv));
print_line(p, buf);
return DRVMGR_OK;
@@ -407,6 +414,8 @@ static bool first_open(
)
{
struct apbuart_priv *uart = base_get_priv(base);
+ apbuart *regs = uart->regs;
+ uint32_t ctrl;
uart->tty = tty;
@@ -418,11 +427,11 @@ static bool first_open(
}
/* Enable TX/RX */
- uart->regs->ctrl |= APBUART_CTRL_RE | APBUART_CTRL_TE;
+ ctrl = grlib_load_32(&regs->ctrl);
+ ctrl |= APBUART_CTRL_RE | APBUART_CTRL_TE;
if (uart->mode != TERMIOS_POLLED) {
int ret;
- uint32_t ctrl;
/* Register interrupt and enable it */
ret = drvmgr_interrupt_register(
@@ -435,15 +444,15 @@ static bool first_open(
uart->sending = 0;
/* Turn on RX interrupts */
- ctrl = uart->regs->ctrl;
ctrl |= APBUART_CTRL_RI;
if (uart->cap & CAP_DI) {
/* Use RX FIFO interrupt only if delayed interrupt available. */
ctrl |= (APBUART_CTRL_DI | APBUART_CTRL_RF);
}
- uart->regs->ctrl = ctrl;
}
+ grlib_store_32(&regs->ctrl, ctrl);
+
return true;
}
@@ -454,13 +463,16 @@ static void last_close(
)
{
struct apbuart_priv *uart = base_get_priv(base);
+ apbuart *regs = uart->regs;
rtems_interrupt_lock_context lock_context;
+ uint32_t ctrl;
if (uart->mode != TERMIOS_POLLED) {
/* Turn off RX interrupts */
rtems_termios_device_lock_acquire(base, &lock_context);
- uart->regs->ctrl &=
- ~(APBUART_CTRL_DI | APBUART_CTRL_RI | APBUART_CTRL_RF);
+ ctrl = grlib_load_32(&regs->ctrl);
+ ctrl &= ~(APBUART_CTRL_DI | APBUART_CTRL_RI | APBUART_CTRL_RF);
+ grlib_store_32(&regs->ctrl, ctrl);
rtems_termios_device_lock_release(base, &lock_context);
/**** Flush device ****/
@@ -468,8 +480,8 @@ static void last_close(
/* Wait until all data has been sent */
}
while (
- (uart->regs->ctrl & APBUART_CTRL_TE) &&
- !(uart->regs->status & APBUART_STATUS_TS)
+ (grlib_load_32(&regs->ctrl) & APBUART_CTRL_TE) &&
+ !(grlib_load_32(&regs->status) & APBUART_STATUS_TS)
) {
/* Wait until all data has left shift register */
}
@@ -480,8 +492,11 @@ static void last_close(
#ifdef LEON3
/* Disable TX/RX if not used for DEBUG */
- if (uart->regs != leon3_debug_uart)
- uart->regs->ctrl &= ~(APBUART_CTRL_RE | APBUART_CTRL_TE);
+ if (regs != leon3_debug_uart) {
+ ctrl = grlib_load_32(&regs->ctrl);
+ ctrl &= ~(APBUART_CTRL_RE | APBUART_CTRL_TE);
+ grlib_store_32(&regs->ctrl, ctrl);
+ }
#endif
}
@@ -497,10 +512,11 @@ static int read_task(rtems_termios_device_context *base)
{
rtems_interrupt_lock_context lock_context;
struct apbuart_priv *uart = base_get_priv(base);
- struct apbuart_regs *regs = uart->regs;
+ apbuart *regs = uart->regs;
int cnt;
char buf[33];
struct rtems_termios_tty *tty;
+ uint32_t ctrl;
uint32_t ctrl_add;
ctrl_add = APBUART_CTRL_RI;
@@ -511,10 +527,10 @@ static int read_task(rtems_termios_device_context *base)
do {
cnt = 0;
while (
- (regs->status & APBUART_STATUS_DR) &&
+ (grlib_load_32(&regs->status) & APBUART_STATUS_DR) &&
(cnt < sizeof(buf))
) {
- buf[cnt] = regs->data;
+ buf[cnt] = grlib_load_32(&regs->data);
cnt++;
}
if (0 < cnt) {
@@ -528,9 +544,11 @@ static int read_task(rtems_termios_device_context *base)
* afterwards.
*/
rtems_termios_device_lock_acquire(base, &lock_context);
- regs->ctrl |= ctrl_add;
+ ctrl = grlib_load_32(&regs->ctrl);
+ ctrl |= ctrl_add;
+ grlib_store_32(&regs->ctrl, ctrl);
rtems_termios_device_lock_release(base, &lock_context);
- } while (regs->status & APBUART_STATUS_DR);
+ } while (grlib_load_32(&regs->status) & APBUART_STATUS_DR);
return EOF;
}
@@ -541,7 +559,7 @@ int apbuart_get_baud(struct apbuart_priv *uart)
unsigned int scaler;
/* Get current scaler setting */
- scaler = uart->regs->scaler;
+ scaler = grlib_load_32(&uart->regs->scaler);
/* Get APBUART core frequency */
drvmgr_freq_get(uart->dev, DEV_APB_SLV, &core_clk_hz);
@@ -576,7 +594,7 @@ static bool set_attributes(
rtems_termios_device_lock_acquire(base, &lock_context);
/* Read out current value */
- ctrl = uart->regs->ctrl;
+ ctrl = grlib_load_32(&uart->regs->ctrl);
switch(t->c_cflag & (PARENB|PARODD)){
case (PARENB|PARODD):
@@ -603,7 +621,7 @@ static bool set_attributes(
ctrl &= ~APBUART_CTRL_FL;
/* Update new settings */
- uart->regs->ctrl = ctrl;
+ grlib_store_32(&uart->regs->ctrl, ctrl);
rtems_termios_device_lock_release(base, &lock_context);
@@ -617,7 +635,7 @@ static bool set_attributes(
scaler = (((core_clk_hz*10)/(baud*8))-5)/10;
/* Set new baud rate by setting scaler */
- uart->regs->scaler = scaler;
+ grlib_store_32(&uart->regs->scaler, scaler);
}
return true;
@@ -637,7 +655,7 @@ static void get_attributes(
t->c_cflag |= CS8;
/* Read out current parity */
- ctrl = uart->regs->ctrl;
+ ctrl = grlib_load_32(&uart->regs->ctrl);
if (ctrl & APBUART_CTRL_PE) {
if (ctrl & APBUART_CTRL_PS)
t->c_cflag |= PARENB|PARODD; /* Odd parity */
@@ -673,11 +691,11 @@ static void write_interrupt(
)
{
struct apbuart_priv *uart = base_get_priv(base);
- struct apbuart_regs *regs = uart->regs;
+ apbuart *regs = uart->regs;
int sending;
unsigned int ctrl;
- ctrl = regs->ctrl;
+ ctrl = grlib_load_32(&regs->ctrl);
if (len > 0) {
/*
@@ -685,28 +703,30 @@ static void write_interrupt(
* we can tell termios later.
*/
/* Enable TX interrupt (interrupt is edge-triggered) */
- regs->ctrl = ctrl | APBUART_CTRL_TI;
+ ctrl |= APBUART_CTRL_TI;
+ grlib_store_32(&regs->ctrl, ctrl);
if (ctrl & APBUART_CTRL_FA) {
/* APBUART with FIFO.. Fill as many as FIFO allows */
sending = 0;
while (
- ((regs->status & APBUART_STATUS_TF) == 0) &&
+ ((grlib_load_32(&regs->status) & APBUART_STATUS_TF) == 0) &&
(sending < len)
) {
- regs->data = *buf;
+ grlib_store_32(&regs->data, *buf);
buf++;
sending++;
}
} else {
/* start UART TX, this will result in an interrupt when done */
- regs->data = *buf;
+ grlib_store_32(&regs->data, *buf);
sending = 1;
}
} else {
/* No more to send, disable TX interrupts */
- regs->ctrl = ctrl & ~APBUART_CTRL_TI;
+ ctrl &= ~APBUART_CTRL_TI;
+ grlib_store_32(&regs->ctrl, ctrl);
/* Tell close that we sent everything */
sending = 0;
@@ -722,21 +742,24 @@ static void apbuart_cons_isr(void *arg)
rtems_termios_device_context *base;
struct console_dev *condev = rtems_termios_get_device_context(tty);
struct apbuart_priv *uart = condev_get_priv(condev);
- struct apbuart_regs *regs = uart->regs;
+ apbuart *regs = uart->regs;
unsigned int status;
char buf[33];
int cnt;
if (uart->mode == TERMIOS_TASK_DRIVEN) {
- if ((status = regs->status) & APBUART_STATUS_DR) {
+ if ((status = grlib_load_32(&regs->status)) & APBUART_STATUS_DR) {
rtems_interrupt_lock_context lock_context;
+ uint32_t ctrl;
/* Turn off RX interrupts */
base = rtems_termios_get_device_context(tty);
rtems_termios_device_lock_acquire(base, &lock_context);
- regs->ctrl &=
+ ctrl = grlib_load_32(&regs->ctrl);
+ ctrl &=
~(APBUART_CTRL_DI | APBUART_CTRL_RI |
APBUART_CTRL_RF);
+ grlib_store_32(&regs->ctrl, ctrl);
rtems_termios_device_lock_release(base, &lock_context);
/* Activate termios RX daemon task */
rtems_termios_rxirq_occured(tty);
@@ -749,10 +772,10 @@ static void apbuart_cons_isr(void *arg)
*/
cnt = 0;
while (
- ((status=regs->status) & APBUART_STATUS_DR) &&
+ ((status=grlib_load_32(&regs->status)) & APBUART_STATUS_DR) &&
(cnt < sizeof(buf))
) {
- buf[cnt] = regs->data;
+ buf[cnt] = grlib_load_32(&regs->data);
cnt++;
}
if (0 < cnt) {
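
probecap() above detects optional APBUART features by writing a control bit and reading it back. The same probe, isolated from the driver and restoring the original control value, could look like this (the function name is illustrative):

#include <grlib/apbuart.h>
#include <grlib/io.h>
#include <stdint.h>

static int apbuart_has_delayed_irq_sketch(apbuart *regs)
{
  uint32_t ctrl = grlib_load_32(&regs->ctrl);
  int has_di = 0;

  /* Try to set the DI bit; hardware without the feature ignores the write */
  grlib_store_32(&regs->ctrl, ctrl | APBUART_CTRL_DI);
  if ((grlib_load_32(&regs->ctrl) & APBUART_CTRL_DI) != 0) {
    has_di = 1;
  }

  /* Restore the original control register value */
  grlib_store_32(&regs->ctrl, ctrl);
  return has_di;
}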
diff --git a/bsps/shared/grlib/uart/apbuart_polled.c b/bsps/shared/grlib/uart/apbuart_polled.c
index b83af56b09..84c31795df 100644
--- a/bsps/shared/grlib/uart/apbuart_polled.c
+++ b/bsps/shared/grlib/uart/apbuart_polled.c
@@ -1,8 +1,17 @@
/* SPDX-License-Identifier: BSD-2-Clause */
+/**
+ * @file
+ *
+ * @ingroup RTEMSDeviceGRLIBAPBUART
+ *
+ * @brief This source file contains the implementation of
+ * apbuart_outbyte_wait(), apbuart_outbyte_polled(), and
+ * apbuart_inbyte_nonblocking().
+ */
+
/*
- * COPYRIGHT (c) 2010.
- * Cobham Gaisler AB.
+ * Copyright (C) 2021 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,38 +36,37 @@
*/
#include <grlib/apbuart.h>
+#include <grlib/io.h>
-#include <rtems/score/cpuimpl.h>
+#include <rtems/dev/io.h>
-void apbuart_outbyte_wait(const struct apbuart_regs *regs)
+void apbuart_outbyte_wait( const apbuart *regs )
{
- while ( (regs->status & APBUART_STATUS_TE) == 0 ) {
- /* Lower bus utilization while waiting for UART */
- _CPU_Instruction_no_operation();
- _CPU_Instruction_no_operation();
- _CPU_Instruction_no_operation();
- _CPU_Instruction_no_operation();
- _CPU_Instruction_no_operation();
- _CPU_Instruction_no_operation();
- _CPU_Instruction_no_operation();
- _CPU_Instruction_no_operation();
+ while ( ( grlib_load_32( &regs->status ) & APBUART_STATUS_TE ) == 0 ) {
+ _IO_Relax();
}
}
-void apbuart_outbyte_polled(struct apbuart_regs *regs, char ch)
+void apbuart_outbyte_polled( apbuart *regs, char ch)
{
- apbuart_outbyte_wait(regs);
- regs->data = (uint8_t) ch;
+ apbuart_outbyte_wait( regs );
+ grlib_store_32( &regs->data, (uint8_t) ch );
}
-int apbuart_inbyte_nonblocking(struct apbuart_regs *regs)
+int apbuart_inbyte_nonblocking( apbuart *regs )
{
- /* Clear errors */
- regs->status = ~APBUART_STATUS_ERR;
+ uint32_t status;
+
+ status = grlib_load_32( &regs->status );
+
+ /* Clear errors, writes to non-error flags are ignored */
+ status &= ~( APBUART_STATUS_FE | APBUART_STATUS_PE | APBUART_STATUS_OV |
+ APBUART_STATUS_BR );
+ grlib_store_32( &regs->status, status );
- if ((regs->status & APBUART_STATUS_DR) == 0) {
+ if ( ( status & APBUART_STATUS_DR ) == 0 ) {
return -1;
}
- return (uint8_t) regs->data;
+ return (int) APBUART_DATA_DATA_GET( grlib_load_32( &regs->data ) );
}
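
The rewritten polled primitives operate on an apbuart register block through grlib_load_32()/grlib_store_32(). A minimal usage sketch, assuming regs points at an already configured UART: echo received characters until a carriage return arrives.

#include <grlib/apbuart.h>

static void apbuart_echo_sketch( apbuart *regs )
{
  int c;

  do {
    do {
      /* apbuart_inbyte_nonblocking() returns -1 while no data is pending */
      c = apbuart_inbyte_nonblocking( regs );
    } while ( c < 0 );

    apbuart_outbyte_polled( regs, (char) c );
  } while ( c != '\r' );
}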
diff --git a/bsps/shared/grlib/uart/apbuart_termios.c b/bsps/shared/grlib/uart/apbuart_termios.c
index 6506b70e3d..8b5ccd67f6 100644
--- a/bsps/shared/grlib/uart/apbuart_termios.c
+++ b/bsps/shared/grlib/uart/apbuart_termios.c
@@ -32,6 +32,7 @@
#include <grlib/apbuart_termios.h>
#include <grlib/apbuart.h>
+#include <grlib/io.h>
#include <bsp.h>
static void apbuart_isr(void *arg)
@@ -42,9 +43,9 @@ static void apbuart_isr(void *arg)
char data;
/* Get all received characters */
- while ((status=uart->regs->status) & APBUART_STATUS_DR) {
+ while ((status=grlib_load_32(&uart->regs->status)) & APBUART_STATUS_DR) {
/* Data has arrived, get new data */
- data = uart->regs->data;
+ data = (char)grlib_load_32(&uart->regs->data);
/* Tell termios layer about new character */
rtems_termios_enqueue_raw_characters(tty, &data, 1);
@@ -52,7 +53,7 @@ static void apbuart_isr(void *arg)
if (
(status & APBUART_STATUS_TE)
- && (uart->regs->ctrl & APBUART_CTRL_TI) != 0
+ && (grlib_load_32(&uart->regs->ctrl) & APBUART_CTRL_TI) != 0
) {
/* write_interrupt will get called from this function */
rtems_termios_dequeue_characters(tty, 1);
@@ -67,23 +68,27 @@ static void apbuart_write_support(
{
struct apbuart_context *uart = (struct apbuart_context *) base;
int sending;
+ uint32_t ctrl;
+
+ ctrl = grlib_load_32(&uart->regs->ctrl);
if (len > 0) {
/* Enable TX interrupt (interrupt is edge-triggered) */
- uart->regs->ctrl |= APBUART_CTRL_TI;
+ ctrl |= APBUART_CTRL_TI;
/* start UART TX, this will result in an interrupt when done */
- uart->regs->data = *buf;
+ grlib_store_32(&uart->regs->data, (uint8_t)*buf);
sending = 1;
} else {
/* No more to send, disable TX interrupts */
- uart->regs->ctrl &= ~APBUART_CTRL_TI;
+ ctrl &= ~APBUART_CTRL_TI;
/* Tell close that we sent everything */
sending = 0;
}
+ grlib_store_32(&uart->regs->ctrl, ctrl);
uart->sending = sending;
}
@@ -134,7 +139,7 @@ static bool apbuart_set_attributes(
rtems_termios_device_lock_acquire(base, &lock_context);
/* Read out current value */
- ctrl = uart->regs->ctrl;
+ ctrl = grlib_load_32(&uart->regs->ctrl);
switch (t->c_cflag & (PARENB|PARODD)) {
case (PARENB|PARODD):
@@ -162,7 +167,7 @@ static bool apbuart_set_attributes(
}
/* Update new settings */
- uart->regs->ctrl = ctrl;
+ grlib_store_32(&uart->regs->ctrl, ctrl);
rtems_termios_device_lock_release(base, &lock_context);
@@ -173,7 +178,7 @@ static bool apbuart_set_attributes(
scaler = (((uart->freq_hz * 10) / (baud * 8)) - 5) / 10;
/* Set new baud rate by setting scaler */
- uart->regs->scaler = scaler;
+ grlib_store_32(&uart->regs->scaler, scaler);
}
return true;
@@ -184,7 +189,8 @@ static void apbuart_set_best_baud(
struct termios *term
)
{
- uint32_t baud = (uart->freq_hz * 10) / ((uart->regs->scaler * 10 + 5) * 8);
+ uint32_t baud = (uart->freq_hz * 10) /
+ ((grlib_load_32(&uart->regs->scaler) * 10 + 5) * 8);
rtems_termios_set_best_baud(term, baud);
}
@@ -197,12 +203,15 @@ static bool apbuart_first_open_polled(
)
{
struct apbuart_context *uart = (struct apbuart_context *) base;
+ uint32_t ctrl;
apbuart_set_best_baud(uart, term);
/* Initialize UART on opening */
- uart->regs->ctrl |= APBUART_CTRL_RE | APBUART_CTRL_TE;
- uart->regs->status = 0;
+ ctrl = grlib_load_32(&uart->regs->ctrl);
+ ctrl |= APBUART_CTRL_RE | APBUART_CTRL_TE;
+ grlib_store_32(&uart->regs->ctrl, ctrl);
+ grlib_store_32(&uart->regs->status, 0);
return true;
}
@@ -216,6 +225,7 @@ static bool apbuart_first_open_interrupt(
{
struct apbuart_context *uart = (struct apbuart_context *) base;
rtems_status_code sc;
+ uint32_t ctrl;
apbuart_set_best_baud(uart, term);
@@ -229,11 +239,13 @@ static bool apbuart_first_open_interrupt(
uart->sending = 0;
/* Enable Receiver and transmitter and Turn on RX interrupts */
- uart->regs->ctrl |= APBUART_CTRL_RE | APBUART_CTRL_TE |
- APBUART_CTRL_RI;
+ ctrl = grlib_load_32(&uart->regs->ctrl);
+ ctrl |= APBUART_CTRL_RE | APBUART_CTRL_TE | APBUART_CTRL_RI;
+ grlib_store_32(&uart->regs->ctrl, ctrl);
/* Initialize UART on opening */
- uart->regs->ctrl |= APBUART_CTRL_RE | APBUART_CTRL_TE;
- uart->regs->status = 0;
+ ctrl |= APBUART_CTRL_RE | APBUART_CTRL_TE;
+ grlib_store_32(&uart->regs->ctrl, ctrl);
+ grlib_store_32(&uart->regs->status, 0);
return true;
}
@@ -246,10 +258,13 @@ static void apbuart_last_close_interrupt(
{
struct apbuart_context *uart = (struct apbuart_context *) base;
rtems_interrupt_lock_context lock_context;
+ uint32_t ctrl;
/* Turn off RX interrupts */
rtems_termios_device_lock_acquire(base, &lock_context);
- uart->regs->ctrl &= ~(APBUART_CTRL_RI);
+ ctrl = grlib_load_32(&uart->regs->ctrl);
+ ctrl &= ~APBUART_CTRL_RI;
+ grlib_store_32(&uart->regs->ctrl, ctrl);
rtems_termios_device_lock_release(base, &lock_context);
/**** Flush device ****/
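
apbuart_set_attributes() and apbuart_set_best_baud() above convert between a baud rate and the scaler register using fixed-point rounding. A stand-alone check of that arithmetic; the 40 MHz core clock is an example value, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  uint32_t freq_hz = 40000000;   /* example APB core clock */
  uint32_t baud = 38400;

  /* Scaler for the requested baud rate, rounded as in the driver */
  uint32_t scaler = (((freq_hz * 10) / (baud * 8)) - 5) / 10;

  /* Effective baud rate recovered from the programmed scaler */
  uint32_t actual = (freq_hz * 10) / ((scaler * 10 + 5) * 8);

  printf("scaler %u -> about %u baud\n", (unsigned) scaler, (unsigned) actual);
  return 0;
}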
diff --git a/bsps/shared/irq/irq-affinity.c b/bsps/shared/irq/irq-affinity.c
index 6c1522f77f..7e9250a948 100644
--- a/bsps/shared/irq/irq-affinity.c
+++ b/bsps/shared/irq/irq-affinity.c
@@ -36,7 +36,6 @@
#include <bsp/irq-generic.h>
-#include <rtems/score/processormask.h>
#include <rtems/score/smpimpl.h>
rtems_status_code rtems_interrupt_set_affinity(
diff --git a/bsps/shared/irq/irq-generic.c b/bsps/shared/irq/irq-generic.c
index 26ae1b8505..b6238025b7 100644
--- a/bsps/shared/irq/irq-generic.c
+++ b/bsps/shared/irq/irq-generic.c
@@ -35,9 +35,6 @@
*/
#include <bsp/irq-generic.h>
-#include <bsp/fatal.h>
-
-#include <stdlib.h>
#include <rtems/malloc.h>
@@ -66,9 +63,7 @@ static inline void bsp_interrupt_set_initialized(void)
}
#if defined(BSP_INTERRUPT_USE_INDEX_TABLE)
-static inline rtems_vector_number bsp_interrupt_allocate_handler_index(
- rtems_vector_number vector
-)
+static inline rtems_vector_number bsp_interrupt_allocate_handler_index( void )
{
rtems_vector_number i;
@@ -180,7 +175,7 @@ static rtems_status_code bsp_interrupt_entry_install_first(
rtems_vector_number index;
#ifdef BSP_INTERRUPT_USE_INDEX_TABLE
- index = bsp_interrupt_allocate_handler_index( vector );
+ index = bsp_interrupt_allocate_handler_index();
if ( index == BSP_INTERRUPT_DISPATCH_TABLE_SIZE ) {
/* Handler table is full */
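
bsp_interrupt_allocate_handler_index() no longer needs the vector number; it simply finds a free slot in the dispatch table and returns BSP_INTERRUPT_DISPATCH_TABLE_SIZE when none is left. A simplified, hedged sketch of such an allocator (not the actual RTEMS implementation) follows.

#include <stddef.h>

#define SKETCH_TABLE_SIZE 32U

static void *sketch_table[SKETCH_TABLE_SIZE];

static unsigned sketch_allocate_index(void)
{
  unsigned i;

  for (i = 0U; i < SKETCH_TABLE_SIZE; ++i) {
    if (sketch_table[i] == NULL) {
      return i;                 /* first unused slot */
    }
  }

  return SKETCH_TABLE_SIZE;     /* caller treats this as "table full" */
}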
diff --git a/bsps/shared/rtems-version.c b/bsps/shared/rtems-version.c
index b12504a1c9..aaac5a07f3 100644
--- a/bsps/shared/rtems-version.c
+++ b/bsps/shared/rtems-version.c
@@ -1,3 +1,12 @@
+/**
+ * @file
+ *
+ * @ingroup RTEMSImplClassic
+ *
+ * @brief This source file contains the implementation of
+ * rtems_board_support_package() and the definition of ::_RTEMS_version.
+ */
+
/*
* COPYRIGHT (c) 2003, Ralf Corsepius, Ulm, Germany.
* COPYRIGHT (c) 2003, On-Line Applications Research Corporation (OAR).
diff --git a/bsps/shared/start/bspgetworkarea-default.c b/bsps/shared/start/bspgetworkarea-default.c
index 7080aa6053..5f956b124f 100644
--- a/bsps/shared/start/bspgetworkarea-default.c
+++ b/bsps/shared/start/bspgetworkarea-default.c
@@ -39,7 +39,6 @@
*/
#include <bsp.h>
-#include <bsp/bootcard.h>
#if defined(HAS_UBOOT) && !defined(BSP_DISABLE_UBOOT_WORK_AREA_CONFIG)
#define USE_UBOOT
diff --git a/bsps/shared/xil/xil_cache.c b/bsps/shared/xil/arm/ARMv8/xil_cache.c
index aef64b310a..aef64b310a 100644
--- a/bsps/shared/xil/xil_cache.c
+++ b/bsps/shared/xil/arm/ARMv8/xil_cache.c
diff --git a/bsps/shared/xil/arm/cortexr5/xil_cache.c b/bsps/shared/xil/arm/cortexr5/xil_cache.c
new file mode 100644
index 0000000000..631d02f648
--- /dev/null
+++ b/bsps/shared/xil/arm/cortexr5/xil_cache.c
@@ -0,0 +1,561 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2022 Xilinx, Inc. All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_cache.c
+*
+* Contains required functions for the ARM cache functionality.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 5.00 pkp 02/20/14 First release
+* 6.2 mus 01/27/17 Updated to support IAR compiler
+* 7.3 dp 06/25/20 Updated to support armclang compiler
+* 7.7 sk 01/10/22 Update IRQ_FIQ_MASK macro from signed to unsigned
+* to fix misra_c_2012_rule_10_4 violation.
+* 7.7 sk 01/10/22 Typecast to fix wider essential type misra_c_2012_rule_10_7
+* violation.
+* 7.7 mus 02/21/22 Existing note in cache API's says, "bottom 4 bits of input
+* address are forced to 0 as per architecture". As cache line
+* length is of 32 byte, bottom 5 bits of input address would
+* be forced to 0. Updated note to have correct details.
+* It fixes CR#1122561.
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xil_cache.h"
+#include "xil_io.h"
+#include "xpseudo_asm.h"
+#include "xparameters.h"
+#include "xreg_cortexr5.h"
+#include "xil_exception.h"
+
+
+/************************** Variable Definitions *****************************/
+
+#define IRQ_FIQ_MASK 0xC0U /* Mask IRQ and FIQ interrupts in cpsr */
+
+#if defined (__clang__)
+extern s32 Image$$ARM_LIB_STACK$$Limit;
+extern s32 Image$$ARM_UNDEF_STACK$$Base;
+#elif defined (__GNUC__)
+extern s32 _stack_end;
+extern s32 __undef_stack;
+#endif
+
+/****************************************************************************/
+/************************** Function Prototypes ******************************/
+
+/****************************************************************************/
+/**
+* @brief Enable the Data cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheEnable(void)
+{
+ register u32 CtrlReg;
+
+ /* enable caches only if they are disabled */
+#if defined (__GNUC__)
+ CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL,CtrlReg);
+#endif
+ if ((CtrlReg & XREG_CP15_CONTROL_C_BIT)==0x00000000U) {
+ /* invalidate the Data cache */
+ Xil_DCacheInvalidate();
+
+ /* enable the Data cache */
+ CtrlReg |= (XREG_CP15_CONTROL_C_BIT);
+
+ mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
+ }
+}
+
+/****************************************************************************/
+/**
+* @brief Disable the Data cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheDisable(void)
+{
+ register u32 CtrlReg;
+
+ /* clean and invalidate the Data cache */
+ Xil_DCacheFlush();
+
+ /* disable the Data cache */
+#if defined (__GNUC__)
+ CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL,CtrlReg);
+#endif
+
+ CtrlReg &= ~(XREG_CP15_CONTROL_C_BIT);
+
+ mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate the entire Data cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheInvalidate(void)
+{
+ u32 currmask;
+ u32 stack_start,stack_end,stack_size;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+#if defined (__clang__)
+ stack_end = (u32 )&Image$$ARM_LIB_STACK$$Limit;
+ stack_start = (u32 )&Image$$ARM_UNDEF_STACK$$Base;
+#elif defined (__GNUC__)
+ stack_end = (u32 )&_stack_end;
+ stack_start = (u32 )&__undef_stack;
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+ stack_size = stack_start-stack_end;
+
+ /* Flush stack memory to save return address */
+ Xil_DCacheFlushRange(stack_end, stack_size);
+#endif
+
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
+
+ /*invalidate all D cache*/
+ mtcp(XREG_CP15_INVAL_DC_ALL, 0);
+
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate a Data cache line. If the byte specified by the
+* address (adr) is cached by the data cache, the cacheline
+*			containing that byte is invalidated. If the cacheline is modified
+* (dirty), the modified contents are lost and are NOT written
+* to system memory before the line is invalidated.
+*
+*
+* @param	adr: 32bit address of the data to be invalidated.
+*
+* @return None.
+*
+* @note The bottom 5 bits are set to 0, forced by architecture.
+*
+****************************************************************************/
+void Xil_DCacheInvalidateLine(INTPTR adr)
+{
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
+ mtcp(XREG_CP15_INVAL_DC_LINE_MVA_POC, (adr & (~0x1F)));
+
+ /* Wait for invalidate to complete */
+ dsb();
+
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate the Data cache for the given address range.
+* If the bytes specified by the address (adr) are cached by the
+*			Data cache, the cacheline containing that byte is invalidated.
+* If the cacheline is modified (dirty), the modified contents are
+* lost and are NOT written to system memory before the line is
+* invalidated.
+*
+* @param adr: 32bit start address of the range to be invalidated.
+* @param len: Length of range to be invalidated in bytes.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheInvalidateRange(INTPTR adr, u32 len)
+{
+ const u32 cacheline = 32U;
+ u32 end;
+ u32 tempadr = adr;
+ u32 tempend;
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ if (len != 0U) {
+ end = tempadr + len;
+ tempend = end;
+ /* Select L1 Data cache in CSSR */
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
+
+ if ((tempadr & (cacheline-1U)) != 0U) {
+ tempadr &= (~(cacheline - 1U));
+
+ Xil_DCacheFlushLine(tempadr);
+ }
+ if ((tempend & (cacheline-1U)) != 0U) {
+ tempend &= (~(cacheline - 1U));
+
+ Xil_DCacheFlushLine(tempend);
+ }
+
+ while (tempadr < tempend) {
+
+ /* Invalidate Data cache line */
+ asm_inval_dc_line_mva_poc(tempadr);
+
+ tempadr += cacheline;
+ }
+ }
+
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Flush the entire Data cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheFlush(void)
+{
+ register u32 CsidReg, C7Reg;
+ u32 CacheSize, LineSize, NumWays;
+ u32 Way, WayIndex, Set, SetIndex, NumSet;
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ /* Select cache level 0 and D cache in CSSR */
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
+
+#if defined (__GNUC__)
+ CsidReg = mfcp(XREG_CP15_CACHE_SIZE_ID);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_CACHE_SIZE_ID,CsidReg);
+#endif
+ /* Determine Cache Size */
+
+ CacheSize = (CsidReg >> 13U) & 0x000001FFU;
+ CacheSize += 0x00000001U;
+ CacheSize *= (u32)128; /* to get number of bytes */
+
+ /* Number of Ways */
+ NumWays = (CsidReg & 0x000003ffU) >> 3U;
+ NumWays += 0x00000001U;
+
+ /* Get the cacheline size, way size, index size from csidr */
+ LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+ NumSet = CacheSize/NumWays;
+ NumSet /= (((u32)0x00000001U) << LineSize);
+
+ Way = 0U;
+ Set = 0U;
+
+ /* Invalidate all the cachelines */
+ for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+ for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+ C7Reg = Way | Set;
+ /* Flush by Set/Way */
+ asm_clean_inval_dc_line_sw(C7Reg);
+
+ Set += (((u32)0x00000001U) << LineSize);
+ }
+ Set = 0U;
+ Way += 0x40000000U;
+ }
+
+ /* Wait for flush to complete */
+ dsb();
+ mtcpsr(currmask);
+
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Flush a Data cache line. If the byte specified by the address (adr)
+* is cached by the Data cache, the cacheline containing that byte is
+* invalidated. If the cacheline is modified (dirty), the entire
+* contents of the cacheline are written to system memory before the
+* line is invalidated.
+*
+* @param adr: 32bit address of the data to be flushed.
+*
+* @return None.
+*
+* @note The bottom 5 bits are set to 0, forced by architecture.
+*
+****************************************************************************/
+void Xil_DCacheFlushLine(INTPTR adr)
+{
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
+
+ mtcp(XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC, (adr & (~0x1F)));
+
+ /* Wait for flush to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Flush the Data cache for the given address range.
+* If the bytes specified by the address (adr) are cached by the
+*			Data cache, the cachelines containing those bytes are invalidated. If
+*			a cacheline is modified (dirty), its contents are written to system
+*			memory before the lines are invalidated.
+*
+* @param adr: 32bit start address of the range to be flushed.
+* @param len: Length of the range to be flushed in bytes
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheFlushRange(INTPTR adr, u32 len)
+{
+ u32 LocalAddr = adr;
+ const u32 cacheline = 32U;
+ u32 end;
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ if (len != 0x00000000U) {
+ /* Back the starting address up to the start of a cache line
+ * perform cache operations until adr+len
+ */
+ end = LocalAddr + len;
+ LocalAddr &= ~(cacheline - 1U);
+
+ while (LocalAddr < end) {
+ /* Flush Data cache line */
+ asm_clean_inval_dc_line_mva_poc(LocalAddr);
+
+ LocalAddr += cacheline;
+ }
+ }
+ dsb();
+ mtcpsr(currmask);
+}
+/****************************************************************************/
+/**
+* @brief Store a Data cache line. If the byte specified by the address
+* (adr) is cached by the Data cache and the cacheline is modified
+* (dirty), the entire contents of the cacheline are written to
+*			system memory. After the store completes, the cacheline is marked
+* as unmodified (not dirty).
+*
+* @param adr: 32bit address of the data to be stored
+*
+* @return None.
+*
+* @note The bottom 5 bits are set to 0, forced by architecture.
+*
+****************************************************************************/
+void Xil_DCacheStoreLine(INTPTR adr)
+{
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
+ mtcp(XREG_CP15_CLEAN_DC_LINE_MVA_POC, (adr & (~0x1F)));
+
+ /* Wait for store to complete */
+ dsb();
+ isb();
+
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Enable the instruction cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheEnable(void)
+{
+ register u32 CtrlReg;
+
+ /* enable caches only if they are disabled */
+#if defined (__GNUC__)
+ CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL, CtrlReg);
+#endif
+ if ((CtrlReg & XREG_CP15_CONTROL_I_BIT)==0x00000000U) {
+ /* invalidate the instruction cache */
+ mtcp(XREG_CP15_INVAL_IC_POU, 0);
+
+ /* enable the instruction cache */
+ CtrlReg |= (XREG_CP15_CONTROL_I_BIT);
+
+ mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
+ }
+}
+
+/****************************************************************************/
+/**
+* @brief Disable the instruction cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheDisable(void)
+{
+ register u32 CtrlReg;
+
+ dsb();
+
+ /* invalidate the instruction cache */
+ mtcp(XREG_CP15_INVAL_IC_POU, 0);
+
+ /* disable the instruction cache */
+#if defined (__GNUC__)
+ CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL,CtrlReg);
+#endif
+
+ CtrlReg &= ~(XREG_CP15_CONTROL_I_BIT);
+
+ mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate the entire instruction cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheInvalidate(void)
+{
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 1);
+
+ /* invalidate the instruction cache */
+ mtcp(XREG_CP15_INVAL_IC_POU, 0);
+
+ /* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief	Invalidate an instruction cache line. If the instruction specified
+* by the address is cached by the instruction cache, the
+* cacheline containing that instruction is invalidated.
+*
+* @param adr: 32bit address of the instruction to be invalidated.
+*
+* @return None.
+*
+* @note The bottom 5 bits are set to 0, forced by architecture.
+*
+****************************************************************************/
+void Xil_ICacheInvalidateLine(INTPTR adr)
+{
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 1);
+ mtcp(XREG_CP15_INVAL_IC_LINE_MVA_POU, (adr & (~0x1F)));
+
+ /* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate the instruction cache for the given address range.
+* If the bytes specified by the address range are held in the
+* instruction cache, the cachelines containing those bytes are
+* invalidated. Instruction cache lines are never dirty, so nothing
+* is written back to system memory by this operation.
+*
+* @param adr: 32bit start address of the range to be invalidated.
+* @param len: Length of the range to be invalidated in bytes.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheInvalidateRange(INTPTR adr, u32 len)
+{
+ u32 LocalAddr = adr;
+ const u32 cacheline = 32U;
+ u32 end;
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+ if (len != 0x00000000U) {
+ /* Back the starting address up to the start of a cache line
+		 * and perform cache operations until adr+len
+ */
+ end = LocalAddr + len;
+ LocalAddr = LocalAddr & ~(cacheline - 1U);
+
+		/* Select the L1 instruction cache in CSSELR */
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 1U);
+
+ while (LocalAddr < end) {
+
+ /* Invalidate L1 I-cache line */
+ asm_inval_ic_line_mva_pou(LocalAddr);
+
+ LocalAddr += cacheline;
+ }
+ }
+
+ /* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
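+
+/*
+* Usage sketch (illustrative only, not part of the imported Xilinx sources):
+* after copying instructions into RAM they must reach system memory and any
+* stale instruction cache lines must be dropped before the code is executed.
+* Xil_DCacheFlushRange() is assumed to be the range clean/flush helper
+* provided earlier in this file; load_addr, image and image_len are made-up
+* names.
+*
+* @code
+*   memcpy((void *)load_addr, image, image_len);
+*   Xil_DCacheFlushRange(load_addr, image_len);
+*   Xil_ICacheInvalidateRange(load_addr, image_len);
+* @endcode
+*/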
diff --git a/bsps/shared/xil/arm/cortexr5/xil_mpu.c b/bsps/shared/xil/arm/cortexr5/xil_mpu.c
new file mode 100644
index 0000000000..85f8f7f8da
--- /dev/null
+++ b/bsps/shared/xil/arm/cortexr5/xil_mpu.c
@@ -0,0 +1,645 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2022 Xilinx, Inc. All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+* @file xil_mpu.c
+*
+* This file provides APIs for enabling/disabling the MPU and for setting the
+* memory attributes of memory sections by programming MPU regions.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- ---------------------------------------------------
+* 5.00 pkp 02/10/14 Initial version
+* 6.2 mus 01/27/17 Updated to support IAR compiler
+* 6.4 asa 08/16/17 Added many APIs for MPU access to make MPU usage
+* user-friendly. The APIs added are: Xil_UpdateMPUConfig,
+* Xil_GetMPUConfig, Xil_GetNumOfFreeRegions,
+* Xil_GetNextMPURegion, Xil_DisableMPURegionByRegNum,
+* Xil_GetMPUFreeRegMask, Xil_SetMPURegionByRegNum, and
+* Xil_InitializeExistingMPURegConfig.
+* Added a new array of structure of type XMpuConfig to
+* represent the MPU configuration table.
+* 6.8 aru 07/02/18 Returned the pointer instead of address
+* of that pointer in Xil_MemMap().
+* 7.5 asa 03/01/21 Ensure that Mpu_Config does not stay in .boot/.vector
+* sections which generally should be executable code
+* which can be allocated and not written.
+* Mpu_Config array is populated during boot time, hence
+* cannot be placed in .bss or .data section. Putting
+* Mpu_Config in a new .bootdata section.
+* 7.7 sk 01/10/22 Update int to u32 to fix misrac misra_c_2012_directive_4_6
+* violations.
+* 7.7 sk 01/10/22 Typecast variables from signed to unsigned to fix
+* misra_c_2012_rule_10_4 violation.
+* 7.7 sk 01/10/22 Add explicit parentheses for region_size and region_size[0]
+* to fix misra_c_2012_rule_12_1 violation.
+* 7.7 sk 01/10/22 Remove unsigned sign to fix misra_c_2012_rule_10_3 violation.
+* 7.7 sk 01/10/22 Modify if condition to fix misra_c_2012_rule_10_1 violation.
+* 7.7 sk 01/10/22 Typecast to fix wider essential type misra_c_2012_rule_10_7
+* violation.
+* 7.7 sk 01/10/22 Update conditional expression to fix misra_c_2012_rule_14_4
+* violation.
+* 7.7 sk 01/10/22 Add braces for the if statement to make it a compound
+* statement to fix misra_c_2012_rule_15_6 violation.
+* </pre>
+*
+*
+******************************************************************************/
+
+/*
+ * Origin: https://github.com/Xilinx/embeddedsw/blob/master/lib/bsp/standalone/src/arm/cortexr5/xil_mpu.c
+ * __rtems__ changes:
+ * - un-include xdebug.h and add macro for xdbg_printf
+ * - relocate XMpu_Config
+ * - form Xilinx link script section(".bootdata")
+ * - to RTEMS link script section(".bsp_start_data")
+ */
+
+/***************************** Include Files *********************************/
+
+#include "xil_cache.h"
+#include "xpseudo_asm.h"
+#include "xil_types.h"
+#include "xil_mpu.h"
+#ifndef __rtems__
+#include "xdebug.h"
+#else
+#define xdbg_printf(...)
+#endif
+#include "xstatus.h"
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/**************************** Type Definitions *******************************/
+
+/************************** Constant Definitions *****************************/
+#define MPU_REGION_SIZE_MIN 0x20
+/************************** Variable Definitions *****************************/
+
+static const struct {
+ u64 size;
+ u32 encoding;
+}region_size[] = {
+ { 0x20, REGION_32B },
+ { 0x40, REGION_64B },
+ { 0x80, REGION_128B },
+ { 0x100, REGION_256B },
+ { 0x200, REGION_512B },
+ { 0x400, REGION_1K },
+ { 0x800, REGION_2K },
+ { 0x1000, REGION_4K },
+ { 0x2000, REGION_8K },
+ { 0x4000, REGION_16K },
+ { 0x8000, REGION_32K },
+ { 0x10000, REGION_64K },
+ { 0x20000, REGION_128K },
+ { 0x40000, REGION_256K },
+ { 0x80000, REGION_512K },
+ { 0x100000, REGION_1M },
+ { 0x200000, REGION_2M },
+ { 0x400000, REGION_4M },
+ { 0x800000, REGION_8M },
+ { 0x1000000, REGION_16M },
+ { 0x2000000, REGION_32M },
+ { 0x4000000, REGION_64M },
+ { 0x8000000, REGION_128M },
+ { 0x10000000, REGION_256M },
+ { 0x20000000, REGION_512M },
+ { 0x40000000, REGION_1G },
+ { 0x80000000, REGION_2G },
+ { 0x100000000, REGION_4G },
+};
+
+#ifndef __rtems__
+#if defined (__GNUC__)
+XMpu_Config Mpu_Config __attribute__((section(".bootdata")));
+#elif defined (__ICCARM__)
+#pragma default_function_attributes = @ ".bootdata"
+XMpu_Config Mpu_Config;
+#endif
+#else
+XMpu_Config Mpu_Config __attribute__((section(".bsp_start_data")));
+#endif
+
+/************************** Function Prototypes ******************************/
+void Xil_InitializeExistingMPURegConfig(void);
+/*****************************************************************************/
+/**
+* @brief This function sets the memory attributes for a 1MB section of
+* 		memory by configuring a single MPU region.
+*
+* @param addr: 32-bit address for which memory attributes need to be set.
+* @param attrib: Attribute for the given memory region.
+* @return None.
+*
+*
+******************************************************************************/
+void Xil_SetTlbAttributes(INTPTR addr, u32 attrib)
+{
+ INTPTR Localaddr = addr;
+ Localaddr &= (INTPTR)(~(0xFFFFFU));
+ /* Setting the MPU region with given attribute with 1MB size */
+ Xil_SetMPURegion(Localaddr, 0x100000, attrib);
+}
+
+/*****************************************************************************/
+/**
+* @brief Set the memory attributes for a section of memory by programming
+* 		the next free MPU region.
+*
+* @param addr: 32-bit start address for which memory attributes need to be set.
+* @param size: Size of the region in bytes.
+* @param attrib: Attribute for the given memory region.
+* @return XST_SUCCESS on success, XST_FAILURE if no free MPU region is
+* 		available.
+*
+*
+******************************************************************************/
+u32 Xil_SetMPURegion(INTPTR addr, u64 size, u32 attrib)
+{
+ u32 Regionsize = 0;
+ INTPTR Localaddr = addr;
+ u32 NextAvailableMemRegion;
+ u32 i;
+
+ NextAvailableMemRegion = Xil_GetNextMPURegion();
+ if (NextAvailableMemRegion == 0xFFU) {
+ xdbg_printf(DEBUG, "No regions available\r\n");
+ return XST_FAILURE;
+ }
+
+ Xil_DCacheFlush();
+ Xil_ICacheInvalidate();
+
+ mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER,NextAvailableMemRegion);
+ isb();
+
+ /* Lookup the size. */
+ for (i = 0; i < (sizeof (region_size) / sizeof (region_size[0])); i++) {
+ if (size <= region_size[i].size) {
+ Regionsize = region_size[i].encoding;
+ break;
+ }
+ }
+
+ Localaddr &= (INTPTR)(~(region_size[i].size - 1U));
+
+ Regionsize <<= 1;
+ Regionsize |= REGION_EN;
+ dsb();
+ mtcp(XREG_CP15_MPU_REG_BASEADDR, Localaddr); /* Set base address of a region */
+ mtcp(XREG_CP15_MPU_REG_ACCESS_CTRL, attrib); /* Set the control attribute */
+ mtcp(XREG_CP15_MPU_REG_SIZE_EN, Regionsize); /* set the region size and enable it*/
+ dsb();
+ isb();
+ Xil_UpdateMPUConfig(NextAvailableMemRegion, Localaddr, Regionsize, attrib);
+ return XST_SUCCESS;
+}
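+
+/*
+* Usage sketch (illustrative only, not part of the imported Xilinx sources):
+* marking a 64KB peripheral window as device memory. The base address and
+* the DEVICE_NONSHARED/PRIV_RW_USER_RW attribute macros are assumptions; the
+* attribute value is whatever encoding the MPU Region Access Control register
+* expects on the target platform.
+*
+* @code
+*   if (Xil_SetMPURegion(0xFF000000U, 0x10000U,
+*                        DEVICE_NONSHARED | PRIV_RW_USER_RW) != XST_SUCCESS) {
+*       xdbg_printf(DEBUG, "no free MPU region available\r\n");
+*   }
+* @endcode
+*/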
+/*****************************************************************************/
+/**
+* @brief Enable the MPU for the Cortex-R5 processor. Any enabled caches are
+* 		flushed/invalidated and disabled first, the MPU is enabled, and
+* 		the caches are then restored to their previous state.
+*
+* @return None.
+*
+******************************************************************************/
+void Xil_EnableMPU(void)
+{
+ u32 CtrlReg, Reg;
+ s32 DCacheStatus=0, ICacheStatus=0;
+	/* record which caches are currently enabled */
+#if defined (__GNUC__)
+ CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL,CtrlReg);
+#endif
+ if ((CtrlReg & XREG_CP15_CONTROL_C_BIT) != 0x00000000U) {
+ DCacheStatus=1;
+ }
+ if ((CtrlReg & XREG_CP15_CONTROL_I_BIT) != 0x00000000U) {
+ ICacheStatus=1;
+ }
+
+ if(DCacheStatus != 0) {
+ Xil_DCacheDisable();
+ }
+ if(ICacheStatus != 0){
+ Xil_ICacheDisable();
+ }
+#if defined (__GNUC__)
+ Reg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL,Reg);
+#endif
+ Reg |= 0x00000001U;
+ dsb();
+ mtcp(XREG_CP15_SYS_CONTROL, Reg);
+ isb();
+	/* re-enable the caches that were disabled earlier in this routine */
+ if(DCacheStatus != 0) {
+ Xil_DCacheEnable();
+ }
+ if(ICacheStatus != 0) {
+ Xil_ICacheEnable();
+ }
+}
+
+/*****************************************************************************/
+/**
+* @brief Disable the MPU for the Cortex-R5 processor. Any enabled caches are
+* 		flushed/invalidated and disabled first, the MPU is disabled, and
+* 		the caches are then restored to their previous state.
+*
+* @return None.
+*
+******************************************************************************/
+void Xil_DisableMPU(void)
+{
+ u32 CtrlReg, Reg;
+ s32 DCacheStatus=0, ICacheStatus=0;
+	/* record which caches are currently enabled */
+
+#if defined (__GNUC__)
+ CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL,CtrlReg);
+#endif
+ if ((CtrlReg & XREG_CP15_CONTROL_C_BIT) != 0x00000000U) {
+ DCacheStatus=1;
+ }
+ if ((CtrlReg & XREG_CP15_CONTROL_I_BIT) != 0x00000000U) {
+ ICacheStatus=1;
+ }
+
+ if(DCacheStatus != 0) {
+ Xil_DCacheDisable();
+ }
+ if(ICacheStatus != 0){
+ Xil_ICacheDisable();
+ }
+
+ mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0);
+#if defined (__GNUC__)
+ Reg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL,Reg);
+#endif
+ Reg &= ~(0x00000001U);
+ dsb();
+ mtcp(XREG_CP15_SYS_CONTROL, Reg);
+ isb();
+	/* re-enable the caches that were disabled earlier in this routine */
+ if(DCacheStatus != 0) {
+ Xil_DCacheEnable();
+ }
+ if(ICacheStatus != 0) {
+ Xil_ICacheEnable();
+ }
+}
+
+/*****************************************************************************/
+/**
+* @brief Update the MPU configuration for the requested region number in
+* the global MPU configuration table.
+*
+* @param reg_num: The region number whose configuration is to be updated.
+* @param address: 32 bit address for start of the region.
+* @param size: Requested size of the region.
+* @param attrib: Attribute for the corresponding region.
+* @return XST_FAILURE: When the requested region number is 16 or more.
+* XST_SUCCESS: When the MPU configuration table is updated.
+*
+*
+******************************************************************************/
+u32 Xil_UpdateMPUConfig(u32 reg_num, INTPTR address, u32 size, u32 attrib)
+{
+ u32 ReturnVal = XST_SUCCESS;
+ u32 Tempsize = size;
+ u32 Index;
+
+ if (reg_num >= MAX_POSSIBLE_MPU_REGS) {
+ xdbg_printf(DEBUG, "Invalid region number\r\n");
+ ReturnVal = XST_FAILURE;
+ goto exit;
+ }
+
+ if ((size & REGION_EN) != 0) {
+ Mpu_Config[reg_num].RegionStatus = MPU_REG_ENABLED;
+ Mpu_Config[reg_num].BaseAddress = address;
+ Tempsize &= (~REGION_EN);
+ Tempsize >>= 1;
+ /* Lookup the size. */
+ for (Index = 0; Index <
+ (sizeof (region_size) / sizeof (region_size[0])); Index++) {
+ if (Tempsize <= region_size[Index].encoding) {
+ Mpu_Config[reg_num].Size = region_size[Index].size;
+ break;
+ }
+ }
+ Mpu_Config[reg_num].Attribute = attrib;
+ } else {
+ Mpu_Config[reg_num].RegionStatus = 0U;
+ Mpu_Config[reg_num].BaseAddress = 0;
+ Mpu_Config[reg_num].Size = 0U;
+ Mpu_Config[reg_num].Attribute = 0U;
+ }
+
+exit:
+ return ReturnVal;
+}
+
+/*****************************************************************************/
+/**
+* @brief Copy the MPU configuration table to a caller-supplied buffer.
+*
+* @param mpuconfig: Destination of type XMpu_Config, an array of 16 region
+* 			entries representing the MPU configuration table.
+* @return None.
+*
+*
+******************************************************************************/
+void Xil_GetMPUConfig (XMpu_Config mpuconfig) {
+ u32 Index = 0U;
+
+ while (Index < MAX_POSSIBLE_MPU_REGS) {
+ mpuconfig[Index].RegionStatus = Mpu_Config[Index].RegionStatus;
+ mpuconfig[Index].BaseAddress = Mpu_Config[Index].BaseAddress;
+ mpuconfig[Index].Attribute = Mpu_Config[Index].Attribute;
+ mpuconfig[Index].Size = Mpu_Config[Index].Size;
+ Index++;
+ }
+}
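+
+/*
+* Usage sketch (illustrative only): taking a snapshot of the current MPU
+* layout. XMpu_Config is assumed to be an array type, so a local table can
+* be passed directly without taking its address.
+*
+* @code
+*   XMpu_Config snapshot;
+*
+*   Xil_GetMPUConfig(snapshot);
+* @endcode
+*/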
+
+/*****************************************************************************/
+/**
+* @brief Returns the total number of free MPU regions available.
+*
+* @return Number of free regions available to users
+*
+*
+******************************************************************************/
+u32 Xil_GetNumOfFreeRegions (void) {
+ u32 Index = 0U;
+ u32 NumofFreeRegs = 0U;
+
+ while (Index < MAX_POSSIBLE_MPU_REGS) {
+ if (MPU_REG_DISABLED == Mpu_Config[Index].RegionStatus) {
+ NumofFreeRegs++;
+ }
+ Index++;
+ }
+ return NumofFreeRegs;
+}
+
+/*****************************************************************************/
+/**
+* @brief Returns the free MPU regions in the form of a mask. A bit set to 1
+* in the returned 16-bit value means that the corresponding region
+* number is available.
+* For example, a return value of 0xC000 means that regions 14 and 15
+* are available to users.
+*
+* @return The free region mask as a 16 bit value
+*
+*
+******************************************************************************/
+u16 Xil_GetMPUFreeRegMask (void) {
+ u32 Index = 0U;
+ u16 FreeRegMask = 0U;
+
+ while (Index < MAX_POSSIBLE_MPU_REGS) {
+ if (MPU_REG_DISABLED == Mpu_Config[Index].RegionStatus) {
+ FreeRegMask |= ((u16)1U << Index);
+ }
+ Index++;
+ }
+ return FreeRegMask;
+}
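+
+/*
+* Usage sketch (illustrative only): querying the free-region bookkeeping
+* before adding a mapping, so an out-of-regions condition can be reported
+* up front instead of being discovered when Xil_SetMPURegion() fails.
+*
+* @code
+*   if (Xil_GetNumOfFreeRegions() == 0U) {
+*       xdbg_printf(DEBUG, "MPU full, free mask 0x%04x\r\n",
+*                   (unsigned) Xil_GetMPUFreeRegMask());
+*   }
+* @endcode
+*/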
+
+/*****************************************************************************/
+/**
+* @brief Disables the MPU region corresponding to the given region number.
+*
+* @param reg_num: The region number to be disabled
+* @return XST_SUCCESS: If the region could be disabled successfully
+* XST_FAILURE: If the requested region number is 16 or more.
+*
+*
+******************************************************************************/
+u32 Xil_DisableMPURegionByRegNum (u32 reg_num) {
+ u32 Temp = 0U;
+ u32 ReturnVal = XST_FAILURE;
+
+ if (reg_num >= 16U) {
+ xdbg_printf(DEBUG, "Invalid region number\r\n");
+ goto exit1;
+ }
+ Xil_DCacheFlush();
+ Xil_ICacheInvalidate();
+
+ mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER,reg_num);
+#if defined (__GNUC__)
+ Temp = mfcp(XREG_CP15_MPU_REG_SIZE_EN);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_MPU_REG_SIZE_EN,Temp);
+#endif
+ Temp &= (~REGION_EN);
+ dsb();
+ mtcp(XREG_CP15_MPU_REG_SIZE_EN,Temp);
+ dsb();
+ isb();
+ Xil_UpdateMPUConfig(reg_num, 0, 0U, 0U);
+ ReturnVal = XST_SUCCESS;
+
+exit1:
+ return ReturnVal;
+}
+
+/*****************************************************************************/
+/**
+* @brief Configures and enables the MPU region corresponding to the given
+* 		region number.
+*
+* @param reg_num: The region number to be enabled
+* @param addr: 32 bit address for start of the region.
+* @param size: Requested size of the region.
+* @param attrib: Attribute for the corresponding region.
+* @return XST_SUCCESS: If the region could be created successfully
+* XST_FAILURE: If the requested region number is 16 or more.
+*
+*
+******************************************************************************/
+u32 Xil_SetMPURegionByRegNum (u32 reg_num, INTPTR addr, u64 size, u32 attrib)
+{
+ u32 ReturnVal = XST_SUCCESS;
+ INTPTR Localaddr = addr;
+ u32 Regionsize = 0;
+ u32 Index;
+
+ if (reg_num >= 16U) {
+ xdbg_printf(DEBUG, "Invalid region number\r\n");
+ ReturnVal = XST_FAILURE;
+ goto exit2;
+ }
+
+ if (Mpu_Config[reg_num].RegionStatus == MPU_REG_ENABLED) {
+ xdbg_printf(DEBUG, "Region already enabled\r\n");
+ ReturnVal = XST_FAILURE;
+ goto exit2;
+ }
+
+ Xil_DCacheFlush();
+ Xil_ICacheInvalidate();
+ mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER,reg_num);
+ isb();
+
+ /* Lookup the size. */
+ for (Index = 0; Index <
+ (sizeof (region_size) / sizeof (region_size[0])); Index++) {
+ if (size <= region_size[Index].size) {
+ Regionsize = region_size[Index].encoding;
+ break;
+ }
+ }
+
+ Localaddr &= (INTPTR)(~(region_size[Index].size - 1U));
+ Regionsize <<= 1;
+ Regionsize |= REGION_EN;
+ dsb();
+ mtcp(XREG_CP15_MPU_REG_BASEADDR, Localaddr);
+ mtcp(XREG_CP15_MPU_REG_ACCESS_CTRL, attrib);
+ mtcp(XREG_CP15_MPU_REG_SIZE_EN, Regionsize);
+ dsb();
+ isb();
+ Xil_UpdateMPUConfig(reg_num, Localaddr, Regionsize, attrib);
+exit2:
+ return ReturnVal;
+
+}
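+
+/*
+* Usage sketch (illustrative only): re-programming a region that was set up
+* by the boot code. The region has to be disabled first because
+* Xil_SetMPURegionByRegNum() refuses to modify an enabled region. Region
+* number 4 and the NORM_NSHARED_WB_WA/PRIV_RW_USER_RW attribute macros are
+* assumptions made up for this example.
+*
+* @code
+*   (void) Xil_DisableMPURegionByRegNum(4U);
+*   (void) Xil_SetMPURegionByRegNum(4U, 0x00100000U, 0x100000U,
+*                                   NORM_NSHARED_WB_WA | PRIV_RW_USER_RW);
+* @endcode
+*/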
+
+/*****************************************************************************/
+/**
+* @brief Initializes the MPU configuration table with the regions that were
+* 		set up by the R5 boot code (the Init_Mpu function called before
+* 		C main).
+*
+* @return none
+*
+*
+******************************************************************************/
+void Xil_InitializeExistingMPURegConfig(void)
+{
+ u32 Index = 0U;
+ u32 Index1 = 0U;
+ u32 MPURegSize;
+ INTPTR MPURegBA;
+ u32 MPURegAttrib;
+ u32 Tempsize;
+
+ while (Index < MAX_POSSIBLE_MPU_REGS) {
+ mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER,Index);
+#if defined (__GNUC__)
+ MPURegSize = mfcp(XREG_CP15_MPU_REG_SIZE_EN);
+ MPURegBA = mfcp(XREG_CP15_MPU_REG_BASEADDR);
+ MPURegAttrib = mfcp(XREG_CP15_MPU_REG_ACCESS_CTRL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_MPU_REG_SIZE_EN,MPURegSize);
+ mfcp(XREG_CP15_MPU_REG_BASEADDR, MPURegBA);
+ mfcp(XREG_CP15_MPU_REG_ACCESS_CTRL, MPURegAttrib);
+#endif
+ if ((MPURegSize & REGION_EN) != 0) {
+ Mpu_Config[Index].RegionStatus = MPU_REG_ENABLED;
+ Mpu_Config[Index].BaseAddress = MPURegBA;
+ Mpu_Config[Index].Attribute = MPURegAttrib;
+ Tempsize = MPURegSize & (~REGION_EN);
+ Tempsize >>= 1;
+ for (Index1 = 0; Index1 <
+ (sizeof (region_size) / sizeof (region_size[0])); Index1++) {
+ if (Tempsize <= region_size[Index1].encoding) {
+ Mpu_Config[Index].Size = region_size[Index1].size;
+ break;
+ }
+ }
+ }
+ Index++;
+ }
+}
+
+/*****************************************************************************/
+/**
+* @brief Returns the next available free MPU region.
+*
+* @return The number of the next free MPU region, or 0xFF if no region is
+* 		available.
+*
+*
+******************************************************************************/
+u32 Xil_GetNextMPURegion(void)
+{
+ u32 Index = 0U;
+ u32 NextAvailableReg = 0xFF;
+ while (Index < MAX_POSSIBLE_MPU_REGS) {
+ if (Mpu_Config[Index].RegionStatus != MPU_REG_ENABLED) {
+ NextAvailableReg = Index;
+ break;
+ }
+ Index++;
+ }
+ return NextAvailableReg;
+}
+
+#ifdef __GNUC__
+#define u32overflow(a, b) ({typeof(a) s; __builtin_uadd_overflow(a, b, &s); })
+#else
+#define u32overflow(a, b) ((a) > ((a) + (b)))
+#endif /* __GNUC__ */
+
+/*****************************************************************************/
+/**
+* @brief Memory mapping for Cortex-R5F. If successful, the mapped
+* region will include all of the memory requested, but may
+* include more. Specifically, it will be a power of 2 in
+* size, aligned on a boundary of that size.
+*
+* @param Physaddr: Base physical address at which to start mapping.
+* 		A NULL Physaddr masks possible mapping errors.
+* @param size: Size of the region to be mapped.
+* @param flags: Memory attributes for the region; 0 requests no mapping.
+*
+* @return Physaddr on success, NULL on error. Ambiguous if Physaddr==NULL
+*
+* @cond Xil_MemMap_internal
+* @note: u32overflow() is defined for readability and (for __GNUC__) to
+* - force the type of the check to be the same as the first argument
+* - hide the otherwise unused third argument of the builtin
+* - improve safety by choosing the explicit _uadd_ version.
+* Consider __builtin_add_overflow_p() when available.
+* Use an alternative (less optimal?) for compilers w/o the builtin.
+* @endcond
+******************************************************************************/
+void *Xil_MemMap(UINTPTR Physaddr, size_t size, u32 flags)
+{
+ size_t Regionsize = MPU_REGION_SIZE_MIN;
+ UINTPTR Basephysaddr = 0, end = Physaddr + size;
+
+ if (flags == 0U) {
+ return (void *)Physaddr;
+ }
+ if (u32overflow(Physaddr, size)) {
+ return NULL;
+ }
+ for ( ; Regionsize != 0U; Regionsize <<= 1) {
+ if (Regionsize >= size) {
+ Basephysaddr = Physaddr & ~(Regionsize - 1U);
+ if (u32overflow(Basephysaddr, Regionsize)) {
+ break;
+ }
+ if ((Basephysaddr + Regionsize) >= end) {
+ return ((Xil_SetMPURegion(Basephysaddr,
+ Regionsize, flags) == XST_SUCCESS) ?
+ (void *)Physaddr : NULL);
+ }
+ }
+ }
+ return NULL;
+}
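+
+/*
+* Usage sketch (illustrative only): mapping a device register window with
+* Xil_MemMap(). The returned pointer equals the requested physical address
+* when a suitably sized and aligned MPU region could be created. The base
+* address and the DEVICE_NONSHARED/PRIV_RW_USER_RW attribute macros are
+* assumptions made up for this example.
+*
+* @code
+*   void *regs = Xil_MemMap(0xFF010000U, 0x1000U,
+*                           DEVICE_NONSHARED | PRIV_RW_USER_RW);
+*   if (regs == NULL) {
+*       xdbg_printf(DEBUG, "mapping failed\r\n");
+*   }
+* @endcode
+*/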