Diffstat (limited to 'bsps/shared/xil')
-rw-r--r-- bsps/shared/xil/VERSION                  |  20
-rw-r--r-- bsps/shared/xil/arm/ARMv8/xil_cache.c    | 732
-rw-r--r-- bsps/shared/xil/arm/cortexr5/xil_cache.c | 561
-rw-r--r-- bsps/shared/xil/arm/cortexr5/xil_mpu.c   | 645
-rw-r--r-- bsps/shared/xil/xil_assert.c             | 126
-rw-r--r-- bsps/shared/xil/xil_mem.c                |  70
6 files changed, 2154 insertions, 0 deletions
diff --git a/bsps/shared/xil/VERSION b/bsps/shared/xil/VERSION
new file mode 100644
index 0000000000..d94f574255
--- /dev/null
+++ b/bsps/shared/xil/VERSION
@@ -0,0 +1,20 @@
+The information in this file describes the source of files in
+bsps/shared/xil/ and bsps/include/xil/.
+
+Import from:
+
+https://github.com/Xilinx/embeddedsw.git
+
+commit 8a89579489c88ea5acd23d7d439ac928659c26cf
+Author: msreeram <manikanta.sreeram@xilinx.com>
+AuthorDate: Wed Apr 6 23:24:38 2022 -0600
+Commit: Siva Addepalli <sivaprasad.addepalli@xilinx.com>
+CommitDate: Fri Apr 8 16:47:15 2022 +0530
+
+ update license file for EmbeddedSW 2022.1 release
+
+ Update license file for EmbeddedSW 2022.1 release
+
+ Signed-off-by: Manikanta Sreeram <msreeram@xilinx.com>
+
+    Acked-by: Meena Paleti <meena.paleti@xilinx.com>
diff --git a/bsps/shared/xil/arm/ARMv8/xil_cache.c b/bsps/shared/xil/arm/ARMv8/xil_cache.c
new file mode 100644
index 0000000000..aef64b310a
--- /dev/null
+++ b/bsps/shared/xil/arm/ARMv8/xil_cache.c
@@ -0,0 +1,732 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2021 Xilinx, Inc. All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_cache.c
+*
+* Contains required functions for the ARM cache functionality.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 5.0 pkp 05/29/14 First release
+* 5.5 pkp 04/15/16 Updated the Xil_DCacheInvalidate,
+* Xil_DCacheInvalidateLine and Xil_DCacheInvalidateRange
+* function descriptions for a proper explanation
+* 6.2 pkp 01/22/17 Added support for EL1 non-secure
+* 6.2 asa 01/31/17 The existing Xil_DCacheDisable API first flushes the
+* D caches and then disables them. The problem with that is,
+* potentially there will be a small window between the cache
+* flush operation and disabling the D caches where we might
+* have valid data in cache lines. In such a scenario,
+* disabling the D cache can lead to unknown behavior. The
+* ideal solution to this is to use assembly code for the
+* complete API and avoid any memory accesses. But with that
+* we would end up having a huge amount of assembly code
+* which is not maintainable. Changes are done to use a mix
+* of assembly and C code. All local variables are put in
+* registers. Also, function calls are avoided in the API to
+* avoid using stack memory. These changes fix CR#966220.
+* 6.2 mus 02/13/17 The new API Xil_ConfigureL1Prefetch is added to disable
+* prefetching or to configure the maximum number of
+* outstanding data prefetches allowed in the L1 cache
+* system. It fixes CR#967864.
+* 6.6 mus 02/27/18 Updated the Xil_DCacheInvalidateRange and
+* Xil_ICacheInvalidateRange APIs to change the data type of
+* the "cacheline" variable to "INTPTR". This change has been
+* done to avoid the truncation of upper DDR addresses to
+* 32 bit. It fixes CR#995581.
+* 6.6 mus 03/15/18 By default CPUACTLR_EL1 is accessible only from EL3;
+* accessing it from the EL1 non-secure privilege level
+* results in an abort. Updated the Xil_ConfigureL1Prefetch
+* function to access CPUACTLR_EL1 only for EL3.
+* 6.8 mn 08/01/18 Optimize the Xil_DCacheInvalidateRange() function to remove
+* redundant operations
+* 6.8 asa 09/15/18 Fix bug in the Xil_DCacheInvalidateRange API introduced while
+* making optimizations in the previous patch. This change fixes
+* CR-1008926.
+* 7.0 mus 10/12/18 Updated Xil_DCacheInvalidateLine and Xil_DCacheInvalidateRange
+* APIs to replace IVAC instruction with CIVAC. So that, these
+* APIs will always do flush + invalidate in case of Cortexa53 as
+* well as Cortexa72 processor.
+* 7.1 mus 09/17/19 Xil_DCacheFlushRange and Xil_DCacheInvalidateRange
+* execute the same functionality (clean + invalidate).
+* Removed the Xil_DCacheFlushRange function implementation
+* and defined it as a macro. The Xil_DCacheFlushRange macro
+* points to the Xil_DCacheInvalidateRange API to avoid code
+* duplication.
+*
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xil_cache.h"
+#include "xil_io.h"
+#include "xpseudo_asm.h"
+#include "xparameters.h"
+#include "xreg_cortexa53.h"
+#include "xil_exception.h"
+#include "bspconfig.h"
+
+/************************** Function Prototypes ******************************/
+
+/************************** Variable Definitions *****************************/
+#define IRQ_FIQ_MASK 0xC0U /* Mask IRQ and FIQ interrupts in cpsr */
+
+/****************************************************************************/
+/**
+* @brief Enable the Data cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheEnable(void)
+{
+ u32 CtrlReg;
+
+ if (EL3 == 1) {
+ CtrlReg = mfcp(SCTLR_EL3);
+ } else if (EL1_NONSECURE == 1) {
+ CtrlReg = mfcp(SCTLR_EL1);
+ } else {
+ CtrlReg = 0U;
+ }
+
+ /* enable caches only if they are disabled */
+ if((CtrlReg & XREG_CONTROL_DCACHE_BIT) == 0X00000000U){
+
+ /* invalidate the Data cache */
+ Xil_DCacheInvalidate();
+
+ CtrlReg |= XREG_CONTROL_DCACHE_BIT;
+
+ if (EL3 == 1) {
+ /* enable the Data cache for el3*/
+ mtcp(SCTLR_EL3,CtrlReg);
+ } else if (EL1_NONSECURE == 1) {
+ /* enable the Data cache for el1*/
+ mtcp(SCTLR_EL1,CtrlReg);
+ }
+ }
+}
+
+/****************************************************************************/
+/**
+* @brief Disable the Data cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheDisable(void)
+{
+ register u32 CsidReg;
+ register u32 C7Reg;
+ register u32 LineSize;
+ register u32 NumWays;
+ register u32 Way;
+ register u32 WayIndex;
+ register u32 WayAdjust;
+ register u32 Set;
+ register u32 SetIndex;
+ register u32 NumSet;
+ register u32 CacheLevel;
+
+ dsb();
+ asm(
+ "mov x0, #0\n\t"
+#if EL3==1
+ "mrs x0, sctlr_el3 \n\t"
+ "and w0, w0, #0xfffffffb\n\t"
+ "msr sctlr_el3, x0\n\t"
+#elif EL1_NONSECURE==1
+ "mrs x0, sctlr_el1 \n\t"
+ "and w0, w0, #0xfffffffb\n\t"
+ "msr sctlr_el1, x0\n\t"
+#endif
+ "dsb sy\n\t"
+ );
+
+ /* Cache level to operate on */
+ CacheLevel = 0U;
+ /* Select cache level 0 and D cache in CSSR */
+ mtcp(CSSELR_EL1,CacheLevel);
+ isb();
+
+ CsidReg = mfcp(CCSIDR_EL1);
+
+ /* Get the cacheline size, way size, index size from csidr */
+ LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+ /* Number of Ways */
+ NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+ NumWays += 0x00000001U;
+
+ /* Number of Sets */
+ NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+ NumSet += 0x00000001U;
+
+ WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+ Way = 0U;
+ Set = 0U;
+
+ /* Flush all the cachelines */
+ for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+ for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+ C7Reg = Way | Set | CacheLevel;
+ mtcpdc(CISW,C7Reg);
+ Set += (0x00000001U << LineSize);
+ }
+ Set = 0U;
+ Way += (0x00000001U << WayAdjust);
+ }
+
+ /* Wait for Flush to complete */
+ dsb();
+
+ /* Select cache level 1 and D cache in CSSR */
+ CacheLevel += (0x00000001U << 1U);
+ mtcp(CSSELR_EL1,CacheLevel);
+ isb();
+
+ CsidReg = mfcp(CCSIDR_EL1);
+
+ /* Get the cacheline size, way size, index size from csidr */
+ LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+ /* Number of Ways */
+ NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+ NumWays += 0x00000001U;
+
+ /* Number of Sets */
+ NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+ NumSet += 0x00000001U;
+
+ WayAdjust=clz(NumWays) - (u32)0x0000001FU;
+
+ Way = 0U;
+ Set = 0U;
+
+ /* Flush all the cachelines */
+ for (WayIndex =0U; WayIndex < NumWays; WayIndex++) {
+ for (SetIndex =0U; SetIndex < NumSet; SetIndex++) {
+ C7Reg = Way | Set | CacheLevel;
+ mtcpdc(CISW,C7Reg);
+ Set += (0x00000001U << LineSize);
+ }
+ Set=0U;
+ Way += (0x00000001U<<WayAdjust);
+ }
+ /* Wait for Flush to complete */
+ dsb();
+
+ asm(
+#if EL3==1
+ "tlbi ALLE3\n\t"
+#elif EL1_NONSECURE==1
+ "tlbi VMALLE1\n\t"
+#endif
+ "dsb sy\r\n"
+ "isb\n\t"
+ );
+}
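+
+/*
+ * Worked example (editorial illustration, not part of the imported
+ * sources): for a 32KB, 4-way L1 D-cache with 64-byte lines, CCSIDR_EL1
+ * decodes to LineSize = 6, NumWays = 4 and NumSet = 128. Assuming the
+ * clz pseudo-op counts leading zeros in a 64-bit register,
+ * WayAdjust = clz(4) - 31 = 61 - 31 = 30; way w is then encoded as
+ * (w << 30), set s as (s << 6), and the DC CISW operand is
+ * Way | Set | CacheLevel, exactly as composed in the loops above.
+ */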
+
+/****************************************************************************/
+/**
+* @brief Invalidate the Data cache. The contents present in the cache are
+* cleaned and invalidated.
+*
+* @return None.
+*
+* @note In Cortex-A53, functionality to simply invalidate the cachelines
+* is not present. Such operations are a problem for an environment
+* that supports virtualisation. It would allow one OS to invalidate
+* a line belonging to another OS. This could lead to the other OS
+* crashing because of the loss of essential data. Hence, such
+* operations are promoted to clean and invalidate which avoids such
+* corruption.
+*
+****************************************************************************/
+void Xil_DCacheInvalidate(void)
+{
+ register u32 CsidReg, C7Reg;
+ u32 LineSize, NumWays;
+ u32 Way, WayIndex,WayAdjust, Set, SetIndex, NumSet, CacheLevel;
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+
+ /* Cache level to operate on */
+
+ CacheLevel=0U;
+ /* Select cache level 0 and D cache in CSSR */
+ mtcp(CSSELR_EL1,CacheLevel);
+ isb();
+
+ CsidReg = mfcp(CCSIDR_EL1);
+
+ /* Get the cacheline size, way size, index size from csidr */
+ LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+ /* Number of Ways */
+ NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+ NumWays += 0X00000001U;
+
+ /* Number of Sets */
+ NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+ NumSet += 0X00000001U;
+
+ WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+ Way = 0U;
+ Set = 0U;
+
+ /* Invalidate all the cachelines */
+ for (WayIndex =0U; WayIndex < NumWays; WayIndex++) {
+ for (SetIndex =0U; SetIndex < NumSet; SetIndex++) {
+ C7Reg = Way | Set | CacheLevel;
+ mtcpdc(ISW,C7Reg);
+ Set += (0x00000001U << LineSize);
+ }
+ Set = 0U;
+ Way += (0x00000001U << WayAdjust);
+ }
+
+ /* Wait for invalidate to complete */
+ dsb();
+
+ /* Select cache level 1 and D cache in CSSR */
+ CacheLevel += (0x00000001U<<1U) ;
+ mtcp(CSSELR_EL1,CacheLevel);
+ isb();
+
+ CsidReg = mfcp(CCSIDR_EL1);
+
+ /* Get the cacheline size, way size, index size from csidr */
+ LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+ /* Number of Ways */
+ NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+ NumWays += 0x00000001U;
+
+ /* Number of Sets */
+ NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+ NumSet += 0x00000001U;
+
+ WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+ Way = 0U;
+ Set = 0U;
+
+ /* Invalidate all the cachelines */
+ for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+ for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+ C7Reg = Way | Set | CacheLevel;
+ mtcpdc(ISW,C7Reg);
+ Set += (0x00000001U << LineSize);
+ }
+ Set = 0U;
+ Way += (0x00000001U << WayAdjust);
+ }
+ /* Wait for invalidate to complete */
+ dsb();
+
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate a Data cache line. The cacheline is cleaned and
+* invalidated.
+*
+* @param adr: 64bit address of the data to be flushed.
+*
+* @return None.
+*
+* @note In Cortex-A53, functionality to simply invalidate the cachelines
+* is not present. Such operations are a problem for an environment
+* that supports virtualisation. It would allow one OS to invalidate
+* a line belonging to another OS. This could lead to the other OS
+* crashing because of the loss of essential data. Hence, such
+* operations are promoted to clean and invalidate which avoids such
+* corruption.
+*
+****************************************************************************/
+void Xil_DCacheInvalidateLine(INTPTR adr)
+{
+
+ u32 currmask;
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ /* Select cache level 0 and D cache in CSSR */
+ mtcp(CSSELR_EL1,0x0);
+ mtcpdc(CIVAC,(adr & (~0x3F)));
+ /* Wait for invalidate to complete */
+ dsb();
+ /* Select cache level 1 and D cache in CSSR */
+ mtcp(CSSELR_EL1,0x2);
+ mtcpdc(IVAC,(adr & (~0x3F)));
+ /* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate the Data cache for the given address range.
+* The cachelines present in the address range are cleaned and
+* invalidated.
+*
+* @param adr: 64bit start address of the range to be invalidated.
+* @param len: Length of the range to be invalidated in bytes.
+*
+* @return None.
+*
+* @note In Cortex-A53, functionality to simply invalidate the cachelines
+* is not present. Such operations are a problem for an environment
+* that supports virtualisation. It would allow one OS to invalidate
+* a line belonging to another OS. This could lead to the other OS
+* crashing because of the loss of essential data. Hence, such
+* operations are promoted to clean and invalidate which avoids such
+* corruption.
+*
+****************************************************************************/
+void Xil_DCacheInvalidateRange(INTPTR adr, INTPTR len)
+{
+ const INTPTR cacheline = 64U;
+ INTPTR end = adr + len;
+ adr = adr & (~0x3F);
+ u32 currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+ if (len != 0U) {
+ while (adr < end) {
+ mtcpdc(CIVAC,adr);
+ adr += cacheline;
+ }
+ }
+ /* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Flush the Data cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheFlush(void)
+{
+ register u32 CsidReg, C7Reg;
+ u32 LineSize, NumWays;
+ u32 Way, WayIndex,WayAdjust, Set, SetIndex, NumSet, CacheLevel;
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+
+ /* Cache level to operate on */
+
+ CacheLevel = 0U;
+ /* Select cache level 0 and D cache in CSSR */
+ mtcp(CSSELR_EL1,CacheLevel);
+ isb();
+
+ CsidReg = mfcp(CCSIDR_EL1);
+
+ /* Get the cacheline size, way size, index size from csidr */
+ LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+ /* Number of Ways */
+ NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+ NumWays += 0x00000001U;
+
+ /* Number of Sets */
+ NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+ NumSet += 0x00000001U;
+
+ WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+ Way = 0U;
+ Set = 0U;
+
+ /* Flush all the cachelines */
+ for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+ for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+ C7Reg = Way | Set | CacheLevel;
+ mtcpdc(CISW,C7Reg);
+ Set += (0x00000001U << LineSize);
+ }
+ Set = 0U;
+ Way += (0x00000001U << WayAdjust);
+ }
+
+ /* Wait for Flush to complete */
+ dsb();
+
+ /* Select cache level 1 and D cache in CSSR */
+ CacheLevel += (0x00000001U << 1U);
+ mtcp(CSSELR_EL1,CacheLevel);
+ isb();
+
+ CsidReg = mfcp(CCSIDR_EL1);
+
+ /* Get the cacheline size, way size, index size from csidr */
+ LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+ /* Number of Ways */
+ NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+ NumWays += 0x00000001U;
+
+ /* Number of Sets */
+ NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+ NumSet += 0x00000001U;
+
+ WayAdjust=clz(NumWays) - (u32)0x0000001FU;
+
+ Way = 0U;
+ Set = 0U;
+
+ /* Flush all the cachelines */
+ for (WayIndex =0U; WayIndex < NumWays; WayIndex++) {
+ for (SetIndex =0U; SetIndex < NumSet; SetIndex++) {
+ C7Reg = Way | Set | CacheLevel;
+ mtcpdc(CISW,C7Reg);
+ Set += (0x00000001U << LineSize);
+ }
+ Set=0U;
+ Way += (0x00000001U<<WayAdjust);
+ }
+ /* Wait for Flush to complete */
+ dsb();
+
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Flush a Data cache line. If the byte specified by the address (adr)
+* is cached by the Data cache, the cacheline containing that byte is
+* invalidated. If the cacheline is modified (dirty), the entire
+* contents of the cacheline are written to system memory before the
+* line is invalidated.
+*
+* @param adr: 64bit address of the data to be flushed.
+*
+* @return None.
+*
+* @note The bottom 6 bits are set to 0, forced by architecture.
+*
+****************************************************************************/
+void Xil_DCacheFlushLine(INTPTR adr)
+{
+ u32 currmask;
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+ /* Select cache level 0 and D cache in CSSR */
+ mtcp(CSSELR_EL1,0x0);
+ mtcpdc(CIVAC,(adr & (~0x3F)));
+ /* Wait for flush to complete */
+ dsb();
+ /* Select cache level 1 and D cache in CSSR */
+ mtcp(CSSELR_EL1,0x2);
+ mtcpdc(CIVAC,(adr & (~0x3F)));
+ /* Wait for flush to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Enable the instruction cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheEnable(void)
+{
+ u32 CtrlReg;
+
+ if (EL3 == 1) {
+ CtrlReg = mfcp(SCTLR_EL3);
+ } else if (EL1_NONSECURE == 1) {
+ CtrlReg = mfcp(SCTLR_EL1);
+ } else {
+ CtrlReg = 0U;
+ }
+
+ /* enable caches only if they are disabled */
+ if((CtrlReg & XREG_CONTROL_ICACHE_BIT)==0x00000000U){
+ /* invalidate the instruction cache */
+ Xil_ICacheInvalidate();
+
+ CtrlReg |= XREG_CONTROL_ICACHE_BIT;
+
+ if (EL3 == 1) {
+ /* enable the instruction cache for el3*/
+ mtcp(SCTLR_EL3,CtrlReg);
+ } else if (EL1_NONSECURE == 1) {
+ /* enable the instruction cache for el1*/
+ mtcp(SCTLR_EL1,CtrlReg);
+ }
+ }
+}
+
+/****************************************************************************/
+/**
+* @brief Disable the instruction cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheDisable(void)
+{
+ u32 CtrlReg;
+
+ if (EL3 == 1) {
+ CtrlReg = mfcp(SCTLR_EL3);
+ } else if (EL1_NONSECURE == 1) {
+ CtrlReg = mfcp(SCTLR_EL1);
+ } else {
+ CtrlReg = 0U;
+ }
+ /* invalidate the instruction cache */
+ Xil_ICacheInvalidate();
+ CtrlReg &= ~(XREG_CONTROL_ICACHE_BIT);
+
+ if (EL3 == 1) {
+ /* disable the instruction cache */
+ mtcp(SCTLR_EL3,CtrlReg);
+ } else if (EL1_NONSECURE == 1) {
+ /* disable the instruction cache */
+ mtcp(SCTLR_EL1,CtrlReg);
+ }
+
+
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate the entire instruction cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheInvalidate(void)
+{
+ unsigned int currmask;
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+ mtcp(CSSELR_EL1,0x1);
+ dsb();
+ /* invalidate the instruction cache */
+ mtcpicall(IALLU);
+ /* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate an instruction cache line. If the instruction specified
+* by the parameter adr is cached by the instruction cache, the
+* cacheline containing that instruction is invalidated.
+*
+* @param adr: 64bit address of the instruction to be invalidated.
+*
+* @return None.
+*
+* @note The bottom 6 bits are set to 0, forced by architecture.
+*
+****************************************************************************/
+void Xil_ICacheInvalidateLine(INTPTR adr)
+{
+ u32 currmask;
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ mtcp(CSSELR_EL1,0x1);
+ /*Invalidate I Cache line*/
+ mtcpic(IVAU,adr & (~0x3F));
+ /* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate the instruction cache for the given address range.
+* If the instructions specified by the address range are cached by
+* the instruction cache, the cachelines containing those
+* instructions are invalidated.
+*
+* @param adr: 64bit start address of the range to be invalidated.
+* @param len: Length of the range to be invalidated in bytes.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheInvalidateRange(INTPTR adr, INTPTR len)
+{
+ const INTPTR cacheline = 64U;
+ INTPTR end;
+ INTPTR tempadr = adr;
+ INTPTR tempend;
+ u32 currmask;
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ if (len != 0x00000000U) {
+ end = tempadr + len;
+ tempend = end;
+ tempadr &= ~(cacheline - 0x00000001U);
+
+ /* Select cache Level 0 I-cache in CSSR */
+ mtcp(CSSELR_EL1,0x1);
+ while (tempadr < tempend) {
+ /*Invalidate I Cache line*/
+ mtcpic(IVAU,tempadr & (~0x3F));
+
+ tempadr += cacheline;
+ }
+ }
+/* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Configure the maximum number of outstanding data prefetches
+* allowed in L1 cache.
+*
+* @param num: maximum number of outstanding data prefetches allowed,
+* valid values are 0-7.
+*
+* @return None.
+*
+* @note This function is implemented only for EL3 privilege level.
+*
+*****************************************************************************/
+void Xil_ConfigureL1Prefetch (u8 num) {
+#if EL3
+ u64 val=0;
+
+ val= mfcp(S3_1_C15_C2_0 );
+ val &= ~(L1_DATA_PREFETCH_CONTROL_MASK);
+ val |= (num << L1_DATA_PREFETCH_CONTROL_SHIFT);
+ mtcp(S3_1_C15_C2_0,val);
+#endif
+}
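+
+#ifdef XIL_CACHE_USAGE_EXAMPLE
+/*
+ * Usage sketch (editorial illustration, not part of the imported
+ * sources): a typical coherence sequence for a buffer filled by a
+ * non-coherent DMA master. StartDmaWrite() and WaitDmaDone() are
+ * hypothetical device helpers.
+ */
+extern void StartDmaWrite(void *Buf, u32 Len); /* hypothetical */
+extern void WaitDmaDone(void); /* hypothetical */
+
+static void DmaReceiveExample(u8 *Buf, u32 Len)
+{
+ /* Drop stale CPU-held lines; on Cortex-A53/A72 this is promoted
+ * to clean + invalidate (CIVAC), see the notes above */
+ Xil_DCacheInvalidateRange((INTPTR)Buf, (INTPTR)Len);
+ StartDmaWrite(Buf, Len);
+ WaitDmaDone();
+ /* Invalidate again so subsequent CPU reads fetch the DMA data */
+ Xil_DCacheInvalidateRange((INTPTR)Buf, (INTPTR)Len);
+}
+#endif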
diff --git a/bsps/shared/xil/arm/cortexr5/xil_cache.c b/bsps/shared/xil/arm/cortexr5/xil_cache.c
new file mode 100644
index 0000000000..631d02f648
--- /dev/null
+++ b/bsps/shared/xil/arm/cortexr5/xil_cache.c
@@ -0,0 +1,561 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2022 Xilinx, Inc. All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_cache.c
+*
+* Contains required functions for the ARM cache functionality.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 5.00 pkp 02/20/14 First release
+* 6.2 mus 01/27/17 Updated to support IAR compiler
+* 7.3 dp 06/25/20 Updated to support armclang compiler
+* 7.7 sk 01/10/22 Update IRQ_FIQ_MASK macro from signed to unsigned
+* to fix misra_c_2012_rule_10_4 violation.
+* 7.7 sk 01/10/22 Typecast to fix wider essential type misra_c_2012_rule_10_7
+* violation.
+* 7.7 mus 02/21/22 Existing note in cache API's says, "bottom 4 bits of input
+* address are forced to 0 as per architecture". As cache line
+* length is of 32 byte, bottom 5 bits of input address would
+* be forced to 0. Updated note to have correct details.
+* It fixes CR#1122561.
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xil_cache.h"
+#include "xil_io.h"
+#include "xpseudo_asm.h"
+#include "xparameters.h"
+#include "xreg_cortexr5.h"
+#include "xil_exception.h"
+
+
+/************************** Variable Definitions *****************************/
+
+#define IRQ_FIQ_MASK 0xC0U /* Mask IRQ and FIQ interrupts in cpsr */
+
+#if defined (__clang__)
+extern s32 Image$$ARM_LIB_STACK$$Limit;
+extern s32 Image$$ARM_UNDEF_STACK$$Base;
+#elif defined (__GNUC__)
+extern s32 _stack_end;
+extern s32 __undef_stack;
+#endif
+
+/****************************************************************************/
+/************************** Function Prototypes ******************************/
+
+/****************************************************************************/
+/**
+* @brief Enable the Data cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheEnable(void)
+{
+ register u32 CtrlReg;
+
+ /* enable caches only if they are disabled */
+#if defined (__GNUC__)
+ CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL,CtrlReg);
+#endif
+ if ((CtrlReg & XREG_CP15_CONTROL_C_BIT)==0x00000000U) {
+ /* invalidate the Data cache */
+ Xil_DCacheInvalidate();
+
+ /* enable the Data cache */
+ CtrlReg |= (XREG_CP15_CONTROL_C_BIT);
+
+ mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
+ }
+}
+
+/****************************************************************************/
+/**
+* @brief Disable the Data cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheDisable(void)
+{
+ register u32 CtrlReg;
+
+ /* clean and invalidate the Data cache */
+ Xil_DCacheFlush();
+
+ /* disable the Data cache */
+#if defined (__GNUC__)
+ CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL,CtrlReg);
+#endif
+
+ CtrlReg &= ~(XREG_CP15_CONTROL_C_BIT);
+
+ mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate the entire Data cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheInvalidate(void)
+{
+ u32 currmask;
+ u32 stack_start,stack_end,stack_size;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+#if defined (__clang__)
+ stack_end = (u32 )&Image$$ARM_LIB_STACK$$Limit;
+ stack_start = (u32 )&Image$$ARM_UNDEF_STACK$$Base;
+#elif defined (__GNUC__)
+ stack_end = (u32 )&_stack_end;
+ stack_start = (u32 )&__undef_stack;
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+ stack_size = stack_start-stack_end;
+
+ /* Flush stack memory to save return address */
+ Xil_DCacheFlushRange(stack_end, stack_size);
+#endif
+
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
+
+ /*invalidate all D cache*/
+ mtcp(XREG_CP15_INVAL_DC_ALL, 0);
+
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate a Data cache line. If the byte specified by the
+* address (adr) is cached by the data cache, the cacheline
+* containing that byte is invalidated. If the cacheline is modified
+* (dirty), the modified contents are lost and are NOT written
+* to system memory before the line is invalidated.
+*
+*
+* @param adr: 32bit address of the data to be invalidated.
+*
+* @return None.
+*
+* @note The bottom 5 bits are set to 0, forced by architecture.
+*
+****************************************************************************/
+void Xil_DCacheInvalidateLine(INTPTR adr)
+{
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
+ mtcp(XREG_CP15_INVAL_DC_LINE_MVA_POC, (adr & (~0x1F)));
+
+ /* Wait for invalidate to complete */
+ dsb();
+
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate the Data cache for the given address range.
+* If the bytes specified by the address (adr) are cached by the
+* Data cache,the cacheline containing that byte is invalidated.
+* If the cacheline is modified (dirty), the modified contents are
+* lost and are NOT written to system memory before the line is
+* invalidated.
+*
+* @param adr: 32bit start address of the range to be invalidated.
+* @param len: Length of range to be invalidated in bytes.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheInvalidateRange(INTPTR adr, u32 len)
+{
+ const u32 cacheline = 32U;
+ u32 end;
+ u32 tempadr = adr;
+ u32 tempend;
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ if (len != 0U) {
+ end = tempadr + len;
+ tempend = end;
+ /* Select L1 Data cache in CSSR */
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
+
+ if ((tempadr & (cacheline-1U)) != 0U) {
+ tempadr &= (~(cacheline - 1U));
+
+ Xil_DCacheFlushLine(tempadr);
+ }
+ if ((tempend & (cacheline-1U)) != 0U) {
+ tempend &= (~(cacheline - 1U));
+
+ Xil_DCacheFlushLine(tempend);
+ }
+
+ while (tempadr < tempend) {
+
+ /* Invalidate Data cache line */
+ asm_inval_dc_line_mva_poc(tempadr);
+
+ tempadr += cacheline;
+ }
+ }
+
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Flush the entire Data cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheFlush(void)
+{
+ register u32 CsidReg, C7Reg;
+ u32 CacheSize, LineSize, NumWays;
+ u32 Way, WayIndex, Set, SetIndex, NumSet;
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ /* Select cache level 0 and D cache in CSSR */
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
+
+#if defined (__GNUC__)
+ CsidReg = mfcp(XREG_CP15_CACHE_SIZE_ID);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_CACHE_SIZE_ID,CsidReg);
+#endif
+ /* Determine Cache Size */
+
+ CacheSize = (CsidReg >> 13U) & 0x000001FFU;
+ CacheSize += 0x00000001U;
+ CacheSize *= (u32)128; /* to get number of bytes */
+
+ /* Number of Ways */
+ NumWays = (CsidReg & 0x000003ffU) >> 3U;
+ NumWays += 0x00000001U;
+
+ /* Get the cacheline size, way size, index size from csidr */
+ LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+ NumSet = CacheSize/NumWays;
+ NumSet /= (((u32)0x00000001U) << LineSize);
+
+ Way = 0U;
+ Set = 0U;
+
+ /* Invalidate all the cachelines */
+ for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+ for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+ C7Reg = Way | Set;
+ /* Flush by Set/Way */
+ asm_clean_inval_dc_line_sw(C7Reg);
+
+ Set += (((u32)0x00000001U) << LineSize);
+ }
+ Set = 0U;
+ Way += 0x40000000U;
+ }
+
+ /* Wait for flush to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Flush a Data cache line. If the byte specified by the address (adr)
+* is cached by the Data cache, the cacheline containing that byte is
+* invalidated. If the cacheline is modified (dirty), the entire
+* contents of the cacheline are written to system memory before the
+* line is invalidated.
+*
+* @param adr: 32bit address of the data to be flushed.
+*
+* @return None.
+*
+* @note The bottom 5 bits are set to 0, forced by architecture.
+*
+****************************************************************************/
+void Xil_DCacheFlushLine(INTPTR adr)
+{
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
+
+ mtcp(XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC, (adr & (~0x1F)));
+
+ /* Wait for flush to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Flush the Data cache for the given address range.
+* If the bytes specified by the address (adr) are cached by the
+* Data cache, the cachelines containing those bytes are
+* invalidated. If a cacheline is modified (dirty), its contents
+* are written to system memory before the line is invalidated.
+*
+* @param adr: 32bit start address of the range to be flushed.
+* @param len: Length of the range to be flushed in bytes
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheFlushRange(INTPTR adr, u32 len)
+{
+ u32 LocalAddr = adr;
+ const u32 cacheline = 32U;
+ u32 end;
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ if (len != 0x00000000U) {
+ /* Back the starting address up to the start of a cache line
+ * and perform cache operations until adr+len
+ */
+ end = LocalAddr + len;
+ LocalAddr &= ~(cacheline - 1U);
+
+ while (LocalAddr < end) {
+ /* Flush Data cache line */
+ asm_clean_inval_dc_line_mva_poc(LocalAddr);
+
+ LocalAddr += cacheline;
+ }
+ }
+ dsb();
+ mtcpsr(currmask);
+}
+/****************************************************************************/
+/**
+* @brief Store a Data cache line. If the byte specified by the address
+* (adr) is cached by the Data cache and the cacheline is modified
+* (dirty), the entire contents of the cacheline are written to
+* system memory. After the store completes, the cacheline is marked
+* as unmodified (not dirty).
+*
+* @param adr: 32bit address of the data to be stored
+*
+* @return None.
+*
+* @note The bottom 5 bits are set to 0, forced by architecture.
+*
+****************************************************************************/
+void Xil_DCacheStoreLine(INTPTR adr)
+{
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
+ mtcp(XREG_CP15_CLEAN_DC_LINE_MVA_POC, (adr & (~0x1F)));
+
+ /* Wait for store to complete */
+ dsb();
+ isb();
+
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Enable the instruction cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheEnable(void)
+{
+ register u32 CtrlReg;
+
+ /* enable caches only if they are disabled */
+#if defined (__GNUC__)
+ CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL, CtrlReg);
+#endif
+ if ((CtrlReg & XREG_CP15_CONTROL_I_BIT)==0x00000000U) {
+ /* invalidate the instruction cache */
+ mtcp(XREG_CP15_INVAL_IC_POU, 0);
+
+ /* enable the instruction cache */
+ CtrlReg |= (XREG_CP15_CONTROL_I_BIT);
+
+ mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
+ }
+}
+
+/****************************************************************************/
+/**
+* @brief Disable the instruction cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheDisable(void)
+{
+ register u32 CtrlReg;
+
+ dsb();
+
+ /* invalidate the instruction cache */
+ mtcp(XREG_CP15_INVAL_IC_POU, 0);
+
+ /* disable the instruction cache */
+#if defined (__GNUC__)
+ CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL,CtrlReg);
+#endif
+
+ CtrlReg &= ~(XREG_CP15_CONTROL_I_BIT);
+
+ mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate the entire instruction cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheInvalidate(void)
+{
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 1);
+
+ /* invalidate the instruction cache */
+ mtcp(XREG_CP15_INVAL_IC_POU, 0);
+
+ /* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate an instruction cache line. If the instruction specified
+* by the address is cached by the instruction cache, the
+* cacheline containing that instruction is invalidated.
+*
+* @param adr: 32bit address of the instruction to be invalidated.
+*
+* @return None.
+*
+* @note The bottom 5 bits are set to 0, forced by architecture.
+*
+****************************************************************************/
+void Xil_ICacheInvalidateLine(INTPTR adr)
+{
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 1);
+ mtcp(XREG_CP15_INVAL_IC_LINE_MVA_POU, (adr & (~0x1F)));
+
+ /* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate the instruction cache for the given address range.
+* If the instructions specified by the address range are cached
+* by the instruction cache, the cachelines containing them are
+* invalidated.
+*
+* @param adr: 32bit start address of the range to be invalidated.
+* @param len: Length of the range to be invalidated in bytes.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheInvalidateRange(INTPTR adr, u32 len)
+{
+ u32 LocalAddr = adr;
+ const u32 cacheline = 32U;
+ u32 end;
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+ if (len != 0x00000000U) {
+ /* Back the starting address up to the start of a cache line
+ * and perform cache operations until adr+len
+ */
+ end = LocalAddr + len;
+ LocalAddr = LocalAddr & ~(cacheline - 1U);
+
+ /* Select cache L0 I-cache in CSSR */
+ mtcp(XREG_CP15_CACHE_SIZE_SEL, 1U);
+
+ while (LocalAddr < end) {
+
+ /* Invalidate L1 I-cache line */
+ asm_inval_ic_line_mva_pou(LocalAddr);
+
+ LocalAddr += cacheline;
+ }
+ }
+
+ /* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
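+
+#ifdef XIL_CACHE_USAGE_EXAMPLE
+/*
+ * Usage sketch (editorial illustration, not part of the imported
+ * sources): publish a CPU-written buffer to a non-coherent master by
+ * cleaning the covering cachelines to memory first.
+ */
+static u8 TxBuf[64];
+
+static void PublishBufferExample(void)
+{
+ TxBuf[0] = 0xA5U;
+ /* Write back (and invalidate) all lines covering TxBuf */
+ Xil_DCacheFlushRange((INTPTR)TxBuf, sizeof(TxBuf));
+}
+#endif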
diff --git a/bsps/shared/xil/arm/cortexr5/xil_mpu.c b/bsps/shared/xil/arm/cortexr5/xil_mpu.c
new file mode 100644
index 0000000000..85f8f7f8da
--- /dev/null
+++ b/bsps/shared/xil/arm/cortexr5/xil_mpu.c
@@ -0,0 +1,645 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2022 Xilinx, Inc. All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+* @file xil_mpu.c
+*
+* This file provides APIs for enabling/disabling MPU and setting the memory
+* attributes for sections, in the MPU translation table.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- ---------------------------------------------------
+* 5.00 pkp 02/10/14 Initial version
+* 6.2 mus 01/27/17 Updated to support IAR compiler
+* 6.4 asa 08/16/17 Added many APIs for MPU access to make MPU usage
+* user-friendly. The APIs added are: Xil_UpdateMPUConfig,
+* Xil_GetMPUConfig, Xil_GetNumOfFreeRegions,
+* Xil_GetNextMPURegion, Xil_DisableMPURegionByRegNum,
+* Xil_GetMPUFreeRegMask, Xil_SetMPURegionByRegNum, and
+* Xil_InitializeExistingMPURegConfig.
+* Added a new array of structure of type XMpuConfig to
+* represent the MPU configuration table.
+* 6.8 aru 07/02/18 Returned the pointer instead of address
+* of that pointer in Xil_MemMap().
+* 7.5 asa 03/01/21 Ensure that Mpu_Config does not stay in .boot/.vector
+* sections which generally should be executable code
+* which can be allocated and not written.
+* Mpu_Config array is populated during boot time, hence
+* cannot be placed in .bss or .data section. Putting
+* Mpu_Config in a new .bootdata section.
+* 7.7 sk 01/10/22 Update int to u32 to fix misrac misra_c_2012_directive_4_6
+* violations.
+* 7.7 sk 01/10/22 Typecast variables from signed to unsigned to fix
+* misra_c_2012_rule_10_4 violation.
+* 7.7 sk 01/10/22 Add explicit parentheses for region_size and region_size[0]
+* to fix misra_c_2012_rule_12_1 violation.
+* 7.7 sk 01/10/22 Remove unsigned sign to fix misra_c_2012_rule_10_3 violation.
+* 7.7 sk 01/10/22 Modify if condition to fix misra_c_2012_rule_10_1 violation.
+* 7.7 sk 01/10/22 Typecast to fix wider essential type misra_c_2012_rule_10_7
+* violation.
+* 7.7 sk 01/10/22 Update conditional expression to fix misra_c_2012_rule_14_4
+* violation.
+* 7.7 sk 01/10/22 Add braces for the if statement to make it a compound
+* statement to fix misra_c_2012_rule_15_6 violation.
+* </pre>
+*
+*
+******************************************************************************/
+
+/*
+ * Origin: https://github.com/Xilinx/embeddedsw/blob/master/lib/bsp/standalone/src/arm/cortexr5/xil_mpu.c
+ * __rtems__ changes:
+ * - un-include xdebug.h and add macro for xdbg_printf
+ * - relocate XMpu_Config
+ * - from Xilinx link script section (".bootdata")
+ * - to RTEMS link script section (".bsp_start_data")
+ */
+
+/***************************** Include Files *********************************/
+
+#include "xil_cache.h"
+#include "xpseudo_asm.h"
+#include "xil_types.h"
+#include "xil_mpu.h"
+#ifndef __rtems__
+#include "xdebug.h"
+#else
+#define xdbg_printf(...)
+#endif
+#include "xstatus.h"
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/**************************** Type Definitions *******************************/
+
+/************************** Constant Definitions *****************************/
+#define MPU_REGION_SIZE_MIN 0x20
+/************************** Variable Definitions *****************************/
+
+static const struct {
+ u64 size;
+ u32 encoding;
+}region_size[] = {
+ { 0x20, REGION_32B },
+ { 0x40, REGION_64B },
+ { 0x80, REGION_128B },
+ { 0x100, REGION_256B },
+ { 0x200, REGION_512B },
+ { 0x400, REGION_1K },
+ { 0x800, REGION_2K },
+ { 0x1000, REGION_4K },
+ { 0x2000, REGION_8K },
+ { 0x4000, REGION_16K },
+ { 0x8000, REGION_32K },
+ { 0x10000, REGION_64K },
+ { 0x20000, REGION_128K },
+ { 0x40000, REGION_256K },
+ { 0x80000, REGION_512K },
+ { 0x100000, REGION_1M },
+ { 0x200000, REGION_2M },
+ { 0x400000, REGION_4M },
+ { 0x800000, REGION_8M },
+ { 0x1000000, REGION_16M },
+ { 0x2000000, REGION_32M },
+ { 0x4000000, REGION_64M },
+ { 0x8000000, REGION_128M },
+ { 0x10000000, REGION_256M },
+ { 0x20000000, REGION_512M },
+ { 0x40000000, REGION_1G },
+ { 0x80000000, REGION_2G },
+ { 0x100000000, REGION_4G },
+};
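+
+/*
+ * Example (editorial illustration): a request of 0x12000 bytes (72KB)
+ * matches the first entry with size >= 0x12000, i.e. { 0x20000,
+ * REGION_128K }, so the region is rounded up to 128KB and its base is
+ * aligned to a 128KB boundary.
+ */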
+
+#ifndef __rtems__
+#if defined (__GNUC__)
+XMpu_Config Mpu_Config __attribute__((section(".bootdata")));
+#elif defined (__ICCARM__)
+#pragma default_function_attributes = @ ".bootdata"
+XMpu_Config Mpu_Config;
+#endif
+#else
+XMpu_Config Mpu_Config __attribute__((section(".bsp_start_data")));
+#endif
+
+/************************** Function Prototypes ******************************/
+void Xil_InitializeExistingMPURegConfig(void);
+/*****************************************************************************/
+/**
+* @brief This function sets the memory attributes for a section covering
+* 1MB of memory in the translation table.
+*
+* @param addr: 32-bit address for which memory attributes need to be set.
+* @param attrib: Attribute for the given memory region.
+* @return None.
+*
+*
+******************************************************************************/
+void Xil_SetTlbAttributes(INTPTR addr, u32 attrib)
+{
+ INTPTR Localaddr = addr;
+ Localaddr &= (INTPTR)(~(0xFFFFFU));
+ /* Setting the MPU region with given attribute with 1MB size */
+ Xil_SetMPURegion(Localaddr, 0x100000, attrib);
+}
+
+/*****************************************************************************/
+/**
+* @brief Set the memory attributes for a section of memory in the
+* translation table.
+*
+* @param addr: 32-bit address for which memory attributes need to be set.
+* @param size: the size of the region in bytes.
+* @param attrib: Attribute for the given memory region.
+* @return XST_SUCCESS on success, XST_FAILURE if no free region is
+* available.
+*
+*
+******************************************************************************/
+u32 Xil_SetMPURegion(INTPTR addr, u64 size, u32 attrib)
+{
+ u32 Regionsize = 0;
+ INTPTR Localaddr = addr;
+ u32 NextAvailableMemRegion;
+ u32 i;
+
+ NextAvailableMemRegion = Xil_GetNextMPURegion();
+ if (NextAvailableMemRegion == 0xFFU) {
+ xdbg_printf(DEBUG, "No regions available\r\n");
+ return XST_FAILURE;
+ }
+
+ Xil_DCacheFlush();
+ Xil_ICacheInvalidate();
+
+ mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER,NextAvailableMemRegion);
+ isb();
+
+ /* Lookup the size. */
+ for (i = 0; i < (sizeof (region_size) / sizeof (region_size[0])); i++) {
+ if (size <= region_size[i].size) {
+ Regionsize = region_size[i].encoding;
+ break;
+ }
+ }
+
+ /* Reject sizes beyond the largest encodable region (4GB) */
+ if (i >= (sizeof (region_size) / sizeof (region_size[0]))) {
+ xdbg_printf(DEBUG, "Requested size is not supported\r\n");
+ return XST_FAILURE;
+ }
+
+ Localaddr &= (INTPTR)(~(region_size[i].size - 1U));
+
+ Regionsize <<= 1;
+ Regionsize |= REGION_EN;
+ dsb();
+ mtcp(XREG_CP15_MPU_REG_BASEADDR, Localaddr); /* Set base address of a region */
+ mtcp(XREG_CP15_MPU_REG_ACCESS_CTRL, attrib); /* Set the control attribute */
+ mtcp(XREG_CP15_MPU_REG_SIZE_EN, Regionsize); /* set the region size and enable it*/
+ dsb();
+ isb();
+ Xil_UpdateMPUConfig(NextAvailableMemRegion, Localaddr, Regionsize, attrib);
+ return XST_SUCCESS;
+}
+/*****************************************************************************/
+/**
+* @brief Enable the MPU for the Cortex-R5 processor. This function
+* invalidates the I cache, flushes the D caches, and then
+* enables the MPU.
+*
+* @return None.
+*
+******************************************************************************/
+void Xil_EnableMPU(void)
+{
+ u32 CtrlReg, Reg;
+ s32 DCacheStatus=0, ICacheStatus=0;
+ /* enable caches only if they are disabled */
+#if defined (__GNUC__)
+ CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL,CtrlReg);
+#endif
+ if ((CtrlReg & XREG_CP15_CONTROL_C_BIT) != 0x00000000U) {
+ DCacheStatus=1;
+ }
+ if ((CtrlReg & XREG_CP15_CONTROL_I_BIT) != 0x00000000U) {
+ ICacheStatus=1;
+ }
+
+ if(DCacheStatus != 0) {
+ Xil_DCacheDisable();
+ }
+ if(ICacheStatus != 0){
+ Xil_ICacheDisable();
+ }
+#if defined (__GNUC__)
+ Reg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL,Reg);
+#endif
+ Reg |= 0x00000001U;
+ dsb();
+ mtcp(XREG_CP15_SYS_CONTROL, Reg);
+ isb();
+ /* enable caches only if they are disabled in routine*/
+ if(DCacheStatus != 0) {
+ Xil_DCacheEnable();
+ }
+ if(ICacheStatus != 0) {
+ Xil_ICacheEnable();
+ }
+}
+
+/*****************************************************************************/
+/**
+* @brief Disable the MPU for the Cortex-R5 processor. This function
+* invalidates the I cache, flushes the D caches, and then
+* disables the MPU.
+*
+* @return None.
+*
+******************************************************************************/
+void Xil_DisableMPU(void)
+{
+ u32 CtrlReg, Reg;
+ s32 DCacheStatus=0, ICacheStatus=0;
+ /* enable caches only if they are disabled */
+
+#if defined (__GNUC__)
+ CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL,CtrlReg);
+#endif
+ if ((CtrlReg & XREG_CP15_CONTROL_C_BIT) != 0x00000000U) {
+ DCacheStatus=1;
+ }
+ if ((CtrlReg & XREG_CP15_CONTROL_I_BIT) != 0x00000000U) {
+ ICacheStatus=1;
+ }
+
+ if(DCacheStatus != 0) {
+ Xil_DCacheDisable();
+ }
+ if(ICacheStatus != 0){
+ Xil_ICacheDisable();
+ }
+
+ mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0);
+#if defined (__GNUC__)
+ Reg = mfcp(XREG_CP15_SYS_CONTROL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_SYS_CONTROL,Reg);
+#endif
+ Reg &= ~(0x00000001U);
+ dsb();
+ mtcp(XREG_CP15_SYS_CONTROL, Reg);
+ isb();
+ /* enable caches only if they are disabled in routine*/
+ if(DCacheStatus != 0) {
+ Xil_DCacheEnable();
+ }
+ if(ICacheStatus != 0) {
+ Xil_ICacheEnable();
+ }
+}
+
+/*****************************************************************************/
+/**
+* @brief Update the MPU configuration for the requested region number in
+* the global MPU configuration table.
+*
+* @param reg_num: The region number whose configuration is to be updated.
+* @param address: 32 bit address for start of the region.
+* @param size: Requested size of the region.
+* @param attrib: Attribute for the corresponding region.
+* @return XST_FAILURE: When the requested region number is 16 or more.
+* XST_SUCCESS: When the MPU configuration table is updated.
+*
+*
+******************************************************************************/
+u32 Xil_UpdateMPUConfig(u32 reg_num, INTPTR address, u32 size, u32 attrib)
+{
+ u32 ReturnVal = XST_SUCCESS;
+ u32 Tempsize = size;
+ u32 Index;
+
+ if (reg_num >= MAX_POSSIBLE_MPU_REGS) {
+ xdbg_printf(DEBUG, "Invalid region number\r\n");
+ ReturnVal = XST_FAILURE;
+ goto exit;
+ }
+
+ if ((size & REGION_EN) != 0) {
+ Mpu_Config[reg_num].RegionStatus = MPU_REG_ENABLED;
+ Mpu_Config[reg_num].BaseAddress = address;
+ Tempsize &= (~REGION_EN);
+ Tempsize >>= 1;
+ /* Lookup the size. */
+ for (Index = 0; Index <
+ (sizeof (region_size) / sizeof (region_size[0])); Index++) {
+ if (Tempsize <= region_size[Index].encoding) {
+ Mpu_Config[reg_num].Size = region_size[Index].size;
+ break;
+ }
+ }
+ Mpu_Config[reg_num].Attribute = attrib;
+ } else {
+ Mpu_Config[reg_num].RegionStatus = 0U;
+ Mpu_Config[reg_num].BaseAddress = 0;
+ Mpu_Config[reg_num].Size = 0U;
+ Mpu_Config[reg_num].Attribute = 0U;
+ }
+
+exit:
+ return ReturnVal;
+}
+
+/*****************************************************************************/
+/**
+* @brief The MPU configuration table is passed to the caller.
+*
+* @param mpuconfig: This is of type XMpu_Config which is an array of
+* 16 entries of type structure representing the MPU config table
+* @return none
+*
+*
+******************************************************************************/
+void Xil_GetMPUConfig (XMpu_Config mpuconfig) {
+ u32 Index = 0U;
+
+ while (Index < MAX_POSSIBLE_MPU_REGS) {
+ mpuconfig[Index].RegionStatus = Mpu_Config[Index].RegionStatus;
+ mpuconfig[Index].BaseAddress = Mpu_Config[Index].BaseAddress;
+ mpuconfig[Index].Attribute = Mpu_Config[Index].Attribute;
+ mpuconfig[Index].Size = Mpu_Config[Index].Size;
+ Index++;
+ }
+}
+
+/*****************************************************************************/
+/**
+* @brief Returns the total number of free MPU regions available.
+*
+* @return Number of free regions available to users
+*
+*
+******************************************************************************/
+u32 Xil_GetNumOfFreeRegions (void) {
+ u32 Index = 0U;
+ u32 NumofFreeRegs = 0U;
+
+ while (Index < MAX_POSSIBLE_MPU_REGS) {
+ if (MPU_REG_DISABLED == Mpu_Config[Index].RegionStatus) {
+ NumofFreeRegs++;
+ }
+ Index++;
+ }
+ return NumofFreeRegs;
+}
+
+/*****************************************************************************/
+/**
+* @brief Returns the free MPU regions available in the form of a mask.
+* A bit set to 1 in the returned 16 bit value means that the
+* corresponding region number is available.
+* For example, if this function returns 0xC000, regions 14 and
+* 15 are available to users.
+*
+* @return The free region mask as a 16 bit value
+*
+*
+******************************************************************************/
+u16 Xil_GetMPUFreeRegMask (void) {
+ u32 Index = 0U;
+ u16 FreeRegMask = 0U;
+
+ while (Index < MAX_POSSIBLE_MPU_REGS) {
+ if (MPU_REG_DISABLED == Mpu_Config[Index].RegionStatus) {
+ FreeRegMask |= ((u16)1U << Index);
+ }
+ Index++;
+ }
+ return FreeRegMask;
+}
+
+/*****************************************************************************/
+/**
+* @brief Disables the corresponding region number as passed by the user.
+*
+* @param reg_num: The region number to be disabled
+* @return XST_SUCCESS: If the region could be disabled successfully
+* XST_FAILURE: If the requested region number is 16 or more.
+*
+*
+******************************************************************************/
+u32 Xil_DisableMPURegionByRegNum (u32 reg_num) {
+ u32 Temp = 0U;
+ u32 ReturnVal = XST_FAILURE;
+
+ if (reg_num >= 16U) {
+ xdbg_printf(DEBUG, "Invalid region number\r\n");
+ goto exit1;
+ }
+ Xil_DCacheFlush();
+ Xil_ICacheInvalidate();
+
+ mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER,reg_num);
+#if defined (__GNUC__)
+ Temp = mfcp(XREG_CP15_MPU_REG_SIZE_EN);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_MPU_REG_SIZE_EN,Temp);
+#endif
+ Temp &= (~REGION_EN);
+ dsb();
+ mtcp(XREG_CP15_MPU_REG_SIZE_EN,Temp);
+ dsb();
+ isb();
+ Xil_UpdateMPUConfig(reg_num, 0, 0U, 0U);
+ ReturnVal = XST_SUCCESS;
+
+exit1:
+ return ReturnVal;
+}
+
+/*****************************************************************************/
+/**
+* @brief Enables the corresponding region number as passed by the user.
+*
+* @param reg_num: The region number to be enabled
+* @param addr: 32 bit address for start of the region.
+* @param size: Requested size of the region.
+* @param attrib: Attribute for the corresponding region.
+* @return XST_SUCCESS: If the region could be created successfully
+* XST_FAILURE: If the requested region number is 16 or more.
+*
+*
+******************************************************************************/
+u32 Xil_SetMPURegionByRegNum (u32 reg_num, INTPTR addr, u64 size, u32 attrib)
+{
+ u32 ReturnVal = XST_SUCCESS;
+ INTPTR Localaddr = addr;
+ u32 Regionsize = 0;
+ u32 Index;
+
+ if (reg_num >= 16U) {
+ xdbg_printf(DEBUG, "Invalid region number\r\n");
+ ReturnVal = XST_FAILURE;
+ goto exit2;
+ }
+
+ if (Mpu_Config[reg_num].RegionStatus == MPU_REG_ENABLED) {
+ xdbg_printf(DEBUG, "Region already enabled\r\n");
+ ReturnVal = XST_FAILURE;
+ goto exit2;
+ }
+
+ Xil_DCacheFlush();
+ Xil_ICacheInvalidate();
+ mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER,reg_num);
+ isb();
+
+ /* Lookup the size. */
+ for (Index = 0; Index <
+ (sizeof (region_size) / sizeof (region_size[0])); Index++) {
+ if (size <= region_size[Index].size) {
+ Regionsize = region_size[Index].encoding;
+ break;
+ }
+ }
+
+ /* Reject sizes beyond the largest encodable region (4GB) */
+ if (Index >= (sizeof (region_size) / sizeof (region_size[0]))) {
+ xdbg_printf(DEBUG, "Requested size is not supported\r\n");
+ ReturnVal = XST_FAILURE;
+ goto exit2;
+ }
+
+ Localaddr &= (INTPTR)(~(region_size[Index].size - 1U));
+ Regionsize <<= 1;
+ Regionsize |= REGION_EN;
+ dsb();
+ mtcp(XREG_CP15_MPU_REG_BASEADDR, Localaddr);
+ mtcp(XREG_CP15_MPU_REG_ACCESS_CTRL, attrib);
+ mtcp(XREG_CP15_MPU_REG_SIZE_EN, Regionsize);
+ dsb();
+ isb();
+ Xil_UpdateMPUConfig(reg_num, Localaddr, Regionsize, attrib);
+exit2:
+ return ReturnVal;
+
+}
+
+/*****************************************************************************/
+/**
+* @brief Initializes the MPU configuration table with the regions that
+* are set up in the R5 boot code, in the Init_Mpu function
+* called before C main.
+*
+* @return none
+*
+*
+******************************************************************************/
+void Xil_InitializeExistingMPURegConfig(void)
+{
+ u32 Index = 0U;
+ u32 Index1 = 0U;
+ u32 MPURegSize;
+ INTPTR MPURegBA;
+ u32 MPURegAttrib;
+ u32 Tempsize;
+
+ while (Index < MAX_POSSIBLE_MPU_REGS) {
+ mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER,Index);
+#if defined (__GNUC__)
+ MPURegSize = mfcp(XREG_CP15_MPU_REG_SIZE_EN);
+ MPURegBA = mfcp(XREG_CP15_MPU_REG_BASEADDR);
+ MPURegAttrib = mfcp(XREG_CP15_MPU_REG_ACCESS_CTRL);
+#elif defined (__ICCARM__)
+ mfcp(XREG_CP15_MPU_REG_SIZE_EN,MPURegSize);
+ mfcp(XREG_CP15_MPU_REG_BASEADDR, MPURegBA);
+ mfcp(XREG_CP15_MPU_REG_ACCESS_CTRL, MPURegAttrib);
+#endif
+ if ((MPURegSize & REGION_EN) != 0) {
+ Mpu_Config[Index].RegionStatus = MPU_REG_ENABLED;
+ Mpu_Config[Index].BaseAddress = MPURegBA;
+ Mpu_Config[Index].Attribute = MPURegAttrib;
+ Tempsize = MPURegSize & (~REGION_EN);
+ Tempsize >>= 1;
+ for (Index1 = 0; Index1 <
+ (sizeof (region_size) / sizeof (region_size[0])); Index1++) {
+ if (Tempsize <= region_size[Index1].encoding) {
+ Mpu_Config[Index].Size = region_size[Index1].size;
+ break;
+ }
+ }
+ }
+ Index++;
+ }
+}
+
+/*****************************************************************************/
+/**
+* @brief Returns the next available free MPU region
+*
+* @return The next free MPU region, or 0xFF if none is available
+*
+*
+******************************************************************************/
+u32 Xil_GetNextMPURegion(void)
+{
+ u32 Index = 0U;
+ u32 NextAvailableReg = 0xFF;
+ while (Index < MAX_POSSIBLE_MPU_REGS) {
+ if (Mpu_Config[Index].RegionStatus != MPU_REG_ENABLED) {
+ NextAvailableReg = Index;
+ break;
+ }
+ Index++;
+ }
+ return NextAvailableReg;
+}
+
+#ifdef __GNUC__
+#define u32overflow(a, b) ({typeof(a) s; __builtin_uadd_overflow(a, b, &s); })
+#else
+#define u32overflow(a, b) ((a) > ((a) + (b)))
+#endif /* __GNUC__ */
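+
+/*
+ * Example (editorial illustration): u32overflow(0xFFFFFFF0U, 0x20U)
+ * evaluates to true because the 32-bit sum wraps; Xil_MemMap uses this
+ * to reject requests that cross the top of the address space.
+ */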
+
+/*****************************************************************************/
+/**
+* @brief Memory mapping for Cortex-R5F. If successful, the mapped
+* region will include all of the memory requested, but may
+* include more. Specifically, it will be a power of 2 in
+* size, aligned on a boundary of that size.
+*
+* @param Physaddr is base physical address at which to start mapping.
+* NULL in Physaddr masks possible mapping errors.
+* @param size of region to be mapped.
+* @param flags used to set translation table.
+*
+* @return Physaddr on success, NULL on error. Ambiguous if Physaddr==NULL
+*
+* @cond Xil_MemMap_internal
+* @note: u32overflow() is defined for readability and (for __GNUC__) to
+* - force the type of the check to be the same as the first argument
+* - hide the otherwise unused third argument of the builtin
+* - improve safety by choosing the explicit _uadd_ version.
+* Consider __builtin_add_overflow_p() when available.
+* Use an alternative (less optimal?) for compilers w/o the builtin.
+* @endcond
+******************************************************************************/
+void *Xil_MemMap(UINTPTR Physaddr, size_t size, u32 flags)
+{
+ size_t Regionsize = MPU_REGION_SIZE_MIN;
+ UINTPTR Basephysaddr = 0, end = Physaddr + size;
+
+ if (flags == 0U) {
+ return (void *)Physaddr;
+ }
+ if (u32overflow(Physaddr, size)) {
+ return NULL;
+ }
+ for ( ; Regionsize != 0U; Regionsize <<= 1) {
+ if (Regionsize >= size) {
+ Basephysaddr = Physaddr & ~(Regionsize - 1U);
+ if (u32overflow(Basephysaddr, Regionsize)) {
+ break;
+ }
+ if ((Basephysaddr + Regionsize) >= end) {
+ return ((Xil_SetMPURegion(Basephysaddr,
+ Regionsize, flags) == XST_SUCCESS) ?
+ (void *)Physaddr : NULL);
+ }
+ }
+ }
+ return NULL;
+}
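+
+#ifdef XIL_MPU_USAGE_EXAMPLE
+/*
+ * Usage sketch (editorial illustration, not part of the imported
+ * sources): map a 1MB device window as strongly-ordered memory. The
+ * base address is a placeholder, and the attribute macros are assumed
+ * to be provided by xreg_cortexr5.h.
+ */
+static void MapDeviceWindowExample(void)
+{
+ u32 Attrib = DEVICE_NONSHARED | PRIV_RW_USER_RW;
+
+ if (Xil_SetMPURegion(0xF9000000U, 0x100000ULL, Attrib) != XST_SUCCESS) {
+ /* no free MPU region, or the requested size is not supported */
+ }
+}
+#endif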
diff --git a/bsps/shared/xil/xil_assert.c b/bsps/shared/xil/xil_assert.c
new file mode 100644
index 0000000000..b3dd7e9718
--- /dev/null
+++ b/bsps/shared/xil/xil_assert.c
@@ -0,0 +1,126 @@
+/******************************************************************************
+* Copyright (c) 2009 - 2021 Xilinx, Inc. All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_assert.c
+* @addtogroup common_assert_apis Assert APIs and Macros
+* @{
+*
+* This file contains basic assert related functions for Xilinx software IP.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a hbm 07/14/09 Initial release
+* 6.0 kvn 05/31/16 Make Xil_AssertWait a global variable
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xil_types.h"
+#include "xil_assert.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Variable Definitions *****************************/
+
+/**
+ * @brief This variable makes testing with asserts easier. An assert
+ * sets this variable such that a driver can evaluate it to
+ * determine whether an assert occurred.
+ */
+u32 Xil_AssertStatus;
+
+/**
+ * @brief This variable allows the assert functionality to be changed for testing
+ * such that it does not wait infinitely. Use the debugger to disable the
+ * waiting during testing of asserts.
+ */
+s32 Xil_AssertWait = 1;
+
+/* The callback function to be invoked when an assert is taken */
+static Xil_AssertCallback Xil_AssertCallbackRoutine = NULL;
+
+/************************** Function Prototypes ******************************/
+
+/*****************************************************************************/
+/**
+*
+* @brief Implement assert. Currently, it calls a user-defined callback
+* function if one has been set. Then, it potentially enters an
+* infinite loop depending on the value of the Xil_AssertWait
+* variable.
+*
+* @param File: filename of the source
+* @param Line: linenumber within File
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+void Xil_Assert(const char8 *File, s32 Line)
+{
+ /* if the callback has been set then invoke it */
+ if (Xil_AssertCallbackRoutine != 0) {
+ (*Xil_AssertCallbackRoutine)(File, Line);
+ }
+
+ /* if specified, wait indefinitely such that the assert will show up
+ * in testing
+ */
+ while (Xil_AssertWait != 0) {
+ }
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief Set up a callback function to be invoked when an assert occurs.
+* If a callback is already installed, then it will be replaced.
+*
+* @param Routine: callback to be invoked when an assert is taken
+*
+* @return None.
+*
+* @note This function has no effect if NDEBUG is set
+*
+******************************************************************************/
+void Xil_AssertSetCallback(Xil_AssertCallback Routine)
+{
+ Xil_AssertCallbackRoutine = Routine;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief Null handler function. This follows the XInterruptHandler
+* signature for interrupt handlers. It can be used to assign a null
+* handler (a stub) to an interrupt controller vector table.
+*
+* @param NullParameter: an arbitrary void pointer; not used.
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+void XNullHandler(void *NullParameter)
+{
+ (void) NullParameter;
+}
+/**
+* @} End of "addtogroup common_assert_apis".
+*/
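+
+#ifdef XIL_ASSERT_USAGE_EXAMPLE
+/*
+ * Usage sketch (editorial illustration, not part of the imported
+ * sources): count assert hits instead of spinning forever, which is
+ * convenient in test builds.
+ */
+static volatile u32 AssertHits;
+
+static void CountingAssertHook(const char8 *File, s32 Line)
+{
+ (void)File;
+ (void)Line;
+ AssertHits++;
+}
+
+static void InstallAssertHookExample(void)
+{
+ Xil_AssertWait = 0; /* do not loop forever in Xil_Assert() */
+ Xil_AssertSetCallback(CountingAssertHook);
+}
+#endif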
diff --git a/bsps/shared/xil/xil_mem.c b/bsps/shared/xil/xil_mem.c
new file mode 100644
index 0000000000..44e7d9a0c4
--- /dev/null
+++ b/bsps/shared/xil/xil_mem.c
@@ -0,0 +1,70 @@
+/******************************************************************************/
+/**
+* Copyright (c) 2015 - 2022 Xilinx, Inc. All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/****************************************************************************/
+/**
+* @file xil_mem.c
+*
+* This file contains the Xil_MemCpy function for use with word-aligned
+* data copies.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- -------- -------- -----------------------------------------------
+* 6.1 nsk 11/07/16 First release.
+* 7.7 sk 01/10/22 Update Xil_MemCpy functions variables typecast
+* from int to s32 to fix misra_c_2012_directive_4_6
+* violations.
+* 7.7 sk 01/10/22 Include xil_mem.h header file to fix Xil_MemCpy
+* prototype misra_c_2012_rule_8_4 violation.
+*
+* </pre>
+*
+*****************************************************************************/
+
+/***************************** Include Files ********************************/
+
+#include "xil_types.h"
+#include "xil_mem.h"
+
+/***************** Inline Functions Definitions ********************/
+/*****************************************************************************/
+/**
+* @brief This function copies memory from one location to another.
+*
+* @param dst: pointer pointing to destination memory
+*
+* @param src: pointer pointing to source memory
+*
+* @param cnt: 32-bit count of bytes to be copied
+*
+*****************************************************************************/
+void Xil_MemCpy(void* dst, const void* src, u32 cnt)
+{
+ char *d = (char*)(void *)dst;
+ const char *s = src;
+
+ while (cnt >= sizeof (s32)) {
+ *(s32*)d = *(s32*)s;
+ d += sizeof (s32);
+ s += sizeof (s32);
+ cnt -= sizeof (s32);
+ }
+ while (cnt >= sizeof (u16)) {
+ *(u16*)d = *(u16*)s;
+ d += sizeof (u16);
+ s += sizeof (u16);
+ cnt -= sizeof (u16);
+ }
+ while ((cnt) > 0U){
+ *d = *s;
+ d += 1U;
+ s += 1U;
+ cnt -= 1U;
+ }
+}
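+
+#ifdef XIL_MEM_USAGE_EXAMPLE
+/*
+ * Usage sketch (editorial illustration, not part of the imported
+ * sources): Xil_MemCpy pays off when source and destination share
+ * 32-bit alignment, so the word loop moves most of the data.
+ */
+static void CopyExample(void)
+{
+ static u32 Src[16] = { 0x11223344U };
+ static u32 Dst[16];
+
+ Xil_MemCpy(Dst, Src, sizeof(Src));
+}
+#endif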