Diffstat (limited to 'bsps/shared/xil')
-rw-r--r--  bsps/shared/xil/VERSION        20
-rw-r--r--  bsps/shared/xil/xil_assert.c  126
-rw-r--r--  bsps/shared/xil/xil_cache.c   732
-rw-r--r--  bsps/shared/xil/xil_mem.c      70
4 files changed, 948 insertions(+), 0 deletions(-)
diff --git a/bsps/shared/xil/VERSION b/bsps/shared/xil/VERSION
new file mode 100644
index 0000000000..d94f574255
--- /dev/null
+++ b/bsps/shared/xil/VERSION
@@ -0,0 +1,20 @@
+The information in this file describes the source of files in
+bsps/shared/xil/ and bsps/include/xil/.
+
+Import from:
+
+https://github.com/Xilinx/embeddedsw.git
+
+commit 8a89579489c88ea5acd23d7d439ac928659c26cf
+Author: msreeram <manikanta.sreeram@xilinx.com>
+AuthorDate: Wed Apr 6 23:24:38 2022 -0600
+Commit: Siva Addepalli <sivaprasad.addepalli@xilinx.com>
+CommitDate: Fri Apr 8 16:47:15 2022 +0530
+
+ update license file for EmbeddedSW 2022.1 release
+
+ Update license file for EmbeddedSW 2022.1 release
+
+ Signed-off-by: Manikanta Sreeram <msreeram@xilinx.com>
+
+ Acked-by: Meena Paleti <meena.paleti@xilinx.com>
diff --git a/bsps/shared/xil/xil_assert.c b/bsps/shared/xil/xil_assert.c
new file mode 100644
index 0000000000..b3dd7e9718
--- /dev/null
+++ b/bsps/shared/xil/xil_assert.c
@@ -0,0 +1,126 @@
+/******************************************************************************
+* Copyright (c) 2009 - 2021 Xilinx, Inc. All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_assert.c
+* @addtogroup common_assert_apis Assert APIs and Macros
+* @{
+*
+* This file contains basic assert-related functions for Xilinx software IP.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a hbm 07/14/09 Initial release
+* 6.0 kvn 05/31/16 Make Xil_AssertWait a global variable
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xil_types.h"
+#include "xil_assert.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Variable Definitions *****************************/
+
+/**
+ * @brief This variable makes it easier to test code that uses asserts. An
+ *        assert sets this variable so that a driver can examine it to
+ *        determine whether an assert has occurred.
+ */
+u32 Xil_AssertStatus;
+
+/**
+ * @brief This variable allows the assert behavior to be changed for testing
+ *        so that it does not wait indefinitely. Use the debugger to disable
+ *        the waiting while testing asserts.
+ */
+s32 Xil_AssertWait = 1;
+
+/* The callback function to be invoked when an assert is taken */
+static Xil_AssertCallback Xil_AssertCallbackRoutine = NULL;
+
+/************************** Function Prototypes ******************************/
+
+/*****************************************************************************/
+/**
+*
+* @brief Implement assert. Currently, it calls a user-defined callback
+* function if one has been set. Then, it potentially enters an
+* infinite loop depending on the value of the Xil_AssertWait
+* variable.
+*
+* @param File: filename of the source
+* @param Line: line number within File
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+void Xil_Assert(const char8 *File, s32 Line)
+{
+ /* if the callback has been set then invoke it */
+ if (Xil_AssertCallbackRoutine != 0) {
+ (*Xil_AssertCallbackRoutine)(File, Line);
+ }
+
+ /* if specified, wait indefinitely such that the assert will show up
+ * in testing
+ */
+ while (Xil_AssertWait != 0) {
+ }
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief Set up a callback function to be invoked when an assert occurs.
+* If a callback is already installed, then it will be replaced.
+*
+* @param Routine: callback to be invoked when an assert is taken
+*
+* @return None.
+*
+* @note This function has no effect if NDEBUG is set
+*
+******************************************************************************/
+void Xil_AssertSetCallback(Xil_AssertCallback Routine)
+{
+ Xil_AssertCallbackRoutine = Routine;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief Null handler function. This follows the XInterruptHandler
+* signature for interrupt handlers. It can be used to assign a null
+* handler (a stub) to an interrupt controller vector table.
+*
+* @param NullParameter: arbitrary void pointer; not used.
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+void XNullHandler(void *NullParameter)
+{
+ (void) NullParameter;
+}
+/**
+* @} End of "addtogroup common_assert_apis".
+*/
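
For context: the two variables above exist to make asserts testable. A test
harness can install a callback and clear Xil_AssertWait so that a failing
assert records its location instead of spinning forever. A minimal sketch,
assuming the usual declarations from xil_assert.h (including the
XIL_ASSERT_NONE/XIL_ASSERT_OCCURRED status values); SomeDriverCall is a
hypothetical function under test:

#include "xil_assert.h"
#include "xil_printf.h"

extern void SomeDriverCall(void *InstancePtr); /* hypothetical */

/* Callback invoked by Xil_Assert(): log where the assert fired. */
static void TestAssertHandler(const char8 *File, s32 Line)
{
	xil_printf("assert at %s:%d\r\n", File, (int)Line);
}

int RunAssertTest(void)
{
	Xil_AssertWait = 0;                  /* do not spin in Xil_Assert() */
	Xil_AssertStatus = XIL_ASSERT_NONE;  /* clear any previous status   */
	Xil_AssertSetCallback(TestAssertHandler);

	SomeDriverCall(NULL);                /* expected to trip an assert  */

	return (Xil_AssertStatus == XIL_ASSERT_OCCURRED) ? 0 : -1;
}
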
diff --git a/bsps/shared/xil/xil_cache.c b/bsps/shared/xil/xil_cache.c
new file mode 100644
index 0000000000..aef64b310a
--- /dev/null
+++ b/bsps/shared/xil/xil_cache.c
@@ -0,0 +1,732 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2021 Xilinx, Inc. All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_cache.c
+*
+* Contains required functions for the ARM cache functionality.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 5.0 pkp 05/29/14 First release
+* 5.5 pkp 04/15/16 Updated the descriptions of the Xil_DCacheInvalidate,
+* Xil_DCacheInvalidateLine and Xil_DCacheInvalidateRange
+* functions for a proper explanation
+* 6.2 pkp 01/22/17 Added support for EL1 non-secure
+* 6.2 asa 01/31/17 The existing Xil_DCacheDisable API first flushes the
+* D caches and then disables them. The problem with that is
+* the potentially small window between the cache flush
+* operation and the disabling of the D caches in which cache
+* lines may still hold valid data. In such a scenario,
+* disabling the D cache can lead to unknown behavior. The
+* ideal solution is to write the complete API in assembly
+* and avoid any memory accesses, but that would mean a huge
+* amount of assembly code which is not maintainable. Instead,
+* a mix of assembly and C code is used: all local variables
+* are kept in registers and function calls are avoided in
+* the API to avoid using stack memory.
+* These changes fix CR#966220.
+* 6.2 mus 02/13/17 The new API Xil_ConfigureL1Prefetch is added to disable
+* prefetching or to configure the maximum number of
+* outstanding data prefetches allowed in the L1 cache
+* system. It fixes CR#967864.
+* 6.6 mus 02/27/18 Updated the Xil_DCacheInvalidateRange and
+* Xil_ICacheInvalidateRange APIs to change the data type of
+* the "cacheline" variable to "INTPTR". This change avoids
+* the truncation of upper DDR addresses to 32 bits. It
+* fixes CR#995581.
+* 6.6 mus 03/15/18 By default CPUACTLR_EL1 is accessible only from EL3;
+* accessing it from the EL1 non-secure privilege level
+* results in an abort. Updated the Xil_ConfigureL1Prefetch
+* function to access CPUACTLR_EL1 only at EL3.
+* 6.8 mn 08/01/18 Optimize the Xil_DCacheInvalidateRange() function to remove
+* redundant operations
+* 6.8 asa 09/15/18 Fix bug in the Xil_DCacheInvalidateRange API introduced while
+* making optimizations in the previous patch. This change fixes
+* CR-1008926.
+* 7.0 mus 10/12/18 Updated the Xil_DCacheInvalidateLine and
+* Xil_DCacheInvalidateRange APIs to replace the IVAC
+* instruction with CIVAC, so that these APIs always perform
+* flush + invalidate on Cortex-A53 as well as Cortex-A72.
+* 7.1 mus 09/17/19 Xil_DCacheFlushRange and Xil_DCacheInvalidateRange
+* execute the same functionality (clean + invalidate).
+* Removed the Xil_DCacheFlushRange function implementation
+* and defined it as a macro that points to the
+* Xil_DCacheInvalidateRange API to avoid code duplication.
+*
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xil_cache.h"
+#include "xil_io.h"
+#include "xpseudo_asm.h"
+#include "xparameters.h"
+#include "xreg_cortexa53.h"
+#include "xil_exception.h"
+#include "bspconfig.h"
+
+/************************** Function Prototypes ******************************/
+
+/************************** Variable Definitions *****************************/
+#define IRQ_FIQ_MASK 0xC0U /* Mask IRQ and FIQ interrupts in cpsr */
+
+/****************************************************************************/
+/**
+* @brief Enable the Data cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheEnable(void)
+{
+ u32 CtrlReg;
+
+ if (EL3 == 1) {
+ CtrlReg = mfcp(SCTLR_EL3);
+ } else if (EL1_NONSECURE == 1) {
+ CtrlReg = mfcp(SCTLR_EL1);
+ } else {
+ CtrlReg = 0U;
+ }
+
+ /* enable caches only if they are disabled */
+ if ((CtrlReg & XREG_CONTROL_DCACHE_BIT) == 0x00000000U) {
+
+ /* invalidate the Data cache */
+ Xil_DCacheInvalidate();
+
+ CtrlReg |= XREG_CONTROL_DCACHE_BIT;
+
+ if (EL3 == 1) {
+ /* enable the Data cache for el3*/
+ mtcp(SCTLR_EL3,CtrlReg);
+ } else if (EL1_NONSECURE == 1) {
+ /* enable the Data cache for el1*/
+ mtcp(SCTLR_EL1,CtrlReg);
+ }
+ }
+}
+
+/****************************************************************************/
+/**
+* @brief Disable the Data cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheDisable(void)
+{
+ register u32 CsidReg;
+ register u32 C7Reg;
+ register u32 LineSize;
+ register u32 NumWays;
+ register u32 Way;
+ register u32 WayIndex;
+ register u32 WayAdjust;
+ register u32 Set;
+ register u32 SetIndex;
+ register u32 NumSet;
+ register u32 CacheLevel;
+
+ dsb();
+ asm(
+ "mov x0, #0\n\t"
+#if EL3==1
+ "mrs x0, sctlr_el3 \n\t"
+ "and w0, w0, #0xfffffffb\n\t"
+ "msr sctlr_el3, x0\n\t"
+#elif EL1_NONSECURE==1
+ "mrs x0, sctlr_el1 \n\t"
+ "and w0, w0, #0xfffffffb\n\t"
+ "msr sctlr_el1, x0\n\t"
+#endif
+ "dsb sy\n\t"
+ );
+
+ /* Select cache level 0 and D cache in CSSR */
+ CacheLevel = 0U;
+ mtcp(CSSELR_EL1,CacheLevel);
+ isb();
+
+ CsidReg = mfcp(CCSIDR_EL1);
+
+ /* Get the cacheline size, way size, index size from csidr */
+ LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+ /* Number of Ways */
+ NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+ NumWays += 0x00000001U;
+
+ /* Number of Sets */
+ NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+ NumSet += 0x00000001U;
+
+ WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+ Way = 0U;
+ Set = 0U;
+
+ /* Flush all the cachelines */
+ for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+ for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+ C7Reg = Way | Set | CacheLevel;
+ mtcpdc(CISW,C7Reg);
+ Set += (0x00000001U << LineSize);
+ }
+ Set = 0U;
+ Way += (0x00000001U << WayAdjust);
+ }
+
+ /* Wait for Flush to complete */
+ dsb();
+
+ /* Select cache level 1 and D cache in CSSR */
+ CacheLevel += (0x00000001U << 1U);
+ mtcp(CSSELR_EL1,CacheLevel);
+ isb();
+
+ CsidReg = mfcp(CCSIDR_EL1);
+
+ /* Get the cacheline size, way size, index size from csidr */
+ LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+ /* Number of Ways */
+ NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+ NumWays += 0x00000001U;
+
+ /* Number of Sets */
+ NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+ NumSet += 0x00000001U;
+
+ WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+ Way = 0U;
+ Set = 0U;
+
+ /* Flush all the cachelines */
+ for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+ for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+ C7Reg = Way | Set | CacheLevel;
+ mtcpdc(CISW,C7Reg);
+ Set += (0x00000001U << LineSize);
+ }
+ Set = 0U;
+ Way += (0x00000001U << WayAdjust);
+ }
+ /* Wait for Flush to complete */
+ dsb();
+
+ asm(
+#if EL3==1
+ "tlbi ALLE3\n\t"
+#elif EL1_NONSECURE==1
+ "tlbi VMALLE1\n\t"
+#endif
+ "dsb sy\r\n"
+ "isb\n\t"
+ );
+}
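
To make the set/way arithmetic in the loops above concrete: on a typical
Cortex-A53 L1 D-cache (32 KB, 4-way, 64-byte lines) CCSIDR_EL1 decodes to
LineSize = 6, NumWays = 4 and NumSet = 128, and because clz() operates on a
64-bit register, WayAdjust = clz(4) - 31 = 61 - 31 = 30. The illustrative
sketch below hard-codes those values to show how a single DC CISW operand is
laid out; it mirrors what the loops build incrementally and adds nothing new:

#include "xil_types.h"

/* Build the DC CISW operand for one cache line (illustration only;
 * constants assume a 32 KB, 4-way, 64-byte-line L1 D-cache). */
static u64 SetWayOperand(u32 Way, u32 Set, u32 Level)
{
	const u32 LineSize  = 6U;   /* log2(64-byte line)      */
	const u32 WayAdjust = 30U;  /* 64-bit clz(4 ways) - 31 */

	return ((u64)Way << WayAdjust)  /* way index in bits [31:30]          */
	     | ((u64)Set << LineSize)   /* set index starting at bit LineSize */
	     | ((u64)Level << 1);       /* cache level in bits [3:1]          */
}
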
+
+/****************************************************************************/
+/**
+* @brief Invalidate the Data cache. The contents present in the cache are
+* cleaned and invalidated.
+*
+* @return None.
+*
+* @note In Cortex-A53, functionality to simply invalidate the cachelines
+* is not present. Such operations are a problem for an environment
+* that supports virtualisation: they would allow one OS to invalidate
+* a line belonging to another OS, which could crash that OS through
+* the loss of essential data. Hence, such operations are promoted to
+* clean and invalidate, which avoids such corruption.
+*
+****************************************************************************/
+void Xil_DCacheInvalidate(void)
+{
+ register u32 CsidReg, C7Reg;
+ u32 LineSize, NumWays;
+ u32 Way, WayIndex,WayAdjust, Set, SetIndex, NumSet, CacheLevel;
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ /* Select cache level 0 and D cache in CSSR */
+ CacheLevel = 0U;
+ mtcp(CSSELR_EL1,CacheLevel);
+ isb();
+
+ CsidReg = mfcp(CCSIDR_EL1);
+
+ /* Get the cacheline size, way size, index size from csidr */
+ LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+ /* Number of Ways */
+ NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+ NumWays += 0x00000001U;
+
+ /* Number of Sets */
+ NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+ NumSet += 0x00000001U;
+
+ WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+ Way = 0U;
+ Set = 0U;
+
+ /* Invalidate all the cachelines */
+ for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+ for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+ C7Reg = Way | Set | CacheLevel;
+ mtcpdc(ISW,C7Reg);
+ Set += (0x00000001U << LineSize);
+ }
+ Set = 0U;
+ Way += (0x00000001U << WayAdjust);
+ }
+
+ /* Wait for invalidate to complete */
+ dsb();
+
+ /* Select cache level 1 and D cache in CSSR */
+ CacheLevel += (0x00000001U << 1U);
+ mtcp(CSSELR_EL1,CacheLevel);
+ isb();
+
+ CsidReg = mfcp(CCSIDR_EL1);
+
+ /* Get the cacheline size, way size, index size from csidr */
+ LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+ /* Number of Ways */
+ NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+ NumWays += 0x00000001U;
+
+ /* Number of Sets */
+ NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+ NumSet += 0x00000001U;
+
+ WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+ Way = 0U;
+ Set = 0U;
+
+ /* Invalidate all the cachelines */
+ for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+ for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+ C7Reg = Way | Set | CacheLevel;
+ mtcpdc(ISW,C7Reg);
+ Set += (0x00000001U << LineSize);
+ }
+ Set = 0U;
+ Way += (0x00000001U << WayAdjust);
+ }
+ /* Wait for invalidate to complete */
+ dsb();
+
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate a Data cache line. The cacheline is cleaned and
+* invalidated.
+*
+* @param adr: 64bit address of the data to be invalidated.
+*
+* @return None.
+*
+* @note In Cortex-A53, functionality to simply invalidate the cachelines
+* is not present. Such operations are a problem for an environment
+* that supports virtualisation: they would allow one OS to invalidate
+* a line belonging to another OS, which could crash that OS through
+* the loss of essential data. Hence, such operations are promoted to
+* clean and invalidate, which avoids such corruption.
+*
+****************************************************************************/
+void Xil_DCacheInvalidateLine(INTPTR adr)
+{
+
+ u32 currmask;
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ /* Select cache level 0 and D cache in CSSR */
+ mtcp(CSSELR_EL1,0x0);
+ mtcpdc(CIVAC,(adr & (~0x3F)));
+ /* Wait for invalidate to complete */
+ dsb();
+ /* Select cache level 1 and D cache in CSSR */
+ mtcp(CSSELR_EL1,0x2);
+ mtcpdc(IVAC,(adr & (~0x3F)));
+ /* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate the Data cache for the given address range.
+* The cachelines present in the address range are cleaned and
+* invalidated.
+*
+* @param adr: 64bit start address of the range to be invalidated.
+* @param len: Length of the range to be invalidated in bytes.
+*
+* @return None.
+*
+* @note In Cortex-A53, functionality to simply invalidate the cachelines
+* is not present. Such operations are a problem for an environment
+* that supports virtualisation: they would allow one OS to invalidate
+* a line belonging to another OS, which could crash that OS through
+* the loss of essential data. Hence, such operations are promoted to
+* clean and invalidate, which avoids such corruption.
+*
+****************************************************************************/
+void Xil_DCacheInvalidateRange(INTPTR adr, INTPTR len)
+{
+ const INTPTR cacheline = 64U;
+ INTPTR end = adr + len;
+ adr = adr & (~0x3F);
+ u32 currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+ if (len != 0U) {
+ while (adr < end) {
+ mtcpdc(CIVAC,adr);
+ adr += cacheline;
+ }
+ }
+ /* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
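
A typical use of these range operations is keeping a DMA buffer coherent.
The sketch below assumes hypothetical start_dma_to_device,
start_dma_from_device and wait_dma_done helpers; only the cache calls come
from this file, and Xil_DCacheFlushRange is the macro alias for
Xil_DCacheInvalidateRange described in the 7.1 history entry:

#include "xil_cache.h"
#include "xil_types.h"

/* Hypothetical DMA helpers, for illustration only. */
extern void start_dma_to_device(void *buf, u32 len);
extern void start_dma_from_device(void *buf, u32 len);
extern void wait_dma_done(void);

#define BUF_LEN 4096U
static u8 DmaBuf[BUF_LEN] __attribute__((aligned(64)));

void DmaRoundTrip(void)
{
	/* CPU -> device: write dirty lines back before the device reads. */
	Xil_DCacheFlushRange((INTPTR)DmaBuf, BUF_LEN);
	start_dma_to_device(DmaBuf, BUF_LEN);
	wait_dma_done();

	/* Device -> CPU: discard stale lines before the CPU reads. Since
	 * invalidate is promoted to clean+invalidate on the A53, the CPU
	 * must not dirty the buffer while the device owns it. */
	start_dma_from_device(DmaBuf, BUF_LEN);
	wait_dma_done();
	Xil_DCacheInvalidateRange((INTPTR)DmaBuf, BUF_LEN);
}
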
+
+/****************************************************************************/
+/**
+* @brief Flush the Data cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_DCacheFlush(void)
+{
+ register u32 CsidReg, C7Reg;
+ u32 LineSize, NumWays;
+ u32 Way, WayIndex,WayAdjust, Set, SetIndex, NumSet, CacheLevel;
+ u32 currmask;
+
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ /* Select cache level 0 and D cache in CSSR */
+ CacheLevel = 0U;
+ mtcp(CSSELR_EL1,CacheLevel);
+ isb();
+
+ CsidReg = mfcp(CCSIDR_EL1);
+
+ /* Get the cacheline size, way size, index size from csidr */
+ LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+ /* Number of Ways */
+ NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+ NumWays += 0x00000001U;
+
+ /* Number of Sets */
+ NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+ NumSet += 0x00000001U;
+
+ WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+ Way = 0U;
+ Set = 0U;
+
+ /* Flush all the cachelines */
+ for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+ for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+ C7Reg = Way | Set | CacheLevel;
+ mtcpdc(CISW,C7Reg);
+ Set += (0x00000001U << LineSize);
+ }
+ Set = 0U;
+ Way += (0x00000001U << WayAdjust);
+ }
+
+ /* Wait for Flush to complete */
+ dsb();
+
+ /* Select cache level 1 and D cache in CSSR */
+ CacheLevel += (0x00000001U << 1U);
+ mtcp(CSSELR_EL1,CacheLevel);
+ isb();
+
+ CsidReg = mfcp(CCSIDR_EL1);
+
+ /* Get the cacheline size, way size, index size from csidr */
+ LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+ /* Number of Ways */
+ NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+ NumWays += 0x00000001U;
+
+ /* Number of Sets */
+ NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+ NumSet += 0x00000001U;
+
+ WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+ Way = 0U;
+ Set = 0U;
+
+ /* Flush all the cachelines */
+ for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+ for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+ C7Reg = Way | Set | CacheLevel;
+ mtcpdc(CISW,C7Reg);
+ Set += (0x00000001U << LineSize);
+ }
+ Set = 0U;
+ Way += (0x00000001U << WayAdjust);
+ }
+ /* Wait for Flush to complete */
+ dsb();
+
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Flush a Data cache line. If the byte specified by the address (adr)
+* is cached by the Data cache, the cacheline containing that byte is
+* invalidated. If the cacheline is modified (dirty), the entire
+* contents of the cacheline are written to system memory before the
+* line is invalidated.
+*
+* @param adr: 64bit address of the data to be flushed.
+*
+* @return None.
+*
+* @note The bottom 6 bits are set to 0, forced by architecture.
+*
+****************************************************************************/
+void Xil_DCacheFlushLine(INTPTR adr)
+{
+ u32 currmask;
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+ /* Select cache level 0 and D cache in CSSR */
+ mtcp(CSSELR_EL1,0x0);
+ mtcpdc(CIVAC,(adr & (~0x3F)));
+ /* Wait for flush to complete */
+ dsb();
+ /* Select cache level 1 and D cache in CSSR */
+ mtcp(CSSELR_EL1,0x2);
+ mtcpdc(CIVAC,(adr & (~0x3F)));
+ /* Wait for flush to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Enable the instruction cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheEnable(void)
+{
+ u32 CtrlReg;
+
+ if (EL3 == 1) {
+ CtrlReg = mfcp(SCTLR_EL3);
+ } else if (EL1_NONSECURE == 1) {
+ CtrlReg = mfcp(SCTLR_EL1);
+ } else {
+ CtrlReg = 0U;
+ }
+
+ /* enable caches only if they are disabled */
+ if ((CtrlReg & XREG_CONTROL_ICACHE_BIT) == 0x00000000U) {
+ /* invalidate the instruction cache */
+ Xil_ICacheInvalidate();
+
+ CtrlReg |= XREG_CONTROL_ICACHE_BIT;
+
+ if (EL3 == 1) {
+ /* enable the instruction cache for el3*/
+ mtcp(SCTLR_EL3,CtrlReg);
+ } else if (EL1_NONSECURE == 1) {
+ /* enable the instruction cache for el1*/
+ mtcp(SCTLR_EL1,CtrlReg);
+ }
+ }
+}
+
+/****************************************************************************/
+/**
+* @brief Disable the instruction cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheDisable(void)
+{
+ u32 CtrlReg;
+
+ if (EL3 == 1) {
+ CtrlReg = mfcp(SCTLR_EL3);
+ } else if (EL1_NONSECURE == 1) {
+ CtrlReg = mfcp(SCTLR_EL1);
+ } else {
+ CtrlReg = 0U;
+ }
+ /* invalidate the instruction cache */
+ Xil_ICacheInvalidate();
+ CtrlReg &= ~(XREG_CONTROL_ICACHE_BIT);
+
+ if (EL3 == 1) {
+ /* disable the instruction cache */
+ mtcp(SCTLR_EL3,CtrlReg);
+ } else if (EL1_NONSECURE == 1) {
+ /* disable the instruction cache */
+ mtcp(SCTLR_EL1,CtrlReg);
+ }
+
+
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate the entire instruction cache.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheInvalidate(void)
+{
+ u32 currmask;
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+ mtcp(CSSELR_EL1,0x1);
+ dsb();
+ /* invalidate the instruction cache */
+ mtcpicall(IALLU);
+ /* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate an instruction cache line. If the instruction specified
+* by the parameter adr is cached by the instruction cache, the
+* cacheline containing that instruction is invalidated.
+*
+* @param adr: 64bit address of the instruction to be invalidated.
+*
+* @return None.
+*
+* @note The bottom 6 bits are set to 0, forced by architecture.
+*
+****************************************************************************/
+void Xil_ICacheInvalidateLine(INTPTR adr)
+{
+ u32 currmask;
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ mtcp(CSSELR_EL1,0x1);
+ /* Invalidate the I-cache line */
+ mtcpic(IVAU,adr & (~0x3F));
+ /* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief Invalidate the instruction cache for the given address range.
+* If the instructions specified by the address range are cached by
+* the instruction cache, the cachelines containing those
+* instructions are invalidated.
+*
+* @param adr: 64bit start address of the range to be invalidated.
+* @param len: Length of the range to be invalidated in bytes.
+*
+* @return None.
+*
+****************************************************************************/
+void Xil_ICacheInvalidateRange(INTPTR adr, INTPTR len)
+{
+ const INTPTR cacheline = 64U;
+ INTPTR end;
+ INTPTR tempadr = adr;
+ INTPTR tempend;
+ u32 currmask;
+ currmask = mfcpsr();
+ mtcpsr(currmask | IRQ_FIQ_MASK);
+
+ if (len != 0x00000000U) {
+ end = tempadr + len;
+ tempend = end;
+ tempadr &= ~(cacheline - 0x00000001U);
+
+ /* Select cache Level 0 I-cache in CSSR */
+ mtcp(CSSELR_EL1,0x1);
+ while (tempadr < tempend) {
+ /* Invalidate the I-cache line containing tempadr */
+ mtcpic(IVAU,tempadr & (~0x3F));
+
+ tempadr += cacheline;
+ }
+ }
+/* Wait for invalidate to complete */
+ dsb();
+ mtcpsr(currmask);
+}
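
These I-cache routines are typically paired with a D-cache flush when
instructions are written to memory at run time, for example by a program
loader: the new code must first reach memory, then the stale instruction
lines must be dropped. A minimal sketch under that assumption (the caller
and its buffer are hypothetical):

#include "xil_cache.h"
#include "xil_types.h"

/* Make code that was just written to 'dst' fetchable. */
void MakeCodeExecutable(void *dst, u32 len)
{
	Xil_DCacheFlushRange((INTPTR)dst, len);      /* push instructions to memory */
	Xil_ICacheInvalidateRange((INTPTR)dst, len); /* drop stale I-cache lines    */
	/* A context-synchronizing event (e.g. an isb) is still required
	 * before branching to the new code. */
}
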
+
+/****************************************************************************/
+/**
+* @brief Configure the maximum number of outstanding data prefetches
+* allowed in L1 cache.
+*
+* @param num: maximum number of outstanding data prefetches allowed,
+* valid values are 0-7.
+*
+* @return None.
+*
+* @note This function is implemented only for EL3 privilege level.
+*
+*****************************************************************************/
+void Xil_ConfigureL1Prefetch(u8 num)
+{
+#if EL3
+ u64 val = 0;
+
+ val = mfcp(S3_1_C15_C2_0);
+ val &= ~(L1_DATA_PREFETCH_CONTROL_MASK);
+ val |= (num << L1_DATA_PREFETCH_CONTROL_SHIFT);
+ mtcp(S3_1_C15_C2_0,val);
+#endif
+}
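
As a usage note: Xil_ConfigureL1Prefetch has an effect only when the BSP is
built for and running at EL3, since CPUACTLR_EL1 traps at lower exception
levels. Passing 0 disables L1 data prefetching; values 1-7 bound the number
of outstanding prefetches. The calls below are illustrative, not tuning
advice:

#include "xil_cache.h"

void PrefetchConfigExample(void)
{
	Xil_ConfigureL1Prefetch(0U); /* disable L1 data prefetching          */
	Xil_ConfigureL1Prefetch(5U); /* allow up to 5 outstanding prefetches */
}
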
diff --git a/bsps/shared/xil/xil_mem.c b/bsps/shared/xil/xil_mem.c
new file mode 100644
index 0000000000..44e7d9a0c4
--- /dev/null
+++ b/bsps/shared/xil/xil_mem.c
@@ -0,0 +1,70 @@
+/******************************************************************************
+* Copyright (c) 2015 - 2022 Xilinx, Inc. All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/****************************************************************************/
+/**
+* @file xil_mem.c
+*
+* This file contains the Xil_MemCpy function, intended for word-aligned
+* data copies.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- -------- -------- -----------------------------------------------
+* 6.1 nsk 11/07/16 First release.
+* 7.7 sk 01/10/22 Update Xil_MemCpy functions variables typecast
+* from int to s32 to fix misra_c_2012_directive_4_6
+* violations.
+* 7.7 sk 01/10/22 Include xil_mem.h header file to fix Xil_MemCpy
+* prototype misra_c_2012_rule_8_4 violation.
+*
+* </pre>
+*
+*****************************************************************************/
+
+/***************************** Include Files ********************************/
+
+#include "xil_types.h"
+#include "xil_mem.h"
+
+/***************** Inline Functions Definitions ********************/
+/*****************************************************************************/
+/**
+* @brief This function copies memory from one location to another.
+*
+* @param dst: pointer pointing to destination memory
+*
+* @param src: pointer pointing to source memory
+*
+* @param cnt: number of bytes to be copied
+*
+*****************************************************************************/
+void Xil_MemCpy(void* dst, const void* src, u32 cnt)
+{
+ char *d = (char*)(void *)dst;
+ const char *s = src;
+
+ while (cnt >= sizeof (s32)) {
+ *(s32*)d = *(s32*)s;
+ d += sizeof (s32);
+ s += sizeof (s32);
+ cnt -= sizeof (s32);
+ }
+ while (cnt >= sizeof (u16)) {
+ *(u16*)d = *(u16*)s;
+ d += sizeof (u16);
+ s += sizeof (u16);
+ cnt -= sizeof (u16);
+ }
+ while ((cnt) > 0U){
+ *d = *s;
+ d += 1U;
+ s += 1U;
+ cnt -= 1U;
+ }
+}
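
Because the first loop above dereferences both pointers as s32, Xil_MemCpy
is only safe when source and destination share word alignment, as the file
header notes; for arbitrary alignment, a plain byte copy (or memcpy) is the
safer choice. A small usage sketch under that assumption:

#include "xil_types.h"
#include "xil_mem.h"

/* Both buffers are naturally 4-byte aligned, so the s32 fast path in
 * Xil_MemCpy is safe for this copy. */
static u32 Src[64];
static u32 Dst[64];

void CopyExample(void)
{
	Xil_MemCpy(Dst, Src, sizeof(Src));
}
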