Diffstat (limited to 'bsps/arm/altera-cyclone-v/contrib')
-rw-r--r--  bsps/arm/altera-cyclone-v/contrib/hwlib/README.txt                           19
-rw-r--r--  bsps/arm/altera-cyclone-v/contrib/hwlib/include/alt_qspi.h                 1535
-rw-r--r--  bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_16550_uart.c         1179
-rw-r--r--  bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_address_space.c       509
-rw-r--r--  bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_clock_manager.c      5554
-rw-r--r--  bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_dma.c                3749
-rw-r--r--  bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_dma_program.c        1064
-rw-r--r--  bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_generalpurpose_io.c   777
-rw-r--r--  bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_i2c.c                2004
-rw-r--r--  bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_qspi.c               2619
-rw-r--r--  bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_reset_manager.c       135
11 files changed, 19144 insertions, 0 deletions
diff --git a/bsps/arm/altera-cyclone-v/contrib/hwlib/README.txt b/bsps/arm/altera-cyclone-v/contrib/hwlib/README.txt
new file mode 100644
index 0000000000..d0f505da76
--- /dev/null
+++ b/bsps/arm/altera-cyclone-v/contrib/hwlib/README.txt
@@ -0,0 +1,19 @@
+HWLIB
+=====
+Hwlib is a collection of sources provided by Altera for the Cyclone-V.
+
+As hwlib is third party software, please keep modifications and additions
+to the sources to a minimum for easy maintenance. Otherwise, updating to a
+new version of hwlib released by Altera can become difficult.
+
+The hwlib directory contains only those files from Altera's hwlib which are
+required by the BSP (the whole hwlib was considered too big).
+The directory structure within the hwlib directory is equivalent to Altera's
+hwlib directory structure. For easy maintenance, only whole files have been
+left out.
+
+Altera provides the hwlib with their SoC Embedded Design Suite (EDS).
+
+HWLIB Version:
+--------------
+All files are from hwlib 13.1 distributed with SoC EDS 14.0.0.200.
diff --git a/bsps/arm/altera-cyclone-v/contrib/hwlib/include/alt_qspi.h b/bsps/arm/altera-cyclone-v/contrib/hwlib/include/alt_qspi.h
new file mode 100644
index 0000000000..982c0aceae
--- /dev/null
+++ b/bsps/arm/altera-cyclone-v/contrib/hwlib/include/alt_qspi.h
@@ -0,0 +1,1535 @@
+/******************************************************************************
+*
+* Copyright 2013 Altera Corporation. All Rights Reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+* 1. Redistributions of source code must retain the above copyright notice,
+* this list of conditions and the following disclaimer.
+*
+* 2. Redistributions in binary form must reproduce the above copyright notice,
+* this list of conditions and the following disclaimer in the documentation
+* and/or other materials provided with the distribution.
+*
+* 3. The name of the author may not be used to endorse or promote products
+* derived from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO
+* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+* OF SUCH DAMAGE.
+*
+******************************************************************************/
+
+/******************************************************************************
+*
+* !!!! Customer Be Aware, Exception !!!!
+*
+* 1. QSPI Direct Access Mode is not working!
+*
+* This is because the QSPI flash memory installed on our DevKit board, the
+* Micron part N25Q00xx, 8 Gb, is not completely compatible with our embedded
+* Synopsys QSPI controller IP. Therefore there is no viable direct access code
+* offered in the lib. All the memory read/write functionality is offered with
+* indirect access only.
+*
+* Should you install a different flash memory part in your custom board and
+* wonder whether direct access mode works, please contact us.
+*
+******************************************************************************/
+
+/*! \file
+ * Altera - QSPI Flash Controller Module
+ */
+
+#ifndef __ALT_QSPI_H__
+#define __ALT_QSPI_H__
+
+#include <bsp/hwlib.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+/******************************************************************************/
+/*! \addtogroup ALT_QSPI QSPI Flash Controller Module
+ *
+ * This module defines a low level driver API for the hardware processor system
+ * (HPS) quad serial peripheral interface (QSPI) flash controller for access to
+ * serial NOR flash devices. The quad SPI flash controller supports standard SPI
+ * flash devices as well as high-performance dual and quad SPI flash
+ * devices.
+ *
+ * @{
+ */
+
+/******************************************************************************/
+/*! \addtogroup ALT_QSPI_CSR General Control and Status Functions
+ *
+ * The declarations and functions in this group provide general purpose control
+ * and status functions for the QSPI Flash Controller.
+ *
+ * @{
+ */
+
+/******************************************************************************/
+/*!
+ * Initialize the QSPI flash controller for use.
+ *
+ * \internal
+ * Implementation Notes:
+ * * The QSPI Controller has been designed to wake up in a state that is
+ * suitable for performing basic reads and writes using the direct access
+ * controller.
+ * * Bring out of reset
+ * * QSPI reference clock validation
+ * * See Programmer's Guide, Configuring the QSPI Controller for use after
+ * reset, in QSPI_FLASH_CTRL for full initialization details.
+ * \endinternal
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_init(void);
+
+/******************************************************************************/
+/*!
+ * Uninitialize the QSPI flash controller.
+ *
+ * Uninitialize the QSPI flash controller by cancelling any indirect transfers
+ * in progress and putting the QSPI controller into reset.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_uninit(void);
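+
+/*
+ * Editor's example sketch (not part of the original Altera hwlib sources):
+ * a minimal bring-up / tear-down sequence, assuming this header is included
+ * and the QSPI reference clock is already configured.
+ *
+ *   ALT_STATUS_CODE status = alt_qspi_init();
+ *   if (status == ALT_E_SUCCESS)
+ *   {
+ *       // ... perform QSPI accesses ...
+ *
+ *       status = alt_qspi_uninit();
+ *   }
+ */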
+
+/******************************************************************************/
+/*!
+ * Disable the QSPI Controller.
+ *
+ * Disable the QSPI once the current transfer of the data word (FF_W) is
+ * complete. All output enables are inactive and all pins are set to input
+ * mode.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_disable(void);
+
+/******************************************************************************/
+/*!
+ * Enable the QSPI Controller.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_enable(void);
+
+/******************************************************************************/
+/*!
+ * This type definition enumerates the interrupt status conditions for the QSPI
+ * controller.
+ *
+ * The enumerations serve as masks for the QSPI controller events that can be
+ * set when the designated conditions occur and the corresponding event is
+ * enabled. When any of these event source conditions are true, the \b
+ * ALT_INT_INTERRUPT_QSPI_IRQ interrupt output is asserted high.
+ *
+ * Interrupt sources are cleared when software calls alt_qspi_int_clear(). The
+ * interrupt sources are individually maskable using alt_qspi_int_disable() and
+ * alt_qspi_int_enable().
+ */
+typedef enum ALT_QSPI_INT_STATUS_e
+{
+ /*!
+ * Mode fail M - indicates the voltage on pin n_ss_in is inconsistent with
+ * the SPI mode. Set = 1 if n_ss_in is low in master mode (multi-master
+ * contention). These conditions will clear the spi_enable bit and disable
+ * the SPI.
+ * * 0 = no mode fault has been detected.
+ * * 1 = a mode fault has occurred.
+ */
+ ALT_QSPI_INT_STATUS_MODE_FAIL = (0x1 << 0),
+
+ /*!
+ * Underflow Detected.
+ * * 0 = no underflow has been detected.
+ * * 1 = underflow is detected and an attempt to transfer data is made
+ * when the small TX FIFO is empty. This may occur when AHB write
+ * data is being supplied too slowly to keep up with the requested
+ * write operation.
+ */
+ ALT_QSPI_INT_STATUS_UFL = (0x1 << 1),
+
+ /*!
+ * Controller has completed last triggered indirect operation.
+ */
+ ALT_QSPI_INT_STATUS_IDAC_OP_COMPLETE = (0x1 << 2),
+
+ /*!
+ * Indirect operation was requested but could not be accepted. Two indirect
+ * operations already in storage.
+ */
+ ALT_QSPI_INT_STATUS_IDAC_OP_REJECT = (0x1 << 3),
+
+ /*!
+ * Write to protected area was attempted and rejected.
+ */
+ ALT_QSPI_INT_STATUS_WR_PROT_VIOL = (0x1 << 4),
+
+ /*!
+ * Illegal AHB Access Detected. AHB write wrapping bursts and the use of
+ * SPLIT/RETRY accesses will cause this interrupt to trigger.
+ */
+ ALT_QSPI_INT_STATUS_ILL_AHB_ACCESS = (0x1 << 5),
+
+ /*!
+ * Indirect Transfer Watermark Level Breached.
+ */
+ ALT_QSPI_INT_STATUS_IDAC_WTRMK_TRIG = (0x1 << 6),
+
+ /*!
+ * Receive Overflow. This should only occur in Legacy SPI mode.
+ *
+ * Set if an attempt is made to push the RX FIFO when it is full. This bit
+ * is reset only by a system reset and cleared only when this register is
+ * read. If a new push to the RX FIFO occurs coincident with a register read
+ * this flag will remain set.
+ * * 0 = no overflow has been detected.
+ * * 1 = an overflow has occurred.
+ */
+ ALT_QSPI_INT_STATUS_RX_OVF = (0x1 << 7),
+
+ /*!
+ * Small TX FIFO not full (current FIFO status). Can be ignored in non-SPI
+ * legacy mode.
+ * * 0 = FIFO has >= THRESHOLD entries.
+ * * 1 = FIFO has < THRESHOLD entries.
+ */
+ ALT_QSPI_INT_STATUS_TX_FIFO_NOT_FULL = (0x1 << 8),
+
+ /*!
+ * Small TX FIFO full (current FIFO status). Can be ignored in non-SPI
+ * legacy mode.
+ * * 0 = FIFO is not full.
+ * * 1 = FIFO is full.
+ */
+ ALT_QSPI_INT_STATUS_TX_FIFO_FULL = (0x1 << 9),
+
+ /*!
+ * Small RX FIFO not empty (current FIFO status). Can be ignored in non-SPI
+ * legacy mode.
+ * * 0 = FIFO has < RX THRESHOLD entries.
+ * * 1 = FIFO has >= RX THRESHOLD entries.
+ */
+ ALT_QSPI_INT_STATUS_RX_FIFO_NOT_EMPTY = (0x1 << 10),
+
+ /*!
+ * Small RX FIFO full (current FIFO status). Can be ignored in non-SPI
+ * legacy mode.
+ * * 0 = FIFO is not full.
+ * * 1 = FIFO is full.
+ */
+ ALT_QSPI_INT_STATUS_RX_FIFO_FULL = (0x1 << 11),
+
+ /*!
+ * Indirect Read partition of SRAM is full and unable to immediately
+ * complete indirect operation.
+ */
+ ALT_QSPI_INT_STATUS_IDAC_RD_FULL = (0x1 << 12)
+
+} ALT_QSPI_INT_STATUS_t;
+
+/******************************************************************************/
+/*!
+ * Returns the QSPI controller interrupt status register value.
+ *
+ * This function returns the current value of the QSPI controller interrupt
+ * status register value which reflects the current QSPI controller status
+ * conditions.
+ *
+ * \returns The current value of the QSPI controller interrupt status
+ * register value which reflects the current QSPI controller status
+ * conditions as defined by the \ref ALT_QSPI_INT_STATUS_t mask.
+ * If the corresponding bit is set then the condition is asserted.
+ */
+uint32_t alt_qspi_int_status_get(void);
+
+/******************************************************************************/
+/*!
+ * Clears the specified QSPI controller interrupt status conditions identified
+ * in the mask.
+ *
+ * This function clears one or more of the status conditions as contributors to
+ * the \b ALT_INT_INTERRUPT_QSPI_IRQ interrupt signal state.
+ *
+ * \param mask
+ * Specifies the QSPI interrupt status conditions to clear. \e
+ * mask is a mask of logically OR'ed \ref ALT_QSPI_INT_STATUS_t
+ * values that designate the status conditions to clear.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_int_clear(const uint32_t mask);
+
+/******************************************************************************/
+/*!
+ * Disable the specified QSPI controller interrupt status conditions identified
+ * in the mask.
+ *
+ * This function disables one or more of the status conditions as contributors
+ * to the \b ALT_INT_INTERRUPT_QSPI_IRQ interrupt signal state.
+ *
+ * This API requires that the QSPI controller be idle, as determined by
+ * alt_qspi_is_idle().
+ *
+ * NOTE: A cleared bit for any status condition in the mask value does not have
+ * the effect of enabling it as a contributor to the \b
+ * ALT_INT_INTERRUPT_QSPI_IRQ interrupt signal state. The function
+ * alt_qspi_int_enable() is used to enable status source conditions.
+ *
+ * \param mask
+ * Specifies the status conditions to disable as interrupt source
+ * contributors. \e mask is a mask of logically OR'ed
+ * \ref ALT_QSPI_INT_STATUS_t values that designate the status
+ * conditions to disable.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_int_disable(const uint32_t mask);
+
+/******************************************************************************/
+/*!
+ * Enable the specified QSPI controller interrupt status conditions identified
+ * in the mask.
+ *
+ * This function enables one or more of the status conditions as contributors to
+ * the \b ALT_INT_INTERRUPT_QSPI_IRQ interrupt signal state.
+ *
+ * This API requires that the QSPI controller be idle, as determined by
+ * alt_qspi_is_idle().
+ *
+ * NOTE: A cleared bit for any status condition in the mask value does not have
+ * the effect of disabling it as a contributor to the \b
+ * ALT_INT_INTERRUPT_QSPI_IRQ interrupt signal state. The function
+ * alt_qspi_int_disable() is used to disable status source conditions.
+ *
+ * \param mask
+ * Specifies the status conditions to enable as interrupt source
+ * contributors. \e mask is a mask of logically OR'ed
+ * \ref ALT_QSPI_INT_STATUS_t values that designate the status
+ * conditions to enable.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_int_enable(const uint32_t mask);
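+
+/*
+ * Editor's example sketch (not part of the original Altera hwlib sources):
+ * enabling one status condition as an interrupt source and acknowledging it
+ * later. The mask is an ALT_QSPI_INT_STATUS_t value from this header; the
+ * interrupt controller setup itself is outside the scope of this API.
+ *
+ *   alt_qspi_int_enable(ALT_QSPI_INT_STATUS_IDAC_OP_COMPLETE);
+ *
+ *   // Later, e.g. in the interrupt handler:
+ *   uint32_t status = alt_qspi_int_status_get();
+ *   if (status & ALT_QSPI_INT_STATUS_IDAC_OP_COMPLETE)
+ *   {
+ *       alt_qspi_int_clear(ALT_QSPI_INT_STATUS_IDAC_OP_COMPLETE);
+ *   }
+ */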
+
+/******************************************************************************/
+/*!
+ * Returns true if the serial interface and QSPI pipeline are IDLE.
+ *
+ * \returns Returns true if the serial interface and QSPI pipeline are IDLE.
+ */
+bool alt_qspi_is_idle(void);
+
+/*! @} */
+
+/******************************************************************************/
+/*! \addtogroup ALT_QSPI_GP_BLKIO General Purpose Block I/O
+ *
+ * The functions in this group provide general purpose block read and
+ * write flash functions.
+ *
+ * \internal
+ * These functions use Indirect Read/Write transfers to read and write block
+ * data to the flash device. An outline of the operational flow for these
+ * operations can be found in:
+ * //depot/soc/hhp_sw/baremetal_fw/drivers/qspi/qspi.c
+ *
+ * The general flow for an indirect block read is to call
+ * qspi_configure_mode_indirect_read_start() to initiate the read transfer from
+ * the flash device into the SRAM buffer and follow with a call to either
+ * qspi_read_sram_fifo_poll() or qspi_read_sram_fifo_irq() to copy the data
+ * from SRAM into the user's buffer.
+ *
+ * The general flow for an indirect block write is to call
+ * qspi_configure_mode_indirect_write_start() to initiate the write transfer
+ * from the SRAM buffer to the flash device and follow with a call to either
+ * qspi_write_sram_fifo_poll() or qspi_write_sram_fifo_irq() to fill the SRAM
+ * buffer with the user's data as space becomes available.
+ * \endinternal
+ *
+ * @{
+ */
+
+/******************************************************************************/
+/*!
+ * Read a block of data from the specified flash address.
+ *
+ * Reads a block of \e n data bytes from the flash \e src address into the user
+ * supplied \e dest buffer. The memory address, flash address, and size must be
+ * word aligned.
+ *
+ * \param dest
+ * The address of a caller supplied destination buffer large enough
+ * to contain the requested block of flash data.
+ *
+ * \param src
+ * The flash device address to start reading data from.
+ *
+ * \param size
+ * The requested number of data bytes to read from the flash device.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_read(void * dest, uint32_t src, size_t size);
+
+/******************************************************************************/
+/*!
+ * Write a block of data to the specified flash address.
+ *
+ * Writes a block of \e n data bytes to the flash \e dest address from the
+ * designated \e src buffer. The applicable destination flash address range
+ * should have been erased prior to calling this function. The flash address,
+ * memory address, and size must be word aligned.
+ *
+ * \param dest
+ * The destination flash address to begin writing data to.
+ *
+ * \param src
+ * The source address to start writing data from.
+ *
+ * \param size
+ * The requested number of data bytes to write to the flash device.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_write(uint32_t dest, const void * src, size_t size);
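+
+/*
+ * Editor's example sketch (not part of the original Altera hwlib sources):
+ * a word-aligned block write followed by a read-back. The flash offset
+ * 0x00010000 and the buffer size are arbitrary illustration values; the
+ * target range is erased first because alt_qspi_write() requires it.
+ *
+ *   uint32_t tx[16] = { 0 };   // 64 bytes of word-aligned data to program
+ *   uint32_t rx[16];
+ *   uint32_t flash_addr = 0x00010000;
+ *
+ *   if (alt_qspi_erase_subsector(flash_addr) == ALT_E_SUCCESS
+ *       && alt_qspi_write(flash_addr, tx, sizeof(tx)) == ALT_E_SUCCESS)
+ *   {
+ *       alt_qspi_read(rx, flash_addr, sizeof(rx));
+ *   }
+ */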
+
+/*! @} */
+
+/******************************************************************************/
+/*! \addtogroup ALT_QSPI_DEV_CFG Flash Device Configuration
+ *
+ * The declarations and functions in this group are used to configure the QSPI
+ * controller interface to external flash devices.
+ *
+ * The following steps describe how to initialize and configure the
+ * QSPI controller to operate with a flash device (an example sketch
+ * follows this comment block).
+ *
+ * * Wait until any pending QSPI operations have completed.
+ * * Disable the QSPI controller using alt_qspi_disable().
+ * * Configure the device for optimal read transaction performance using
+ * alt_qspi_device_read_config_set().
+ * * Configure the device for optimal write transaction performance using
+ * alt_qspi_device_write_config_set().
+ * * Enable (alt_qspi_mode_bit_enable()) or disable
+ * (alt_qspi_mode_bit_disable()) the mode bits per the device
+ * requirements. If mode bits are enabled, then configure the mode
+ * bit values using alt_qspi_mode_bit_config_set().
+ * * Configure the device size and write protection information using
+ * alt_qspi_device_size_config_set().
+ * * Configure the QSPI device delay and timing settings using
+ * alt_qspi_timing_config_set().
+ * * Configure the baud divisor setting to define the required clock frequency
+ * to the device using alt_qspi_baud_rate_div_set().
+ * * Enable the QSPI controller using alt_qspi_enable().
+ *
+ * @{
+ */
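+
+/*
+ * Editor's example sketch (not part of the original Altera hwlib sources):
+ * the configuration sequence listed above. rd_cfg, wr_cfg, size_cfg and
+ * timing_cfg are placeholder structures that must be filled in for the
+ * actual flash device; return value checks are omitted for brevity.
+ *
+ *   while (!alt_qspi_is_idle())
+ *   {
+ *       // wait for pending operations to complete
+ *   }
+ *   alt_qspi_disable();
+ *   alt_qspi_device_read_config_set(&rd_cfg);
+ *   alt_qspi_device_write_config_set(&wr_cfg);
+ *   alt_qspi_mode_bit_disable();               // or enable + mode_bit_config_set()
+ *   alt_qspi_device_size_config_set(&size_cfg);
+ *   alt_qspi_timing_config_set(&timing_cfg);
+ *   alt_qspi_baud_rate_div_set(ALT_QSPI_BAUD_DIV_4);
+ *   alt_qspi_enable();
+ */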
+
+/******************************************************************************/
+/*!
+ * This type enumerates the operational modes the QSPI controller can be
+ * configured for. It may apply to instruction, address, and/or data width
+ * interactions between the QSPI controller and the flash device.
+ */
+typedef enum ALT_QSPI_MODE_e
+{
+ ALT_QSPI_MODE_SINGLE = 0, /*!< Use Standard Single SPI (SIO-SPI) mode (bits
+ * always transferred into the device on DQ0
+ * only). Supported by all SPI flash devices.
+ */
+ ALT_QSPI_MODE_DUAL = 1, /*!< Use Dual SPI (DIO-SPI) mode where bits are
+ * transferred on DQ0 and DQ1.
+ */
+ ALT_QSPI_MODE_QUAD = 2 /*!< Use Quad SPI (QIO-SPI) mode where bits are
+ * transferred on DQ0, DQ1, DQ2, and DQ3.
+ */
+} ALT_QSPI_MODE_t;
+
+/******************************************************************************/
+/*!
+ * This type enumerates the mode configurations available for driving the
+ * ss_n[3:0] device chip selects. The chip selects may be controlled as either
+ * in a '1 of 4' or '4 to 16 decode' mode.
+ */
+typedef enum ALT_QSPI_CS_MODE_e
+{
+ ALT_QSPI_CS_MODE_SINGLE_SELECT = 0, /*!< Select 1 of 4 chip select ss_n[3:0]
+ */
+ ALT_QSPI_CS_MODE_DECODE = 1 /*!< Select external 4 to 16 decode of
+ * ss_n[3:0].
+ */
+} ALT_QSPI_CS_MODE_t;
+
+/******************************************************************************/
+/*!
+ * This type enumerates the QSPI controller master baud rate divisor selections.
+ */
+typedef enum ALT_QSPI_BAUD_DIV_e
+{
+ ALT_QSPI_BAUD_DIV_2 = 0x0, /*!< Divide by 2 */
+ ALT_QSPI_BAUD_DIV_4 = 0x1, /*!< Divide by 4 */
+ ALT_QSPI_BAUD_DIV_6 = 0x2, /*!< Divide by 6 */
+ ALT_QSPI_BAUD_DIV_8 = 0x3, /*!< Divide by 8 */
+ ALT_QSPI_BAUD_DIV_10 = 0x4, /*!< Divide by 10 */
+ ALT_QSPI_BAUD_DIV_12 = 0x5, /*!< Divide by 12 */
+ ALT_QSPI_BAUD_DIV_14 = 0x6, /*!< Divide by 14 */
+ ALT_QSPI_BAUD_DIV_16 = 0x7, /*!< Divide by 16 */
+ ALT_QSPI_BAUD_DIV_18 = 0x8, /*!< Divide by 18 */
+ ALT_QSPI_BAUD_DIV_20 = 0x9, /*!< Divide by 20 */
+ ALT_QSPI_BAUD_DIV_22 = 0xA, /*!< Divide by 22 */
+ ALT_QSPI_BAUD_DIV_24 = 0xB, /*!< Divide by 24 */
+ ALT_QSPI_BAUD_DIV_26 = 0xC, /*!< Divide by 26 */
+ ALT_QSPI_BAUD_DIV_28 = 0xD, /*!< Divide by 28 */
+ ALT_QSPI_BAUD_DIV_30 = 0xE, /*!< Divide by 30 */
+ ALT_QSPI_BAUD_DIV_32 = 0xF /*!< Divide by 32 */
+} ALT_QSPI_BAUD_DIV_t;
+
+/******************************************************************************/
+/*!
+ * Device Size Configuration
+ *
+ * This type defines the structure used to specify flash device size and write
+ * protect regions.
+ */
+typedef struct ALT_QSPI_DEV_SIZE_CONFIG_s
+{
+ uint32_t block_size; /*!< Number of bytes per device block. The
+ * number is specified as a power of 2.
+ * That is 0 = 1 byte, 1 = 2 bytes, ...
+ * 16 = 65536 bytes, etc.
+ */
+ uint32_t page_size; /*!< Number of bytes per device page. This
+ * is required by the controller for
+ * performing flash writes up to and
+ * across page boundaries.
+ */
+ uint32_t addr_size; /*!< Number of bytes used for the flash
+ * address. The value is \e n + 1
+ * based. That is 0 = 1 byte, 1 = 2 bytes,
+ * 2 = 3 bytes, 3 = 4 bytes.
+ */
+ uint32_t lower_wrprot_block; /*!< The block number that defines the lower
+ * block in the range of blocks that is
+ * protected from writing. This field
+ * is ignored if write protection is
+ * disabled.
+ */
+ uint32_t upper_wrprot_block; /*!< The block number that defines the upper
+ * block in the range of blocks that is
+ * protected from writing. This field
+ * is ignored if write protection is
+ * disabled.
+ */
+ bool wrprot_enable; /*!< The write region enable value. A value
+ * of \b true enables write protection
+ * on the region specified by the
+ * \e lower_wrprot_block and
+ * \e upper_wrprot_block range.
+ */
+} ALT_QSPI_DEV_SIZE_CONFIG_t;
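+
+/*
+ * Editor's example sketch (not part of the original Altera hwlib sources):
+ * a size configuration for a hypothetical flash device with 64 KiB blocks,
+ * 256-byte pages and 3-byte addressing, write protection disabled. Note the
+ * encodings: block_size is a power of 2 (16 -> 65536 bytes) and addr_size
+ * is n + 1 based (2 -> 3 bytes).
+ *
+ *   ALT_QSPI_DEV_SIZE_CONFIG_t size_cfg =
+ *   {
+ *       .block_size         = 16,    // 2^16 = 65536 bytes per block
+ *       .page_size          = 256,   // bytes per page
+ *       .addr_size          = 2,     // 2 + 1 = 3 address bytes
+ *       .lower_wrprot_block = 0,
+ *       .upper_wrprot_block = 0,
+ *       .wrprot_enable      = false
+ *   };
+ */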
+
+/******************************************************************************/
+/*!
+ * This type enumerates the QSPI clock phase activity options outside the SPI
+ * word.
+ */
+typedef enum ALT_QSPI_CLK_PHASE_e
+{
+ ALT_QSPI_CLK_PHASE_ACTIVE = 0, /*!< The SPI clock is active outside the
+ * word
+ */
+ ALT_QSPI_CLK_PHASE_INACTIVE = 1 /*!< The SPI clock is inactive outside the
+ * word
+ */
+} ALT_QSPI_CLK_PHASE_t;
+
+/******************************************************************************/
+/*!
+ * This type enumerates the QSPI clock polarity options outside the SPI word.
+ */
+typedef enum ALT_QSPI_CLK_POLARITY_e
+{
+ ALT_QSPI_CLK_POLARITY_LOW = 0, /*!< SPI clock is quiescent low outside the
+ * word.
+ */
+ ALT_QSPI_CLK_POLARITY_HIGH = 1 /*!< SPI clock is quiescent high outside the
+ * word.
+ */
+} ALT_QSPI_CLK_POLARITY_t;
+
+/******************************************************************************/
+/*!
+ * QSPI Controller Timing Configuration
+ *
+ * This type defines the structure used to configure timing parameters used by
+ * the QSPI controller to communicate with a target flash device.
+ *
+ * All timing values are defined in cycles of the SPI master ref clock.
+ */
+typedef struct ALT_QSPI_TIMING_CONFIG_s
+{
+ ALT_QSPI_CLK_PHASE_t clk_phase; /*!< Selects whether the clock is in an
+ * active or inactive phase outside the
+ * SPI word.
+ */
+
+ ALT_QSPI_CLK_POLARITY_t clk_pol; /*!< Selects whether the clock is quiescent
+ * low or high outside the SPI word.
+ */
+
+ uint32_t cs_da; /*!< Chip Select De-Assert. Added delay in
+ * master reference clocks for the length
+ * that the master mode chip select
+ * outputs are de-asserted between
+ * transactions. If CSDA = \e X, then the
+ * chip select de-assert time will be: 1
+ * sclk_out + 1 ref_clk + \e X ref_clks.
+ */
+ uint32_t cs_dads; /*!< Chip Select De-Assert Different
+ * Slaves. Delay in master reference
+ * clocks between one chip select being
+ * de-activated and the activation of
+ * another. This is used to ensure a quiet
+ * period between the selection of two
+ * different slaves. CSDADS is only
+ * relevant when switching between 2
+ * different external flash devices. If
+ * CSDADS = \e X, then the delay will be:
+ * 1 sclk_out + 3 ref_clks + \e X
+ * ref_clks.
+ */
+ uint32_t cs_eot; /*!< Chip Select End Of Transfer. Delay in
+ * master reference clocks between last
+ * bit of current transaction and
+ * de-asserting the device chip select
+ * (n_ss_out). By default (when CSEOT=0),
+ * the chip select will be de-asserted on
+ * the last falling edge of sclk_out at
+ * the completion of the current
+ * transaction. If CSEOT = \e X, then chip
+ * selected will de-assert \e X ref_clks
+ * after the last falling edge of
+ * sclk_out.
+ */
+ uint32_t cs_sot; /*!< Chip Select Start Of Transfer. Delay in
+ * master reference clocks between setting
+ * n_ss_out low and first bit transfer. By
+ * default (CSSOT=0), chip select will be
+ * asserted half a SCLK period before the
+ * first rising edge of sclk_out. If CSSOT
+ * = \e X, chip select will be asserted
+ * half an sclk_out period before the
+ * first rising edge of sclk_out + \e X
+ * ref_clks.
+ */
+
+ uint32_t rd_datacap; /*!< The additional number of read data
+ * capture cycles (ref_clk) that should be
+ * applied to the internal read data
+ * capture circuit. The large
+ * clock-to-out delay of the flash memory
+ * together with trace delays as well as
+ * other device delays may impose a
+ * maximum flash clock frequency which is
+ * less than the flash memory device
+ * itself can operate at. To compensate,
+ * software should set this register to a
+ * value that guarantees robust data
+ * captures.
+ */
+} ALT_QSPI_TIMING_CONFIG_t;
+
+/******************************************************************************/
+/*!
+ * Device Instruction Configuration
+ *
+ * This type defines a structure for specifying the optimal instruction set
+ * configuration to use with a target flash device.
+ */
+typedef struct ALT_QSPI_DEV_INST_CONFIG_s
+{
+ uint32_t op_code; /*!< The read or write op code to use
+ * for the device transaction.
+ */
+ ALT_QSPI_MODE_t inst_type; /*!< Instruction mode type for the
+ * controller to use with the
+ * device. The instruction type
+ * applies to all instructions
+ * (reads and writes) issued from
+ * either the Direct Access
+ * Controller or the Indirect
+ * Access Controller.
+ */
+ ALT_QSPI_MODE_t addr_xfer_type; /*!< Address transfer mode type. The
+ * value of this field is ignored
+ * if the \e inst_type data member
+ * is set to anything other than
+ * ALT_QSPI_MODE_SINGLE. In that
+ * case, the addr_xfer_type
+ * assumes the same mode as the \e
+ * inst_type.
+ */
+ ALT_QSPI_MODE_t data_xfer_type; /*!< Data transfer mode type. The
+ * value of this field is ignored
+ * if the \e inst_type data member
+ * is set to anything other than
+ * ALT_QSPI_MODE_SINGLE. In that
+ * case, the data_xfer_type
+ * assumes the same mode as the \e
+ * inst_type.
+ */
+ uint32_t dummy_cycles; /*!< Number of dummy clock cycles
+ * required by device for a read
+ * or write instruction.
+ */
+
+} ALT_QSPI_DEV_INST_CONFIG_t;
+
+/******************************************************************************/
+/*!
+ * Get the current value of the QSPI master baud rate divisor.
+ *
+ * \returns The value of the QSPI master baud rate divisor.
+ */
+ALT_QSPI_BAUD_DIV_t alt_qspi_baud_rate_div_get(void);
+
+/******************************************************************************/
+/*!
+ * Set the current value of the QSPI master baud rate divisor.
+ *
+ * Sets the value of the QSPI master baud rate divisor.
+ *
+ * \param baud_rate_div
+ * The master baud rate divisor. Valid range includes
+ * even values 2 to 32.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_baud_rate_div_set(const ALT_QSPI_BAUD_DIV_t baud_rate_div);
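+
+/*
+ * Editor's example sketch (not part of the original Altera hwlib sources):
+ * selecting a divide-by-4 serial clock, i.e. the flash device clock is the
+ * QSPI reference clock divided by 4.
+ *
+ *   if (alt_qspi_baud_rate_div_set(ALT_QSPI_BAUD_DIV_4) != ALT_E_SUCCESS)
+ *   {
+ *       // handle error
+ *   }
+ */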
+
+/******************************************************************************/
+/*!
+ * Get the current QSPI device peripheral chip select output and decode function
+ * configuration values.
+ *
+ * \param cs
+ * [out] The chip select line output values.
+ *
+ * \param cs_mode
+ * [out] The decode mode to use for the chip selects.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_chip_select_config_get(uint32_t* cs, ALT_QSPI_CS_MODE_t* cs_mode);
+
+/******************************************************************************/
+/*!
+ * Set the QSPI device peripheral chip select outputs and decode function
+ * configuration.
+ *
+ * The chip select lines output values operate according to the selected chip
+ * select decode mode. If \e cs_mode is ALT_QSPI_CS_MODE_SINGLE_SELECT then
+ * cs[3:0] are output thus:
+ *
+ * cs[3:0] | n_ss_out[3:0]
+ * :---------|:----------------------------
+ * xxx0 | 1110
+ * xx01 | 1101
+ * x011 | 1011
+ * 0111 | 0111
+ * 1111 | 1111 (no peripheral selected)
+ *
+ * Otherwise if \e cs_mode is ALT_QSPI_CS_MODE_DECODE then cs[3:0] directly
+ * drives n_ss_out[3:0].
+ *
+ * \param cs
+ * The chip select line output values.
+ *
+ * \param cs_mode
+ * The decode mode to use for the chip selects.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_chip_select_config_set(const uint32_t cs,
+ const ALT_QSPI_CS_MODE_t cs_mode);
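+
+/*
+ * Editor's example sketch (not part of the original Altera hwlib sources):
+ * selecting device 0 in 1-of-4 mode. Per the table above, cs = 0xE (xxx0)
+ * drives n_ss_out[3:0] = 1110, i.e. only chip select 0 is active.
+ *
+ *   alt_qspi_chip_select_config_set(0xE, ALT_QSPI_CS_MODE_SINGLE_SELECT);
+ */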
+
+/******************************************************************************/
+/*!
+ * Disable the mode bits from being sent after the address bytes.
+ *
+ * Prevent the mode bits defined in the Mode Bit Configuration register from
+ * being sent following the address bytes.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_mode_bit_disable(void);
+
+/******************************************************************************/
+/*!
+ * Enable the mode bits to be sent after the address bytes.
+ *
+ * Allow the mode bits defined in the Mode Bit Configuration register to
+ * be sent following the address bytes.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_mode_bit_enable(void);
+
+/******************************************************************************/
+/*!
+ * Get the current value of the Mode Bit Configuration register.
+ *
+ * \returns The 8 bit value that is sent to the device following the address
+ * bytes when the mode bit is enabled (see: alt_qspi_mode_bit_enable())
+ */
+uint32_t alt_qspi_mode_bit_config_get(void);
+
+/******************************************************************************/
+/*!
+ * Set the value of the Mode Bit Configuration register.
+ *
+ * Set the value of the 8 bits that are sent to the device following the address
+ * bytes when the mode bit is enabled (see: alt_qspi_mode_bit_enable())
+ *
+ * This API requires that the QSPI controller be idle, as determined by
+ * alt_qspi_is_idle().
+ *
+ * \param mode_bits
+ * The 8 bit value sent to the device following the address bytes.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_mode_bit_config_set(const uint32_t mode_bits);
+
+/******************************************************************************/
+/*!
+ * Get the current flash device size and write protection configuration.
+ *
+ * \param cfg
+ * [out] Pointer to a ALT_QSPI_DEV_SIZE_CONFIG_t structure to
+ * contain the returned flash device size and write protection
+ * configuration.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_device_size_config_get(ALT_QSPI_DEV_SIZE_CONFIG_t * cfg);
+
+/******************************************************************************/
+/*!
+ * Set the flash device size and write protection configuration.
+ *
+ * \param cfg
+ * Pointer to a ALT_QSPI_DEV_SIZE_CONFIG_t structure containing the
+ * flash device size and write protection configuration.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_device_size_config_set(const ALT_QSPI_DEV_SIZE_CONFIG_t * cfg);
+
+/******************************************************************************/
+/*!
+ * Get the current QSPI device read instruction configuration.
+ *
+ * \param cfg
+ * [out] Pointer to a ALT_QSPI_DEV_INST_CONFIG_t structure to
+ * contain the returned QSPI controller instruction configuration
+ * used when performing read transactions with the device.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_device_read_config_get(ALT_QSPI_DEV_INST_CONFIG_t * cfg);
+
+/******************************************************************************/
+/*!
+ * Set the QSPI device read instruction configuration.
+ *
+ * This API requires that the QSPI controller be idle, as determined by
+ * alt_qspi_is_idle().
+ *
+ * \param cfg
+ * Pointer to a ALT_QSPI_DEV_INST_CONFIG_t structure specifying the
+ * desired op code, transfer widths, and dummy cycles for the QSPI
+ * controller to use when performing read transactions with the
+ * device.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_device_read_config_set(const ALT_QSPI_DEV_INST_CONFIG_t * cfg);
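+
+/*
+ * Editor's example sketch (not part of the original Altera hwlib sources):
+ * a read instruction configuration for a hypothetical device supporting a
+ * quad-output fast read. The op code (0x6B) and dummy cycle count are
+ * illustration values only; consult the flash device data sheet.
+ *
+ *   ALT_QSPI_DEV_INST_CONFIG_t rd_cfg =
+ *   {
+ *       .op_code        = 0x6B,                  // assumed quad output read
+ *       .inst_type      = ALT_QSPI_MODE_SINGLE,  // op code sent on DQ0 only
+ *       .addr_xfer_type = ALT_QSPI_MODE_SINGLE,
+ *       .data_xfer_type = ALT_QSPI_MODE_QUAD,    // data returned on DQ[3:0]
+ *       .dummy_cycles   = 8                      // assumed; device specific
+ *   };
+ *
+ *   alt_qspi_device_read_config_set(&rd_cfg);
+ */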
+
+/******************************************************************************/
+/*!
+ * Get the current QSPI device write instruction configuration.
+ *
+ * \param cfg
+ * [out] Pointer to a ALT_QSPI_DEV_INST_CONFIG_t structure to
+ * contain the returned QSPI controller instruction configuration
+ * used when performing write transactions with the device.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_device_write_config_get(ALT_QSPI_DEV_INST_CONFIG_t * cfg);
+
+/******************************************************************************/
+/*!
+ * Set the QSPI device write instruction configuration.
+ *
+ * This API requires that the QSPI controller be idle, as determined by
+ * alt_qspi_is_idle().
+ *
+ * \param cfg
+ * Pointer to a ALT_QSPI_DEV_INST_CONFIG_t structure specifying the
+ * desired op code, transfer widths, and dummy cycles for the QSPI
+ * controller to use when performing write transactions with the
+ * device.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_device_write_config_set(const ALT_QSPI_DEV_INST_CONFIG_t * cfg);
+
+/******************************************************************************/
+/*!
+ * Get the QSPI device delay and timing configuration parameters.
+ *
+ * This function returns the settings of the chip select delay and timing
+ * configurations.
+ *
+ * \param cfg
+ * [out] Pointer to a ALT_QSPI_TIMING_CONFIG_t structure to return
+ * the device timing and delay settings.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_timing_config_get(ALT_QSPI_TIMING_CONFIG_t * cfg);
+
+/******************************************************************************/
+/*!
+ * Set the QSPI device delay and timing configuration parameters.
+ *
+ * This function allows the user to configure how the chip select is driven
+ * after each flash access. This is required as each device may have different
+ * timing requirements. As the serial clock frequency is increased, these
+ * timing parameters become more important and can be adjusted to meet the
+ * requirements of a specific flash device. All timings are defined in cycles
+ * of the SPI master ref clock.
+ *
+ * This API requires that the QSPI controller be idle, as determined by
+ * alt_qspi_is_idle().
+ *
+ * \param cfg
+ * Pointer to a ALT_QSPI_TIMING_CONFIG_t structure specifying the
+ * desired timing and delay settings.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_timing_config_set(const ALT_QSPI_TIMING_CONFIG_t * cfg);
+
+/*! @} */
+
+/******************************************************************************/
+/*! \addtogroup ALT_QSPI_DAC Direct Access Mode
+ *
+ * In direct access mode, an access to the AHB data slave triggers a read or
+ * write command to the flash memory. To use the direct access mode, enable the
+ * direct access controller with the alt_qspi_direct_enable() function. An
+ * external master, for example a processor, triggers the direct access
+ * controller with a read or write operation to the AHB data slave
+ * interface. The data slave exposes a 1MB window into the flash device. You can
+ * remap this window to any 1MB location within the flash device address range.
+ *
+ * To remap the AHB data slave to access other 1MB regions of the flash device,
+ * enable address remapping by calling alt_qspi_ahb_address_remap_enable(). All
+ * incoming data slave accesses remap to the offset specified in the remap
+ * address register which is configured by alt_qspi_ahb_remap_address_set().
+ *
+ * The 20 LSBs of incoming addresses are used for accessing the 1MB region and
+ * the higher bits are ignored.
+ *
+ * The quad SPI controller does not issue any error status for accesses that lie
+ * outside the connected flash memory space.
+ *
+ * @{
+ */
+
+/******************************************************************************/
+/*!
+ * Disable the QSPI Direct Access Controller.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_direct_disable(void);
+
+/******************************************************************************/
+/*!
+ * Enable the QSPI Direct Access Controller.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_direct_enable(void);
+
+/******************************************************************************/
+/*!
+ * Get the current AHB address remap value.
+ *
+ * Returns the current value of the AHB remap address register.
+ *
+ * \returns The value used to remap an incoming AHB address to a
+ * different address used by the flash device.
+ */
+uint32_t alt_qspi_ahb_remap_address_get(void);
+
+/******************************************************************************/
+/*!
+ * Set the AHB address remap value.
+ *
+ * Sets the value of the AHB remap address register.
+ *
+ * This API requires that the QSPI controller be idle, as determined by
+ * alt_qspi_is_idle().
+ *
+ * \param ahb_remap_addr
+ * The value used to remap an incoming AHB address to a different
+ * address used by the flash device.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_ahb_remap_address_set(const uint32_t ahb_remap_addr);
+
+/******************************************************************************/
+/*!
+ * Disable AHB address remapping.
+ *
+ * Disables remapping of incoming AHB addresses so they are sent unmodified to
+ * the flash device. The incoming AHB address maps directly to the address
+ * serially sent to the flash device.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_ahb_address_remap_disable(void);
+
+/******************************************************************************/
+/*!
+ * Enable AHB address remapping.
+ *
+ * Enables remapping of incoming AHB addresses so they are modified to
+ * \<address\> + \e N, where \e N is the configured remap address value.
+ *
+ * See: alt_qspi_ahb_remap_address_set().
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_ahb_address_remap_enable(void);
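+
+/*
+ * Editor's example sketch (not part of the original Altera hwlib sources):
+ * pointing the 1 MB AHB data slave window at the second megabyte of the
+ * flash device (the remap offset is an illustration value). Note the
+ * exception comment at the top of this file: direct access mode is not
+ * functional with the devkit flash part.
+ *
+ *   alt_qspi_ahb_remap_address_set(0x00100000);
+ *   alt_qspi_ahb_address_remap_enable();
+ *   alt_qspi_direct_enable();
+ */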
+
+/*! @} */
+
+/******************************************************************************/
+/*! \addtogroup ALT_QSPI_INDAC Indirect Access Mode
+ *
+ * In indirect access mode, flash data is temporarily buffered in the QSPI
+ * controller's SRAM. Software controls and triggers indirect accesses through
+ * the APB register slave interface. The controller transfers data through the
+ * AHB data slave interface.
+ *
+ * An indirect read operation reads data from the flash memory, places the data
+ * into the SRAM, and transfers the data to an external master through the AHB
+ * data slave interface.
+ *
+ * An indirect write operation programs data from the SRAM to the flash memory.
+ *
+ * @{
+ */
+
+/******************************************************************************/
+/*!
+ * Starts an indirect read transfer.
+ *
+ * Initiates an indirect read transfer of the requested number of bytes from the
+ * designated flash address.
+ *
+ * After calling this function, flash data may be read from the QSPI SRAM buffer
+ * as it becomes available via one of the following methods:
+ * * Directly from the AHB data slave interface at the configured AHB trigger
+ * address. If the requested data is not immediately available in the SRAM
+ * buffer then AHB wait states will be applied until the data has been read
+ * from flash into the SRAM buffer. Alternatively, data may be read from the
+ * AHB data slave as the SRAM is filled. The availability of data in the SRAM
+ * buffer may be determined by an SRAM watermark interrupt notification or by
+ * polling the SRAM fill level.
+ * * Configuring and enabling the QSPI DMA peripheral controller.
+ *
+ * The following is a list of restrictions:
+ * * flash_addr must be word aligned.
+ * * num_bytes must be word aligned.
+ * * The transfer must not cross the 3-byte addressing boundary. This
+ * restriction may be device specific and may be lifted in the future.
+ *
+ * \param flash_addr
+ * The flash source address to read data from.
+ *
+ * \param num_bytes
+ * The number of bytes to read from the flash source address.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_indirect_read_start(const uint32_t flash_addr,
+ const size_t num_bytes);
+
+/******************************************************************************/
+/*!
+ * Finish the indirect read operation that was completed or canceled. This
+ * function should be called before another indirect read is started.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_indirect_read_finish(void);
+
+/******************************************************************************/
+/*!
+ * Cancel all indirect read transfers in progress.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_indirect_read_cancel(void);
+
+/******************************************************************************/
+/*!
+ * Get the current indirect read SRAM fill level value.
+ *
+ * Returns the SRAM fill level for the indirect read partition in units of SRAM
+ * words (4 bytes).
+ *
+ * \returns The SRAM fill level for the indirect read partition in units of
+ * SRAM words (4 bytes).
+ */
+uint32_t alt_qspi_indirect_read_fill_level(void);
+
+/******************************************************************************/
+/*!
+ * Get the current indirect read watermark value.
+ *
+ * The watermark value (in bytes) represents the minimum fill level of the SRAM
+ * before a DMA peripheral access is permitted. When the SRAM fill level passes
+ * the watermark, an interrupt source is also generated. This can be disabled by
+ * writing a value of all zeroes.
+ *
+ * \returns The current indirect read watermark value.
+ */
+uint32_t alt_qspi_indirect_read_watermark_get(void);
+
+/******************************************************************************/
+/*!
+ * Set the indirect read watermark value.
+ *
+ * The watermark value (in bytes) represents the minimum fill level of the SRAM
+ * before a DMA peripheral access is permitted. When the SRAM fill level passes
+ * the watermark, an interrupt source is also generated. This can be disabled by
+ * writing a value of all zeroes. The watermark can only be set when no indirect
+ * read is in progress.
+ *
+ * \param watermark
+ * The watermark value (in bytes).
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_indirect_read_watermark_set(const uint32_t watermark);
+
+/******************************************************************************/
+/*!
+ * Returns true when an indirect read has completed otherwise false.
+ *
+ * \internal
+ * Returns Indirect Read Transfer Control Register bit 5 "Indirect Completion Status".
+ * \endinternal
+ *
+ * \returns Returns true when an indirect read has completed otherwise false.
+ */
+bool alt_qspi_indirect_read_is_complete(void);
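+
+/*
+ * Editor's example sketch (not part of the original Altera hwlib sources):
+ * a polled indirect read. flash_addr and num_bytes stand for word-aligned
+ * values; draining the data from the AHB data slave (or via DMA) is only
+ * indicated by the comment inside the loop.
+ *
+ *   if (alt_qspi_indirect_read_start(flash_addr, num_bytes) == ALT_E_SUCCESS)
+ *   {
+ *       while (!alt_qspi_indirect_read_is_complete())
+ *       {
+ *           // copy available words from the AHB data slave / SRAM here
+ *       }
+ *       alt_qspi_indirect_read_finish();
+ *   }
+ */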
+
+/******************************************************************************/
+/*!
+ * Starts an indirect write transfer.
+ *
+ * Initiates an indirect write transfer of the requested number of bytes to the
+ * designated flash address.
+ *
+ * After calling this function, flash data may be written to the QSPI SRAM
+ * buffer as space becomes available via one of the following methods:
+ * * Directly from the AHB data slave interface at the configured AHB trigger
+ * address. If the requested space is not immediately available in the SRAM
+ * buffer then AHB wait states will be applied until the space becomes
+ * available. Alternatively, the data may be written to the AHB data slave
+ * as the SRAM is drained. The space in the SRAM buffer may be determined by
+ * an SRAM watermark interrupt notification or by polling the SRAM fill
+ * level and subtracting that value from the SRAM space devoted to writes.
+ * * Configuring and enabling the QSPI DMA peripheral controller.
+ *
+ * The following is a list of restrictions:
+ * * flash_addr must be word aligned.
+ * * num_bytes must be word aligned.
+ * * num_bytes must be 256 or below. This is due to a device specific
+ * limitation and may be lifted in the future.
+ * * The transfer must not cross the page (256 byte) addressing boundary. This
+ * restriction may be device specific and may be lifted in the future.
+ *
+ * \param flash_addr
+ * The flash destination address to write data to.
+ *
+ * \param num_bytes
+ * The number of bytes to write to the flash.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_indirect_write_start(const uint32_t flash_addr,
+ const size_t num_bytes);
+
+/******************************************************************************/
+/*!
+ * Finish the indirect write operation that was completed or canceled. This
+ * function should be called before another indirect write is started.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_indirect_write_finish(void);
+
+/******************************************************************************/
+/*!
+ * Cancel all indirect write transfers in progress.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_indirect_write_cancel(void);
+
+/******************************************************************************/
+/*!
+ * Get the current indirect write SRAM fill level value.
+ *
+ * Returns the SRAM fill level for the indirect write partition in units of SRAM
+ * words (4 bytes).
+ *
+ * \returns The SRAM fill level for the indirect write partition in units of
+ * SRAM words (4 bytes).
+ */
+uint32_t alt_qspi_indirect_write_fill_level(void);
+
+/******************************************************************************/
+/*!
+ * Get the current indirect write watermark value.
+ *
+ * The watermark value (in bytes) represents the maximum fill level of the SRAM
+ * before a DMA peripheral access is permitted. When the SRAM fill level falls
+ * below the watermark, an interrupt is also generated. This can be disabled by
+ * writing a value of all ones.
+ *
+ * \returns The current indirect write watermark value.
+ */
+uint32_t alt_qspi_indirect_write_watermark_get(void);
+
+/******************************************************************************/
+/*!
+ * Set the indirect write watermark value.
+ *
+ * The watermark value (in bytes) represents the maximum fill level of the SRAM
+ * before a DMA peripheral access is permitted. When the SRAM fill level falls
+ * below the watermark, an interrupt is also generated. This can be disabled by
+ * writing a value of all ones. The watermark can only be set when no indirect
+ * write is in progress.
+ *
+ * \param watermark
+ * The watermark value (in bytes).
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_indirect_write_watermark_set(const uint32_t watermark);
+
+/******************************************************************************/
+/*!
+ * Returns true when an indirect write has completed otherwise false.
+ *
+ * \internal
+ * Returns Indirect Write Transfer Control Register bit 5 "Indirect Completion
+ * Status".
+ * \endinternal
+ *
+ * \returns Returns true when an indirect write has completed otherwise
+ * false.
+ */
+bool alt_qspi_indirect_write_is_complete(void);
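+
+/*
+ * Editor's example sketch (not part of the original Altera hwlib sources):
+ * the matching polled indirect write. flash_addr and num_bytes stand for
+ * word-aligned values with num_bytes <= 256 and no page boundary crossing,
+ * as required above.
+ *
+ *   if (alt_qspi_indirect_write_start(flash_addr, num_bytes) == ALT_E_SUCCESS)
+ *   {
+ *       while (!alt_qspi_indirect_write_is_complete())
+ *       {
+ *           // feed data into the AHB data slave / SRAM as space allows
+ *       }
+ *       alt_qspi_indirect_write_finish();
+ *   }
+ */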
+
+/******************************************************************************/
+/*! \addtogroup ALT_QSPI_CFG_SRAM SRAM Partition
+ *
+ * The SRAM local memory buffer is a 128 by 32-bit (512 total bytes) memory. The
+ * SRAM has two partitions, with the lower partition reserved for indirect read
+ * operations and the upper partition for indirect write operations. The size of
+ * the partitions is specified in the SRAM partition register, based on 32-bit
+ * word sizes. For example, to specify four bytes of storage, write the value 1.
+ * The value written to the indirect read partition size field ( addr ) defines
+ * the number of entries reserved for indirect read operations. For example, write
+ * the value 32 (0x20) to partition the 128-entry SRAM to 32 entries (25%) for
+ * read usage and 96 entries (75%) for write usage.
+ *
+ * The functions in this section provide access to configure the SRAM read
+ * partition allocation.
+ *
+ * @{
+ */
+
+/*!
+ * The size of the onboard SRAM in bytes.
+ */
+#define ALT_QSPI_SRAM_FIFO_SIZE (512)
+
+/*
+ * The size of the onboard SRAM in entries. Each entry is word (32-bit) sized.
+ */
+#define ALT_QSPI_SRAM_FIFO_ENTRY_COUNT (512 / sizeof(uint32_t))
+
+/******************************************************************************/
+/*!
+ * Get the entry count (words) of the indirect read partition in the QSPI
+ * controller SRAM.
+ *
+ * There is an additional word of read memory not in the SRAM but used to
+ * buffer between the SRAM and the AHB. As such, the total on-board memory
+ * buffer for indirect reads is one entry more than the value reported by
+ * this function.
+ *
+ * \returns The count of 32-bit words of the indirect read partition in the
+ * QSPI controller SRAM.
+ *
+ * \internal
+ * The documentation states that the number of locations allocated to indirect
+ * read = SRAM_PARTITION_REG + 1. Cadence clarified that the +1 comes from an
+ * additional register slice for reads, implemented in FLOPs, which was done
+ * to avoid connecting the SRAM directly to the AHB interface. This was done
+ * for performance / timing reasons. The +1 will not be included in the return
+ * value but documented as an additional entry.
+ * \endinternal
+ */
+uint32_t alt_qspi_sram_partition_get(void);
+
+/******************************************************************************/
+/*!
+ * Set the entry count (words) of the indirect read partition in the QSPI
+ * controller SRAM.
+ *
+ * Note: It is recommended that the SRAM partition not be set to 0 or 127,
+ * although doing so is not prohibited.
+ *
+ * \param read_part_size
+ * The count of 32-bit words to allocate to the indirect read
+ * partition in the QSPI controller SRAM.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_sram_partition_set(const uint32_t read_part_size);
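+
+/*
+ * Editor's example sketch (not part of the original Altera hwlib sources):
+ * reserving 32 of the 128 SRAM entries (25%) for indirect reads, leaving
+ * 96 entries for indirect writes, as in the example given above.
+ *
+ *   alt_qspi_sram_partition_set(32);
+ */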
+
+/*! @} */
+
+/*! @} */
+
+/******************************************************************************/
+/*! \addtogroup ALT_QSPI_ERASE Flash Erase
+ *
+ * The functions in this group are used to erase selected portions of a flash
+ * device.
+ * @{
+ */
+
+/******************************************************************************/
+/*!
+ * This function erases the designated flash device subsector.
+ *
+ * This function erases the flash device subsector containing the designated
+ * flash address. Any address within the subsector is valid.
+ *
+ * \param addr
+ * A flash address contained within the the subsector to be erased.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_erase_subsector(const uint32_t addr);
+
+/******************************************************************************/
+/*!
+ * This function erases the designated flash device sector.
+ *
+ * This function erases the flash device sector containing the designated flash
+ * address. Any address within the sector is valid.
+ *
+ * \param addr
+ * A flash address contained within the the sector to be erased.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_erase_sector(const uint32_t addr);
+
+/******************************************************************************/
+/*!
+ * This function erases the entire flash device.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_erase_chip(void);
+
+/*! @} */
+
+/******************************************************************************/
+/*! \addtogroup ALT_QSPI_DMA DMA Peripheral Interface
+ *
+ * The DMA peripheral request controller is only used for the indirect mode of
+ * operation where data is temporarily stored in the SRAM. The QSPI flash
+ * controller uses the DMA peripheral request interface to trigger the external
+ * DMA into performing data transfers between memory and the QSPI
+ * controller.
+ *
+ * There are two DMA peripheral request interfaces, one for indirect reads and
+ * one for indirect writes. The DMA peripheral request controller can issue two
+ * types of DMA requests, single or burst, to the external DMA. The number of
+ * bytes for each single or burst request is specified using
+ * alt_qspi_dma_config_set(). The DMA peripheral request controller splits the
+ * total amount of data to be transferred into a number of DMA burst and single
+ * requests by dividing the total number of bytes by the number of bytes
+ * specified in the burst request, and then dividing the remainder by the number
+ * of bytes in a single request.
+ *
+ * When programming the DMA controller, the burst request size must match the
+ * burst request size set in the quad SPI controller to avoid quickly reaching
+ * an overflow or underflow condition.
+ * @{
+ */
+
+/******************************************************************************/
+/*!
+ * Disable the QSPI DMA peripheral interface.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_dma_disable(void);
+
+/******************************************************************************/
+/*!
+ * Enable the QSPI DMA peripheral interface.
+ *
+ * Enable the QSPI DMA handshaking logic. When enabled, the QSPI will trigger DMA
+ * transfer requests via the DMA peripheral interface.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_dma_enable(void);
+
+/******************************************************************************/
+/*!
+ * Get the current DMA peripheral configuration.
+ *
+ * This function returns the QSPI DMA peripheral interface single and burst type
+ * transfer size configurations.
+ *
+ * \param single_type_sz
+ * [out] The number of bytes for each DMA single type
+ *              request. Value must be a power of 2 between 1 and 32768.
+ *
+ * \param burst_type_sz
+ * [out] The number of bytes for each DMA burst type request. Value
+ *              must be a power of 2 between 1 and 32768.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_dma_config_get(uint32_t * single_type_sz,
+ uint32_t * burst_type_sz);
+
+/******************************************************************************/
+/*!
+ * Set the DMA peripheral configuration.
+ *
+ * This function configures the QSPI DMA peripheral interface single and burst
+ * type transfer sizes. The DMA configuration should be set up while the
+ * controller is idle. Because all transfers are required to be word aligned,
+ * the smallest DMA request is 4 bytes.
+ *
+ * This API requires that the QSPI controller be idle, as determined by
+ * alt_qspi_is_idle().
+ *
+ * \param single_type_sz
+ * The number of bytes for each DMA single type request. Value must
+ * be a power of 2 between 4 and 32768.
+ *
+ * \param burst_type_sz
+ * The number of bytes for each DMA burst type request. Value must
+ *              be a power of 2 between 4 and 32768. Bursts must be equal to
+ *              or larger than single requests.
+ *
+ * \retval ALT_E_SUCCESS Indicates successful completion.
+ * \retval ALT_E_ERROR Indicates an error occurred.
+ */
+ALT_STATUS_CODE alt_qspi_dma_config_set(const uint32_t single_type_sz,
+ const uint32_t burst_type_sz);
+
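+/*
+ * Usage sketch (illustrative only, not part of the hwlib API): configure
+ * 4-byte single requests and 32-byte burst requests, then enable the DMA
+ * handshaking logic. With these sizes a 60-byte indirect transfer is split
+ * into 1 burst request (32 bytes) plus 7 single requests (the remaining
+ * 28 bytes / 4). The external DMA must be programmed with the same 32-byte
+ * burst size.
+ */
+#if 0
+static ALT_STATUS_CODE example_qspi_dma_setup(void)
+{
+    ALT_STATUS_CODE status = alt_qspi_dma_config_set(4, 32);
+    if (status != ALT_E_SUCCESS)
+    {
+        return status;
+    }
+
+    /* Enable DMA handshaking once the request sizes are configured. */
+    return alt_qspi_dma_enable();
+}
+#endif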
+
+/*! @} */
+
+/*! @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __ALT_QSPI_H__ */
diff --git a/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_16550_uart.c b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_16550_uart.c
new file mode 100644
index 0000000000..7ed75c39d3
--- /dev/null
+++ b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_16550_uart.c
@@ -0,0 +1,1179 @@
+/******************************************************************************
+ *
+ * Copyright 2013 Altera Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+#include <bsp/alt_16550_uart.h>
+#include <bsp/alt_clock_manager.h>
+#include <bsp/socal/alt_rstmgr.h>
+#include <bsp/socal/alt_uart.h>
+#include <bsp/socal/hps.h>
+#include <bsp/socal/socal.h>
+
+/////
+
+#define ALT_16550_HANDLE_DATA_UART_ENABLED_MSK (1UL << 31)
+#define ALT_16550_HANDLE_DATA_DIVISOR_VALUE_GET(value) (value & 0xffff)
+
+#define ALT_ALTERA_16550_CPR_OFST (0xF4)
+#define ALT_ALTERA_16550_CPR_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_ALTERA_16550_CPR_OFST))
+#define ALT_ALTERA_16550_CPR_FIFO_MODE_GET(value) (((value) >> 16) & 0xff)
+#define ALT_ALTERA_16550_CPR_AFCE_MODE_SET_MSK (1 << 4)
+
+/////
+
+// Remove these macros as part of case:123835.
+#define ALT_UART_IER_DLH_VALUE_SET(value) ((value) & 0xff)
+#define ALT_UART_IER_DLH_ETBEI_DLH1_SET_MSK ALT_UART_IER_DLH_ETBEI_DLHL_SET_MSK
+
+/////
+
+//
+// Helper function which resets the UART and, if requested, initializes the UART
+// to the default settings. Currently the default settings are:
+// - 8 databits
+// - no parity
+// - 1 stopbit
+// - 57600 baudrate
+// The reset routine depends on the hardware implementation of the UART.
+//
+
+// This helper is needed because the regular alt_read_word(src) essentially
+// resolves to "*(volatile uint32_t *)src". As there is no assignment, this
+// could potentially be optimized away. With the helper, the actual register
+// read should occur and be returned (and subsequently discarded).
+static inline uint32_t alt_read_word_helper(const void * addr)
+{
+ return alt_read_word(addr);
+}
+
+//
+// Helper function to write the divisor to hardware.
+//
+static ALT_STATUS_CODE alt_16550_write_divisor_helper(ALT_16550_HANDLE_t * handle,
+ uint32_t divisor)
+{
+ // Validate the divisor parameter.
+ if (divisor > 0xffff)
+ {
+ // This should never happen as it is verified in divisor_set.
+ return ALT_E_ERROR;
+ }
+
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Set LCR::DLAB (Line Control Register :: Divisor Latch Access Bit)
+ alt_setbits_word(ALT_UART_LCR_ADDR(handle->location), ALT_UART_LCR_DLAB_SET_MSK);
+
+ // Write DLL (Divisor Latch Low).
+ alt_write_word(ALT_UART_RBR_THR_DLL_ADDR(handle->location), ALT_UART_RBR_THR_DLL_VALUE_SET(divisor));
+
+ // Write DLH (Divisor Latch High).
+ alt_write_word(ALT_UART_IER_DLH_ADDR(handle->location), ALT_UART_IER_DLH_VALUE_SET(divisor >> 8));
+
+ // Clear LCR::DLAB (Line Control Register :: Divisor Latch Access Bit)
+ alt_clrbits_word(ALT_UART_LCR_ADDR(handle->location), ALT_UART_LCR_DLAB_SET_MSK);
+
+ break;
+
+ default:
+ return ALT_E_ERROR;
+ }
+
+ // Update the enabled state in the handle data.
+ if (divisor != 0)
+ {
+ handle->data |= ALT_16550_HANDLE_DATA_UART_ENABLED_MSK;
+ }
+ else
+ {
+ handle->data &= ~ALT_16550_HANDLE_DATA_UART_ENABLED_MSK;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Helper function to reset the UART.
+//
+static ALT_STATUS_CODE alt_16550_reset_helper(ALT_16550_HANDLE_t * handle, bool enable_init)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ // Write SRR::UR (Shadow Reset Register :: UART Reset)
+ alt_write_word(ALT_UART_SRR_ADDR(handle->location), ALT_UART_SRR_UR_SET_MSK);
+
+ // Read the MSR to work around case:119085.
+ alt_read_word_helper(ALT_UART_MSR_ADDR(handle->location));
+ break;
+
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ alt_16550_write_divisor_helper(handle, 0); // Disable UART
+ alt_16550_int_disable_all(handle); // Disable interrupts
+ alt_16550_fifo_disable(handle); // Disable FIFOs
+ alt_write_word(ALT_UART_MCR_ADDR(handle->location), 0); // 0 -> MCR (AFCE, LP, OUT2, OUT1, RTS, DTR)
+ break;
+
+ default:
+ return ALT_E_ERROR;
+ }
+
+ // If we are initializing (as opposed to just uninitializing)
+ if (enable_init)
+ {
+ ALT_STATUS_CODE status;
+
+ // Set bit IER::PTIME (Interrupt Enable Register :: Programmable THRE Mode Enable)
+ alt_setbits_word(ALT_UART_IER_DLH_ADDR(handle->location), ALT_UART_IER_DLH_PTIME_DLH7_SET_MSK);
+
+ // Set the line configuration to use 8-N-1.
+ status = alt_16550_line_config_set(handle, ALT_16550_DATABITS_8,
+ ALT_16550_PARITY_DISABLE,
+ ALT_16550_STOPBITS_1);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+
+ uint32_t divisor = ALT_16550_HANDLE_DATA_DIVISOR_VALUE_GET(handle->data);
+ if (divisor == 0)
+ {
+ // Set the default baudrate to 57600.
+ status = alt_16550_baudrate_set(handle, ALT_16550_BAUDRATE_57600);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+ }
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_init(ALT_16550_DEVICE_t device,
+ void * location,
+ alt_freq_t clock_freq,
+ ALT_16550_HANDLE_t * handle)
+{
+ handle->device = device;
+ handle->data = 0;
+ handle->fcr = 0;
+
+ switch (device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ // The ALT_CLK_L4_SP is required for all SoCFPGA UARTs. Check that it's enabled.
+ if (alt_clk_is_enabled(ALT_CLK_L4_SP) != ALT_E_TRUE)
+ {
+ return ALT_E_BAD_CLK;
+ }
+ else
+ {
+ ALT_STATUS_CODE status;
+ status = alt_clk_freq_get(ALT_CLK_L4_SP, &handle->clock_freq);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+
+ if (device == ALT_16550_DEVICE_SOCFPGA_UART0)
+ {
+ handle->location = ALT_UART0_ADDR;
+
+ // Bring UART0 out of reset.
+ alt_clrbits_word(ALT_RSTMGR_PERMODRST_ADDR, ALT_RSTMGR_PERMODRST_UART0_SET_MSK);
+ }
+ else // device == ALT_16550_DEVICE_SOCFPGA_UART1
+ {
+ handle->location = ALT_UART1_ADDR;
+
+ // Bring UART1 out of reset.
+ alt_clrbits_word(ALT_RSTMGR_PERMODRST_ADDR, ALT_RSTMGR_PERMODRST_UART1_SET_MSK);
+ }
+
+            // Verify the UCV (UART Component Version) register.
+ uint32_t ucr = alt_read_word(ALT_UART_UCV_ADDR(handle->location));
+ if (ucr != ALT_UART_UCV_UART_COMPONENT_VER_RESET)
+ {
+ return ALT_E_ERROR;
+ }
+ }
+ break;
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ handle->location = location;
+ handle->clock_freq = clock_freq;
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ return alt_16550_reset_helper(handle, true);
+}
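+
+/*
+ * Usage sketch (illustrative only, not part of the hwlib sources): bring up
+ * SoCFPGA UART0 with the defaults chosen by the reset helper above (8-N-1,
+ * 57600 baud) and send a single character. For the SoCFPGA UARTs the location
+ * and clock_freq arguments are ignored, so 0 values are passed here.
+ */
+#if 0
+static ALT_STATUS_CODE example_uart0_bringup(void)
+{
+    ALT_16550_HANDLE_t handle;
+
+    ALT_STATUS_CODE status = alt_16550_init(ALT_16550_DEVICE_SOCFPGA_UART0,
+                                            0, 0, &handle);
+    if (status == ALT_E_SUCCESS)
+    {
+        /* The cached divisor is only written to hardware by the enable. */
+        status = alt_16550_enable(&handle);
+    }
+    if (status == ALT_E_SUCCESS)
+    {
+        status = alt_16550_write(&handle, 'A');
+    }
+
+    return status;
+}
+#endif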
+
+ALT_STATUS_CODE alt_16550_uninit(ALT_16550_HANDLE_t * handle)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ alt_setbits_word(ALT_RSTMGR_PERMODRST_ADDR, ALT_RSTMGR_PERMODRST_UART0_SET_MSK);
+ return ALT_E_SUCCESS;
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ alt_setbits_word(ALT_RSTMGR_PERMODRST_ADDR, ALT_RSTMGR_PERMODRST_UART1_SET_MSK);
+ return ALT_E_SUCCESS;
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ default:
+ return alt_16550_reset_helper(handle, false);
+ }
+}
+
+ALT_STATUS_CODE alt_16550_reset(ALT_16550_HANDLE_t * handle)
+{
+ return alt_16550_reset_helper(handle, true);
+}
+
+ALT_STATUS_CODE alt_16550_enable(ALT_16550_HANDLE_t * handle)
+{
+ // Write the divisor cached in the handle data to the divisor registers.
+ // This will effectively enable the UART.
+ return alt_16550_write_divisor_helper(handle,
+ ALT_16550_HANDLE_DATA_DIVISOR_VALUE_GET(handle->data));
+}
+
+ALT_STATUS_CODE alt_16550_disable(ALT_16550_HANDLE_t * handle)
+{
+    // Write 0 to the divisor registers. This will effectively
+ // disable the UART.
+ return alt_16550_write_divisor_helper(handle, 0);
+}
+
+ALT_STATUS_CODE alt_16550_read(ALT_16550_HANDLE_t * handle,
+ char * item)
+{
+ // Verify that the UART is enabled
+ if (!(handle->data & ALT_16550_HANDLE_DATA_UART_ENABLED_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify that the FIFO is disabled
+ if (handle->fcr & ALT_UART_FCR_FIFOE_SET_MSK)
+ {
+ return ALT_E_ERROR;
+ }
+
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Read the RBR (Receive Buffer Register) into *item.
+ *item = ALT_UART_RBR_THR_DLL_VALUE_GET(alt_read_word(ALT_UART_RBR_THR_DLL_ADDR(handle->location)));
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_write(ALT_16550_HANDLE_t * handle,
+ char item)
+{
+ // Verify that the UART is enabled
+ if (!(handle->data & ALT_16550_HANDLE_DATA_UART_ENABLED_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify that the FIFO is disabled
+ if (handle->fcr & ALT_UART_FCR_FIFOE_SET_MSK)
+ {
+ return ALT_E_ERROR;
+ }
+
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Write the buffer into the THR (Transmit Holding Register)
+ alt_write_word(ALT_UART_RBR_THR_DLL_ADDR(handle->location), item);
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+/////
+
+ALT_STATUS_CODE alt_16550_fifo_enable(ALT_16550_HANDLE_t * handle)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Set FCR::FIFOE (FIFO Control Register :: FIFO Enable) bit.
+ handle->fcr |= ALT_UART_FCR_FIFOE_SET_MSK;
+ alt_write_word(ALT_UART_FCR_ADDR(handle->location), handle->fcr);
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ // No need to reset / clear the FIFOs. This is done automatically when
+ // FCR::FIFOE is changed.
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_fifo_disable(ALT_16550_HANDLE_t * handle)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Clear FCR::FIFOE (FIFO Control Register :: FIFO Enable) bit.
+ handle->fcr &= ~ALT_UART_FCR_FIFOE_SET_MSK;
+ alt_write_word(ALT_UART_FCR_ADDR(handle->location), handle->fcr);
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_fifo_read(ALT_16550_HANDLE_t * handle,
+ char * buffer,
+ size_t count)
+{
+ // Verify that the UART is enabled
+ if (!(handle->data & ALT_16550_HANDLE_DATA_UART_ENABLED_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify that the FIFO is enabled
+ if (!(handle->fcr & ALT_UART_FCR_FIFOE_SET_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Read the RBR (Receive Buffer Register) into the buffer
+ for (size_t i = 0; i < count; ++i)
+ {
+ buffer[i] = ALT_UART_RBR_THR_DLL_VALUE_GET(alt_read_word(ALT_UART_RBR_THR_DLL_ADDR(handle->location)));
+ }
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_fifo_write(ALT_16550_HANDLE_t * handle,
+ const char * buffer,
+ size_t count)
+{
+ // Verify that the UART is enabled
+ if (!(handle->data & ALT_16550_HANDLE_DATA_UART_ENABLED_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify that the FIFO is enabled
+ if (!(handle->fcr & ALT_UART_FCR_FIFOE_SET_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Write the buffer into the THR (Transmit Holding Register)
+ for (size_t i = 0; i < count; ++i)
+ {
+ alt_write_word(ALT_UART_RBR_THR_DLL_ADDR(handle->location), buffer[i]);
+ }
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
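+
+/*
+ * Usage sketch (illustrative only, not part of the hwlib sources): queue as
+ * much of a buffer as currently fits in the TX FIFO. This assumes one of the
+ * SoCFPGA UARTs, since the Altera soft UART reports a FIFO level of 0.
+ */
+#if 0
+static ALT_STATUS_CODE example_fifo_send_some(ALT_16550_HANDLE_t * handle,
+                                              const char * buffer,
+                                              size_t count)
+{
+    uint32_t size  = 0;
+    uint32_t level = 0;
+
+    if (   (alt_16550_fifo_size_get_tx(handle, &size) != ALT_E_SUCCESS)
+        || (alt_16550_fifo_level_get_tx(handle, &level) != ALT_E_SUCCESS))
+    {
+        return ALT_E_ERROR;
+    }
+
+    /* Trim the request to the space left in the TX FIFO. */
+    if (count > (size - level))
+    {
+        count = size - level;
+    }
+
+    return alt_16550_fifo_write(handle, buffer, count);
+}
+#endif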
+
+ALT_STATUS_CODE alt_16550_fifo_clear_rx(ALT_16550_HANDLE_t * handle)
+{
+ // Verify that the FIFO is enabled
+ if (!(handle->fcr & ALT_UART_FCR_FIFOE_SET_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ // Write SRR::RFR (Shadow Reset Register :: Receiver FIFO Reset) bit.
+ alt_write_word(ALT_UART_SRR_ADDR(handle->location), ALT_UART_SRR_RFR_SET_MSK);
+ break;
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Write FCR::RFIFOR (FIFO Control Register :: Receiver FIFO Reset) bit.
+ alt_write_word(ALT_UART_FCR_ADDR(handle->location), handle->fcr | ALT_UART_FCR_RFIFOR_SET_MSK);
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_fifo_clear_tx(ALT_16550_HANDLE_t * handle)
+{
+ // Verify that the FIFO is enabled
+ if (!(handle->fcr & ALT_UART_FCR_FIFOE_SET_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ // Write SRR::XFR (Shadow Reset Register :: Xmitter FIFO Reset) bit.
+ alt_write_word(ALT_UART_SRR_ADDR(handle->location), ALT_UART_SRR_XFR_SET_MSK);
+ break;
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Write FCR::XFIFOR (FIFO Control Register :: Xmitter FIFO Reset) bit.
+ alt_write_word(ALT_UART_FCR_ADDR(handle->location), handle->fcr | ALT_UART_FCR_XFIFOR_SET_MSK);
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_fifo_clear_all(ALT_16550_HANDLE_t * handle)
+{
+ // Verify that the FIFO is enabled
+ if (!(handle->fcr & ALT_UART_FCR_FIFOE_SET_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ // Write SRR::(RFR | XFR)
+ // (Shadow Reset Register :: (Receiver FIFO Reset | Xmitter FIFO Reset)) bits.
+ alt_write_word(ALT_UART_SRR_ADDR(handle->location),
+ ALT_UART_SRR_RFR_SET_MSK | ALT_UART_SRR_XFR_SET_MSK);
+ break;
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+        // Write FCR::(RFIFOR | XFIFOR)
+ // (FIFO Control Register :: (Receiver FIFO Reset | Xmitter FIFO Reset)) bits.
+ alt_write_word(ALT_UART_FCR_ADDR(handle->location),
+ handle->fcr | ALT_UART_FCR_RFIFOR_SET_MSK | ALT_UART_FCR_XFIFOR_SET_MSK);
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_fifo_size_get_rx(ALT_16550_HANDLE_t * handle,
+ uint32_t * size)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ // Read the CPR::FIFO_Mod (Component Parameter Register :: FIFO Mode).
+ // The FIFO size is 16x this value.
+ *size = ALT_UART_CPR_FIFO_MOD_GET(alt_read_word(ALT_UART_CPR_ADDR(handle->location))) << 4;
+ break;
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+        // Altera 16550 Compatible Soft UARTs have a configurable FIFO size
+        // which is stored in CPR::FIFO_Mode (Component Parameter Register ::
+        // FIFO Mode). The FIFO size is 16x this value.
+ *size = ALT_ALTERA_16550_CPR_FIFO_MODE_GET(alt_read_word(ALT_ALTERA_16550_CPR_ADDR(handle->location))) << 4;
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_fifo_size_get_tx(ALT_16550_HANDLE_t * handle,
+ uint32_t * size)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ // Read the CPR::FIFO_Mod (Component Parameter Register :: FIFO Mode).
+ // The FIFO size is 16x this value.
+ *size = ALT_UART_CPR_FIFO_MOD_GET(alt_read_word(ALT_UART_CPR_ADDR(handle->location))) << 4;
+ break;
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+        // Altera 16550 Compatible Soft UARTs have a configurable FIFO size
+        // which is stored in CPR::FIFO_Mode (Component Parameter Register ::
+        // FIFO Mode).
+ // The FIFO size is 16x this value.
+ *size = ALT_ALTERA_16550_CPR_FIFO_MODE_GET(alt_read_word(ALT_ALTERA_16550_CPR_ADDR(handle->location))) << 4;
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_fifo_level_get_rx(ALT_16550_HANDLE_t * handle,
+ uint32_t * level)
+{
+ // Verify that the FIFO is enabled
+ if (!(handle->fcr & ALT_UART_FCR_FIFOE_SET_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ // Read RFL (Receive FIFO Level).
+ *level = alt_read_word(ALT_UART_RFL_ADDR(handle->location));
+ break;
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // RFL not implemented. Return 0.
+ *level = 0;
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_fifo_level_get_tx(ALT_16550_HANDLE_t * handle,
+ uint32_t * level)
+{
+ // Verify that the FIFO is enabled
+ if (!(handle->fcr & ALT_UART_FCR_FIFOE_SET_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ // Read TFL (Transmit FIFO Level).
+ *level = alt_read_word(ALT_UART_TFL_ADDR(handle->location));
+ break;
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // TFL not implemented. Return 0.
+ *level = 0;
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_fifo_trigger_set_rx(ALT_16550_HANDLE_t * handle,
+ ALT_16550_FIFO_TRIGGER_RX_t trigger)
+{
+ // Verify that the FIFO is enabled
+ if (!(handle->fcr & ALT_UART_FCR_FIFOE_SET_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify triggering parameter
+ switch (trigger)
+ {
+ case ALT_16550_FIFO_TRIGGER_RX_ANY:
+ case ALT_16550_FIFO_TRIGGER_RX_QUARTER_FULL:
+ case ALT_16550_FIFO_TRIGGER_RX_HALF_FULL:
+ case ALT_16550_FIFO_TRIGGER_RX_ALMOST_FULL:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Update FCR::RT (FIFO Control Register :: Receiver Trigger)
+ handle->fcr &= ~ALT_UART_FCR_RT_SET_MSK;
+ handle->fcr |= ALT_UART_FCR_RT_SET(trigger);
+ alt_write_word(ALT_UART_FCR_ADDR(handle->location), handle->fcr);
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_fifo_trigger_set_tx(ALT_16550_HANDLE_t * handle,
+ ALT_16550_FIFO_TRIGGER_TX_t trigger)
+{
+ // Verify that the FIFO is enabled
+ if (!(handle->fcr & ALT_UART_FCR_FIFOE_SET_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify triggering parameter
+ switch (trigger)
+ {
+ case ALT_16550_FIFO_TRIGGER_TX_EMPTY:
+ case ALT_16550_FIFO_TRIGGER_TX_ALMOST_EMPTY:
+ case ALT_16550_FIFO_TRIGGER_TX_QUARTER_FULL:
+ case ALT_16550_FIFO_TRIGGER_TX_HALF_FULL:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Update FCR::TET (FIFO Control Register :: Transmit Empty Trigger)
+ handle->fcr &= ~ALT_UART_FCR_TET_SET_MSK;
+ handle->fcr |= ALT_UART_FCR_TET_SET(trigger);
+ alt_write_word(ALT_UART_FCR_ADDR(handle->location), handle->fcr);
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+/////
+
+ALT_STATUS_CODE alt_16550_baudrate_get(ALT_16550_HANDLE_t * handle,
+ uint32_t * baudrate)
+{
+ // Query the divisor cached in the handle data
+ uint32_t divisor = ALT_16550_HANDLE_DATA_DIVISOR_VALUE_GET(handle->data);
+
+    // The divisor should never be zero. It is initialized for a 57600 baud
+    // rate and any new value is validated by alt_16550_divisor_set(). We do
+    // not check for users altering the data in the handle structure.
+
+ // Formula for calculating the baudrate:
+ // baudrate = clock / (16 * divisor)
+
+ *baudrate = (handle->clock_freq >> 4) / divisor;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_baudrate_set(ALT_16550_HANDLE_t * handle,
+ uint32_t baudrate)
+{
+ if (baudrate == 0)
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ // Formula for calculating the divisor:
+ // baudrate = clock / (16 * divisor)
+ // => baudrate * 16 * divisor = clock
+ // => divisor = clock / (baudrate * 16)
+ // => divisor = (clock / 16) / baudrate
+
+ // Add half of the denominator to address rounding errors.
+ uint32_t divisor = ((handle->clock_freq + (8 * baudrate)) / (16 * baudrate));
+
+ // Check for divisor range is in alt_16550_divisor_set().
+ return alt_16550_divisor_set(handle, divisor);
+}
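+
+// Worked example (illustrative; assumes a 100 MHz l4_sp_clk): requesting a
+// baudrate of 115200 yields
+//   divisor = (100000000 + 8 * 115200) / (16 * 115200)
+//           = 100921600 / 1843200
+//           = 54 (integer division),
+// and the resulting actual baudrate is (100000000 / 16) / 54 = 115740, which
+// is roughly 0.5% above the requested rate.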
+
+ALT_STATUS_CODE alt_16550_divisor_get(ALT_16550_HANDLE_t * handle,
+ uint32_t * divisor)
+{
+ // Just read the divisor portion of the handle data.
+ *divisor = ALT_16550_HANDLE_DATA_DIVISOR_VALUE_GET(handle->data);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_divisor_set(ALT_16550_HANDLE_t * handle,
+ uint32_t divisor)
+{
+ // Verify divisor value is in range.
+ if ((divisor > 0xffff) || (divisor == 0))
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ // Set the divisor portion of the handle data.
+ handle->data &= ~(0xffff);
+ handle->data |= divisor;
+
+    // Even if the UART is currently enabled, do not write the new divisor to
+    // hardware here. It is documented that the change takes effect when the
+    // UART next moves to the enabled state.
+
+ return ALT_E_SUCCESS;
+}
+
+/////
+
+static ALT_STATUS_CODE alt_16550_ier_mask_set_helper(ALT_16550_HANDLE_t * handle, uint32_t setmask)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Set bit in IER (Interrupt Enable Register)
+ alt_setbits_word(ALT_UART_IER_DLH_ADDR(handle->location), setmask);
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+static ALT_STATUS_CODE alt_16550_ier_mask_clr_helper(ALT_16550_HANDLE_t * handle, uint32_t setmask)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Clear bit in IER (Interrupt Enable Register)
+ alt_clrbits_word(ALT_UART_IER_DLH_ADDR(handle->location), setmask);
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_int_enable_rx(ALT_16550_HANDLE_t * handle)
+{
+ // Set the IER::ERBFI (Interrupt Enable Register :: Enable Receive Buffer Full Interrupt) bit.
+ return alt_16550_ier_mask_set_helper(handle, ALT_UART_IER_DLH_ERBFI_DLH0_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_int_disable_rx(ALT_16550_HANDLE_t * handle)
+{
+ // Clear the IER::ERBFI (Interrupt Enable Register :: Enable Receive Buffer Full Interrupt) bit.
+ return alt_16550_ier_mask_clr_helper(handle, ALT_UART_IER_DLH_ERBFI_DLH0_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_int_enable_tx(ALT_16550_HANDLE_t * handle)
+{
+ // Set the IER::ETBEI (Interrupt Enable Register :: Enable Transmit Buffer Empty Interrupt) bit.
+ return alt_16550_ier_mask_set_helper(handle, ALT_UART_IER_DLH_ETBEI_DLH1_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_int_disable_tx(ALT_16550_HANDLE_t * handle)
+{
+ // Clear the IER::ETBEI (Interrupt Enable Register :: Enable Transmit Buffer Empty Interrupt) bit.
+ return alt_16550_ier_mask_clr_helper(handle, ALT_UART_IER_DLH_ETBEI_DLH1_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_int_enable_line(ALT_16550_HANDLE_t * handle)
+{
+ // Set the IER::ELSI (Interrupt Enable Register :: Enable Line Status Interrupt) bit.
+ return alt_16550_ier_mask_set_helper(handle, ALT_UART_IER_DLH_ELSI_DHL2_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_int_disable_line(ALT_16550_HANDLE_t * handle)
+{
+ // Clear the IER::ELSI (Interrupt Enable Register :: Enable Line Status Interrupt) bit.
+ return alt_16550_ier_mask_clr_helper(handle, ALT_UART_IER_DLH_ELSI_DHL2_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_int_enable_modem(ALT_16550_HANDLE_t * handle)
+{
+ // Set the IER::EDSSI (Interrupt Enable Register :: Enable Modem Status Interrupt) bit.
+ return alt_16550_ier_mask_set_helper(handle, ALT_UART_IER_DLH_EDSSI_DHL3_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_int_disable_modem(ALT_16550_HANDLE_t * handle)
+{
+ // Clear the IER::EDSSI (Interrupt Enable Register :: Enable Modem Status Interrupt) bit.
+ return alt_16550_ier_mask_clr_helper(handle, ALT_UART_IER_DLH_EDSSI_DHL3_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_int_disable_all(ALT_16550_HANDLE_t * handle)
+{
+ // Clear the IER::(ERBFI | ETBEI | ELSI | EDSSI)
+ // (Interrupt Enable Register :: (Enable Receive Buffer Full Interrupt |
+ // Enable Transmit Buffer Empty Interrupt |
+ // Enable Line Status Interrupt |
+ // Enable Modem Status Interrupt)) bits
+ return alt_16550_ier_mask_clr_helper(handle, ALT_UART_IER_DLH_ERBFI_DLH0_SET_MSK |
+ ALT_UART_IER_DLH_ETBEI_DLH1_SET_MSK |
+ ALT_UART_IER_DLH_ELSI_DHL2_SET_MSK |
+ ALT_UART_IER_DLH_EDSSI_DHL3_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_int_status_get(ALT_16550_HANDLE_t * handle,
+ ALT_16550_INT_STATUS_t * status)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Read IIR::IID (Interrupt Identity Register :: Interrupt ID)
+ *status = (ALT_16550_INT_STATUS_t) ALT_UART_IIR_ID_GET(alt_read_word(ALT_UART_IIR_ADDR(handle->location)));
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+/////
+
+ALT_STATUS_CODE alt_16550_line_config_set(ALT_16550_HANDLE_t * handle,
+ ALT_16550_DATABITS_t databits,
+ ALT_16550_PARITY_t parity,
+ ALT_16550_STOPBITS_t stopbits)
+{
+ // Validate the databits parameter.
+ switch (databits)
+ {
+ case ALT_16550_DATABITS_5:
+ case ALT_16550_DATABITS_6:
+ case ALT_16550_DATABITS_7:
+ case ALT_16550_DATABITS_8:
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ // Validate the parity parameter.
+ switch (parity)
+ {
+ case ALT_16550_PARITY_DISABLE:
+ case ALT_16550_PARITY_ODD:
+ case ALT_16550_PARITY_EVEN:
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ // Validate the stopbits parameter.
+ switch (stopbits)
+ {
+ case ALT_16550_STOPBITS_1:
+ case ALT_16550_STOPBITS_2:
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ // LCR (Line Control Register) cache.
+ uint32_t lcr = 0;
+
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+
+ // Configure the number of databits
+ lcr |= ALT_UART_LCR_DLS_SET(databits);
+
+ // Configure the number of stopbits
+ lcr |= ALT_UART_LCR_STOP_SET(stopbits);
+
+ // Configure the parity
+ if (parity != ALT_16550_PARITY_DISABLE)
+ {
+ // Enable parity in LCR
+ lcr |= ALT_UART_LCR_PEN_SET_MSK;
+
+ if (parity == ALT_16550_PARITY_EVEN)
+ {
+ // Enable even parity in LCR; otherwise it's odd parity.
+ lcr |= ALT_UART_LCR_EPS_SET_MSK;
+ }
+ }
+
+ // Update LCR (Line Control Register)
+ alt_replbits_word(ALT_UART_LCR_ADDR(handle->location),
+ ALT_UART_LCR_DLS_SET_MSK
+ | ALT_UART_LCR_STOP_SET_MSK
+ | ALT_UART_LCR_PEN_SET_MSK
+ | ALT_UART_LCR_EPS_SET_MSK,
+ lcr);
+
+ break;
+
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_line_break_enable(ALT_16550_HANDLE_t * handle)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Set the LCR::Break (Line Control Register :: Break) bit.
+ alt_setbits_word(ALT_UART_LCR_ADDR(handle->location), ALT_UART_LCR_BREAK_SET_MSK);
+ break;
+
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_line_break_disable(ALT_16550_HANDLE_t * handle)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Clear the LCR::Break (Line Control Register :: Break) bit.
+ alt_clrbits_word(ALT_UART_LCR_ADDR(handle->location), ALT_UART_LCR_BREAK_SET_MSK);
+ break;
+
+ default:
+ return ALT_E_ERROR;
+ }
+
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_line_status_get(ALT_16550_HANDLE_t * handle,
+ uint32_t * status)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Read the LSR (Line Status Register).
+ *status = alt_read_word(ALT_UART_LSR_ADDR(handle->location));
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+/////
+
+static ALT_STATUS_CODE alt_16550_mcr_mask_set_helper(ALT_16550_HANDLE_t * handle,
+ uint32_t setmask)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Set the bit in MCR (Modem Control Register).
+ alt_setbits_word(ALT_UART_MCR_ADDR(handle->location), setmask);
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+static ALT_STATUS_CODE alt_16550_mcr_mask_clr_helper(ALT_16550_HANDLE_t * handle, uint32_t setmask)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Clear the bit in MCR (Modem Control Register).
+ alt_clrbits_word(ALT_UART_MCR_ADDR(handle->location), setmask);
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_16550_flowcontrol_enable(ALT_16550_HANDLE_t * handle)
+{
+ // Verify that the FIFO is enabled
+ if (!(handle->fcr & ALT_UART_FCR_FIFOE_SET_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // For the Altera 16550 Compatible Soft UART, check that Hardware Flowcontrol is enabled.
+ if (handle->device == ALT_16550_DEVICE_ALTERA_16550_UART)
+ {
+ // Read the CPR::AFCE_Mode (Component Parameter Register :: Auto Flow Control mode) bit.
+ uint32_t cpr = alt_read_word(ALT_ALTERA_16550_CPR_ADDR(handle->location));
+ if (!(ALT_ALTERA_16550_CPR_AFCE_MODE_SET_MSK & cpr))
+ {
+ return ALT_E_ERROR;
+ }
+ }
+
+ // Set MCR::AFCE (Modem Control Register :: Automatic FlowControl Enable) bit.
+ return alt_16550_mcr_mask_set_helper(handle, ALT_UART_MCR_AFCE_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_flowcontrol_disable(ALT_16550_HANDLE_t * handle)
+{
+ // Clear MCR::AFCE (Modem Control Register :: Automatic FlowControl Enable) bit.
+ return alt_16550_mcr_mask_clr_helper(handle, ALT_UART_MCR_AFCE_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_loopback_enable(ALT_16550_HANDLE_t * handle)
+{
+ // Loopback is not implemented in the Altera 16550 Compatible Soft UART.
+ if (handle->device == ALT_16550_DEVICE_ALTERA_16550_UART)
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Set MCR::Loopback (Modem Control Register :: Loopback) bit.
+ return alt_16550_mcr_mask_set_helper(handle, ALT_UART_MCR_LOOPBACK_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_loopback_disable(ALT_16550_HANDLE_t * handle)
+{
+ // Clear MCR::Loopback (Modem Control Register :: Loopback) bit.
+ return alt_16550_mcr_mask_clr_helper(handle, ALT_UART_MCR_LOOPBACK_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_modem_enable_out1(ALT_16550_HANDLE_t * handle)
+{
+ // Set MCR::Out1 (Modem Control Register :: Out1) bit.
+ return alt_16550_mcr_mask_set_helper(handle, ALT_UART_MCR_OUT1_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_modem_disable_out1(ALT_16550_HANDLE_t * handle)
+{
+ // Clear MCR::Out1 (Modem Control Register :: Out1) bit.
+ return alt_16550_mcr_mask_clr_helper(handle, ALT_UART_MCR_OUT1_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_modem_enable_out2(ALT_16550_HANDLE_t * handle)
+{
+ // Set MCR::Out2 (Modem Control Register :: Out2) bit.
+ return alt_16550_mcr_mask_set_helper(handle, ALT_UART_MCR_OUT2_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_modem_disable_out2(ALT_16550_HANDLE_t * handle)
+{
+ // Clear MCR::Out2 (Modem Control Register :: Out2) bit.
+ return alt_16550_mcr_mask_clr_helper(handle, ALT_UART_MCR_OUT2_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_modem_enable_rts(ALT_16550_HANDLE_t * handle)
+{
+ // Set MCR::RTS (Modem Control Register :: Request To Send) bit.
+ return alt_16550_mcr_mask_set_helper(handle, ALT_UART_MCR_RTS_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_modem_disable_rts(ALT_16550_HANDLE_t * handle)
+{
+ // Clear MCR::RTS (Modem Control Register :: Request To Send) bit.
+ return alt_16550_mcr_mask_clr_helper(handle, ALT_UART_MCR_RTS_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_modem_enable_dtr(ALT_16550_HANDLE_t * handle)
+{
+ // Set MCR::DTR (Modem Control Register :: Data Terminal Ready) bit.
+ return alt_16550_mcr_mask_set_helper(handle, ALT_UART_MCR_DTR_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_modem_disable_dtr(ALT_16550_HANDLE_t * handle)
+{
+ // Clear MCR::DTR (Modem Control Register :: Data Terminal Ready) bit.
+ return alt_16550_mcr_mask_clr_helper(handle, ALT_UART_MCR_DTR_SET_MSK);
+}
+
+ALT_STATUS_CODE alt_16550_modem_status_get(ALT_16550_HANDLE_t * handle,
+ uint32_t * status)
+{
+ switch (handle->device)
+ {
+ case ALT_16550_DEVICE_SOCFPGA_UART0:
+ case ALT_16550_DEVICE_SOCFPGA_UART1:
+ case ALT_16550_DEVICE_ALTERA_16550_UART:
+ // Read the MSR (Modem Status Register).
+ *status = alt_read_word(ALT_UART_MSR_ADDR(handle->location));
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ return ALT_E_SUCCESS;
+}
diff --git a/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_address_space.c b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_address_space.c
new file mode 100644
index 0000000000..d64ac33ec8
--- /dev/null
+++ b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_address_space.c
@@ -0,0 +1,509 @@
+/******************************************************************************
+ *
+ * alt_address_space.c - API for the Altera SoC FPGA address space.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * Copyright 2013 Altera Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+#include <stddef.h>
+#include <bsp/alt_address_space.h>
+#include <bsp/socal/alt_l3.h>
+#include <bsp/socal/socal.h>
+#include <bsp/socal/alt_acpidmap.h>
+#include <bsp/hwlib.h>
+
+
+#define ALT_ACP_ID_MAX_INPUT_ID 7
+#define ALT_ACP_ID_MAX_OUTPUT_ID 4096
+
+/******************************************************************************/
+ALT_STATUS_CODE alt_addr_space_remap(ALT_ADDR_SPACE_MPU_ATTR_t mpu_attr,
+ ALT_ADDR_SPACE_NONMPU_ATTR_t nonmpu_attr,
+ ALT_ADDR_SPACE_H2F_BRIDGE_ATTR_t h2f_bridge_attr,
+ ALT_ADDR_SPACE_LWH2F_BRIDGE_ATTR_t lwh2f_bridge_attr)
+{
+ uint32_t remap_reg_val = 0;
+
+ // Parameter checking and validation...
+ if (mpu_attr == ALT_ADDR_SPACE_MPU_ZERO_AT_BOOTROM)
+ {
+ remap_reg_val |= ALT_L3_REMAP_MPUZERO_SET(ALT_L3_REMAP_MPUZERO_E_BOOTROM);
+ }
+ else if (mpu_attr == ALT_ADDR_SPACE_MPU_ZERO_AT_OCRAM)
+ {
+ remap_reg_val |= ALT_L3_REMAP_MPUZERO_SET(ALT_L3_REMAP_MPUZERO_E_OCRAM);
+ }
+ else
+ {
+ return ALT_E_INV_OPTION;
+ }
+
+ if (nonmpu_attr == ALT_ADDR_SPACE_NONMPU_ZERO_AT_SDRAM)
+ {
+ remap_reg_val |= ALT_L3_REMAP_NONMPUZERO_SET(ALT_L3_REMAP_NONMPUZERO_E_SDRAM);
+ }
+ else if (nonmpu_attr == ALT_ADDR_SPACE_NONMPU_ZERO_AT_OCRAM)
+ {
+ remap_reg_val |= ALT_L3_REMAP_NONMPUZERO_SET(ALT_L3_REMAP_NONMPUZERO_E_OCRAM);
+ }
+ else
+ {
+ return ALT_E_INV_OPTION;
+ }
+
+ if (h2f_bridge_attr == ALT_ADDR_SPACE_H2F_INACCESSIBLE)
+ {
+ remap_reg_val |= ALT_L3_REMAP_H2F_SET(ALT_L3_REMAP_H2F_E_INVISIBLE);
+ }
+ else if (h2f_bridge_attr == ALT_ADDR_SPACE_H2F_ACCESSIBLE)
+ {
+ remap_reg_val |= ALT_L3_REMAP_H2F_SET(ALT_L3_REMAP_H2F_E_VISIBLE);
+ }
+ else
+ {
+ return ALT_E_INV_OPTION;
+ }
+
+ if (lwh2f_bridge_attr == ALT_ADDR_SPACE_LWH2F_INACCESSIBLE)
+ {
+ remap_reg_val |= ALT_L3_REMAP_LWH2F_SET(ALT_L3_REMAP_LWH2F_E_INVISIBLE);
+ }
+ else if (lwh2f_bridge_attr == ALT_ADDR_SPACE_LWH2F_ACCESSIBLE)
+ {
+ remap_reg_val |= ALT_L3_REMAP_LWH2F_SET(ALT_L3_REMAP_LWH2F_E_VISIBLE);
+ }
+ else
+ {
+ return ALT_E_INV_OPTION;
+ }
+
+ // Perform the remap.
+ alt_write_word(ALT_L3_REMAP_ADDR, remap_reg_val);
+
+ return ALT_E_SUCCESS;
+}
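+
+/*
+ * Usage sketch (illustrative only, not part of the hwlib sources): one
+ * possible post-boot configuration that maps on-chip RAM to address 0 for
+ * both the MPU and non-MPU masters and makes both HPS-to-FPGA bridges
+ * visible. Whether the bridges may be exposed depends on the FPGA design.
+ */
+#if 0
+static ALT_STATUS_CODE example_remap_after_boot(void)
+{
+    return alt_addr_space_remap(ALT_ADDR_SPACE_MPU_ZERO_AT_OCRAM,
+                                ALT_ADDR_SPACE_NONMPU_ZERO_AT_OCRAM,
+                                ALT_ADDR_SPACE_H2F_ACCESSIBLE,
+                                ALT_ADDR_SPACE_LWH2F_ACCESSIBLE);
+}
+#endif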
+
+/******************************************************************************/
+// Remap the MPU address space view of address 0 to access the SDRAM controller.
+// This is done by setting the L2 cache address filtering register start address
+// to 0 and leaving the address filtering end address value
+// unmodified. This causes all physical addresses in the range
+// address_filter_start <= physical_address < address_filter_end to be directed
+// to the AXI Master Port M1, which is connected to the SDRAM
+// controller. All other addresses are directed to AXI Master Port M0, which
+// connects the MPU subsystem to the L3 interconnect.
+//
+// It is unnecessary to modify the MPU remap options in the L3 remap register
+// because those options only affect addresses in the MPU subsystem address
+// ranges that are now redirected to the SDRAM controller and never reach the L3
+// interconnect anyway.
+ALT_STATUS_CODE alt_mpu_addr_space_remap_0_to_sdram(void)
+{
+ uint32_t addr_filt_end = (alt_read_word(L2_CACHE_ADDR_FILTERING_END_ADDR) &
+ L2_CACHE_ADDR_FILTERING_END_ADDR_MASK);
+ return alt_l2_addr_filter_cfg_set(0x0, addr_filt_end);
+}
+
+/******************************************************************************/
+// Return the L2 cache address filtering register configuration settings in
+// the user-provided start and end address out parameters.
+ALT_STATUS_CODE alt_l2_addr_filter_cfg_get(uint32_t* addr_filt_start,
+ uint32_t* addr_filt_end)
+{
+ if (addr_filt_start == NULL || addr_filt_end == NULL)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ uint32_t addr_filt_start_reg = alt_read_word(L2_CACHE_ADDR_FILTERING_START_ADDR);
+ uint32_t addr_filt_end_reg = alt_read_word(L2_CACHE_ADDR_FILTERING_END_ADDR);
+
+ *addr_filt_start = (addr_filt_start_reg & L2_CACHE_ADDR_FILTERING_START_ADDR_MASK);
+ *addr_filt_end = (addr_filt_end_reg & L2_CACHE_ADDR_FILTERING_END_ADDR_MASK);
+ return ALT_E_SUCCESS;
+}
+
+/******************************************************************************/
+ALT_STATUS_CODE alt_l2_addr_filter_cfg_set(uint32_t addr_filt_start,
+ uint32_t addr_filt_end)
+{
+ // Address filtering start and end values must be 1 MB aligned.
+ if ( (addr_filt_start & ~L2_CACHE_ADDR_FILTERING_START_ADDR_MASK)
+ || (addr_filt_end & ~L2_CACHE_ADDR_FILTERING_END_ADDR_MASK) )
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ // While it is possible to set the address filtering end value above its
+ // reset value and thereby access a larger SDRAM address range, it is not
+ // recommended. Doing so would potentially obscure any mapped HPS to FPGA
+ // bridge address spaces and peripherals on the L3 interconnect.
+ if (addr_filt_end > L2_CACHE_ADDR_FILTERING_END_RESET)
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ // NOTE: ARM (ARM DDI 0246F CoreLink Level 2 Cache Controller L2C-310 TRM)
+ // recommends programming the Address Filtering End Register before the
+ // Address Filtering Start Register to avoid unpredictable behavior between
+ // the two writes.
+ alt_write_word(L2_CACHE_ADDR_FILTERING_END_ADDR, addr_filt_end);
+ // It is recommended that address filtering always remain enabled.
+ addr_filt_start |= L2_CACHE_ADDR_FILTERING_ENABLE_MASK;
+ alt_write_word(L2_CACHE_ADDR_FILTERING_START_ADDR, addr_filt_start);
+
+ return ALT_E_SUCCESS;
+}
+
+/******************************************************************************/
+ALT_STATUS_CODE alt_acp_id_map_fixed_read_set(const uint32_t input_id,
+ const uint32_t output_id,
+ const ALT_ACP_ID_MAP_PAGE_t page,
+ const uint32_t aruser)
+{
+ if (input_id > ALT_ACP_ID_OUT_DYNAM_ID_7 || output_id == ALT_ACP_ID_MAX_OUTPUT_ID)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ switch (output_id)
+ {
+ case ALT_ACP_ID_OUT_FIXED_ID_2:
+ alt_write_word(ALT_ACPIDMAP_VID2RD_ADDR,
+ ALT_ACPIDMAP_VID2RD_MID_SET(input_id)
+ | ALT_ACPIDMAP_VID2RD_PAGE_SET(page)
+ | ALT_ACPIDMAP_VID2RD_USER_SET(aruser)
+ | ALT_ACPIDMAP_VID2RD_FORCE_SET(1UL));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_3:
+ alt_write_word(ALT_ACPIDMAP_VID3RD_ADDR,
+ ALT_ACPIDMAP_VID3RD_MID_SET(input_id)
+ | ALT_ACPIDMAP_VID3RD_PAGE_SET(page)
+ | ALT_ACPIDMAP_VID3RD_USER_SET(aruser)
+ | ALT_ACPIDMAP_VID3RD_FORCE_SET(1UL));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_4:
+ alt_write_word(ALT_ACPIDMAP_VID4RD_ADDR,
+ ALT_ACPIDMAP_VID4RD_MID_SET(input_id)
+ | ALT_ACPIDMAP_VID4RD_PAGE_SET(page)
+ | ALT_ACPIDMAP_VID4RD_USER_SET(aruser)
+ | ALT_ACPIDMAP_VID4RD_FORCE_SET(1UL));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_5:
+ alt_write_word(ALT_ACPIDMAP_VID5RD_ADDR,
+ ALT_ACPIDMAP_VID5RD_MID_SET(input_id)
+ | ALT_ACPIDMAP_VID5RD_PAGE_SET(page)
+ | ALT_ACPIDMAP_VID5RD_USER_SET(aruser)
+ | ALT_ACPIDMAP_VID5RD_FORCE_SET(1UL));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_6:
+ alt_write_word(ALT_ACPIDMAP_VID6RD_ADDR,
+ ALT_ACPIDMAP_VID6RD_MID_SET(input_id)
+ | ALT_ACPIDMAP_VID6RD_PAGE_SET(page)
+ | ALT_ACPIDMAP_VID6RD_USER_SET(aruser)
+ | ALT_ACPIDMAP_VID6RD_FORCE_SET(1UL));
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+/******************************************************************************/
+ALT_STATUS_CODE alt_acp_id_map_fixed_write_set(const uint32_t input_id,
+ const uint32_t output_id,
+ const ALT_ACP_ID_MAP_PAGE_t page,
+ const uint32_t awuser)
+{
+ if (input_id > ALT_ACP_ID_OUT_DYNAM_ID_7 || output_id == ALT_ACP_ID_MAX_OUTPUT_ID)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ switch (output_id)
+ {
+ case ALT_ACP_ID_OUT_FIXED_ID_2:
+ alt_write_word(ALT_ACPIDMAP_VID2WR_ADDR,
+ ALT_ACPIDMAP_VID2WR_MID_SET(input_id)
+ | ALT_ACPIDMAP_VID2WR_PAGE_SET(page)
+ | ALT_ACPIDMAP_VID2WR_USER_SET(awuser)
+ | ALT_ACPIDMAP_VID2WR_FORCE_SET(1UL));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_3:
+ alt_write_word(ALT_ACPIDMAP_VID3WR_ADDR,
+ ALT_ACPIDMAP_VID3WR_MID_SET(input_id)
+ | ALT_ACPIDMAP_VID3WR_PAGE_SET(page)
+ | ALT_ACPIDMAP_VID3WR_USER_SET(awuser)
+ | ALT_ACPIDMAP_VID3WR_FORCE_SET(1UL));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_4:
+ alt_write_word(ALT_ACPIDMAP_VID4WR_ADDR,
+ ALT_ACPIDMAP_VID4WR_MID_SET(input_id)
+ | ALT_ACPIDMAP_VID4WR_PAGE_SET(page)
+ | ALT_ACPIDMAP_VID4WR_USER_SET(awuser)
+ | ALT_ACPIDMAP_VID4WR_FORCE_SET(1UL));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_5:
+ alt_write_word(ALT_ACPIDMAP_VID5WR_ADDR,
+ ALT_ACPIDMAP_VID5WR_MID_SET(input_id)
+ | ALT_ACPIDMAP_VID5WR_PAGE_SET(page)
+ | ALT_ACPIDMAP_VID5WR_USER_SET(awuser)
+ | ALT_ACPIDMAP_VID5WR_FORCE_SET(1UL));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_6:
+ alt_write_word(ALT_ACPIDMAP_VID6WR_ADDR,
+ ALT_ACPIDMAP_VID6WR_MID_SET(input_id)
+ | ALT_ACPIDMAP_VID6WR_PAGE_SET(page)
+ | ALT_ACPIDMAP_VID6WR_USER_SET(awuser)
+ | ALT_ACPIDMAP_VID6WR_FORCE_SET(1UL)
+ );
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+/******************************************************************************/
+ALT_STATUS_CODE alt_acp_id_map_dynamic_read_set(const uint32_t output_id)
+{
+ if (output_id == ALT_ACP_ID_MAX_OUTPUT_ID)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ uint32_t aruser, page;
+
+ switch (output_id)
+ {
+ case ALT_ACP_ID_OUT_FIXED_ID_2:
+ aruser = ALT_ACPIDMAP_VID2RD_USER_GET(alt_read_word(ALT_ACPIDMAP_VID2RD_ADDR));
+ page = ALT_ACPIDMAP_VID2RD_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID2RD_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_3:
+ aruser = ALT_ACPIDMAP_VID3RD_USER_GET(alt_read_word(ALT_ACPIDMAP_VID3RD_ADDR));
+ page = ALT_ACPIDMAP_VID3RD_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID3RD_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_4:
+ aruser = ALT_ACPIDMAP_VID4RD_USER_GET(alt_read_word(ALT_ACPIDMAP_VID4RD_ADDR));
+ page = ALT_ACPIDMAP_VID4RD_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID4RD_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_5:
+ aruser = ALT_ACPIDMAP_VID5RD_USER_GET(alt_read_word(ALT_ACPIDMAP_VID5RD_ADDR));
+ page = ALT_ACPIDMAP_VID5RD_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID5RD_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_6:
+ aruser = ALT_ACPIDMAP_VID6RD_USER_GET(alt_read_word(ALT_ACPIDMAP_VID6RD_ADDR));
+ page = ALT_ACPIDMAP_VID6RD_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID6RD_ADDR));
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ alt_write_word(ALT_ACPIDMAP_DYNRD_ADDR,
+ ALT_ACPIDMAP_DYNRD_PAGE_SET(page)
+ | ALT_ACPIDMAP_DYNRD_USER_SET(aruser));
+ return ALT_E_SUCCESS;
+}
+
+/******************************************************************************/
+ALT_STATUS_CODE alt_acp_id_map_dynamic_write_set(const uint32_t output_id)
+{
+ if (output_id == ALT_ACP_ID_MAX_OUTPUT_ID)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ uint32_t awuser, page;
+
+ switch (output_id)
+ {
+ case ALT_ACP_ID_OUT_FIXED_ID_2:
+ awuser = ALT_ACPIDMAP_VID2WR_USER_GET(alt_read_word(ALT_ACPIDMAP_VID2WR_ADDR));
+ page = ALT_ACPIDMAP_VID2WR_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID2WR_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_3:
+ awuser = ALT_ACPIDMAP_VID3WR_USER_GET(alt_read_word(ALT_ACPIDMAP_VID3WR_ADDR));
+ page = ALT_ACPIDMAP_VID3WR_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID3WR_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_4:
+ awuser = ALT_ACPIDMAP_VID4WR_USER_GET(alt_read_word(ALT_ACPIDMAP_VID4WR_ADDR));
+ page = ALT_ACPIDMAP_VID4WR_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID4WR_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_5:
+ awuser = ALT_ACPIDMAP_VID5WR_USER_GET(alt_read_word(ALT_ACPIDMAP_VID5WR_ADDR));
+ page = ALT_ACPIDMAP_VID5WR_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID5WR_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_6:
+ awuser = ALT_ACPIDMAP_VID6WR_USER_GET(alt_read_word(ALT_ACPIDMAP_VID6WR_ADDR));
+ page = ALT_ACPIDMAP_VID6WR_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID6WR_ADDR));
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ alt_write_word(ALT_ACPIDMAP_DYNWR_ADDR,
+ ALT_ACPIDMAP_DYNWR_PAGE_SET(page)
+ | ALT_ACPIDMAP_DYNWR_USER_SET(awuser));
+ return ALT_E_SUCCESS;
+}
+
+/******************************************************************************/
+ALT_STATUS_CODE alt_acp_id_map_dynamic_read_options_set(const ALT_ACP_ID_MAP_PAGE_t page,
+ const uint32_t aruser)
+{
+ alt_write_word(ALT_ACPIDMAP_DYNRD_ADDR,
+ ALT_ACPIDMAP_DYNRD_PAGE_SET(page)
+ | ALT_ACPIDMAP_DYNRD_USER_SET(aruser));
+ return ALT_E_SUCCESS;
+}
+
+/******************************************************************************/
+ALT_STATUS_CODE alt_acp_id_map_dynamic_write_options_set(const ALT_ACP_ID_MAP_PAGE_t page,
+ const uint32_t awuser)
+{
+ alt_write_word(ALT_ACPIDMAP_DYNWR_ADDR,
+ ALT_ACPIDMAP_DYNWR_PAGE_SET(page)
+ | ALT_ACPIDMAP_DYNWR_USER_SET(awuser));
+ return ALT_E_SUCCESS;
+}
+
+/******************************************************************************/
+ALT_STATUS_CODE alt_acp_id_map_read_options_get(const uint32_t output_id,
+ bool * fixed,
+ uint32_t * input_id,
+ ALT_ACP_ID_MAP_PAGE_t * page,
+ uint32_t * aruser)
+{
+ if (output_id == ALT_ACP_ID_MAX_OUTPUT_ID)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ switch (output_id)
+ {
+ case ALT_ACP_ID_OUT_FIXED_ID_2:
+ *aruser = ALT_ACPIDMAP_VID2RD_S_USER_GET(alt_read_word(ALT_ACPIDMAP_VID2RD_S_ADDR));
+ *page = (ALT_ACP_ID_MAP_PAGE_t)ALT_ACPIDMAP_VID2RD_S_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID2RD_S_ADDR));
+ *input_id = ALT_ACPIDMAP_VID2RD_S_MID_GET(alt_read_word(ALT_ACPIDMAP_VID2RD_S_ADDR));
+ *fixed = ALT_ACPIDMAP_VID2RD_S_FORCE_GET(alt_read_word(ALT_ACPIDMAP_VID2RD_S_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_3:
+ *aruser = ALT_ACPIDMAP_VID3RD_S_USER_GET(alt_read_word(ALT_ACPIDMAP_VID3RD_S_ADDR));
+ *page = (ALT_ACP_ID_MAP_PAGE_t)ALT_ACPIDMAP_VID3RD_S_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID3RD_S_ADDR));
+ *input_id = ALT_ACPIDMAP_VID3RD_S_MID_GET(alt_read_word(ALT_ACPIDMAP_VID3RD_S_ADDR));
+ *fixed = ALT_ACPIDMAP_VID3RD_S_FORCE_GET(alt_read_word(ALT_ACPIDMAP_VID3RD_S_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_4:
+ *aruser = ALT_ACPIDMAP_VID4RD_S_USER_GET(alt_read_word(ALT_ACPIDMAP_VID4RD_S_ADDR));
+ *page = (ALT_ACP_ID_MAP_PAGE_t)ALT_ACPIDMAP_VID4RD_S_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID4RD_S_ADDR));
+ *input_id = ALT_ACPIDMAP_VID4RD_S_MID_GET(alt_read_word(ALT_ACPIDMAP_VID4RD_S_ADDR));
+ *fixed = ALT_ACPIDMAP_VID4RD_S_FORCE_GET(alt_read_word(ALT_ACPIDMAP_VID4RD_S_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_5:
+ *aruser = ALT_ACPIDMAP_VID5RD_S_USER_GET(alt_read_word(ALT_ACPIDMAP_VID5RD_S_ADDR));
+ *page = (ALT_ACP_ID_MAP_PAGE_t)ALT_ACPIDMAP_VID5RD_S_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID5RD_S_ADDR));
+ *input_id = ALT_ACPIDMAP_VID5RD_S_MID_GET(alt_read_word(ALT_ACPIDMAP_VID5RD_S_ADDR));
+ *fixed = ALT_ACPIDMAP_VID5RD_S_FORCE_GET(alt_read_word(ALT_ACPIDMAP_VID5RD_S_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_6:
+ *aruser = ALT_ACPIDMAP_VID6RD_S_USER_GET(alt_read_word(ALT_ACPIDMAP_VID6RD_S_ADDR));
+ *page = (ALT_ACP_ID_MAP_PAGE_t)ALT_ACPIDMAP_VID6RD_S_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID6RD_S_ADDR));
+ *input_id = ALT_ACPIDMAP_VID6RD_S_MID_GET(alt_read_word(ALT_ACPIDMAP_VID6RD_S_ADDR));
+ *fixed = ALT_ACPIDMAP_VID6RD_S_FORCE_GET(alt_read_word(ALT_ACPIDMAP_VID6RD_S_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_7:
+ *aruser = ALT_ACPIDMAP_DYNRD_S_USER_GET(alt_read_word(ALT_ACPIDMAP_DYNRD_S_ADDR));
+ *page = (ALT_ACP_ID_MAP_PAGE_t)ALT_ACPIDMAP_DYNRD_S_PAGE_GET(alt_read_word(ALT_ACPIDMAP_DYNRD_S_ADDR));
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_acp_id_map_write_options_get(const uint32_t output_id,
+ bool * fixed,
+ uint32_t * input_id,
+ ALT_ACP_ID_MAP_PAGE_t * page,
+ uint32_t * awuser)
+{
+ if (output_id == ALT_ACP_ID_MAX_OUTPUT_ID)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ switch (output_id)
+ {
+ case ALT_ACP_ID_OUT_FIXED_ID_2:
+ *awuser = ALT_ACPIDMAP_VID2WR_S_USER_GET(alt_read_word(ALT_ACPIDMAP_VID2WR_S_ADDR));
+ *page = (ALT_ACP_ID_MAP_PAGE_t)ALT_ACPIDMAP_VID2WR_S_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID2WR_S_ADDR));
+ *input_id = ALT_ACPIDMAP_VID2WR_S_MID_GET(alt_read_word(ALT_ACPIDMAP_VID2WR_S_ADDR));
+ *fixed = ALT_ACPIDMAP_VID2WR_S_FORCE_GET(alt_read_word(ALT_ACPIDMAP_VID2WR_S_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_3:
+ *awuser = ALT_ACPIDMAP_VID3WR_S_USER_GET(alt_read_word(ALT_ACPIDMAP_VID3WR_S_ADDR));
+ *page = (ALT_ACP_ID_MAP_PAGE_t)ALT_ACPIDMAP_VID3WR_S_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID3WR_S_ADDR));
+ *input_id = ALT_ACPIDMAP_VID3WR_S_MID_GET(alt_read_word(ALT_ACPIDMAP_VID3WR_S_ADDR));
+ *fixed = ALT_ACPIDMAP_VID3WR_S_FORCE_GET(alt_read_word(ALT_ACPIDMAP_VID3WR_S_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_4:
+ *awuser = ALT_ACPIDMAP_VID4WR_S_USER_GET(alt_read_word(ALT_ACPIDMAP_VID4WR_S_ADDR));
+ *page = (ALT_ACP_ID_MAP_PAGE_t)ALT_ACPIDMAP_VID4WR_S_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID4WR_S_ADDR));
+ *input_id = ALT_ACPIDMAP_VID4WR_S_MID_GET(alt_read_word(ALT_ACPIDMAP_VID4WR_S_ADDR));
+ *fixed = ALT_ACPIDMAP_VID4WR_S_FORCE_GET(alt_read_word(ALT_ACPIDMAP_VID4WR_S_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_5:
+ *awuser = ALT_ACPIDMAP_VID5WR_S_USER_GET(alt_read_word(ALT_ACPIDMAP_VID5WR_S_ADDR));
+ *page = (ALT_ACP_ID_MAP_PAGE_t)ALT_ACPIDMAP_VID5WR_S_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID5WR_S_ADDR));
+ *input_id = ALT_ACPIDMAP_VID5WR_S_MID_GET(alt_read_word(ALT_ACPIDMAP_VID5WR_S_ADDR));
+ *fixed = ALT_ACPIDMAP_VID5WR_S_FORCE_GET(alt_read_word(ALT_ACPIDMAP_VID5WR_S_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_6:
+ *awuser = ALT_ACPIDMAP_VID6WR_S_USER_GET(alt_read_word(ALT_ACPIDMAP_VID6WR_S_ADDR));
+ *page = (ALT_ACP_ID_MAP_PAGE_t)ALT_ACPIDMAP_VID6WR_S_PAGE_GET(alt_read_word(ALT_ACPIDMAP_VID6WR_S_ADDR));
+ *input_id = ALT_ACPIDMAP_VID6WR_S_MID_GET(alt_read_word(ALT_ACPIDMAP_VID6WR_S_ADDR));
+ *fixed = ALT_ACPIDMAP_VID6WR_S_FORCE_GET(alt_read_word(ALT_ACPIDMAP_VID6WR_S_ADDR));
+ break;
+ case ALT_ACP_ID_OUT_DYNAM_ID_7:
+ *awuser = ALT_ACPIDMAP_DYNWR_S_USER_GET(alt_read_word(ALT_ACPIDMAP_DYNWR_S_ADDR));
+ *page = (ALT_ACP_ID_MAP_PAGE_t)ALT_ACPIDMAP_DYNWR_S_PAGE_GET(alt_read_word(ALT_ACPIDMAP_DYNWR_S_ADDR));
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ return ALT_E_SUCCESS;
+}
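+
+/*
+ * Usage sketch (illustrative only): a hypothetical caller reading back the
+ * current write ID mapping options for one of the dynamic output IDs.
+ *
+ *     bool                  fixed;
+ *     uint32_t              input_id;
+ *     ALT_ACP_ID_MAP_PAGE_t page;
+ *     uint32_t              awuser;
+ *
+ *     if (alt_acp_id_map_write_options_get(ALT_ACP_ID_OUT_DYNAM_ID_3,
+ *                                          &fixed, &input_id, &page,
+ *                                          &awuser) == ALT_E_SUCCESS)
+ *     {
+ *         // fixed, input_id, page, and awuser now hold the register fields
+ *     }
+ */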
diff --git a/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_clock_manager.c b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_clock_manager.c
new file mode 100644
index 0000000000..95404c1c13
--- /dev/null
+++ b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_clock_manager.c
@@ -0,0 +1,5554 @@
+/******************************************************************************
+ *
+ * Copyright 2013 Altera Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdio.h>
+
+#include <bsp/socal/hps.h>
+#include <bsp/socal/socal.h>
+#include <bsp/socal/alt_sysmgr.h>
+#include <bsp/hwlib.h>
+#include <bsp/alt_clock_manager.h>
+#include <bsp/alt_mpu_registers.h>
+
+#define UINT12_MAX (4096)
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Useful Structures ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+
+ /* General structure used to hold parameters of various clock entities. */
+typedef struct ALT_CLK_PARAMS_s
+{
+ alt_freq_t freqcur; // current frequency of the clock
+ alt_freq_t freqmin; // minimum allowed frequency for this clock
+ alt_freq_t freqmax; // maximum allowed frequency for this clock
+ uint32_t guardband : 7; // guardband percentage (0-100) if this clock
+ // is a PLL, ignored otherwise
+ uint32_t active : 1; // current state of activity of this clock
+} ALT_CLK_PARAMS_t;
+
+
+typedef struct ALT_EXT_CLK_PARAMBLOK_s
+{
+ ALT_CLK_PARAMS_t clkosc1; // ALT_CLK_OSC1
+ ALT_CLK_PARAMS_t clkosc2; // ALT_CLK_OSC2
+ ALT_CLK_PARAMS_t periph; // ALT_CLK_F2H_PERIPH_REF
+ ALT_CLK_PARAMS_t sdram; // ALT_CLK_F2H_SDRAM_REF
+} ALT_EXT_CLK_PARAMBLOK_t;
+
+
+ /* Initializes the External Clock Frequency Limits block */
+ /* The first field is the current external clock frequency, and can be set by */
+ /* alt_clk_ext_clk_freq_set(), the second and third fields are the minimum and */
+ /* maximum frequencies, the fourth field is ignored, and the fifth field */
+ /* contains the current activity state of the clock, 1=active, 0=inactive. */
+ /* Values taken from Section 2.3 and Section 2.7.1 of the HHP HPS-Clocking */
+ /* NPP specification. */
+static ALT_EXT_CLK_PARAMBLOK_t alt_ext_clk_paramblok =
+{
+ { 25000000, 10000000, 50000000, 0, 1 },
+ { 25000000, 10000000, 50000000, 0, 1 },
+ { 0, 10000000, 50000000, 0, 1 },
+ { 0, 10000000, 50000000, 0, 1 }
+};
+
+
+ /* PLL frequency limits */
+typedef struct ALT_PLL_CLK_PARAMBLOK_s
+{
+ ALT_CLK_PARAMS_t MainPLL_600; // Main PLL values for 600 MHz SoC
+ ALT_CLK_PARAMS_t PeriphPLL_600; // Peripheral PLL values for 600 MHz SoC
+ ALT_CLK_PARAMS_t SDRAMPLL_600; // SDRAM PLL values for 600 MHz SoC
+ ALT_CLK_PARAMS_t MainPLL_800; // Main PLL values for 800 MHz SoC
+ ALT_CLK_PARAMS_t PeriphPLL_800; // Peripheral PLL values for 800 MHz SoC
+ ALT_CLK_PARAMS_t SDRAMPLL_800; // SDRAM PLL values for 800 MHz SoC
+} ALT_PLL_CLK_PARAMBLOK_t;
+
+
+ /* Initializes the PLL frequency limits block */
+ /* The first field is the current frequency, the second and third fields */
+ /* are the design limits of the PLLs as listed in Section 3.2.1.2 of the */
+ /* HHP HPS-Clocking NPP document. The fourth field of each line is the */
+ /* guardband percentage, and the fifth field of each line is the current */
+ /* state of the PLL, 1=active, 0=inactive. */
+#define ALT_ORIGINAL_GUARDBAND_VAL 20
+#define ALT_GUARDBAND_LIMIT 20
+
+static ALT_PLL_CLK_PARAMBLOK_t alt_pll_clk_paramblok =
+{
+ { 0, 320000000, 1200000000, ALT_ORIGINAL_GUARDBAND_VAL, 0 },
+ { 0, 320000000, 900000000, ALT_ORIGINAL_GUARDBAND_VAL, 0 },
+ { 0, 320000000, 800000000, ALT_ORIGINAL_GUARDBAND_VAL, 0 },
+ { 0, 320000000, 1600000000, ALT_ORIGINAL_GUARDBAND_VAL, 1 },
+ { 0, 320000000, 1250000000, ALT_ORIGINAL_GUARDBAND_VAL, 1 },
+ { 0, 320000000, 1066000000, ALT_ORIGINAL_GUARDBAND_VAL, 1 }
+};
+
+
+ /* PLL counter frequency limits */
+typedef struct ALT_PLL_CNTR_FREQMAX_s
+{
+ alt_freq_t MainPLL_C0; // Main PLL Counter 0 parameter block
+ alt_freq_t MainPLL_C1; // Main PLL Counter 1 parameter block
+ alt_freq_t MainPLL_C2; // Main PLL Counter 2 parameter block
+ alt_freq_t MainPLL_C3; // Main PLL Counter 3 parameter block
+ alt_freq_t MainPLL_C4; // Main PLL Counter 4 parameter block
+ alt_freq_t MainPLL_C5; // Main PLL Counter 5 parameter block
+ alt_freq_t PeriphPLL_C0; // Peripheral PLL Counter 0 parameter block
+ alt_freq_t PeriphPLL_C1; // Peripheral PLL Counter 1 parameter block
+ alt_freq_t PeriphPLL_C2; // Peripheral PLL Counter 2 parameter block
+ alt_freq_t PeriphPLL_C3; // Peripheral PLL Counter 3 parameter block
+ alt_freq_t PeriphPLL_C4; // Peripheral PLL Counter 4 parameter block
+ alt_freq_t PeriphPLL_C5; // Peripheral PLL Counter 5 parameter block
+ alt_freq_t SDRAMPLL_C0; // SDRAM PLL Counter 0 parameter block
+ alt_freq_t SDRAMPLL_C1; // SDRAM PLL Counter 1 parameter block
+ alt_freq_t SDRAMPLL_C2; // SDRAM PLL Counter 2 parameter block
+ alt_freq_t SDRAMPLL_C5; // SDRAM PLL Counter 5 parameter block
+} ALT_PLL_CNTR_FREQMAX_t;
+
+//
+// The following statically defined PLL maximum frequency array must be recalculated
+// each time the device is powered up, by calling alt_clk_clkmgr_init().
+//
+// For the 14.1 U-Boot preloader, the following values are calculated dynamically.
+//
+// Arria V
+// alt_pll_cntr_maxfreq.MainPLL_C0 = 1050000000
+// alt_pll_cntr_maxfreq.MainPLL_C1 = 350000000
+// alt_pll_cntr_maxfreq.MainPLL_C2 = 262500000
+// alt_pll_cntr_maxfreq.MainPLL_C3 = 350000000
+// alt_pll_cntr_maxfreq.MainPLL_C4 = 2050781
+// alt_pll_cntr_maxfreq.MainPLL_C5 = 116666666
+// alt_pll_cntr_maxfreq.PeriphPLL_C0 = 1953125
+// alt_pll_cntr_maxfreq.PeriphPLL_C1 = 250000000
+// alt_pll_cntr_maxfreq.PeriphPLL_C2 = 1953125
+// alt_pll_cntr_maxfreq.PeriphPLL_C3 = 200000000
+// alt_pll_cntr_maxfreq.PeriphPLL_C4 = 200000000
+// alt_pll_cntr_maxfreq.PeriphPLL_C5 = 1953125
+// alt_pll_cntr_maxfreq.SDRAMPLL_C0 = 533333333
+// alt_pll_cntr_maxfreq.SDRAMPLL_C1 = 1066666666
+// alt_pll_cntr_maxfreq.SDRAMPLL_C2 = 533333333
+// alt_pll_cntr_maxfreq.SDRAMPLL_C5 = 177777777
+
+// Cyclone V
+// alt_pll_cntr_maxfreq.MainPLL_C0 = 925000000
+// alt_pll_cntr_maxfreq.MainPLL_C1 = 370000000
+// alt_pll_cntr_maxfreq.MainPLL_C2 = 462500000
+// alt_pll_cntr_maxfreq.MainPLL_C3 = 370000000
+// alt_pll_cntr_maxfreq.MainPLL_C4 = 3613281
+// alt_pll_cntr_maxfreq.MainPLL_C5 = 123333333
+// alt_pll_cntr_maxfreq.PeriphPLL_C0 = 1953125
+// alt_pll_cntr_maxfreq.PeriphPLL_C1 = 250000000
+// alt_pll_cntr_maxfreq.PeriphPLL_C2 = 1953125
+// alt_pll_cntr_maxfreq.PeriphPLL_C3 = 200000000
+// alt_pll_cntr_maxfreq.PeriphPLL_C4 = 200000000
+// alt_pll_cntr_maxfreq.PeriphPLL_C5 = 1953125
+// alt_pll_cntr_maxfreq.SDRAMPLL_C0 = 400000000
+// alt_pll_cntr_maxfreq.SDRAMPLL_C1 = 800000000
+// alt_pll_cntr_maxfreq.SDRAMPLL_C2 = 400000000
+// alt_pll_cntr_maxfreq.SDRAMPLL_C5 = 133333333
+
+
+/* Initializes the PLL Counter output maximum frequency block */
+static ALT_PLL_CNTR_FREQMAX_t alt_pll_cntr_maxfreq =
+{
+ 800000000, /* Main PLL Outputs */
+ 400000000,
+ 400000000,
+ 432000000,
+ 250000000,
+ 125000000,
+ 250000000, /* Peripheral PLL Outputs */
+ 250000000,
+ 432000000,
+ 250000000,
+ 200000000,
+ 100000000,
+ 533000000, /* SDRAM PLL Outputs */
+ 1066000000,
+ 533000000,
+ 200000000
+};
+
+
+
+ /* Maximum multiply, divide, and counter divisor values for each PLL */
+#define ALT_CLK_PLL_MULT_MAX 4095
+#define ALT_CLK_PLL_DIV_MAX 63
+#define ALT_CLK_PLL_CNTR_MAX 511
+
+
+ /* Definitions for the reset request and reset acknowledge bits */
+ /* for each of the output counters for each of the PLLS */
+#define ALT_CLK_PLL_RST_BIT_C0 0x00000001
+#define ALT_CLK_PLL_RST_BIT_C1 0x00000002
+#define ALT_CLK_PLL_RST_BIT_C2 0x00000004
+#define ALT_CLK_PLL_RST_BIT_C3 0x00000008
+#define ALT_CLK_PLL_RST_BIT_C4 0x00000010
+#define ALT_CLK_PLL_RST_BIT_C5 0x00000020
+
+
+ /* These are the bits that deal with PLL lock and this macro */
+ /* defines a mask to test for bits outside of these */
+#define ALT_CLK_MGR_PLL_LOCK_BITS (ALT_CLKMGR_INTREN_MAINPLLACHIEVED_CLR_MSK \
+ & ALT_CLKMGR_INTREN_PERPLLACHIEVED_CLR_MSK \
+ & ALT_CLKMGR_INTREN_SDRPLLACHIEVED_CLR_MSK \
+ & ALT_CLKMGR_INTREN_MAINPLLLOST_CLR_MSK \
+ & ALT_CLKMGR_INTREN_PERPLLLOST_CLR_MSK \
+ & ALT_CLKMGR_INTREN_SDRPLLLOST_CLR_MSK)
+
+
+// Undocumented register which determines clock dividers for main PLL C0, C1, and C2. These should be considered RO.
+#define ALT_CLKMGR_ALTERA_OFST 0xe0
+#define ALT_CLKMGR_ALTERA_MPUCLK_OFST 0x0
+#define ALT_CLKMGR_ALTERA_MAINCLK_OFST 0x4
+#define ALT_CLKMGR_ALTERA_DBGATCLK_OFST 0x8
+#define ALT_CLKMGR_ALTERA_ADDR ALT_CAST(void *, (ALT_CAST(char *, ALT_CLKMGR_ADDR) + ALT_CLKMGR_ALTERA_OFST))
+#define ALT_CLKMGR_ALTERA_MPUCLK_ADDR ALT_CAST(void *, (ALT_CAST(char *, ALT_CLKMGR_ALTERA_ADDR) + ALT_CLKMGR_ALTERA_MPUCLK_OFST))
+#define ALT_CLKMGR_ALTERA_MAINCLK_ADDR ALT_CAST(void *, (ALT_CAST(char *, ALT_CLKMGR_ALTERA_ADDR) + ALT_CLKMGR_ALTERA_MAINCLK_OFST))
+#define ALT_CLKMGR_ALTERA_DBGATCLK_ADDR ALT_CAST(void *, (ALT_CAST(char *, ALT_CLKMGR_ALTERA_ADDR) + ALT_CLKMGR_ALTERA_DBGATCLK_OFST))
+#define ALT_CLKMGR_ALTERA_MPUCLK_CNT_GET(value) (((value) & 0x000001ff) >> 0)
+#define ALT_CLKMGR_ALTERA_MAINCLK_CNT_GET(value) (((value) & 0x000001ff) >> 0)
+#define ALT_CLKMGR_ALTERA_DBGATCLK_CNT_GET(value) (((value) & 0x000001ff) >> 0)
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Utility functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+
+/****************************************************************************************/
+/* alt_clk_mgr_wait() introduces a delay, not very exact, but very light in */
+/* implementation. Depending upon the optimization level, it will wait at least the    */
+/* number of clock cycles specified in the cnt parameter, sometimes many more. The     */
+/* reg parameter is set to a register or a memory location that was recently used (so  */
+/* as to avoid accidentally evicting a register and a recently-used cache line in favor */
+/* of one whose values are not actually needed). The cnt parameter sets the number of  */
+/* repeated volatile memory reads and so sets a minimum time delay measured in */
+/* mpu_clk cycles. If mpu_clk = osc1 clock (as in bypass mode), then this gives a */
+/* minimum osc1 clock cycle delay. */
+/****************************************************************************************/
+
+inline static void alt_clk_mgr_wait(void* reg, uint32_t cnt)
+{
+ for (; cnt ; cnt--)
+ {
+ (void) alt_read_word(reg);
+ }
+}
+
+ /* Wait time constants */
+ /* These values came from Section 4.9.4 of the HHP HPS-Clocking NPP document */
+#define ALT_SW_MANAGED_CLK_WAIT_CTRDIV 30 /* 30 or more MPU clock cycles */
+#define ALT_SW_MANAGED_CLK_WAIT_HWCTRDIV 40
+#define ALT_SW_MANAGED_CLK_WAIT_BYPASS 30
+#define ALT_SW_MANAGED_CLK_WAIT_SAFEREQ 30
+#define ALT_SW_MANAGED_CLK_WAIT_SAFEEXIT 30
+#define ALT_SW_MANAGED_CLK_WAIT_NANDCLK 8 /* 8 or more MPU clock cycles */
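+
+/*
+ * Usage sketch (illustrative only): the delay helper above is normally paired
+ * with one of these wait constants after touching a clock enable register,
+ * for example:
+ *
+ *     alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ *
+ * which performs at least ALT_SW_MANAGED_CLK_WAIT_CTRDIV volatile reads of the
+ * recently used register and so waits at least that many mpu_clk cycles.
+ */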
+
+
+#define ALT_BYPASS_TIMEOUT_CNT 50
+ // arbitrary number until more information is available
+#define ALT_TIMEOUT_PHASE_SYNC 300
+ // how many loops to wait for the SDRAM clock to come around
+ // to zero and allow for writing a new divisor ratio to it
+
+ALT_STATUS_CODE alt_clk_plls_settle_wait(void)
+{
+ int32_t i = ALT_BYPASS_TIMEOUT_CNT;
+ bool nofini;
+
+ do
+ {
+ nofini = alt_read_word(ALT_CLKMGR_STAT_ADDR) & ALT_CLKMGR_STAT_BUSY_SET_MSK;
+ } while (nofini && i--);
+ // wait until clocks finish transitioning and become stable again
+ return (i > 0) ? ALT_E_SUCCESS : ALT_E_ERROR;
+}
+
+static ALT_STATUS_CODE alt_clk_pll_lock_wait(ALT_CLK_t pll, uint32_t timeout)
+{
+ uint32_t locked_mask = 0;
+
+ if (pll == ALT_CLK_MAIN_PLL) { locked_mask = ALT_CLKMGR_INTER_MAINPLLLOCKED_SET_MSK; }
+ else if (pll == ALT_CLK_PERIPHERAL_PLL) { locked_mask = ALT_CLKMGR_INTER_PERPLLLOCKED_SET_MSK; }
+ else if (pll == ALT_CLK_SDRAM_PLL) { locked_mask = ALT_CLKMGR_INTER_SDRPLLLOCKED_SET_MSK; }
+ else
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ do
+ {
+ uint32_t int_status = alt_read_word(ALT_CLKMGR_INTER_ADDR);
+ if (int_status & locked_mask)
+ {
+ return ALT_E_SUCCESS;
+ }
+
+ } while (timeout--);
+
+ return ALT_E_TMO;
+}
+
+ /* Useful utility macro for checking if two values */
+ /* are within a certain percentage of each other */
+#define alt_within_delta(ref, neu, prcnt) (((((neu) * 100)/(ref)) < (100 + (prcnt))) \
+ && ((((neu) * 100)/(ref)) > (100 - (prcnt))))
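+
+/*
+ * Usage sketch (illustrative only): checking whether a requested PLL frequency
+ * stays within the guardband percentage of the current setting, where
+ * current_freq and requested_freq are hypothetical alt_freq_t values:
+ *
+ *     if (alt_within_delta(current_freq, requested_freq, ALT_ORIGINAL_GUARDBAND_VAL))
+ *     {
+ *         // requested_freq is within +/- guardband percent of current_freq
+ *     }
+ */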
+
+
+ /* Flags to include or omit code sections */
+// There are four cases where there is a small possibility of producing clock
+// glitches. Code has been added from an abundance of caution to prevent
+// these glitches. If further testing shows that this extra code is not necessary
+// under any conditions, it may be easily eliminated by clearing these flags.
+
+#define ALT_PREVENT_GLITCH_BYP true
+// for PLL entering or leaving bypass
+#define ALT_PREVENT_GLITCH_EXSAFE true
+// for PLL exiting safe mode
+#define ALT_PREVENT_GLITCH_CNTRRST true
+// resets counter phase
+#define ALT_PREVENT_GLITCH_CHGC1 true
+// for changing Main PLL C1 counter
+
+
+
+/****************************************************************************************/
+/* Bare-bones utility function used to make the somewhat complex writes to the PLL */
+/* counter registers (the clock dividers) easier. No parameter-checking or */
+/* error-checking, this is a static to this file and invisible to Doxygen. */
+/****************************************************************************************/
+
+static void alt_clk_pllcounter_write(void* vcoaddr, void* stataddr, void* cntraddr,
+ uint32_t val, uint32_t msk, uint32_t shift)
+{
+#if ALT_PREVENT_GLITCH_CNTRRST
+ // this is here from an abundance of caution and it may not be necessary
+ // to put the counter in reset for this write
+ volatile uint32_t temp;
+
+ alt_setbits_word(vcoaddr, msk << shift); // put the counter in reset
+ do
+ {
+ temp = alt_read_word(stataddr);
+ } while (!(temp & msk));
+
+ alt_write_word(cntraddr, val);
+ alt_clrbits_word(vcoaddr, msk << shift); // release counter reset
+
+#else // in case resetting the counters as above turns out to be unnecessary
+ alt_write_word(cntraddr, val);
+#endif
+}
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Main Functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+
+/****************************************************************************************/
+/* alt_clk_lock_status_clear() clears assertions of one or more of the PLL lock status */
+/* conditions. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_clk_lock_status_clear(ALT_CLK_PLL_LOCK_STATUS_t lock_stat_mask)
+{
+ if (lock_stat_mask & ( ALT_CLKMGR_INTER_MAINPLLACHIEVED_CLR_MSK
+ & ALT_CLKMGR_INTER_PERPLLACHIEVED_CLR_MSK
+ & ALT_CLKMGR_INTER_SDRPLLACHIEVED_CLR_MSK
+ & ALT_CLKMGR_INTER_MAINPLLLOST_CLR_MSK
+ & ALT_CLKMGR_INTER_PERPLLLOST_CLR_MSK
+ & ALT_CLKMGR_INTER_SDRPLLLOST_CLR_MSK)
+ )
+ {
+ return ALT_E_BAD_ARG;
+ }
+ else
+ {
+ alt_setbits_word(ALT_CLKMGR_INTER_ADDR, lock_stat_mask);
+ return ALT_E_SUCCESS;
+ }
+}
+
+
+/****************************************************************************************/
+/* alt_clk_lock_status_get() returns the value of the PLL lock status conditions. */
+/****************************************************************************************/
+
+uint32_t alt_clk_lock_status_get(void)
+{
+ return alt_read_word(ALT_CLKMGR_INTER_ADDR) & ( ALT_CLKMGR_INTER_MAINPLLACHIEVED_SET_MSK
+ | ALT_CLKMGR_INTER_PERPLLACHIEVED_SET_MSK
+ | ALT_CLKMGR_INTER_SDRPLLACHIEVED_SET_MSK
+ | ALT_CLKMGR_INTER_MAINPLLLOST_SET_MSK
+ | ALT_CLKMGR_INTER_PERPLLLOST_SET_MSK
+ | ALT_CLKMGR_INTER_SDRPLLLOST_SET_MSK
+ | ALT_CLKMGR_INTER_MAINPLLLOCKED_SET_MSK
+ | ALT_CLKMGR_INTER_PERPLLLOCKED_SET_MSK
+ | ALT_CLKMGR_INTER_SDRPLLLOCKED_SET_MSK );
+}
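+
+/*
+ * Usage sketch (illustrative only): testing one of the returned condition
+ * bits, here whether the Main PLL currently reports lock:
+ *
+ *     if (alt_clk_lock_status_get() & ALT_CLKMGR_INTER_MAINPLLLOCKED_SET_MSK)
+ *     {
+ *         // Main PLL lock bit is set
+ *     }
+ */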
+
+
+/****************************************************************************************/
+/* alt_clk_pll_is_locked() returns ALT_E_TRUE if the designated PLL is currently */
+/* locked and ALT_E_FALSE if not. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_clk_pll_is_locked(ALT_CLK_t pll)
+{
+ ALT_STATUS_CODE status = ALT_E_BAD_ARG;
+
+ if (pll == ALT_CLK_MAIN_PLL)
+ {
+ status = (alt_read_word(ALT_CLKMGR_INTER_ADDR) & ALT_CLKMGR_INTER_MAINPLLLOCKED_SET_MSK)
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ }
+ else if (pll == ALT_CLK_PERIPHERAL_PLL)
+ {
+ status = (alt_read_word(ALT_CLKMGR_INTER_ADDR) & ALT_CLKMGR_INTER_PERPLLLOCKED_SET_MSK)
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ }
+ else if (pll == ALT_CLK_SDRAM_PLL)
+ {
+ status = (alt_read_word(ALT_CLKMGR_INTER_ADDR) & ALT_CLKMGR_INTER_SDRPLLLOCKED_SET_MSK)
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ }
+ return status;
+}
+
+
+/****************************************************************************************/
+/* alt_clk_safe_mode_clear() clears the safe mode status of the Clock Manager following */
+/* a reset. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_clk_safe_mode_clear(void)
+{
+ ALT_STATUS_CODE status = ALT_E_ERROR;
+#if ALT_PREVENT_GLITCH_EXSAFE
+ uint32_t temp;
+
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp &
+ (ALT_CLKMGR_MAINPLL_EN_L4MPCLK_CLR_MSK & ALT_CLKMGR_MAINPLL_EN_L4SPCLK_CLR_MSK));
+ // gate off l4MP and L4SP clocks (no matter their source)
+
+ alt_setbits_word(ALT_CLKMGR_CTL_ADDR, ALT_CLKMGR_CTL_SAFEMOD_SET_MSK);
+ // clear safe mode bit
+ status = alt_clk_plls_settle_wait();
+ alt_replbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR,
+ ALT_CLKMGR_MAINPLL_EN_L4MPCLK_SET_MSK | ALT_CLKMGR_MAINPLL_EN_L4SPCLK_SET_MSK,
+ temp);
+ // gate l4MP and L4SP clocks back on if they were on previously
+
+#else
+ alt_setbits_word(ALT_CLKMGR_CTL_ADDR, ALT_CLKMGR_CTL_SAFEMOD_SET_MSK);
+ // clear safe mode bit
+ status = alt_clk_plls_settle_wait();
+
+#endif
+ return status;
+}
+
+
+/****************************************************************************************/
+/* alt_clk_is_in_safe_mode() returns whether the specified safe mode clock domain is in */
+/* safe mode or not. */
+/****************************************************************************************/
+
+bool alt_clk_is_in_safe_mode(ALT_CLK_SAFE_DOMAIN_t clk_domain)
+{
+ bool ret = false;
+ uint32_t temp;
+
+ if (clk_domain == ALT_CLK_DOMAIN_NORMAL)
+ {
+ ret = alt_read_word(ALT_CLKMGR_CTL_ADDR) & ALT_CLKMGR_CTL_SAFEMOD_SET_MSK;
+ // is the main clock domain in safe mode?
+ }
+ else if (clk_domain == ALT_CLK_DOMAIN_DEBUG)
+ {
+ temp = alt_read_word(ALT_CLKMGR_DBCTL_ADDR);
+ if (temp & ALT_CLKMGR_DBCTL_STAYOSC1_SET_MSK)
+ {
+ ret = true; // is the debug clock domain in safe mode?
+ }
+ else if (temp & ALT_CLKMGR_DBCTL_ENSFMDWR_SET_MSK)
+ {
+ ret = alt_read_word(ALT_CLKMGR_CTL_ADDR) & ALT_CLKMGR_CTL_SAFEMOD_SET_MSK;
+ // is the debug clock domain following the main clock domain
+ // AND is the main clock domain in safe mode?
+ }
+ }
+ return ret;
+}
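+
+/*
+ * Usage sketch (illustrative only): leaving safe mode only when the normal
+ * clock domain actually reports being in it:
+ *
+ *     if (alt_clk_is_in_safe_mode(ALT_CLK_DOMAIN_NORMAL))
+ *     {
+ *         (void) alt_clk_safe_mode_clear();
+ *     }
+ */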
+
+/****************************************************************************************/
+/* alt_clk_pll_bypass_disable() disables bypass mode for the specified PLL, taking it  */
+/* out of bypass and allowing the PLL output to drive the six clocks derived from its  */
+/* output counters.                                                                    */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_clk_pll_bypass_disable(ALT_CLK_t pll)
+{
+ ALT_STATUS_CODE status = ALT_E_BAD_ARG;
+ uint32_t temp;
+#if ALT_PREVENT_GLITCH_BYP
+ uint32_t temp1;
+ bool restore_0 = false;
+ bool restore_1 = false;
+#endif
+
+ // this function should only be called after the selected PLL is locked
+ if (alt_clk_pll_is_locked(pll) == ALT_E_TRUE)
+ {
+ if (pll == ALT_CLK_MAIN_PLL)
+ {
+#if ALT_PREVENT_GLITCH_BYP
+ // if L4MP or L4SP source is set to Main PLL C1, gate it off before changing
+ // bypass state, then gate clock back on. FogBugz #63778
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR);
+ temp1 = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+
+ if ((temp1 & ALT_CLKMGR_MAINPLL_EN_L4MPCLK_SET_MSK) && (!(temp & ALT_CLKMGR_MAINPLL_L4SRC_L4MP_SET_MSK)))
+ {
+ restore_0 = true;
+ }
+ if ((temp1 & ALT_CLKMGR_MAINPLL_EN_L4SPCLK_SET_MSK) && (!(temp & ALT_CLKMGR_MAINPLL_L4SRC_L4SP_SET_MSK)))
+ {
+ restore_1 = true;
+ }
+ temp = temp1;
+ if (restore_0) { temp &= ALT_CLKMGR_MAINPLL_EN_L4MPCLK_CLR_MSK; }
+ if (restore_1) { temp &= ALT_CLKMGR_MAINPLL_EN_L4SPCLK_CLR_MSK; }
+ if (restore_0 || restore_1) { alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp); }
+#endif
+
+ // assert outresetall of main PLL
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_VCO_ADDR);
+ alt_write_word(ALT_CLKMGR_MAINPLL_VCO_ADDR, temp | ALT_CLKMGR_MAINPLL_VCO_OUTRSTALL_SET_MSK);
+
+ // deassert outresetall of main PLL
+ alt_write_word(ALT_CLKMGR_MAINPLL_VCO_ADDR, temp & ALT_CLKMGR_MAINPLL_VCO_OUTRSTALL_CLR_MSK);
+
+ alt_clk_plls_settle_wait();
+
+ // remove bypass
+ alt_clrbits_word(ALT_CLKMGR_BYPASS_ADDR, ALT_CLKMGR_BYPASS_MAINPLL_SET_MSK);
+ status = alt_clk_plls_settle_wait();
+
+#if ALT_PREVENT_GLITCH_BYP
+ if (restore_0 || restore_1)
+ {
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ // wait a bit more before reenabling the L4MP and L4SP clocks
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp1);
+ }
+#endif
+ }
+
+ else if (pll == ALT_CLK_PERIPHERAL_PLL)
+ {
+#if ALT_PREVENT_GLITCH_BYP
+ // if L4MP or L4SP source is set to Main PLL C1, gate it off before changing
+ // bypass state, then gate clock back on. FogBugz #63778
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR);
+ temp1 = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+
+ if ((temp1 & ALT_CLKMGR_MAINPLL_EN_L4MPCLK_SET_MSK) && (temp & ALT_CLKMGR_MAINPLL_L4SRC_L4MP_SET_MSK))
+ {
+ restore_0 = true;
+ }
+ if ((temp1 & ALT_CLKMGR_MAINPLL_EN_L4SPCLK_SET_MSK) && (temp & ALT_CLKMGR_MAINPLL_L4SRC_L4SP_SET_MSK))
+ {
+ restore_1 = true;
+ }
+ temp = temp1;
+ if (restore_0) { temp &= ALT_CLKMGR_MAINPLL_EN_L4MPCLK_CLR_MSK; }
+ if (restore_1) { temp &= ALT_CLKMGR_MAINPLL_EN_L4SPCLK_CLR_MSK; }
+ if (restore_0 || restore_1) { alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp); }
+#endif
+
+ // assert outresetall of Peripheral PLL
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_VCO_ADDR);
+ alt_write_word(ALT_CLKMGR_PERPLL_VCO_ADDR, temp | ALT_CLKMGR_PERPLL_VCO_OUTRSTALL_SET_MSK);
+ alt_clk_plls_settle_wait();
+
+ // deassert outresetall of main PLL
+ alt_write_word(ALT_CLKMGR_PERPLL_VCO_ADDR, temp & ALT_CLKMGR_PERPLL_VCO_OUTRSTALL_CLR_MSK);
+
+ // remove bypass - there should be no need to touch the bypass clock source
+ alt_clrbits_word(ALT_CLKMGR_BYPASS_ADDR, ALT_CLKMGR_BYPASS_PERPLL_SET_MSK);
+ status = alt_clk_plls_settle_wait();
+
+#if ALT_PREVENT_GLITCH_BYP
+ if (restore_0 || restore_1)
+ {
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ // wait a bit more before reenabling the L4MP and L4SP clocks
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp1);
+ }
+#endif
+ }
+
+ else if (pll == ALT_CLK_SDRAM_PLL)
+ {
+ // assert outresetall of SDRAM PLL
+ temp = alt_read_word(ALT_CLKMGR_SDRPLL_VCO_ADDR);
+ alt_write_word(ALT_CLKMGR_SDRPLL_VCO_ADDR, temp | ALT_CLKMGR_SDRPLL_VCO_OUTRSTALL_SET_MSK);
+
+ // deassert outresetall of main PLL
+ alt_write_word(ALT_CLKMGR_SDRPLL_VCO_ADDR, temp & ALT_CLKMGR_SDRPLL_VCO_OUTRSTALL_CLR_MSK);
+ alt_clk_plls_settle_wait();
+
+ // remove bypass - there should be no need to touch the bypass clock source
+ alt_clrbits_word(ALT_CLKMGR_BYPASS_ADDR, ALT_CLKMGR_BYPASS_SDRPLLSRC_SET_MSK);
+ status = alt_clk_plls_settle_wait();
+ }
+ }
+ else
+ {
+ status = ALT_E_ERROR;
+ }
+
+ return status;
+}
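+
+/*
+ * Usage sketch (illustrative only): bypass should only be removed once the
+ * PLL reports lock, for example:
+ *
+ *     if (alt_clk_pll_is_locked(ALT_CLK_MAIN_PLL) == ALT_E_TRUE)
+ *     {
+ *         (void) alt_clk_pll_bypass_disable(ALT_CLK_MAIN_PLL);
+ *     }
+ */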
+
+
+/****************************************************************************************/
+/* alt_clk_pll_bypass_enable() enables bypass mode for the specified PLL.              */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_clk_pll_bypass_enable(ALT_CLK_t pll, bool use_input_mux)
+{
+ ALT_STATUS_CODE status = ALT_E_BAD_ARG;
+ uint32_t temp;
+#ifdef ALT_PREVENT_GLITCH_BYP
+ uint32_t temp1;
+ bool restore_0 = false;
+ bool restore_1 = false;
+#endif
+
+ if (pll == ALT_CLK_MAIN_PLL)
+ {
+ if (!use_input_mux)
+ {
+#ifdef ALT_PREVENT_GLITCH_BYP
+ // if L4MP or L4SP source is set to Main PLL C1, gate it off before changing
+ // bypass state, then gate clock back on. FogBugz #63778
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR);
+ temp1 = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+
+ if ((temp1 & ALT_CLKMGR_MAINPLL_EN_L4MPCLK_SET_MSK) && (!(temp & ALT_CLKMGR_MAINPLL_L4SRC_L4MP_SET_MSK)))
+ {
+ restore_0 = true;
+ }
+ if ((temp1 & ALT_CLKMGR_MAINPLL_EN_L4SPCLK_SET_MSK) && (!(temp & ALT_CLKMGR_MAINPLL_L4SRC_L4SP_SET_MSK)))
+ {
+ restore_1 = true;
+ }
+ temp = temp1;
+ if (restore_0) { temp &= ALT_CLKMGR_MAINPLL_EN_L4MPCLK_CLR_MSK; }
+ if (restore_1) { temp &= ALT_CLKMGR_MAINPLL_EN_L4SPCLK_CLR_MSK; }
+ if (restore_0 || restore_1) { alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp); }
+
+ alt_setbits_word(ALT_CLKMGR_BYPASS_ADDR, ALT_CLKMGR_BYPASS_MAINPLL_SET_MSK);
+ // no input mux select on main PLL
+
+ status = alt_clk_plls_settle_wait();
+ // wait before reenabling the L4MP and L4SP clocks
+ if (restore_0 || restore_1) { alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp1); }
+
+#else
+ alt_setbits_word(ALT_CLKMGR_BYPASS_ADDR, ALT_CLKMGR_BYPASS_MAINPLL_SET_MSK);
+ // no input mux select on main PLL
+ status = alt_clk_plls_settle_wait();
+
+#endif
+ status = ALT_E_SUCCESS;
+ }
+ else
+ {
+ status = ALT_E_BAD_ARG;
+ }
+ }
+ else if (pll == ALT_CLK_PERIPHERAL_PLL)
+ {
+#ifdef ALT_PREVENT_GLITCH_BYP
+ // if L4MP or L4SP source is set to Peripheral PLL C1, gate it off before changing
+ // bypass state, then gate clock back on. FogBugz #63778
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR);
+ temp1 = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+
+ if ((temp1 & ALT_CLKMGR_MAINPLL_EN_L4MPCLK_SET_MSK) && (temp & ALT_CLKMGR_MAINPLL_L4SRC_L4MP_SET_MSK))
+ {
+ restore_0 = true;
+ }
+ if ((temp1 & ALT_CLKMGR_MAINPLL_EN_L4SPCLK_SET_MSK) && (temp & ALT_CLKMGR_MAINPLL_L4SRC_L4SP_SET_MSK))
+ {
+ restore_1 = true;
+ }
+ temp = temp1;
+ if (restore_0) { temp &= ALT_CLKMGR_MAINPLL_EN_L4MPCLK_CLR_MSK; }
+ if (restore_1) { temp &= ALT_CLKMGR_MAINPLL_EN_L4SPCLK_CLR_MSK; }
+ if (restore_0 || restore_1) { alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp); }
+
+ temp = alt_read_word(ALT_CLKMGR_BYPASS_ADDR) &
+ (ALT_CLKMGR_BYPASS_PERPLL_CLR_MSK & ALT_CLKMGR_BYPASS_PERPLLSRC_CLR_MSK);
+ temp |= (use_input_mux) ? ALT_CLKMGR_BYPASS_PERPLL_SET_MSK |
+ ALT_CLKMGR_BYPASS_PERPLLSRC_SET_MSK : ALT_CLKMGR_BYPASS_PERPLL_SET_MSK;
+ // set bypass bit and optionally the source select bit
+
+ alt_write_word(ALT_CLKMGR_BYPASS_ADDR, temp);
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ // wait a bit before reenabling the L4MP and L4SP clocks
+ if (restore_0 || restore_1) { alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp1); }
+
+#else
+ temp = alt_read_word(ALT_CLKMGR_BYPASS_ADDR) &
+ (ALT_CLKMGR_BYPASS_PERPLL_CLR_MSK & ALT_CLKMGR_BYPASS_PERPLLSRC_CLR_MSK);
+ temp |= (use_input_mux) ? ALT_CLKMGR_BYPASS_PERPLL_SET_MSK |
+ ALT_CLKMGR_BYPASS_PERPLLSRC_SET_MSK : ALT_CLKMGR_BYPASS_PERPLL_SET_MSK;
+ // set bypass bit and optionally the source select bit
+ alt_write_word(ALT_CLKMGR_BYPASS_ADDR, temp);
+#endif
+ status = ALT_E_SUCCESS;
+ }
+
+ else if (pll == ALT_CLK_SDRAM_PLL)
+ {
+ temp = alt_read_word(ALT_CLKMGR_BYPASS_ADDR) &
+ (ALT_CLKMGR_BYPASS_SDRPLL_CLR_MSK & ALT_CLKMGR_BYPASS_SDRPLLSRC_CLR_MSK);
+ temp |= (use_input_mux) ? ALT_CLKMGR_BYPASS_SDRPLL_SET_MSK |
+ ALT_CLKMGR_BYPASS_SDRPLLSRC_SET_MSK : ALT_CLKMGR_BYPASS_SDRPLL_SET_MSK;
+ // set bypass bit and optionally the source select bit
+ alt_write_word(ALT_CLKMGR_BYPASS_ADDR, temp);
+ status = ALT_E_SUCCESS;
+ }
+ return status;
+}
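+
+/*
+ * Usage sketch (illustrative only): a VCO reprogramming sequence would
+ * typically bracket the register writes with bypass, e.g. for the SDRAM PLL
+ * using the input mux as the bypass source:
+ *
+ *     (void) alt_clk_pll_bypass_enable(ALT_CLK_SDRAM_PLL, true);
+ *     // ... reprogram the SDRAM PLL VCO and wait for it to re-lock ...
+ *     (void) alt_clk_pll_bypass_disable(ALT_CLK_SDRAM_PLL);
+ */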
+
+
+/****************************************************************************************/
+/* alt_clk_pll_is_bypassed() returns whether the specified PLL is in bypass or not. */
+/* Bypass is a special state where the PLL VCO and the C0-C5 counters are bypassed */
+/* and not in the circuit. Either the Osc1 clock input or the input chosen by the */
+/* input mux may be selected to be operational in the bypass state. All changes to */
+/* the PLL VCO must be made in bypass mode to avoid the potential of producing clock */
+/* glitches which may affect downstream clock dividers and peripherals. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_clk_pll_is_bypassed(ALT_CLK_t pll)
+{
+ ALT_STATUS_CODE status = ALT_E_BAD_ARG;
+
+ if (pll == ALT_CLK_MAIN_PLL)
+ {
+ status = (ALT_CLKMGR_CTL_SAFEMOD_GET(alt_read_word(ALT_CLKMGR_CTL_ADDR))
+ || ALT_CLKMGR_BYPASS_MAINPLL_GET(alt_read_word(ALT_CLKMGR_BYPASS_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ }
+ else if (pll == ALT_CLK_PERIPHERAL_PLL)
+ {
+ status = (ALT_CLKMGR_CTL_SAFEMOD_GET(alt_read_word(ALT_CLKMGR_CTL_ADDR))
+ || ALT_CLKMGR_BYPASS_PERPLL_GET(alt_read_word(ALT_CLKMGR_BYPASS_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ }
+ else if (pll == ALT_CLK_SDRAM_PLL)
+ {
+ status = (ALT_CLKMGR_CTL_SAFEMOD_GET(alt_read_word(ALT_CLKMGR_CTL_ADDR))
+ || ALT_CLKMGR_BYPASS_SDRPLL_GET(alt_read_word(ALT_CLKMGR_BYPASS_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ }
+ return status;
+}
+
+
+/****************************************************************************************/
+/* alt_clk_pll_source_get() returns the current input of the specified PLL. */
+/****************************************************************************************/
+
+ALT_CLK_t alt_clk_pll_source_get(ALT_CLK_t pll)
+{
+ ALT_CLK_t ret = ALT_CLK_UNKNOWN;
+ uint32_t temp;
+
+
+ if (pll == ALT_CLK_MAIN_PLL)
+ {
+ ret = ALT_CLK_IN_PIN_OSC1;
+ }
+ else if (pll == ALT_CLK_PERIPHERAL_PLL)
+ {
+ // three possible clock sources for the peripheral PLL
+ temp = ALT_CLKMGR_PERPLL_VCO_PSRC_GET(alt_read_word(ALT_CLKMGR_PERPLL_VCO_ADDR));
+ if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_EOSC1)
+ {
+ ret = ALT_CLK_IN_PIN_OSC1;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_EOSC2)
+ {
+ ret = ALT_CLK_IN_PIN_OSC2;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_F2S_PERIPH_REF)
+ {
+ ret = ALT_CLK_F2H_PERIPH_REF;
+ }
+ }
+ else if (pll == ALT_CLK_SDRAM_PLL)
+ {
+ // three possible clock sources for the SDRAM PLL
+ temp = ALT_CLKMGR_SDRPLL_VCO_SSRC_GET(alt_read_word(ALT_CLKMGR_SDRPLL_VCO_ADDR));
+ if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_EOSC1)
+ {
+ ret = ALT_CLK_IN_PIN_OSC1;
+ }
+ else if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_EOSC2)
+ {
+ ret = ALT_CLK_IN_PIN_OSC2;
+ }
+ else if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_F2S_SDRAM_REF)
+ {
+ ret = ALT_CLK_F2H_SDRAM_REF;
+ }
+ }
+ return ret;
+}
+
+//
+// alt_clk_clock_disable() disables the specified clock. Once the clock is disabled,
+// its clock signal does not propagate to its clocked elements.
+//
+ALT_STATUS_CODE alt_clk_clock_disable(ALT_CLK_t clk)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ switch (clk)
+ {
+ // For PLLs, put them in bypass mode.
+ case ALT_CLK_MAIN_PLL:
+ case ALT_CLK_PERIPHERAL_PLL:
+ case ALT_CLK_SDRAM_PLL:
+ status = alt_clk_pll_bypass_enable(clk, false);
+ break;
+
+ // Clocks that originate at the Main PLL.
+ case ALT_CLK_L4_MAIN:
+ alt_clrbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_L4MAINCLK_SET_MSK);
+ break;
+ case ALT_CLK_L3_MP:
+ alt_clrbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_L3MPCLK_SET_MSK);
+ break;
+ case ALT_CLK_L4_MP:
+ alt_clrbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_L4MPCLK_SET_MSK);
+ break;
+ case ALT_CLK_L4_SP:
+ alt_clrbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_L4SPCLK_SET_MSK);
+ break;
+ case ALT_CLK_DBG_AT:
+ alt_clrbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_DBGATCLK_SET_MSK);
+ break;
+ case ALT_CLK_DBG:
+ alt_clrbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_DBGCLK_SET_MSK);
+ break;
+ case ALT_CLK_DBG_TRACE:
+ alt_clrbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_DBGTRACECLK_SET_MSK);
+ break;
+ case ALT_CLK_DBG_TIMER:
+ alt_clrbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_DBGTMRCLK_SET_MSK);
+ break;
+ case ALT_CLK_CFG:
+ alt_clrbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_CFGCLK_SET_MSK);
+ break;
+ case ALT_CLK_H2F_USER0:
+ alt_clrbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_S2FUSER0CLK_SET_MSK);
+ break;
+
+ // Clocks that originate at the Peripheral PLL.
+ case ALT_CLK_EMAC0:
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_EMAC0CLK_SET_MSK);
+ break;
+ case ALT_CLK_EMAC1:
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_EMAC1CLK_SET_MSK);
+ break;
+ case ALT_CLK_USB_MP:
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_USBCLK_SET_MSK);
+ break;
+ case ALT_CLK_SPI_M:
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_SPIMCLK_SET_MSK);
+ break;
+ case ALT_CLK_CAN0:
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_CAN0CLK_SET_MSK);
+ break;
+ case ALT_CLK_CAN1:
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_CAN1CLK_SET_MSK);
+ break;
+ case ALT_CLK_GPIO_DB:
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_GPIOCLK_SET_MSK);
+ break;
+ case ALT_CLK_H2F_USER1:
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_S2FUSER1CLK_SET_MSK);
+ break;
+ case ALT_CLK_SDMMC:
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_SDMMCCLK_SET_MSK);
+ break;
+ case ALT_CLK_NAND_X:
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_NANDCLK_SET_MSK);
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_SW_MANAGED_CLK_WAIT_NANDCLK);
+ // gate nand_clk off before nand_x_clk.
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_NANDXCLK_SET_MSK);
+ break;
+ case ALT_CLK_NAND:
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_NANDCLK_SET_MSK);
+ break;
+ case ALT_CLK_QSPI:
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_QSPICLK_SET_MSK);
+ break;
+
+ // Clocks that originate at the SDRAM PLL.
+ case ALT_CLK_DDR_DQS:
+ alt_clrbits_word(ALT_CLKMGR_SDRPLL_EN_ADDR, ALT_CLKMGR_SDRPLL_EN_DDRDQSCLK_SET_MSK);
+ break;
+ case ALT_CLK_DDR_2X_DQS:
+ alt_clrbits_word(ALT_CLKMGR_SDRPLL_EN_ADDR, ALT_CLKMGR_SDRPLL_EN_DDR2XDQSCLK_SET_MSK);
+ break;
+ case ALT_CLK_DDR_DQ:
+ alt_clrbits_word(ALT_CLKMGR_SDRPLL_EN_ADDR, ALT_CLKMGR_SDRPLL_EN_DDRDQCLK_SET_MSK);
+ break;
+ case ALT_CLK_H2F_USER2:
+ alt_clrbits_word(ALT_CLKMGR_SDRPLL_EN_ADDR, ALT_CLKMGR_SDRPLL_EN_S2FUSER2CLK_SET_MSK);
+ break;
+
+ default:
+ status = ALT_E_BAD_ARG;
+ break;
+ }
+
+ return status;
+}
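+
+/*
+ * Usage sketch (illustrative only): gating off a peripheral clock that is not
+ * in use, here the CAN0 controller clock:
+ *
+ *     if (alt_clk_clock_disable(ALT_CLK_CAN0) != ALT_E_SUCCESS)
+ *     {
+ *         // ALT_E_BAD_ARG is returned for clocks that cannot be gated
+ *     }
+ */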
+
+
+//
+// alt_clk_clock_enable() enables the specified clock. Once the clock is enabled, its
+// clock signal propagates to its clocked elements.
+//
+ALT_STATUS_CODE alt_clk_clock_enable(ALT_CLK_t clk)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ switch (clk)
+ {
+ // For PLLs, take them out of bypass mode.
+ case ALT_CLK_MAIN_PLL:
+ case ALT_CLK_PERIPHERAL_PLL:
+ case ALT_CLK_SDRAM_PLL:
+ status = alt_clk_pll_bypass_disable(clk);
+ break;
+
+ // Clocks that originate at the Main PLL.
+ case ALT_CLK_L4_MAIN:
+ alt_setbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_L4MAINCLK_SET_MSK);
+ break;
+ case ALT_CLK_L3_MP:
+ alt_setbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_L3MPCLK_SET_MSK);
+ break;
+ case ALT_CLK_L4_MP:
+ alt_setbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_L4MPCLK_SET_MSK);
+ break;
+ case ALT_CLK_L4_SP:
+ alt_setbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_L4SPCLK_SET_MSK);
+ break;
+ case ALT_CLK_DBG_AT:
+ alt_setbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_DBGATCLK_SET_MSK);
+ break;
+ case ALT_CLK_DBG:
+ alt_setbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_DBGCLK_SET_MSK);
+ break;
+ case ALT_CLK_DBG_TRACE:
+ alt_setbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_DBGTRACECLK_SET_MSK);
+ break;
+ case ALT_CLK_DBG_TIMER:
+ alt_setbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_DBGTMRCLK_SET_MSK);
+ break;
+ case ALT_CLK_CFG:
+ alt_setbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_CFGCLK_SET_MSK);
+ break;
+ case ALT_CLK_H2F_USER0:
+ alt_setbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_CLKMGR_MAINPLL_EN_S2FUSER0CLK_SET_MSK);
+ break;
+
+ // Clocks that originate at the Peripheral PLL.
+ case ALT_CLK_EMAC0:
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_EMAC0CLK_SET_MSK);
+ break;
+ case ALT_CLK_EMAC1:
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_EMAC1CLK_SET_MSK);
+ break;
+ case ALT_CLK_USB_MP:
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_USBCLK_SET_MSK);
+ break;
+ case ALT_CLK_SPI_M:
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_SPIMCLK_SET_MSK);
+ break;
+ case ALT_CLK_CAN0:
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_CAN0CLK_SET_MSK);
+ break;
+ case ALT_CLK_CAN1:
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_CAN1CLK_SET_MSK);
+ break;
+ case ALT_CLK_GPIO_DB:
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_GPIOCLK_SET_MSK);
+ break;
+ case ALT_CLK_H2F_USER1:
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_S2FUSER1CLK_SET_MSK);
+ break;
+ case ALT_CLK_SDMMC:
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_SDMMCCLK_SET_MSK);
+ break;
+ case ALT_CLK_NAND_X:
+ // implementation detail - should ALT_CLK_NAND be gated off here before enabling ALT_CLK_NAND_X?
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_NANDXCLK_SET_MSK);
+ // implementation detail - should this wait be enforced here?
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_SW_MANAGED_CLK_WAIT_NANDCLK);
+ break;
+ case ALT_CLK_NAND:
+ // enabling ALT_CLK_NAND always implies enabling ALT_CLK_NAND_X first
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_NANDXCLK_SET_MSK);
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_SW_MANAGED_CLK_WAIT_NANDCLK);
+ // gate nand_x_clk on at least 8 MCU clocks before nand_clk
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_NANDCLK_SET_MSK);
+ break;
+ case ALT_CLK_QSPI:
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_QSPICLK_SET_MSK);
+ break;
+
+ // Clocks that originate at the SDRAM PLL.
+ case ALT_CLK_DDR_DQS:
+ alt_setbits_word(ALT_CLKMGR_SDRPLL_EN_ADDR, ALT_CLKMGR_SDRPLL_EN_DDRDQSCLK_SET_MSK);
+ break;
+ case ALT_CLK_DDR_2X_DQS:
+ alt_setbits_word(ALT_CLKMGR_SDRPLL_EN_ADDR, ALT_CLKMGR_SDRPLL_EN_DDR2XDQSCLK_SET_MSK);
+ break;
+ case ALT_CLK_DDR_DQ:
+ alt_setbits_word(ALT_CLKMGR_SDRPLL_EN_ADDR, ALT_CLKMGR_SDRPLL_EN_DDRDQCLK_SET_MSK);
+ break;
+ case ALT_CLK_H2F_USER2:
+ alt_setbits_word(ALT_CLKMGR_SDRPLL_EN_ADDR, ALT_CLKMGR_SDRPLL_EN_S2FUSER2CLK_SET_MSK);
+ break;
+
+ default:
+ status = ALT_E_BAD_ARG;
+ break;
+ }
+
+ return status;
+}
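+
+/*
+ * Usage sketch (illustrative only): enabling the QSPI clock before programming
+ * the QSPI controller, skipping the write if it is already enabled:
+ *
+ *     if (alt_clk_is_enabled(ALT_CLK_QSPI) != ALT_E_TRUE)
+ *     {
+ *         (void) alt_clk_clock_enable(ALT_CLK_QSPI);
+ *     }
+ */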
+
+//
+// alt_clk_is_enabled() returns whether the specified clock is enabled or not.
+//
+ALT_STATUS_CODE alt_clk_is_enabled(ALT_CLK_t clk)
+{
+ ALT_STATUS_CODE status = ALT_E_BAD_ARG;
+
+ switch (clk)
+ {
+ // For PLLs, this function checks if the PLL is bypassed or not.
+ case ALT_CLK_MAIN_PLL:
+ case ALT_CLK_PERIPHERAL_PLL:
+ case ALT_CLK_SDRAM_PLL:
+ status = (alt_clk_pll_is_bypassed(clk) != ALT_E_TRUE);
+ break;
+
+ // These clocks are not gated, so they must return an ALT_E_BAD_ARG type error.
+ case ALT_CLK_MAIN_PLL_C0:
+ case ALT_CLK_MAIN_PLL_C1:
+ case ALT_CLK_MAIN_PLL_C2:
+ case ALT_CLK_MAIN_PLL_C3:
+ case ALT_CLK_MAIN_PLL_C4:
+ case ALT_CLK_MAIN_PLL_C5:
+ case ALT_CLK_MPU:
+ case ALT_CLK_MPU_L2_RAM:
+ case ALT_CLK_MPU_PERIPH:
+ case ALT_CLK_L3_MAIN:
+ case ALT_CLK_L3_SP:
+ case ALT_CLK_DBG_BASE:
+ case ALT_CLK_MAIN_QSPI:
+ case ALT_CLK_MAIN_NAND_SDMMC:
+ case ALT_CLK_PERIPHERAL_PLL_C0:
+ case ALT_CLK_PERIPHERAL_PLL_C1:
+ case ALT_CLK_PERIPHERAL_PLL_C2:
+ case ALT_CLK_PERIPHERAL_PLL_C3:
+ case ALT_CLK_PERIPHERAL_PLL_C4:
+ case ALT_CLK_PERIPHERAL_PLL_C5:
+ case ALT_CLK_SDRAM_PLL_C0:
+ case ALT_CLK_SDRAM_PLL_C1:
+ case ALT_CLK_SDRAM_PLL_C2:
+ case ALT_CLK_SDRAM_PLL_C5:
+ status = ALT_E_BAD_ARG;
+ break;
+
+ // Clocks that originate at the Main PLL.
+ case ALT_CLK_L4_MAIN:
+ status = (ALT_CLKMGR_MAINPLL_EN_L4MAINCLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_L3_MP:
+ status = (ALT_CLKMGR_MAINPLL_EN_L3MPCLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_L4_MP:
+ status = (ALT_CLKMGR_MAINPLL_EN_L4MPCLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_L4_SP:
+ status = (ALT_CLKMGR_MAINPLL_EN_L4SPCLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_DBG_AT:
+ status = (ALT_CLKMGR_MAINPLL_EN_DBGATCLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_DBG:
+ status = (ALT_CLKMGR_MAINPLL_EN_DBGCLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_DBG_TRACE:
+ status = (ALT_CLKMGR_MAINPLL_EN_DBGTRACECLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_DBG_TIMER:
+ status = (ALT_CLKMGR_MAINPLL_EN_DBGTMRCLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_CFG:
+ status = (ALT_CLKMGR_MAINPLL_EN_CFGCLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_H2F_USER0:
+ status = (ALT_CLKMGR_MAINPLL_EN_S2FUSER0CLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+
+ // Clocks that originate at the Peripheral PLL.
+ case ALT_CLK_EMAC0:
+ status = (ALT_CLKMGR_PERPLL_EN_EMAC0CLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_EMAC1:
+ status = (ALT_CLKMGR_PERPLL_EN_EMAC1CLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_USB_MP:
+ status = (ALT_CLKMGR_PERPLL_EN_USBCLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_SPI_M:
+ status = (ALT_CLKMGR_PERPLL_EN_SPIMCLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_CAN0:
+ status = (ALT_CLKMGR_PERPLL_EN_CAN0CLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_CAN1:
+ status = (ALT_CLKMGR_PERPLL_EN_CAN1CLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_GPIO_DB:
+ status = (ALT_CLKMGR_PERPLL_EN_GPIOCLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_H2F_USER1:
+ status = (ALT_CLKMGR_PERPLL_EN_S2FUSER1CLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+
+ // Clocks that may originate at the Main PLL, the Peripheral PLL, or the FPGA.
+ case ALT_CLK_SDMMC:
+ status = (ALT_CLKMGR_PERPLL_EN_SDMMCCLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_NAND_X:
+ status = (ALT_CLKMGR_PERPLL_EN_NANDXCLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_NAND:
+ status = (ALT_CLKMGR_PERPLL_EN_NANDCLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_QSPI:
+ status = (ALT_CLKMGR_PERPLL_EN_QSPICLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+
+ // Clocks that originate at the SDRAM PLL.
+ case ALT_CLK_DDR_DQS:
+ status = (ALT_CLKMGR_SDRPLL_EN_DDRDQSCLK_GET(alt_read_word(ALT_CLKMGR_SDRPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_DDR_2X_DQS:
+ status = (ALT_CLKMGR_SDRPLL_EN_DDR2XDQSCLK_GET(alt_read_word(ALT_CLKMGR_SDRPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_DDR_DQ:
+ status = (ALT_CLKMGR_SDRPLL_EN_DDRDQCLK_GET(alt_read_word(ALT_CLKMGR_SDRPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+ case ALT_CLK_H2F_USER2:
+ status = (ALT_CLKMGR_SDRPLL_EN_S2FUSER2CLK_GET(alt_read_word(ALT_CLKMGR_SDRPLL_EN_ADDR)))
+ ? ALT_E_TRUE : ALT_E_FALSE;
+ break;
+
+ default:
+ status = ALT_E_BAD_ARG;
+ break;
+
+ }
+
+ return status;
+}
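+
+/*
+ * Usage sketch (illustrative only): the return value is a status code rather
+ * than a bool, so ungated clocks can be distinguished from disabled ones:
+ *
+ *     ALT_STATUS_CODE en = alt_clk_is_enabled(ALT_CLK_L4_SP);
+ *     if      (en == ALT_E_TRUE)  { }   // clock is gated on
+ *     else if (en == ALT_E_FALSE) { }   // clock is gated off
+ *     else                        { }   // ALT_E_BAD_ARG: clock is not gated
+ */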
+
+//
+// alt_clk_source_get() gets the input reference clock source selection value for the
+// specified clock or PLL.
+//
+ALT_CLK_t alt_clk_source_get(ALT_CLK_t clk)
+{
+ ALT_CLK_t ret = ALT_CLK_UNKNOWN;
+ uint32_t temp;
+
+ switch (clk)
+ {
+ // Potential external clock sources.
+ // these clock entities are their own source
+ case ALT_CLK_IN_PIN_OSC1:
+ case ALT_CLK_IN_PIN_OSC2:
+ case ALT_CLK_F2H_PERIPH_REF:
+ case ALT_CLK_F2H_SDRAM_REF:
+ case ALT_CLK_IN_PIN_JTAG:
+ case ALT_CLK_IN_PIN_ULPI0:
+ case ALT_CLK_IN_PIN_ULPI1:
+ case ALT_CLK_IN_PIN_EMAC0_RX:
+ case ALT_CLK_IN_PIN_EMAC1_RX:
+ ret = clk;
+ break;
+
+ // Phase-Locked Loops.
+ case ALT_CLK_MAIN_PLL:
+ case ALT_CLK_OSC1:
+ ret = ALT_CLK_IN_PIN_OSC1;
+ break;
+ case ALT_CLK_PERIPHERAL_PLL:
+ ret = alt_clk_pll_source_get(ALT_CLK_PERIPHERAL_PLL);
+ break;
+ case ALT_CLK_SDRAM_PLL:
+ ret = alt_clk_pll_source_get(ALT_CLK_SDRAM_PLL);
+ break;
+
+ // Main Clock Group.
+ case ALT_CLK_MAIN_PLL_C0:
+ case ALT_CLK_MAIN_PLL_C1:
+ case ALT_CLK_MAIN_PLL_C2:
+ case ALT_CLK_MAIN_PLL_C3:
+ case ALT_CLK_MAIN_PLL_C4:
+ case ALT_CLK_MAIN_PLL_C5:
+ // check bypass, return either osc1 or PLL ID
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_MAIN_PLL) == ALT_E_TRUE) ?
+ ALT_CLK_IN_PIN_OSC1 : ALT_CLK_MAIN_PLL;
+ break;
+
+ case ALT_CLK_MPU_PERIPH:
+ case ALT_CLK_MPU_L2_RAM:
+ case ALT_CLK_MPU:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_MAIN_PLL) == ALT_E_TRUE) ?
+ ALT_CLK_IN_PIN_OSC1 : ALT_CLK_MAIN_PLL_C0;
+ break;
+
+ case ALT_CLK_L4_MAIN:
+ case ALT_CLK_L3_MAIN:
+ case ALT_CLK_L3_MP:
+ case ALT_CLK_L3_SP:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_MAIN_PLL) == ALT_E_TRUE) ?
+ ALT_CLK_IN_PIN_OSC1 : ALT_CLK_MAIN_PLL_C1;
+ break;
+
+ case ALT_CLK_L4_MP:
+ // read the state of the L4_mp source bit
+ if ((ALT_CLKMGR_MAINPLL_L4SRC_L4MP_GET(alt_read_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR)))
+ == ALT_CLKMGR_MAINPLL_L4SRC_L4MP_E_MAINPLL)
+ {
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_MAIN_PLL) == ALT_E_TRUE) ?
+ ALT_CLK_IN_PIN_OSC1 : ALT_CLK_MAIN_PLL_C1;
+ }
+ else
+ {
+ // if the clock comes from periph_base_clk
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_PERIPHERAL_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_PERIPHERAL_PLL) : ALT_CLK_PERIPHERAL_PLL_C4;
+ }
+ break;
+
+ case ALT_CLK_L4_SP:
+ // read the state of the source bit
+ if ((ALT_CLKMGR_MAINPLL_L4SRC_L4SP_GET(alt_read_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR)))
+ == ALT_CLKMGR_MAINPLL_L4SRC_L4SP_E_MAINPLL)
+ {
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_MAIN_PLL) == ALT_E_TRUE) ?
+ ALT_CLK_IN_PIN_OSC1 : ALT_CLK_MAIN_PLL_C1;
+ }
+ else
+ {
+ // if the clock comes from periph_base_clk
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_PERIPHERAL_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_PERIPHERAL_PLL) : ALT_CLK_PERIPHERAL_PLL_C4;
+ }
+ break;
+
+ case ALT_CLK_DBG_BASE:
+ case ALT_CLK_DBG_AT:
+ case ALT_CLK_DBG_TRACE:
+ case ALT_CLK_DBG_TIMER:
+ case ALT_CLK_DBG:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_MAIN_PLL) == ALT_E_TRUE) ?
+ ALT_CLK_OSC1 : ALT_CLK_MAIN_PLL_C2;
+ break;
+ case ALT_CLK_MAIN_QSPI:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_MAIN_PLL) == ALT_E_TRUE) ?
+ ALT_CLK_OSC1 : ALT_CLK_MAIN_PLL_C3;
+ break;
+ case ALT_CLK_MAIN_NAND_SDMMC:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_MAIN_PLL) == ALT_E_TRUE) ?
+ ALT_CLK_OSC1 : ALT_CLK_MAIN_PLL_C4;
+ break;
+ case ALT_CLK_CFG:
+ case ALT_CLK_H2F_USER0:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_MAIN_PLL) == ALT_E_TRUE) ?
+ ALT_CLK_OSC1 : ALT_CLK_MAIN_PLL_C5;
+ break;
+
+ // Peripherals Clock Group
+ case ALT_CLK_PERIPHERAL_PLL_C0:
+ case ALT_CLK_PERIPHERAL_PLL_C1:
+ case ALT_CLK_PERIPHERAL_PLL_C2:
+ case ALT_CLK_PERIPHERAL_PLL_C3:
+ case ALT_CLK_PERIPHERAL_PLL_C4:
+ case ALT_CLK_PERIPHERAL_PLL_C5:
+ // if the clock comes from periph_base_clk
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_PERIPHERAL_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_PERIPHERAL_PLL) : ALT_CLK_PERIPHERAL_PLL;
+ break;
+
+ case ALT_CLK_EMAC0:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_PERIPHERAL_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_PERIPHERAL_PLL) : ALT_CLK_PERIPHERAL_PLL_C0;
+ break;
+
+ case ALT_CLK_EMAC1:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_PERIPHERAL_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_PERIPHERAL_PLL) : ALT_CLK_PERIPHERAL_PLL_C1;
+ break;
+
+ case ALT_CLK_USB_MP:
+ case ALT_CLK_SPI_M:
+ case ALT_CLK_CAN0:
+ case ALT_CLK_CAN1:
+ case ALT_CLK_GPIO_DB:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_PERIPHERAL_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_PERIPHERAL_PLL) : ALT_CLK_PERIPHERAL_PLL_C4;
+ break;
+
+ case ALT_CLK_H2F_USER1:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_PERIPHERAL_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_PERIPHERAL_PLL) : ALT_CLK_PERIPHERAL_PLL_C5;
+ break;
+
+ case ALT_CLK_SDMMC:
+ temp = ALT_CLKMGR_PERPLL_SRC_SDMMC_GET(alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR));
+ if (temp == ALT_CLKMGR_PERPLL_SRC_SDMMC_E_F2S_PERIPH_REF_CLK)
+ {
+ ret = ALT_CLK_F2H_PERIPH_REF;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_SRC_SDMMC_E_MAIN_NAND_CLK)
+ {
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_MAIN_PLL) == ALT_E_TRUE) ?
+ ALT_CLK_IN_PIN_OSC1 : ALT_CLK_MAIN_PLL_C4;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_SRC_SDMMC_E_PERIPH_NAND_CLK)
+ {
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_PERIPHERAL_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_PERIPHERAL_PLL) : ALT_CLK_PERIPHERAL_PLL_C3;
+ }
+ break;
+
+ case ALT_CLK_NAND_X:
+ case ALT_CLK_NAND:
+ temp = ALT_CLKMGR_PERPLL_SRC_NAND_GET(alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR));
+ if (temp == ALT_CLKMGR_PERPLL_SRC_NAND_E_F2S_PERIPH_REF_CLK)
+ {
+ ret = ALT_CLK_F2H_PERIPH_REF;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_SRC_NAND_E_MAIN_NAND_CLK)
+ {
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_MAIN_PLL) == ALT_E_TRUE) ?
+ ALT_CLK_IN_PIN_OSC1 : ALT_CLK_MAIN_PLL_C4;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_SRC_NAND_E_PERIPH_NAND_CLK)
+ {
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_PERIPHERAL_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_PERIPHERAL_PLL) : ALT_CLK_PERIPHERAL_PLL_C3;
+ }
+ break;
+
+ case ALT_CLK_QSPI:
+ temp = ALT_CLKMGR_PERPLL_SRC_QSPI_GET(alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR));
+ if (temp == ALT_CLKMGR_PERPLL_SRC_QSPI_E_F2S_PERIPH_REF_CLK)
+ {
+ ret = ALT_CLK_F2H_PERIPH_REF;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_SRC_QSPI_E_MAIN_QSPI_CLK)
+ {
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_MAIN_PLL) == ALT_E_TRUE) ?
+ ALT_CLK_IN_PIN_OSC1 : ALT_CLK_MAIN_PLL_C3;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_SRC_QSPI_E_PERIPH_QSPI_CLK)
+ {
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_PERIPHERAL_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_PERIPHERAL_PLL) : ALT_CLK_PERIPHERAL_PLL_C2;
+ }
+ break;
+
+ // SDRAM Clock Group
+ case ALT_CLK_SDRAM_PLL_C0:
+ case ALT_CLK_SDRAM_PLL_C1:
+ case ALT_CLK_SDRAM_PLL_C2:
+ case ALT_CLK_SDRAM_PLL_C3:
+ case ALT_CLK_SDRAM_PLL_C4:
+ case ALT_CLK_SDRAM_PLL_C5:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_SDRAM_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_SDRAM_PLL) : ALT_CLK_SDRAM_PLL;
+ break;
+ case ALT_CLK_DDR_DQS:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_SDRAM_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_SDRAM_PLL) : ALT_CLK_SDRAM_PLL_C0;
+ break;
+ case ALT_CLK_DDR_2X_DQS:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_SDRAM_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_SDRAM_PLL) : ALT_CLK_SDRAM_PLL_C1;
+ break;
+ case ALT_CLK_DDR_DQ:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_SDRAM_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_SDRAM_PLL) : ALT_CLK_SDRAM_PLL_C2;
+ break;
+ case ALT_CLK_H2F_USER2:
+ ret = (alt_clk_pll_is_bypassed(ALT_CLK_SDRAM_PLL) == ALT_E_TRUE) ?
+ alt_clk_pll_source_get(ALT_CLK_SDRAM_PLL) : ALT_CLK_SDRAM_PLL_C5;
+ break;
+
+ // Clock Output Pins
+ case ALT_CLK_OUT_PIN_EMAC0_TX:
+ case ALT_CLK_OUT_PIN_EMAC1_TX:
+ case ALT_CLK_OUT_PIN_SDMMC:
+ case ALT_CLK_OUT_PIN_I2C0_SCL:
+ case ALT_CLK_OUT_PIN_I2C1_SCL:
+ case ALT_CLK_OUT_PIN_I2C2_SCL:
+ case ALT_CLK_OUT_PIN_I2C3_SCL:
+ case ALT_CLK_OUT_PIN_SPIM0:
+ case ALT_CLK_OUT_PIN_SPIM1:
+ case ALT_CLK_OUT_PIN_QSPI:
+ ret = ALT_CLK_UNKNOWN;
+ break;
+
+ default:
+ ret = ALT_CLK_UNKNOWN;
+ break;
+ }
+
+ return ret;
+}
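+
+/*
+ * Usage sketch (illustrative only): walking back up the clock tree, e.g. to
+ * find out which PLL counter (or bypass source) currently feeds the QSPI
+ * clock:
+ *
+ *     ALT_CLK_t src = alt_clk_source_get(ALT_CLK_QSPI);
+ *     if (src == ALT_CLK_PERIPHERAL_PLL_C2)
+ *     {
+ *         // QSPI is fed from Peripheral PLL counter C2
+ *     }
+ */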
+
+//
+// alt_clk_source_set() sets the specified clock's input reference clock source
+// selection to the specified input. It does not handle gating the specified clock
+// off and back on (that is covered by other functions in this API), but it does
+// verify that the clock is off before changing the divider or PLL. Note that the PLL
+// must have regained phase-lock before the bypass is disabled.
+//
+ALT_STATUS_CODE alt_clk_source_set(ALT_CLK_t clk, ALT_CLK_t ref_clk)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+ uint32_t temp;
+
+ if (ALT_CLK_MAIN_PLL == clk)
+ {
+ if ((ref_clk == ALT_CLK_IN_PIN_OSC1) || (ref_clk == ALT_CLK_OSC1))
+ {
+ // status remains ALT_E_SUCCESS
+ }
+ else
+ {
+ status = ALT_E_BAD_ARG;
+ }
+ }
+ else if (ALT_CLK_PERIPHERAL_PLL == clk)
+ {
+ // the PLL must be bypassed before getting here
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_VCO_ADDR);
+ temp &= ALT_CLKMGR_PERPLL_VCO_PSRC_CLR_MSK;
+
+ if ((ref_clk == ALT_CLK_IN_PIN_OSC1) || (ref_clk == ALT_CLK_OSC1))
+ {
+ temp |= ALT_CLKMGR_PERPLL_VCO_PSRC_SET(ALT_CLKMGR_PERPLL_VCO_PSRC_E_EOSC1);
+ alt_write_word(ALT_CLKMGR_PERPLL_VCO_ADDR, temp);
+ }
+ else if (ref_clk == ALT_CLK_IN_PIN_OSC2)
+ {
+ temp |= ALT_CLKMGR_PERPLL_VCO_PSRC_SET(ALT_CLKMGR_PERPLL_VCO_PSRC_E_EOSC2);
+ alt_write_word(ALT_CLKMGR_PERPLL_VCO_ADDR, temp);
+ }
+ else if (ref_clk == ALT_CLK_F2H_PERIPH_REF)
+ {
+ temp |= ALT_CLKMGR_PERPLL_VCO_PSRC_SET(ALT_CLKMGR_PERPLL_VCO_PSRC_E_F2S_PERIPH_REF);
+ alt_write_word(ALT_CLKMGR_PERPLL_VCO_ADDR, temp);
+ }
+ else
+ {
+ status = ALT_E_INV_OPTION;
+ }
+ }
+ else if (ALT_CLK_SDRAM_PLL == clk)
+ {
+ temp = alt_read_word(ALT_CLKMGR_SDRPLL_VCO_ADDR);
+ temp &= ALT_CLKMGR_SDRPLL_VCO_SSRC_CLR_MSK;
+
+ if ((ref_clk == ALT_CLK_IN_PIN_OSC1) || (ref_clk == ALT_CLK_OSC1))
+ {
+ temp |= ALT_CLKMGR_SDRPLL_VCO_SSRC_SET(ALT_CLKMGR_SDRPLL_VCO_SSRC_E_EOSC1);
+ alt_write_word(ALT_CLKMGR_SDRPLL_VCO_ADDR, temp);
+ }
+ else if (ref_clk == ALT_CLK_IN_PIN_OSC2)
+ {
+ temp |= ALT_CLKMGR_SDRPLL_VCO_SSRC_SET(ALT_CLKMGR_SDRPLL_VCO_SSRC_E_EOSC2);
+ alt_write_word(ALT_CLKMGR_SDRPLL_VCO_ADDR, temp);
+ }
+ else if (ref_clk == ALT_CLK_F2H_SDRAM_REF)
+ {
+ temp |= ALT_CLKMGR_SDRPLL_VCO_SSRC_SET(ALT_CLKMGR_SDRPLL_VCO_SSRC_E_F2S_SDRAM_REF);
+ alt_write_word(ALT_CLKMGR_SDRPLL_VCO_ADDR, temp);
+ }
+ else
+ {
+ status = ALT_E_INV_OPTION;
+ }
+ }
+ else if ( ALT_CLK_L4_MP == clk)
+ {
+ // clock is gated off
+ if (ref_clk == ALT_CLK_MAIN_PLL_C1)
+ {
+ alt_clrbits_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR, ALT_CLKMGR_MAINPLL_L4SRC_L4MP_SET_MSK);
+ }
+ else if (ref_clk == ALT_CLK_PERIPHERAL_PLL_C4)
+ {
+ alt_setbits_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR, ALT_CLKMGR_MAINPLL_L4SRC_L4MP_SET_MSK);
+ }
+ else
+ {
+ status = ALT_E_INV_OPTION;
+ }
+ }
+ else if ( ALT_CLK_L4_SP == clk)
+ {
+ if (ref_clk == ALT_CLK_MAIN_PLL_C1)
+ {
+ alt_clrbits_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR, ALT_CLKMGR_MAINPLL_L4SRC_L4SP_SET_MSK);
+ }
+ else if (ref_clk == ALT_CLK_PERIPHERAL_PLL_C4)
+ {
+ alt_setbits_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR, ALT_CLKMGR_MAINPLL_L4SRC_L4SP_SET_MSK);
+ }
+ else
+ {
+ status = ALT_E_INV_OPTION;
+ }
+ }
+ else if (ALT_CLK_SDMMC == clk)
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR);
+ temp &= ALT_CLKMGR_PERPLL_SRC_SDMMC_CLR_MSK;
+
+ if (ref_clk == ALT_CLK_F2H_PERIPH_REF)
+ {
+ temp |= ALT_CLKMGR_PERPLL_SRC_SDMMC_SET(ALT_CLKMGR_PERPLL_SRC_SDMMC_E_F2S_PERIPH_REF_CLK);
+ alt_write_word(ALT_CLKMGR_PERPLL_SRC_ADDR, temp);
+ }
+ else if ((ref_clk == ALT_CLK_MAIN_PLL_C4) || (ref_clk == ALT_CLK_MAIN_NAND_SDMMC))
+ {
+ temp |= ALT_CLKMGR_PERPLL_SRC_SDMMC_SET(ALT_CLKMGR_PERPLL_SRC_SDMMC_E_MAIN_NAND_CLK);
+ alt_write_word(ALT_CLKMGR_PERPLL_SRC_ADDR, temp);
+ }
+ else if (ref_clk == ALT_CLK_PERIPHERAL_PLL_C3)
+ {
+ temp |= ALT_CLKMGR_PERPLL_SRC_SDMMC_SET(ALT_CLKMGR_PERPLL_SRC_SDMMC_E_PERIPH_NAND_CLK);
+ alt_write_word(ALT_CLKMGR_PERPLL_SRC_ADDR, temp);
+ }
+ else
+ {
+ status = ALT_E_INV_OPTION;
+ }
+ }
+ else if ((ALT_CLK_NAND_X == clk) || ( ALT_CLK_NAND == clk))
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR);
+ temp &= ALT_CLKMGR_PERPLL_SRC_NAND_CLR_MSK;
+
+ if (ref_clk == ALT_CLK_F2H_PERIPH_REF)
+ {
+ temp |= ALT_CLKMGR_PERPLL_SRC_NAND_SET(ALT_CLKMGR_PERPLL_SRC_NAND_E_F2S_PERIPH_REF_CLK);
+ alt_write_word(ALT_CLKMGR_PERPLL_SRC_ADDR, temp);
+ }
+ else if ((ref_clk == ALT_CLK_MAIN_PLL_C4) || (ref_clk == ALT_CLK_MAIN_NAND_SDMMC))
+ {
+ temp |= ALT_CLKMGR_PERPLL_SRC_NAND_SET(ALT_CLKMGR_PERPLL_SRC_NAND_E_MAIN_NAND_CLK);
+ alt_write_word(ALT_CLKMGR_PERPLL_SRC_ADDR, temp);
+ }
+ else if (ref_clk == ALT_CLK_PERIPHERAL_PLL_C3)
+ {
+ temp |= ALT_CLKMGR_PERPLL_SRC_NAND_SET(ALT_CLKMGR_PERPLL_SRC_NAND_E_PERIPH_NAND_CLK);
+ alt_write_word(ALT_CLKMGR_PERPLL_SRC_ADDR, temp);
+ }
+ else
+ {
+ status = ALT_E_INV_OPTION;
+ }
+ }
+ else if (ALT_CLK_QSPI == clk)
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR);
+ temp &= ALT_CLKMGR_PERPLL_SRC_QSPI_CLR_MSK;
+
+ if (ref_clk == ALT_CLK_F2H_PERIPH_REF)
+ {
+ temp |= ALT_CLKMGR_PERPLL_SRC_QSPI_SET(ALT_CLKMGR_PERPLL_SRC_QSPI_E_F2S_PERIPH_REF_CLK);
+ alt_write_word(ALT_CLKMGR_PERPLL_SRC_ADDR, temp);
+ }
+ else if ((ref_clk == ALT_CLK_MAIN_PLL_C3) || (ref_clk == ALT_CLK_MAIN_QSPI))
+ {
+ temp |= ALT_CLKMGR_PERPLL_SRC_QSPI_SET(ALT_CLKMGR_PERPLL_SRC_QSPI_E_MAIN_QSPI_CLK);
+ alt_write_word(ALT_CLKMGR_PERPLL_SRC_ADDR, temp);
+ }
+ else if (ref_clk == ALT_CLK_PERIPHERAL_PLL_C2)
+ {
+ temp |= ALT_CLKMGR_PERPLL_SRC_QSPI_SET(ALT_CLKMGR_PERPLL_SRC_QSPI_E_PERIPH_QSPI_CLK);
+ alt_write_word(ALT_CLKMGR_PERPLL_SRC_ADDR, temp);
+ }
+ else
+ {
+ status = ALT_E_INV_OPTION;
+ }
+ }
+
+ return status;
+}
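+
+//
+// Hypothetical usage sketch (not part of the original hwlib sources): route the
+// l4_sp_clk from Main PLL C1 to Peripheral PLL C4 and read the selection back.
+// The caller is assumed to have gated the affected clock off beforehand, as
+// described in the comment above alt_clk_source_set().
+//
+static ALT_STATUS_CODE example_retarget_l4_sp_clock(ALT_CLK_t * active_src)
+{
+    // Request the alternate reference for the L4 SP clock.
+    ALT_STATUS_CODE status = alt_clk_source_set(ALT_CLK_L4_SP, ALT_CLK_PERIPHERAL_PLL_C4);
+
+    if ((status == ALT_E_SUCCESS) && (active_src != NULL))
+    {
+        // Read the selection back; when the supplying PLL is bypassed this may
+        // report the PLL's own reference clock rather than the counter output.
+        *active_src = alt_clk_source_get(ALT_CLK_L4_SP);
+    }
+
+    return status;
+}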
+
+//
+// alt_clk_ext_clk_freq_set() specifies the frequency of the external clock source in
+// Hz. This value is stored in a static parameter block and used for later calculations.
+// The supplied frequency should be within the Fmin and Fmax values allowed for the
+// external clock source.
+//
+ALT_STATUS_CODE alt_clk_ext_clk_freq_set(ALT_CLK_t clk, alt_freq_t freq)
+{
+ ALT_STATUS_CODE status = ALT_E_BAD_ARG;
+
+ if ((clk == ALT_CLK_IN_PIN_OSC1) || (clk == ALT_CLK_OSC1)) // two names for one input
+ {
+ if ((freq >= alt_ext_clk_paramblok.clkosc1.freqmin) && (freq <= alt_ext_clk_paramblok.clkosc1.freqmax))
+ {
+ alt_ext_clk_paramblok.clkosc1.freqcur = freq;
+ status = ALT_E_SUCCESS;
+ }
+ else
+ {
+ status = ALT_E_ARG_RANGE;
+ }
+ }
+ else if (clk == ALT_CLK_IN_PIN_OSC2) // the other clock input pin
+ {
+ if ((freq >= alt_ext_clk_paramblok.clkosc2.freqmin) && (freq <= alt_ext_clk_paramblok.clkosc2.freqmax))
+ {
+ alt_ext_clk_paramblok.clkosc2.freqcur = freq;
+ status = ALT_E_SUCCESS;
+ }
+ else
+ {
+ status = ALT_E_ARG_RANGE;
+ }
+ }
+ else if (clk == ALT_CLK_F2H_PERIPH_REF) // clock from the FPGA
+ {
+ if ((freq >= alt_ext_clk_paramblok.periph.freqmin) && (freq <= alt_ext_clk_paramblok.periph.freqmax))
+ {
+ alt_ext_clk_paramblok.periph.freqcur = freq;
+ status = ALT_E_SUCCESS;
+ }
+ else
+ {
+ status = ALT_E_ARG_RANGE;
+ }
+ }
+ else if (clk == ALT_CLK_F2H_SDRAM_REF) // clock from the FPGA SDRAM
+ {
+ if ((freq >= alt_ext_clk_paramblok.sdram.freqmin) && (freq <= alt_ext_clk_paramblok.sdram.freqmax))
+ {
+ alt_ext_clk_paramblok.sdram.freqcur = freq;
+ status = ALT_E_SUCCESS;
+ }
+ else
+ {
+ status = ALT_E_ARG_RANGE;
+ }
+ }
+ else
+ {
+ status = ALT_E_BAD_ARG;
+ }
+
+ return status;
+}
+
+
+//
+// alt_clk_ext_clk_freq_get() returns the frequency of the external clock source in
+// Hz, as previously stored by alt_clk_ext_clk_freq_set().
+//
+alt_freq_t alt_clk_ext_clk_freq_get(ALT_CLK_t clk)
+{
+ uint32_t ret = 0;
+
+ if ((clk == ALT_CLK_IN_PIN_OSC1) || (clk == ALT_CLK_OSC1)) // two names for one input
+ {
+ ret = alt_ext_clk_paramblok.clkosc1.freqcur;
+ }
+ else if (clk == ALT_CLK_IN_PIN_OSC2)
+ {
+ ret = alt_ext_clk_paramblok.clkosc2.freqcur;
+ }
+ else if (clk == ALT_CLK_F2H_PERIPH_REF) // clock from the FPGA
+ {
+ ret = alt_ext_clk_paramblok.periph.freqcur;
+ }
+ else if (clk == ALT_CLK_F2H_SDRAM_REF) // clock from the FPGA
+ {
+ ret = alt_ext_clk_paramblok.sdram.freqcur;
+ }
+ return ret;
+}
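+
+//
+// Hypothetical usage sketch (not part of the original hwlib sources): register a
+// 25 MHz crystal on the osc1 input and read the stored value back. The 25 MHz
+// figure is only an example; the setter rejects values outside the parameter
+// block limits with ALT_E_ARG_RANGE.
+//
+static ALT_STATUS_CODE example_register_osc1_frequency(void)
+{
+    const alt_freq_t osc1_hz = 25000000;        // board-specific example value
+
+    ALT_STATUS_CODE status = alt_clk_ext_clk_freq_set(ALT_CLK_IN_PIN_OSC1, osc1_hz);
+    if (status != ALT_E_SUCCESS)
+    {
+        return status;
+    }
+
+    // The getter simply returns the stored value, so it should now match.
+    return (alt_clk_ext_clk_freq_get(ALT_CLK_IN_PIN_OSC1) == osc1_hz) ? ALT_E_SUCCESS : ALT_E_ERROR;
+}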
+
+
+//
+// alt_clk_pll_cfg_get() returns the current PLL configuration.
+//
+ALT_STATUS_CODE alt_clk_pll_cfg_get(ALT_CLK_t pll, ALT_CLK_PLL_CFG_t * pll_cfg)
+{
+ ALT_STATUS_CODE ret = ALT_E_ERROR; // return value
+ uint32_t temp; // temp variable
+
+ if (pll_cfg == NULL)
+ {
+ ret = ALT_E_BAD_ARG;
+ return ret;
+ }
+
+ if (pll == ALT_CLK_MAIN_PLL)
+ {
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_VCO_ADDR);
+ pll_cfg->ref_clk = ALT_CLK_IN_PIN_OSC1;
+ pll_cfg->mult = ALT_CLKMGR_MAINPLL_VCO_NUMER_GET(temp);
+ pll_cfg->div = ALT_CLKMGR_MAINPLL_VCO_DENOM_GET(temp);
+
+ // Get the C0-C5 divider values:
+ pll_cfg->cntrs[0] = ALT_CLKMGR_MAINPLL_MPUCLK_CNT_GET(alt_read_word(ALT_CLKMGR_ALTERA_MPUCLK_ADDR));
+ // C0 - mpu_clk
+
+ pll_cfg->cntrs[1] = ALT_CLKMGR_MAINPLL_MAINCLK_CNT_GET(alt_read_word(ALT_CLKMGR_ALTERA_MAINCLK_ADDR));
+ // C1 - main_clk
+
+ pll_cfg->cntrs[2] = ALT_CLKMGR_MAINPLL_DBGATCLK_CNT_GET(alt_read_word(ALT_CLKMGR_MAINPLL_DBGATCLK_ADDR));
+ // C2 - dbg_base_clk
+
+ pll_cfg->cntrs[3] = ALT_CLKMGR_MAINPLL_MAINQSPICLK_CNT_GET(alt_read_word(ALT_CLKMGR_MAINPLL_MAINQSPICLK_ADDR));
+ // C3 - main_qspi_clk
+
+ pll_cfg->cntrs[4] = ALT_CLKMGR_MAINPLL_MAINNANDSDMMCCLK_CNT_GET(alt_read_word(ALT_CLKMGR_MAINPLL_MAINNANDSDMMCCLK_ADDR));
+ // C4 - main_nand_sdmmc_clk
+
+ pll_cfg->cntrs[5] = ALT_CLKMGR_MAINPLL_CFGS2FUSER0CLK_CNT_GET(alt_read_word(ALT_CLKMGR_MAINPLL_CFGS2FUSER0CLK_ADDR));
+ // C5 - cfg_s2f_user0_clk aka cfg_h2f_user0_clk
+
+ // The Main PLL C0-C5 outputs have no phase shift capabilities :
+ pll_cfg->pshift[0] = pll_cfg->pshift[1] = pll_cfg->pshift[2] =
+ pll_cfg->pshift[3] = pll_cfg->pshift[4] = pll_cfg->pshift[5] = 0;
+ ret = ALT_E_SUCCESS;
+ }
+ else if (pll == ALT_CLK_PERIPHERAL_PLL)
+ {
+ temp = ALT_CLKMGR_PERPLL_VCO_PSRC_GET(alt_read_word(ALT_CLKMGR_PERPLL_VCO_ADDR));
+ if (temp <= 2)
+ {
+ if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_EOSC1)
+ {
+ pll_cfg->ref_clk = ALT_CLK_IN_PIN_OSC1;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_EOSC2)
+ {
+ pll_cfg->ref_clk = ALT_CLK_IN_PIN_OSC2;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_F2S_PERIPH_REF)
+ {
+ pll_cfg->ref_clk = ALT_CLK_F2H_PERIPH_REF;
+ }
+
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_VCO_ADDR);
+ pll_cfg->mult = ALT_CLKMGR_PERPLL_VCO_NUMER_GET(temp);
+ pll_cfg->div = ALT_CLKMGR_PERPLL_VCO_DENOM_GET(temp);
+
+ // Get the C0-C5 divider values:
+ pll_cfg->cntrs[0] = ALT_CLKMGR_PERPLL_EMAC0CLK_CNT_GET(alt_read_word(ALT_CLKMGR_PERPLL_EMAC0CLK_ADDR));
+ // C0 - emac0_clk
+
+ pll_cfg->cntrs[1] = ALT_CLKMGR_PERPLL_EMAC1CLK_CNT_GET(alt_read_word(ALT_CLKMGR_PERPLL_EMAC1CLK_ADDR));
+ // C1 - emac1_clk
+
+ pll_cfg->cntrs[2] = ALT_CLKMGR_PERPLL_PERQSPICLK_CNT_GET(alt_read_word(ALT_CLKMGR_PERPLL_PERQSPICLK_ADDR));
+ // C2 - periph_qspi_clk
+
+ pll_cfg->cntrs[3] = ALT_CLKMGR_PERPLL_PERNANDSDMMCCLK_CNT_GET(alt_read_word(ALT_CLKMGR_PERPLL_PERNANDSDMMCCLK_ADDR));
+ // C3 - periph_nand_sdmmc_clk
+
+ pll_cfg->cntrs[4] = ALT_CLKMGR_PERPLL_PERBASECLK_CNT_GET(alt_read_word(ALT_CLKMGR_PERPLL_PERBASECLK_ADDR));
+ // C4 - periph_base_clk
+
+ pll_cfg->cntrs[5] = ALT_CLKMGR_PERPLL_S2FUSER1CLK_CNT_GET(alt_read_word(ALT_CLKMGR_PERPLL_S2FUSER1CLK_ADDR));
+ // C5 - s2f_user1_clk
+
+ // The Peripheral PLL C0-C5 outputs have no phase shift capabilities :
+ pll_cfg->pshift[0] = pll_cfg->pshift[1] = pll_cfg->pshift[2] =
+ pll_cfg->pshift[3] = pll_cfg->pshift[4] = pll_cfg->pshift[5] = 0;
+ ret = ALT_E_SUCCESS;
+ }
+ }
+ else if (pll == ALT_CLK_SDRAM_PLL)
+ {
+ temp = ALT_CLKMGR_SDRPLL_VCO_SSRC_GET(alt_read_word(ALT_CLKMGR_SDRPLL_VCO_ADDR));
+ if (temp <= 2)
+ {
+ if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_EOSC1)
+ {
+ pll_cfg->ref_clk = ALT_CLK_IN_PIN_OSC1;
+ }
+ else if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_EOSC2)
+ {
+ pll_cfg->ref_clk = ALT_CLK_IN_PIN_OSC2;
+ }
+ else if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_F2S_SDRAM_REF)
+ {
+ pll_cfg->ref_clk = ALT_CLK_F2H_SDRAM_REF;
+ }
+
+ pll_cfg->mult = ALT_CLKMGR_SDRPLL_VCO_NUMER_GET(alt_read_word(ALT_CLKMGR_SDRPLL_VCO_ADDR));
+ pll_cfg->div = ALT_CLKMGR_SDRPLL_VCO_DENOM_GET(alt_read_word(ALT_CLKMGR_SDRPLL_VCO_ADDR));
+
+ // Get the C0-C5 divider values:
+ pll_cfg->cntrs[0] = ALT_CLKMGR_SDRPLL_DDRDQSCLK_CNT_GET(alt_read_word(ALT_CLKMGR_SDRPLL_DDRDQSCLK_ADDR));
+ pll_cfg->pshift[0] = ALT_CLKMGR_SDRPLL_DDRDQSCLK_PHASE_GET(alt_read_word(ALT_CLKMGR_SDRPLL_DDRDQSCLK_ADDR));
+ // C0 - ddr_dqs_clk
+
+ pll_cfg->cntrs[1] = ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_CNT_GET(alt_read_word(ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_ADDR));
+ pll_cfg->pshift[1] = ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_PHASE_GET(alt_read_word(ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_ADDR));
+ // C1 - ddr_2x_dqs_clk
+
+ pll_cfg->cntrs[2] = ALT_CLKMGR_SDRPLL_DDRDQCLK_CNT_GET(alt_read_word(ALT_CLKMGR_SDRPLL_DDRDQCLK_ADDR));
+ pll_cfg->pshift[2] = ALT_CLKMGR_SDRPLL_DDRDQCLK_PHASE_GET(alt_read_word(ALT_CLKMGR_SDRPLL_DDRDQCLK_ADDR));
+ // C2 - ddr_dq_clk
+
+ pll_cfg->cntrs[3] = pll_cfg->cntrs[4] = pll_cfg->pshift[3] = pll_cfg->pshift[4] = 0;
+ // C3 & C4 outputs don't exist on the SDRAM PLL
+
+ pll_cfg->cntrs[5] = ALT_CLKMGR_SDRPLL_S2FUSER2CLK_CNT_GET(alt_read_word(ALT_CLKMGR_SDRPLL_S2FUSER2CLK_ADDR));
+ pll_cfg->pshift[5] = ALT_CLKMGR_SDRPLL_S2FUSER2CLK_PHASE_GET(alt_read_word(ALT_CLKMGR_SDRPLL_S2FUSER2CLK_ADDR));
+ // C5 - s2f_user2_clk or h2f_user2_clk
+
+ ret = ALT_E_SUCCESS;
+ }
+ }
+
+ return ret;
+}
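+
+//
+// Hypothetical usage sketch (not part of the original hwlib sources): read the
+// Main PLL settings and recompute the VCO frequency from them. The mult and div
+// fields hold the raw register encodings (effective factor minus one), hence the
+// +1 terms, matching the arithmetic used by alt_clk_pll_vco_freq_get() below.
+//
+static ALT_STATUS_CODE example_main_pll_vco_from_cfg(alt_freq_t * vco_freq)
+{
+    ALT_CLK_PLL_CFG_t cfg;
+    ALT_STATUS_CODE status = alt_clk_pll_cfg_get(ALT_CLK_MAIN_PLL, &cfg);
+
+    if ((status == ALT_E_SUCCESS) && (vco_freq != NULL))
+    {
+        uint64_t vco = (uint64_t) alt_clk_ext_clk_freq_get(cfg.ref_clk);
+        vco *= (cfg.mult + 1);
+        vco /= (cfg.div + 1);
+        *vco_freq = (alt_freq_t) vco;   // assumes the result fits in 32 bits
+    }
+
+    return status;
+}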
+
+
+//
+// alt_clk_pll_cfg_set() sets the PLL configuration using the configuration parameters
+// specified in pll_cfg.
+//
+ALT_STATUS_CODE alt_clk_pll_cfg_set(ALT_CLK_t pll, const ALT_CLK_PLL_CFG_t * pll_cfg)
+{
+ if (pll_cfg == NULL)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (alt_clk_pll_is_bypassed(pll) != ALT_E_TRUE) // safe to write the PLL registers?
+ {
+ return ALT_E_ERROR;
+ }
+
+ ALT_STATUS_CODE ret = ALT_E_ERROR;
+ uint32_t temp;
+
+ if (pll == ALT_CLK_MAIN_PLL)
+ {
+ temp = (ALT_CLKMGR_MAINPLL_VCO_NUMER_CLR_MSK & ALT_CLKMGR_MAINPLL_VCO_DENOM_CLR_MSK)
+ & alt_read_word(ALT_CLKMGR_MAINPLL_VCO_ADDR);
+ temp |= ALT_CLKMGR_MAINPLL_VCO_NUMER_SET(pll_cfg->mult) |
+ ALT_CLKMGR_MAINPLL_VCO_DENOM_SET(pll_cfg->div);
+
+ alt_write_word(ALT_CLKMGR_MAINPLL_VCO_ADDR, temp);
+ alt_write_word(ALT_CLKMGR_ALTERA_MPUCLK_ADDR, pll_cfg->cntrs[0]);
+ alt_write_word(ALT_CLKMGR_ALTERA_MAINCLK_ADDR, pll_cfg->cntrs[1]);
+ alt_write_word(ALT_CLKMGR_MAINPLL_DBGATCLK_ADDR, pll_cfg->cntrs[2]);
+ alt_write_word(ALT_CLKMGR_MAINPLL_MAINQSPICLK_ADDR, pll_cfg->cntrs[3]);
+ alt_write_word(ALT_CLKMGR_MAINPLL_MAINNANDSDMMCCLK_ADDR, pll_cfg->cntrs[4]);
+ alt_write_word(ALT_CLKMGR_MAINPLL_CFGS2FUSER0CLK_ADDR, pll_cfg->cntrs[5]);
+ ret = ALT_E_SUCCESS;
+ }
+ else if (pll == ALT_CLK_PERIPHERAL_PLL)
+ {
+ temp = ALT_CLKMGR_PERPLL_VCO_NUMER_CLR_MSK & ALT_CLKMGR_PERPLL_VCO_DENOM_CLR_MSK
+ & ALT_CLKMGR_PERPLL_VCO_PSRC_CLR_MSK;
+ temp &= alt_read_word(ALT_CLKMGR_PERPLL_VCO_ADDR);
+ temp |= ALT_CLKMGR_PERPLL_VCO_NUMER_SET(pll_cfg->mult)
+ | ALT_CLKMGR_PERPLL_VCO_DENOM_SET(pll_cfg->div);
+
+ if ((pll_cfg->ref_clk == ALT_CLK_IN_PIN_OSC1) || (pll_cfg->ref_clk == ALT_CLK_OSC1))
+ {
+ temp |= ALT_CLKMGR_PERPLL_VCO_PSRC_SET(ALT_CLKMGR_PERPLL_VCO_PSRC_E_EOSC1);
+ }
+ else if (pll_cfg->ref_clk == ALT_CLK_IN_PIN_OSC2)
+ {
+ temp |= ALT_CLKMGR_PERPLL_VCO_PSRC_SET(ALT_CLKMGR_PERPLL_VCO_PSRC_E_EOSC2);
+ }
+ else if (pll_cfg->ref_clk == ALT_CLK_F2H_PERIPH_REF)
+ {
+ temp |= ALT_CLKMGR_PERPLL_VCO_PSRC_SET(ALT_CLKMGR_PERPLL_VCO_PSRC_E_F2S_PERIPH_REF);
+ }
+ else
+ {
+ return ret;
+ }
+
+ alt_write_word(ALT_CLKMGR_PERPLL_VCO_ADDR, temp);
+ alt_write_word(ALT_CLKMGR_PERPLL_EMAC0CLK_ADDR, pll_cfg->cntrs[0]);
+ alt_write_word(ALT_CLKMGR_PERPLL_EMAC1CLK_ADDR, pll_cfg->cntrs[1]);
+ alt_write_word(ALT_CLKMGR_PERPLL_PERQSPICLK_ADDR, pll_cfg->cntrs[2]);
+ alt_write_word(ALT_CLKMGR_PERPLL_PERNANDSDMMCCLK_ADDR, pll_cfg->cntrs[3]);
+ alt_write_word(ALT_CLKMGR_PERPLL_PERBASECLK_ADDR, pll_cfg->cntrs[4]);
+ alt_write_word(ALT_CLKMGR_PERPLL_S2FUSER1CLK_ADDR, pll_cfg->cntrs[5]);
+ ret = ALT_E_SUCCESS;
+ }
+ else if (pll == ALT_CLK_SDRAM_PLL)
+ {
+ // write the SDRAM PLL VCO Counter -----------------------------
+ temp = ALT_CLKMGR_SDRPLL_VCO_NUMER_CLR_MSK & ALT_CLKMGR_SDRPLL_VCO_DENOM_CLR_MSK
+ & ALT_CLKMGR_SDRPLL_VCO_SSRC_CLR_MSK; // make a mask
+ temp &= alt_read_word(ALT_CLKMGR_SDRPLL_VCO_ADDR);
+ temp |= ALT_CLKMGR_SDRPLL_VCO_NUMER_SET(pll_cfg->mult)
+ | ALT_CLKMGR_SDRPLL_VCO_DENOM_SET(pll_cfg->div)
+ | ALT_CLKMGR_SDRPLL_VCO_OUTRSTALL_SET_MSK;
+ // setting this bit aligns the output phase of the counters and prevents
+ // glitches and too-short clock periods when restarting.
+ // this bit is cleared at the end of this routine
+
+ if ((pll_cfg->ref_clk == ALT_CLK_IN_PIN_OSC1) || (pll_cfg->ref_clk == ALT_CLK_OSC1))
+ {
+ temp |= ALT_CLKMGR_SDRPLL_VCO_SSRC_SET(ALT_CLKMGR_SDRPLL_VCO_SSRC_E_EOSC1);
+ }
+ else if (pll_cfg->ref_clk == ALT_CLK_IN_PIN_OSC2)
+ {
+ temp |= ALT_CLKMGR_SDRPLL_VCO_SSRC_SET(ALT_CLKMGR_SDRPLL_VCO_SSRC_E_EOSC2);
+ }
+        else if (pll_cfg->ref_clk == ALT_CLK_F2H_SDRAM_REF)
+            // the SDRAM PLL is fed by f2h_sdram_ref_clk, matching alt_clk_source_set() above
+ {
+ temp |= ALT_CLKMGR_SDRPLL_VCO_SSRC_SET(ALT_CLKMGR_SDRPLL_VCO_SSRC_E_F2S_SDRAM_REF);
+ }
+ else
+ {
+ return ret;
+ }
+
+ alt_write_word(ALT_CLKMGR_SDRPLL_VCO_ADDR, temp);
+
+ // write the SDRAM PLL C0 Divide Counter -----------------------------
+ temp = ALT_CLKMGR_SDRPLL_DDRDQSCLK_CNT_SET(pll_cfg->cntrs[0])
+ | ALT_CLKMGR_SDRPLL_DDRDQSCLK_PHASE_SET(pll_cfg->pshift[0]);
+
+ alt_clk_pllcounter_write(ALT_CLKMGR_SDRPLL_VCO_ADDR, ALT_CLKMGR_STAT_ADDR,
+ ALT_CLKMGR_SDRPLL_DDRDQSCLK_ADDR, temp,
+ ALT_CLKMGR_SDRPLL_DDRDQSCLK_CNT_SET_MSK | ALT_CLKMGR_SDRPLL_DDRDQSCLK_PHASE_SET_MSK,
+ ALT_CLKMGR_SDRPLL_DDRDQSCLK_CNT_LSB);
+        // mark success so the guarded C1/C2/C5 writes below execute and the
+        // outreset-all bit set above is cleared at the end of this routine
+        ret = ALT_E_SUCCESS;
+
+ // write the SDRAM PLL C1 Divide Counter -----------------------------
+ if (ret == ALT_E_SUCCESS)
+ {
+ temp = ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_CNT_SET(pll_cfg->cntrs[1])
+ | ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_PHASE_SET(pll_cfg->pshift[1]);
+ alt_clk_pllcounter_write(ALT_CLKMGR_SDRPLL_VCO_ADDR, ALT_CLKMGR_STAT_ADDR,
+ ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_ADDR, temp,
+ ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_CNT_SET_MSK | ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_PHASE_SET_MSK,
+ ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_CNT_LSB);
+ }
+
+ // write the SDRAM PLL C2 Divide Counter -----------------------------
+ if (ret == ALT_E_SUCCESS)
+ {
+ temp = ALT_CLKMGR_SDRPLL_DDRDQCLK_CNT_SET(pll_cfg->cntrs[2])
+ | ALT_CLKMGR_SDRPLL_DDRDQCLK_PHASE_SET(pll_cfg->pshift[2]);
+ alt_clk_pllcounter_write(ALT_CLKMGR_SDRPLL_VCO_ADDR, ALT_CLKMGR_STAT_ADDR,
+ ALT_CLKMGR_SDRPLL_DDRDQCLK_ADDR, temp,
+ ALT_CLKMGR_SDRPLL_DDRDQCLK_CNT_SET_MSK | ALT_CLKMGR_SDRPLL_DDRDQCLK_PHASE_SET_MSK,
+ ALT_CLKMGR_SDRPLL_DDRDQCLK_CNT_LSB);
+ }
+
+ // write the SDRAM PLL C5 Divide Counter -----------------------------
+ if (ret == ALT_E_SUCCESS)
+ {
+            temp = ALT_CLKMGR_SDRPLL_S2FUSER2CLK_CNT_SET(pll_cfg->cntrs[5])
+                    | ALT_CLKMGR_SDRPLL_S2FUSER2CLK_PHASE_SET(pll_cfg->pshift[5]);
+                    // C5 uses index 5, matching alt_clk_pll_cfg_get() above
+ alt_clk_pllcounter_write(ALT_CLKMGR_SDRPLL_VCO_ADDR, ALT_CLKMGR_STAT_ADDR,
+ ALT_CLKMGR_SDRPLL_S2FUSER2CLK_ADDR, temp,
+ ALT_CLKMGR_SDRPLL_S2FUSER2CLK_CNT_SET_MSK | ALT_CLKMGR_SDRPLL_S2FUSER2CLK_PHASE_SET_MSK,
+ ALT_CLKMGR_SDRPLL_S2FUSER2CLK_CNT_LSB);
+ }
+
+ if (ret == ALT_E_SUCCESS)
+ {
+ alt_clrbits_word(ALT_CLKMGR_SDRPLL_VCO_ADDR, ALT_CLKMGR_SDRPLL_VCO_OUTRSTALL_SET_MSK);
+ // allow the phase multiplexer and output counter to leave reset
+ }
+ }
+
+ return ret;
+}
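+
+//
+// Hypothetical usage sketch (not part of the original hwlib sources): apply a
+// previously captured configuration to the Peripheral PLL. Because
+// alt_clk_pll_cfg_set() refuses to write unless the PLL is bypassed, the sketch
+// wraps the call in a bypass / lock-wait / un-bypass sequence. The bypass call
+// arguments and the 1000 count lock timeout mirror their use elsewhere in this
+// file and are otherwise arbitrary.
+//
+static ALT_STATUS_CODE example_apply_periph_pll_cfg(const ALT_CLK_PLL_CFG_t * cfg)
+{
+    ALT_STATUS_CODE status;
+
+    // Isolate downstream clocks from the PLL while it is reprogrammed.
+    alt_clk_pll_bypass_enable(ALT_CLK_PERIPHERAL_PLL, 0);
+
+    status = alt_clk_pll_cfg_set(ALT_CLK_PERIPHERAL_PLL, cfg);
+
+    if (status == ALT_E_SUCCESS)
+    {
+        // Wait for the PLL to regain lock before releasing the bypass.
+        status = alt_clk_pll_lock_wait(ALT_CLK_PERIPHERAL_PLL, 1000);
+    }
+
+    alt_clk_pll_bypass_disable(ALT_CLK_PERIPHERAL_PLL);
+
+    return status;
+}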
+
+
+//
+// alt_clk_pll_vco_cfg_get() returns the current PLL VCO frequency configuration.
+//
+ALT_STATUS_CODE alt_clk_pll_vco_cfg_get(ALT_CLK_t pll, uint32_t * mult, uint32_t * div)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+ uint32_t temp;
+
+ if ( (mult == NULL) || (div == NULL) )
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (pll == ALT_CLK_MAIN_PLL)
+ {
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_VCO_ADDR);
+ *mult = ALT_CLKMGR_MAINPLL_VCO_NUMER_GET(temp) + 1;
+ *div = ALT_CLKMGR_MAINPLL_VCO_DENOM_GET(temp) + 1;
+ }
+ else if (pll == ALT_CLK_PERIPHERAL_PLL)
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_VCO_ADDR);
+ *mult = ALT_CLKMGR_PERPLL_VCO_NUMER_GET(temp) + 1;
+ *div = ALT_CLKMGR_PERPLL_VCO_DENOM_GET(temp) + 1;
+ }
+ else if (pll == ALT_CLK_SDRAM_PLL)
+ {
+ temp = alt_read_word(ALT_CLKMGR_SDRPLL_VCO_ADDR);
+ *mult = ALT_CLKMGR_SDRPLL_VCO_NUMER_GET(temp) + 1;
+ *div = ALT_CLKMGR_SDRPLL_VCO_DENOM_GET(temp) + 1;
+ }
+ else
+ {
+ status = ALT_E_ERROR;
+ }
+
+ return status;
+}
+
+
+/****************************************************************************************/
+/* This enum enumerates a set of possible change methods that are available for use by */
+/* alt_clk_pll_vco_cfg_set() to change VCO parameter settings. */
+/****************************************************************************************/
+
+typedef enum ALT_CLK_PLL_VCO_CHG_METHOD_e
+{
+ ALT_VCO_CHG_NONE_VALID = 0, /* No valid method to change PLL
+ * VCO was found */
+ ALT_VCO_CHG_NOCHANGE = 0x00000001, /* Proposed new VCO values are the
+ * same as the old values */
+ ALT_VCO_CHG_NUM = 0x00000002, /* Can change the VCO multiplier
+ * alone */
+ ALT_VCO_CHG_NUM_BYP = 0x00000004, /* A VCO multiplier-only change will
+ * require putting the PLL in bypass */
+ ALT_VCO_CHG_DENOM = 0x00000008, /* Can change the VCO divider
+ * alone */
+ ALT_VCO_CHG_DENOM_BYP = 0x00000010, /* A VCO divider-only change will
+ * require putting the PLL in bypass */
+    ALT_VCO_CHG_NUM_DENOM       = 0x00000020,  /* Can change the clock multiplier
+                                                 * first, then the clock divider */
+    ALT_VCO_CHG_NUM_DENOM_BYP   = 0x00000040,  /* Changing the clock multiplier first,
+                                                 * then the clock divider will
+                                                 * require putting the PLL in bypass */
+    ALT_VCO_CHG_DENOM_NUM       = 0x00000080,  /* Can change the clock divider first,
+                                                 * then the clock multiplier */
+    ALT_VCO_CHG_DENOM_NUM_BYP   = 0x00000100   /* Changing the clock divider first,
+                                                 * then the clock multiplier will
+                                                 * require putting the PLL in bypass */
+} ALT_CLK_PLL_VCO_CHG_METHOD_t;
+
+
+
+/****************************************************************************************/
+/* alt_clk_pll_vco_chg_methods_get() determines which of the possible methods of        */
+/* changing the VCO are allowed within the limits set by the maximum PLL multiplier     */
+/* and divider values and by the upper and lower frequency limits of the PLL. It also   */
+/* determines whether each change can be made without the PLL losing lock; if lock      */
+/* would be lost, the PLL must be bypassed before the change is made and taken back     */
+/* out of bypass afterwards.                                                             */
+/****************************************************************************************/
+
+
+#define ALT_CLK_PLL_VCO_CHG_METHOD_TEST_MODE false
+ // used for testing writes to the PLL VCOs
+
+
+
+static ALT_CLK_PLL_VCO_CHG_METHOD_t alt_clk_pll_vco_chg_methods_get(ALT_CLK_t pll,
+ uint32_t mult, uint32_t div )
+{
+#if ALT_CLK_PLL_VCO_CHG_METHOD_TEST_MODE
+
+ // used for testing
+ return ALT_VCO_CHG_NOCHANGE;
+
+#else
+
+ // check PLL max value limits
+ if ( (mult == 0) || (mult > ALT_CLK_PLL_MULT_MAX)
+ || (div == 0) || (div > ALT_CLK_PLL_DIV_MAX)
+ )
+ {
+ return ALT_VCO_CHG_NONE_VALID;
+ }
+
+ ALT_CLK_PLL_VCO_CHG_METHOD_t ret = ALT_VCO_CHG_NONE_VALID;
+ uint32_t temp;
+ uint32_t numer;
+ uint32_t denom;
+ uint32_t freqmax;
+ uint32_t freqmin;
+ uint32_t inputfreq;
+ uint32_t guardband;
+ bool numerchg = false;
+ bool denomchg = false;
+ bool within_gb;
+
+ // gather data values according to PLL
+ if (pll == ALT_CLK_MAIN_PLL)
+ {
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_VCO_ADDR);
+
+ numer = ALT_CLKMGR_MAINPLL_VCO_NUMER_GET(temp);
+ denom = ALT_CLKMGR_MAINPLL_VCO_DENOM_GET(temp);
+
+ freqmax = alt_pll_clk_paramblok.MainPLL_800.freqmax;
+ freqmin = alt_pll_clk_paramblok.MainPLL_800.freqmin;
+ guardband = alt_pll_clk_paramblok.MainPLL_800.guardband;
+
+ inputfreq = alt_ext_clk_paramblok.clkosc1.freqcur;
+ }
+
+ else if (pll == ALT_CLK_PERIPHERAL_PLL)
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_VCO_ADDR);
+
+ numer = ALT_CLKMGR_PERPLL_VCO_NUMER_GET(temp);
+ denom = ALT_CLKMGR_PERPLL_VCO_DENOM_GET(temp);
+
+ freqmax = alt_pll_clk_paramblok.PeriphPLL_800.freqmax;
+ freqmin = alt_pll_clk_paramblok.PeriphPLL_800.freqmin;
+ guardband = alt_pll_clk_paramblok.PeriphPLL_800.guardband;
+
+ temp = ALT_CLKMGR_PERPLL_VCO_PSRC_GET(temp);
+ if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_EOSC1)
+ {
+ inputfreq = alt_ext_clk_paramblok.clkosc1.freqcur;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_EOSC2)
+ {
+ inputfreq = alt_ext_clk_paramblok.clkosc2.freqcur;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_F2S_PERIPH_REF)
+ {
+ inputfreq = alt_ext_clk_paramblok.periph.freqcur;
+ }
+ else
+ {
+ return ret;
+ }
+ }
+
+ else if (pll == ALT_CLK_SDRAM_PLL)
+ {
+ temp = alt_read_word(ALT_CLKMGR_SDRPLL_VCO_ADDR);
+
+ numer = ALT_CLKMGR_SDRPLL_VCO_NUMER_GET(temp);
+ denom = ALT_CLKMGR_SDRPLL_VCO_DENOM_GET(temp);
+
+ freqmax = alt_pll_clk_paramblok.SDRAMPLL_800.freqmax;
+ freqmin = alt_pll_clk_paramblok.SDRAMPLL_800.freqmin;
+ guardband = alt_pll_clk_paramblok.SDRAMPLL_800.guardband;
+
+ temp = ALT_CLKMGR_SDRPLL_VCO_SSRC_GET(temp);
+ if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_EOSC1)
+ {
+ inputfreq = alt_ext_clk_paramblok.clkosc1.freqcur;
+ }
+ else if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_EOSC2)
+ {
+ inputfreq = alt_ext_clk_paramblok.clkosc2.freqcur;
+ }
+ else if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_F2S_SDRAM_REF)
+ {
+ inputfreq = alt_ext_clk_paramblok.sdram.freqcur;
+ }
+ else
+ {
+ return ret;
+ }
+ }
+ else
+ {
+ return ret;
+ }
+
+ temp = mult * (inputfreq / div);
+ if ((temp <= freqmax) && (temp >= freqmin)) // are the final values within frequency limits?
+ {
+ numer++;
+ denom++;
+ numerchg = (mult != numer);
+ denomchg = (div != denom);
+
+ if (!numerchg && !denomchg)
+ {
+ ret = ALT_VCO_CHG_NOCHANGE;
+ }
+ else if (numerchg && !denomchg)
+ {
+ within_gb = alt_within_delta(numer, mult, guardband);
+ // check if change is within the guardband limits
+ temp = mult * (inputfreq / denom);
+ if ((temp <= freqmax) && (temp >= freqmin))
+ {
+ ret = ALT_VCO_CHG_NUM;
+ if (!within_gb) ret |= ALT_VCO_CHG_NUM_BYP;
+ }
+ }
+ else if (!numerchg && denomchg)
+ {
+ within_gb = alt_within_delta(denom, div, guardband);
+ temp = numer * (inputfreq / div);
+ if ((temp <= freqmax) && (temp >= freqmin))
+ {
+ ret = ALT_VCO_CHG_DENOM;
+ if (!within_gb)
+ {
+ ret |= ALT_VCO_CHG_DENOM_BYP;
+ }
+ }
+ }
+ else //numerchg && denomchg
+ {
+ within_gb = alt_within_delta(numer, mult, guardband);
+ temp = mult * (inputfreq / denom);
+ if ((temp <= freqmax) && (temp >= freqmin))
+ {
+ ret = ALT_VCO_CHG_NUM_DENOM;
+ if (!within_gb)
+ {
+ ret |= ALT_VCO_CHG_NUM_DENOM_BYP;
+ }
+ }
+ within_gb = alt_within_delta(denom, div, guardband);
+ temp = numer * (inputfreq / div);
+ if ((temp <= freqmax) && (temp >= freqmin))
+ {
+ ret = ALT_VCO_CHG_DENOM_NUM;
+ if (!within_gb)
+ {
+ ret |= ALT_VCO_CHG_DENOM_NUM_BYP;
+ }
+ }
+ }
+ }
+
+ return ret;
+#endif
+}
+
+
+/****************************************************************************************/
+/* alt_clk_pll_vco_cfg_set() sets the PLL VCO frequency configuration using the */
+/* supplied multiplier and divider arguments. alt_clk_pll_vco_chg_methods_get() */
+/* determines which methods are allowed by the limits set by the maximum multiplier */
+/* and divider values and by the upper and lower frequency limits of the PLL, and also */
+/* determines whether these changes can be made without requiring the PLL to be */
+/* bypassed. alt_clk_pll_vco_cfg_set() then carries out the actions required to effect */
+/* the method chosen to change the VCO settings. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_clk_pll_vco_cfg_set(ALT_CLK_t pll, uint32_t mult, uint32_t div)
+{
+ ALT_STATUS_CODE ret = ALT_E_ERROR;
+ ALT_CLK_PLL_VCO_CHG_METHOD_t method;
+ bool byp = false;
+ void *vaddr;
+ uint32_t numermask, denommask;
+ uint32_t numershift, denomshift;
+
+
+ method = alt_clk_pll_vco_chg_methods_get(pll, mult, div);
+
+ if (method == ALT_VCO_CHG_NONE_VALID)
+ {
+ ret = ALT_E_BAD_CLK;
+ }
+ else if (method == ALT_VCO_CHG_NOCHANGE)
+ {
+ ret = ALT_E_INV_OPTION;
+ }
+ else
+ {
+ if (pll == ALT_CLK_MAIN_PLL)
+ {
+ vaddr = ALT_CLKMGR_MAINPLL_VCO_ADDR;
+ numermask = ALT_CLKMGR_MAINPLL_VCO_NUMER_SET_MSK;
+ denommask = ALT_CLKMGR_MAINPLL_VCO_DENOM_SET_MSK;
+ numershift = ALT_CLKMGR_MAINPLL_VCO_NUMER_LSB;
+ denomshift = ALT_CLKMGR_MAINPLL_VCO_DENOM_LSB;
+ }
+ else if (pll == ALT_CLK_PERIPHERAL_PLL)
+ {
+ vaddr = ALT_CLKMGR_PERPLL_VCO_ADDR;
+ numermask = ALT_CLKMGR_PERPLL_VCO_NUMER_SET_MSK;
+ denommask = ALT_CLKMGR_PERPLL_VCO_DENOM_SET_MSK;
+ numershift = ALT_CLKMGR_PERPLL_VCO_NUMER_LSB;
+ denomshift = ALT_CLKMGR_PERPLL_VCO_DENOM_LSB;
+ }
+ else if (pll == ALT_CLK_SDRAM_PLL)
+ {
+ vaddr = ALT_CLKMGR_SDRPLL_VCO_ADDR;
+ numermask = ALT_CLKMGR_SDRPLL_VCO_NUMER_SET_MSK;
+ denommask = ALT_CLKMGR_SDRPLL_VCO_DENOM_SET_MSK;
+ numershift = ALT_CLKMGR_SDRPLL_VCO_NUMER_LSB;
+ denomshift = ALT_CLKMGR_SDRPLL_VCO_DENOM_LSB;
+ }
+ else { return ALT_E_BAD_ARG; }
+
+ mult--;
+ div--;
+
+ if (method & ALT_VCO_CHG_NUM)
+ {
+ if (method & ALT_VCO_CHG_NUM_BYP)
+ {
+ alt_clk_pll_bypass_enable(pll, 0);
+ byp = true;
+ alt_clk_mgr_wait(vaddr, ALT_SW_MANAGED_CLK_WAIT_BYPASS);
+ }
+ alt_replbits_word(vaddr, numermask, mult << numershift);
+ }
+
+ else if (method & ALT_VCO_CHG_DENOM)
+ {
+ if (method & ALT_VCO_CHG_DENOM_BYP)
+ {
+ alt_clk_pll_bypass_enable(pll, 0);
+ byp = true;
+ }
+ alt_replbits_word(vaddr, denommask, div << denomshift);
+ }
+
+ else if (method & ALT_VCO_CHG_NUM_DENOM)
+ {
+ if (method & ALT_VCO_CHG_NUM_DENOM_BYP)
+ {
+ alt_clk_pll_bypass_enable(pll, 0);
+ byp = true;
+ }
+ alt_replbits_word(vaddr, numermask, mult << numershift);
+ if (!byp) // if PLL is not bypassed
+ {
+                ret = alt_clk_pll_lock_wait(pll, 1000);
+ // verify PLL is still locked or wait for it to lock again
+ }
+ alt_replbits_word(vaddr, denommask, div << denomshift);
+ }
+
+ else if (method & ALT_VCO_CHG_DENOM_NUM)
+ {
+ if (method & ALT_VCO_CHG_DENOM_NUM_BYP)
+ {
+ alt_clk_pll_bypass_enable(pll, 0);
+ byp = true;
+ }
+            // divider-first method: write the new divider before the new multiplier
+            alt_replbits_word(vaddr, denommask, div << denomshift);
+            if (!byp)       // if PLL is not bypassed
+            {
+                ret = alt_clk_pll_lock_wait(pll, 1000);
+                        // verify PLL is still locked or wait for it to lock again
+            }
+            alt_replbits_word(vaddr, numermask, mult << numershift);
+ }
+
+        ret = alt_clk_pll_lock_wait(pll, 1000);
+ // verify PLL is still locked or wait for it to lock again
+ if (byp)
+ {
+ alt_clk_pll_bypass_disable(pll);
+ alt_clk_mgr_wait(vaddr, ALT_SW_MANAGED_CLK_WAIT_BYPASS);
+ // wait for PLL to come out of bypass mode completely
+ }
+ }
+ return ret;
+}
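+
+//
+// Hypothetical usage sketch (not part of the original hwlib sources): nudge the
+// Main PLL VCO multiplier up by one step while keeping the current divider.
+// alt_clk_pll_vco_cfg_set() itself decides whether the change fits inside the
+// guard band or requires a bypass cycle, and rejects values outside the VCO
+// frequency limits.
+//
+static ALT_STATUS_CODE example_bump_main_vco_mult(void)
+{
+    uint32_t mult;
+    uint32_t div;
+
+    ALT_STATUS_CODE status = alt_clk_pll_vco_cfg_get(ALT_CLK_MAIN_PLL, &mult, &div);
+    if (status != ALT_E_SUCCESS)
+    {
+        return status;
+    }
+
+    // Request one multiplier step up; the illustrative value is not validated here.
+    return alt_clk_pll_vco_cfg_set(ALT_CLK_MAIN_PLL, mult + 1, div);
+}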
+
+
+//
+// alt_clk_pll_vco_freq_get() gets the VCO frequency of the specified PLL.
+// Note that since there is at present no known way for software to obtain the speed
+// bin of the SoC or MPU that it is running on, the function below only deals with the
+// 800 MHz part. This may need to be revised in the future.
+//
+ALT_STATUS_CODE alt_clk_pll_vco_freq_get(ALT_CLK_t pll, alt_freq_t * freq)
+{
+ uint64_t temp1 = 0;
+ uint32_t temp;
+ uint32_t numer;
+ uint32_t denom;
+ ALT_STATUS_CODE ret = ALT_E_BAD_ARG;
+
+ if (freq == NULL)
+ {
+ return ret;
+ }
+
+ if (pll == ALT_CLK_MAIN_PLL)
+ {
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_VCO_ADDR);
+ numer = ALT_CLKMGR_MAINPLL_VCO_NUMER_GET(temp);
+ denom = ALT_CLKMGR_MAINPLL_VCO_DENOM_GET(temp);
+ temp1 = (uint64_t) alt_ext_clk_paramblok.clkosc1.freqcur;
+ temp1 *= (numer + 1);
+ temp1 /= (denom + 1);
+
+ if (temp1 <= UINT32_MAX)
+ {
+ temp = (alt_freq_t) temp1;
+ alt_pll_clk_paramblok.MainPLL_800.freqcur = temp;
+ // store this value in the parameter block table
+ *freq = temp;
+ // should NOT check value against PLL frequency limits
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ERROR;
+ }
+ }
+ else if (pll == ALT_CLK_PERIPHERAL_PLL)
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_VCO_ADDR);
+ numer = ALT_CLKMGR_PERPLL_VCO_NUMER_GET(temp);
+ denom = ALT_CLKMGR_PERPLL_VCO_DENOM_GET(temp);
+ temp = ALT_CLKMGR_PERPLL_VCO_PSRC_GET(temp);
+ if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_EOSC1)
+ {
+ temp1 = (uint64_t) alt_ext_clk_paramblok.clkosc1.freqcur;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_EOSC2)
+ {
+ temp1 = (uint64_t) alt_ext_clk_paramblok.clkosc2.freqcur;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_F2S_PERIPH_REF)
+ {
+ temp1 = (uint64_t) alt_ext_clk_paramblok.periph.freqcur;
+ }
+
+ if (temp1 != 0)
+ {
+ temp1 *= (numer + 1);
+ temp1 /= (denom + 1);
+ if (temp1 <= UINT32_MAX)
+ {
+ temp = (alt_freq_t) temp1;
+ alt_pll_clk_paramblok.PeriphPLL_800.freqcur = temp;
+ // store this value in the parameter block table
+
+ *freq = temp;
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ERROR;
+ }
+        } // ret remains ALT_E_BAD_ARG if the reference clock source isn't known
+ }
+ else if (pll == ALT_CLK_SDRAM_PLL)
+ {
+ temp = alt_read_word(ALT_CLKMGR_SDRPLL_VCO_ADDR);
+ numer = ALT_CLKMGR_SDRPLL_VCO_NUMER_GET(temp);
+ denom = ALT_CLKMGR_SDRPLL_VCO_DENOM_GET(temp);
+ temp = ALT_CLKMGR_SDRPLL_VCO_SSRC_GET(temp);
+ if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_EOSC1)
+ {
+ temp1 = (uint64_t) alt_ext_clk_paramblok.clkosc1.freqcur;
+ }
+ else if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_EOSC2)
+ {
+ temp1 = (uint64_t) alt_ext_clk_paramblok.clkosc2.freqcur;
+ }
+ else if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_F2S_SDRAM_REF)
+ {
+ temp1 = (uint64_t) alt_ext_clk_paramblok.sdram.freqcur;
+ }
+
+ if (temp1 != 0)
+ {
+ temp1 *= (numer + 1);
+ temp1 /= (denom + 1);
+ if (temp1 <= UINT32_MAX)
+ {
+ temp = (alt_freq_t) temp1;
+ alt_pll_clk_paramblok.SDRAMPLL_800.freqcur = temp;
+ // store this value in the parameter block table
+
+ *freq = temp;
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ERROR;
+ }
+ }
+    } // ret remains ALT_E_BAD_ARG if the reference clock source isn't known
+
+ return ret;
+}
+
+//
+// Returns the current guard band range in effect for the PLL.
+//
+uint32_t alt_clk_pll_guard_band_get(ALT_CLK_t pll)
+{
+ uint32_t ret = 0;
+
+ if (pll == ALT_CLK_MAIN_PLL)
+ {
+ ret = alt_pll_clk_paramblok.MainPLL_800.guardband;
+ }
+ else if (pll == ALT_CLK_PERIPHERAL_PLL)
+ {
+ ret = alt_pll_clk_paramblok.PeriphPLL_800.guardband;
+ }
+ else if (pll == ALT_CLK_SDRAM_PLL)
+ {
+ ret = alt_pll_clk_paramblok.SDRAMPLL_800.guardband;
+ }
+ return ret;
+}
+
+//
+// alt_clk_pll_guard_band_set() changes the guard band from its current value to permit
+// a more lenient or more stringent policy to be applied by the functions that
+// configure the PLL VCO frequency.
+//
+ALT_STATUS_CODE alt_clk_pll_guard_band_set(ALT_CLK_t pll, uint32_t guard_band)
+{
+ if ( (guard_band > UINT12_MAX) || (guard_band <= 0)
+ || (guard_band > ALT_GUARDBAND_LIMIT)
+ )
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (pll == ALT_CLK_MAIN_PLL)
+ {
+ alt_pll_clk_paramblok.MainPLL_800.guardband = guard_band;
+ //alt_pll_clk_paramblok.MainPLL_600.guardband = guard_band;
+ // ??? Don't know how to check the MPU speed bin yet, so only 800 MHz struct is used
+ }
+ else if (pll == ALT_CLK_PERIPHERAL_PLL)
+ {
+ alt_pll_clk_paramblok.PeriphPLL_800.guardband = guard_band;
+ //alt_pll_clk_paramblok.PeriphPLL_600.guardband = guard_band;
+ }
+ else if (pll == ALT_CLK_SDRAM_PLL)
+ {
+ alt_pll_clk_paramblok.SDRAMPLL_800.guardband = guard_band;
+ //alt_pll_clk_paramblok.SDRAMPLL_600.guardband = guard_band;
+ }
+ else
+ {
+ status = ALT_E_ERROR;
+ }
+
+ return status;
+}
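+
+//
+// Hypothetical usage sketch (not part of the original hwlib sources): temporarily
+// widen the Main PLL guard band around a VCO adjustment and restore the previous
+// value afterwards. Doubling the guard band is only an example; the setter still
+// rejects values above UINT12_MAX or ALT_GUARDBAND_LIMIT.
+//
+static ALT_STATUS_CODE example_vco_set_with_wider_guard_band(uint32_t mult, uint32_t div)
+{
+    uint32_t saved = alt_clk_pll_guard_band_get(ALT_CLK_MAIN_PLL);
+    ALT_STATUS_CODE status;
+
+    (void) alt_clk_pll_guard_band_set(ALT_CLK_MAIN_PLL, saved * 2);
+    status = alt_clk_pll_vco_cfg_set(ALT_CLK_MAIN_PLL, mult, div);
+    (void) alt_clk_pll_guard_band_set(ALT_CLK_MAIN_PLL, saved);
+
+    return status;
+}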
+
+//
+// alt_clk_divider_get() gets configured divider value for the specified clock.
+//
+ALT_STATUS_CODE alt_clk_divider_get(ALT_CLK_t clk, uint32_t * div)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+ uint32_t temp;
+
+ if (div == NULL)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ switch (clk)
+ {
+ // Main PLL outputs
+ case ALT_CLK_MAIN_PLL_C0:
+ case ALT_CLK_MPU:
+ *div = (ALT_CLKMGR_MAINPLL_MPUCLK_CNT_GET(alt_read_word(ALT_CLKMGR_MAINPLL_MPUCLK_ADDR)) + 1) *
+ (ALT_CLKMGR_ALTERA_MPUCLK_CNT_GET(alt_read_word(ALT_CLKMGR_ALTERA_MPUCLK_ADDR)) + 1);
+ break;
+
+ case ALT_CLK_MAIN_PLL_C1:
+ case ALT_CLK_L4_MAIN:
+ case ALT_CLK_L3_MAIN:
+ *div = (ALT_CLKMGR_MAINPLL_MAINCLK_CNT_GET(alt_read_word(ALT_CLKMGR_MAINPLL_MAINCLK_ADDR)) + 1) *
+ (ALT_CLKMGR_ALTERA_MAINCLK_CNT_GET(alt_read_word(ALT_CLKMGR_ALTERA_MAINCLK_ADDR)) + 1);
+ break;
+
+ case ALT_CLK_MAIN_PLL_C2:
+ case ALT_CLK_DBG_BASE:
+ case ALT_CLK_DBG_TIMER:
+ *div = (ALT_CLKMGR_MAINPLL_DBGATCLK_CNT_GET(alt_read_word(ALT_CLKMGR_MAINPLL_DBGATCLK_ADDR)) + 1) *
+ (ALT_CLKMGR_ALTERA_DBGATCLK_CNT_GET(alt_read_word(ALT_CLKMGR_ALTERA_DBGATCLK_ADDR)) + 1);
+ break;
+
+ case ALT_CLK_MAIN_PLL_C3:
+ case ALT_CLK_MAIN_QSPI:
+ *div = (ALT_CLKMGR_MAINPLL_MAINQSPICLK_CNT_GET(alt_read_word(ALT_CLKMGR_MAINPLL_MAINQSPICLK_ADDR))) + 1;
+ break;
+
+ case ALT_CLK_MAIN_PLL_C4:
+ case ALT_CLK_MAIN_NAND_SDMMC:
+ *div = (ALT_CLKMGR_MAINPLL_MAINNANDSDMMCCLK_CNT_GET(alt_read_word(ALT_CLKMGR_MAINPLL_MAINNANDSDMMCCLK_ADDR))) + 1;
+ break;
+
+ case ALT_CLK_MAIN_PLL_C5:
+ case ALT_CLK_CFG:
+ case ALT_CLK_H2F_USER0:
+ *div = (ALT_CLKMGR_MAINPLL_CFGS2FUSER0CLK_CNT_GET(alt_read_word(ALT_CLKMGR_MAINPLL_CFGS2FUSER0CLK_ADDR))) + 1;
+ break;
+
+ /////
+
+ // Peripheral PLL outputs
+ case ALT_CLK_PERIPHERAL_PLL_C0:
+ case ALT_CLK_EMAC0:
+ *div = (ALT_CLKMGR_PERPLL_EMAC0CLK_CNT_GET(alt_read_word(ALT_CLKMGR_PERPLL_EMAC0CLK_ADDR))) + 1;
+ break;
+
+ case ALT_CLK_PERIPHERAL_PLL_C1:
+ case ALT_CLK_EMAC1:
+ *div = (ALT_CLKMGR_PERPLL_EMAC1CLK_CNT_GET(alt_read_word(ALT_CLKMGR_PERPLL_EMAC1CLK_ADDR))) + 1;
+ break;
+
+ case ALT_CLK_PERIPHERAL_PLL_C2:
+ *div = (ALT_CLKMGR_PERPLL_PERQSPICLK_CNT_GET(alt_read_word(ALT_CLKMGR_PERPLL_PERQSPICLK_ADDR))) + 1;
+ break;
+
+ case ALT_CLK_PERIPHERAL_PLL_C3:
+ *div = (ALT_CLKMGR_PERPLL_PERNANDSDMMCCLK_CNT_GET(alt_read_word(ALT_CLKMGR_PERPLL_PERNANDSDMMCCLK_ADDR))) + 1;
+ break;
+
+ case ALT_CLK_PERIPHERAL_PLL_C4:
+ *div = (ALT_CLKMGR_PERPLL_PERBASECLK_CNT_GET(alt_read_word(ALT_CLKMGR_PERPLL_PERBASECLK_ADDR))) + 1;
+ break;
+
+ case ALT_CLK_PERIPHERAL_PLL_C5:
+ case ALT_CLK_H2F_USER1:
+ *div = (ALT_CLKMGR_PERPLL_S2FUSER1CLK_CNT_GET(alt_read_word(ALT_CLKMGR_PERPLL_S2FUSER1CLK_ADDR))) + 1;
+ break;
+
+ /////
+
+ // SDRAM PLL outputs
+ case ALT_CLK_SDRAM_PLL_C0:
+ case ALT_CLK_DDR_DQS:
+ *div = (ALT_CLKMGR_SDRPLL_DDRDQSCLK_CNT_GET(alt_read_word(ALT_CLKMGR_SDRPLL_DDRDQSCLK_ADDR))) + 1;
+ break;
+
+ case ALT_CLK_SDRAM_PLL_C1:
+ case ALT_CLK_DDR_2X_DQS:
+ *div = (ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_CNT_GET(alt_read_word(ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_ADDR))) + 1;
+ break;
+
+ case ALT_CLK_SDRAM_PLL_C2:
+ case ALT_CLK_DDR_DQ:
+ *div = (ALT_CLKMGR_SDRPLL_DDRDQCLK_CNT_GET(alt_read_word(ALT_CLKMGR_SDRPLL_DDRDQCLK_ADDR))) + 1;
+ break;
+
+ case ALT_CLK_SDRAM_PLL_C5:
+ case ALT_CLK_H2F_USER2:
+ *div = (ALT_CLKMGR_SDRPLL_S2FUSER2CLK_CNT_GET(alt_read_word(ALT_CLKMGR_SDRPLL_S2FUSER2CLK_ADDR))) + 1;
+ break;
+
+ /////
+
+ // Other clock dividers
+ case ALT_CLK_L3_MP:
+ temp = ALT_CLKMGR_MAINPLL_MAINDIV_L3MPCLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_MAINDIV_ADDR));
+ if (temp <= ALT_CLKMGR_MAINPLL_MAINDIV_L3MPCLK_E_DIV2)
+ {
+ *div = temp + 1;
+ }
+ else
+ {
+ status = ALT_E_ERROR;
+ }
+ break;
+
+ case ALT_CLK_L3_SP:
+ temp = ALT_CLKMGR_MAINPLL_MAINDIV_L3SPCLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_MAINDIV_ADDR));
+ if (temp <= ALT_CLKMGR_MAINPLL_MAINDIV_L3SPCLK_E_DIV2)
+ {
+ *div = temp + 1;
+ }
+ else
+ {
+ status = ALT_E_ERROR;
+ }
+        // note that this value does not include the additional effect
+        // of the L3_MP divider that is upstream of this one
+ break;
+
+ case ALT_CLK_L4_MP:
+ temp = ALT_CLKMGR_MAINPLL_MAINDIV_L4MPCLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_MAINDIV_ADDR));
+ if (temp <= ALT_CLKMGR_MAINPLL_MAINDIV_L4MPCLK_E_DIV16)
+ {
+ *div = 1 << temp;
+ }
+ else
+ {
+ status = ALT_E_ERROR;
+ }
+ break;
+
+ case ALT_CLK_L4_SP:
+ temp = ALT_CLKMGR_MAINPLL_MAINDIV_L4SPCLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_MAINDIV_ADDR));
+ if (temp <= ALT_CLKMGR_MAINPLL_MAINDIV_L4SPCLK_E_DIV16)
+ {
+ *div = 1 << temp;
+ }
+ else
+ {
+ status = ALT_E_ERROR;
+ }
+ break;
+
+ case ALT_CLK_DBG_AT:
+ temp = ALT_CLKMGR_MAINPLL_DBGDIV_DBGATCLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_DBGDIV_ADDR));
+ if (temp <= ALT_CLKMGR_MAINPLL_DBGDIV_DBGATCLK_E_DIV4)
+ {
+ *div = 1 << temp;
+ }
+ else
+ {
+ status = ALT_E_ERROR;
+ }
+ break;
+
+ case ALT_CLK_DBG:
+ temp = ALT_CLKMGR_MAINPLL_DBGDIV_DBGCLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_DBGDIV_ADDR));
+ if (temp <= ALT_CLKMGR_MAINPLL_DBGDIV_DBGCLK_E_DIV4)
+ {
+ *div = 1 << temp;
+ }
+ else
+ {
+ status = ALT_E_ERROR;
+ }
+        // note that this value does not include the value of the upstream dbg_at_clk divider
+ break;
+
+ case ALT_CLK_DBG_TRACE:
+ temp = ALT_CLKMGR_MAINPLL_TRACEDIV_TRACECLK_GET(alt_read_word(ALT_CLKMGR_MAINPLL_TRACEDIV_ADDR));
+ if (temp <= ALT_CLKMGR_MAINPLL_TRACEDIV_TRACECLK_E_DIV16)
+ {
+ *div = 1 << temp;
+ }
+ else
+ {
+ status = ALT_E_ERROR;
+ }
+ break;
+
+ case ALT_CLK_USB_MP:
+ temp = ALT_CLKMGR_PERPLL_DIV_USBCLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_DIV_ADDR));
+ if (temp <= ALT_CLKMGR_PERPLL_DIV_USBCLK_E_DIV16)
+ {
+ *div = 1 << temp;
+ }
+ else
+ {
+ status = ALT_E_ERROR;
+ }
+ break;
+
+ case ALT_CLK_SPI_M:
+ temp = ALT_CLKMGR_PERPLL_DIV_SPIMCLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_DIV_ADDR));
+ if (temp <= ALT_CLKMGR_PERPLL_DIV_SPIMCLK_E_DIV16)
+ {
+ *div = 1 << temp;
+ }
+ else
+ {
+ status = ALT_E_ERROR;
+ }
+ break;
+
+ case ALT_CLK_CAN0:
+ temp = ALT_CLKMGR_PERPLL_DIV_CAN0CLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_DIV_ADDR));
+ if (temp <= ALT_CLKMGR_PERPLL_DIV_CAN0CLK_E_DIV16)
+ {
+ *div = 1 << temp;
+ }
+ else
+ {
+ status = ALT_E_ERROR;
+ }
+ break;
+
+ case ALT_CLK_CAN1:
+ temp = ALT_CLKMGR_PERPLL_DIV_CAN1CLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_DIV_ADDR));
+ if (temp <= ALT_CLKMGR_PERPLL_DIV_CAN1CLK_E_DIV16)
+ {
+ *div = 1 << temp;
+ }
+ else
+ {
+ status = ALT_E_ERROR;
+ }
+ break;
+
+ case ALT_CLK_GPIO_DB:
+ temp = ALT_CLKMGR_PERPLL_GPIODIV_GPIODBCLK_GET(alt_read_word(ALT_CLKMGR_PERPLL_GPIODIV_ADDR));
+ *div = temp + 1;
+ break;
+
+ case ALT_CLK_MPU_PERIPH:
+ *div = 4; // set by hardware
+ break;
+
+ case ALT_CLK_MPU_L2_RAM:
+ *div = 2; // set by hardware
+ break;
+
+ case ALT_CLK_NAND:
+ *div = 4; // set by hardware
+ break;
+
+ default:
+ status = ALT_E_BAD_ARG;
+ break;
+ }
+
+ return status;
+}
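+
+//
+// Hypothetical usage sketch (not part of the original hwlib sources): combine the
+// Main PLL VCO frequency with the total divider reported by alt_clk_divider_get()
+// above to estimate the current mpu_clk rate in Hz.
+//
+static ALT_STATUS_CODE example_mpu_clock_freq(alt_freq_t * freq)
+{
+    alt_freq_t vco;
+    uint32_t div;
+    ALT_STATUS_CODE status;
+
+    if (freq == NULL)
+    {
+        return ALT_E_BAD_ARG;
+    }
+
+    status = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &vco);
+    if (status == ALT_E_SUCCESS)
+    {
+        status = alt_clk_divider_get(ALT_CLK_MPU, &div);
+    }
+    if (status == ALT_E_SUCCESS)
+    {
+        *freq = vco / div;      // the reported divider already folds in the external counter
+    }
+
+    return status;
+}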
+
+/////
+
+#define ALT_CLK_WITHIN_FREQ_LIMITS_TEST_MODE false
+    // used for testing writes to the full range of counters without
+ // regard to the usual output frequency upper and lower limits
+
+
+static ALT_STATUS_CODE alt_clk_within_freq_limits(ALT_CLK_t clk, uint32_t div)
+{
+#if ALT_CLK_WITHIN_FREQ_LIMITS_TEST_MODE
+ return ALT_E_TRUE;
+#else
+
+ if (div == 0)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+ uint32_t numer = 0;
+ uint32_t hilimit;
+ uint32_t lolimit;
+
+ switch (clk)
+ {
+ // Counters of the Main PLL
+ case ALT_CLK_MAIN_PLL_C0:
+ hilimit = alt_pll_cntr_maxfreq.MainPLL_C0;
+ lolimit = alt_ext_clk_paramblok.clkosc1.freqcur;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &numer);
+ break;
+ case ALT_CLK_MAIN_PLL_C1:
+ hilimit = alt_pll_cntr_maxfreq.MainPLL_C1;
+ lolimit = alt_ext_clk_paramblok.clkosc1.freqcur;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &numer);
+ break;
+ case ALT_CLK_MAIN_PLL_C2:
+ hilimit = alt_pll_cntr_maxfreq.MainPLL_C2;
+ lolimit = alt_ext_clk_paramblok.clkosc1.freqcur;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &numer);
+ break;
+ case ALT_CLK_MAIN_PLL_C3:
+ hilimit = alt_pll_cntr_maxfreq.MainPLL_C3;
+ lolimit = 0;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &numer);
+ break;
+ case ALT_CLK_MAIN_PLL_C4:
+ hilimit = alt_pll_cntr_maxfreq.MainPLL_C4;
+ lolimit = alt_ext_clk_paramblok.clkosc1.freqcur;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &numer);
+ break;
+ case ALT_CLK_MAIN_PLL_C5:
+ hilimit = alt_pll_cntr_maxfreq.MainPLL_C5;
+ lolimit = alt_ext_clk_paramblok.clkosc1.freqcur;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &numer);
+ break;
+
+ // Counters of the Peripheral PLL
+ case ALT_CLK_PERIPHERAL_PLL_C0:
+ hilimit = alt_pll_cntr_maxfreq.PeriphPLL_C0;
+ lolimit = 0;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &numer);
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C1:
+ hilimit = alt_pll_cntr_maxfreq.PeriphPLL_C1;
+ lolimit = 0;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &numer);
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C2:
+ hilimit = alt_pll_cntr_maxfreq.PeriphPLL_C2;
+ lolimit = 0;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &numer);
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C3:
+ hilimit = alt_pll_cntr_maxfreq.PeriphPLL_C3;
+ lolimit = 0;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &numer);
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C4:
+ hilimit = alt_pll_cntr_maxfreq.PeriphPLL_C4;
+ lolimit = 0;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &numer);
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C5:
+ hilimit = alt_pll_cntr_maxfreq.PeriphPLL_C5;
+ lolimit = alt_ext_clk_paramblok.clkosc1.freqcur;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &numer);
+ break;
+
+ // Counters of the SDRAM PLL
+ case ALT_CLK_SDRAM_PLL_C0:
+ hilimit = alt_pll_cntr_maxfreq.SDRAMPLL_C0;
+ lolimit = 0;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_SDRAM_PLL, &numer);
+ break;
+ case ALT_CLK_SDRAM_PLL_C1:
+ hilimit = alt_pll_cntr_maxfreq.SDRAMPLL_C1;
+ lolimit = 0;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_SDRAM_PLL, &numer);
+ break;
+ case ALT_CLK_SDRAM_PLL_C2:
+ hilimit = alt_pll_cntr_maxfreq.SDRAMPLL_C2;
+ lolimit = 0;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_SDRAM_PLL, &numer);
+ break;
+ case ALT_CLK_SDRAM_PLL_C5:
+ hilimit = alt_pll_cntr_maxfreq.SDRAMPLL_C5;
+ lolimit = alt_ext_clk_paramblok.clkosc1.freqcur;
+ status = alt_clk_pll_vco_freq_get(ALT_CLK_SDRAM_PLL, &numer);
+ break;
+
+ default:
+ status = ALT_E_BAD_ARG;
+ break;
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ numer = numer / div;
+ if ((numer <= hilimit) && (numer >= lolimit))
+ {
+ status = ALT_E_TRUE;
+ }
+ else
+ {
+ status = ALT_E_FALSE;
+ }
+ }
+
+ return status;
+#endif
+}
+
+static bool alt_clkmgr_is_val_modulo_n(uint32_t div, uint32_t mod)
+{
+ if (mod == 1)
+ {
+ return true;
+ }
+ else if (mod == 2)
+ {
+ return (div & 0x1) == 0;
+ }
+ else if (mod == 4)
+ {
+ return (div & 0x3) == 0;
+ }
+ else
+ {
+ return (div % mod) == 0;
+ }
+}
+
+//
+// alt_clk_divider_set() sets the divider value for the specified clock.
+//
+// See pages 38, 44, 45, and 46 of the HPS-Clocking NPP for a map of the
+// HPS clocking architecture and hierarchy of connections.
+//
+ALT_STATUS_CODE alt_clk_divider_set(ALT_CLK_t clk, uint32_t div)
+{
+ ALT_STATUS_CODE ret = ALT_E_BAD_ARG;
+ volatile uint32_t temp, temp1;
+ uint32_t wrval = UINT32_MAX; // value to be written
+ bool restore_0 = false;
+ bool restore_1 = false;
+ bool restore_2 = false;
+
+ switch (clk)
+ {
+ // Main PLL outputs
+ case ALT_CLK_MAIN_PLL_C0:
+ case ALT_CLK_MPU:
+ {
+ uint32_t prediv = (ALT_CLKMGR_ALTERA_MPUCLK_CNT_GET(alt_read_word(ALT_CLKMGR_ALTERA_MPUCLK_ADDR)) + 1);
+
+ if ( (div <= ((ALT_CLKMGR_MAINPLL_MPUCLK_CNT_SET_MSK + 1) * prediv))
+ && alt_clkmgr_is_val_modulo_n(div, prediv)
+ && (alt_clk_within_freq_limits(ALT_CLK_MAIN_PLL_C0, div) == ALT_E_TRUE) )
+ {
+ wrval = (div / prediv) - 1;
+
+ // HW managed clock, change by writing to the external counter, no need to gate clock
+                // or match phase or wait for transition time. No other field in the register to mask off either.
+ alt_write_word(ALT_CLKMGR_MAINPLL_MPUCLK_ADDR, wrval);
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ }
+ break;
+
+ case ALT_CLK_MAIN_PLL_C1:
+ case ALT_CLK_L3_MAIN:
+ {
+ uint32_t prediv = (ALT_CLKMGR_ALTERA_MAINCLK_CNT_GET(alt_read_word(ALT_CLKMGR_ALTERA_MAINCLK_ADDR)) + 1);
+
+ if ( (div <= ((ALT_CLKMGR_MAINPLL_MAINCLK_CNT_SET_MSK + 1) * prediv))
+ && alt_clkmgr_is_val_modulo_n(div, prediv)
+ && (alt_clk_within_freq_limits(ALT_CLK_MAIN_PLL_C1, div) == ALT_E_TRUE) )
+ {
+ // HW managed clock, change by writing to the external counter, no need to gate clock
+                // or match phase or wait for transition time. No other field in the register to mask off either.
+
+ wrval = (div / prediv) - 1;
+
+#if ALT_PREVENT_GLITCH_CHGC1
+            // if the L4MP or L4SP source is set to Main PLL C1, gate it off before changing
+            // this counter, then gate the clock back on. FogBugz #63778
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR);
+ temp1 = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+
+ if ((temp1 & ALT_CLKMGR_MAINPLL_EN_L4MPCLK_SET_MSK) && (!(temp & ALT_CLKMGR_MAINPLL_L4SRC_L4MP_SET_MSK)))
+ {
+ restore_0 = true;
+ }
+ if ((temp1 & ALT_CLKMGR_MAINPLL_EN_L4SPCLK_SET_MSK) && (!(temp & ALT_CLKMGR_MAINPLL_L4SRC_L4SP_SET_MSK)))
+ {
+ restore_1 = true;
+ }
+ temp = temp1;
+ if (restore_0) { temp &= ALT_CLKMGR_MAINPLL_EN_L4MPCLK_CLR_MSK; }
+ if (restore_1) { temp &= ALT_CLKMGR_MAINPLL_EN_L4SPCLK_CLR_MSK; }
+ if (restore_0 || restore_1) { alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp); }
+
+ alt_write_word(ALT_CLKMGR_MAINPLL_MAINCLK_ADDR, wrval);
+
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ // wait a bit before reenabling the L4MP and L4SP clocks
+ if (restore_0 || restore_1) { alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp1); }
+#else
+ alt_write_word(ALT_CLKMGR_MAINPLL_MAINCLK_ADDR, wrval);
+#endif
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ }
+ break;
+
+ case ALT_CLK_MAIN_PLL_C2:
+ case ALT_CLK_DBG_BASE:
+ {
+ uint32_t prediv = (ALT_CLKMGR_ALTERA_DBGATCLK_CNT_GET(alt_read_word(ALT_CLKMGR_ALTERA_DBGATCLK_ADDR)) + 1);
+
+ if ( (div <= ((ALT_CLKMGR_MAINPLL_DBGATCLK_CNT_SET_MSK + 1) * prediv))
+ && alt_clkmgr_is_val_modulo_n(div, prediv)
+ && (alt_clk_within_freq_limits(ALT_CLK_MAIN_PLL_C2, div) == ALT_E_TRUE) )
+ {
+ wrval = (div / prediv) - 1;
+ // HW managed clock, change by writing to the external counter, no need to gate clock
+                // or match phase or wait for transition time. No other field in the register to mask off either.
+ alt_write_word(ALT_CLKMGR_MAINPLL_DBGATCLK_ADDR, wrval);
+
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ }
+ break;
+
+ case ALT_CLK_MAIN_PLL_C3:
+ // The rest of the PLL outputs do not have external counters, but
+ // their internal counters are programmable rather than fixed
+ if ( (div <= (ALT_CLKMGR_MAINPLL_MAINQSPICLK_CNT_SET_MSK + 1))
+ && (alt_clk_within_freq_limits(ALT_CLK_MAIN_PLL_C3, div) == ALT_E_TRUE) )
+ {
+ // if the main_qspi_clk input is selected for the qspi_clk
+ if (ALT_CLKMGR_PERPLL_SRC_QSPI_GET(alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR)) ==
+ ALT_CLKMGR_PERPLL_SRC_QSPI_E_MAIN_QSPI_CLK)
+ {
+ restore_0 = (temp = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR)) & ALT_CLKMGR_PERPLL_EN_QSPICLK_SET_MSK;
+ if (restore_0) // AND if the QSPI clock is currently enabled
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp & ALT_CLKMGR_PERPLL_EN_QSPICLK_CLR_MSK);
+ // gate off the QSPI clock
+ }
+
+ wrval = div - 1;
+ // the rest are software-managed clocks and require a reset sequence to write to
+ alt_clk_pllcounter_write(ALT_CLKMGR_MAINPLL_VCO_ADDR,
+ ALT_CLKMGR_MAINPLL_STAT_ADDR,
+ ALT_CLKMGR_MAINPLL_MAINQSPICLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C3,
+ ALT_CLKMGR_MAINPLL_VCO_OUTRST_LSB);
+
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_MAINQSPICLK_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp);
+ // if the QSPI clock was gated on (enabled) before, return it to that state
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_MAIN_PLL_C4:
+ case ALT_CLK_MAIN_NAND_SDMMC:
+ if ( (div <= (ALT_CLKMGR_MAINPLL_MAINNANDSDMMCCLK_CNT_SET_MSK + 1))
+ && (alt_clk_within_freq_limits(ALT_CLK_MAIN_PLL_C4, div) == ALT_E_TRUE) )
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR);
+ temp1 = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR);
+
+ // do we need to gate off the SDMMC clock ?
+ if (ALT_CLKMGR_PERPLL_SRC_SDMMC_GET(temp) == ALT_CLKMGR_PERPLL_SRC_SDMMC_E_MAIN_NAND_CLK)
+ {
+ if (temp1 & ALT_CLKMGR_PERPLL_EN_SDMMCCLK_SET_MSK) { restore_0 = true; }
+ }
+
+ // do we need to gate off the NAND clock and/or the NANDX clock?
+ if (ALT_CLKMGR_PERPLL_SRC_NAND_GET(temp) == ALT_CLKMGR_PERPLL_SRC_NAND_E_MAIN_NAND_CLK)
+ {
+ if (temp1 & ALT_CLKMGR_PERPLL_EN_NANDXCLK_SET_MSK) { restore_1 = true; }
+ if (temp1 & ALT_CLKMGR_PERPLL_EN_NANDCLK_SET_MSK) { restore_2 = true; }
+ }
+
+ temp = temp1;
+ if (restore_1 && restore_2)
+ {
+ temp &= ALT_CLKMGR_PERPLL_EN_NANDCLK_CLR_MSK;
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp);
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_SW_MANAGED_CLK_WAIT_NANDCLK);
+                // gate nand_clk off at least 8 MPU clock cycles before nand_x_clk
+ }
+
+ if (restore_0 || restore_1)
+ {
+ if (restore_0) { temp &= ALT_CLKMGR_PERPLL_EN_SDMMCCLK_CLR_MSK; }
+ if (restore_1) { temp &= ALT_CLKMGR_PERPLL_EN_NANDXCLK_CLR_MSK; }
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp);
+ // gate off sdmmc_clk and/or nand_x_clk
+ }
+
+ // now write the new divisor ratio
+ wrval = div - 1;
+ alt_clk_pllcounter_write(ALT_CLKMGR_MAINPLL_VCO_ADDR,
+ ALT_CLKMGR_MAINPLL_STAT_ADDR,
+ ALT_CLKMGR_MAINPLL_MAINNANDSDMMCCLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C4,
+ ALT_CLKMGR_MAINPLL_VCO_OUTRST_LSB);
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_MAINNANDSDMMCCLK_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+
+ if (restore_0 || restore_1)
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp1 & ALT_CLKMGR_PERPLL_EN_NANDCLK_CLR_MSK);
+ // if the NANDX and/or SDMMC clock was gated on (enabled) before, return it to that state
+ if (restore_1 && restore_2)
+ {
+ // wait at least 8 clock cycles to turn the nand_clk on
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_SW_MANAGED_CLK_WAIT_NANDCLK);
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp1);
+ }
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_MAIN_PLL_C5:
+ case ALT_CLK_CFG:
+ case ALT_CLK_H2F_USER0:
+ if ( (div <= (ALT_CLKMGR_MAINPLL_CFGS2FUSER0CLK_CNT_SET_MSK + 1))
+ && (alt_clk_within_freq_limits(ALT_CLK_MAIN_PLL_C5, div) == ALT_E_TRUE) )
+ {
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+ restore_0 = ((temp & ALT_CLKMGR_MAINPLL_EN_CFGCLK_SET_MSK) ||
+ (temp & ALT_CLKMGR_MAINPLL_EN_S2FUSER0CLK_SET_MSK));
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp & (ALT_CLKMGR_MAINPLL_EN_CFGCLK_CLR_MSK &
+ ALT_CLKMGR_MAINPLL_EN_S2FUSER0CLK_CLR_MSK)); // clear both
+ }
+
+ // now write the new divisor ratio
+ wrval = div - 1;
+ alt_clk_pllcounter_write(ALT_CLKMGR_MAINPLL_VCO_ADDR,
+ ALT_CLKMGR_MAINPLL_STAT_ADDR,
+ ALT_CLKMGR_MAINPLL_CFGS2FUSER0CLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C5,
+ ALT_CLKMGR_MAINPLL_VCO_OUTRST_LSB);
+
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_CFGS2FUSER0CLK_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp);
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ /////
+
+ // Peripheral PLL outputs
+ case ALT_CLK_PERIPHERAL_PLL_C0:
+ case ALT_CLK_EMAC0:
+ if ( (div <= (ALT_CLKMGR_PERPLL_EMAC0CLK_CNT_SET_MSK + 1))
+ && (alt_clk_within_freq_limits(ALT_CLK_PERIPHERAL_PLL_C0, div) == ALT_E_TRUE) )
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR);
+ restore_0 = temp & ALT_CLKMGR_PERPLL_EN_EMAC0CLK_SET_MSK;
+
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp & ALT_CLKMGR_PERPLL_EN_EMAC0CLK_CLR_MSK);
+ }
+
+ // now write the new divisor ratio
+ wrval = div - 1;
+ alt_clk_pllcounter_write(ALT_CLKMGR_PERPLL_VCO_ADDR,
+ ALT_CLKMGR_PERPLL_STAT_ADDR,
+ ALT_CLKMGR_PERPLL_EMAC0CLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C0,
+ ALT_CLKMGR_PERPLL_VCO_OUTRST_LSB);
+
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_EMAC0CLK_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp);
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_PERIPHERAL_PLL_C1:
+ case ALT_CLK_EMAC1:
+ if ( (div <= (ALT_CLKMGR_PERPLL_EMAC1CLK_CNT_SET_MSK + 1))
+ && (alt_clk_within_freq_limits(ALT_CLK_PERIPHERAL_PLL_C1, div) == ALT_E_TRUE) )
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR);
+ restore_0 = temp & ALT_CLKMGR_PERPLL_EN_EMAC1CLK_SET_MSK;
+
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp & ALT_CLKMGR_PERPLL_EN_EMAC1CLK_CLR_MSK);
+ }
+ // now write the new divisor ratio
+ wrval = div - 1;
+ alt_clk_pllcounter_write(ALT_CLKMGR_PERPLL_VCO_ADDR,
+ ALT_CLKMGR_PERPLL_STAT_ADDR,
+ ALT_CLKMGR_PERPLL_EMAC1CLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C1,
+ ALT_CLKMGR_PERPLL_VCO_OUTRST_LSB);
+
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_EMAC1CLK_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp);
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_PERIPHERAL_PLL_C2:
+ if ( (div <= (ALT_CLKMGR_PERPLL_PERQSPICLK_CNT_SET_MSK + 1))
+ && (alt_clk_within_freq_limits(ALT_CLK_PERIPHERAL_PLL_C2, div) == ALT_E_TRUE) )
+ {
+ temp = ALT_CLKMGR_PERPLL_SRC_QSPI_GET(alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR));
+ if (temp == ALT_CLKMGR_PERPLL_SRC_QSPI_E_PERIPH_QSPI_CLK)
+ {
+ // if qspi source is set to Peripheral PLL C2
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR);
+ // and if qspi_clk is enabled
+ restore_0 = temp & ALT_CLKMGR_PERPLL_EN_QSPICLK_SET_MSK;
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp & ALT_CLKMGR_PERPLL_EN_QSPICLK_CLR_MSK);
+ // gate it off
+ }
+ }
+
+ // now write the new divisor ratio
+ wrval = div - 1;
+ alt_clk_pllcounter_write(ALT_CLKMGR_PERPLL_VCO_ADDR,
+ ALT_CLKMGR_PERPLL_STAT_ADDR,
+ ALT_CLKMGR_PERPLL_PERQSPICLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C2,
+ ALT_CLKMGR_PERPLL_VCO_OUTRST_LSB);
+
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_PERQSPICLK_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp);
+ // if the clock was gated on (enabled) before, return it to that state
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_PERIPHERAL_PLL_C3:
+ if ( (div <= (ALT_CLKMGR_PERPLL_PERNANDSDMMCCLK_CNT_SET_MSK + 1))
+ && (alt_clk_within_freq_limits(ALT_CLK_PERIPHERAL_PLL_C3, div) == ALT_E_TRUE) )
+ {
+ // first, are the clock MUX input selections currently set to use the clock we want to change?
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR);
+ restore_0 = (ALT_CLKMGR_PERPLL_SRC_SDMMC_GET(temp) == ALT_CLKMGR_PERPLL_SRC_SDMMC_E_PERIPH_NAND_CLK);
+ restore_1 = restore_2 = (ALT_CLKMGR_PERPLL_SRC_NAND_GET(temp) == ALT_CLKMGR_PERPLL_SRC_NAND_E_PERIPH_NAND_CLK);
+
+ // now AND those with the current state of the three gate enables
+ // to get the clocks which must be gated off and then back on
+ temp1 = temp = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR);
+ restore_0 = restore_0 && (temp & ALT_CLKMGR_PERPLL_EN_SDMMCCLK_SET_MSK);
+ restore_1 = restore_1 && (temp & ALT_CLKMGR_PERPLL_EN_NANDXCLK_SET_MSK);
+ restore_2 = restore_2 && (temp & ALT_CLKMGR_PERPLL_EN_NANDCLK_SET_MSK);
+
+ // gate off the clocks that depend on the clock divider that we want to change
+ if (restore_2) { temp &= ALT_CLKMGR_PERPLL_EN_NANDCLK_CLR_MSK; }
+ if (restore_0) { temp &= ALT_CLKMGR_PERPLL_EN_SDMMCCLK_CLR_MSK; }
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp);
+
+                // the NAND clock must be gated off before the NANDX clock.
+ if (restore_1)
+ {
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_PERNANDSDMMCCLK_ADDR, ALT_SW_MANAGED_CLK_WAIT_NANDCLK);
+ temp &= ALT_CLKMGR_PERPLL_EN_NANDXCLK_CLR_MSK;
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp);
+ }
+
+ // now write the new divisor ratio
+ wrval = div - 1;
+ alt_clk_pllcounter_write(ALT_CLKMGR_PERPLL_VCO_ADDR,
+ ALT_CLKMGR_PERPLL_STAT_ADDR,
+ ALT_CLKMGR_PERPLL_PERNANDSDMMCCLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C3,
+ ALT_CLKMGR_PERPLL_VCO_OUTRST_LSB);
+
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_PERNANDSDMMCCLK_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV );
+
+ // NAND clock and NAND_X clock cannot be written together, must be a set sequence with a delay
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp1 & ALT_CLKMGR_PERPLL_EN_NANDCLK_CLR_MSK);
+ if (restore_2)
+ {
+ // the NANDX clock must be gated on before the NAND clock.
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_PERNANDSDMMCCLK_ADDR, ALT_SW_MANAGED_CLK_WAIT_NANDCLK );
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp1);
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_PERIPHERAL_PLL_C4:
+ if ( (div <= (ALT_CLKMGR_PERPLL_PERBASECLK_CNT_SET_MSK + 1))
+ && (alt_clk_within_freq_limits(ALT_CLK_PERIPHERAL_PLL_C4, div) == ALT_E_TRUE) )
+ {
+ // look at the L4 set of clock gates first
+ temp1 = alt_read_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR);
+ restore_0 = (ALT_CLKMGR_MAINPLL_L4SRC_L4MP_GET(temp1) == ALT_CLKMGR_MAINPLL_L4SRC_L4MP_E_PERIPHPLL);
+ restore_1 = (ALT_CLKMGR_MAINPLL_L4SRC_L4SP_GET(temp1) == ALT_CLKMGR_MAINPLL_L4SRC_L4SP_E_PERIPHPLL);
+ temp1 = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR);
+ restore_0 = restore_0 && (temp1 & ALT_CLKMGR_MAINPLL_EN_L4MPCLK_SET_MSK);
+ restore_1 = restore_1 && (temp1 & ALT_CLKMGR_MAINPLL_EN_L4SPCLK_SET_MSK);
+
+ // if the l4_sp and l4_mp clocks are not set to use the periph_base_clk
+ // from the Peripheral PLL C4 clock divider output, or if they are
+ // not currently gated on, don't change their gates
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+ if (restore_0) { temp &= ALT_CLKMGR_MAINPLL_EN_L4MPCLK_CLR_MSK; }
+ if (restore_1) { temp &= ALT_CLKMGR_MAINPLL_EN_L4SPCLK_CLR_MSK; }
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp);
+
+ // now look at the C4 direct set of clock gates
+ // first, create a mask of the C4 direct set of clock gate enables
+ temp = ( ALT_CLKMGR_PERPLL_EN_USBCLK_SET_MSK
+ | ALT_CLKMGR_PERPLL_EN_SPIMCLK_SET_MSK
+ | ALT_CLKMGR_PERPLL_EN_CAN0CLK_SET_MSK
+ | ALT_CLKMGR_PERPLL_EN_CAN1CLK_SET_MSK
+ | ALT_CLKMGR_PERPLL_EN_GPIOCLK_SET_MSK );
+
+ // gate off all the C4 Direct set of clocks
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp1 & ~temp);
+
+ // change the clock divider ratio - the reason we're here
+ wrval = div - 1;
+ alt_clk_pllcounter_write(ALT_CLKMGR_PERPLL_VCO_ADDR,
+ ALT_CLKMGR_PERPLL_STAT_ADDR,
+ ALT_CLKMGR_PERPLL_PERBASECLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C4,
+ ALT_CLKMGR_PERPLL_VCO_OUTRST_LSB);
+
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_PERBASECLK_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV );
+
+ // gate the affected clocks that were on before back on - both sets of gates
+ temp = (restore_0) ? ALT_CLKMGR_MAINPLL_EN_L4MPCLK_SET_MSK : 0;
+ if (restore_1) { temp |= ALT_CLKMGR_MAINPLL_EN_L4SPCLK_SET_MSK; }
+ alt_setbits_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp);
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp1);
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_PERIPHERAL_PLL_C5:
+ case ALT_CLK_H2F_USER1:
+ if ( (div <= (ALT_CLKMGR_PERPLL_S2FUSER1CLK_CNT_SET_MSK + 1))
+ && (alt_clk_within_freq_limits(ALT_CLK_PERIPHERAL_PLL_C5, div) == ALT_E_TRUE) )
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR);
+ restore_0 = temp & ALT_CLKMGR_PERPLL_EN_S2FUSER1CLK_SET_MSK;
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp & ALT_CLKMGR_PERPLL_EN_S2FUSER1CLK_CLR_MSK);
+ }
+
+ // now write the new divisor ratio
+ wrval = div - 1;
+ alt_clk_pllcounter_write(ALT_CLKMGR_PERPLL_VCO_ADDR,
+ ALT_CLKMGR_PERPLL_STAT_ADDR,
+ ALT_CLKMGR_PERPLL_S2FUSER1CLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C5,
+ ALT_CLKMGR_PERPLL_VCO_OUTRST_LSB);
+
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV );
+ if (restore_0) { alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp); }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ /////
+
+ // SDRAM PLL outputs
+ case ALT_CLK_SDRAM_PLL_C0:
+ case ALT_CLK_DDR_DQS:
+ if ( (div <= (ALT_CLKMGR_SDRPLL_DDRDQSCLK_CNT_SET_MSK + 1))
+ && (alt_clk_within_freq_limits(ALT_CLK_SDRAM_PLL_C0, div) == ALT_E_TRUE) )
+ {
+ wrval = div - 1;
+ temp = alt_read_word(ALT_CLKMGR_SDRPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_SDRPLL_EN_DDRDQSCLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_SDRPLL_EN_ADDR, temp & ALT_CLKMGR_SDRPLL_EN_DDRDQSCLK_CLR_MSK);
+ restore_0 = true;
+ }
+
+ alt_clk_pllcounter_write(ALT_CLKMGR_SDRPLL_VCO_ADDR,
+ ALT_CLKMGR_SDRPLL_STAT_ADDR,
+ ALT_CLKMGR_SDRPLL_DDRDQSCLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C0,
+ ALT_CLKMGR_SDRPLL_DDRDQSCLK_CNT_LSB);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_SDRPLL_EN_ADDR, temp); // which has the enable bit set
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_SDRAM_PLL_C1:
+ case ALT_CLK_DDR_2X_DQS:
+ if ( (div <= (ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_CNT_SET_MSK + 1))
+ && (alt_clk_within_freq_limits(ALT_CLK_SDRAM_PLL_C1, div) == ALT_E_TRUE) )
+ {
+ wrval = div - 1;
+ temp = alt_read_word(ALT_CLKMGR_SDRPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_SDRPLL_EN_DDR2XDQSCLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_SDRPLL_EN_ADDR, temp & ALT_CLKMGR_SDRPLL_EN_DDR2XDQSCLK_CLR_MSK);
+ restore_0 = true;
+ }
+
+ alt_clk_pllcounter_write(ALT_CLKMGR_SDRPLL_VCO_ADDR,
+ ALT_CLKMGR_SDRPLL_STAT_ADDR,
+ ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C1,
+ ALT_CLKMGR_SDRPLL_VCO_OUTRST_LSB);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_SDRPLL_EN_ADDR, temp); // which has the enable bit set
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_SDRAM_PLL_C2:
+ case ALT_CLK_DDR_DQ:
+ if ( (div <= (ALT_CLKMGR_SDRPLL_DDRDQCLK_CNT_SET_MSK + 1))
+ && (alt_clk_within_freq_limits(ALT_CLK_SDRAM_PLL_C2, div) == ALT_E_TRUE) )
+ {
+ wrval = div - 1;
+ temp = alt_read_word(ALT_CLKMGR_SDRPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_SDRPLL_EN_DDRDQCLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_SDRPLL_EN_ADDR, temp & ALT_CLKMGR_SDRPLL_EN_DDRDQCLK_CLR_MSK);
+ restore_0 = true;
+ }
+
+ alt_clk_pllcounter_write(ALT_CLKMGR_SDRPLL_VCO_ADDR,
+ ALT_CLKMGR_SDRPLL_STAT_ADDR,
+ ALT_CLKMGR_SDRPLL_DDRDQCLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C2,
+ ALT_CLKMGR_SDRPLL_VCO_OUTRST_LSB);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_SDRPLL_EN_ADDR, temp); // which has the enable bit set
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_SDRAM_PLL_C5:
+ case ALT_CLK_H2F_USER2:
+ if ( (div <= (ALT_CLKMGR_SDRPLL_S2FUSER2CLK_CNT_SET_MSK + 1))
+ && (alt_clk_within_freq_limits(ALT_CLK_SDRAM_PLL_C5, div) == ALT_E_TRUE) )
+ {
+ wrval = div - 1;
+ temp = alt_read_word(ALT_CLKMGR_SDRPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_SDRPLL_EN_S2FUSER2CLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_SDRPLL_EN_ADDR, temp & ALT_CLKMGR_SDRPLL_EN_S2FUSER2CLK_CLR_MSK);
+ restore_0 = true;
+ }
+
+ alt_clk_pllcounter_write(ALT_CLKMGR_SDRPLL_VCO_ADDR,
+ ALT_CLKMGR_SDRPLL_STAT_ADDR,
+ ALT_CLKMGR_SDRPLL_S2FUSER2CLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C5,
+ ALT_CLKMGR_SDRPLL_VCO_OUTRST_LSB);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_SDRPLL_EN_ADDR, temp); // which has the enable bit set
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ /////
+
+ // Other clock dividers
+ case ALT_CLK_L3_MP:
+ if (div == 1) { wrval = ALT_CLKMGR_MAINPLL_MAINDIV_L3MPCLK_E_DIV1; }
+ else if (div == 2) { wrval = ALT_CLKMGR_MAINPLL_MAINDIV_L3MPCLK_E_DIV2; }
+
+ if (wrval != UINT32_MAX)
+ {
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_MAINPLL_EN_L3MPCLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp & ALT_CLKMGR_MAINPLL_EN_L3MPCLK_CLR_MSK);
+ restore_0 = true;
+ }
+ alt_replbits_word(ALT_CLKMGR_MAINPLL_MAINDIV_ADDR, ALT_CLKMGR_MAINPLL_MAINDIV_L3MPCLK_SET_MSK,
+ wrval << ALT_CLKMGR_MAINPLL_MAINDIV_L3MPCLK_LSB);
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_EN_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV );
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp); // which has the enable bit set
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_L3_SP:
+ // note that the L3MP divider is upstream from the L3SP divider
+ // and any changes to the former will affect the output of both
+ if (div == 1) { wrval = ALT_CLKMGR_MAINPLL_MAINDIV_L3SPCLK_E_DIV1; }
+ else if (div == 2) { wrval = ALT_CLKMGR_MAINPLL_MAINDIV_L3SPCLK_E_DIV2; }
+
+ if (wrval != UINT32_MAX)
+ {
+ alt_replbits_word(ALT_CLKMGR_MAINPLL_MAINDIV_ADDR, ALT_CLKMGR_MAINPLL_MAINDIV_L3SPCLK_SET_MSK,
+ wrval << ALT_CLKMGR_MAINPLL_MAINDIV_L3SPCLK_LSB);
+ // no clock gate to close and reopen
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_MAINDIV_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV );
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_L4_MP:
+ if (div == 1) { wrval = ALT_CLKMGR_MAINPLL_MAINDIV_L4MPCLK_E_DIV1; }
+ else if (div == 2) { wrval = ALT_CLKMGR_MAINPLL_MAINDIV_L4MPCLK_E_DIV2; }
+ else if (div == 4) { wrval = ALT_CLKMGR_MAINPLL_MAINDIV_L4MPCLK_E_DIV4; }
+ else if (div == 8) { wrval = ALT_CLKMGR_MAINPLL_MAINDIV_L4MPCLK_E_DIV8; }
+ else if (div == 16) { wrval = ALT_CLKMGR_MAINPLL_MAINDIV_L4MPCLK_E_DIV16; }
+
+ if (wrval != UINT32_MAX)
+ {
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_MAINPLL_EN_L4MPCLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp & ALT_CLKMGR_MAINPLL_EN_L4MPCLK_CLR_MSK);
+ restore_0 = true;
+ }
+ alt_replbits_word(ALT_CLKMGR_MAINPLL_MAINDIV_ADDR, ALT_CLKMGR_MAINPLL_MAINDIV_L4MPCLK_SET_MSK,
+ wrval << ALT_CLKMGR_MAINPLL_MAINDIV_L4MPCLK_LSB);
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_MAINDIV_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp); // which has the enable bit set
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_L4_SP:
+ if (div == 1) { wrval = ALT_CLKMGR_MAINPLL_MAINDIV_L4SPCLK_E_DIV1; }
+ else if (div == 2) { wrval = ALT_CLKMGR_MAINPLL_MAINDIV_L4SPCLK_E_DIV2; }
+ else if (div == 4) { wrval = ALT_CLKMGR_MAINPLL_MAINDIV_L4SPCLK_E_DIV4; }
+ else if (div == 8) { wrval = ALT_CLKMGR_MAINPLL_MAINDIV_L4SPCLK_E_DIV8; }
+ else if (div == 16) { wrval = ALT_CLKMGR_MAINPLL_MAINDIV_L4SPCLK_E_DIV16; }
+
+ if (wrval != UINT32_MAX)
+ {
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_MAINPLL_EN_L4SPCLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp & ALT_CLKMGR_MAINPLL_EN_L4SPCLK_CLR_MSK);
+ restore_0 = true;
+ }
+ alt_replbits_word(ALT_CLKMGR_MAINPLL_MAINDIV_ADDR, ALT_CLKMGR_MAINPLL_MAINDIV_L4SPCLK_SET_MSK,
+ wrval << ALT_CLKMGR_MAINPLL_MAINDIV_L4SPCLK_LSB);
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_MAINDIV_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp);
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_DBG_AT:
+ if (div == 1) { wrval = ALT_CLKMGR_MAINPLL_DBGDIV_DBGATCLK_E_DIV1; }
+ else if (div == 2) { wrval = ALT_CLKMGR_MAINPLL_DBGDIV_DBGATCLK_E_DIV2; }
+ else if (div == 4) { wrval = ALT_CLKMGR_MAINPLL_DBGDIV_DBGATCLK_E_DIV4; }
+
+ if (wrval != UINT32_MAX)
+ {
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_MAINPLL_EN_DBGATCLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp & ALT_CLKMGR_MAINPLL_EN_DBGATCLK_CLR_MSK);
+ restore_0 = true;
+ }
+ alt_replbits_word(ALT_CLKMGR_MAINPLL_DBGDIV_ADDR, ALT_CLKMGR_MAINPLL_DBGDIV_DBGATCLK_SET_MSK,
+ wrval << ALT_CLKMGR_MAINPLL_DBGDIV_DBGATCLK_LSB);
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_DBGDIV_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp);
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_DBG:
+ if (div == 2) { wrval = ALT_CLKMGR_MAINPLL_DBGDIV_DBGCLK_E_DIV2; }
+ else if (div == 4) { wrval = ALT_CLKMGR_MAINPLL_DBGDIV_DBGCLK_E_DIV4; }
+
+ if (wrval != UINT32_MAX)
+ {
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_MAINPLL_EN_DBGCLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp & ALT_CLKMGR_MAINPLL_EN_DBGCLK_CLR_MSK);
+ restore_0 = true;
+ }
+ alt_replbits_word(ALT_CLKMGR_MAINPLL_DBGDIV_ADDR, ALT_CLKMGR_MAINPLL_DBGDIV_DBGCLK_SET_MSK,
+ wrval << (ALT_CLKMGR_MAINPLL_DBGDIV_DBGCLK_LSB - 1));
+ // account for the fact that the divisor ratios are 2x the value
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_DBGDIV_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp);
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_DBG_TRACE:
+ if (div == 1) { wrval = ALT_CLKMGR_MAINPLL_TRACEDIV_TRACECLK_E_DIV1; }
+ else if (div == 2) { wrval = ALT_CLKMGR_MAINPLL_TRACEDIV_TRACECLK_E_DIV2; }
+ else if (div == 4) { wrval = ALT_CLKMGR_MAINPLL_TRACEDIV_TRACECLK_E_DIV4; }
+ else if (div == 8) { wrval = ALT_CLKMGR_MAINPLL_TRACEDIV_TRACECLK_E_DIV8; }
+ else if (div == 16) { wrval = ALT_CLKMGR_MAINPLL_TRACEDIV_TRACECLK_E_DIV16; }
+
+ if (wrval != UINT32_MAX)
+ {
+ temp = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_MAINPLL_EN_DBGTRACECLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp & ALT_CLKMGR_MAINPLL_EN_DBGTRACECLK_CLR_MSK);
+ restore_0 = true;
+ }
+ alt_replbits_word(ALT_CLKMGR_MAINPLL_TRACEDIV_ADDR, ALT_CLKMGR_MAINPLL_TRACEDIV_TRACECLK_SET_MSK,
+ wrval << ALT_CLKMGR_MAINPLL_TRACEDIV_TRACECLK_LSB);
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_TRACEDIV_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, temp);
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_USB_MP:
+ if (div == 1) { wrval = ALT_CLKMGR_PERPLL_DIV_USBCLK_E_DIV1; }
+ else if (div == 2) { wrval = ALT_CLKMGR_PERPLL_DIV_USBCLK_E_DIV2; }
+ else if (div == 4) { wrval = ALT_CLKMGR_PERPLL_DIV_USBCLK_E_DIV4; }
+ else if (div == 8) { wrval = ALT_CLKMGR_PERPLL_DIV_USBCLK_E_DIV8; }
+ else if (div == 16) { wrval = ALT_CLKMGR_PERPLL_DIV_USBCLK_E_DIV16; }
+
+ if (wrval != UINT32_MAX)
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_PERPLL_EN_USBCLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp & ALT_CLKMGR_PERPLL_EN_USBCLK_CLR_MSK);
+ restore_0 = true;
+ }
+ alt_replbits_word(ALT_CLKMGR_PERPLL_DIV_ADDR, ALT_CLKMGR_PERPLL_DIV_USBCLK_SET_MSK,
+ wrval << ALT_CLKMGR_PERPLL_DIV_USBCLK_LSB);
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_DIV_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp);
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_SPI_M:
+ if (div == 1) { wrval = ALT_CLKMGR_PERPLL_DIV_SPIMCLK_E_DIV1; }
+ else if (div == 2) { wrval = ALT_CLKMGR_PERPLL_DIV_SPIMCLK_E_DIV2; }
+ else if (div == 4) { wrval = ALT_CLKMGR_PERPLL_DIV_SPIMCLK_E_DIV4; }
+ else if (div == 8) { wrval = ALT_CLKMGR_PERPLL_DIV_SPIMCLK_E_DIV8; }
+ else if (div == 16) { wrval = ALT_CLKMGR_PERPLL_DIV_SPIMCLK_E_DIV16; }
+
+ if (wrval != UINT32_MAX)
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_PERPLL_EN_SPIMCLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp & ALT_CLKMGR_PERPLL_EN_SPIMCLK_CLR_MSK);
+ restore_0 = true;
+ }
+ alt_replbits_word(ALT_CLKMGR_PERPLL_DIV_ADDR, ALT_CLKMGR_PERPLL_DIV_SPIMCLK_SET_MSK,
+ wrval << ALT_CLKMGR_PERPLL_DIV_SPIMCLK_LSB);
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_DIV_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp);
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_CAN0:
+ if (div == 1) { wrval = ALT_CLKMGR_PERPLL_DIV_CAN0CLK_E_DIV1; }
+ else if (div == 2) { wrval = ALT_CLKMGR_PERPLL_DIV_CAN0CLK_E_DIV2; }
+ else if (div == 4) { wrval = ALT_CLKMGR_PERPLL_DIV_CAN0CLK_E_DIV4; }
+ else if (div == 8) { wrval = ALT_CLKMGR_PERPLL_DIV_CAN0CLK_E_DIV8; }
+ else if (div == 16) { wrval = ALT_CLKMGR_PERPLL_DIV_CAN0CLK_E_DIV16; }
+
+ if (wrval != UINT32_MAX)
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_PERPLL_EN_CAN0CLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp & ALT_CLKMGR_PERPLL_EN_CAN0CLK_CLR_MSK);
+ restore_0 = true;
+ }
+ alt_replbits_word(ALT_CLKMGR_PERPLL_DIV_ADDR, ALT_CLKMGR_PERPLL_DIV_CAN0CLK_SET_MSK,
+ wrval << ALT_CLKMGR_PERPLL_DIV_CAN0CLK_LSB);
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_DIV_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp);
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_CAN1:
+ if (div == 1) { wrval = ALT_CLKMGR_PERPLL_DIV_CAN1CLK_E_DIV1; }
+ else if (div == 2) { wrval = ALT_CLKMGR_PERPLL_DIV_CAN1CLK_E_DIV2; }
+ else if (div == 4) { wrval = ALT_CLKMGR_PERPLL_DIV_CAN1CLK_E_DIV4; }
+ else if (div == 8) { wrval = ALT_CLKMGR_PERPLL_DIV_CAN1CLK_E_DIV8; }
+ else if (div == 16) { wrval = ALT_CLKMGR_PERPLL_DIV_CAN1CLK_E_DIV16; }
+
+ if (wrval != UINT32_MAX)
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_PERPLL_EN_CAN1CLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp & ALT_CLKMGR_PERPLL_EN_CAN1CLK_CLR_MSK);
+ restore_0 = true;
+ }
+ alt_replbits_word(ALT_CLKMGR_PERPLL_DIV_ADDR, ALT_CLKMGR_PERPLL_DIV_CAN1CLK_SET_MSK,
+ wrval << ALT_CLKMGR_PERPLL_DIV_CAN1CLK_LSB);
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_DIV_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp);
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_GPIO_DB: // GPIO debounce clock
+ if (div <= ALT_CLKMGR_PERPLL_GPIODIV_GPIODBCLK_SET_MSK)
+ {
+ temp = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR);
+ if (temp & ALT_CLKMGR_PERPLL_EN_GPIOCLK_SET_MSK)
+ {
+ // if clock is currently on, gate it off
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp & ALT_CLKMGR_PERPLL_EN_GPIOCLK_CLR_MSK);
+ restore_0 = true;
+ }
+ wrval = div - 1;
+ alt_replbits_word(ALT_CLKMGR_PERPLL_GPIODIV_ADDR, ALT_CLKMGR_PERPLL_GPIODIV_GPIODBCLK_SET_MSK,
+ wrval << ALT_CLKMGR_PERPLL_GPIODIV_GPIODBCLK_LSB);
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_GPIODIV_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, temp);
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ break;
+
+ case ALT_CLK_MAIN_QSPI:
+ temp = ALT_CLKMGR_PERPLL_SRC_QSPI_GET(alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR));
+ // get the QSPI clock source
+ restore_0 = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR) & ALT_CLKMGR_PERPLL_EN_QSPICLK_SET_MSK;
+ // and the current enable state
+ wrval = div - 1;
+
+ if (temp == ALT_CLKMGR_PERPLL_SRC_QSPI_E_MAIN_QSPI_CLK)
+            { // if the main_qspi_clk (Main PLL C3 Output) input is selected
+ if (div <= ALT_CLKMGR_MAINPLL_MAINQSPICLK_CNT_SET_MSK)
+ {
+ if (restore_0)
+ {
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_QSPICLK_SET_MSK);
+ } // gate off the QSPI clock
+
+ alt_clk_pllcounter_write(ALT_CLKMGR_MAINPLL_VCO_ADDR,
+ ALT_CLKMGR_MAINPLL_STAT_ADDR,
+ ALT_CLKMGR_MAINPLL_MAINQSPICLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C3,
+ ALT_CLKMGR_MAINPLL_VCO_OUTRST_LSB);
+
+ alt_clk_mgr_wait(ALT_CLKMGR_MAINPLL_MAINQSPICLK_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_QSPICLK_SET_MSK);
+ // if the QSPI clock was gated on (enabled) before, return it to that state
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_SRC_QSPI_E_PERIPH_QSPI_CLK)
+ {
+ if (div <= ALT_CLKMGR_PERPLL_PERQSPICLK_CNT_SET_MSK)
+ {
+ if (restore_0)
+ {
+ alt_clrbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_QSPICLK_SET_MSK);
+ } // gate off the QSPI clock
+
+ alt_clk_pllcounter_write(ALT_CLKMGR_PERPLL_VCO_ADDR,
+ ALT_CLKMGR_PERPLL_STAT_ADDR,
+ ALT_CLKMGR_PERPLL_PERQSPICLK_ADDR,
+ wrval,
+ ALT_CLK_PLL_RST_BIT_C2,
+ ALT_CLKMGR_PERPLL_VCO_OUTRST_LSB);
+
+ alt_clk_mgr_wait(ALT_CLKMGR_PERPLL_PERQSPICLK_ADDR, ALT_SW_MANAGED_CLK_WAIT_CTRDIV);
+ if (restore_0)
+ {
+ alt_setbits_word(ALT_CLKMGR_PERPLL_EN_ADDR, ALT_CLKMGR_PERPLL_EN_QSPICLK_SET_MSK);
+ // if the QSPI clock was gated on (enabled) before, return it to that state
+ }
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ARG_RANGE;
+ }
+ }
+ break;
+
+ /////
+
+ default:
+ ret = ALT_E_BAD_ARG;
+ break;
+ }
+
+ return ret;
+}
+
+//
+// alt_clk_freq_get() returns the output frequency of the specified clock.
+//
+ALT_STATUS_CODE alt_clk_freq_get(ALT_CLK_t clk, alt_freq_t* freq)
+{
+ ALT_STATUS_CODE ret = ALT_E_BAD_ARG;
+ uint32_t temp = 0;
+ uint64_t numer = 0;
+ uint64_t denom = 1;
+
+ if (freq == NULL)
+ {
+ return ret;
+ }
+
+ switch (clk)
+ {
+ // External Inputs
+ case ALT_CLK_IN_PIN_OSC1:
+ case ALT_CLK_OSC1:
+ numer = alt_ext_clk_paramblok.clkosc1.freqcur;
+ // denom = 1 by default
+ ret = ALT_E_SUCCESS;
+ break;
+
+ case ALT_CLK_IN_PIN_OSC2:
+ numer = alt_ext_clk_paramblok.clkosc2.freqcur;
+ // denom = 1 by default
+ ret = ALT_E_SUCCESS;
+ break;
+
+ case ALT_CLK_F2H_PERIPH_REF:
+ numer = alt_ext_clk_paramblok.periph.freqcur;
+ // denom = 1 by default
+ ret = ALT_E_SUCCESS;
+ break;
+
+ case ALT_CLK_F2H_SDRAM_REF:
+ numer = alt_ext_clk_paramblok.sdram.freqcur;
+ // denom = 1 by default
+ ret = ALT_E_SUCCESS;
+ break;
+
+ /////
+
+ // PLLs
+ case ALT_CLK_MAIN_PLL:
+ if (alt_clk_pll_is_bypassed(ALT_CLK_MAIN_PLL) == ALT_E_TRUE)
+ {
+ temp = alt_ext_clk_paramblok.clkosc1.freqcur;
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ }
+ numer = (uint64_t) temp;
+ // denom = 1 by default
+ break;
+
+ case ALT_CLK_PERIPHERAL_PLL:
+ if (alt_clk_pll_is_bypassed(ALT_CLK_PERIPHERAL_PLL) == ALT_E_TRUE)
+ {
+ temp = ALT_CLKMGR_PERPLL_VCO_PSRC_GET(alt_read_word(ALT_CLKMGR_PERPLL_VCO_ADDR));
+ if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_EOSC1)
+ {
+ temp = alt_ext_clk_paramblok.clkosc1.freqcur;
+ ret = ALT_E_SUCCESS;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_EOSC2)
+ {
+ temp = alt_ext_clk_paramblok.clkosc2.freqcur;
+ ret = ALT_E_SUCCESS;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_VCO_PSRC_E_F2S_PERIPH_REF)
+ {
+ temp = alt_ext_clk_paramblok.periph.freqcur;
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ERROR;
+ }
+ }
+ else
+ {
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ }
+ numer = (uint64_t) temp;
+ // denom = 1 by default
+ break;
+
+ case ALT_CLK_SDRAM_PLL:
+ if (alt_clk_pll_is_bypassed(ALT_CLK_SDRAM_PLL) == ALT_E_TRUE)
+ {
+ temp = ALT_CLKMGR_SDRPLL_VCO_SSRC_GET(alt_read_word(ALT_CLKMGR_SDRPLL_VCO_ADDR));
+ if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_EOSC1)
+ {
+ temp = alt_ext_clk_paramblok.clkosc1.freqcur;
+ ret = ALT_E_SUCCESS;
+ }
+ else if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_EOSC2)
+ {
+ temp = alt_ext_clk_paramblok.clkosc2.freqcur;
+ ret = ALT_E_SUCCESS;
+ }
+ else if (temp == ALT_CLKMGR_SDRPLL_VCO_SSRC_E_F2S_SDRAM_REF)
+ {
+ temp = alt_ext_clk_paramblok.sdram.freqcur;
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ ret = ALT_E_ERROR;
+ }
+ }
+ else
+ {
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_SDRAM_PLL, &temp);
+ }
+ numer = (uint64_t) temp;
+ // denom = 1 by default
+ break;
+
+ /////
+
+ // Main Clock Group
+ case ALT_CLK_MAIN_PLL_C0:
+ case ALT_CLK_MAIN_PLL_C1:
+ case ALT_CLK_MAIN_PLL_C2:
+ case ALT_CLK_MAIN_PLL_C3:
+ case ALT_CLK_MAIN_PLL_C4:
+ case ALT_CLK_MAIN_PLL_C5:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(clk, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_MPU:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C0, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_MPU_PERIPH:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C0, &temp);
+ }
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MPU_PERIPH, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_MPU_L2_RAM:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C0, &temp);
+ }
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MPU_L2_RAM, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_L4_MAIN:
+ case ALT_CLK_L3_MAIN:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C1, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_L3_MP:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C1, &temp);
+ }
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_L3_MP, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_L3_SP:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C1, &temp);
+ }
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_L3_MP, &temp);
+ }
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = denom * (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_L3_SP, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_L4_MP:
+ ret = alt_clk_divider_get(ALT_CLK_L4_MP, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = (uint64_t) temp;
+ temp = ALT_CLKMGR_MAINPLL_L4SRC_L4MP_GET(alt_read_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR));
+ if (temp == ALT_CLKMGR_MAINPLL_L4SRC_L4MP_E_MAINPLL)
+ {
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C1, &temp);
+ denom = denom * (uint64_t) temp; // no real harm if temp is garbage data
+ }
+ }
+ else if (temp == ALT_CLKMGR_MAINPLL_L4SRC_L4MP_E_PERIPHPLL)
+ {
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_PERIPHERAL_PLL_C4, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ }
+ }
+ break;
+
+ case ALT_CLK_L4_SP:
+ ret = alt_clk_divider_get(ALT_CLK_L4_SP, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = (uint64_t) temp;
+ temp = ALT_CLKMGR_MAINPLL_L4SRC_L4SP_GET(alt_read_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR));
+ if (temp == ALT_CLKMGR_MAINPLL_L4SRC_L4SP_E_MAINPLL)
+ {
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C1, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ }
+ else if (temp == ALT_CLKMGR_MAINPLL_L4SRC_L4SP_E_PERIPHPLL) // periph_base_clk
+ {
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_PERIPHERAL_PLL_C4, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ }
+ }
+ break;
+
+ case ALT_CLK_DBG_BASE:
+ case ALT_CLK_DBG_TIMER:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C2, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_DBG_AT:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C2, &temp);
+ }
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_DBG_AT, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_DBG:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C2, &temp);
+ }
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_DBG_AT, &temp);
+ }
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = denom * (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_DBG, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_DBG_TRACE:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C2, &temp);
+            // the NAND clock and NAND_X clock enables cannot be written together; they must be set in sequence with a delay
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_DBG_TRACE, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_MAIN_QSPI:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C3, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_MAIN_NAND_SDMMC:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C4, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_CFG:
+ case ALT_CLK_H2F_USER0:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C5, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ /////
+
+ // Peripheral Clock Group
+ case ALT_CLK_PERIPHERAL_PLL_C0:
+ case ALT_CLK_PERIPHERAL_PLL_C1:
+ case ALT_CLK_PERIPHERAL_PLL_C2:
+ case ALT_CLK_PERIPHERAL_PLL_C3:
+ case ALT_CLK_PERIPHERAL_PLL_C4:
+ case ALT_CLK_PERIPHERAL_PLL_C5:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(clk, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_EMAC0:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_PERIPHERAL_PLL_C0, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_EMAC1:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_PERIPHERAL_PLL_C1, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_USB_MP:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_PERIPHERAL_PLL_C4, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_USB_MP, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ }
+ break;
+
+ case ALT_CLK_SPI_M:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_PERIPHERAL_PLL_C4, &temp);
+ }
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_SPI_M, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_CAN0:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_PERIPHERAL_PLL_C4, &temp);
+ }
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_CAN0, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_CAN1:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_PERIPHERAL_PLL_C4, &temp);
+ }
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_CAN1, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_GPIO_DB:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_PERIPHERAL_PLL_C4, &temp);
+ }
+ if (ret == ALT_E_SUCCESS)
+ {
+ denom = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_GPIO_DB, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_H2F_USER1:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_PERIPHERAL_PLL_C5, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ /* Clocks That Can Switch Between Different Clock Groups */
+ case ALT_CLK_SDMMC:
+ temp = ALT_CLKMGR_PERPLL_SRC_SDMMC_GET(alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR));
+ if (temp == ALT_CLKMGR_PERPLL_SRC_SDMMC_E_F2S_PERIPH_REF_CLK)
+ {
+ numer = (uint64_t) alt_ext_clk_paramblok.periph.freqcur;
+ // denom = 1 by default
+ ret = ALT_E_SUCCESS;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_SRC_SDMMC_E_MAIN_NAND_CLK)
+ {
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C4, &temp);
+ denom = (uint64_t) temp;
+ }
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_SRC_SDMMC_E_PERIPH_NAND_CLK)
+ {
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_PERIPHERAL_PLL_C3, &temp);
+ denom = (uint64_t) temp;
+ }
+ }
+ else
+ {
+ ret = ALT_E_ERROR;
+ }
+ break;
+
+ case ALT_CLK_NAND:
+ denom = 4;
+ // the absence of a break statement here is not a mistake
+ case ALT_CLK_NAND_X:
+ temp = ALT_CLKMGR_PERPLL_SRC_NAND_GET(alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR));
+ if (temp == ALT_CLKMGR_PERPLL_SRC_NAND_E_F2S_PERIPH_REF_CLK)
+ {
+ numer = (uint64_t) alt_ext_clk_paramblok.periph.freqcur;
+ // denom = 1 or 4 by default;
+ ret = ALT_E_SUCCESS;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_SRC_NAND_E_MAIN_NAND_CLK)
+ {
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C4, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_SRC_NAND_E_PERIPH_NAND_CLK)
+ {
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_PERIPHERAL_PLL_C3, &temp);
+ denom = denom * (uint64_t) temp;
+ }
+ }
+ else
+ {
+ ret = ALT_E_ERROR;
+ }
+ break;
+
+ case ALT_CLK_QSPI:
+ temp = ALT_CLKMGR_PERPLL_SRC_QSPI_GET(alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR));
+ if (temp == ALT_CLKMGR_PERPLL_SRC_QSPI_E_F2S_PERIPH_REF_CLK)
+ {
+ numer = (uint64_t) alt_ext_clk_paramblok.periph.freqcur;
+ // denom = 1 by default;
+ ret = ALT_E_SUCCESS;
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_SRC_QSPI_E_MAIN_QSPI_CLK)
+ {
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_MAIN_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_MAIN_PLL_C3, &temp);
+ denom = (uint64_t) temp;
+ }
+ }
+ else if (temp == ALT_CLKMGR_PERPLL_SRC_QSPI_E_PERIPH_QSPI_CLK)
+ {
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_PERIPHERAL_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_PERIPHERAL_PLL_C2, &temp);
+ denom = (uint64_t) temp;
+ }
+ }
+ else
+ {
+ ret = ALT_E_ERROR;
+ }
+ break;
+
+ /////
+
+ // SDRAM Clock Group
+ case ALT_CLK_SDRAM_PLL_C0:
+ case ALT_CLK_DDR_DQS:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_SDRAM_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_SDRAM_PLL_C0, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_SDRAM_PLL_C1:
+ case ALT_CLK_DDR_2X_DQS:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_SDRAM_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_SDRAM_PLL_C1, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_SDRAM_PLL_C2:
+ case ALT_CLK_DDR_DQ:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_SDRAM_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_SDRAM_PLL_C2, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ case ALT_CLK_SDRAM_PLL_C5:
+ case ALT_CLK_H2F_USER2:
+ ret = alt_clk_pll_vco_freq_get(ALT_CLK_SDRAM_PLL, &temp);
+ if (ret == ALT_E_SUCCESS)
+ {
+ numer = (uint64_t) temp;
+ ret = alt_clk_divider_get(ALT_CLK_SDRAM_PLL_C5, &temp);
+ denom = (uint64_t) temp;
+ }
+ break;
+
+ default:
+ ret = ALT_E_BAD_ARG;
+ break;
+
+ } // end of switch-case construct
+
+ if (ret == ALT_E_SUCCESS)
+ {
+        // not reached unless one of the cases above matched and succeeded
+ if (denom > 0)
+ {
+ numer /= denom;
+ if (numer <= UINT32_MAX)
+ {
+ *freq = (uint32_t) numer;
+ }
+ else
+ {
+ ret = ALT_E_ERROR;
+ }
+ }
+ else
+ {
+ ret = ALT_E_ERROR;
+ }
+ }
+
+ return ret;
+}
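+
+//
+// A minimal usage sketch (not part of hwlib, compiled out via #if 0): query a
+// clock output frequency with alt_clk_freq_get(). Only identifiers declared
+// above are used; the function name and the choice of ALT_CLK_MPU are
+// illustrative only.
+//
+#if 0
+static void example_print_mpu_freq(void)
+{
+    alt_freq_t freq = 0;
+
+    // On ALT_E_SUCCESS, freq holds the mpu_clk output frequency.
+    if (alt_clk_freq_get(ALT_CLK_MPU, &freq) == ALT_E_SUCCESS)
+    {
+        printf("mpu_clk = %u\n", (unsigned int)freq);
+    }
+}
+#endif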
+
+//
+// alt_clk_irq_disable() disables one or more of the lock status conditions as
+// contributors to the clkmgr_IRQ interrupt signal state.
+//
+ALT_STATUS_CODE alt_clk_irq_disable(ALT_CLK_PLL_LOCK_STATUS_t lock_stat_mask)
+{
+ if (!(lock_stat_mask & ALT_CLK_MGR_PLL_LOCK_BITS))
+ {
+ alt_clrbits_word(ALT_CLKMGR_INTREN_ADDR, lock_stat_mask);
+ return ALT_E_SUCCESS;
+ }
+ else
+ {
+ return ALT_E_BAD_ARG;
+ }
+}
+
+//
+// alt_clk_irq_enable() enables one or more of the lock status conditions as
+// contributors to the clkmgr_IRQ interrupt signal state.
+//
+ALT_STATUS_CODE alt_clk_irq_enable(ALT_CLK_PLL_LOCK_STATUS_t lock_stat_mask)
+{
+ if (!(lock_stat_mask & ALT_CLK_MGR_PLL_LOCK_BITS))
+ {
+ alt_setbits_word(ALT_CLKMGR_INTREN_ADDR, lock_stat_mask);
+ return ALT_E_SUCCESS;
+ }
+ else
+ {
+ return ALT_E_BAD_ARG;
+ }
+}
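+
+//
+// A minimal usage sketch (not part of hwlib, compiled out via #if 0): mask a
+// caller-supplied set of lock status contributors around a critical section
+// and then restore them. The function name is illustrative only.
+//
+#if 0
+static void example_with_lock_irqs_masked(ALT_CLK_PLL_LOCK_STATUS_t lock_stat_mask)
+{
+    // Remove the selected conditions from the clkmgr_IRQ contributors.
+    if (alt_clk_irq_disable(lock_stat_mask) == ALT_E_SUCCESS)
+    {
+        // ... work that should not be interrupted by these conditions ...
+
+        // Re-enable the same contributors afterwards.
+        (void) alt_clk_irq_enable(lock_stat_mask);
+    }
+}
+#endif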
+
+/////
+
+//
+// alt_clk_group_cfg_raw_get() gets the raw configuration state of the designated
+// clock group.
+//
+ALT_STATUS_CODE alt_clk_group_cfg_raw_get(ALT_CLK_GRP_t clk_group,
+ ALT_CLK_GROUP_RAW_CFG_t * clk_group_raw_cfg)
+{
+ clk_group_raw_cfg->verid = alt_read_word(ALT_SYSMGR_SILICONID1_ADDR);
+ clk_group_raw_cfg->siliid2 = alt_read_word(ALT_SYSMGR_SILICONID2_ADDR);
+ clk_group_raw_cfg->clkgrpsel = clk_group;
+
+ if (clk_group == ALT_MAIN_PLL_CLK_GRP)
+ {
+ // Main PLL VCO register
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw.vco = alt_read_word(ALT_CLKMGR_MAINPLL_VCO_ADDR);
+
+ // Main PLL Misc register
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw.misc = alt_read_word(ALT_CLKMGR_MAINPLL_MISC_ADDR);
+
+ // Main PLL C0-C5 Counter registers
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw.mpuclk = alt_read_word(ALT_CLKMGR_MAINPLL_MPUCLK_ADDR);
+ // doing these as 32-bit reads and writes avoids unnecessary masking operations
+
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw.mainclk = alt_read_word(ALT_CLKMGR_MAINPLL_MAINCLK_ADDR);
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw.dbgatclk = alt_read_word(ALT_CLKMGR_MAINPLL_DBGATCLK_ADDR);
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw.mainqspiclk = alt_read_word(ALT_CLKMGR_MAINPLL_MAINQSPICLK_ADDR);
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw.mainnandsdmmcclk = alt_read_word(ALT_CLKMGR_MAINPLL_MAINNANDSDMMCCLK_ADDR);
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw.cfgs2fuser0clk = alt_read_word(ALT_CLKMGR_MAINPLL_CFGS2FUSER0CLK_ADDR);
+
+ // Main PLL Enable register
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw.en = alt_read_word(ALT_CLKMGR_MAINPLL_EN_ADDR);
+
+ // Main PLL Maindiv register
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw.maindiv = alt_read_word(ALT_CLKMGR_MAINPLL_MAINDIV_ADDR);
+
+ // Main PLL Debugdiv register
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw.dbgdiv = alt_read_word(ALT_CLKMGR_MAINPLL_DBGDIV_ADDR);
+
+ // Main PLL Tracediv register
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw.tracediv = alt_read_word(ALT_CLKMGR_MAINPLL_TRACEDIV_ADDR);
+
+ // Main PLL L4 Source register
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw.l4src = alt_read_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR);
+
+ // Main PLL Status register
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw.stat = alt_read_word(ALT_CLKMGR_MAINPLL_STAT_ADDR);
+ // clkgrp.mainpllgrp.stat.outresetack is defined in the ALT_CLKMGR_MAINPLL_STAT_s declaration
+ // as a const but alt_indwrite_word() overrides that restriction.
+
+ // padding ...
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw._pad_0x38_0x40[0] = 0;
+ clk_group_raw_cfg->clkgrp.mainpllgrp.raw._pad_0x38_0x40[1] = 0;
+
+ return ALT_E_SUCCESS;
+ }
+ else if (clk_group == ALT_PERIPH_PLL_CLK_GRP)
+ {
+ // Peripheral PLL VCO register
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw.vco = alt_read_word(ALT_CLKMGR_PERPLL_VCO_ADDR);
+
+ // Peripheral PLL Misc register
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw.misc = alt_read_word(ALT_CLKMGR_PERPLL_MISC_ADDR);
+
+ // Peripheral PLL C0-C5 Counters
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw.emac0clk = alt_read_word(ALT_CLKMGR_PERPLL_EMAC0CLK_ADDR);
+ // doing these as 32-bit reads and writes avoids unnecessary masking operations
+
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw.emac1clk = alt_read_word(ALT_CLKMGR_PERPLL_EMAC1CLK_ADDR);
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw.perqspiclk = alt_read_word(ALT_CLKMGR_PERPLL_PERQSPICLK_ADDR);
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw.pernandsdmmcclk = alt_read_word(ALT_CLKMGR_PERPLL_PERNANDSDMMCCLK_ADDR);
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw.perbaseclk = alt_read_word(ALT_CLKMGR_PERPLL_PERBASECLK_ADDR);
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw.s2fuser1clk = alt_read_word(ALT_CLKMGR_PERPLL_S2FUSER1CLK_ADDR);
+
+ // Peripheral PLL Enable register
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw.en = alt_read_word(ALT_CLKMGR_PERPLL_EN_ADDR);
+
+ // Peripheral PLL Divider register
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw.div = alt_read_word(ALT_CLKMGR_PERPLL_DIV_ADDR);
+
+ // Peripheral PLL GPIO Divider register
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw.gpiodiv = alt_read_word(ALT_CLKMGR_PERPLL_GPIODIV_ADDR);
+
+ // Peripheral PLL Source register
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw.src = alt_read_word(ALT_CLKMGR_PERPLL_SRC_ADDR);
+
+ // Peripheral PLL Status register
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw.stat = alt_read_word(ALT_CLKMGR_PERPLL_STAT_ADDR);
+
+ // padding ...
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw._pad_0x34_0x40[0] = 0;
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw._pad_0x34_0x40[1] = 0;
+ clk_group_raw_cfg->clkgrp.perpllgrp.raw._pad_0x34_0x40[2] = 0;
+
+ return ALT_E_SUCCESS;
+ }
+ else if (clk_group == ALT_SDRAM_PLL_CLK_GRP)
+ {
+ // SDRAM PLL VCO register
+ clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.vco = alt_read_word(ALT_CLKMGR_SDRPLL_VCO_ADDR);
+
+ // SDRAM PLL Control register
+ clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.ctrl = alt_read_word(ALT_CLKMGR_SDRPLL_CTL_ADDR);
+
+ // SDRAM PLL C0-C2 & C5 Counters
+ clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.ddrdqsclk = alt_read_word(ALT_CLKMGR_SDRPLL_DDRDQSCLK_ADDR);
+ // doing these as 32-bit reads and writes avoids unnecessary masking operations
+
+ clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.ddr2xdqsclk = alt_read_word(ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_ADDR);
+ clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.ddrdqclk = alt_read_word(ALT_CLKMGR_SDRPLL_DDRDQCLK_ADDR);
+ clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.s2fuser2clk = alt_read_word(ALT_CLKMGR_SDRPLL_S2FUSER2CLK_ADDR);
+
+ // SDRAM PLL Enable register
+ clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.en = alt_read_word(ALT_CLKMGR_SDRPLL_EN_ADDR);
+
+ // SDRAM PLL Status register
+ clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.stat = alt_read_word(ALT_CLKMGR_SDRPLL_STAT_ADDR);
+
+ return ALT_E_SUCCESS;
+ }
+ else
+ {
+ return ALT_E_BAD_ARG;
+ }
+}
+
+//
+// alt_clk_group_cfg_raw_set() sets the clock group configuration.
+//
+ALT_STATUS_CODE alt_clk_group_cfg_raw_set(const ALT_CLK_GROUP_RAW_CFG_t * clk_group_raw_cfg)
+{
+ // test for matching silicon ID, but not for matching silicon revision number
+ if (ALT_SYSMGR_SILICONID1_ID_GET(alt_read_word(ALT_SYSMGR_SILICONID1_ADDR)) !=
+ ALT_SYSMGR_SILICONID1_ID_GET(clk_group_raw_cfg->verid))
+ {
+ return ALT_E_BAD_VERSION;
+ }
+
+ // get the PLL ID
+ ALT_CLK_GRP_t clk_group = clk_group_raw_cfg->clkgrpsel;
+ ALT_CLK_t pll;
+
+ if (clk_group == ALT_MAIN_PLL_CLK_GRP) { pll = ALT_CLK_MAIN_PLL; }
+ else if (clk_group == ALT_PERIPH_PLL_CLK_GRP) { pll = ALT_CLK_PERIPHERAL_PLL; }
+ else if (clk_group == ALT_SDRAM_PLL_CLK_GRP) { pll = ALT_CLK_SDRAM_PLL; }
+ else
+ {
+ return ALT_E_ERROR;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ // if the PLL isn't in bypass mode, put it in bypass mode
+ bool byp = false;
+ if (alt_clk_pll_is_bypassed(pll) == ALT_E_FALSE)
+ {
+ status = alt_clk_pll_bypass_enable(pll, false);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+
+ byp = true;
+ }
+
+ // now write the values in the ALT_CLK_GROUP_RAW_CFG_t structure to the registers
+ if (clk_group == ALT_MAIN_PLL_CLK_GRP)
+ {
+ // Main PLL VCO register
+ alt_write_word(ALT_CLKMGR_MAINPLL_VCO_ADDR, clk_group_raw_cfg->clkgrp.mainpllgrp.raw.vco &
+ ALT_CLKMGR_MAINPLL_VCO_OUTRSTALL_CLR_MSK & ALT_CLKMGR_MAINPLL_VCO_OUTRST_CLR_MSK);
+ // the outreset and outresetall bits were probably clear when the
+ // state was saved, but make sure they're clear now
+
+ // Main PLL Misc register
+ alt_write_word(ALT_CLKMGR_MAINPLL_MISC_ADDR, clk_group_raw_cfg->clkgrp.mainpllgrp.raw.misc);
+
+ // Main PLL C0-C5 Counter registers
+ alt_write_word(ALT_CLKMGR_MAINPLL_MPUCLK_ADDR, clk_group_raw_cfg->clkgrp.mainpllgrp.raw.mpuclk);
+ alt_write_word(ALT_CLKMGR_MAINPLL_MAINCLK_ADDR, clk_group_raw_cfg->clkgrp.mainpllgrp.raw.mainclk);
+ alt_write_word(ALT_CLKMGR_MAINPLL_DBGATCLK_ADDR, clk_group_raw_cfg->clkgrp.mainpllgrp.raw.dbgatclk);
+ alt_write_word(ALT_CLKMGR_MAINPLL_MAINQSPICLK_ADDR, clk_group_raw_cfg->clkgrp.mainpllgrp.raw.mainqspiclk);
+ alt_write_word(ALT_CLKMGR_MAINPLL_MAINNANDSDMMCCLK_ADDR, clk_group_raw_cfg->clkgrp.mainpllgrp.raw.mainnandsdmmcclk);
+ alt_write_word(ALT_CLKMGR_MAINPLL_CFGS2FUSER0CLK_ADDR, clk_group_raw_cfg->clkgrp.mainpllgrp.raw.cfgs2fuser0clk);
+
+ // Main PLL Counter Enable register
+ alt_write_word(ALT_CLKMGR_MAINPLL_EN_ADDR, clk_group_raw_cfg->clkgrp.mainpllgrp.raw.en);
+
+ // Main PLL Maindiv register
+ alt_write_word(ALT_CLKMGR_MAINPLL_MAINDIV_ADDR, clk_group_raw_cfg->clkgrp.mainpllgrp.raw.maindiv);
+
+ // Main PLL Debugdiv register
+ alt_write_word(ALT_CLKMGR_MAINPLL_DBGDIV_ADDR, clk_group_raw_cfg->clkgrp.mainpllgrp.raw.dbgdiv);
+
+ // Main PLL Tracediv register
+ alt_write_word(ALT_CLKMGR_MAINPLL_TRACEDIV_ADDR, clk_group_raw_cfg->clkgrp.mainpllgrp.raw.tracediv);
+
+ // Main PLL L4 Source register
+ alt_write_word(ALT_CLKMGR_MAINPLL_L4SRC_ADDR, clk_group_raw_cfg->clkgrp.mainpllgrp.raw.l4src);
+ }
+ else if (clk_group == ALT_PERIPH_PLL_CLK_GRP)
+ {
+ // Peripheral PLL VCO register
+ alt_write_word(ALT_CLKMGR_PERPLL_VCO_ADDR, clk_group_raw_cfg->clkgrp.perpllgrp.raw.vco &
+ ALT_CLKMGR_PERPLL_VCO_OUTRST_CLR_MSK & ALT_CLKMGR_PERPLL_VCO_OUTRSTALL_CLR_MSK);
+ // the outreset and outresetall bits were probably clear when the
+ // state was saved, but make sure they're clear now
+
+ // Peripheral PLL Misc register
+ alt_write_word(ALT_CLKMGR_PERPLL_MISC_ADDR, clk_group_raw_cfg->clkgrp.perpllgrp.raw.misc);
+
+ // Peripheral PLL C0-C5 Counters
+ alt_write_word(ALT_CLKMGR_PERPLL_EMAC0CLK_ADDR, clk_group_raw_cfg->clkgrp.perpllgrp.raw.emac0clk);
+ alt_write_word(ALT_CLKMGR_PERPLL_EMAC1CLK_ADDR, clk_group_raw_cfg->clkgrp.perpllgrp.raw.emac1clk);
+ alt_write_word(ALT_CLKMGR_PERPLL_PERQSPICLK_ADDR, clk_group_raw_cfg->clkgrp.perpllgrp.raw.perqspiclk);
+ alt_write_word(ALT_CLKMGR_PERPLL_PERNANDSDMMCCLK_ADDR, clk_group_raw_cfg->clkgrp.perpllgrp.raw.pernandsdmmcclk);
+ alt_write_word(ALT_CLKMGR_PERPLL_PERBASECLK_ADDR, clk_group_raw_cfg->clkgrp.perpllgrp.raw.perbaseclk);
+ alt_write_word(ALT_CLKMGR_PERPLL_S2FUSER1CLK_ADDR, clk_group_raw_cfg->clkgrp.perpllgrp.raw.s2fuser1clk);
+
+ // Peripheral PLL Counter Enable register
+ alt_write_word(ALT_CLKMGR_PERPLL_EN_ADDR, clk_group_raw_cfg->clkgrp.perpllgrp.raw.en);
+
+ // Peripheral PLL Divider register
+ alt_write_word(ALT_CLKMGR_PERPLL_DIV_ADDR, clk_group_raw_cfg->clkgrp.perpllgrp.raw.div);
+
+ // Peripheral PLL GPIO Divider register
+ alt_write_word(ALT_CLKMGR_PERPLL_GPIODIV_ADDR, clk_group_raw_cfg->clkgrp.perpllgrp.raw.gpiodiv);
+
+ // Peripheral PLL Source register
+ alt_write_word(ALT_CLKMGR_PERPLL_SRC_ADDR, clk_group_raw_cfg->clkgrp.perpllgrp.raw.src);
+ }
+ else if (clk_group == ALT_SDRAM_PLL_CLK_GRP)
+ {
+ // SDRAM PLL VCO register
+ alt_write_word(ALT_CLKMGR_SDRPLL_VCO_ADDR, clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.vco &
+ ALT_CLKMGR_SDRPLL_VCO_OUTRST_CLR_MSK & ALT_CLKMGR_SDRPLL_VCO_OUTRSTALL_CLR_MSK);
+ // the outreset and outresetall bits were probably clear when the
+ // state was saved, but make sure they're clear now
+
+ // SDRAM PLL Control register
+ alt_write_word(ALT_CLKMGR_SDRPLL_CTL_ADDR, clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.ctrl);
+
+ // SDRAM PLL C0-C2 & C5 Counters
+ alt_write_word(ALT_CLKMGR_SDRPLL_DDRDQSCLK_ADDR, clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.ddrdqsclk);
+ alt_write_word(ALT_CLKMGR_SDRPLL_DDR2XDQSCLK_ADDR, clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.ddr2xdqsclk);
+ alt_write_word(ALT_CLKMGR_SDRPLL_DDRDQCLK_ADDR, clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.ddrdqclk);
+ alt_write_word(ALT_CLKMGR_SDRPLL_S2FUSER2CLK_ADDR, clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.s2fuser2clk);
+
+ // SDRAM PLL Counter Enable register
+ alt_write_word(ALT_CLKMGR_SDRPLL_EN_ADDR, clk_group_raw_cfg->clkgrp.sdrpllgrp.raw.en);
+ }
+
+ // if PLL was not bypassed before, restore that state
+ if (byp)
+ {
+ status = alt_clk_pll_bypass_disable(pll);
+ }
+
+ return status;
+}
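+
+//
+// A minimal usage sketch (not part of hwlib, compiled out via #if 0): capture
+// the Main PLL clock group registers with alt_clk_group_cfg_raw_get() and
+// write them back later with alt_clk_group_cfg_raw_set(). The static variable
+// and function names are illustrative only.
+//
+#if 0
+static ALT_CLK_GROUP_RAW_CFG_t example_saved_main_cfg;
+
+static ALT_STATUS_CODE example_save_main_pll_group(void)
+{
+    // Snapshot the current Main PLL clock group configuration.
+    return alt_clk_group_cfg_raw_get(ALT_MAIN_PLL_CLK_GRP, &example_saved_main_cfg);
+}
+
+static ALT_STATUS_CODE example_restore_main_pll_group(void)
+{
+    // Restore the snapshot; the PLL is placed in bypass while the registers are rewritten.
+    return alt_clk_group_cfg_raw_set(&example_saved_main_cfg);
+}
+#endif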
+
+//
+// alt_clk_id_to_string() converts a clock ID to a text string.
+//
+ALT_STATUS_CODE alt_clk_id_to_string(ALT_CLK_t clk_id, char * output, size_t size)
+{
+ char * name = NULL;
+
+ switch (clk_id)
+ {
+ case ALT_CLK_IN_PIN_OSC1:
+ name = "IN_PIN_OSC1";
+ break;
+ case ALT_CLK_IN_PIN_OSC2:
+ name = "IN_PIN_OSC2";
+ break;
+
+ // FPGA Clock Sources External to HPS
+ case ALT_CLK_F2H_PERIPH_REF:
+ name = "F2H_PERIPH_REF";
+ break;
+ case ALT_CLK_F2H_SDRAM_REF:
+ name = "F2H_SDRAM_REF";
+ break;
+
+ // Other Clock Sources External to HPS
+ case ALT_CLK_IN_PIN_JTAG:
+ name = "IN_PIN_JTAG";
+ break;
+ case ALT_CLK_IN_PIN_ULPI0:
+ name = "IN_PIN_ULPI0";
+ break;
+ case ALT_CLK_IN_PIN_ULPI1:
+ name = "IN_PIN_ULPI1";
+ break;
+ case ALT_CLK_IN_PIN_EMAC0_RX:
+ name = "IN_PIN_EMAC0_RX";
+ break;
+ case ALT_CLK_IN_PIN_EMAC1_RX:
+ name = "IN_PIN_EMAC1_RX";
+ break;
+
+ // PLLs
+ case ALT_CLK_MAIN_PLL:
+ name = "MAIN_PLL";
+ break;
+ case ALT_CLK_PERIPHERAL_PLL:
+ name = "PERIPHERAL_PLL";
+ break;
+ case ALT_CLK_SDRAM_PLL:
+ name = "SDRAM_PLL";
+ break;
+
+ // OSC1 Clock Group - The OSC1 clock group contains those clocks which are derived
+ // directly from the osc_clk_1_HPS pin
+ case ALT_CLK_OSC1:
+ name = "OSC1";
+ break;
+
+ // Main Clock Group - The following clocks are derived from the Main PLL.
+ case ALT_CLK_MAIN_PLL_C0:
+ name = "MAIN_PLL_C0";
+ break;
+ case ALT_CLK_MAIN_PLL_C1:
+ name = "MAIN_PLL_C1";
+ break;
+ case ALT_CLK_MAIN_PLL_C2:
+ name = "MAIN_PLL_C2";
+ break;
+ case ALT_CLK_MAIN_PLL_C3:
+ name = "MAIN_PLL_C3";
+ break;
+ case ALT_CLK_MAIN_PLL_C4:
+ name = "MAIN_PLL_C4";
+ break;
+ case ALT_CLK_MAIN_PLL_C5:
+ name = "MAIN_PLL_C5";
+ break;
+ case ALT_CLK_MPU:
+ name = "MPU";
+ break;
+ case ALT_CLK_MPU_L2_RAM:
+ name = "MPU_L2_RAM";
+ break;
+ case ALT_CLK_MPU_PERIPH:
+ name = "MPU_PERIPH";
+ break;
+ case ALT_CLK_L3_MAIN:
+ name = "L3_MAIN";
+ break;
+ case ALT_CLK_L3_MP:
+ name = "L3_MP";
+ break;
+ case ALT_CLK_L3_SP:
+ name = "L3_SP";
+ break;
+ case ALT_CLK_L4_MAIN:
+ name = "L4_MAIN";
+ break;
+ case ALT_CLK_L4_MP:
+ name = "L4_MP";
+ break;
+ case ALT_CLK_L4_SP:
+ name = "L4_SP";
+ break;
+ case ALT_CLK_DBG_BASE:
+ name = "DBG_BASE";
+ break;
+ case ALT_CLK_DBG_AT:
+ name = "DBG_AT";
+ break;
+ case ALT_CLK_DBG_TRACE:
+ name = "DBG_TRACE";
+ break;
+ case ALT_CLK_DBG_TIMER:
+ name = "DBG_TIMER";
+ break;
+ case ALT_CLK_DBG:
+ name = "DBG";
+ break;
+ case ALT_CLK_MAIN_QSPI:
+ name = "MAIN_QSPI";
+ break;
+ case ALT_CLK_MAIN_NAND_SDMMC:
+ name = "MAIN_NAND_SDMMC";
+ break;
+ case ALT_CLK_CFG:
+ name = "CFG";
+ break;
+ case ALT_CLK_H2F_USER0:
+ name = "H2F_USER0";
+ break;
+
+ // Peripherals Clock Group - The following clocks are derived from the Peripheral PLL.
+ case ALT_CLK_PERIPHERAL_PLL_C0:
+ name = "PERIPHERAL_PLL_C0";
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C1:
+ name = "PERIPHERAL_PLL_C1";
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C2:
+ name = "PERIPHERAL_PLL_C2";
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C3:
+ name = "PERIPHERAL_PLL_C3";
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C4:
+ name = "PERIPHERAL_PLL_C4";
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C5:
+ name = "PERIPHERAL_PLL_C5";
+ break;
+ case ALT_CLK_USB_MP:
+ name = "USB_MP";
+ break;
+ case ALT_CLK_SPI_M:
+ name = "SPI_M";
+ break;
+ case ALT_CLK_QSPI:
+ name = "QSPI";
+ break;
+ case ALT_CLK_NAND_X:
+ name = "NAND_X";
+ break;
+ case ALT_CLK_NAND:
+ name = "NAND";
+ break;
+ case ALT_CLK_SDMMC:
+ name = "SDMMC";
+ break;
+ case ALT_CLK_EMAC0:
+ name = "EMAC0";
+ break;
+ case ALT_CLK_EMAC1:
+ name = "EMAC1";
+ break;
+ case ALT_CLK_CAN0:
+ name = "CAN0";
+ break;
+ case ALT_CLK_CAN1:
+ name = "CAN1";
+ break;
+ case ALT_CLK_GPIO_DB:
+ name = "GPIO_DB";
+ break;
+ case ALT_CLK_H2F_USER1:
+ name = "H2F_USER1";
+ break;
+
+ // SDRAM Clock Group - The following clocks are derived from the SDRAM PLL.
+ case ALT_CLK_SDRAM_PLL_C0:
+ name = "SDRAM_PLL_C0";
+ break;
+ case ALT_CLK_SDRAM_PLL_C1:
+ name = "SDRAM_PLL_C1";
+ break;
+ case ALT_CLK_SDRAM_PLL_C2:
+ name = "SDRAM_PLL_C2";
+ break;
+ case ALT_CLK_SDRAM_PLL_C3:
+ name = "SDRAM_PLL_C3";
+ break;
+ case ALT_CLK_SDRAM_PLL_C4:
+ name = "SDRAM_PLL_C4";
+ break;
+ case ALT_CLK_SDRAM_PLL_C5:
+ name = "SDRAM_PLL_C5";
+ break;
+ case ALT_CLK_DDR_DQS:
+ name = "DDR_DQS";
+ break;
+ case ALT_CLK_DDR_2X_DQS:
+ name = "DDR_2X_DQS";
+ break;
+ case ALT_CLK_DDR_DQ:
+ name = "DDR_DQ";
+ break;
+ case ALT_CLK_H2F_USER2:
+ name = "H2F_USER2";
+ break;
+
+ // Clock Output Pins
+ case ALT_CLK_OUT_PIN_EMAC0_TX:
+ name = "OUT_PIN_EMAC0_TX";
+ break;
+ case ALT_CLK_OUT_PIN_EMAC1_TX:
+ name = "OUT_PIN_EMAC1_TX";
+ break;
+ case ALT_CLK_OUT_PIN_SDMMC:
+ name = "OUT_PIN_SDMMC";
+ break;
+ case ALT_CLK_OUT_PIN_I2C0_SCL:
+ name = "OUT_PIN_I2C0_SCL";
+ break;
+ case ALT_CLK_OUT_PIN_I2C1_SCL:
+ name = "OUT_PIN_I2C1_SCL";
+ break;
+ case ALT_CLK_OUT_PIN_I2C2_SCL:
+ name = "OUT_PIN_I2C2_SCL";
+ break;
+ case ALT_CLK_OUT_PIN_I2C3_SCL:
+ name = "OUT_PIN_I2C3_SCL";
+ break;
+ case ALT_CLK_OUT_PIN_SPIM0:
+ name = "OUT_PIN_SPIM0";
+ break;
+ case ALT_CLK_OUT_PIN_SPIM1:
+ name = "OUT_PIN_SPIM1";
+ break;
+ case ALT_CLK_OUT_PIN_QSPI:
+ name = "OUT_PIN_QSPI";
+ break;
+ case ALT_CLK_UNKNOWN:
+ name = "UNKNOWN";
+ break;
+
+    // do *not* put a 'default' statement here. Without one, the compiler will
+    // throw an error if another clock id enum value is added but the
+    // corresponding string is not added to this function.
+ }
+
+ if (name != NULL)
+ {
+ snprintf(output, size, "ALT_CLK_%s", name);
+ return ALT_E_SUCCESS;
+ }
+ else
+ {
+ return ALT_E_BAD_ARG;
+ }
+}
+
+
+//
+// alt_clk_pll_cntr_maxfreq_recalc() recalculates the maximum frequency of the specified clock.
+//
+ALT_STATUS_CODE alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_t clk, ALT_PLL_CNTR_FREQMAX_t * maxfreq)
+{
+ ALT_STATUS_CODE ret = ALT_E_BAD_ARG;
+ alt_freq_t freq;
+
+ ret = alt_clk_freq_get(clk, &freq);
+
+ if (ret == ALT_E_SUCCESS)
+ {
+
+ switch (clk)
+ {
+ // Main Clock Group
+ case ALT_CLK_MAIN_PLL_C0:
+ maxfreq->MainPLL_C0 = freq;
+ printf("alt_pll_cntr_maxfreq.MainPLL_C0 = %10d\n", (unsigned int)freq);
+ break;
+ case ALT_CLK_MAIN_PLL_C1:
+ maxfreq->MainPLL_C1 = freq;
+ printf("alt_pll_cntr_maxfreq.MainPLL_C1 = %10d\n", (unsigned int)freq);
+ break;
+ case ALT_CLK_MAIN_PLL_C2:
+ maxfreq->MainPLL_C2 = freq;
+ printf("alt_pll_cntr_maxfreq.MainPLL_C2 = %10d\n", (unsigned int)freq);
+ break;
+ case ALT_CLK_MAIN_PLL_C3:
+ maxfreq->MainPLL_C3 = freq;
+ printf("alt_pll_cntr_maxfreq.MainPLL_C3 = %10d\n", (unsigned int)freq);
+ break;
+ case ALT_CLK_MAIN_PLL_C4:
+ maxfreq->MainPLL_C4 = freq;
+ printf("alt_pll_cntr_maxfreq.MainPLL_C4 = %10d\n", (unsigned int)freq);
+ break;
+ case ALT_CLK_MAIN_PLL_C5:
+ maxfreq->MainPLL_C5 = freq;
+ printf("alt_pll_cntr_maxfreq.MainPLL_C5 = %10d\n", (unsigned int)freq);
+ break;
+
+ // Peripheral Clock Group
+ case ALT_CLK_PERIPHERAL_PLL_C0:
+ maxfreq->PeriphPLL_C0 = freq;
+ printf("alt_pll_cntr_maxfreq.PeriphPLL_C0 = %10d\n", (unsigned int)freq);
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C1:
+ maxfreq->PeriphPLL_C1 = freq;
+ printf("alt_pll_cntr_maxfreq.PeriphPLL_C1 = %10d\n", (unsigned int)freq);
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C2:
+ maxfreq->PeriphPLL_C2 = freq;
+ printf("alt_pll_cntr_maxfreq.PeriphPLL_C2 = %10d\n", (unsigned int)freq);
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C3:
+ maxfreq->PeriphPLL_C3 = freq;
+ printf("alt_pll_cntr_maxfreq.PeriphPLL_C3 = %10d\n", (unsigned int)freq);
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C4:
+ maxfreq->PeriphPLL_C4 = freq;
+ printf("alt_pll_cntr_maxfreq.PeriphPLL_C4 = %10d\n", (unsigned int)freq);
+ break;
+ case ALT_CLK_PERIPHERAL_PLL_C5:
+ maxfreq->PeriphPLL_C5 = freq;
+ printf("alt_pll_cntr_maxfreq.PeriphPLL_C5 = %10d\n", (unsigned int)freq);
+ break;
+
+ // SDRAM Clock Group
+ case ALT_CLK_SDRAM_PLL_C0:
+ maxfreq->SDRAMPLL_C0 = freq;
+ printf("alt_pll_cntr_maxfreq.SDRAMPLL_C0 = %10d\n", (unsigned int)freq);
+ break;
+ case ALT_CLK_SDRAM_PLL_C1:
+ maxfreq->SDRAMPLL_C1 = freq;
+ printf("alt_pll_cntr_maxfreq.SDRAMPLL_C1 = %10d\n", (unsigned int)freq);
+ break;
+ case ALT_CLK_SDRAM_PLL_C2:
+ maxfreq->SDRAMPLL_C2 = freq;
+ printf("alt_pll_cntr_maxfreq.SDRAMPLL_C2 = %10d\n", (unsigned int)freq);
+ break;
+ case ALT_CLK_SDRAM_PLL_C5:
+ maxfreq->SDRAMPLL_C5 = freq;
+ printf("alt_pll_cntr_maxfreq.SDRAMPLL_C5 = %10d\n", (unsigned int)freq);
+ break;
+ default:
+ ret = ALT_E_BAD_ARG;
+ printf("bad max frequency parameter\n");
+ break;
+ } // end of switch-case construct
+ }
+
+ return ret;
+}
+
+//
+// The u-boot preloader actually initializes the clock manager circuitry.
+//
+// alt_clk_clkmgr_init() attempts to fix up the pll counter max frequencies,
+// since these frequencies are not known in advance until u-boot has programmed
+// the clock manager.
+//
+ALT_STATUS_CODE alt_clk_clkmgr_init(void)
+{
+ ALT_STATUS_CODE ret = ALT_E_SUCCESS;
+    ALT_STATUS_CODE status;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_MAIN_PLL_C0,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_MAIN_PLL_C1,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_MAIN_PLL_C2,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_MAIN_PLL_C3,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_MAIN_PLL_C4,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_MAIN_PLL_C5,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_PERIPHERAL_PLL_C0,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_PERIPHERAL_PLL_C1,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_PERIPHERAL_PLL_C2,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_PERIPHERAL_PLL_C3,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_PERIPHERAL_PLL_C4,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_PERIPHERAL_PLL_C5,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_SDRAM_PLL_C0,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_SDRAM_PLL_C1,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_SDRAM_PLL_C2,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+ status = alt_clk_pll_cntr_maxfreq_recalc(ALT_CLK_SDRAM_PLL_C5,&alt_pll_cntr_maxfreq );
+ if (status != ALT_E_SUCCESS) ret = ALT_E_ERROR;
+
+
+ return ret;
+}
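+
+//
+// A minimal usage sketch (illustrative only, kept under #if 0 so it is not
+// compiled): it assumes nothing beyond alt_clk_clkmgr_init() and
+// alt_clk_freq_get() as used in this file, and simply queries one of the
+// clocks named above after the max-frequency table has been rebuilt.
+//
+#if 0
+static void alt_clk_usage_sketch(void)
+{
+    alt_freq_t freq;
+
+    if (alt_clk_clkmgr_init() != ALT_E_SUCCESS)
+    {
+        return;
+    }
+
+    if (alt_clk_freq_get(ALT_CLK_MPU_PERIPH, &freq) == ALT_E_SUCCESS)
+    {
+        printf("ALT_CLK_MPU_PERIPH = %u Hz\n", (unsigned int)freq);
+    }
+}
+#endif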
+
diff --git a/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_dma.c b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_dma.c
new file mode 100644
index 0000000000..0fa69141ea
--- /dev/null
+++ b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_dma.c
@@ -0,0 +1,3749 @@
+/******************************************************************************
+ *
+ * Copyright 2013 Altera Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+#include <stdio.h>
+#include <bsp/alt_dma.h>
+#include <bsp/socal/socal.h>
+#include <bsp/socal/hps.h>
+#include <bsp/socal/alt_rstmgr.h>
+#include <bsp/socal/alt_sysmgr.h>
+
+#if ALT_DMA_PERIPH_PROVISION_16550_SUPPORT
+#include <bsp/alt_16550_uart.h>
+#include <bsp/socal/alt_uart.h>
+#endif
+
+#if ALT_DMA_PERIPH_PROVISION_QSPI_SUPPORT
+#include <bsp/socal/alt_qspi.h>
+#endif
+
+/////
+
+#ifndef MIN
+#define MIN(a, b) ((a) > (b) ? (b) : (a))
+#endif // MIN
+
+#ifndef ARRAY_COUNT
+#define ARRAY_COUNT(array) (sizeof(array) / sizeof(array[0]))
+#endif
+
+// NOTE: To enable debugging output, delete the next line and uncomment the
+// line after.
+#define dprintf(...)
+// #define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+
+/////
+
+//
+// SoCAL stand-in for DMA Controller registers
+//
+// The base can be one of the following:
+// - ALT_DMANONSECURE_ADDR
+// - ALT_DMASECURE_ADDR
+//
+// Macros which have a channel parameter do no validation.
+//
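+//
+// For example, ALT_DMA_CSRx_ADDR(ALT_DMASECURE_ADDR, 2) (defined below) names
+// the Channel Status Register of channel 2 on the secure interface; passing
+// ALT_DMANONSECURE_ADDR instead addresses the same register through the
+// non-secure interface. The caller is responsible for the channel being valid.
+//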
+
+// DMA Manager Status Register
+#define ALT_DMA_DSR_OFST 0x0
+#define ALT_DMA_DSR_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_DSR_OFST))
+#define ALT_DMA_DSR_DMASTATUS_SET_MSK 0x0000000f
+#define ALT_DMA_DSR_DMASTATUS_GET(value) ((value) & 0x0000000f)
+
+// DMA Program Counter Register
+#define ALT_DMA_DPC_OFST 0x4
+#define ALT_DMA_DPC_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_DPC_OFST))
+
+// Interrupt Enable Register
+#define ALT_DMA_INTEN_OFST 0x20
+#define ALT_DMA_INTEN_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_INTEN_OFST))
+
+// Event-Interrupt Raw Status Register
+#define ALT_DMA_INT_EVENT_RIS_OFST 0x24
+#define ALT_DMA_INT_EVENT_RIS_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_INT_EVENT_RIS_OFST))
+
+// Interrupt Status Register
+#define ALT_DMA_INTMIS_OFST 0x28
+#define ALT_DMA_INTMIS_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_INTMIS_OFST))
+
+// Interrupt Clear Register
+#define ALT_DMA_INTCLR_OFST 0x2c
+#define ALT_DMA_INTCLR_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_INTCLR_OFST))
+
+// Fault Status DMA Manager Register
+#define ALT_DMA_FSRD_OFST 0x30
+#define ALT_DMA_FSRD_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_FSRD_OFST))
+
+// Fault Status DMA Channel Register
+#define ALT_DMA_FSRC_OFST 0x34
+#define ALT_DMA_FSRC_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_FSRC_OFST))
+
+// Fault Type DMA Manager Register
+#define ALT_DMA_FTRD_OFST 0x38
+#define ALT_DMA_FTRD_ADDR(base)        ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_FTRD_OFST))
+
+// Fault Type DMA Channel Registers
+#define ALT_DMA_FTRx_OFST(channel) (0x40 + 0x4 * (channel))
+#define ALT_DMA_FTRx_ADDR(base, channel) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_FTRx_OFST(channel)))
+
+// Channel Status Registers
+#define ALT_DMA_CSRx_OFST(channel) (0x100 + 0x8 * (channel))
+#define ALT_DMA_CSRx_ADDR(base, channel) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_CSRx_OFST(channel)))
+#define ALT_DMA_CSRx_CHANNELSTATUS_SET_MSK 0x0000000f
+#define ALT_DMA_CSRx_CHANNELSTATUS_GET(value) ((value) & 0x0000000f)
+
+// Channel Program Counter Registers
+#define ALT_DMA_CPCx_OFST(channel) (0x104 + 0x8 * (channel))
+#define ALT_DMA_CPCx_ADDR(base, channel) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_CPCx_OFST(channel)))
+
+// Source Address Registers
+#define ALT_DMA_SARx_OFST(channel) (0x400 + 0x20 * (channel))
+#define ALT_DMA_SARx_ADDR(base, channel) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_SARx_OFST(channel)))
+
+// Destination Address Registers
+#define ALT_DMA_DARx_OFST(channel) (0x404 + 0x20 * (channel))
+#define ALT_DMA_DARx_ADDR(base, channel) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_DARx_OFST(channel)))
+
+// Channel Control Registers
+#define ALT_DMA_CCRx_OFST(channel) (0x408 + 0x20 * (channel))
+#define ALT_DMA_CCRx_ADDR(base, channel) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_CCRx_OFST(channel)))
+
+// Loop Counter 0 Registers
+#define ALT_DMA_LC0_x_OFST(channel) (0x40c + 0x20 * (channel))
+#define ALT_DMA_LC0_x_ADDR(base, channel) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_LC0_x_OFST(channel)))
+
+// Loop Counter 1 Registers
+#define ALT_DMA_LC1_x_OFST(channel) (0x410 + 0x20 * (channel))
+#define ALT_DMA_LC1_x_ADDR(base, channel) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_LC1_x_OFST(channel)))
+
+// Debug Status Register
+#define ALT_DMA_DBGSTATUS_OFST 0xd00
+#define ALT_DMA_DBGSTATUS_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_DBGSTATUS_OFST))
+
+// Debug Command Register
+#define ALT_DMA_DBGCMD_OFST 0xd04
+#define ALT_DMA_DBGCMD_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_DBGCMD_OFST))
+
+// Debug Instruction-0 Register
+#define ALT_DMA_DBGINST0_OFST 0xd08
+#define ALT_DMA_DBGINST0_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_DBGINST0_OFST))
+#define ALT_DMA_DBGINST0_CHANNELNUMBER_SET(value) (((value) & 0x7) << 8)
+#define ALT_DMA_DBGINST0_DEBUGTHREAD_SET(value) ((value) & 0x1)
+#define ALT_DMA_DBGINST0_DEBUGTHREAD_E_MANAGER 0
+#define ALT_DMA_DBGINST0_DEBUGTHREAD_E_CHANNEL 1
+#define ALT_DMA_DBGINST0_INSTRUCTIONBYTE0_SET(value) (((value) & 0xff) << 16)
+#define ALT_DMA_DBGINST0_INSTRUCTIONBYTE1_SET(value) (((value) & 0xff) << 24)
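+//
+// For example (values follow from the macros above and the DMAKILL usage in
+// alt_dma_channel_kill() below): encoding DMAKILL (opcode 0x01) for channel 3
+// on the channel thread gives
+//   INSTRUCTIONBYTE0_SET(0x01) | CHANNELNUMBER_SET(3) | DEBUGTHREAD_SET(1)
+//   = 0x00010000 | 0x00000300 | 0x00000001 = 0x00010301.
+//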
+
+// Debug Instruction-1 Register
+#define ALT_DMA_DBGINST1_OFST 0xd0c
+#define ALT_DMA_DBGINST1_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_DBGINST1_OFST))
+
+// Configuration Registers 0 - 4
+#define ALT_DMA_CR0_OFST 0xe00
+#define ALT_DMA_CR1_OFST 0xe04
+#define ALT_DMA_CR2_OFST 0xe08
+#define ALT_DMA_CR3_OFST 0xe0c
+#define ALT_DMA_CR4_OFST 0xe10
+#define ALT_DMA_CR0_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_CR0_OFST))
+#define ALT_DMA_CR1_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_CR1_OFST))
+#define ALT_DMA_CR2_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_CR2_OFST))
+#define ALT_DMA_CR3_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_CR3_OFST))
+#define ALT_DMA_CR4_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_CR4_OFST))
+
+// DMA Configuration Register
+#define ALT_DMA_CRD_OFST 0xe14
+#define ALT_DMA_CRD_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_CRD_OFST))
+
+// Watchdog Register
+#define ALT_DMA_WD_OFST 0xe80
+#define ALT_DMA_WD_ADDR(base) ALT_CAST(void *, (ALT_CAST(char *, (base)) + ALT_DMA_WD_OFST))
+
+/////
+
+//
+// Internal Data structures
+//
+
+// This flag marks the channel as being allocated.
+#define ALT_DMA_CHANNEL_INFO_FLAG_ALLOCED (1 << 0)
+
+typedef struct ALT_DMA_CHANNEL_INFO_s
+{
+ uint8_t flag;
+}
+ALT_DMA_CHANNEL_INFO_t;
+
+static ALT_DMA_CHANNEL_INFO_t channel_info_array[8];
+
+/////
+
+ALT_STATUS_CODE alt_dma_init(const ALT_DMA_CFG_t * dma_cfg)
+{
+ // Initialize the channel information array
+ for (int i = 0; i < 8; ++i)
+ {
+ channel_info_array[i].flag = 0;
+ }
+
+ // Update the System Manager DMA configuration items
+
+ uint32_t dmactrl = 0;
+
+ // Handle FPGA / CAN muxing
+ for (int i = 0; i < 4; ++i)
+ {
+ // The default is FPGA.
+ switch (dma_cfg->periph_mux[i])
+ {
+ case ALT_DMA_PERIPH_MUX_DEFAULT:
+ case ALT_DMA_PERIPH_MUX_FPGA:
+ break;
+ case ALT_DMA_PERIPH_MUX_CAN:
+ dmactrl |= (ALT_SYSMGR_DMA_CTL_CHANSEL_0_SET_MSK << i);
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+ }
+
+ // Handle Manager security
+ // Default is Secure state.
+ switch (dma_cfg->manager_sec)
+ {
+ case ALT_DMA_SECURITY_DEFAULT:
+ case ALT_DMA_SECURITY_SECURE:
+ break;
+ case ALT_DMA_SECURITY_NONSECURE:
+ dmactrl |= ALT_SYSMGR_DMA_CTL_MGRNONSECURE_SET_MSK;
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+
+ // Handle IRQ security
+ for (int i = 0; i < ALT_SYSMGR_DMA_CTL_IRQNONSECURE_WIDTH; ++i)
+ {
+ // Default is Secure state.
+ switch (dma_cfg->irq_sec[i])
+ {
+ case ALT_DMA_SECURITY_DEFAULT:
+ case ALT_DMA_SECURITY_SECURE:
+ break;
+ case ALT_DMA_SECURITY_NONSECURE:
+ dmactrl |= (1 << (i + ALT_SYSMGR_DMA_CTL_IRQNONSECURE_LSB));
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+ }
+
+ alt_write_word(ALT_SYSMGR_DMA_CTL_ADDR, dmactrl);
+
+ // Update the System Manager DMA peripheral security items
+
+ uint32_t dmapersecurity = 0;
+
+ for (int i = 0; i < 32; ++i)
+ {
+ // Default is Secure state.
+ switch (dma_cfg->periph_sec[i])
+ {
+ case ALT_DMA_SECURITY_DEFAULT:
+ case ALT_DMA_SECURITY_SECURE:
+ break;
+ case ALT_DMA_SECURITY_NONSECURE:
+ dmapersecurity |= (1 << i);
+ break;
+ default:
+ return ALT_E_ERROR;
+ }
+ }
+
+ alt_write_word(ALT_SYSMGR_DMA_PERSECURITY_ADDR, dmapersecurity);
+
+ // Take DMA out of reset.
+
+ alt_clrbits_word(ALT_RSTMGR_PERMODRST_ADDR, ALT_RSTMGR_PERMODRST_DMA_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
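+
+//
+// A minimal configuration sketch (illustrative only, kept under #if 0 so it is
+// not compiled). The ALT_DMA_CFG_t field names and bounds follow their use in
+// alt_dma_init() above; the authoritative declaration lives in alt_dma.h.
+//
+#if 0
+static ALT_STATUS_CODE alt_dma_init_sketch(void)
+{
+    ALT_DMA_CFG_t dma_cfg;
+
+    // Leave the FPGA / CAN request muxing at its default (FPGA).
+    for (int i = 0; i < 4; ++i)
+    {
+        dma_cfg.periph_mux[i] = ALT_DMA_PERIPH_MUX_DEFAULT;
+    }
+
+    // Keep the manager thread, IRQs, and all peripheral interfaces secure.
+    dma_cfg.manager_sec = ALT_DMA_SECURITY_DEFAULT;
+
+    for (int i = 0; i < ALT_SYSMGR_DMA_CTL_IRQNONSECURE_WIDTH; ++i)
+    {
+        dma_cfg.irq_sec[i] = ALT_DMA_SECURITY_DEFAULT;
+    }
+
+    for (int i = 0; i < 32; ++i)
+    {
+        dma_cfg.periph_sec[i] = ALT_DMA_SECURITY_DEFAULT;
+    }
+
+    return alt_dma_init(&dma_cfg);
+}
+#endif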
+
+ALT_STATUS_CODE alt_dma_uninit(void)
+{
+    // DMAKILL all channels and free all allocated channels.
+ for (int i = 0; i < 8; ++i)
+ {
+ if (channel_info_array[i].flag & ALT_DMA_CHANNEL_INFO_FLAG_ALLOCED)
+ {
+ alt_dma_channel_kill((ALT_DMA_CHANNEL_t)i);
+ alt_dma_channel_free((ALT_DMA_CHANNEL_t)i);
+ }
+ }
+
+ // Put DMA into reset.
+
+ alt_setbits_word(ALT_RSTMGR_PERMODRST_ADDR, ALT_RSTMGR_PERMODRST_DMA_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_channel_alloc(ALT_DMA_CHANNEL_t channel)
+{
+ // Validate channel
+ switch (channel)
+ {
+ case ALT_DMA_CHANNEL_0:
+ case ALT_DMA_CHANNEL_1:
+ case ALT_DMA_CHANNEL_2:
+ case ALT_DMA_CHANNEL_3:
+ case ALT_DMA_CHANNEL_4:
+ case ALT_DMA_CHANNEL_5:
+ case ALT_DMA_CHANNEL_6:
+ case ALT_DMA_CHANNEL_7:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Verify channel is unallocated
+
+ if (channel_info_array[channel].flag & ALT_DMA_CHANNEL_INFO_FLAG_ALLOCED)
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Mark channel as allocated
+
+ channel_info_array[channel].flag |= ALT_DMA_CHANNEL_INFO_FLAG_ALLOCED;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_channel_alloc_any(ALT_DMA_CHANNEL_t * allocated)
+{
+ // Sweep channel array for unallocated channel
+
+ for (int i = 0; i < 8; ++i)
+ {
+ if (!(channel_info_array[i].flag & ALT_DMA_CHANNEL_INFO_FLAG_ALLOCED))
+ {
+ // Allocate that free channel.
+
+ ALT_STATUS_CODE status = alt_dma_channel_alloc((ALT_DMA_CHANNEL_t)i);
+ if (status == ALT_E_SUCCESS)
+ {
+ *allocated = (ALT_DMA_CHANNEL_t)i;
+ }
+ return status;
+ }
+ }
+
+ // No free channels found.
+
+ return ALT_E_ERROR;
+}
+
+ALT_STATUS_CODE alt_dma_channel_free(ALT_DMA_CHANNEL_t channel)
+{
+ // Validate channel
+ switch (channel)
+ {
+ case ALT_DMA_CHANNEL_0:
+ case ALT_DMA_CHANNEL_1:
+ case ALT_DMA_CHANNEL_2:
+ case ALT_DMA_CHANNEL_3:
+ case ALT_DMA_CHANNEL_4:
+ case ALT_DMA_CHANNEL_5:
+ case ALT_DMA_CHANNEL_6:
+ case ALT_DMA_CHANNEL_7:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Verify channel is allocated
+
+ if (!(channel_info_array[channel].flag & ALT_DMA_CHANNEL_INFO_FLAG_ALLOCED))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify channel is stopped
+
+ ALT_DMA_CHANNEL_STATE_t state;
+ ALT_STATUS_CODE status = alt_dma_channel_state_get(channel, &state);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+ if (state != ALT_DMA_CHANNEL_STATE_STOPPED)
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Mark channel as unallocated.
+
+ channel_info_array[channel].flag &= ~ALT_DMA_CHANNEL_INFO_FLAG_ALLOCED;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_channel_exec(ALT_DMA_CHANNEL_t channel, ALT_DMA_PROGRAM_t * pgm)
+{
+ // Validate channel
+ switch (channel)
+ {
+ case ALT_DMA_CHANNEL_0:
+ case ALT_DMA_CHANNEL_1:
+ case ALT_DMA_CHANNEL_2:
+ case ALT_DMA_CHANNEL_3:
+ case ALT_DMA_CHANNEL_4:
+ case ALT_DMA_CHANNEL_5:
+ case ALT_DMA_CHANNEL_6:
+ case ALT_DMA_CHANNEL_7:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Verify channel is allocated
+
+ if (!(channel_info_array[channel].flag & ALT_DMA_CHANNEL_INFO_FLAG_ALLOCED))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify channel is stopped
+
+ ALT_DMA_CHANNEL_STATE_t state;
+ ALT_STATUS_CODE status = alt_dma_channel_state_get(channel, &state);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+ if (state != ALT_DMA_CHANNEL_STATE_STOPPED)
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Validate the program
+
+ if (alt_dma_program_validate(pgm) != ALT_E_SUCCESS)
+ {
+ return ALT_E_ERROR;
+ }
+
+ //
+ // Execute the program
+ //
+
+ // Get the start address
+
+ uint32_t start = (uint32_t) &pgm->program[pgm->buffer_start];
+
+ dprintf("DMA[exec]: pgm->program = %p.\n", pgm->program);
+ dprintf("DMA[exec]: start = %p.\n", (void *)start);
+
+    // Configure DBGINST0 and DBGINST1 to execute DMAGO targeting the requested channel.
+
+ // For information on APB Interface, see PL330, section 2.5.1.
+ // For information on DBGINSTx, see PL330, section 3.3.20 - 3.3.21.
+ // For information on DMAGO, see PL330, section 4.3.5.
+
+ alt_write_word(ALT_DMA_DBGINST0_ADDR(ALT_DMASECURE_ADDR),
+ ALT_DMA_DBGINST0_INSTRUCTIONBYTE0_SET(0xa0) |
+ ALT_DMA_DBGINST0_INSTRUCTIONBYTE1_SET(channel));
+
+ alt_write_word(ALT_DMA_DBGINST1_ADDR(ALT_DMASECURE_ADDR), start);
+
+ // Execute the instruction held in DBGINST{0,1}
+
+ // For information on DBGCMD, see PL330, section 3.3.19.
+
+ alt_write_word(ALT_DMA_DBGCMD_ADDR(ALT_DMASECURE_ADDR), 0);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_channel_kill(ALT_DMA_CHANNEL_t channel)
+{
+ // Validate channel
+ switch (channel)
+ {
+ case ALT_DMA_CHANNEL_0:
+ case ALT_DMA_CHANNEL_1:
+ case ALT_DMA_CHANNEL_2:
+ case ALT_DMA_CHANNEL_3:
+ case ALT_DMA_CHANNEL_4:
+ case ALT_DMA_CHANNEL_5:
+ case ALT_DMA_CHANNEL_6:
+ case ALT_DMA_CHANNEL_7:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Verify channel is allocated
+
+ if (!(channel_info_array[channel].flag & ALT_DMA_CHANNEL_INFO_FLAG_ALLOCED))
+ {
+ return ALT_E_ERROR;
+ }
+
+    // NOTE: Don't worry about the current channel state. Just issue the
+    //   DMAKILL instruction. The channel state cannot move from Stopped back
+    //   to Killing.
+
+ // Configure DBGINST0 to execute DMAKILL on the requested channel thread.
+ // DMAKILL is short enough not to use DBGINST1 register.
+
+ // For information on APB Interface, see PL330, section 2.5.1.
+ // For information on DBGINSTx, see PL330, section 3.3.20 - 3.3.21.
+ // For information on DMAKILL, see PL330, section 4.3.6.
+
+ alt_write_word(ALT_DMA_DBGINST0_ADDR(ALT_DMASECURE_ADDR),
+ ALT_DMA_DBGINST0_INSTRUCTIONBYTE0_SET(0x1) |
+ ALT_DMA_DBGINST0_CHANNELNUMBER_SET(channel) |
+ ALT_DMA_DBGINST0_DEBUGTHREAD_SET(ALT_DMA_DBGINST0_DEBUGTHREAD_E_CHANNEL));
+
+ // Execute the instruction held in DBGINST0
+
+ // For information on DBGCMD, see PL330, section 3.3.19.
+
+ alt_write_word(ALT_DMA_DBGCMD_ADDR(ALT_DMASECURE_ADDR), 0);
+
+    // Wait for the channel to move to the KILLING or STOPPED state. Do not
+    //   wait for STOPPED only: if the AXI transaction hangs permanently, the
+    //   wait could last indefinitely.
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+ ALT_DMA_CHANNEL_STATE_t current;
+ uint32_t i = 20000;
+
+ while (--i)
+ {
+ status = alt_dma_channel_state_get(channel, &current);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+ if ( (current == ALT_DMA_CHANNEL_STATE_KILLING)
+ || (current == ALT_DMA_CHANNEL_STATE_STOPPED))
+ {
+ break;
+ }
+ }
+
+ if (i == 0)
+ {
+ status = ALT_E_TMO;
+ }
+
+ return status;
+}
+
+ALT_STATUS_CODE alt_dma_channel_reg_get(ALT_DMA_CHANNEL_t channel,
+ ALT_DMA_PROGRAM_REG_t reg, uint32_t * val)
+{
+ // Validate channel
+ switch (channel)
+ {
+ case ALT_DMA_CHANNEL_0:
+ case ALT_DMA_CHANNEL_1:
+ case ALT_DMA_CHANNEL_2:
+ case ALT_DMA_CHANNEL_3:
+ case ALT_DMA_CHANNEL_4:
+ case ALT_DMA_CHANNEL_5:
+ case ALT_DMA_CHANNEL_6:
+ case ALT_DMA_CHANNEL_7:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // For information on SAR, see PL330, section 3.3.13.
+ // For information on DAR, see PL330, section 3.3.14.
+ // For information on CCR, see PL330, section 3.3.15.
+
+ switch (reg)
+ {
+ case ALT_DMA_PROGRAM_REG_SAR:
+ *val = alt_read_word(ALT_DMA_SARx_ADDR(ALT_DMASECURE_ADDR, channel));
+ break;
+ case ALT_DMA_PROGRAM_REG_DAR:
+ *val = alt_read_word(ALT_DMA_DARx_ADDR(ALT_DMASECURE_ADDR, channel));
+ break;
+ case ALT_DMA_PROGRAM_REG_CCR:
+ *val = alt_read_word(ALT_DMA_CCRx_ADDR(ALT_DMASECURE_ADDR, channel));
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_send_event(ALT_DMA_EVENT_t evt_num)
+{
+ // Validate evt_num
+
+ switch (evt_num)
+ {
+ case ALT_DMA_EVENT_0:
+ case ALT_DMA_EVENT_1:
+ case ALT_DMA_EVENT_2:
+ case ALT_DMA_EVENT_3:
+ case ALT_DMA_EVENT_4:
+ case ALT_DMA_EVENT_5:
+ case ALT_DMA_EVENT_6:
+ case ALT_DMA_EVENT_7:
+ case ALT_DMA_EVENT_ABORT:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Issue the DMASEV on the DMA manager thread.
+ // DMASEV is short enough not to use DBGINST1 register.
+
+ // For information on APB Interface, see PL330, section 2.5.1.
+ // For information on DBGINSTx, see PL330, section 3.3.20 - 3.3.21.
+ // For information on DMASEV, see PL330, section 4.3.15.
+
+ alt_write_word(ALT_DMA_DBGINST0_ADDR(ALT_DMASECURE_ADDR),
+ ALT_DMA_DBGINST0_INSTRUCTIONBYTE0_SET(0x34) | // opcode for DMASEV
+ ALT_DMA_DBGINST0_INSTRUCTIONBYTE1_SET(evt_num << 3) |
+ ALT_DMA_DBGINST0_DEBUGTHREAD_SET(ALT_DMA_DBGINST0_DEBUGTHREAD_E_MANAGER)
+ );
+
+ // Execute the instruction held in DBGINST0
+
+ // For information on DBGCMD, see PL330, section 3.3.19.
+
+ alt_write_word(ALT_DMA_DBGCMD_ADDR(ALT_DMASECURE_ADDR), 0);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_manager_state_get(ALT_DMA_MANAGER_STATE_t * state)
+{
+ // For information on DSR, see PL330, section 3.3.1.
+
+ uint32_t raw_state = alt_read_word(ALT_DMA_DSR_ADDR(ALT_DMASECURE_ADDR));
+
+ *state = (ALT_DMA_MANAGER_STATE_t)ALT_DMA_DSR_DMASTATUS_GET(raw_state);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_channel_state_get(ALT_DMA_CHANNEL_t channel,
+ ALT_DMA_CHANNEL_STATE_t * state)
+{
+ // Validate channel
+ switch (channel)
+ {
+ case ALT_DMA_CHANNEL_0:
+ case ALT_DMA_CHANNEL_1:
+ case ALT_DMA_CHANNEL_2:
+ case ALT_DMA_CHANNEL_3:
+ case ALT_DMA_CHANNEL_4:
+ case ALT_DMA_CHANNEL_5:
+ case ALT_DMA_CHANNEL_6:
+ case ALT_DMA_CHANNEL_7:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // For information on CSR, see PL330, section 3.3.11.
+
+ uint32_t raw_state = alt_read_word(ALT_DMA_CSRx_ADDR(ALT_DMASECURE_ADDR, channel));
+
+ *state = (ALT_DMA_CHANNEL_STATE_t)ALT_DMA_CSRx_CHANNELSTATUS_GET(raw_state);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_manager_fault_status_get(ALT_DMA_MANAGER_FAULT_t * fault)
+{
+ // For information on FTRD, see PL330, section 3.3.9.
+
+ *fault = (ALT_DMA_MANAGER_FAULT_t)alt_read_word(ALT_DMA_FTRD_ADDR(ALT_DMASECURE_ADDR));
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_channel_fault_status_get(ALT_DMA_CHANNEL_t channel,
+ ALT_DMA_CHANNEL_FAULT_t * fault)
+{
+ // Validate channel
+ switch (channel)
+ {
+ case ALT_DMA_CHANNEL_0:
+ case ALT_DMA_CHANNEL_1:
+ case ALT_DMA_CHANNEL_2:
+ case ALT_DMA_CHANNEL_3:
+ case ALT_DMA_CHANNEL_4:
+ case ALT_DMA_CHANNEL_5:
+ case ALT_DMA_CHANNEL_6:
+ case ALT_DMA_CHANNEL_7:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // For information on FTR, see PL330, section 3.3.10.
+
+ *fault = (ALT_DMA_CHANNEL_FAULT_t)alt_read_word(ALT_DMA_FTRx_ADDR(ALT_DMASECURE_ADDR, channel));
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_event_int_select(ALT_DMA_EVENT_t evt_num,
+ ALT_DMA_EVENT_SELECT_t opt)
+{
+ // Validate evt_num
+ switch (evt_num)
+ {
+ case ALT_DMA_EVENT_0:
+ case ALT_DMA_EVENT_1:
+ case ALT_DMA_EVENT_2:
+ case ALT_DMA_EVENT_3:
+ case ALT_DMA_EVENT_4:
+ case ALT_DMA_EVENT_5:
+ case ALT_DMA_EVENT_6:
+ case ALT_DMA_EVENT_7:
+ case ALT_DMA_EVENT_ABORT:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // For information on INTEN, see PL330, section 3.3.3.
+
+ switch (opt)
+ {
+ case ALT_DMA_EVENT_SELECT_SEND_EVT:
+ alt_clrbits_word(ALT_DMA_INTEN_ADDR(ALT_DMASECURE_ADDR), 1 << evt_num);
+ break;
+ case ALT_DMA_EVENT_SELECT_SIG_IRQ:
+ alt_setbits_word(ALT_DMA_INTEN_ADDR(ALT_DMASECURE_ADDR), 1 << evt_num);
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_event_int_status_get_raw(ALT_DMA_EVENT_t evt_num)
+{
+ // Validate evt_num
+ switch (evt_num)
+ {
+ case ALT_DMA_EVENT_0:
+ case ALT_DMA_EVENT_1:
+ case ALT_DMA_EVENT_2:
+ case ALT_DMA_EVENT_3:
+ case ALT_DMA_EVENT_4:
+ case ALT_DMA_EVENT_5:
+ case ALT_DMA_EVENT_6:
+ case ALT_DMA_EVENT_7:
+ case ALT_DMA_EVENT_ABORT:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // For information on INT_EVENT_RIS, see PL330, section 3.3.4.
+
+ uint32_t status_raw = alt_read_word(ALT_DMA_INT_EVENT_RIS_ADDR(ALT_DMASECURE_ADDR));
+
+ if (status_raw & (1 << evt_num))
+ {
+ return ALT_E_TRUE;
+ }
+ else
+ {
+ return ALT_E_FALSE;
+ }
+}
+
+ALT_STATUS_CODE alt_dma_int_status_get(ALT_DMA_EVENT_t irq_num)
+{
+    // Validate irq_num
+ switch (irq_num)
+ {
+ case ALT_DMA_EVENT_0:
+ case ALT_DMA_EVENT_1:
+ case ALT_DMA_EVENT_2:
+ case ALT_DMA_EVENT_3:
+ case ALT_DMA_EVENT_4:
+ case ALT_DMA_EVENT_5:
+ case ALT_DMA_EVENT_6:
+ case ALT_DMA_EVENT_7:
+ case ALT_DMA_EVENT_ABORT:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // For information on INTMIS, see PL330, section 3.3.5.
+
+ uint32_t int_status = alt_read_word(ALT_DMA_INTMIS_ADDR(ALT_DMASECURE_ADDR));
+
+ if (int_status & (1 << irq_num))
+ {
+ return ALT_E_TRUE;
+ }
+ else
+ {
+ return ALT_E_FALSE;
+ }
+}
+
+ALT_STATUS_CODE alt_dma_int_clear(ALT_DMA_EVENT_t irq_num)
+{
+    // Validate irq_num
+ switch (irq_num)
+ {
+ case ALT_DMA_EVENT_0:
+ case ALT_DMA_EVENT_1:
+ case ALT_DMA_EVENT_2:
+ case ALT_DMA_EVENT_3:
+ case ALT_DMA_EVENT_4:
+ case ALT_DMA_EVENT_5:
+ case ALT_DMA_EVENT_6:
+ case ALT_DMA_EVENT_7:
+ case ALT_DMA_EVENT_ABORT:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // For information on INTCLR, see PL330, section 3.3.6.
+
+ alt_write_word(ALT_DMA_INTCLR_ADDR(ALT_DMASECURE_ADDR), 1 << irq_num);
+
+ return ALT_E_SUCCESS;
+}
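+
+//
+// A polling sketch for the event / interrupt API above (illustrative only,
+// kept under #if 0 so it is not compiled). It assumes a DMA program ending
+// with DMASEV on ALT_DMA_EVENT_0 is started after the routing step below.
+//
+#if 0
+static ALT_STATUS_CODE alt_dma_wait_event0_sketch(void)
+{
+    // Route event 0 to the interrupt signal rather than a DMASEV event.
+    ALT_STATUS_CODE status = alt_dma_event_int_select(ALT_DMA_EVENT_0,
+                                                      ALT_DMA_EVENT_SELECT_SIG_IRQ);
+    if (status != ALT_E_SUCCESS)
+    {
+        return status;
+    }
+
+    // Busy-wait until the interrupt status for event 0 is set. A real caller
+    // would bound this loop or service it from an interrupt handler.
+    while (alt_dma_int_status_get(ALT_DMA_EVENT_0) != ALT_E_TRUE)
+    {
+    }
+
+    // Acknowledge the interrupt.
+    return alt_dma_int_clear(ALT_DMA_EVENT_0);
+}
+#endif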
+
+/////
+
+ALT_STATUS_CODE alt_dma_memory_to_memory(ALT_DMA_CHANNEL_t channel,
+ ALT_DMA_PROGRAM_t * program,
+ void * dst,
+ const void * src,
+ size_t size,
+ bool send_evt,
+ ALT_DMA_EVENT_t evt)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ // If the size is zero, and no event is requested, just return success.
+ if ((size == 0) && (send_evt == false))
+ {
+ return status;
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_init(program);
+ }
+
+ if (size != 0)
+ {
+ uintptr_t udst = (uintptr_t)dst;
+ uintptr_t usrc = (uintptr_t)src;
+
+ dprintf("DMA[M->M]: dst = %p.\n", dst);
+ dprintf("DMA[M->M]: src = %p.\n", src);
+ dprintf("DMA[M->M]: size = 0x%x.\n", size);
+
+        // Detect if either memory region overshoots the address space.
+
+ if (udst + size - 1 < udst)
+ {
+ return ALT_E_BAD_ARG;
+ }
+ if (usrc + size - 1 < usrc)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+        // Detect if the memory regions overlap.
+
+ if (udst > usrc)
+ {
+ if (usrc + size - 1 > udst)
+ {
+ return ALT_E_BAD_ARG;
+ }
+ }
+ else
+ {
+ if (udst + size - 1 > usrc)
+ {
+ return ALT_E_BAD_ARG;
+ }
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_SAR, usrc);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_DAR, udst);
+ }
+
+ size_t sizeleft = size;
+
+ //
+ // The algorithm uses the strategy described in PL330 B.3.1.
+ // It is extended for 2-byte and 1-byte unaligned cases.
+ //
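+        //
+        // Worked example (illustrative): for usrc ending in ...3 and
+        // size = 100 bytes the code below emits
+        //   - 5 pre-alignment 1-byte transfers (MIN(8 - 3, 100)), leaving 95,
+        //   - 11 main 8-byte bursts (95 >> 3, i.e. 88 bytes), fewer than 16,
+        //     so no 16-length burst loop is generated,
+        //   - one correction DMAST if usrc and udst are not mod-8 congruent,
+        //   - 7 trailing 1-byte transfers (95 & 0x7).
+        //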
+
+ // First see how many byte(s) we need to transfer to get src to be 8 byte aligned
+ if (usrc & 0x7)
+ {
+ uint32_t aligncount = MIN(8 - (usrc & 0x7), sizeleft);
+ sizeleft -= aligncount;
+
+ dprintf("DMA[M->M]: Total pre-alignment 1-byte burst size tranfer(s): %lu.\n", aligncount);
+
+ // Program in the following parameters:
+ // - SS8 (Source burst size of 1-byte)
+ // - DS8 (Destination burst size of 1-byte)
+ // - SBx (Source burst length of [aligncount] transfers)
+ // - DBx (Destination burst length of [aligncount] transfers)
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ((aligncount - 1) << 4) // SB
+ | ALT_DMA_CCR_OPT_SS8
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ((aligncount - 1) << 18) // DB
+ | ALT_DMA_CCR_OPT_DS8
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+
+ // This is the number of 8-byte bursts
+ uint32_t burstcount = sizeleft >> 3;
+
+ bool correction = (burstcount != 0);
+
+ // Update the size left to transfer
+ sizeleft &= 0x7;
+
+ dprintf("DMA[M->M]: Total Main 8-byte burst size transfer(s): %lu.\n", burstcount);
+ dprintf("DMA[M->M]: Total Main 1-byte burst size transfer(s): %u.\n", sizeleft);
+
+ // Determine how many 16 length bursts can be done
+
+ if (burstcount >> 4)
+ {
+ uint32_t length16burstcount = burstcount >> 4;
+ burstcount &= 0xf;
+
+ dprintf("DMA[M->M]: Number of 16 burst length 8-byte transfer(s): %lu.\n", length16burstcount);
+ dprintf("DMA[M->M]: Number of remaining 8-byte transfer(s): %lu.\n", burstcount);
+
+ // Program in the following parameters:
+ // - SS64 (Source burst size of 8-byte)
+ // - DS64 (Destination burst size of 8-byte)
+ // - SB16 (Source burst length of 16 transfers)
+ // - DB16 (Destination burst length of 16 transfers)
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SB16
+ | ALT_DMA_CCR_OPT_SS64
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DB16
+ | ALT_DMA_CCR_OPT_DS64
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+
+ while (length16burstcount > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t loopcount = MIN(length16burstcount, 256);
+ length16burstcount -= loopcount;
+
+ dprintf("DMA[M->M]: Looping %lux 16 burst length 8-byte transfer(s).\n", loopcount);
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+ }
+
+ // At this point, we should have [burstcount] 8-byte transfer(s)
+ // remaining. [burstcount] should be less than 16.
+
+ // Do one more burst with a SB / DB of length [burstcount].
+
+ if (burstcount)
+ {
+ // Program in the following parameters:
+ // - SS64 (Source burst size of 8-byte)
+ // - DS64 (Destination burst size of 8-byte)
+ // - SBx (Source burst length of [burstlength] transfers)
+ // - DBx (Destination burst length of [burstlength] transfers)
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ((burstcount - 1) << 4) // SB
+ | ALT_DMA_CCR_OPT_SS64
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ((burstcount - 1) << 18) // DB
+ | ALT_DMA_CCR_OPT_DS64
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+
+ // This is where the last DMAMOV CCR and DMAST is done if an
+        // alignment correction is required.
+
+ if ( (correction == true)
+ && ((usrc & 0x7) != (udst & 0x7)) // If src and dst are mod-8 congruent, no correction is needed.
+ )
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ // Determine what type of correction.
+
+                // Set the source parameters to match those of the destination
+                // parameters. This way the SAR is incremented in the same
+                // fashion as the DAR, which allows the non 8-byte transfers to
+                // copy correctly.
+
+ uint32_t ccr;
+
+ if ((usrc & 0x3) == (udst & 0x3))
+ {
+ dprintf("DMA[M->M]: Single correction 4-byte burst size tranfer.\n");
+
+ // Program in the following parameters:
+ // - SS32 (Source burst size of 4-byte)
+ // - DS32 (Destination burst size of 4-byte)
+ // - SB1 (Source burst length of 1 transfer)
+ // - DB1 (Destination burst length of 1 transfer)
+ // - All other options default.
+
+ ccr = ( ALT_DMA_CCR_OPT_SB1
+ | ALT_DMA_CCR_OPT_SS32
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DB1
+ | ALT_DMA_CCR_OPT_DS32
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ );
+ }
+ else if ((usrc & 0x1) == (udst & 0x1))
+ {
+ dprintf("DMA[M->M]: Single correction 2-byte burst size tranfer.\n");
+
+ // Program in the following parameters:
+ // - SS16 (Source burst size of 2-byte)
+ // - DS16 (Destination burst size of 2-byte)
+ // - SB1 (Source burst length of 1 transfer)
+ // - DB1 (Destination burst length of 1 transfer)
+ // - All other options default.
+
+ ccr = ( ALT_DMA_CCR_OPT_SB1
+ | ALT_DMA_CCR_OPT_SS16
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DB1
+ | ALT_DMA_CCR_OPT_DS16
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ );
+ }
+ else
+ {
+ dprintf("DMA[M->M]: Single correction 1-byte burst size tranfer.\n");
+
+ // Program in the following parameters:
+ // - SS8 (Source burst size of 1-byte)
+ // - DS8 (Destination burst size of 1-byte)
+ // - SB1 (Source burst length of 1 transfer)
+ // - DB1 (Destination burst length of 1 transfer)
+ // - All other options default.
+
+ ccr = ( ALT_DMA_CCR_OPT_SB1
+ | ALT_DMA_CCR_OPT_SS8
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DB1
+ | ALT_DMA_CCR_OPT_DS8
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ );
+ }
+
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ccr);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+
+ // At this point, there should be 0 - 7 1-byte transfers remaining.
+
+ if (sizeleft)
+ {
+ dprintf("DMA[M->M]: Total post 1-byte burst size tranfer(s): %u.\n", sizeleft);
+
+ // Program in the following parameters:
+ // - SS8 (Source burst size of 1-byte)
+ // - DS8 (Destination burst size of 1-byte)
+ // - SBx (Source burst length of [sizeleft] transfers)
+ // - DBx (Destination burst length of [sizeleft] transfers)
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ((sizeleft - 1) << 4) // SB
+ | ALT_DMA_CCR_OPT_SS8
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ((sizeleft - 1) << 18) // DB
+ | ALT_DMA_CCR_OPT_DS8
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+ } // if (size != 0)
+
+ // Send event if requested.
+ if (send_evt)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DMA[M->M]: Adding event ...\n");
+ status = alt_dma_program_DMASEV(program, evt);
+ }
+ }
+
+ // Now that everything is done, end the program.
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAEND(program);
+ }
+
+ // If there was a problem assembling the program, clean up the buffer and exit.
+ if (status != ALT_E_SUCCESS)
+ {
+    // Do not report the status for the clear operation. A failure should be
+    // reported regardless of whether the clear is successful.
+ alt_dma_program_clear(program);
+ return status;
+ }
+
+ // Execute the program on the given channel.
+ return alt_dma_channel_exec(channel, program);
+}
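+
+//
+// A minimal end-to-end sketch for the helper above (illustrative only, kept
+// under #if 0 so it is not compiled): allocate any free channel, run the
+// copy without an event, poll the channel until it stops, then free it.
+//
+#if 0
+static ALT_STATUS_CODE alt_dma_memcpy_sketch(void * dst, const void * src, size_t size)
+{
+    ALT_DMA_CHANNEL_t channel;
+    ALT_DMA_PROGRAM_t program;
+
+    ALT_STATUS_CODE status = alt_dma_channel_alloc_any(&channel);
+    if (status != ALT_E_SUCCESS)
+    {
+        return status;
+    }
+
+    status = alt_dma_memory_to_memory(channel, &program, dst, src, size,
+                                      false, ALT_DMA_EVENT_0);
+
+    if (status == ALT_E_SUCCESS)
+    {
+        // Poll until the channel returns to the Stopped state. A real caller
+        // would bound this loop.
+        ALT_DMA_CHANNEL_STATE_t state;
+        do
+        {
+            status = alt_dma_channel_state_get(channel, &state);
+        } while ((status == ALT_E_SUCCESS) && (state != ALT_DMA_CHANNEL_STATE_STOPPED));
+    }
+
+    alt_dma_channel_free(channel);
+    return status;
+}
+#endif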
+
+ALT_STATUS_CODE alt_dma_zero_to_memory(ALT_DMA_CHANNEL_t channel,
+ ALT_DMA_PROGRAM_t * program,
+ void * buf,
+ size_t size,
+ bool send_evt,
+ ALT_DMA_EVENT_t evt)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ // If the size is zero, and no event is requested, just return success.
+ if ((size == 0) && (send_evt == false))
+ {
+ return status;
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_init(program);
+ }
+
+ if (size != 0)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_DAR, (uint32_t)buf);
+ }
+
+ dprintf("DMA[Z->M]: buf = %p.\n", buf);
+ dprintf("DMA[Z->M]: size = 0x%x.\n", size);
+
+ size_t sizeleft = size;
+
+ // First see how many byte(s) we need to transfer to get dst to be 8 byte aligned.
+ if ((uint32_t)buf & 0x7)
+ {
+ uint32_t aligncount = MIN(8 - ((uint32_t)buf & 0x7), sizeleft);
+ sizeleft -= aligncount;
+
+ dprintf("DMA[Z->M]: Total pre-alignment 1-byte burst size tranfer(s): %lu.\n", aligncount);
+
+ // Program in the following parameters:
+ // - DS8 (Destination burst size of 1-byte)
+ // - DBx (Destination burst length of [aligncount] transfers)
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SB_DEFAULT
+ | ALT_DMA_CCR_OPT_SS_DEFAULT
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ((aligncount - 1) << 18) // DB
+ | ALT_DMA_CCR_OPT_DS8
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMASTZ(program);
+ }
+ }
+
+ // This is the number of 8-byte bursts left
+ uint32_t burstcount = sizeleft >> 3;
+
+ // Update the size left to transfer
+ sizeleft &= 0x7;
+
+ dprintf("DMA[Z->M]: Total Main 8-byte burst size transfer(s): %lu.\n", burstcount);
+ dprintf("DMA[Z->M]: Total Main 1-byte burst size transfer(s): %u.\n", sizeleft);
+
+ // Determine how many 16 length bursts can be done
+ if (burstcount >> 4)
+ {
+ uint32_t length16burstcount = burstcount >> 4;
+ burstcount &= 0xf;
+
+ dprintf("DMA[Z->M]: Number of 16 burst length 8-byte transfer(s): %lu.\n", length16burstcount);
+ dprintf("DMA[Z->M]: Number of remaining 8-byte transfer(s): %lu.\n", burstcount);
+
+ // Program in the following parameters:
+ // - DS64 (Destination burst size of 8-byte)
+ // - DB16 (Destination burst length of 16 transfers)
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SB_DEFAULT
+ | ALT_DMA_CCR_OPT_SS_DEFAULT
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DB16
+ | ALT_DMA_CCR_OPT_DS64
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+
+ while (length16burstcount > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t loopcount = MIN(length16burstcount, 256);
+ length16burstcount -= loopcount;
+
+ dprintf("DMA[Z->M]: Looping %lux 16 burst length 8-byte transfer(s).\n", loopcount);
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMASTZ(program);
+ }
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+ }
+
+ // At this point, we should have [burstcount] 8-byte transfer(s)
+ // remaining. [burstcount] should be less than 16.
+
+ // Do one more burst with a SB / DB of length [burstcount].
+
+ if (burstcount)
+ {
+ // Program in the following parameters:
+ // - DS64 (Destination burst size of 8-byte)
+ // - DBx (Destination burst length of [burstlength] transfers)
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SB_DEFAULT
+ | ALT_DMA_CCR_OPT_SS_DEFAULT
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ((burstcount - 1) << 18) // DB
+ | ALT_DMA_CCR_OPT_DS64
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMASTZ(program);
+ }
+ }
+
+ // At this point, there should be 0 - 7 1-byte transfers remaining.
+
+ if (sizeleft)
+ {
+ dprintf("DMA[Z->M]: Total post 1-byte burst size tranfer(s): %u.\n", sizeleft);
+
+ // Program in the following parameters:
+ // - DS8 (Destination burst size of 1-byte)
+ // - DBx (Destination burst length of [sizeleft] transfers)
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SB_DEFAULT
+ | ALT_DMA_CCR_OPT_SS_DEFAULT
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ((sizeleft - 1) << 18) // DB
+ | ALT_DMA_CCR_OPT_DS8
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMASTZ(program);
+ }
+ }
+ } // if (size != 0)
+
+ // Send event if requested.
+ if (send_evt)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DMA[Z->M]: Adding event ...\n");
+ status = alt_dma_program_DMASEV(program, evt);
+ }
+ }
+
+ // Now that everything is done, end the program.
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAEND(program);
+ }
+
+ // If there was a problem assembling the program, clean up the buffer and exit.
+ if (status != ALT_E_SUCCESS)
+ {
+    // Do not report the status for the clear operation. A failure should be
+    // reported regardless of whether the clear is successful.
+ alt_dma_program_clear(program);
+ return status;
+ }
+
+ // Execute the program on the given channel.
+ return alt_dma_channel_exec(channel, program);
+}
+
+ALT_STATUS_CODE alt_dma_memory_to_register(ALT_DMA_CHANNEL_t channel,
+ ALT_DMA_PROGRAM_t * program,
+ void * dst_reg,
+ const void * src_buf,
+ size_t count,
+ uint32_t register_width_bits,
+ bool send_evt,
+ ALT_DMA_EVENT_t evt)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ // If the count is zero, and no event is requested, just return success.
+ if ((count == 0) && (send_evt == false))
+ {
+ return status;
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_init(program);
+ }
+
+ if (count != 0)
+ {
+ // Verify valid register_width_bits and construct the CCR SS and DS parameters.
+ uint32_t ccr_ss_ds_mask = 0;
+
+ if (status == ALT_E_SUCCESS)
+ {
+ switch (register_width_bits)
+ {
+ case 8:
+ // Program in the following parameters:
+ // - SS8 (Source burst size of 8 bits)
+ // - DS8 (Destination burst size of 8 bits)
+ ccr_ss_ds_mask = ALT_DMA_CCR_OPT_SS8 | ALT_DMA_CCR_OPT_DS8;
+ break;
+ case 16:
+ // Program in the following parameters:
+ // - SS16 (Source burst size of 16 bits)
+ // - DS16 (Destination burst size of 16 bits)
+ ccr_ss_ds_mask = ALT_DMA_CCR_OPT_SS16 | ALT_DMA_CCR_OPT_DS16;
+ break;
+ case 32:
+ // Program in the following parameters:
+ // - SS32 (Source burst size of 32 bits)
+ // - DS32 (Destination burst size of 32 bits)
+ ccr_ss_ds_mask = ALT_DMA_CCR_OPT_SS32 | ALT_DMA_CCR_OPT_DS32;
+ break;
+ case 64:
+ // Program in the following parameters:
+ // - SS64 (Source burst size of 64 bits)
+ // - DS64 (Destination burst size of 64 bits)
+ ccr_ss_ds_mask = ALT_DMA_CCR_OPT_SS64 | ALT_DMA_CCR_OPT_DS64;
+ break;
+ default:
+ status = ALT_E_BAD_ARG;
+ break;
+ }
+ }
+
+ // Verify that the dst_reg and src_buf are aligned to the register width
+ if (status == ALT_E_SUCCESS)
+ {
+ if (((uintptr_t)dst_reg & ((register_width_bits >> 3) - 1)) != 0)
+ {
+ status = ALT_E_BAD_ARG;
+ }
+ else if (((uintptr_t)src_buf & ((register_width_bits >> 3) - 1)) != 0)
+ {
+ status = ALT_E_BAD_ARG;
+ }
+ else
+ {
+ dprintf("DMA[M->R]: dst_reg = %p.\n", dst_reg);
+ dprintf("DMA[M->R]: src_buf = %p.\n", src_buf);
+ dprintf("DMA[M->R]: count = 0x%x.\n", count);
+ }
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_SAR, (uint32_t)src_buf);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_DAR, (uint32_t)dst_reg);
+ }
+
+ // This is the remaining count left to process.
+ uint32_t countleft = count;
+
+ // See how many 16-length bursts we can use
+ if (countleft >> 4)
+ {
+ // Program in the following parameters:
+ // - SSx (Source burst size of [ccr_ss_ds_mask])
+ // - DSx (Destination burst size of [ccr_ss_ds_mask])
+ // - DAF (Destination address fixed)
+ // - SB16 (Source burst length of 16 transfers)
+ // - DB16 (Destination burst length of 16 transfers)
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ccr_ss_ds_mask
+ | ALT_DMA_CCR_OPT_SB16
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DB16
+ | ALT_DMA_CCR_OPT_DAF
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+
+ uint32_t length16burst = countleft >> 4;
+ countleft &= 0xf;
+
+ dprintf("DMA[M->R]: Number of 16 burst length transfer(s): %lu.\n", length16burst);
+ dprintf("DMA[M->R]: Number of remaining transfer(s): %lu.\n", countleft);
+
+ // See how many 256x 16-length bursts we can use
+ if (length16burst >> 8)
+ {
+ uint32_t loop256length16burst = length16burst >> 8;
+ length16burst &= ((1 << 8) - 1);
+
+ dprintf("DMA[M->R]: Number of 256-looped 16 burst length transfer(s): %lu.\n", loop256length16burst);
+ dprintf("DMA[M->R]: Number of remaining 16 burst length transfer(s): %lu.\n", length16burst);
+
+ while (loop256length16burst > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t loopcount = MIN(loop256length16burst, 256);
+ loop256length16burst -= loopcount;
+
+ dprintf("DMA[M->R]: Looping %lux super loop transfer(s).\n", loopcount);
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALP(program, 256);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+ }
+
+ // The super loop above ensures that the length16burst is below 256.
+ if (length16burst > 0)
+ {
+ uint32_t loopcount = length16burst;
+ length16burst = 0;
+
+ dprintf("DMA[M->R]: Looping %lux 16 burst length transfer(s).\n", loopcount);
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+ }
+
+ // At this point, we should have [countleft] transfer(s) remaining.
+ // [countleft] should be less than 16.
+
+ if (countleft)
+ {
+ // Program in the following parameters:
+ // - SSx (Source burst size of [ccr_ss_ds_mask])
+ // - DSx (Destination burst size of [ccr_ss_ds_mask])
+ // - DAF (Destination address fixed)
+ // - SBx (Source burst length of [countleft] transfer(s))
+ // - DBx (Destination burst length of [countleft] transfer(s))
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DMA[M->R]: Tail end %lux transfer(s).\n", countleft);
+
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ccr_ss_ds_mask
+ | ((countleft - 1) << 4) // SB
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ((countleft - 1) << 18) // DB
+ | ALT_DMA_CCR_OPT_DAF
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+
+ } // if (count != 0)
+
+ // Send event if requested.
+ if (send_evt)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DMA[M->R]: Adding event ...\n");
+ status = alt_dma_program_DMASEV(program, evt);
+ }
+ }
+
+ // Now that everything is done, end the program.
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DMA[M->R]: DMAEND program.\n");
+ status = alt_dma_program_DMAEND(program);
+ }
+
+ // If there was a problem assembling the program, clean up the buffer and exit.
+ if (status != ALT_E_SUCCESS)
+ {
+    // Do not report the status for the clear operation. A failure should be
+    // reported regardless of whether the clear is successful.
+ alt_dma_program_clear(program);
+ return status;
+ }
+
+ // Execute the program on the given channel.
+ return alt_dma_channel_exec(channel, program);
+}
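+
+//
+// A minimal call sketch for the helper above (illustrative only, kept under
+// #if 0 so it is not compiled). The FIFO register pointer is hypothetical and
+// must come from the actual peripheral being serviced; the buffer must be
+// aligned to the register width.
+//
+#if 0
+static ALT_STATUS_CODE alt_dma_fill_fifo_sketch(ALT_DMA_CHANNEL_t channel,
+                                                ALT_DMA_PROGRAM_t * program,
+                                                void * fifo_reg,
+                                                const uint32_t * src_buf)
+{
+    return alt_dma_memory_to_register(channel, program,
+                                      fifo_reg,      // dst_reg: fixed destination address
+                                      src_buf,       // src_buf: must be 4-byte aligned here
+                                      128,           // count, in register-width units
+                                      32,            // register_width_bits
+                                      false,         // no completion event
+                                      ALT_DMA_EVENT_0);
+}
+#endif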
+
+ALT_STATUS_CODE alt_dma_register_to_memory(ALT_DMA_CHANNEL_t channel,
+ ALT_DMA_PROGRAM_t * program,
+ void * dst_buf,
+ const void * src_reg,
+ size_t count,
+ uint32_t register_width_bits,
+ bool send_evt,
+ ALT_DMA_EVENT_t evt)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ // If the count is zero, and no event is requested, just return success.
+ if ((count == 0) && (send_evt == false))
+ {
+ return status;
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_init(program);
+ }
+
+ if (count != 0)
+ {
+ // Verify valid register_width_bits and construct the CCR SS and DS parameters.
+ uint32_t ccr_ss_ds_mask = 0;
+
+ if (status == ALT_E_SUCCESS)
+ {
+ switch (register_width_bits)
+ {
+ case 8:
+ // Program in the following parameters:
+ // - SS8 (Source burst size of 8 bits)
+ // - DS8 (Destination burst size of 8 bits)
+ ccr_ss_ds_mask = ALT_DMA_CCR_OPT_SS8 | ALT_DMA_CCR_OPT_DS8;
+ break;
+ case 16:
+ // Program in the following parameters:
+ // - SS16 (Source burst size of 16 bits)
+ // - DS16 (Destination burst size of 16 bits)
+ ccr_ss_ds_mask = ALT_DMA_CCR_OPT_SS16 | ALT_DMA_CCR_OPT_DS16;
+ break;
+ case 32:
+ // Program in the following parameters:
+ // - SS32 (Source burst size of 32 bits)
+ // - DS32 (Destination burst size of 32 bits)
+ ccr_ss_ds_mask = ALT_DMA_CCR_OPT_SS32 | ALT_DMA_CCR_OPT_DS32;
+ break;
+ case 64:
+ // Program in the following parameters:
+ // - SS64 (Source burst size of 64 bits)
+ // - DS64 (Destination burst size of 64 bits)
+ ccr_ss_ds_mask = ALT_DMA_CCR_OPT_SS64 | ALT_DMA_CCR_OPT_DS64;
+ break;
+ default:
+ dprintf("DMA[R->M]: Invalid register width.\n");
+ status = ALT_E_BAD_ARG;
+ break;
+ }
+ }
+
+ // Verify that the dst_buf and src_reg are aligned to the register width
+ if (status == ALT_E_SUCCESS)
+ {
+ if (((uintptr_t)dst_buf & ((register_width_bits >> 3) - 1)) != 0)
+ {
+ status = ALT_E_BAD_ARG;
+ }
+ else if (((uintptr_t)src_reg & ((register_width_bits >> 3) - 1)) != 0)
+ {
+ status = ALT_E_BAD_ARG;
+ }
+ else
+ {
+ dprintf("DMA[R->M]: dst_reg = %p.\n", dst_buf);
+ dprintf("DMA[R->M]: src_buf = %p.\n", src_reg);
+ dprintf("DMA[R->M]: count = 0x%x.\n", count);
+ }
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_SAR, (uint32_t)src_reg);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_DAR, (uint32_t)dst_buf);
+ }
+
+ // This is the remaining count left to process.
+ uint32_t countleft = count;
+
+ // See how many 16-length bursts we can use
+ if (countleft >> 4)
+ {
+ uint32_t length16burst = countleft >> 4;
+ countleft &= 0xf;
+
+ dprintf("DMA[R->M]: Number of 16 burst length transfer(s): %lu.\n", length16burst);
+ dprintf("DMA[R->M]: Number of remaining transfer(s): %lu.\n", countleft);
+
+ //
+ // The algorithm uses the strategy described in PL330 B.2.3.
+            // It is not certain that the register will accept burst transfers, so the register is read in its own transfer.
+ //
+
+ // Program in the following parameters:
+ // - SAF (Source address fixed)
+ // - SSx (Source burst size of [ccr_ss_ds_mask])
+ // - DSx (Destination burst size of [ccr_ss_ds_mask])
+ // - SB16 (Source burst length of 16 transfers)
+ // - DB16 (Destination burst length of 16 transfers)
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ccr_ss_ds_mask
+ | ALT_DMA_CCR_OPT_SB16
+ | ALT_DMA_CCR_OPT_SAF
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DB16
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+
+ // See how many 256x 16-length bursts we can do
+ if (length16burst >> 8)
+ {
+ uint32_t loop256length16burst = length16burst >> 8;
+ length16burst &= ((1 << 8) - 1);
+
+ dprintf("DMA[R->M]: Number of 256-looped 16 burst length transfer(s): %lu.\n", loop256length16burst);
+ dprintf("DMA[R->M]: Number of remaining 16 burst length transfer(s): %lu.\n", length16burst);
+
+ while (loop256length16burst > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t loopcount = MIN(loop256length16burst, 256);
+ loop256length16burst -= loopcount;
+
+ dprintf("DMA[R->M]: Looping %lux super loop transfer(s).\n", loopcount);
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALP(program, 256);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+ }
+
+ // The super loop above ensures that the length16burst is below 256.
+ if (length16burst > 0)
+ {
+ uint32_t loopcount = length16burst;
+ length16burst = 0;
+
+ dprintf("DMA[R->M]: Looping %lux 16 burst length transfer(s).\n", loopcount);
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+ }
+
+ // At this point, we should have [countleft] transfer(s) remaining.
+ // [countleft] should be less than 16.
+
+ if (countleft)
+ {
+ dprintf("DMA[R->M]: Tail end %lux transfer(s).\n", countleft);
+
+ // Program in the following parameters:
+ // - SAF (Source address fixed)
+ // - SSx (Source burst size of [ccr_ss_ds_mask])
+ // - DSx (Destination burst size of [ccr_ss_ds_mask])
+ // - SBx (Source burst length of [countleft] transfer(s))
+ // - DBx (Destination burst length of [countleft] transfer(s))
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ccr_ss_ds_mask
+ | ((countleft - 1) << 4) // SB
+ | ALT_DMA_CCR_OPT_SAF
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ((countleft - 1) << 18) // DB
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
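+
+            // Illustrative example (value chosen for this comment only): with
+            // countleft = 5, (countleft - 1) << 4 places 4 in the
+            // src_burst_len field (bits [7:4]) and (countleft - 1) << 18
+            // places 4 in the dst_burst_len field (bits [21:18]), i.e. a
+            // 5-transfer source and destination burst.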
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+
+ } // if (count != 0)
+
+ // Send event if requested.
+ if (send_evt)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DMA[R->M]: Adding event ...\n");
+ status = alt_dma_program_DMASEV(program, evt);
+ }
+ }
+
+ // Now that everything is done, end the program.
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAEND(program);
+ }
+
+ // If there was a problem assembling the program, clean up the buffer and exit.
+ if (status != ALT_E_SUCCESS)
+ {
+ // Do not report the status for the clear operation. A failure should be
+        // reported regardless of whether the clear is successful.
+ alt_dma_program_clear(program);
+ return status;
+ }
+
+ // Execute the program on the given channel.
+ return alt_dma_channel_exec(channel, program);
+}
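+
+// Usage sketch for alt_dma_register_to_memory() (illustrative only; the
+// channel, register address, and event value below are assumptions made for
+// this example, not definitions taken from this file):
+//
+//     ALT_DMA_PROGRAM_t program;
+//     uint32_t samples[32];
+//
+//     ALT_STATUS_CODE status =
+//         alt_dma_register_to_memory(ALT_DMA_CHANNEL_1, &program,
+//                                    samples, (const void *)0xff200000,
+//                                    32, 32, false, (ALT_DMA_EVENT_t)0);
+//
+// This performs 32x 32-bit reads from the fixed register address into the
+// samples buffer and does not send a completion event.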
+
+#if ALT_DMA_PERIPH_PROVISION_QSPI_SUPPORT
+static ALT_STATUS_CODE alt_dma_memory_to_qspi(ALT_DMA_PROGRAM_t * program,
+ const char * src,
+ size_t size)
+{
+ if ((uintptr_t)src & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (size & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ /////
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_DAR,
+ (uint32_t)ALT_QSPIDATA_ADDR);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_SAR,
+ (uint32_t)src);
+ }
+
+ /////
+
+ uint32_t dmaper = alt_read_word(ALT_QSPI_DMAPER_ADDR);
+ uint32_t qspi_single_size_log2 = ALT_QSPI_DMAPER_NUMSGLREQBYTES_GET(dmaper);
+ uint32_t qspi_burst_size_log2 = ALT_QSPI_DMAPER_NUMBURSTREQBYTES_GET(dmaper);
+ uint32_t qspi_single_size = 1 << qspi_single_size_log2;
+ uint32_t qspi_burst_size = 1 << qspi_burst_size_log2;
+
+ dprintf("DMA[M->P][QSPI]: QSPI Single = %lu; Burst = %lu.\n", qspi_single_size, qspi_burst_size);
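+
+    // Illustrative decode of the values above (field values assumed for this
+    // example only): a DMAPER with NUMSGLREQBYTES = 2 and NUMBURSTREQBYTES = 5
+    // yields qspi_single_size = 1 << 2 = 4 bytes and
+    // qspi_burst_size = 1 << 5 = 32 bytes.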
+
+    // Because single transfers are equal to or smaller than bursts (and in
+    // the smaller case, the burst size is always a clean multiple of the
+    // single size), only the single size check is needed for transfer
+    // composability.
+ if (size & (qspi_single_size - 1))
+ {
+ dprintf("DMA[M->P][QSPI]: QSPI DMA size configuration not suitable for transfer request.\n");
+ return ALT_E_ERROR;
+ }
+
+ /////
+
+ if ((uintptr_t)src & 0x7)
+ {
+ // Source address is not 8-byte aligned. Do 1x 32-bit transfer to get it 8-byte aligned.
+
+ dprintf("DMA[M->P][QSPI]: Creating 1x 4-byte aligning transfer.\n");
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SAI
+ | ALT_DMA_CCR_OPT_SS32
+ | ALT_DMA_CCR_OPT_SB1
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DAF
+ | ALT_DMA_CCR_OPT_DS32
+ | ALT_DMA_CCR_OPT_DB1
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAFLUSHP(program, ALT_DMA_PERIPH_QSPI_FLASH_TX);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAWFP(program, ALT_DMA_PERIPH_QSPI_FLASH_TX, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+
+ size -= sizeof(uint32_t);
+ }
+
+ uint32_t qspi_single_count = 0;
+ uint32_t qspi_burst_count = size >> qspi_burst_size_log2;
+
+ // Use QSPI burst transfers if:
+ // - QSPI bursts are larger than QSPI singles [AND]
+ // - Size is large enough that at least 1 burst will be used.
+
+ if ( (qspi_burst_size_log2 > qspi_single_size_log2)
+ && (qspi_burst_count != 0)
+ )
+ {
+ // qspi_burst_count = size >> qspi_burst_size_log2;
+ qspi_single_count = (size & (qspi_burst_size - 1)) >> qspi_single_size_log2;
+
+ dprintf("DMA[M->P][QSPI][B]: Burst size = %lu bytes, count = %lu.\n", qspi_burst_size, qspi_burst_count);
+
+ // 1 << 3 => 8 bytes => 64 bits, which is the width of the AXI bus.
+ uint32_t src_size_log2 = MIN(3, qspi_burst_size_log2);
+
+ uint32_t src_length = 0;
+ uint32_t src_multiple = 0;
+
+ if ((qspi_burst_size >> src_size_log2) <= 16)
+ {
+ src_length = qspi_burst_size >> src_size_log2;
+ src_multiple = 1;
+ }
+ else
+ {
+ src_length = 16;
+ src_multiple = (qspi_burst_size >> src_size_log2) >> 4; // divide by 16
+
+ if (src_multiple == 0)
+ {
+ dprintf("DEBUG[QSPI][B]: src_multiple is 0.\n");
+ status = ALT_E_ERROR;
+ }
+ }
+
+ // uint32_t dst_length = 1; // dst_length is always 1 because the address is fixed.
+ uint32_t dst_multiple = qspi_burst_size >> 2; // divide by sizeof(uint32_t)
+
+ dprintf("DMA[M->P][QSPI][B]: dst_size = %u bits, dst_length = %u, dst_multiple = %lu.\n",
+ 32, 1, dst_multiple);
+ dprintf("DMA[M->P][QSPI][B]: src_size = %u bits, src_length = %lu, src_multiple = %lu.\n",
+ (1 << src_size_log2) * 8, src_length, src_multiple);
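+
+        // Worked example (qspi_burst_size value assumed for illustration):
+        // with a 256-byte QSPI burst, src_size_log2 = 3 (8-byte AXI beats),
+        // 256 >> 3 = 32 > 16, so src_length = 16 and src_multiple = 2
+        // (2 DMALDs of 16x 8-byte beats), while dst_multiple = 256 >> 2 = 64
+        // (64 DMASTs of one 32-bit word each into the fixed QSPI data port).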
+
+ /////
+
+ // Program in the following parameters:
+ // - SAI (Source address increment)
+ // - SSx (Source burst size of [1 << src_size_log2]-bytes)
+ // - SBx (Source burst length of [src_length] transfer(s))
+ // - DAF (Destination address fixed)
+ // - DS32 (Destination burst size of 4-bytes)
+ // - DB1 (Destination burst length of 1 transfer)
+ // - All other parameters default
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SAI
+ | (src_size_log2 << 1) // SS
+ | ((src_length - 1) << 4) // SB
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DAF
+ | ALT_DMA_CCR_OPT_DS32
+ | ALT_DMA_CCR_OPT_DB1
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+
+        // NOTE: We do not use the 256x burst loops for the M->P case because
+        // we only write up to 256 B at a time.
+
+ while (qspi_burst_count > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t loopcount = MIN(qspi_burst_count, 256);
+ qspi_burst_count -= loopcount;
+
+ dprintf("DMA[M->P][QSPI][B]: Creating %lu burst-type transfer(s).\n", loopcount);
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAFLUSHP(program, ALT_DMA_PERIPH_QSPI_FLASH_TX);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAWFP(program, ALT_DMA_PERIPH_QSPI_FLASH_TX, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+ for (uint32_t j = 0; j < src_multiple; ++j)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+ }
+ for (uint32_t k = 0; k < dst_multiple; ++k)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+ }
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+ }
+ else
+ {
+ qspi_single_count = size >> qspi_single_size_log2;
+ }
+
+ // Assemble the single portion of the DMA program.
+ if (qspi_single_count)
+ {
+ dprintf("DMA[M->P][QSPI][S]: Single size = %lu bytes, count = %lu.\n", qspi_single_size, qspi_single_count);
+
+ // 1 << 3 => 8 bytes => 64 bits, which is the width of the AXI bus.
+ uint32_t src_size_log2 = MIN(3, qspi_single_size_log2);
+
+ uint32_t src_length = 0;
+ uint32_t src_multiple = 0;
+
+ if ((qspi_single_size >> src_size_log2) <= 16)
+ {
+ src_length = qspi_single_size >> src_size_log2;
+ src_multiple = 1;
+ }
+ else
+ {
+ src_length = 16;
+ src_multiple = (qspi_single_size >> src_size_log2) >> 4; // divide by 16
+
+ if (src_multiple == 0)
+ {
+ dprintf("DEBUG[QSPI][S]: src_multiple is 0.\n");
+ status = ALT_E_ERROR;
+ }
+ }
+
+        // uint32_t dst_length = 1; // dst_length is always 1 because the address is fixed.
+ uint32_t dst_multiple = qspi_single_size >> 2; // divide by sizeof(uint32_t)
+
+ dprintf("DMA[M->P][QSPI][S]: dst_size = %u bits, dst_length = %u, dst_multiple = %lu.\n",
+ 32, 1, dst_multiple);
+ dprintf("DMA[M->P][QSPI][S]: src_size = %u bits, src_length = %lu, src_multiple = %lu.\n",
+                (1 << src_size_log2) * 8, src_length, src_multiple);
+
+ /////
+
+ // Program in the following parameters:
+ // - SAI (Source address increment)
+ // - SSx (Source burst size of [1 << src_size_log2]-bytes)
+ // - SBx (Source burst length of [src_length] transfer(s))
+ // - DAF (Destination address fixed)
+ // - DS32 (Destination burst size of 4-bytes)
+ // - DB1 (Destination burst length of 1 transfer)
+ // - All other parameters default
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SAI
+ | (src_size_log2 << 1) // SS
+ | ((src_length - 1) << 4) // SB
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DAF
+ | ALT_DMA_CCR_OPT_DS32
+ | ALT_DMA_CCR_OPT_DB1
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+
+        // NOTE: We do not use the 256x burst loops for the M->P case because
+        // we only write up to 256 B at a time.
+
+ while (qspi_single_count > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t loopcount = MIN(qspi_single_count, 256);
+ qspi_single_count -= loopcount;
+
+ dprintf("DMA[M->P][QSPI][S]: Creating %lu single-type transfer(s).\n", loopcount);
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAFLUSHP(program, ALT_DMA_PERIPH_QSPI_FLASH_TX);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAWFP(program, ALT_DMA_PERIPH_QSPI_FLASH_TX, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ for (uint32_t j = 0; j < src_multiple; ++j)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ }
+ for (uint32_t k = 0; k < dst_multiple; ++k)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ }
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+
+ } // if (qspi_single_count != 0)
+
+ return status;
+}
+
+static ALT_STATUS_CODE alt_dma_qspi_to_memory(ALT_DMA_PROGRAM_t * program,
+ char * dst,
+ size_t size)
+{
+ if ((uintptr_t)dst & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (size & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ /////
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_DAR,
+ (uint32_t)dst);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_SAR,
+ (uint32_t)ALT_QSPIDATA_ADDR);
+ }
+
+ /////
+
+ uint32_t dmaper = alt_read_word(ALT_QSPI_DMAPER_ADDR);
+ uint32_t qspi_single_size_log2 = ALT_QSPI_DMAPER_NUMSGLREQBYTES_GET(dmaper);
+ uint32_t qspi_burst_size_log2 = ALT_QSPI_DMAPER_NUMBURSTREQBYTES_GET(dmaper);
+ uint32_t qspi_single_size = 1 << qspi_single_size_log2;
+ uint32_t qspi_burst_size = 1 << qspi_burst_size_log2;
+
+ dprintf("DMA[P->M][QSPI]: QSPI Single = %lu; Burst = %lu.\n", qspi_single_size, qspi_burst_size);
+
+    // Because single transfers are equal to or smaller than bursts (and in
+    // the smaller case, the burst size is always a clean multiple of the
+    // single size), only the single size check is needed for transfer
+    // composability.
+ if (size & (qspi_single_size - 1))
+ {
+ dprintf("DMA[P->M][QSPI]: QSPI DMA size configuration not suitable for transfer request.\n");
+ return ALT_E_ERROR;
+ }
+
+ /////
+
+ if ((uintptr_t)dst & 0x7)
+ {
+ // Destination address is not 8-byte aligned. Do 1x 32-bit transfer to get it 8-byte aligned.
+
+ dprintf("DMA[P->M][QSPI]: Creating 1x 4-byte aligning transfer.\n");
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SAF
+ | ALT_DMA_CCR_OPT_SS32
+ | ALT_DMA_CCR_OPT_SB1
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DAI
+ | ALT_DMA_CCR_OPT_DS32
+ | ALT_DMA_CCR_OPT_DB1
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAFLUSHP(program, ALT_DMA_PERIPH_QSPI_FLASH_RX);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAWFP(program, ALT_DMA_PERIPH_QSPI_FLASH_RX, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+
+ size -= sizeof(uint32_t);
+ }
+
+ uint32_t qspi_single_count = 0;
+ uint32_t qspi_burst_count = size >> qspi_burst_size_log2;
+
+ // Use QSPI burst transfers if:
+ // - QSPI bursts are larger than QSPI singles [AND]
+ // - Size is large enough that at least 1 burst will be used.
+
+ if ( (qspi_burst_size_log2 > qspi_single_size_log2)
+ && (qspi_burst_count != 0)
+ )
+ {
+ // qspi_burst_count = size >> qspi_burst_size_log2;
+ qspi_single_count = (size & (qspi_burst_size - 1)) >> qspi_single_size_log2;
+
+ dprintf("DMA[P->M][QSPI][B]: Burst size = %lu bytes, count = %lu.\n", qspi_burst_size, qspi_burst_count);
+
+ // 1 << 3 => 8 bytes => 64 bits, which is the width of the AXI bus.
+ uint32_t dst_size_log2 = MIN(3, qspi_burst_size_log2);
+
+ uint32_t dst_length = 0;
+ uint32_t dst_multiple = 0;
+
+ if ((qspi_burst_size >> dst_size_log2) <= 16)
+ {
+ dst_length = qspi_burst_size >> dst_size_log2;
+ dst_multiple = 1;
+ }
+ else
+ {
+ dst_length = 16;
+ dst_multiple = (qspi_burst_size >> dst_size_log2) >> 4; // divide by 16
+
+ if (dst_multiple == 0)
+ {
+ dprintf("DEBUG[QSPI][B]: dst_multiple is 0.\n");
+ status = ALT_E_ERROR;
+ }
+ }
+
+ // uint32_t src_length = 1; // src_length is always 1 because the address is fixed.
+ uint32_t src_multiple = qspi_burst_size >> 2; // divide by sizeof(uint32_t)
+
+ dprintf("DMA[P->M][QSPI][B]: dst_size = %u bits, dst_length = %lu, dst_multiple = %lu.\n",
+ (1 << dst_size_log2) * 8, dst_length, dst_multiple);
+ dprintf("DMA[P->M][QSPI][B]: src_size = %u bits, src_length = %u, src_multiple = %lu.\n",
+ 32, 1, src_multiple);
+
+ /////
+
+ // Program in the following parameters:
+ // - SAF (Source address fixed)
+ // - SS32 (Source burst size of 4-bytes)
+ // - SB1 (Source burst length of 1 transfer)
+ // - DAI (Destination address increment)
+        //  - DSx (Destination burst size of [1 << dst_size_log2]-bytes)
+ // - DBx (Destination burst length of [dst_length] transfer(s))
+ // - All other parameters default
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SAF
+ | ALT_DMA_CCR_OPT_SS32
+ | ALT_DMA_CCR_OPT_SB1
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DAI
+ | (dst_size_log2 << 15) // DS
+ | ((dst_length - 1) << 18) // DB
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+
+ // See how many 256x bursts we can construct. This will allow for extremely large requests.
+
+ if (qspi_burst_count >> 8)
+ {
+ uint32_t qspi_burst256_count = qspi_burst_count >> 8;
+ qspi_burst_count &= (1 << 8) - 1;
+
+ while (qspi_burst256_count > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t loopcount = MIN(qspi_burst256_count, 256);
+ qspi_burst256_count -= loopcount;
+
+ dprintf("DMA[P->M][QSPI][B]: Creating %lu 256x burst-type transfer(s).\n", loopcount);
+
+ // Outer loop {
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+
+ // Inner loop {
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALP(program, 256);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAFLUSHP(program, ALT_DMA_PERIPH_QSPI_FLASH_RX);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAWFP(program, ALT_DMA_PERIPH_QSPI_FLASH_RX, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+ for (uint32_t j = 0; j < src_multiple; ++j)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+ }
+ for (uint32_t k = 0; k < dst_multiple; ++k)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+
+ // } Inner loop
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+
+ // } Outer loop
+ }
+ }
+
+ while (qspi_burst_count > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t loopcount = MIN(qspi_burst_count, 256);
+ qspi_burst_count -= loopcount;
+
+ dprintf("DMA[P->M][QSPI][B]: Creating %lu burst-type transfer(s).\n", loopcount);
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAFLUSHP(program, ALT_DMA_PERIPH_QSPI_FLASH_RX);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAWFP(program, ALT_DMA_PERIPH_QSPI_FLASH_RX, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+ for (uint32_t j = 0; j < src_multiple; ++j)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+ }
+ for (uint32_t k = 0; k < dst_multiple; ++k)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+ }
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+ }
+ else
+ {
+ qspi_single_count = size >> qspi_single_size_log2;
+ }
+
+ // Assemble the single portion of the DMA program.
+ if (qspi_single_count)
+ {
+ dprintf("DMA[P->M][QSPI][S]: Single size = %lu bytes, count = %lu.\n", qspi_single_size, qspi_single_count);
+
+ // 1 << 3 => 8 bytes => 64 bits, which is the width of the AXI bus.
+ uint32_t dst_size_log2 = MIN(3, qspi_single_size_log2);
+
+ uint32_t dst_length = 0;
+ uint32_t dst_multiple = 0;
+
+ if ((qspi_single_size >> dst_size_log2) <= 16)
+ {
+ dst_length = qspi_single_size >> dst_size_log2;
+ dst_multiple = 1;
+ }
+ else
+ {
+ dst_length = 16;
+ dst_multiple = (qspi_single_size >> dst_size_log2) >> 4; // divide by 16
+
+ if (dst_multiple == 0)
+ {
+ dprintf("DEBUG[QSPI][S]: dst_multiple is 0.\n");
+ status = ALT_E_ERROR;
+ }
+ }
+
+ // uint32_t src_length = 1; // src_length is always 1 because the address is fixed.
+ uint32_t src_multiple = qspi_single_size >> 2; // divide by sizeof(uint32_t)
+
+ dprintf("DMA[P->M][QSPI][S]: dst_size = %u bits, dst_length = %lu, dst_multiple = %lu.\n",
+ (1 << dst_size_log2) * 8, dst_length, dst_multiple);
+ dprintf("DMA[P->M][QSPI][S]: src_size = %u bits, src_length = %u, src_multiple = %lu.\n",
+ 32, 1, src_multiple);
+
+ /////
+
+ // Program in the following parameters:
+ // - SAF (Source address fixed)
+ // - SS32 (Source burst size of 4-bytes)
+ // - SB1 (Source burst length of 1 transfer)
+ // - DAI (Destination address increment)
+        //  - DSx (Destination burst size of [1 << dst_size_log2]-bytes)
+ // - DBx (Destination burst length of [dst_length] transfer(s))
+ // - All other parameters default
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SAF
+ | ALT_DMA_CCR_OPT_SS32
+ | ALT_DMA_CCR_OPT_SB1
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DAI
+ | (dst_size_log2 << 15) // DS
+ | ((dst_length - 1) << 18) // DB
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+
+ // See how many 256x bursts we can construct. This will allow for extremely large requests.
+
+ if (qspi_single_count >> 8)
+ {
+ uint32_t qspi_single256_count = qspi_single_count >> 8;
+ qspi_single_count &= (1 << 8) - 1;
+
+ while (qspi_single256_count > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t loopcount = MIN(qspi_single256_count, 256);
+ qspi_single256_count -= loopcount;
+
+ dprintf("DMA[P->M][QSPI][S]: Creating %lu 256x single-type transfer(s).\n", loopcount);
+
+ // Outer loop {
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+
+ // Inner loop {
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALP(program, 256);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAFLUSHP(program, ALT_DMA_PERIPH_QSPI_FLASH_RX);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAWFP(program, ALT_DMA_PERIPH_QSPI_FLASH_RX, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ for (uint32_t j = 0; j < src_multiple; ++j)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ }
+ for (uint32_t k = 0; k < dst_multiple; ++k)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+
+ // } Inner loop
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+
+ // } Outer loop
+ }
+ }
+
+ while (qspi_single_count > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t loopcount = MIN(qspi_single_count, 256);
+ qspi_single_count -= loopcount;
+
+ dprintf("DMA[P->M][QSPI][S]: Creating %lu single-type transfer(s).\n", loopcount);
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAFLUSHP(program, ALT_DMA_PERIPH_QSPI_FLASH_RX);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAWFP(program, ALT_DMA_PERIPH_QSPI_FLASH_RX, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ for (uint32_t j = 0; j < src_multiple; ++j)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ }
+ for (uint32_t k = 0; k < dst_multiple; ++k)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ }
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_NONE);
+ }
+ }
+
+ } // if (qspi_single_count != 0)
+
+ return status;
+}
+#endif // ALT_DMA_PERIPH_PROVISION_QSPI_SUPPORT
+
+#if ALT_DMA_PERIPH_PROVISION_16550_SUPPORT
+static ALT_STATUS_CODE alt_dma_memory_to_16550_single(ALT_DMA_PROGRAM_t * program,
+ ALT_DMA_PERIPH_t periph,
+ size_t size)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ // Program in the following parameters:
+ // - SS8 (Source burst size of 1-byte)
+ // - DS8 (Destination burst size of 1-byte)
+ // - SB1 (Source burst length of 1 transfer)
+ // - DB1 (Destination burst length of 1 transfer)
+ // - DAF (Destination address fixed)
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SB1
+ | ALT_DMA_CCR_OPT_SS8
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DB1
+ | ALT_DMA_CCR_OPT_DS8
+ | ALT_DMA_CCR_OPT_DAF
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+
+ uint32_t sizeleft = size;
+
+ while (sizeleft > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t loopcount = MIN(sizeleft, 256);
+ sizeleft -= loopcount;
+
+ dprintf("DMA[M->P][16550][S]: Creating %lu transfer(s).\n", loopcount);
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAFLUSHP(program, periph);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAWFP(program, periph, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ }
+
+ return status;
+}
+
+static ALT_STATUS_CODE alt_dma_memory_to_16550_burst(ALT_DMA_PROGRAM_t * program,
+ ALT_DMA_PERIPH_t periph,
+ size_t burst_size,
+ size_t burst_count)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ // Program in the following parameters:
+ // - SS8 (Source burst size of 1-byte)
+ // - DS8 (Destination burst size of 1-byte)
+ // - SB16 (Source burst length of 16 transfers)
+ // - DB16 (Destination burst length of 16 transfers)
+    //  - DAF (Destination address fixed)
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SB16
+ | ALT_DMA_CCR_OPT_SS8
+ | ALT_DMA_CCR_OPT_SA_DEFAULT
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DB16
+ | ALT_DMA_CCR_OPT_DS8
+ | ALT_DMA_CCR_OPT_DAF
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+
+ while (burst_count > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t loopcount = MIN(burst_count, 256);
+ burst_count -= loopcount;
+
+ dprintf("DMA[M->P][16550][B]: Creating outer %lu inner loop(s).\n", loopcount);
+
+ // Outer loop {
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAFLUSHP(program, periph);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAWFP(program, periph, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+
+ // Inner loop {
+
+        // Loop [burst_size / 16] times. The caller rounded burst_size down
+        // to a multiple of 16. Each burst does 16 transfers, hence the
+        // divide.
+
+ dprintf("DMA[M->P][16550][B]: Creating inner %u transfer(s).\n", burst_size >> 4);
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALP(program, burst_size >> 4); // divide by 16.
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+
+ // } Inner loop
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+
+ // } Outer loop
+ }
+
+ return status;
+}
+
+static ALT_STATUS_CODE alt_dma_memory_to_16550(ALT_DMA_PROGRAM_t * program,
+ ALT_DMA_PERIPH_t periph,
+ ALT_16550_HANDLE_t * handle,
+ const void * src,
+ size_t size)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_DAR,
+ (uint32_t)ALT_UART_RBR_THR_DLL_ADDR(handle->location));
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_SAR,
+ (uint32_t)src);
+ }
+
+ // Determine if FIFOs are enabled from the FCR cache
+
+ if (ALT_UART_FCR_FIFOE_GET(handle->fcr) != 0)
+ {
+ dprintf("DMA[M->P][16550]: FIFOs enabled.\n");
+
+ //
+ // FIFOs are enabled.
+ //
+
+ uint32_t tx_size;
+ uint32_t burst_size;
+ ALT_16550_FIFO_TRIGGER_TX_t trig_tx;
+
+ // Get the TX FIFO Size
+ // Use the register interface to avoid coupling the 16550 and DMA.
+ tx_size = ALT_UART_CPR_FIFO_MOD_GET(alt_read_word(ALT_UART_CPR_ADDR(handle->location))) << 4;
+
+ // Get the TX FIFO Trigger Level from the FCR cache
+ trig_tx = (ALT_16550_FIFO_TRIGGER_TX_t)ALT_UART_FCR_TET_GET(handle->fcr);
+
+ switch (trig_tx)
+ {
+ case ALT_16550_FIFO_TRIGGER_TX_EMPTY:
+ burst_size = tx_size;
+ break;
+ case ALT_16550_FIFO_TRIGGER_TX_ALMOST_EMPTY:
+ burst_size = tx_size - 2;
+ break;
+ case ALT_16550_FIFO_TRIGGER_TX_QUARTER_FULL:
+ burst_size = 3 * (tx_size >> 2);
+ break;
+ case ALT_16550_FIFO_TRIGGER_TX_HALF_FULL:
+ burst_size = tx_size >> 1;
+ break;
+ default:
+ // This case should never happen.
+ return ALT_E_ERROR;
+ }
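+
+        // For example (FIFO size assumed for illustration): with a 128-entry
+        // TX FIFO (CPR FIFO_MOD = 8, so 8 << 4 = 128), an EMPTY trigger gives
+        // burst_size = 128, HALF_FULL gives 64, and QUARTER_FULL gives
+        // 3 * (128 >> 2) = 96 free entries per service request.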
+
+ if (burst_size < 16)
+ {
+            // The trigger level frees fewer than 16 bytes per request, which cannot fill even one 16-transfer burst, so just do single transfers.
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_memory_to_16550_single(program,
+ periph,
+ size);
+ }
+ }
+ else
+ {
+ uint32_t sizeleft = size;
+
+            // Now trim the burst size down to a multiple of 16.
+            // This packs the bursts into the fewest possible DMA commands.
+ dprintf("DMA[M->P][16550]: Untrimmed burst size = %lu.\n", burst_size);
+ burst_size &= ~0xf;
+ dprintf("DMA[M->P][16550]: Trimmed burst size = %lu.\n", burst_size);
+
+ // Determine how many burst transfers can be done
+ uint32_t burst_count = 0;
+
+ burst_count = sizeleft / burst_size;
+ sizeleft -= burst_count * burst_size;
+
+ if (burst_count == 0)
+ {
+ // Do the transfer
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_memory_to_16550_single(program,
+ periph,
+ sizeleft);
+ }
+ }
+ else
+ {
+ // Do the burst transfers
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_memory_to_16550_burst(program,
+ periph,
+ burst_size,
+ burst_count);
+ }
+
+                // Program the DMA engine to transfer the non-burstable items in single transfers.
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_memory_to_16550_single(program,
+ periph,
+ sizeleft);
+ }
+
+ } // else if (burst_count == 0)
+ }
+ }
+ else
+ {
+ dprintf("DMA[M->P][16550]: FIFOs disabled.\n");
+
+ //
+ // FIFOs are disabled.
+ //
+
+ status = alt_dma_memory_to_16550_single(program,
+ periph,
+ size);
+ }
+
+ return status;
+}
+
+static ALT_STATUS_CODE alt_dma_16550_to_memory_single(ALT_DMA_PROGRAM_t * program,
+ ALT_DMA_PERIPH_t periph,
+ size_t size)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ // Program in the following parameters:
+ // - SS8 (Source burst size of 1-byte)
+ // - DS8 (Destination burst size of 1-byte)
+ // - SB1 (Source burst length of 1 transfer)
+ // - DB1 (Destination burst length of 1 transfer)
+ // - SAF (Source address fixed)
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SB1
+ | ALT_DMA_CCR_OPT_SS8
+ | ALT_DMA_CCR_OPT_SAF
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DB1
+ | ALT_DMA_CCR_OPT_DS8
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+
+ uint32_t sizeleft = size;
+
+ while (sizeleft > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t loopcount = MIN(sizeleft, 256);
+ sizeleft -= loopcount;
+
+ dprintf("DMA[P->M][16550][S]: Creating %lu transfer(s).\n", loopcount);
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAFLUSHP(program, periph);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAWFP(program, periph, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_SINGLE);
+ }
+ }
+
+ return status;
+}
+
+static ALT_STATUS_CODE alt_dma_16550_to_memory_burst(ALT_DMA_PROGRAM_t * program,
+ ALT_DMA_PERIPH_t periph,
+ size_t burst_size,
+ size_t burst_count)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ // Program in the following parameters:
+ // - SS8 (Source burst size of 1-byte)
+ // - DS8 (Destination burst size of 1-byte)
+ // - SB16 (Source burst length of 16 transfers)
+ // - DB16 (Destination burst length of 16 transfers)
+ // - SAF (Source address fixed)
+ // - All other options default.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_CCR,
+ ( ALT_DMA_CCR_OPT_SB16
+ | ALT_DMA_CCR_OPT_SS8
+ | ALT_DMA_CCR_OPT_SAF
+ | ALT_DMA_CCR_OPT_SP_DEFAULT
+ | ALT_DMA_CCR_OPT_SC_DEFAULT
+ | ALT_DMA_CCR_OPT_DB16
+ | ALT_DMA_CCR_OPT_DS8
+ | ALT_DMA_CCR_OPT_DA_DEFAULT
+ | ALT_DMA_CCR_OPT_DP_DEFAULT
+ | ALT_DMA_CCR_OPT_DC_DEFAULT
+ | ALT_DMA_CCR_OPT_ES_DEFAULT
+ )
+ );
+ }
+
+ while (burst_count > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t loopcount = MIN(burst_count, 256);
+ burst_count -= loopcount;
+
+ dprintf("DMA[P->M][16550][B]: Creating outer %lu inner loop(s).\n", loopcount);
+
+ // Outer loop {
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALP(program, loopcount);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAFLUSHP(program, periph);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAWFP(program, periph, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+
+ // Inner loop {
+
+        // Loop [burst_size / 16] times. The caller rounded burst_size down
+        // to a multiple of 16. Each burst does 16 transfers, hence the
+        // divide.
+
+ dprintf("DMA[P->M][16550][B]: Creating inner %u transfer(s).\n", burst_size >> 4);
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALP(program, burst_size >> 4); // divide by 16.
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALD(program, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAST(program, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+
+ // } Inner loop
+
+ if ((status == ALT_E_SUCCESS) && (loopcount > 1))
+ {
+ status = alt_dma_program_DMALPEND(program, ALT_DMA_PROGRAM_INST_MOD_BURST);
+ }
+
+ // } Outer loop
+ }
+
+ return status;
+}
+
+static ALT_STATUS_CODE alt_dma_16550_to_memory(ALT_DMA_PROGRAM_t * program,
+ ALT_DMA_PERIPH_t periph,
+ ALT_16550_HANDLE_t * handle,
+ void * dst,
+ size_t size)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_DAR, (uint32_t)dst);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAMOV(program, ALT_DMA_PROGRAM_REG_SAR, (uint32_t)ALT_UART_RBR_THR_DLL_ADDR(handle->location));
+ }
+
+ // Determine if FIFOs are enabled from the FCR cache
+
+ if (ALT_UART_FCR_FIFOE_GET(handle->fcr) != 0)
+ {
+ dprintf("DMA[P->M][16550]: FIFOs enabled.\n");
+
+ //
+ // FIFOs are enabled.
+ //
+
+ uint32_t rx_size;
+ uint32_t burst_size;
+ ALT_16550_FIFO_TRIGGER_RX_t trig_rx;
+
+ // Get the RX FIFO Size
+ // Use the register interface to avoid coupling the 16550 and DMA.
+ rx_size = ALT_UART_CPR_FIFO_MOD_GET(alt_read_word(ALT_UART_CPR_ADDR(handle->location))) << 4;
+
+ // Get the RX FIFO Trigger Level from the FCR cache
+ trig_rx = (ALT_16550_FIFO_TRIGGER_RX_t)ALT_UART_FCR_RT_GET(handle->fcr);
+
+ switch (trig_rx)
+ {
+ case ALT_16550_FIFO_TRIGGER_RX_ANY:
+ burst_size = 1;
+ break;
+ case ALT_16550_FIFO_TRIGGER_RX_QUARTER_FULL:
+ burst_size = rx_size >> 2; // divide by 4
+ break;
+ case ALT_16550_FIFO_TRIGGER_RX_HALF_FULL:
+ burst_size = rx_size >> 1; // divide by 2
+ break;
+ case ALT_16550_FIFO_TRIGGER_RX_ALMOST_FULL:
+ burst_size = rx_size - 2;
+ break;
+ default:
+ // This case should never happen.
+ return ALT_E_ERROR;
+ }
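+
+        // For example (FIFO size assumed for illustration): with a 128-entry
+        // RX FIFO, a HALF_FULL trigger yields burst_size = 64, QUARTER_FULL
+        // yields 32, and RX_ANY yields 1, which falls below 16 and therefore
+        // takes the single-transfer path below.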
+
+ if (burst_size < 16)
+ {
+            // The trigger level delivers fewer than 16 bytes per request, which cannot fill even one 16-transfer burst, so just do single transfers.
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_16550_to_memory_single(program,
+ periph,
+ size);
+ }
+ }
+ else
+ {
+ uint32_t sizeleft = size;
+
+            // Now trim the burst size down to a multiple of 16.
+            // This packs the bursts into the fewest possible DMA commands.
+ dprintf("DMA[P->M][16550]: Untrimmed burst size = %lu.\n", burst_size);
+ burst_size &= ~0xf;
+ dprintf("DMA[P->M][16550]: Trimmed burst size = %lu.\n", burst_size);
+
+ // Determine how many burst transfers can be done
+ uint32_t burst_count = 0;
+
+ burst_count = sizeleft / burst_size;
+ sizeleft -= burst_count * burst_size;
+
+ if (burst_count == 0)
+ {
+ // Do the transfer.
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_16550_to_memory_single(program,
+ periph,
+ sizeleft);
+ }
+ }
+ else
+ {
+ // Do the burst transfers
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_16550_to_memory_burst(program,
+ periph,
+ burst_size,
+ burst_count);
+ }
+
+ // Program the DMA engine to transfer the non-burstable items in single transfers.
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_16550_to_memory_single(program,
+ periph,
+ sizeleft);
+ }
+
+ } // if (burst_count == 0)
+ }
+ }
+ else
+ {
+ dprintf("DMA[P->M][16550]: FIFOs disabled.\n");
+
+ //
+ // FIFOs are disabled.
+ //
+
+ status = alt_dma_16550_to_memory_single(program,
+ periph,
+ size);
+ }
+
+ return status;
+}
+#endif // ALT_DMA_PERIPH_PROVISION_16550_SUPPORT
+
+ALT_STATUS_CODE alt_dma_memory_to_periph(ALT_DMA_CHANNEL_t channel,
+ ALT_DMA_PROGRAM_t * program,
+ ALT_DMA_PERIPH_t dstp,
+ const void * src,
+ size_t size,
+ void * periph_info,
+ bool send_evt,
+ ALT_DMA_EVENT_t evt)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if ((size == 0) && (send_evt == false))
+ {
+ return status;
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DMA[M->P]: Init Program.\n");
+ status = alt_dma_program_init(program);
+ }
+
+ if ((status == ALT_E_SUCCESS) && (size != 0))
+ {
+ switch (dstp)
+ {
+#if ALT_DMA_PERIPH_PROVISION_QSPI_SUPPORT
+ case ALT_DMA_PERIPH_QSPI_FLASH_TX:
+ status = alt_dma_memory_to_qspi(program, src, size);
+ break;
+#endif
+
+#if ALT_DMA_PERIPH_PROVISION_16550_SUPPORT
+ case ALT_DMA_PERIPH_UART0_TX:
+ case ALT_DMA_PERIPH_UART1_TX:
+ status = alt_dma_memory_to_16550(program, dstp,
+ (ALT_16550_HANDLE_t *)periph_info, src, size);
+ break;
+#endif
+
+ case ALT_DMA_PERIPH_FPGA_0:
+ case ALT_DMA_PERIPH_FPGA_1:
+ case ALT_DMA_PERIPH_FPGA_2:
+ case ALT_DMA_PERIPH_FPGA_3:
+ case ALT_DMA_PERIPH_FPGA_4:
+ case ALT_DMA_PERIPH_FPGA_5:
+ case ALT_DMA_PERIPH_FPGA_6:
+ case ALT_DMA_PERIPH_FPGA_7:
+ case ALT_DMA_PERIPH_I2C0_TX:
+ case ALT_DMA_PERIPH_I2C1_TX:
+ case ALT_DMA_PERIPH_I2C2_TX:
+ case ALT_DMA_PERIPH_I2C3_TX:
+ case ALT_DMA_PERIPH_SPI0_MASTER_TX:
+ case ALT_DMA_PERIPH_SPI0_SLAVE_TX:
+ case ALT_DMA_PERIPH_SPI1_MASTER_TX:
+ case ALT_DMA_PERIPH_SPI1_SLAVE_TX:
+
+ default:
+ status = ALT_E_BAD_ARG;
+ break;
+ }
+ }
+
+ // Send event if requested.
+ if (send_evt)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DMA[M->P]: Adding event.\n");
+ status = alt_dma_program_DMASEV(program, evt);
+ }
+ }
+
+ // Now that everything is done, end the program.
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAEND(program);
+ }
+
+ // If there was a problem assembling the program, clean up the buffer and exit.
+ if (status != ALT_E_SUCCESS)
+ {
+ // Do not report the status for the clear operation. A failure should be
+        // reported regardless of whether the clear is successful.
+ alt_dma_program_clear(program);
+ return status;
+ }
+
+ // Execute the program on the given channel.
+
+ return alt_dma_channel_exec(channel, program);
+}
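+
+// Usage sketch for alt_dma_memory_to_periph() (illustrative only; the
+// channel, handle setup, and event value below are assumptions made for this
+// example, not definitions taken from this file):
+//
+//     ALT_DMA_PROGRAM_t program;
+//     ALT_16550_HANDLE_t uart;    // previously initialized 16550 handle
+//     char buffer[256];           // data to transmit
+//
+//     ALT_STATUS_CODE status =
+//         alt_dma_memory_to_periph(ALT_DMA_CHANNEL_0, &program,
+//                                  ALT_DMA_PERIPH_UART0_TX,
+//                                  buffer, sizeof(buffer),
+//                                  &uart, false, (ALT_DMA_EVENT_t)0);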
+
+ALT_STATUS_CODE alt_dma_periph_to_memory(ALT_DMA_CHANNEL_t channel,
+ ALT_DMA_PROGRAM_t * program,
+ void * dst,
+ ALT_DMA_PERIPH_t srcp,
+ size_t size,
+ void * periph_info,
+ bool send_evt,
+ ALT_DMA_EVENT_t evt)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if ((size == 0) && (send_evt == false))
+ {
+ return ALT_E_SUCCESS;
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DMA[P->M]: Init Program.\n");
+ status = alt_dma_program_init(program);
+ }
+
+ if ((status == ALT_E_SUCCESS) && (size != 0))
+ {
+ switch (srcp)
+ {
+#if ALT_DMA_PERIPH_PROVISION_QSPI_SUPPORT
+ case ALT_DMA_PERIPH_QSPI_FLASH_RX:
+ status = alt_dma_qspi_to_memory(program, dst, size);
+ break;
+#endif
+
+#if ALT_DMA_PERIPH_PROVISION_16550_SUPPORT
+ case ALT_DMA_PERIPH_UART0_RX:
+ case ALT_DMA_PERIPH_UART1_RX:
+ status = alt_dma_16550_to_memory(program, srcp,
+ (ALT_16550_HANDLE_t *)periph_info, dst, size);
+ break;
+#endif
+
+ case ALT_DMA_PERIPH_FPGA_0:
+ case ALT_DMA_PERIPH_FPGA_1:
+ case ALT_DMA_PERIPH_FPGA_2:
+ case ALT_DMA_PERIPH_FPGA_3:
+ case ALT_DMA_PERIPH_FPGA_4:
+ case ALT_DMA_PERIPH_FPGA_5:
+ case ALT_DMA_PERIPH_FPGA_6:
+ case ALT_DMA_PERIPH_FPGA_7:
+ case ALT_DMA_PERIPH_I2C0_RX:
+ case ALT_DMA_PERIPH_I2C1_RX:
+ case ALT_DMA_PERIPH_I2C2_RX:
+ case ALT_DMA_PERIPH_I2C3_RX:
+ case ALT_DMA_PERIPH_SPI0_MASTER_RX:
+ case ALT_DMA_PERIPH_SPI0_SLAVE_RX:
+ case ALT_DMA_PERIPH_SPI1_MASTER_RX:
+ case ALT_DMA_PERIPH_SPI1_SLAVE_RX:
+
+ default:
+ status = ALT_E_BAD_ARG;
+ break;
+ }
+ }
+
+ // Send event if requested.
+ if (send_evt)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DMA[P->M]: Adding event.\n");
+ status = alt_dma_program_DMASEV(program, evt);
+ }
+ }
+
+ // Now that everything is done, end the program.
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_dma_program_DMAEND(program);
+ }
+
+ // If there was a problem assembling the program, clean up the buffer and exit.
+ if (status != ALT_E_SUCCESS)
+ {
+ // Do not report the status for the clear operation. A failure should be
+        // reported regardless of whether the clear is successful.
+ alt_dma_program_clear(program);
+ return status;
+ }
+
+ // Execute the program on the given channel.
+
+ return alt_dma_channel_exec(channel, program);
+}
+
+/////
+
+static bool alt_dma_is_init(void)
+{
+ uint32_t permodrst = alt_read_word(ALT_RSTMGR_PERMODRST_ADDR);
+
+ if (permodrst & ALT_RSTMGR_PERMODRST_DMA_SET_MSK)
+ {
+ return false;
+ }
+ else
+ {
+ return true;
+ }
+}
+
+ALT_STATUS_CODE alt_dma_ecc_start(void * block, size_t size)
+{
+ if (alt_dma_is_init() == false)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if ((uintptr_t)block & (sizeof(uint64_t) - 1))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify that all channels are either unallocated or allocated and idle.
+
+ for (int i = 0; i < ARRAY_COUNT(channel_info_array); ++i)
+ {
+ if (channel_info_array[i].flag & ALT_DMA_CHANNEL_INFO_FLAG_ALLOCED)
+ {
+ ALT_DMA_CHANNEL_STATE_t state;
+ alt_dma_channel_state_get((ALT_DMA_CHANNEL_t)i, &state);
+
+ if (state != ALT_DMA_CHANNEL_STATE_STOPPED)
+ {
+ dprintf("DMA[ECC]: Error: Channel %d state is non-stopped (%d).\n", i, (int)state);
+ return ALT_E_ERROR;
+ }
+ }
+ }
+
+ /////
+
+ // Enable ECC for DMA RAM
+
+ dprintf("DEBUG[DMA][ECC]: Enable ECC in SysMgr.\n");
+ alt_write_word(ALT_SYSMGR_ECC_DMA_ADDR, ALT_SYSMGR_ECC_DMA_EN_SET_MSK);
+
+ // Clear any pending spurious DMA ECC interrupts.
+
+ dprintf("DEBUG[DMA][ECC]: Clear any pending spurious ECC status in SysMgr.\n");
+ alt_write_word(ALT_SYSMGR_ECC_DMA_ADDR,
+ ALT_SYSMGR_ECC_DMA_EN_SET_MSK
+ | ALT_SYSMGR_ECC_DMA_SERR_SET_MSK
+ | ALT_SYSMGR_ECC_DMA_DERR_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
diff --git a/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_dma_program.c b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_dma_program.c
new file mode 100644
index 0000000000..c13957bcf8
--- /dev/null
+++ b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_dma_program.c
@@ -0,0 +1,1064 @@
+/******************************************************************************
+ *
+ * Copyright 2013 Altera Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+#include <bsp/alt_dma_program.h>
+#include <bsp/alt_cache.h>
+#include <stdio.h>
+
+/////
+
+// NOTE: To enable debugging output, delete the next line and uncomment the
+// line after.
+#define dprintf(...)
+// #define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+
+/////
+
+//
+// The following section describes how the bits are used in the "flag" field:
+//
+
+// [17:16] Which loop registers (LOOP0, LOOP1) are currently being used by a
+// partially assembled program. LOOP0 is always used before LOOP1. LOOP1 is
+// always ended before LOOP0.
+#define ALT_DMA_PROGRAM_FLAG_LOOP0 (1UL << 16)
+#define ALT_DMA_PROGRAM_FLAG_LOOP1 (1UL << 17)
+#define ALT_DMA_PROGRAM_FLAG_LOOP_ALL (ALT_DMA_PROGRAM_FLAG_LOOP0 | ALT_DMA_PROGRAM_FLAG_LOOP1)
+
+// [18] Flag that marks LOOP0 as a forever loop. Said another way, LOOP0 is
+// being used to execute the DMALPFE directive.
+#define ALT_DMA_PROGRAM_FLAG_LOOP0_IS_FE (1UL << 18)
+// [19] Flag that marks LOOP1 as a forever loop. Said another way, LOOP1 is
+// being used to execute the DMALPFE directive.
+#define ALT_DMA_PROGRAM_FLAG_LOOP1_IS_FE (1UL << 19)
+
+// [24] Flag that the first SAR has been programmed. The SAR field is valid and
+// is the offset from the start of the buffer where SAR is located.
+#define ALT_DMA_PROGRAM_FLAG_SAR (1UL << 24)
+// [25] Flag that the first DAR has been programmed. The DAR field is valid and
+// is the offset from the start of the buffer where DAR is located.
+#define ALT_DMA_PROGRAM_FLAG_DAR (1UL << 25)
+
+// [31] Flag that marks the last assembled instruction as DMAEND.
+#define ALT_DMA_PROGRAM_FLAG_ENDED (1UL << 31)
+
+/////
+
+ALT_STATUS_CODE alt_dma_program_init(ALT_DMA_PROGRAM_t * pgm)
+{
+ // Clear the variables that matter.
+ pgm->flag = 0;
+ pgm->code_size = 0;
+
+ // Calculate the cache aligned start location of the buffer.
+ size_t buffer = (size_t)pgm->program;
+ size_t offset = ((buffer + ALT_DMA_PROGRAM_CACHE_LINE_SIZE - 1) & ~(ALT_DMA_PROGRAM_CACHE_LINE_SIZE - 1)) - buffer;
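+
+    // Worked example (addresses assumed for illustration): with a 32-byte
+    // cache line and pgm->program starting at an address ending in 0x08,
+    // offset = ((0x08 + 31) & ~31) - 0x08 = 0x20 - 0x08 = 24, so the program
+    // is assembled 24 bytes into the raw buffer.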
+
+ // It is safe to cast to uint16_t because the extra offset can only be up to
+ // (ALT_DMA_PROGRAM_CACHE_LINE_SIZE - 1) or 31, which is within range of the
+ // uint16_t.
+ pgm->buffer_start = (uint16_t)offset;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_uninit(ALT_DMA_PROGRAM_t * pgm)
+{
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_clear(ALT_DMA_PROGRAM_t * pgm)
+{
+ // Clear the variables that matter
+ pgm->flag = 0;
+ pgm->code_size = 0;
+
+ return ALT_E_SUCCESS;
+}
+
+__attribute__((weak)) ALT_STATUS_CODE alt_cache_system_clean(void * address, size_t length)
+{
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_validate(const ALT_DMA_PROGRAM_t * pgm)
+{
+ // Verify that at least one instruction is in the buffer
+ if (pgm->code_size == 0)
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify all loops are completed.
+ if (pgm->flag & ALT_DMA_PROGRAM_FLAG_LOOP_ALL)
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify last item is DMAEND
+ if (!(pgm->flag & ALT_DMA_PROGRAM_FLAG_ENDED))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Sync the DMA program to RAM.
+ void * vaddr = (void *)((uintptr_t)(pgm->program + pgm->buffer_start) & ~(ALT_CACHE_LINE_SIZE - 1));
+ size_t length = (pgm->code_size + ALT_CACHE_LINE_SIZE) & ~(ALT_CACHE_LINE_SIZE - 1);
+
+ dprintf("DEBUG[DMAP]: Program (real) @ %p, length = 0x%x.\n", pgm->program + pgm->buffer_start, pgm->code_size);
+ dprintf("DEBUG[DMAP]: Clean: addr = %p, length = 0x%x.\n", vaddr, length);
+
+ return alt_cache_system_clean(vaddr, length);
+}
+
+ALT_STATUS_CODE alt_dma_program_progress_reg(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_PROGRAM_REG_t reg,
+ uint32_t current, uint32_t * progress)
+{
+ // Pointer to where the register is initialized in the program buffer.
+ uint8_t * buffer = NULL;
+
+ switch (reg)
+ {
+ case ALT_DMA_PROGRAM_REG_SAR:
+ if (!(pgm->flag & ALT_DMA_PROGRAM_FLAG_SAR))
+ {
+ return ALT_E_BAD_ARG;
+ }
+ buffer = pgm->program + pgm->buffer_start + pgm->sar;
+ break;
+
+ case ALT_DMA_PROGRAM_REG_DAR:
+ if (!(pgm->flag & ALT_DMA_PROGRAM_FLAG_DAR))
+ {
+ return ALT_E_BAD_ARG;
+ }
+ buffer = pgm->program + pgm->buffer_start + pgm->dar;
+ break;
+
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ uint32_t initial =
+ (buffer[3] << 24) |
+ (buffer[2] << 16) |
+ (buffer[1] << 8) |
+ (buffer[0] << 0);
+
+ *progress = current - initial;
+
+ return ALT_E_SUCCESS;
+}
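+
+// Example (register values assumed for illustration): if DMAMOV programmed
+// SAR with 0x20000000 and the channel's current SAR reads 0x20000100, this
+// routine reports a progress of 0x100, i.e. 256 bytes consumed so far.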
+
+ALT_STATUS_CODE alt_dma_program_update_reg(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_PROGRAM_REG_t reg, uint32_t val)
+{
+ uint8_t * buffer = NULL;
+
+ switch (reg)
+ {
+ case ALT_DMA_PROGRAM_REG_SAR:
+ if (!(pgm->flag & ALT_DMA_PROGRAM_FLAG_SAR))
+ {
+ return ALT_E_BAD_ARG;
+ }
+ buffer = pgm->program + pgm->buffer_start + pgm->sar;
+ break;
+
+ case ALT_DMA_PROGRAM_REG_DAR:
+ if (!(pgm->flag & ALT_DMA_PROGRAM_FLAG_DAR))
+ {
+ return ALT_E_BAD_ARG;
+ }
+ buffer = pgm->program + pgm->buffer_start + pgm->dar;
+ break;
+
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ buffer[0] = (uint8_t)((val >> 0) & 0xff);
+ buffer[1] = (uint8_t)((val >> 8) & 0xff);
+ buffer[2] = (uint8_t)((val >> 16) & 0xff);
+ buffer[3] = (uint8_t)((val >> 24) & 0xff);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMAADDH(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_PROGRAM_REG_t addr_reg, uint16_t val)
+{
+ // For information on DMAADDH, see PL330, section 4.3.1.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 3) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Verify valid register; construct instruction modifier.
+ uint8_t ra_mask = 0;
+ switch (addr_reg)
+ {
+ case ALT_DMA_PROGRAM_REG_SAR:
+ ra_mask = 0x0;
+ break;
+ case ALT_DMA_PROGRAM_REG_DAR:
+ ra_mask = 0x2;
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMAADDH
+ buffer[0] = 0x54 | ra_mask;
+ buffer[1] = (uint8_t)(val & 0xff);
+ buffer[2] = (uint8_t)(val >> 8);
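+
+    // Encoding example: DMAADDH with ALT_DMA_PROGRAM_REG_DAR and val = 0x1234
+    // emits the byte sequence 0x56 0x34 0x12.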
+
+ // Update the code size.
+ pgm->code_size += 3;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMAADNH(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_PROGRAM_REG_t addr_reg, uint16_t val)
+{
+ // For information on DMAADNH, see PL330, section 4.3.2.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 3) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Verify valid register; construct instruction modifier.
+ uint8_t ra_mask = 0;
+ switch (addr_reg)
+ {
+ case ALT_DMA_PROGRAM_REG_SAR:
+ ra_mask = 0x0;
+ break;
+ case ALT_DMA_PROGRAM_REG_DAR:
+ ra_mask = 0x2;
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMAADNH
+ buffer[0] = 0x5c | ra_mask;
+ buffer[1] = (uint8_t)(val & 0xff);
+ buffer[2] = (uint8_t)(val >> 8);
+
+ // Update the code size.
+ pgm->code_size += 3;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMAEND(ALT_DMA_PROGRAM_t * pgm)
+{
+ // For information on DMAEND, see PL330, section 4.3.3.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 1) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMAEND
+ buffer[0] = 0x00;
+
+ // Update the code size.
+ pgm->code_size += 1;
+
+ // Mark program as ended.
+ pgm->flag |= ALT_DMA_PROGRAM_FLAG_ENDED;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMAFLUSHP(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_PERIPH_t periph)
+{
+ // For information on DMAFLUSHP, see PL330, section 4.3.4.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 2) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Verify valid peripheral identifier.
+ if (periph > ((1 << 5) - 1))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMAFLUSHP
+ buffer[0] = 0x35;
+ buffer[1] = (uint8_t)(periph) << 3;
+
+ // Update the code size.
+ pgm->code_size += 2;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMAGO(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_CHANNEL_t channel, uint32_t val,
+ ALT_DMA_SECURITY_t sec)
+{
+ // For information on DMAGO, see PL330, section 4.3.5.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 6) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Verify channel
+ switch (channel)
+ {
+ case ALT_DMA_CHANNEL_0:
+ case ALT_DMA_CHANNEL_1:
+ case ALT_DMA_CHANNEL_2:
+ case ALT_DMA_CHANNEL_3:
+ case ALT_DMA_CHANNEL_4:
+ case ALT_DMA_CHANNEL_5:
+ case ALT_DMA_CHANNEL_6:
+ case ALT_DMA_CHANNEL_7:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Verify security; construct ns mask value
+ uint8_t ns_mask = 0;
+ switch (sec)
+ {
+ case ALT_DMA_SECURITY_DEFAULT:
+ case ALT_DMA_SECURITY_SECURE:
+ ns_mask = 0x0;
+ break;
+ case ALT_DMA_SECURITY_NONSECURE:
+ ns_mask = 0x2;
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMAGO
+ buffer[0] = 0xa0 | ns_mask;
+ buffer[1] = (uint8_t)channel;
+ buffer[2] = (uint8_t)((val >> 0) & 0xff);
+ buffer[3] = (uint8_t)((val >> 8) & 0xff);
+ buffer[4] = (uint8_t)((val >> 16) & 0xff);
+ buffer[5] = (uint8_t)((val >> 24) & 0xff);
+
+ // Update the code size.
+ pgm->code_size += 6;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMAKILL(ALT_DMA_PROGRAM_t * pgm)
+{
+ // For information on DMAKILL, see PL330, section 4.3.6.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 1) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMAKILL
+ buffer[0] = 0x01;
+
+ // Update the code size.
+ pgm->code_size += 1;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMALD(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_PROGRAM_INST_MOD_t mod)
+{
+ // For information on DMALD, see PL330, section 4.3.7.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 1) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Verify instruction modifier; construct bs, x mask value.
+ uint8_t bsx_mask = 0;
+ switch (mod)
+ {
+ case ALT_DMA_PROGRAM_INST_MOD_NONE:
+ bsx_mask = 0x0;
+ break;
+ case ALT_DMA_PROGRAM_INST_MOD_SINGLE:
+ bsx_mask = 0x1;
+ break;
+ case ALT_DMA_PROGRAM_INST_MOD_BURST:
+ bsx_mask = 0x3;
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMALD
+ buffer[0] = 0x04 | bsx_mask;
+
+ // Update the code size.
+ pgm->code_size += 1;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMALDP(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_PROGRAM_INST_MOD_t mod, ALT_DMA_PERIPH_t periph)
+{
+ // For information on DMALDP, see PL330, section 4.3.8.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 2) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Verify instruction modifier; construct bs mask value.
+ uint8_t bs_mask = 0;
+ switch (mod)
+ {
+ case ALT_DMA_PROGRAM_INST_MOD_SINGLE:
+ bs_mask = 0x0;
+ break;
+ case ALT_DMA_PROGRAM_INST_MOD_BURST:
+ bs_mask = 0x2;
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Verify valid peripheral identifier.
+ if (periph > ((1 << 5) - 1))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMALDP
+ buffer[0] = 0x25 | bs_mask;
+ buffer[1] = (uint8_t)(periph) << 3;
+
+ // Update the code size.
+ pgm->code_size += 2;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMALP(ALT_DMA_PROGRAM_t * pgm,
+ uint32_t iterations)
+{
+ // For information on DMALP, see PL330, section 4.3.9.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 2) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Verify iterations in range
+ if ((iterations == 0) || (iterations > 256))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ // Find suitable LOOPx register to use; construct lc mask value.
+ uint8_t lc_mask = 0;
+ switch (pgm->flag & ALT_DMA_PROGRAM_FLAG_LOOP_ALL)
+ {
+ case 0: // No LOOPx in use. Use LOOP0.
+ pgm->flag |= ALT_DMA_PROGRAM_FLAG_LOOP0;
+ pgm->loop0 = pgm->code_size + 2; // This is the first instruction after the DMALP
+ lc_mask = 0x0;
+ break;
+
+ case ALT_DMA_PROGRAM_FLAG_LOOP0: // LOOP0 in use. Use LOOP1.
+ pgm->flag |= ALT_DMA_PROGRAM_FLAG_LOOP1;
+ pgm->loop1 = pgm->code_size + 2; // This is the first instruction after the DMALP
+ lc_mask = 0x2;
+ break;
+
+ case ALT_DMA_PROGRAM_FLAG_LOOP_ALL: // All LOOPx in use. Report error.
+ return ALT_E_BAD_OPERATION;
+
+ default: // Catastrophic error !!!
+ return ALT_E_ERROR;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMALP
+ buffer[0] = 0x20 | lc_mask;
+ buffer[1] = (uint8_t)(iterations - 1);
+
+ // Update the code size.
+ pgm->code_size += 2;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMALPEND(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_PROGRAM_INST_MOD_t mod)
+{
+ // For information on DMALPEND, see PL330, section 4.3.10.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 2) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Verify instruction modifier; construct bs, x mask value.
+ uint8_t bsx_mask = 0;
+ switch (mod)
+ {
+ case ALT_DMA_PROGRAM_INST_MOD_NONE:
+ bsx_mask = 0x0;
+ break;
+ case ALT_DMA_PROGRAM_INST_MOD_SINGLE:
+ bsx_mask = 0x1;
+ break;
+ case ALT_DMA_PROGRAM_INST_MOD_BURST:
+ bsx_mask = 0x3;
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+    // Determine which loop to end and whether it is a forever loop; construct the lc mask, nf mask, and backwards jump value.
+ uint8_t lc_mask = 0;
+ uint8_t nf_mask = 0;
+ uint16_t backwards_jump = 0;
+ switch (pgm->flag & ALT_DMA_PROGRAM_FLAG_LOOP_ALL)
+ {
+ case ALT_DMA_PROGRAM_FLAG_LOOP0: // LOOP0 in use. End LOOP0.
+
+ backwards_jump = pgm->code_size - pgm->loop0;
+
+ pgm->flag &= ~ALT_DMA_PROGRAM_FLAG_LOOP0;
+ pgm->loop0 = 0;
+
+ lc_mask = 0x0;
+
+ if (pgm->flag & ALT_DMA_PROGRAM_FLAG_LOOP0_IS_FE)
+ {
+ pgm->flag &= ~ALT_DMA_PROGRAM_FLAG_LOOP0_IS_FE;
+ }
+ else
+ {
+ nf_mask = 0x10;
+ }
+ break;
+
+ case ALT_DMA_PROGRAM_FLAG_LOOP_ALL: // All LOOPx in use. End LOOP1.
+
+ backwards_jump = pgm->code_size - pgm->loop1;
+
+ pgm->flag &= ~ALT_DMA_PROGRAM_FLAG_LOOP1;
+ pgm->loop1 = 0;
+
+ lc_mask = 0x4;
+
+ if (pgm->flag & ALT_DMA_PROGRAM_FLAG_LOOP1_IS_FE)
+ {
+ pgm->flag &= ~ALT_DMA_PROGRAM_FLAG_LOOP1_IS_FE;
+ }
+ else
+ {
+ nf_mask = 0x10;
+ }
+ break;
+
+ case 0: // No LOOPx in use. Report error!
+ return ALT_E_BAD_OPERATION;
+
+ default: // Catastrophic error !!!
+ return ALT_E_ERROR;
+ }
+
+ // Verify that the jump size is suitable
+ if (backwards_jump > 255)
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMALPEND
+ buffer[0] = 0x28 | nf_mask | lc_mask | bsx_mask;
+ buffer[1] = (uint8_t)(backwards_jump);
+
+ // Update the code size.
+ pgm->code_size += 2;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMALPFE(ALT_DMA_PROGRAM_t * pgm)
+{
+ // For information on DMALPFE, see PL330, section 4.3.11.
+
+ // Find suitable LOOPx register to use;
+ switch (pgm->flag & ALT_DMA_PROGRAM_FLAG_LOOP_ALL)
+ {
+ case 0: // No LOOPx in use. Use LOOP0.
+ pgm->flag |= ALT_DMA_PROGRAM_FLAG_LOOP0;
+ pgm->flag |= ALT_DMA_PROGRAM_FLAG_LOOP0_IS_FE;
+ pgm->loop0 = pgm->code_size;
+ break;
+
+ case ALT_DMA_PROGRAM_FLAG_LOOP0: // LOOP0 in use. Use LOOP1.
+ pgm->flag |= ALT_DMA_PROGRAM_FLAG_LOOP1;
+ pgm->flag |= ALT_DMA_PROGRAM_FLAG_LOOP1_IS_FE;
+ pgm->loop1 = pgm->code_size;
+ break;
+
+ case ALT_DMA_PROGRAM_FLAG_LOOP_ALL: // All LOOPx in use. Report error.
+ return ALT_E_BAD_OPERATION;
+
+ default: // Catastrophic error !!!
+ return ALT_E_ERROR;
+ }
+
+ // Nothing to assemble.
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMAMOV(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_PROGRAM_REG_t chan_reg, uint32_t val)
+{
+ // For information on DMAMOV, see PL330, section 4.3.12.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 6) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Verify channel register; construct rd mask value
+ uint8_t rd_mask = 0;
+ switch (chan_reg)
+ {
+ case ALT_DMA_PROGRAM_REG_SAR:
+ rd_mask = 0;
+ // If SAR has not been set before, mark the location of where SAR is in the buffer.
+ if (!(pgm->flag & ALT_DMA_PROGRAM_FLAG_SAR))
+ {
+ pgm->flag |= ALT_DMA_PROGRAM_FLAG_SAR;
+ pgm->sar = pgm->code_size + 2;
+ }
+ break;
+
+ case ALT_DMA_PROGRAM_REG_CCR:
+ rd_mask = 1;
+ break;
+
+ case ALT_DMA_PROGRAM_REG_DAR:
+ rd_mask = 2;
+ // If DAR has not been set before, mark the location of where DAR is in the buffer.
+ if (!(pgm->flag & ALT_DMA_PROGRAM_FLAG_DAR))
+ {
+ pgm->flag |= ALT_DMA_PROGRAM_FLAG_DAR;
+ pgm->dar = pgm->code_size + 2;
+ }
+ break;
+
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMAMOV
+    buffer[0] = 0xbc;
+ buffer[1] = rd_mask;
+ buffer[2] = (uint8_t)((val >> 0) & 0xff);
+ buffer[3] = (uint8_t)((val >> 8) & 0xff);
+ buffer[4] = (uint8_t)((val >> 16) & 0xff);
+ buffer[5] = (uint8_t)((val >> 24) & 0xff);
+
+ // Update the code size.
+ pgm->code_size += 6;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMANOP(ALT_DMA_PROGRAM_t * pgm)
+{
+ // For information on DMANOP, see PL330, section 4.3.13.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 1) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMANOP
+ buffer[0] = 0x18;
+
+ // Update the code size.
+ pgm->code_size += 1;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMARMB(ALT_DMA_PROGRAM_t * pgm)
+{
+ // For information on DMARMB, see PL330, section 4.3.14.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 1) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMARMB
+ buffer[0] = 0x12;
+
+ // Update the code size.
+ pgm->code_size += 1;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMASEV(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_EVENT_t evt)
+{
+    // For information on DMASEV, see PL330, section 4.3.15.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 2) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Validate evt selection
+ switch (evt)
+ {
+ case ALT_DMA_EVENT_0:
+ case ALT_DMA_EVENT_1:
+ case ALT_DMA_EVENT_2:
+ case ALT_DMA_EVENT_3:
+ case ALT_DMA_EVENT_4:
+ case ALT_DMA_EVENT_5:
+ case ALT_DMA_EVENT_6:
+ case ALT_DMA_EVENT_7:
+ case ALT_DMA_EVENT_ABORT:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMASEV
+ buffer[0] = 0x34;
+ buffer[1] = (uint8_t)(evt) << 3;
+
+ // Update the code size.
+ pgm->code_size += 2;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMAST(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_PROGRAM_INST_MOD_t mod)
+{
+ // For information on DMAST, see PL330, section 4.3.16.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 1) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Verify instruction modifier; construct bs, x mask value.
+ uint8_t bsx_mask = 0;
+ switch (mod)
+ {
+ case ALT_DMA_PROGRAM_INST_MOD_NONE:
+ bsx_mask = 0x0;
+ break;
+ case ALT_DMA_PROGRAM_INST_MOD_SINGLE:
+ bsx_mask = 0x1;
+ break;
+ case ALT_DMA_PROGRAM_INST_MOD_BURST:
+ bsx_mask = 0x3;
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMAST
+ buffer[0] = 0x08 | bsx_mask;
+
+ // Update the code size.
+ pgm->code_size += 1;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMASTP(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_PROGRAM_INST_MOD_t mod, ALT_DMA_PERIPH_t periph)
+{
+ // For information on DMASTP, see PL330, section 4.3.17.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 2) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Verify instruction modifier; construct bs mask value.
+ uint8_t bs_mask = 0;
+ switch (mod)
+ {
+ case ALT_DMA_PROGRAM_INST_MOD_SINGLE:
+ bs_mask = 0x0;
+ break;
+ case ALT_DMA_PROGRAM_INST_MOD_BURST:
+ bs_mask = 0x2;
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Verify valid peripheral identifier.
+ if (periph > ((1 << 5) - 1))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMASTP
+ buffer[0] = 0x29 | bs_mask;
+ buffer[1] = (uint8_t)(periph) << 3;
+
+ // Update the code size.
+ pgm->code_size += 2;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMASTZ(ALT_DMA_PROGRAM_t * pgm)
+{
+ // For information on DMASTZ, see PL330, section 4.3.18.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 1) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMASTZ
+ buffer[0] = 0x0c;
+
+ // Update the code size.
+ pgm->code_size += 1;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMAWFE(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_EVENT_t evt, bool invalid)
+{
+ // For information on DMAWFE, see PL330, section 4.3.19.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 2) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Validate evt selection
+ switch (evt)
+ {
+ case ALT_DMA_EVENT_0:
+ case ALT_DMA_EVENT_1:
+ case ALT_DMA_EVENT_2:
+ case ALT_DMA_EVENT_3:
+ case ALT_DMA_EVENT_4:
+ case ALT_DMA_EVENT_5:
+ case ALT_DMA_EVENT_6:
+ case ALT_DMA_EVENT_7:
+ case ALT_DMA_EVENT_ABORT:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Construct i mask value
+ uint8_t i_mask = 0;
+ if (invalid)
+ {
+ i_mask = 0x2;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMAWFE
+ buffer[0] = 0x36;
+ buffer[1] = ((uint8_t)(evt) << 3) | i_mask;
+
+ // Update the code size.
+ pgm->code_size += 2;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMAWFP(ALT_DMA_PROGRAM_t * pgm,
+ ALT_DMA_PERIPH_t periph, ALT_DMA_PROGRAM_INST_MOD_t mod)
+{
+ // For information on DMAWFP, see PL330, section 4.3.20.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 2) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Verify valid peripheral identifier.
+ if (periph > ((1 << 5) - 1))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ // Verify instruction modifier; construct bs, p mask value.
+ uint8_t bsp_mask = 0;
+ switch (mod)
+ {
+ case ALT_DMA_PROGRAM_INST_MOD_SINGLE:
+ bsp_mask = 0x0;
+ break;
+ case ALT_DMA_PROGRAM_INST_MOD_BURST:
+ bsp_mask = 0x2;
+ break;
+ case ALT_DMA_PROGRAM_INST_MOD_PERIPH:
+ bsp_mask = 0x1;
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMAWFP
+ buffer[0] = 0x30 | bsp_mask;
+ buffer[1] = (uint8_t)(periph) << 3;
+
+ // Update the code size.
+ pgm->code_size += 2;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_dma_program_DMAWMB(ALT_DMA_PROGRAM_t * pgm)
+{
+ // For information on DMAWMB, see PL330, section 4.3.21.
+
+ // Check for sufficient space in buffer
+ if ((pgm->code_size + 1) > ALT_DMA_PROGRAM_PROVISION_BUFFER_SIZE)
+ {
+ return ALT_E_BUF_OVF;
+ }
+
+ // Buffer of where to assemble the instruction.
+ uint8_t * buffer = pgm->program + pgm->buffer_start + pgm->code_size;
+
+ // Assemble DMAWMB
+ buffer[0] = 0x13;
+
+ // Update the code size.
+ pgm->code_size += 1;
+
+ return ALT_E_SUCCESS;
+}
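
Taken together, the helpers in this file form a small assembler for PL330 channel programs. A sketch of how a caller might assemble a looped memory-to-memory copy, not part of hwlib: pgm is assumed to be already initialized by this file's program-init routine (not shown in this excerpt), src and dst are hypothetical buffers, and ccr_value is a hypothetical, pre-computed CCR encoding for the desired burst/size settings; error checking is omitted for brevity.

    alt_dma_program_DMAMOV(&pgm, ALT_DMA_PROGRAM_REG_SAR, (uint32_t)src);
    alt_dma_program_DMAMOV(&pgm, ALT_DMA_PROGRAM_REG_DAR, (uint32_t)dst);
    alt_dma_program_DMAMOV(&pgm, ALT_DMA_PROGRAM_REG_CCR, ccr_value);
    alt_dma_program_DMALP(&pgm, 16);                               /* 16 iterations       */
    alt_dma_program_DMALD(&pgm, ALT_DMA_PROGRAM_INST_MOD_NONE);    /* unconditional load  */
    alt_dma_program_DMAST(&pgm, ALT_DMA_PROGRAM_INST_MOD_NONE);    /* unconditional store */
    alt_dma_program_DMALPEND(&pgm, ALT_DMA_PROGRAM_INST_MOD_NONE); /* close the loop      */
    alt_dma_program_DMAWMB(&pgm);                                  /* drain writes        */
    alt_dma_program_DMASEV(&pgm, ALT_DMA_EVENT_0);                 /* signal completion   */
    alt_dma_program_DMAEND(&pgm);                                  /* end the thread      */
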
diff --git a/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_generalpurpose_io.c b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_generalpurpose_io.c
new file mode 100644
index 0000000000..b52e5002a3
--- /dev/null
+++ b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_generalpurpose_io.c
@@ -0,0 +1,777 @@
+/******************************************************************************
+*
+* Copyright 2013 Altera Corporation. All Rights Reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+* 1. Redistributions of source code must retain the above copyright notice,
+* this list of conditions and the following disclaimer.
+*
+* 2. Redistributions in binary form must reproduce the above copyright notice,
+* this list of conditions and the following disclaimer in the documentation
+* and/or other materials provided with the distribution.
+*
+* 3. The name of the author may not be used to endorse or promote products
+* derived from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO
+* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+* OF SUCH DAMAGE.
+*
+******************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+#include <bsp/socal/hps.h>
+#include <bsp/socal/socal.h>
+#include <bsp/socal/alt_gpio.h>
+#include <bsp/socal/alt_rstmgr.h>
+#include <bsp/hwlib.h>
+#include <bsp/alt_generalpurpose_io.h>
+
+
+/****************************************************************************************/
+/******************************* Useful local definitions *******************************/
+/****************************************************************************************/
+
+#define ALT_GPIO_EOPA ALT_GPIO_1BIT_28
+#define ALT_GPIO_EOPB ALT_GPIO_1BIT_57
+#define ALT_GPIO_EOPC ALT_HLGPI_15
+#define ALT_GPIO_BITMASK 0x1FFFFFFF
+
+ // expands the zero or one bit to the 29-bit GPIO word
+#define ALT_GPIO_ALLORNONE(tst) ((uint32_t) ((tst == 0) ? 0 : ALT_GPIO_BITMASK))
+
+
+/****************************************************************************************/
+/* alt_gpio_init() initializes the GPIO modules */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_init(void)
+{
+ // put GPIO modules into system manager reset if not already there
+ alt_gpio_uninit();
+ // release GPIO modules from system reset (w/ two-instruction delay)
+ alt_replbits_word(ALT_RSTMGR_PERMODRST_ADDR, ALT_RSTMGR_PERMODRST_GPIO0_SET_MSK |
+ ALT_RSTMGR_PERMODRST_GPIO1_SET_MSK |
+ ALT_RSTMGR_PERMODRST_GPIO2_SET_MSK, 0);
+ return ALT_E_SUCCESS;
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_uninit() uninitializes the GPIO modules */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_uninit(void)
+{
+ // put all GPIO modules into system manager reset
+ alt_replbits_word(ALT_RSTMGR_PERMODRST_ADDR, ALT_RSTMGR_PERMODRST_GPIO0_SET_MSK |
+ ALT_RSTMGR_PERMODRST_GPIO1_SET_MSK |
+ ALT_RSTMGR_PERMODRST_GPIO2_SET_MSK,
+ ALT_GPIO_BITMASK);
+ return ALT_E_SUCCESS;
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_port_datadir_set() sets the specified GPIO data bits to use the data */
+/* direction(s) specified. 0 = input (default). 1 = output. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_port_datadir_set(ALT_GPIO_PORT_t gpio_pid,
+ uint32_t mask, uint32_t config)
+{
+ volatile uint32_t *addr;
+
+ if ((mask & ~ALT_GPIO_BITMASK) || (config & ~ALT_GPIO_BITMASK)) { return ALT_E_ERROR; }
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_SWPORTA_DDR_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_SWPORTA_DDR_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_SWPORTA_DDR_ADDR; }
+ else { return ALT_E_BAD_ARG; }
+
+ alt_replbits_word(addr, mask, config);
+ return ALT_E_SUCCESS;
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_port_datadir_get() returns the data direction configuration of selected */
+/* bits of the designated GPIO module. */
+/****************************************************************************************/
+
+uint32_t alt_gpio_port_datadir_get(ALT_GPIO_PORT_t gpio_pid,
+ uint32_t mask)
+{
+ volatile uint32_t *addr;
+
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_SWPORTA_DDR_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_SWPORTA_DDR_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_SWPORTA_DDR_ADDR; }
+ else { return 0; }
+
+ return alt_read_word(addr) & mask;
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_port_data_write() sets the GPIO data outputs of the specified GPIO module */
+/* to a one or zero. Actual outputs are only set if the data direction for that bit(s) */
+/* has previously been set to configure them as output(s). */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_port_data_write(ALT_GPIO_PORT_t gpio_pid,
+ uint32_t mask, uint32_t val)
+{
+ volatile uint32_t *addr;
+
+ if ((mask & ~ALT_GPIO_BITMASK) || (val & ~ALT_GPIO_BITMASK)) { return ALT_E_ERROR; }
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_SWPORTA_DR_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_SWPORTA_DR_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_SWPORTA_DR_ADDR; }
+ else { return ALT_E_BAD_ARG; }
+
+ alt_replbits_word(addr, mask, val);
+ return ALT_E_SUCCESS;
+}
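
As noted above, a write only reaches pins whose data direction is already set to output. A minimal sketch, not part of hwlib, driving a single (arbitrarily chosen) bit of port B high:

    uint32_t mask = 1u << 5;                                /* hypothetical pin choice */
    alt_gpio_port_datadir_set(ALT_GPIO_PORTB, mask, mask);  /* 1 = output              */
    alt_gpio_port_data_write(ALT_GPIO_PORTB, mask, mask);   /* drive the pin high      */
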
+
+
+/****************************************************************************************/
+/* alt_gpio_port_data_read() returns the value of the data inputs of the specified */
+/* GPIO module. Data direction for these bits must have been previously set to inputs. */
+/****************************************************************************************/
+
+#if (!ALT_GPIO_DATAREAD_TEST_MODE)
+ /* This is the production code version. For software unit testing, set the */
+ /* ALT_GPIO_DATAREAD_TEST_MODE flag to true in the makefile, which will compile */
+ /* the GPIO test software version of alt_gpio_port_data_read() instead. */
+
+uint32_t alt_gpio_port_data_read(ALT_GPIO_PORT_t gpio_pid, uint32_t mask)
+{
+ volatile uint32_t *addr;
+
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_EXT_PORTA_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_EXT_PORTA_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_EXT_PORTA_ADDR; }
+ else { return 0; }
+
+ return alt_read_word(addr) & mask;
+}
+#endif
+
+
+/****************************************************************************************/
+/* alt_gpio_port_int_type_set() sets selected signals of the specified GPIO port to */
+/* be either level-sensitive ( =0) or edge-triggered ( =1). */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_port_int_type_set(ALT_GPIO_PORT_t gpio_pid,
+ uint32_t mask, uint32_t config)
+{
+ volatile uint32_t *addr;
+
+ if ((mask & ~ALT_GPIO_BITMASK) || (config & ~ALT_GPIO_BITMASK)) { return ALT_E_ERROR; }
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_INTTYPE_LEVEL_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_INTTYPE_LEVEL_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_INTTYPE_LEVEL_ADDR; }
+ else { return ALT_E_BAD_ARG; }
+
+ alt_replbits_word(addr, mask, config);
+ return ALT_E_SUCCESS;
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_port_int_type_get() returns the interrupt configuration (edge-triggered or */
+/* level-triggered) for the specified signals of the specified GPIO module. */
+/****************************************************************************************/
+
+uint32_t alt_gpio_port_int_type_get(ALT_GPIO_PORT_t gpio_pid,
+ uint32_t mask)
+{
+ volatile uint32_t *addr;
+
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_INTTYPE_LEVEL_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_INTTYPE_LEVEL_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_INTTYPE_LEVEL_ADDR; }
+ else { return 0; }
+
+ return alt_read_word(addr) & mask;
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_port_int_pol_set() sets the interrupt polarity of the signals of the */
+/* specified GPIO register (when used as inputs) to active-low ( =0) or active-high     */
+/* ( =1).                                                                               */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_port_int_pol_set(ALT_GPIO_PORT_t gpio_pid,
+ uint32_t mask, uint32_t config)
+{
+ volatile uint32_t *addr;
+
+ if ((mask & ~ALT_GPIO_BITMASK) || (config & ~ALT_GPIO_BITMASK)) { return ALT_E_ERROR; }
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_INT_POL_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_INT_POL_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_INT_POL_ADDR; }
+ else { return ALT_E_BAD_ARG; }
+
+ alt_replbits_word(addr, mask, config);
+ return ALT_E_SUCCESS;
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_port_int_pol_get() returns the active-high or active-low polarity */
+/* configuration for the possible interrupt sources of the specified GPIO module. */
+/* 0 = The interrupt polarity for this bit is set to active-low mode. 1 = The */
+/* interrupt polarity for this bit is set to active-high mode.                       */
+/****************************************************************************************/
+
+uint32_t alt_gpio_port_int_pol_get(ALT_GPIO_PORT_t gpio_pid,
+ uint32_t mask)
+{
+ volatile uint32_t *addr;
+
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_INT_POL_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_INT_POL_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_INT_POL_ADDR; }
+ else { return 0; }
+
+ return alt_read_word(addr) & mask;
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_port_debounce_set() sets the debounce configuration for input signals of */
+/* the specified GPIO module. 0 - Debounce is not selected for this signal (default). */
+/* 1 - Debounce is selected for this signal. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_port_debounce_set(ALT_GPIO_PORT_t gpio_pid,
+ uint32_t mask, uint32_t config)
+{
+ volatile uint32_t *addr;
+
+ if ((mask & ~ALT_GPIO_BITMASK) || (config & ~ALT_GPIO_BITMASK)) { return ALT_E_ERROR; }
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_DEBOUNCE_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_DEBOUNCE_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_DEBOUNCE_ADDR; }
+ else { return ALT_E_BAD_ARG; }
+
+ alt_replbits_word(addr, mask, config);
+ return ALT_E_SUCCESS;
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_port_debounce_get() returns the debounce configuration for the input */
+/* signals of the specified GPIO register. 0 - Debounce is not selected for this */
+/* signal. 1 - Debounce is selected for this signal. */
+/****************************************************************************************/
+
+uint32_t alt_gpio_port_debounce_get(ALT_GPIO_PORT_t gpio_pid,
+ uint32_t mask)
+{
+ volatile uint32_t *addr;
+
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_DEBOUNCE_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_DEBOUNCE_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_DEBOUNCE_ADDR; }
+ else { return 0; }
+
+ return alt_read_word(addr) & mask;
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_port_sync_set() sets the synchronization configuration for the signals of */
+/* the specified GPIO register. This allows for synchronizing level-sensitive */
+/* interrupts to the internal clock signal. This is a port-wide option that controls */
+/* all level-sensitive interrupt signals of that GPIO port. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_port_sync_set(ALT_GPIO_PORT_t gpio_pid, uint32_t config)
+{
+ volatile uint32_t *addr;
+
+ config = (config != 0) ? 1 : 0;
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_LS_SYNC_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_LS_SYNC_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_LS_SYNC_ADDR; }
+ else { return ALT_E_BAD_ARG; }
+
+ alt_write_word(addr, config);
+ return ALT_E_SUCCESS;
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_port_sync_get() returns the synchronization configuration for the signals */
+/* of the specified GPIO register. This allows for synchronizing level-sensitive */
+/* interrupts to the internal clock signal. This is a port-wide option that controls */
+/* all level-sensitive interrupt signals of that GPIO port. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_port_sync_get(ALT_GPIO_PORT_t gpio_pid)
+{
+ volatile uint32_t *addr;
+
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_LS_SYNC_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_LS_SYNC_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_LS_SYNC_ADDR; }
+ else { return ALT_E_BAD_ARG; } // error
+
+ return (alt_read_word(addr) != 0) ? ALT_E_TRUE : ALT_E_FALSE;
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_port_config() configures a group of GPIO signals with the same parameters. */
+/* Allows for configuring all parameters of a given port at one time. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_port_config(ALT_GPIO_PORT_t gpio_pid,
+ uint32_t mask, ALT_GPIO_PIN_DIR_t dir, ALT_GPIO_PIN_TYPE_t type,
+ ALT_GPIO_PIN_POL_t pol, ALT_GPIO_PIN_DEBOUNCE_t debounc,
+ uint32_t data)
+{
+ ALT_STATUS_CODE ret;
+
+ // set all affected GPIO bits to inputs
+ ret = alt_gpio_port_datadir_set(gpio_pid, mask, ALT_GPIO_ALLORNONE(ALT_GPIO_PIN_INPUT));
+ // the ALT_GPIO_ALLORNONE() macro expands the zero or one bit to the 29-bit GPIO word
+
+ // set trigger type
+ if (ret == ALT_E_SUCCESS)
+ {
+ ret = alt_gpio_port_int_type_set(gpio_pid, mask, ALT_GPIO_ALLORNONE(type));
+ }
+
+ // set polarity
+ if (ret == ALT_E_SUCCESS)
+ {
+ alt_gpio_port_int_pol_set(gpio_pid, mask, ALT_GPIO_ALLORNONE(pol));
+ }
+
+ // set debounce
+ if (ret == ALT_E_SUCCESS)
+ {
+ alt_gpio_port_debounce_set(gpio_pid, mask, ALT_GPIO_ALLORNONE(debounc));
+ }
+
+ // set data output(s)
+ if (ret == ALT_E_SUCCESS)
+ {
+ alt_gpio_port_data_write(gpio_pid, mask, ALT_GPIO_ALLORNONE(data));
+ }
+
+ if (ret == ALT_E_SUCCESS)
+ {
+ // set data direction of one or more bits to select output
+ ret = alt_gpio_port_datadir_set(gpio_pid, mask, ALT_GPIO_ALLORNONE(dir));
+ }
+
+ return ret;
+}
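
A minimal sketch of the call, not part of hwlib, configuring the low four bits of port A as outputs driving one (the remaining parameters are benign defaults):

    alt_gpio_port_config(ALT_GPIO_PORTA, 0x0000000F,
                         ALT_GPIO_PIN_OUTPUT, ALT_GPIO_PIN_LEVEL_TRIG_INT,
                         ALT_GPIO_PIN_ACTIVE_HIGH, ALT_GPIO_PIN_NODEBOUNCE,
                         ALT_GPIO_PIN_DATAONE);
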
+
+
+/****************************************************************************************/
+/* Enables the specified GPIO data register interrupts. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_port_int_enable(ALT_GPIO_PORT_t gpio_pid, uint32_t config)
+{
+ volatile uint32_t *addr;
+
+ if (config & ~ALT_GPIO_BITMASK) { return ALT_E_ERROR; }
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_INTEN_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_INTEN_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_INTEN_ADDR; }
+ else { return ALT_E_BAD_ARG; }
+
+ alt_replbits_word(addr, config, UINT32_MAX);
+ return ALT_E_SUCCESS;
+}
+
+
+/****************************************************************************************/
+/* Disables the specified GPIO data module interrupts. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_port_int_disable(ALT_GPIO_PORT_t gpio_pid, uint32_t config)
+{
+ volatile uint32_t *addr;
+
+ if (config & ~ALT_GPIO_BITMASK) { return ALT_E_ERROR; }
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_INTEN_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_INTEN_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_INTEN_ADDR; }
+ else { return ALT_E_BAD_ARG; }
+
+ alt_replbits_word(addr, config, 0);
+ return ALT_E_SUCCESS;
+}
+
+
+
+/****************************************************************************************/
+/* Get the current state of the interrupt enables for the specified GPIO port.         */
+/****************************************************************************************/
+
+uint32_t alt_gpio_port_int_enable_get(ALT_GPIO_PORT_t gpio_pid)
+{
+ volatile uint32_t *addr;
+
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_INTEN_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_INTEN_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_INTEN_ADDR; }
+ else { return 0; }
+
+ return alt_read_word(addr);
+}
+
+
+/****************************************************************************************/
+/* Masks or unmasks selected interrupt source bits of the data register of the */
+/* specified GPIO module. Uses a second bit mask to determine which signals may be */
+/* changed by this call. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_port_int_mask_set(ALT_GPIO_PORT_t gpio_pid,
+ uint32_t mask, uint32_t val)
+{
+ volatile uint32_t *addr;
+
+ if ((mask & ~ALT_GPIO_BITMASK) || (val & ~ALT_GPIO_BITMASK)) { return ALT_E_ERROR; }
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_INTMSK_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_INTMSK_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_INTMSK_ADDR; }
+ else { return ALT_E_BAD_ARG; } // argument error
+
+ alt_replbits_word(addr, mask, val);
+ return ALT_E_SUCCESS;
+}
+
+
+/****************************************************************************************/
+/* Returns the interrupt source mask of the specified GPIO module. */
+/****************************************************************************************/
+
+uint32_t alt_gpio_port_int_mask_get(ALT_GPIO_PORT_t gpio_pid)
+{
+ volatile uint32_t *addr;
+
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_INTMSK_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_INTMSK_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_INTMSK_ADDR; }
+ else { return 0; } // error
+
+ return alt_read_word(addr);
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_port_int_status_get() returns the interrupt pending status of all signals */
+/* of the specified GPIO register. */
+/****************************************************************************************/
+
+uint32_t alt_gpio_port_int_status_get(ALT_GPIO_PORT_t gpio_pid)
+{
+ volatile uint32_t *addr;
+
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_INTSTAT_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_INTSTAT_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_INTSTAT_ADDR; }
+ else { return 0; } // error
+
+ return alt_read_word(addr);
+}
+
+
+/****************************************************************************************/
+/* Clear the interrupt pending status of selected signals of the specified GPIO */
+/* register. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_port_int_status_clear(ALT_GPIO_PORT_t gpio_pid,
+ uint32_t clrmask)
+{
+ volatile uint32_t *addr;
+
+ if (clrmask & ~ALT_GPIO_BITMASK) { return ALT_E_ERROR; }
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_INTSTAT_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_INTSTAT_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_INTSTAT_ADDR; }
+ else { return ALT_E_BAD_ARG; } // argument error
+
+ alt_write_word(addr, clrmask);
+ return ALT_E_SUCCESS;
+}
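
The interrupt helpers above are typically used together: select the trigger type and polarity, unmask and enable the source, then read and clear the pending status from the handler. A sketch under those assumptions, not part of hwlib, using an arbitrarily chosen bit of port A:

    uint32_t pin = 1u << 3;                               /* hypothetical pin choice */
    alt_gpio_port_int_type_set(ALT_GPIO_PORTA, pin, pin); /* 1 = edge-triggered      */
    alt_gpio_port_int_pol_set(ALT_GPIO_PORTA, pin, pin);  /* 1 = active-high         */
    alt_gpio_port_int_mask_set(ALT_GPIO_PORTA, pin, 0);   /* unmask the source       */
    alt_gpio_port_int_enable(ALT_GPIO_PORTA, pin);        /* enable the source       */

    /* Later, typically from the interrupt handler: */
    if (alt_gpio_port_int_status_get(ALT_GPIO_PORTA) & pin)
    {
        alt_gpio_port_int_status_clear(ALT_GPIO_PORTA, pin);
    }
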
+
+
+/****************************************************************************************/
+/* alt_gpio_port_idcode_get() returns the ID code of the specified GPIO module. */
+/****************************************************************************************/
+
+uint32_t alt_gpio_port_idcode_get(ALT_GPIO_PORT_t gpio_pid)
+{
+ volatile uint32_t *addr;
+
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_ID_CODE_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_ID_CODE_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_ID_CODE_ADDR; }
+ else { return 0; }
+
+ return alt_read_word(addr);
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_port_ver_get() returns the version code of the specified GPIO module. */
+/****************************************************************************************/
+
+uint32_t alt_gpio_port_ver_get(ALT_GPIO_PORT_t gpio_pid)
+{
+ volatile uint32_t *addr;
+
+ if (gpio_pid == ALT_GPIO_PORTA) { addr = ALT_GPIO0_VER_ID_CODE_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTB) { addr = ALT_GPIO1_VER_ID_CODE_ADDR; }
+ else if (gpio_pid == ALT_GPIO_PORTC) { addr = ALT_GPIO2_VER_ID_CODE_ADDR; }
+ else { return 0; }
+
+ return alt_read_word(addr);
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_bit_config() configures one bit (signal) of the GPIO ports. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_bit_config(ALT_GPIO_1BIT_t signal_num,
+ ALT_GPIO_PIN_DIR_t dir, ALT_GPIO_PIN_TYPE_t type,
+ ALT_GPIO_PIN_POL_t pol, ALT_GPIO_PIN_DEBOUNCE_t debounce,
+ ALT_GPIO_PIN_DATA_t data)
+{
+ ALT_GPIO_PORT_t pid;
+ uint32_t mask;
+
+ pid = alt_gpio_bit_to_pid(signal_num);
+ mask = 0x1 << alt_gpio_bit_to_port_pin(signal_num);
+ return alt_gpio_port_config(pid, mask, dir, type, pol, debounce, data);
+}
+
+
+/****************************************************************************************/
+/* Returns the configuration parameters of a given GPIO bit. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_bitconfig_get(ALT_GPIO_1BIT_t signal_num,
+ ALT_GPIO_CONFIG_RECORD_t *config)
+{
+ ALT_STATUS_CODE ret = ALT_E_ERROR;
+ ALT_GPIO_PORT_t pid;
+ uint32_t mask, shift;
+
+ if ((config != NULL) && (signal_num != ALT_END_OF_GPIO_SIGNALS) && (signal_num <= ALT_LAST_VALID_GPIO_BIT))
+ {
+ pid = alt_gpio_bit_to_pid(signal_num);
+ shift = alt_gpio_bit_to_port_pin(signal_num);
+ if ((pid != ALT_GPIO_PORT_UNKNOWN) && (shift <= ALT_GPIO_BIT_MAX))
+ {
+ config->signal_number = signal_num;
+ mask = 0x00000001 << shift;
+ config->direction = (alt_gpio_port_datadir_get(pid, mask) == 0) ? ALT_GPIO_PIN_INPUT : ALT_GPIO_PIN_OUTPUT;
+ config->type = (alt_gpio_port_int_type_get(pid, mask) == 0) ? ALT_GPIO_PIN_LEVEL_TRIG_INT : ALT_GPIO_PIN_EDGE_TRIG_INT;
+
+ // save the following data whatever the state of config->direction
+ config->polarity = (alt_gpio_port_int_pol_get(pid, mask) == 0) ? ALT_GPIO_PIN_ACTIVE_LOW : ALT_GPIO_PIN_ACTIVE_HIGH;
+ config->debounce = (alt_gpio_port_debounce_get(pid, mask) == 0) ? ALT_GPIO_PIN_NODEBOUNCE : ALT_GPIO_PIN_DEBOUNCE;
+ config->data = (alt_gpio_port_data_read(pid, mask) == 0) ? ALT_GPIO_PIN_DATAZERO : ALT_GPIO_PIN_DATAONE;
+ ret = ALT_E_SUCCESS;
+ }
+ }
+ return ret;
+}
+
+
+/****************************************************************************************/
+/* alt_gpio_group_config() configures a list of GPIO bits. The GPIO bits do not have */
+/* to be configured the same, as was the case for the mask version of this function, */
+/* alt_gpio_port_config(). Each bit may be configured differently and bits may be */
+/* listed in any order. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_group_config(ALT_GPIO_CONFIG_RECORD_t* config_array, uint32_t len)
+{
+ ALT_STATUS_CODE ret = ALT_E_ERROR;
+
+ if (config_array != NULL)
+ {
+ if (config_array->signal_number == ALT_END_OF_GPIO_SIGNALS) { ret = ALT_E_SUCCESS; }
+ // catches the condition where the pointers are good, but the
+ // first index is the escape character - which isn't an error
+ else
+ {
+ for (; (len-- > 0) && (config_array->signal_number != ALT_END_OF_GPIO_SIGNALS) && (config_array != NULL); config_array++)
+ {
+ ret = alt_gpio_bit_config(config_array->signal_number,
+ config_array->direction, config_array->type, config_array->polarity,
+ config_array->debounce, config_array->data);
+ if ((config_array->direction == ALT_GPIO_PIN_OUTPUT) && (ret == ALT_E_SUCCESS))
+ {
+ // if the pin is set to be an output, set it to the correct value
+ alt_gpio_port_data_write(alt_gpio_bit_to_pid(config_array->signal_number),
+ 0x1 << alt_gpio_bit_to_port_pin(config_array->signal_number),
+ ALT_GPIO_ALLORNONE(config_array->data));
+ // ret should retain the value returned by alt_gpio_bit_config() above
+ // and should not be changed by the alt_gpio_port_data_write() call.
+ }
+ if (((ret != ALT_E_SUCCESS) && (config_array->signal_number <= ALT_LAST_VALID_GPIO_BIT))
+ || ((ret == ALT_E_SUCCESS) && (config_array->signal_number > ALT_LAST_VALID_GPIO_BIT)))
+ {
+ ret = ALT_E_ERROR;
+ break;
+ }
+ }
+ }
+ }
+ return ret;
+}
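
A sketch of a caller-built configuration list, not part of hwlib. The two signal indices are arbitrary examples and the field names follow alt_gpio_bitconfig_get() above; the list ends with a record whose signal_number is ALT_END_OF_GPIO_SIGNALS.

    ALT_GPIO_CONFIG_RECORD_t recs[3];

    recs[0].signal_number = ALT_GPIO_1BIT_28;          /* output driving one */
    recs[0].direction     = ALT_GPIO_PIN_OUTPUT;
    recs[0].type          = ALT_GPIO_PIN_LEVEL_TRIG_INT;
    recs[0].polarity      = ALT_GPIO_PIN_ACTIVE_HIGH;
    recs[0].debounce      = ALT_GPIO_PIN_NODEBOUNCE;
    recs[0].data          = ALT_GPIO_PIN_DATAONE;

    recs[1].signal_number = ALT_GPIO_1BIT_57;          /* debounced input    */
    recs[1].direction     = ALT_GPIO_PIN_INPUT;
    recs[1].type          = ALT_GPIO_PIN_EDGE_TRIG_INT;
    recs[1].polarity      = ALT_GPIO_PIN_ACTIVE_HIGH;
    recs[1].debounce      = ALT_GPIO_PIN_DEBOUNCE;
    recs[1].data          = ALT_GPIO_PIN_DATAZERO;

    recs[2].signal_number = ALT_END_OF_GPIO_SIGNALS;   /* terminator         */

    alt_gpio_group_config(recs, 3);
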
+
+
+/****************************************************************************************/
+/* Returns a list of the pin signal indices and the associated configuration settings */
+/* (data direction, interrupt type, polarity, debounce, and synchronization) of that */
+/* list of signals. Only the signal indices in the first field of each configuration */
+/* record need be filled in. This function will fill in all the other fields of the */
+/* configuration record, returning all configuration parameters in the array. A signal */
+/* number index in the array equal to ALT_END_OF_GPIO_SIGNALS (-1) also terminates the */
+/* function. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_group_config_get(ALT_GPIO_CONFIG_RECORD_t *config_array,
+ uint32_t len)
+{
+ ALT_STATUS_CODE ret = ALT_E_ERROR;
+
+ if ((config_array != NULL) && (config_array->signal_number == ALT_END_OF_GPIO_SIGNALS))
+ {
+ ret = ALT_E_SUCCESS;
+ }
+ else
+ {
+ for ( ; (len > 0) && (config_array != NULL) && (config_array->signal_number != ALT_END_OF_GPIO_SIGNALS)
+ && (config_array->signal_number <= ALT_LAST_VALID_GPIO_BIT); len--)
+ {
+ ret = alt_gpio_bitconfig_get(config_array->signal_number, config_array);
+ config_array++;
+ if (ret != ALT_E_SUCCESS) { break; }
+ }
+ }
+ return ret;
+}
+
+/****************************************************************************************/
+/* Another way to return a configuration list. The difference between this version and */
+/* alt_gpio_group_config_get() is that this version follows a separate list of signal */
+/* indices instead of having the signal list provided in the first field of the */
+/* configuration records in the array. This function will fill in the fields of the */
+/* configuration record, returning all configuration parameters in the array. A signal */
+/* number index in the array equal to ALT_END_OF_GPIO_SIGNALS (-1) also terminates */
+/* operation. */
+/****************************************************************************************/
+
+ALT_STATUS_CODE alt_gpio_group_config_get2(ALT_GPIO_1BIT_t* pinid_array,
+ ALT_GPIO_CONFIG_RECORD_t *config_array, uint32_t len)
+{
+ ALT_STATUS_CODE ret = ALT_E_ERROR;
+
+ if ((config_array != NULL) && (pinid_array != NULL) && (*pinid_array == ALT_END_OF_GPIO_SIGNALS))
+ {
+ ret = ALT_E_SUCCESS;
+ // catches the condition where the pointers are good, but the
+ // first index is the escape character - which isn't an error
+ }
+ else
+ {
+ for ( ;(len > 0) && (pinid_array != NULL) && (*pinid_array != ALT_END_OF_GPIO_SIGNALS) && (config_array != NULL); len--)
+ {
+ ret = alt_gpio_bitconfig_get(*pinid_array, config_array);
+ config_array++;
+ pinid_array++;
+ if (ret != ALT_E_SUCCESS) { break; }
+ }
+ }
+ return ret;
+}
+
+
+/****************************************************************************************/
+/* A useful utility function. Extracts the GPIO port ID from the supplied GPIO Signal */
+/* Index Number. */
+/****************************************************************************************/
+
+ALT_GPIO_PORT_t alt_gpio_bit_to_pid(ALT_GPIO_1BIT_t pin_num)
+{
+ ALT_GPIO_PORT_t pid = ALT_GPIO_PORT_UNKNOWN;
+
+ if (pin_num <= ALT_GPIO_EOPA) { pid = ALT_GPIO_PORTA; }
+ else if (pin_num <= ALT_GPIO_EOPB) { pid = ALT_GPIO_PORTB; }
+ else if (pin_num <= ALT_GPIO_EOPC) { pid = ALT_GPIO_PORTC; }
+ return pid;
+}
+
+
+/****************************************************************************************/
+/* A useful utility function. Extracts the GPIO signal (pin) mask from the supplied */
+/* GPIO Signal Index Number. */
+/****************************************************************************************/
+
+ALT_GPIO_PORTBIT_t alt_gpio_bit_to_port_pin(ALT_GPIO_1BIT_t pin_num)
+{
+ if (pin_num <= ALT_GPIO_EOPA) {}
+ else if (pin_num <= ALT_GPIO_EOPB) { pin_num -= (ALT_GPIO_EOPA + 1); }
+ else if (pin_num <= ALT_GPIO_EOPC) { pin_num -= (ALT_GPIO_EOPB + 1); }
+ else { return ALT_END_OF_GPIO_PORT_SIGNALS; }
+ return (ALT_GPIO_PORTBIT_t) pin_num;
+}
+
+
+/****************************************************************************************/
+/* A useful utility function. Extracts the GPIO Signal Index Number from the supplied */
+/* GPIO port ID and signal mask. If passed a bitmask composed of more than one signal, */
+/* the signal number of the lowest bitmask presented is returned. */
+/****************************************************************************************/
+
+ALT_GPIO_1BIT_t alt_gpio_port_pin_to_bit(ALT_GPIO_PORT_t pid,
+ uint32_t bitmask)
+{
+ uint32_t i;
+
+ for (i=0; i <= ALT_GPIO_BITNUM_MAX ;i++)
+ {
+ if (bitmask & 0x00000001)
+ {
+ if (pid == ALT_GPIO_PORTA) {}
+ else if (pid == ALT_GPIO_PORTB) { i += ALT_GPIO_EOPA + 1; }
+ else if (pid == ALT_GPIO_PORTC) { i += ALT_GPIO_EOPB + 1; }
+ else { return ALT_END_OF_GPIO_SIGNALS; }
+ return (ALT_GPIO_1BIT_t) i;
+ }
+ bitmask >>= 1;
+ }
+ return ALT_END_OF_GPIO_SIGNALS;
+}
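
A short round trip through the three utility functions above, not part of hwlib; ALT_GPIO_1BIT_28 is just the example index (the last pin of port A per the definitions at the top of this file):

    ALT_GPIO_PORT_t    pid = alt_gpio_bit_to_pid(ALT_GPIO_1BIT_28);      /* ALT_GPIO_PORTA   */
    ALT_GPIO_PORTBIT_t bit = alt_gpio_bit_to_port_pin(ALT_GPIO_1BIT_28); /* bit 28 of port A */
    ALT_GPIO_1BIT_t    sig = alt_gpio_port_pin_to_bit(pid, 1u << bit);   /* back to ..._28   */
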
+
diff --git a/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_i2c.c b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_i2c.c
new file mode 100644
index 0000000000..b6279a7938
--- /dev/null
+++ b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_i2c.c
@@ -0,0 +1,2004 @@
+/******************************************************************************
+ *
+ * Copyright 2013 Altera Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+#include <bsp/alt_i2c.h>
+#include <bsp/alt_reset_manager.h>
+#include <stdio.h>
+
+/////
+
+// NOTE: To enable debugging output, delete the next line and uncomment the
+// line after.
+#define dprintf(...)
+// #define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+
+/////
+
+#define MIN(a, b) ((a) > (b) ? (b) : (a))
+
+/////
+
+// Timeout for reset manager
+#define ALT_I2C_RESET_TMO_INIT 8192
+// Timeout for disable device
+#define ALT_I2C_MAX_T_POLL_COUNT 8192
+// Timeout for waiting interrupt
+#define ALT_I2C_TMO_WAITER 2500000
+
+// Min frequency during standard speed
+#define ALT_I2C_SS_MIN_SPEED 8000
+// Max frequency during standard speed
+#define ALT_I2C_SS_MAX_SPEED 100000
+// Min frequency during fast speed
+#define ALT_I2C_FS_MIN_SPEED 100000
+// Max frequency during fast speed
+#define ALT_I2C_FS_MAX_SPEED 400000
+// Default spike suppression limit during standard speed
+#define ALT_I2C_SS_DEFAULT_SPKLEN 11
+// Default spike suppression limit during fast speed
+#define ALT_I2C_FS_DEFAULT_SPKLEN 4
+
+// Diff between SCL LCNT and SCL HCNT
+#define ALT_I2C_DIFF_LCNT_HCNT 70
+
+// Reserved address from 0x00 to 0x07
+#define ALT_I2C_SLV_RESERVE_ADDR_S_1 0x00
+#define ALT_I2C_SLV_RESERVE_ADDR_F_1 0x07
+// Reserved address from 0x78 to 0x7F
+#define ALT_I2C_SLV_RESERVE_ADDR_S_2 0x78
+#define ALT_I2C_SLV_RESERVE_ADDR_F_2 0x7F
+
+static ALT_STATUS_CODE alt_i2c_is_enabled_helper(ALT_I2C_DEV_t * i2c_dev);
+
+//
+// Check whether the i2c location refers to a valid I2C controller instance.
+//
+static ALT_STATUS_CODE alt_i2c_checking(ALT_I2C_DEV_t * i2c_dev)
+{
+ if ( (i2c_dev->location != (void *)ALT_I2C_I2C0)
+ && (i2c_dev->location != (void *)ALT_I2C_I2C1)
+ && (i2c_dev->location != (void *)ALT_I2C_I2C2)
+ && (i2c_dev->location != (void *)ALT_I2C_I2C3))
+ {
+ // Incorrect device
+ return ALT_E_FALSE;
+ }
+
+    // Location refers to a valid i2c controller instance
+ return ALT_E_TRUE;
+}
+
+static ALT_STATUS_CODE alt_i2c_rstmgr_set(ALT_I2C_DEV_t * i2c_dev)
+{
+ uint32_t rst_mask = ALT_RSTMGR_PERMODRST_I2C0_SET_MSK;
+
+ // Assert the appropriate I2C module reset signal via the Reset Manager Peripheral Reset register.
+ switch ((ALT_I2C_CTLR_t)i2c_dev->location)
+ {
+ case ALT_I2C_I2C0:
+ rst_mask = ALT_RSTMGR_PERMODRST_I2C0_SET_MSK;
+ break;
+ case ALT_I2C_I2C1:
+ rst_mask = ALT_RSTMGR_PERMODRST_I2C1_SET_MSK;
+ break;
+ case ALT_I2C_I2C2:
+ rst_mask = ALT_RSTMGR_PERMODRST_I2C2_SET_MSK;
+ break;
+ case ALT_I2C_I2C3:
+ rst_mask = ALT_RSTMGR_PERMODRST_I2C3_SET_MSK;
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ alt_setbits_word(ALT_RSTMGR_PERMODRST_ADDR, rst_mask);
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Reset i2c module by reset manager
+//
+static ALT_STATUS_CODE alt_i2c_rstmgr_strobe(ALT_I2C_DEV_t * i2c_dev)
+{
+ uint32_t rst_mask = ALT_RSTMGR_PERMODRST_I2C0_SET_MSK;
+
+ // Assert the appropriate I2C module reset signal via the Reset Manager Peripheral Reset register.
+ switch ((ALT_I2C_CTLR_t)i2c_dev->location)
+ {
+ case ALT_I2C_I2C0:
+ rst_mask = ALT_RSTMGR_PERMODRST_I2C0_SET_MSK;
+ break;
+ case ALT_I2C_I2C1:
+ rst_mask = ALT_RSTMGR_PERMODRST_I2C1_SET_MSK;
+ break;
+ case ALT_I2C_I2C2:
+ rst_mask = ALT_RSTMGR_PERMODRST_I2C2_SET_MSK;
+ break;
+ case ALT_I2C_I2C3:
+ rst_mask = ALT_RSTMGR_PERMODRST_I2C3_SET_MSK;
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ alt_setbits_word(ALT_RSTMGR_PERMODRST_ADDR, rst_mask);
+
+ volatile uint32_t timeout = ALT_I2C_RESET_TMO_INIT;
+
+    // Wait while the i2c module is resetting
+ while (--timeout)
+ ;
+
+ // Deassert the appropriate I2C module reset signal via the Reset Manager Peripheral Reset register.
+ alt_clrbits_word(ALT_RSTMGR_PERMODRST_ADDR, rst_mask);
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Initialize the specified I2C controller instance for use and return a device
+// handle referencing it.
+//
+ALT_STATUS_CODE alt_i2c_init(const ALT_I2C_CTLR_t i2c,
+ ALT_I2C_DEV_t * i2c_dev)
+{
+ // Save i2c start address to the instance
+ i2c_dev->location = (void *)i2c;
+
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (alt_clk_is_enabled(ALT_CLK_L4_SP) != ALT_E_TRUE)
+ {
+ return ALT_E_BAD_CLK;
+ }
+
+ /////
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_clk_freq_get(ALT_CLK_L4_SP, &i2c_dev->clock_freq);
+ }
+
+ // Reset i2c module
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_i2c_reset(i2c_dev);
+ }
+
+ return status;
+}
+
+//
+// Reset i2c module
+//
+ALT_STATUS_CODE alt_i2c_reset(ALT_I2C_DEV_t * i2c_dev)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ bool already_enabled = (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_TRUE);
+
+ if (already_enabled)
+ {
+ // Temporarily disable controller
+ status = alt_i2c_disable(i2c_dev);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+ }
+
+ // Reset i2c module by reset manager
+ alt_i2c_rstmgr_strobe(i2c_dev);
+
+ // Set optimal parameters for all i2c devices on the bus
+ ALT_I2C_MASTER_CONFIG_t cfg;
+ cfg.addr_mode = ALT_I2C_ADDR_MODE_7_BIT;
+ cfg.speed_mode = ALT_I2C_SPEED_STANDARD;
+ cfg.fs_spklen = ALT_I2C_SS_DEFAULT_SPKLEN;
+ cfg.restart_enable = ALT_E_TRUE;
+ cfg.ss_scl_lcnt = cfg.fs_scl_lcnt = 0x2FB;
+ cfg.ss_scl_hcnt = cfg.fs_scl_hcnt = 0x341;
+
+ alt_i2c_master_config_set(i2c_dev, &cfg);
+
+    // Activate master mode
+ alt_i2c_op_mode_set(i2c_dev, ALT_I2C_MODE_MASTER);
+
+ // Reset the last target address cache.
+ i2c_dev->last_target = 0xffffffff;
+
+ // Clear interrupts mask and clear interrupt status.
+ // Interrupts are unmasked by default.
+ alt_i2c_int_disable(i2c_dev, ALT_I2C_STATUS_INT_ALL);
+ alt_i2c_int_clear(i2c_dev, ALT_I2C_STATUS_INT_ALL);
+
+ if (already_enabled)
+ {
+ // Re-enable controller
+ status = alt_i2c_enable(i2c_dev);
+ }
+
+ return status;
+}
+
+//
+// Uninitialize the I2C controller referenced by the i2c_dev handle.
+//
+ALT_STATUS_CODE alt_i2c_uninit(ALT_I2C_DEV_t * i2c_dev)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ // Disable i2c controller
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_i2c_disable(i2c_dev);
+ }
+
+ // Reset i2c module by reset manager
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_i2c_rstmgr_set(i2c_dev);
+ }
+
+ return status;
+}
+
+//
+// Enables the I2C controller.
+//
+ALT_STATUS_CODE alt_i2c_enable(ALT_I2C_DEV_t * i2c_dev)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ // Enable DMA by default.
+ alt_write_word(ALT_I2C_DMA_CR_ADDR(i2c_dev->location),
+ ALT_I2C_DMA_CR_TDMAE_SET_MSK | ALT_I2C_DMA_CR_RDMAE_SET_MSK);
+
+ alt_setbits_word(ALT_I2C_EN_ADDR(i2c_dev->location), ALT_I2C_EN_EN_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Disables the I2C controller
+//
+ALT_STATUS_CODE alt_i2c_disable(ALT_I2C_DEV_t * i2c_dev)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+    // If the i2c controller is already disabled, return success
+ if (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_SUCCESS;
+ }
+
+    // Otherwise, clear the enable bit of the i2c enable register
+ alt_clrbits_word(ALT_I2C_EN_ADDR(i2c_dev->location), ALT_I2C_EN_EN_SET_MSK);
+
+ uint32_t timeout = ALT_I2C_MAX_T_POLL_COUNT;
+
+    // Wait for all transfer operations to complete, or time out
+ while (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_TRUE)
+ {
+        // If the controller is still active, return a timeout error
+ if (--timeout == 0)
+ {
+ return ALT_E_TMO;
+ }
+ }
+
+ // Clear interrupt status
+ alt_i2c_int_clear(i2c_dev, ALT_I2C_STATUS_INT_ALL);
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Check whether the i2c controller is enabled
+//
+static ALT_STATUS_CODE alt_i2c_is_enabled_helper(ALT_I2C_DEV_t * i2c_dev)
+{
+ if (ALT_I2C_EN_STAT_IC_EN_GET(alt_read_word(ALT_I2C_EN_STAT_ADDR(i2c_dev->location))))
+ {
+ return ALT_E_TRUE;
+ }
+ else
+ {
+ return ALT_E_FALSE;
+ }
+}
+
+ALT_STATUS_CODE alt_i2c_is_enabled(ALT_I2C_DEV_t * i2c_dev)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ return alt_i2c_is_enabled_helper(i2c_dev);
+}
+
+//
+// Get config parameters from appropriate registers for master mode.
+//
+ALT_STATUS_CODE alt_i2c_master_config_get(ALT_I2C_DEV_t *i2c_dev,
+ ALT_I2C_MASTER_CONFIG_t* cfg)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ uint32_t cfg_register = alt_read_word(ALT_I2C_CON_ADDR(i2c_dev->location));
+ uint32_t tar_register = alt_read_word(ALT_I2C_TAR_ADDR(i2c_dev->location));
+ uint32_t spkl_register = alt_read_word(ALT_I2C_FS_SPKLEN_ADDR(i2c_dev->location));
+
+ cfg->speed_mode = (ALT_I2C_SPEED_t)ALT_I2C_CON_SPEED_GET(cfg_register);
+ cfg->fs_spklen = ALT_I2C_FS_SPKLEN_SPKLEN_GET(spkl_register);
+ cfg->restart_enable = ALT_I2C_CON_IC_RESTART_EN_GET(cfg_register);
+ cfg->addr_mode = (ALT_I2C_ADDR_MODE_t)ALT_I2C_TAR_IC_10BITADDR_MST_GET(tar_register);
+
+ cfg->ss_scl_lcnt = alt_read_word(ALT_I2C_SS_SCL_LCNT_ADDR(i2c_dev->location));
+ cfg->ss_scl_hcnt = alt_read_word(ALT_I2C_SS_SCL_HCNT_ADDR(i2c_dev->location));
+ cfg->fs_scl_lcnt = alt_read_word(ALT_I2C_FS_SCL_LCNT_ADDR(i2c_dev->location));
+ cfg->fs_scl_hcnt = alt_read_word(ALT_I2C_FS_SCL_HCNT_ADDR(i2c_dev->location));
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Set config parameters to appropriate registers for master mode.
+//
+ALT_STATUS_CODE alt_i2c_master_config_set(ALT_I2C_DEV_t *i2c_dev,
+ const ALT_I2C_MASTER_CONFIG_t* cfg)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if ( (cfg->speed_mode != ALT_I2C_SPEED_STANDARD)
+ && (cfg->speed_mode != ALT_I2C_SPEED_FAST))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if ( (cfg->addr_mode != ALT_I2C_ADDR_MODE_7_BIT)
+ && (cfg->addr_mode != ALT_I2C_ADDR_MODE_10_BIT))
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ bool already_enabled = (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_TRUE);
+
+ if (already_enabled)
+ {
+ // Temporarily disable controller
+ status = alt_i2c_disable(i2c_dev);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+ }
+
+ // Set config parameters to appropriate registers
+
+ alt_replbits_word(ALT_I2C_CON_ADDR(i2c_dev->location),
+ ALT_I2C_CON_SPEED_SET_MSK | ALT_I2C_CON_IC_RESTART_EN_SET_MSK,
+ ALT_I2C_CON_SPEED_SET(cfg->speed_mode) | ALT_I2C_CON_IC_RESTART_EN_SET(cfg->restart_enable));
+
+ alt_replbits_word(ALT_I2C_TAR_ADDR(i2c_dev->location),
+ ALT_I2C_TAR_IC_10BITADDR_MST_SET_MSK,
+ ALT_I2C_TAR_IC_10BITADDR_MST_SET(cfg->addr_mode));
+
+ alt_replbits_word(ALT_I2C_FS_SPKLEN_ADDR(i2c_dev->location),
+ ALT_I2C_FS_SPKLEN_SPKLEN_SET_MSK,
+ ALT_I2C_FS_SPKLEN_SPKLEN_SET(cfg->fs_spklen));
+
+ alt_replbits_word(ALT_I2C_SS_SCL_LCNT_ADDR(i2c_dev->location),
+ ALT_I2C_SS_SCL_LCNT_IC_SS_SCL_LCNT_SET_MSK,
+ ALT_I2C_SS_SCL_LCNT_IC_SS_SCL_LCNT_SET(cfg->ss_scl_lcnt));
+ alt_replbits_word(ALT_I2C_SS_SCL_HCNT_ADDR(i2c_dev->location),
+ ALT_I2C_SS_SCL_HCNT_IC_SS_SCL_HCNT_SET_MSK,
+ ALT_I2C_SS_SCL_HCNT_IC_SS_SCL_HCNT_SET(cfg->ss_scl_hcnt));
+ alt_replbits_word(ALT_I2C_FS_SCL_LCNT_ADDR(i2c_dev->location),
+ ALT_I2C_FS_SCL_LCNT_IC_FS_SCL_LCNT_SET_MSK,
+ ALT_I2C_FS_SCL_LCNT_IC_FS_SCL_LCNT_SET(cfg->fs_scl_lcnt));
+ alt_replbits_word(ALT_I2C_FS_SCL_HCNT_ADDR(i2c_dev->location),
+ ALT_I2C_FS_SCL_HCNT_IC_FS_SCL_HCNT_SET_MSK,
+ ALT_I2C_FS_SCL_HCNT_IC_FS_SCL_HCNT_SET(cfg->fs_scl_hcnt));
+
+ if (already_enabled)
+ {
+ // Re-enable controller
+ status = alt_i2c_enable(i2c_dev);
+ }
+
+ return status;
+}
+
+//
+// Return the bus speed determined by the given master-mode configuration of the i2c controller.
+//
+ALT_STATUS_CODE alt_i2c_master_config_speed_get(ALT_I2C_DEV_t *i2c_dev,
+ const ALT_I2C_MASTER_CONFIG_t * cfg,
+ uint32_t * speed_in_hz)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ uint32_t scl_lcnt = (cfg->speed_mode == ALT_I2C_SPEED_STANDARD) ? cfg->ss_scl_lcnt : cfg->fs_scl_lcnt;
+
+ if (scl_lcnt == 0)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ *speed_in_hz = i2c_dev->clock_freq / (scl_lcnt << 1);
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Fill the master-mode configuration struct of the i2c controller for the requested bus speed
+//
+ALT_STATUS_CODE alt_i2c_master_config_speed_set(ALT_I2C_DEV_t *i2c_dev,
+ ALT_I2C_MASTER_CONFIG_t * cfg,
+ uint32_t speed_in_hz)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+    // If the speed is outside the supported standard/fast range, return a range error
+ if ((speed_in_hz > ALT_I2C_FS_MAX_SPEED) || (speed_in_hz < ALT_I2C_SS_MIN_SPEED))
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ if (speed_in_hz > ALT_I2C_FS_MIN_SPEED)
+ {
+ cfg->speed_mode = ALT_I2C_SPEED_FAST;
+ cfg->fs_spklen = ALT_I2C_FS_DEFAULT_SPKLEN;
+ }
+ else
+ {
+ cfg->speed_mode = ALT_I2C_SPEED_STANDARD;
+ cfg->fs_spklen = ALT_I2C_SS_DEFAULT_SPKLEN;
+ }
+
+    // <lcount> = <internal clock, Hz> / (2 * <speed, Hz>)
+ uint32_t scl_lcnt = i2c_dev->clock_freq / (speed_in_hz << 1);
+
+ cfg->ss_scl_lcnt = cfg->fs_scl_lcnt = scl_lcnt;
+    // <hcount> = <lcount> - ALT_I2C_DIFF_LCNT_HCNT
+ cfg->ss_scl_hcnt = cfg->fs_scl_hcnt = scl_lcnt - ALT_I2C_DIFF_LCNT_HCNT;
+
+ return ALT_E_SUCCESS;
+}
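+
+//
+// Worked example of the calculation above (illustrative numbers, not from the
+// original sources): with an l4_sp_clk of 100 MHz and a requested speed of
+// 100 kHz, scl_lcnt = 100000000 / (2 * 100000) = 500, and the hcnt values
+// differ from that by ALT_I2C_DIFF_LCNT_HCNT. Feeding the resulting lcnt back
+// through alt_i2c_master_config_speed_get() gives 100000000 / (500 * 2)
+// = 100 kHz, the requested bus speed.
+//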
+
+//
+// Get config parameters from appropriate registers for slave mode.
+//
+ALT_STATUS_CODE alt_i2c_slave_config_get(ALT_I2C_DEV_t *i2c_dev,
+ ALT_I2C_SLAVE_CONFIG_t* cfg)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ uint32_t cfg_register = alt_read_word(ALT_I2C_CON_ADDR(i2c_dev->location));
+ uint32_t sar_register = alt_read_word(ALT_I2C_SAR_ADDR(i2c_dev->location));
+ uint32_t nack_register = alt_read_word(ALT_I2C_SLV_DATA_NACK_ONLY_ADDR(i2c_dev->location));
+
+ cfg->addr_mode = (ALT_I2C_ADDR_MODE_t)ALT_I2C_CON_IC_10BITADDR_SLV_GET(cfg_register);
+ cfg->addr = ALT_I2C_SAR_IC_SAR_GET(sar_register);
+ cfg->nack_enable = ALT_I2C_SLV_DATA_NACK_ONLY_NACK_GET(nack_register);
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Set config parameters to appropriate registers for slave mode.
+//
+ALT_STATUS_CODE alt_i2c_slave_config_set(ALT_I2C_DEV_t *i2c_dev,
+ const ALT_I2C_SLAVE_CONFIG_t* cfg)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if ( (cfg->addr_mode != ALT_I2C_ADDR_MODE_7_BIT)
+ && (cfg->addr_mode != ALT_I2C_ADDR_MODE_10_BIT))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if ( (cfg->addr > ALT_I2C_SAR_IC_SAR_SET_MSK)
+ || (cfg->addr < ALT_I2C_SLV_RESERVE_ADDR_F_1)
+ || ((cfg->addr > ALT_I2C_SLV_RESERVE_ADDR_S_2) && (cfg->addr < ALT_I2C_SLV_RESERVE_ADDR_F_2))
+ )
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ bool already_enabled = (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_TRUE);
+
+ if (already_enabled)
+ {
+ // Temporarily disable controller
+ status = alt_i2c_disable(i2c_dev);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+ }
+
+ alt_replbits_word(ALT_I2C_CON_ADDR(i2c_dev->location),
+ ALT_I2C_CON_IC_10BITADDR_SLV_SET_MSK,
+ ALT_I2C_CON_IC_10BITADDR_SLV_SET(cfg->addr_mode));
+
+ alt_replbits_word(ALT_I2C_SAR_ADDR(i2c_dev->location),
+ ALT_I2C_SAR_IC_SAR_SET_MSK,
+ ALT_I2C_SAR_IC_SAR_SET(cfg->addr));
+ alt_replbits_word(ALT_I2C_SLV_DATA_NACK_ONLY_ADDR(i2c_dev->location),
+ ALT_I2C_SLV_DATA_NACK_ONLY_NACK_SET_MSK,
+ ALT_I2C_SLV_DATA_NACK_ONLY_NACK_SET(cfg->nack_enable));
+
+ if (already_enabled)
+ {
+ // Re-enable controller
+ status = alt_i2c_enable(i2c_dev);
+ }
+
+ return status;
+}
+
+//
+// Get hold time (used during slave mode)
+//
+ALT_STATUS_CODE alt_i2c_sda_hold_time_get(ALT_I2C_DEV_t *i2c_dev,
+ uint16_t *hold_time)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ uint32_t sda_register = alt_read_word(ALT_I2C_SDA_HOLD_ADDR(i2c_dev->location));
+ *hold_time = ALT_I2C_SDA_HOLD_IC_SDA_HOLD_GET(sda_register);
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Set hold time (used during slave mode)
+//
+ALT_STATUS_CODE alt_i2c_sda_hold_time_set(ALT_I2C_DEV_t *i2c_dev,
+ const uint16_t hold_time)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ bool already_enabled = (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_TRUE);
+
+ if (already_enabled)
+ {
+ // Temporarily disable controller
+ status = alt_i2c_disable(i2c_dev);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+ }
+
+ alt_replbits_word(ALT_I2C_SDA_HOLD_ADDR(i2c_dev->location),
+ ALT_I2C_SDA_HOLD_IC_SDA_HOLD_SET_MSK,
+ ALT_I2C_SDA_HOLD_IC_SDA_HOLD_SET(hold_time));
+
+ if (already_enabled)
+ {
+ // Re-enable controller
+ status = alt_i2c_enable(i2c_dev);
+ }
+
+ return status;
+}
+
+//
+// Gets the current operational mode of the I2C controller.
+//
+ALT_STATUS_CODE alt_i2c_op_mode_get(ALT_I2C_DEV_t *i2c_dev,
+ ALT_I2C_MODE_t* mode)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ uint32_t cfg_register = alt_read_word(ALT_I2C_CON_ADDR(i2c_dev->location));
+ uint32_t mst_mod_stat = ALT_I2C_CON_MST_MOD_GET(cfg_register);
+ uint32_t slv_mod_stat = ALT_I2C_CON_IC_SLV_DIS_GET(cfg_register);
+
+    // Return an error if master and slave modes are both enabled or both disabled
+ if ( (mst_mod_stat == ALT_I2C_CON_MST_MOD_E_EN && slv_mod_stat == ALT_I2C_CON_IC_SLV_DIS_E_EN)
+ || (mst_mod_stat == ALT_I2C_CON_MST_MOD_E_DIS && slv_mod_stat == ALT_I2C_CON_IC_SLV_DIS_E_DIS))
+ {
+ return ALT_E_ERROR;
+ }
+
+ *mode = (ALT_I2C_MODE_t)mst_mod_stat;
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Sets the operational mode of the I2C controller.
+//
+ALT_STATUS_CODE alt_i2c_op_mode_set(ALT_I2C_DEV_t *i2c_dev,
+ const ALT_I2C_MODE_t mode)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if ( (mode != ALT_I2C_MODE_MASTER)
+ && (mode != ALT_I2C_MODE_SLAVE))
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ bool already_enabled = (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_TRUE);
+
+ if (already_enabled)
+ {
+ // Temporarily disable controller
+ status = alt_i2c_disable(i2c_dev);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+ }
+
+ if (mode == ALT_I2C_MODE_MASTER)
+ {
+ // Enable master, disable slave
+ alt_replbits_word(ALT_I2C_CON_ADDR(i2c_dev->location),
+ ALT_I2C_CON_IC_SLV_DIS_SET_MSK | ALT_I2C_CON_MST_MOD_SET_MSK,
+ ALT_I2C_CON_IC_SLV_DIS_SET(ALT_I2C_CON_IC_SLV_DIS_E_DIS) | ALT_I2C_CON_MST_MOD_SET(ALT_I2C_CON_MST_MOD_E_EN));
+ }
+ else if (mode == ALT_I2C_MODE_SLAVE)
+ {
+ // Enable slave, disable master
+ alt_replbits_word(ALT_I2C_CON_ADDR(i2c_dev->location),
+ ALT_I2C_CON_IC_SLV_DIS_SET_MSK | ALT_I2C_CON_MST_MOD_SET_MSK,
+ ALT_I2C_CON_IC_SLV_DIS_SET(ALT_I2C_CON_IC_SLV_DIS_E_EN) | ALT_I2C_CON_MST_MOD_SET(ALT_I2C_CON_MST_MOD_E_DIS));
+ }
+
+ if (already_enabled)
+ {
+ // Re-enable controller
+ status = alt_i2c_enable(i2c_dev);
+ }
+
+ return status;
+}
+
+//
+// Returns ALT_E_TRUE if the I2C controller is busy
+//
+ALT_STATUS_CODE alt_i2c_is_busy(ALT_I2C_DEV_t *i2c_dev)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if ( ALT_I2C_STAT_ACTIVITY_GET(alt_read_word(ALT_I2C_STAT_ADDR(i2c_dev->location))))
+ {
+ return ALT_E_TRUE;
+ }
+ else
+ {
+ return ALT_E_FALSE;
+ }
+}
+
+//
+// This function reads a single data byte from the receive FIFO.
+//
+ALT_STATUS_CODE alt_i2c_read(ALT_I2C_DEV_t *i2c_dev, uint8_t *value)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_ERROR;
+ }
+
+ *value = (uint8_t)(ALT_I2C_DATA_CMD_DAT_GET(alt_read_word(ALT_I2C_DATA_CMD_ADDR(i2c_dev->location))));
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// This function writes a single data byte to the transmit FIFO.
+//
+ALT_STATUS_CODE alt_i2c_write(ALT_I2C_DEV_t *i2c_dev, const uint8_t value)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_ERROR;
+ }
+
+ alt_write_word(ALT_I2C_DATA_CMD_ADDR(i2c_dev->location),
+ ALT_I2C_DATA_CMD_DAT_SET(value));
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// This function acts in the role of a slave-receiver by receiving a single data
+// byte from the I2C bus in response to a write command from the master.
+//
+ALT_STATUS_CODE alt_i2c_slave_receive(ALT_I2C_DEV_t * i2c_dev,
+ uint8_t * data)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_ERROR;
+ }
+
+    // Same read as alt_i2c_read(): pull one byte from the receive FIFO.
+ *data = (uint8_t)(ALT_I2C_DATA_CMD_DAT_GET(alt_read_word(ALT_I2C_DATA_CMD_ADDR(i2c_dev->location))));
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// This function acts in the role of a slave-transmitter by transmitting a single
+// data byte to the I2C bus in response to a read request from the master.
+//
+ALT_STATUS_CODE alt_i2c_slave_transmit(ALT_I2C_DEV_t *i2c_dev,
+ const uint8_t data)
+{
+    // Send it as a bulk transfer of a single byte
+ return alt_i2c_slave_bulk_transmit(i2c_dev, &data, 1);
+}
+
+//
+// This function acts in the role of a slave-transmitter by transmitting data in
+// bulk to the I2C bus in response to a series of read requests from a master.
+//
+ALT_STATUS_CODE alt_i2c_slave_bulk_transmit(ALT_I2C_DEV_t *i2c_dev,
+ const void * data,
+ const size_t size)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_ERROR;
+ }
+
+ const char * buffer = data;
+ for (size_t i = 0; i < size; ++i)
+ {
+ alt_write_word(ALT_I2C_DATA_CMD_ADDR(i2c_dev->location),
+ ALT_I2C_DATA_CMD_DAT_SET(*buffer)
+ | ALT_I2C_DATA_CMD_STOP_SET(false)
+ | ALT_I2C_DATA_CMD_RESTART_SET(false));
+
+ ++buffer;
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_i2c_master_target_get(ALT_I2C_DEV_t * i2c_dev, uint32_t * target_addr)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ *target_addr = i2c_dev->last_target;
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_i2c_master_target_set(ALT_I2C_DEV_t * i2c_dev, uint32_t target_addr)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ // Wait until the TX FIFO flushes. This is needed because TAR can only be
+ // updated under specific conditions.
+
+ if (target_addr != i2c_dev->last_target)
+ {
+ uint32_t timeout = 10000;
+
+ while (alt_i2c_tx_fifo_is_empty(i2c_dev) == ALT_E_FALSE)
+ {
+ if (--timeout == 0)
+ {
+ status = ALT_E_TMO;
+ break;
+ }
+ }
+
+ // Update target address
+ if (status == ALT_E_SUCCESS)
+ {
+ alt_replbits_word(ALT_I2C_TAR_ADDR(i2c_dev->location),
+ ALT_I2C_TAR_IC_TAR_SET_MSK,
+ ALT_I2C_TAR_IC_TAR_SET(target_addr));
+
+ i2c_dev->last_target = target_addr;
+ }
+ }
+
+ return status;
+}
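+
+//
+// For example (illustrative only): before addressing a hypothetical slave at
+// 7-bit address 0x50, a caller would issue
+//
+//   alt_i2c_master_target_set(&dev, 0x50);
+//
+// The new address is written to TAR only after the TX FIFO has drained, so a
+// timeout here indicates a previous transfer that never completed.
+//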
+
+//
+// Write a block of data bytes to the TX FIFO
+//
+static ALT_STATUS_CODE alt_i2c_master_transmit_helper(ALT_I2C_DEV_t * i2c_dev,
+ const uint8_t * buffer,
+ size_t size,
+ bool issue_restart,
+ bool issue_stop)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+    // If the requested size is 1, the restart and stop may need to be sent in the
+ // same frame.
+ if (size == 1)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_i2c_issue_write(i2c_dev,
+ *buffer,
+ issue_restart,
+ issue_stop);
+
+ ++buffer;
+ --size;
+ }
+ }
+ else
+ {
+ // First byte
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_i2c_issue_write(i2c_dev,
+ *buffer,
+ issue_restart,
+ false);
+
+ ++buffer;
+ --size;
+ }
+
+ /////
+
+ // Middle byte(s)
+
+ if (status == ALT_E_SUCCESS)
+ {
+ uint32_t timeout = size * 10000;
+
+ while (size > 1)
+ {
+ uint32_t level = 0;
+ status = alt_i2c_tx_fifo_level_get(i2c_dev, &level);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t space = ALT_I2C_TX_FIFO_NUM_ENTRIES - level;
+ if (space == 0)
+ {
+ if (--timeout == 0)
+ {
+ status = ALT_E_TMO;
+ break;
+ }
+
+ continue;
+ }
+
+ // Subtract 1 because the last byte may need to issue_stop
+ space = MIN(space, size - 1);
+
+ for (uint32_t i = 0; i < space; ++i)
+ {
+ alt_write_word(ALT_I2C_DATA_CMD_ADDR(i2c_dev->location),
+ ALT_I2C_DATA_CMD_DAT_SET(*buffer)
+ | ALT_I2C_DATA_CMD_STOP_SET(false)
+ | ALT_I2C_DATA_CMD_RESTART_SET(false));
+
+ ++buffer;
+ }
+
+ size -= space;
+ }
+ }
+
+ /////
+
+ // Last byte
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_i2c_issue_write(i2c_dev,
+ *buffer,
+ false,
+ issue_stop);
+
+ ++buffer;
+ --size;
+ }
+ }
+
+ return status;
+}
+
+//
+// This function acts in the role of a master-transmitter by issuing a write
+// command and transmitting data to the I2C bus.
+//
+ALT_STATUS_CODE alt_i2c_master_transmit(ALT_I2C_DEV_t *i2c_dev,
+ const void * data,
+ const size_t size,
+ const bool issue_restart,
+ const bool issue_stop)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (size == 0)
+ {
+ return ALT_E_SUCCESS;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_i2c_master_transmit_helper(i2c_dev,
+ data,
+ size,
+ issue_restart,
+ issue_stop);
+ }
+
+    // A reset is needed to return the i2c bus to the idle state
+ if (status == ALT_E_TMO)
+ {
+ alt_i2c_reset(i2c_dev);
+ }
+
+ return status;
+}
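+
+//
+// Usage sketch (illustrative only): write two bytes, e.g. a hypothetical
+// register index followed by a value, to the currently selected target and end
+// the transfer with a STOP condition:
+//
+//   uint8_t buf[2] = { 0x01, 0xA5 };
+//   alt_i2c_master_transmit(&dev, buf, sizeof(buf), false, true);
+//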
+
+ALT_STATUS_CODE alt_i2c_master_receive_helper(ALT_I2C_DEV_t *i2c_dev,
+ uint8_t * buffer,
+ size_t size,
+ bool issue_restart,
+ bool issue_stop)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ uint32_t issue_left = size;
+ uint32_t data_left = size;
+
+ uint32_t timeout = size * 10000;
+
+ // Wait for space in the TX FIFO to send the first read request.
+    // This is needed because the issue_restart flag needs to be set on it.
+
+ if (issue_restart == true)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ while (alt_i2c_tx_fifo_is_full(i2c_dev) == ALT_E_TRUE)
+ {
+ if (--timeout == 0)
+ {
+ status = ALT_E_TMO;
+ break;
+ }
+ }
+ }
+
+ // Now send the first request.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ alt_write_word(ALT_I2C_DATA_CMD_ADDR(i2c_dev->location),
+ ALT_I2C_DATA_CMD_CMD_SET(ALT_I2C_DATA_CMD_CMD_E_RD)
+ | ALT_I2C_DATA_CMD_STOP_SET(false)
+ | ALT_I2C_DATA_CMD_RESTART_SET(issue_restart));
+
+ --issue_left;
+ }
+ }
+
+ // For the rest of the data ...
+
+ while (data_left > 0)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+        // Top up the TX FIFO with read issues.
+        // The last read issue needs special consideration because it may have to set issue_stop.
+
+ if (issue_left > 0)
+ {
+ uint32_t level = 0;
+ status = alt_i2c_tx_fifo_level_get(i2c_dev, &level);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ uint32_t space = ALT_I2C_TX_FIFO_NUM_ENTRIES - level;
+
+ if (issue_left == 1)
+ {
+ if (space > 0)
+ {
+ space = 1;
+
+ alt_write_word(ALT_I2C_DATA_CMD_ADDR(i2c_dev->location),
+ ALT_I2C_DATA_CMD_CMD_SET(ALT_I2C_DATA_CMD_CMD_E_RD)
+ | ALT_I2C_DATA_CMD_STOP_SET(issue_stop)
+ | ALT_I2C_DATA_CMD_RESTART_SET(false));
+ }
+ }
+ else
+ {
+ // Send up to issue_left - 1, as the last issue has special considerations.
+ space = MIN(issue_left - 1, space);
+
+ for (uint32_t i = 0; i < space; ++i)
+ {
+ alt_write_word(ALT_I2C_DATA_CMD_ADDR(i2c_dev->location),
+ ALT_I2C_DATA_CMD_CMD_SET(ALT_I2C_DATA_CMD_CMD_E_RD)
+ | ALT_I2C_DATA_CMD_STOP_SET(false)
+ | ALT_I2C_DATA_CMD_RESTART_SET(false));
+ }
+ }
+
+ issue_left -= space;
+ }
+
+ // Read out the resulting received data as they come in.
+
+ if (data_left > 0)
+ {
+ uint32_t level = 0;
+ status = alt_i2c_rx_fifo_level_get(i2c_dev, &level);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ if (level == 0)
+ {
+ if (--timeout == 0)
+ {
+ status = ALT_E_TMO;
+ break;
+ }
+ }
+
+ level = MIN(data_left, level);
+
+ for (uint32_t i = 0; i < level; ++i)
+ {
+ // alt_i2c_read(i2c_dev, &value);
+ *buffer = (uint8_t)(ALT_I2C_DATA_CMD_DAT_GET(alt_read_word(ALT_I2C_DATA_CMD_ADDR(i2c_dev->location))));
+ ++buffer;
+ }
+
+ data_left -= level;
+ }
+ }
+
+
+ return status;
+}
+
+//
+// This function acts in the role of a master-receiver by receiving one or more
+// data bytes transmitted from a slave in response to read requests issued from
+// this master.
+//
+ALT_STATUS_CODE alt_i2c_master_receive(ALT_I2C_DEV_t *i2c_dev,
+ void * data,
+ const size_t size,
+ const bool issue_restart,
+ const bool issue_stop)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (size == 0)
+ {
+ return ALT_E_SUCCESS;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ // This I2C controller requires that a read issue be performed for each byte requested.
+    // Each read issue takes space in the TX FIFO, which may still be handling a previous request asynchronously.
+
+ if (size == 1)
+ {
+ uint32_t timeout = 10000;
+
+ // Wait for space in the TX FIFO to send the read request.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ while (alt_i2c_tx_fifo_is_full(i2c_dev) == ALT_E_TRUE)
+ {
+ if (--timeout == 0)
+ {
+ status = ALT_E_TMO;
+ break;
+ }
+ }
+ }
+
+ // Issue the read request in the TX FIFO.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ alt_write_word(ALT_I2C_DATA_CMD_ADDR(i2c_dev->location),
+ ALT_I2C_DATA_CMD_CMD_SET(ALT_I2C_DATA_CMD_CMD_E_RD)
+ | ALT_I2C_DATA_CMD_STOP_SET(issue_stop)
+ | ALT_I2C_DATA_CMD_RESTART_SET(issue_restart));
+
+ }
+
+ // Wait for data to become available in the RX FIFO.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ while (alt_i2c_rx_fifo_is_empty(i2c_dev) == ALT_E_TRUE)
+ {
+ if (--timeout == 0)
+ {
+ status = ALT_E_TMO;
+ break;
+ }
+ }
+ }
+
+ // Read the RX FIFO.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ uint8_t * buffer = data;
+ *buffer = (uint8_t)(ALT_I2C_DATA_CMD_DAT_GET(alt_read_word(ALT_I2C_DATA_CMD_ADDR(i2c_dev->location))));
+ }
+ }
+ else if (size <= 64)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_i2c_master_receive_helper(i2c_dev,
+ data,
+ size,
+ issue_restart,
+ issue_stop);
+ }
+ }
+ else
+ {
+ uint8_t * buffer = data;
+ size_t size_left = size;
+
+ // Send the first ALT_I2C_RX_FIFO_NUM_ENTRIES items
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_i2c_master_receive_helper(i2c_dev,
+ buffer,
+ ALT_I2C_RX_FIFO_NUM_ENTRIES,
+ issue_restart,
+ false);
+ }
+
+ buffer += ALT_I2C_RX_FIFO_NUM_ENTRIES;
+ size_left -= ALT_I2C_RX_FIFO_NUM_ENTRIES;
+
+ while (size_left > 0)
+ {
+ if (size_left > ALT_I2C_RX_FIFO_NUM_ENTRIES)
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_i2c_master_receive_helper(i2c_dev,
+ buffer,
+ ALT_I2C_RX_FIFO_NUM_ENTRIES,
+ false,
+ false);
+ }
+
+ buffer += ALT_I2C_RX_FIFO_NUM_ENTRIES;
+ size_left -= ALT_I2C_RX_FIFO_NUM_ENTRIES;
+ }
+ else
+ {
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_i2c_master_receive_helper(i2c_dev,
+ buffer,
+ size_left,
+ false,
+ issue_stop);
+ }
+
+ size_left = 0;
+ }
+
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+ }
+ }
+
+    // A reset is needed to return the i2c bus to the idle state
+ if (status == ALT_E_TMO)
+ {
+ alt_i2c_reset(i2c_dev);
+ }
+
+ return status;
+}
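+
+//
+// Usage sketch (illustrative only): a common register read is a transmit of
+// the register index without a STOP, followed by a receive that begins with a
+// repeated START (restart) and ends with a STOP:
+//
+//   uint8_t reg = 0x01;   // hypothetical register index
+//   uint8_t value;
+//
+//   alt_i2c_master_transmit(&dev, &reg, 1, false, false);
+//   alt_i2c_master_receive(&dev, &value, 1, true, true);
+//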
+
+//
+// This function causes the I2C controller master to send data to the bus.
+//
+ALT_STATUS_CODE alt_i2c_issue_write(ALT_I2C_DEV_t *i2c_dev,
+ const uint8_t value,
+ const bool issue_restart,
+ const bool issue_stop)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Wait until there is a FIFO spot
+ uint32_t timeout = 10000;
+
+ while (alt_i2c_tx_fifo_is_full(i2c_dev) == ALT_E_TRUE)
+ {
+ if (--timeout == 0)
+ {
+ return ALT_E_TMO;
+ }
+ }
+
+ alt_write_word(ALT_I2C_DATA_CMD_ADDR(i2c_dev->location),
+ ALT_I2C_DATA_CMD_DAT_SET(value)
+ | ALT_I2C_DATA_CMD_STOP_SET(issue_stop)
+ | ALT_I2C_DATA_CMD_RESTART_SET(issue_restart));
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// This function causes the I2C controller master to issue a READ request on the bus.
+//
+ALT_STATUS_CODE alt_i2c_issue_read(ALT_I2C_DEV_t *i2c_dev,
+ const bool issue_restart,
+ const bool issue_stop)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Wait until there is a FIFO spot
+ uint32_t timeout = 10000;
+
+ while (alt_i2c_tx_fifo_is_full(i2c_dev) == ALT_E_TRUE)
+ {
+ if (--timeout == 0)
+ {
+ return ALT_E_TMO;
+ }
+ }
+
+ alt_write_word(ALT_I2C_DATA_CMD_ADDR(i2c_dev->location),
+ ALT_I2C_DATA_CMD_CMD_SET(ALT_I2C_DATA_CMD_CMD_E_RD)
+ | ALT_I2C_DATA_CMD_STOP_SET(issue_stop)
+ | ALT_I2C_DATA_CMD_RESTART_SET(issue_restart));
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// This function acts in the role of a master-transmitter by issuing a general
+// call command to all devices connected to the I2C bus.
+//
+ALT_STATUS_CODE alt_i2c_master_general_call(ALT_I2C_DEV_t *i2c_dev,
+ const void * data,
+ const size_t size,
+ const bool issue_restart,
+ const bool issue_stop)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_ERROR;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_i2c_master_target_set(i2c_dev, 0);
+ }
+
+    // A general call is a master-mode transmit (the target address is not used for it)
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_i2c_master_transmit(i2c_dev, data, size, issue_restart, issue_stop);
+ }
+
+ return status;
+}
+
+/////
+
+ALT_STATUS_CODE alt_i2c_general_call_ack_disable(ALT_I2C_DEV_t *i2c_dev)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ bool already_enabled = (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_TRUE);
+
+ if (already_enabled)
+ {
+ // Temporarily disable controller
+ status = alt_i2c_disable(i2c_dev);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+ }
+
+ alt_replbits_word(ALT_I2C_TAR_ADDR(i2c_dev->location),
+ ALT_I2C_TAR_SPECIAL_SET_MSK | ALT_I2C_TAR_GC_OR_START_SET_MSK,
+ ALT_I2C_TAR_SPECIAL_SET(ALT_I2C_TAR_SPECIAL_E_STARTBYTE) | ALT_I2C_TAR_GC_OR_START_SET(ALT_I2C_TAR_GC_OR_START_E_STARTBYTE));
+
+ if (already_enabled)
+ {
+ // Re-enable controller
+ status = alt_i2c_enable(i2c_dev);
+ }
+
+ return status;
+}
+
+//
+// Enables the I2C controller to respond with an ACK when it receives a General
+// Call address.
+//
+ALT_STATUS_CODE alt_i2c_general_call_ack_enable(ALT_I2C_DEV_t *i2c_dev)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ bool already_enabled = (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_TRUE);
+
+ if (already_enabled)
+ {
+ // Temporarily disable controller
+ status = alt_i2c_disable(i2c_dev);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+ }
+
+ alt_replbits_word(ALT_I2C_TAR_ADDR(i2c_dev->location),
+ ALT_I2C_TAR_SPECIAL_SET_MSK | ALT_I2C_TAR_GC_OR_START_SET_MSK,
+ ALT_I2C_TAR_SPECIAL_SET(ALT_I2C_TAR_SPECIAL_E_GENCALL) | ALT_I2C_TAR_GC_OR_START_SET(ALT_I2C_TAR_GC_OR_START_E_GENCALL));
+
+ if (already_enabled)
+ {
+ // Re-enable controller
+ status = alt_i2c_enable(i2c_dev);
+ }
+
+ return status;
+}
+
+//
+// Returns ALT_E_TRUE if the I2C controller is enabled to respond to General Call
+// addresses.
+//
+ALT_STATUS_CODE alt_i2c_general_call_ack_is_enabled(ALT_I2C_DEV_t *i2c_dev)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ uint32_t tar_register = alt_read_word(ALT_I2C_TAR_ADDR(i2c_dev->location));
+
+ if ( (ALT_I2C_TAR_SPECIAL_GET(tar_register) == ALT_I2C_TAR_SPECIAL_E_GENCALL)
+ && (ALT_I2C_TAR_GC_OR_START_GET(tar_register) == ALT_I2C_TAR_GC_OR_START_E_GENCALL)
+ )
+ {
+ return ALT_E_TRUE;
+ }
+ else
+ {
+ return ALT_E_FALSE;
+ }
+}
+
+//
+// Returns the current I2C controller interrupt status conditions.
+//
+ALT_STATUS_CODE alt_i2c_int_status_get(ALT_I2C_DEV_t *i2c_dev,
+ uint32_t *status)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ *status = alt_read_word(ALT_I2C_INTR_STAT_ADDR(i2c_dev->location));
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Returns the I2C controller raw interrupt status conditions irrespective of
+// the interrupt status condition enablement state.
+//
+ALT_STATUS_CODE alt_i2c_int_raw_status_get(ALT_I2C_DEV_t *i2c_dev,
+ uint32_t *status)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ *status = alt_read_word(ALT_I2C_RAW_INTR_STAT_ADDR(i2c_dev->location));
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Clears the specified I2C controller interrupt status conditions identified
+// in the mask.
+//
+ALT_STATUS_CODE alt_i2c_int_clear(ALT_I2C_DEV_t *i2c_dev, const uint32_t mask)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (mask == ALT_I2C_STATUS_INT_ALL)
+ {
+ alt_read_word(ALT_I2C_CLR_INTR_ADDR(i2c_dev->location));
+ return ALT_E_SUCCESS;
+ }
+
+    // Each status condition is cleared by reading its own clear register
+
+ if (mask & ALT_I2C_STATUS_RX_UNDER)
+ {
+ alt_read_word(ALT_I2C_CLR_RX_UNDER_ADDR(i2c_dev->location));
+ }
+ if (mask & ALT_I2C_STATUS_RX_OVER)
+ {
+ alt_read_word(ALT_I2C_CLR_RX_OVER_ADDR(i2c_dev->location));
+ }
+ if (mask & ALT_I2C_STATUS_TX_OVER)
+ {
+ alt_read_word(ALT_I2C_CLR_TX_OVER_ADDR(i2c_dev->location));
+ }
+ if (mask & ALT_I2C_STATUS_RD_REQ)
+ {
+ alt_read_word(ALT_I2C_CLR_RD_REQ_ADDR(i2c_dev->location));
+ }
+ if (mask & ALT_I2C_STATUS_TX_ABORT)
+ {
+ alt_read_word(ALT_I2C_CLR_TX_ABRT_ADDR(i2c_dev->location));
+ }
+ if (mask & ALT_I2C_STATUS_RX_DONE)
+ {
+ alt_read_word(ALT_I2C_CLR_RX_DONE_ADDR(i2c_dev->location));
+ }
+ if (mask & ALT_I2C_STATUS_ACTIVITY)
+ {
+ alt_read_word(ALT_I2C_CLR_ACTIVITY_ADDR(i2c_dev->location));
+ }
+ if (mask & ALT_I2C_STATUS_STOP_DET)
+ {
+ alt_read_word(ALT_I2C_CLR_STOP_DET_ADDR(i2c_dev->location));
+ }
+ if (mask & ALT_I2C_STATUS_START_DET)
+ {
+ alt_read_word(ALT_I2C_CLR_START_DET_ADDR(i2c_dev->location));
+ }
+ if (mask & ALT_I2C_STATUS_INT_CALL)
+ {
+ alt_read_word(ALT_I2C_CLR_GEN_CALL_ADDR(i2c_dev->location));
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Disable the specified I2C controller interrupt status conditions identified in
+// the mask.
+//
+ALT_STATUS_CODE alt_i2c_int_disable(ALT_I2C_DEV_t *i2c_dev, const uint32_t mask)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ alt_clrbits_word(ALT_I2C_INTR_MSK_ADDR(i2c_dev->location), mask);
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Enable the specified I2C controller interrupt status conditions identified in
+// the mask.
+//
+ALT_STATUS_CODE alt_i2c_int_enable(ALT_I2C_DEV_t *i2c_dev, const uint32_t mask)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ alt_setbits_word(ALT_I2C_INTR_MSK_ADDR(i2c_dev->location), mask);
+
+ return ALT_E_SUCCESS;
+}
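+
+//
+// For example (illustrative only), a caller interested in transfer completion
+// and abort conditions could unmask just those sources:
+//
+//   alt_i2c_int_enable(&dev, ALT_I2C_STATUS_STOP_DET | ALT_I2C_STATUS_TX_ABORT);
+//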
+
+/////
+
+//
+// Gets the cause of I2C transmission abort.
+//
+ALT_STATUS_CODE alt_i2c_tx_abort_cause_get(ALT_I2C_DEV_t *i2c_dev,
+ ALT_I2C_TX_ABORT_CAUSE_t *cause)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ *cause = (ALT_I2C_TX_ABORT_CAUSE_t)alt_read_word(ALT_I2C_TX_ABRT_SRC_ADDR(i2c_dev->location));
+
+ return ALT_E_SUCCESS;
+}
+
+/////
+
+//
+// Returns ALT_E_TRUE when the receive FIFO is empty.
+//
+ALT_STATUS_CODE alt_i2c_rx_fifo_is_empty(ALT_I2C_DEV_t *i2c_dev)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (ALT_I2C_STAT_RFNE_GET(alt_read_word(ALT_I2C_STAT_ADDR(i2c_dev->location))) == ALT_I2C_STAT_RFNE_E_EMPTY)
+ {
+ return ALT_E_TRUE;
+ }
+ else
+ {
+ return ALT_E_FALSE;
+ }
+}
+
+//
+// Returns ALT_E_TRUE when the receive FIFO is completely full.
+//
+ALT_STATUS_CODE alt_i2c_rx_fifo_is_full(ALT_I2C_DEV_t *i2c_dev)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (ALT_I2C_STAT_RFF_GET(alt_read_word(ALT_I2C_STAT_ADDR(i2c_dev->location))) == ALT_I2C_STAT_RFF_E_FULL)
+ {
+ return ALT_E_TRUE;
+ }
+ else
+ {
+ return ALT_E_FALSE;
+ }
+}
+
+//
+// Returns the number of valid entries in the receive FIFO.
+//
+ALT_STATUS_CODE alt_i2c_rx_fifo_level_get(ALT_I2C_DEV_t *i2c_dev,
+ uint32_t *num_entries)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ *num_entries = ALT_I2C_RXFLR_RXFLR_GET(alt_read_word(ALT_I2C_RXFLR_ADDR(i2c_dev->location)));
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Gets the current receive FIFO threshold level value.
+//
+ALT_STATUS_CODE alt_i2c_rx_fifo_threshold_get(ALT_I2C_DEV_t *i2c_dev,
+ uint8_t *threshold)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ *threshold = ALT_I2C_RX_TL_RX_TL_GET(alt_read_word(ALT_I2C_RX_TL_ADDR(i2c_dev->location)));
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Sets the current receive FIFO threshold level value.
+//
+ALT_STATUS_CODE alt_i2c_rx_fifo_threshold_set(ALT_I2C_DEV_t *i2c_dev,
+ const uint8_t threshold)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ bool already_enabled = (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_TRUE);
+
+ if (already_enabled)
+ {
+ // Temporarily disable controller
+ status = alt_i2c_disable(i2c_dev);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+ }
+
+ alt_replbits_word(ALT_I2C_RX_TL_ADDR(i2c_dev->location),
+ ALT_I2C_RX_TL_RX_TL_SET_MSK,
+ ALT_I2C_RX_TL_RX_TL_SET(threshold));
+
+ if (already_enabled)
+ {
+ // Re-enable controller
+ status = alt_i2c_enable(i2c_dev);
+ }
+
+ return status;
+}
+
+//
+// Returns ALT_E_TRUE when the transmit FIFO is empty.
+//
+ALT_STATUS_CODE alt_i2c_tx_fifo_is_empty(ALT_I2C_DEV_t *i2c_dev)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (ALT_I2C_STAT_TFE_GET(alt_read_word(ALT_I2C_STAT_ADDR(i2c_dev->location))) == ALT_I2C_STAT_TFE_E_EMPTY)
+ {
+ return ALT_E_TRUE;
+ }
+ else
+ {
+ return ALT_E_FALSE;
+ }
+}
+
+//
+// Returns ALT_E_TRUE when the transmit FIFO is completely full.
+//
+ALT_STATUS_CODE alt_i2c_tx_fifo_is_full(ALT_I2C_DEV_t *i2c_dev)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (ALT_I2C_STAT_TFNF_GET(alt_read_word(ALT_I2C_STAT_ADDR(i2c_dev->location))) == ALT_I2C_STAT_TFNF_E_FULL)
+ {
+ return ALT_E_TRUE;
+ }
+ else
+ {
+ return ALT_E_FALSE;
+ }
+}
+
+//
+// Returns the number of valid entries in the transmit FIFO.
+//
+ALT_STATUS_CODE alt_i2c_tx_fifo_level_get(ALT_I2C_DEV_t *i2c_dev,
+ uint32_t *num_entries)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ *num_entries = ALT_I2C_TXFLR_TXFLR_GET(alt_read_word(ALT_I2C_TXFLR_ADDR(i2c_dev->location)));
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Gets the current transmit FIFO threshold level value.
+//
+ALT_STATUS_CODE alt_i2c_tx_fifo_threshold_get(ALT_I2C_DEV_t *i2c_dev,
+ uint8_t *threshold)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ *threshold = ALT_I2C_TX_TL_TX_TL_GET(alt_read_word(ALT_I2C_TX_TL_ADDR(i2c_dev->location)));
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Sets the current transmit FIFO threshold level value.
+//
+ALT_STATUS_CODE alt_i2c_tx_fifo_threshold_set(ALT_I2C_DEV_t *i2c_dev,
+ const uint8_t threshold)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ bool already_enabled = (alt_i2c_is_enabled_helper(i2c_dev) == ALT_E_TRUE);
+
+ if (already_enabled)
+ {
+ // Temporarily disable controller
+ status = alt_i2c_disable(i2c_dev);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+ }
+
+ alt_replbits_word(ALT_I2C_TX_TL_ADDR(i2c_dev->location),
+ ALT_I2C_TX_TL_TX_TL_SET_MSK,
+ ALT_I2C_TX_TL_TX_TL_SET(threshold));
+
+ if (already_enabled)
+ {
+ // Re-enable controller
+ status = alt_i2c_enable(i2c_dev);
+ }
+
+ return status;
+}
+
+/////
+
+ALT_STATUS_CODE alt_i2c_rx_dma_threshold_get(ALT_I2C_DEV_t * i2c_dev, uint8_t * threshold)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ *threshold = ALT_I2C_DMA_RDLR_DMARDL_GET(alt_read_word(ALT_I2C_DMA_RDLR_ADDR(i2c_dev->location)));
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_i2c_rx_dma_threshold_set(ALT_I2C_DEV_t * i2c_dev, uint8_t threshold)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (threshold > ALT_I2C_DMA_RDLR_DMARDL_SET_MSK)
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ alt_write_word(ALT_I2C_DMA_RDLR_ADDR(i2c_dev->location), threshold);
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_i2c_tx_dma_threshold_get(ALT_I2C_DEV_t * i2c_dev, uint8_t * threshold)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ *threshold = ALT_I2C_DMA_TDLR_DMATDL_GET(alt_read_word(ALT_I2C_DMA_TDLR_ADDR(i2c_dev->location)));
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_i2c_tx_dma_threshold_set(ALT_I2C_DEV_t * i2c_dev, uint8_t threshold)
+{
+ if (alt_i2c_checking(i2c_dev) == ALT_E_FALSE)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (threshold > ALT_I2C_DMA_TDLR_DMATDL_SET_MSK)
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ alt_write_word(ALT_I2C_DMA_TDLR_ADDR(i2c_dev->location), threshold);
+ return ALT_E_SUCCESS;
+}
diff --git a/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_qspi.c b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_qspi.c
new file mode 100644
index 0000000000..3e591fc044
--- /dev/null
+++ b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_qspi.c
@@ -0,0 +1,2619 @@
+/******************************************************************************
+*
+* alt_qspi.c - API for the Altera SoC FPGA QSPI device.
+*
+******************************************************************************/
+
+/******************************************************************************
+ *
+ * Copyright 2013 Altera Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+#include <string.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <bsp/hwlib.h>
+#include <bsp/alt_clock_manager.h>
+#include "alt_qspi.h"
+#include <bsp/alt_qspi_private.h>
+#include <bsp/socal/alt_qspi.h>
+#include <bsp/socal/alt_rstmgr.h>
+#include <bsp/socal/alt_sysmgr.h>
+#include <bsp/socal/hps.h>
+#include <bsp/socal/socal.h>
+
+/////
+
+// NOTE: To enable debugging output, delete the next line and uncomment the
+// line after.
+#define dprintf(...)
+// #define dprintf printf
+
+/////
+
+#define MIN(a, b) ((a) > (b) ? (b) : (a))
+
+// qspi_clk operating frequency range.
+#define ALT_QSPI_CLK_FREQ_MIN ((alt_freq_t)0)
+#define ALT_QSPI_CLK_FREQ_MAX ((alt_freq_t)432000000)
+
+// The set of all valid QSPI controller interrupt status mask values.
+#define ALT_QSPI_INT_STATUS_ALL ( \
+ ALT_QSPI_INT_STATUS_MODE_FAIL | \
+ ALT_QSPI_INT_STATUS_UFL | \
+ ALT_QSPI_INT_STATUS_IDAC_OP_COMPLETE | \
+ ALT_QSPI_INT_STATUS_IDAC_OP_REJECT | \
+ ALT_QSPI_INT_STATUS_WR_PROT_VIOL | \
+ ALT_QSPI_INT_STATUS_ILL_AHB_ACCESS | \
+ ALT_QSPI_INT_STATUS_IDAC_WTRMK_TRIG | \
+ ALT_QSPI_INT_STATUS_RX_OVF | \
+ ALT_QSPI_INT_STATUS_TX_FIFO_NOT_FULL | \
+ ALT_QSPI_INT_STATUS_TX_FIFO_FULL | \
+ ALT_QSPI_INT_STATUS_RX_FIFO_NOT_EMPTY | \
+ ALT_QSPI_INT_STATUS_RX_FIFO_FULL | \
+ ALT_QSPI_INT_STATUS_IDAC_RD_FULL \
+ )
+
+static uint32_t qspi_device_size = 0;
+
+/////
+
+static ALT_STATUS_CODE alt_qspi_device_status(uint32_t * status)
+{
+    // Read the device status register through STIG
+ return alt_qspi_stig_rd_cmd(ALT_QSPI_STIG_OPCODE_RDSR, 0, 1, status, 10000);
+}
+
+#if ALT_QSPI_PROVISION_MICRON_N25Q_SUPPORT
+static ALT_STATUS_CODE alt_qspi_N25Q_device_flag(uint32_t * flagsr)
+{
+ if (qspi_device_size < 0x4000000)
+ {
+ return ALT_E_SUCCESS;
+ }
+
+ // Read flag status register through STIG
+ return alt_qspi_stig_rd_cmd(ALT_QSPI_STIG_OPCODE_RDFLGSR, 0, 1, flagsr, 10000);
+}
+
+// NOTE: This must be called after QSPI has been enabled. Communications with
+// the device will not happen until QSPI is enabled.
+static inline ALT_STATUS_CODE alt_qspi_N25Q_enable(void)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ // Reset the volatile memory on the N25Q
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_stig_cmd(ALT_QSPI_STIG_OPCODE_RESET_EN, 0, 10000);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_stig_cmd(ALT_QSPI_STIG_OPCODE_RESET_MEM, 0, 10000);
+ }
+
+ /////
+
+ if (status == ALT_E_SUCCESS)
+ {
+ ALT_QSPI_DEV_INST_CONFIG_t cfg =
+ {
+ .op_code = ALT_QSPI_STIG_OPCODE_FASTREAD_QUAD_IO,
+ .inst_type = ALT_QSPI_MODE_SINGLE, // RDID does not support QUAD.
+ .addr_xfer_type = ALT_QSPI_MODE_QUAD,
+ .data_xfer_type = ALT_QSPI_MODE_QUAD,
+ .dummy_cycles = 10
+ };
+
+ status = alt_qspi_device_read_config_set(&cfg);
+ }
+
+/*
+ // CASE 157096: Investigate using QUAD for writes.
+ if (status == ALT_E_SUCCESS)
+ {
+ ALT_QSPI_DEV_INST_CONFIG_t cfg =
+ {
+ .op_code = ALT_QSPI_STIG_OPCODE_PP,
+ .inst_type = ALT_QSPI_MODE_SINGLE,
+ .addr_xfer_type = ALT_QSPI_MODE_QUAD,
+ .data_xfer_type = ALT_QSPI_MODE_QUAD,
+ .dummy_cycles = 0
+ };
+
+ status = alt_qspi_device_write_config_set(&cfg);
+ }
+*/
+
+ return status;
+}
+
+static ALT_STATUS_CODE alt_qspi_N25Q_flag_wait_for_program(uint32_t timeout)
+{
+ // The flag status register is only available on the 512 Mib and 1 Gib
+ // (64 MiB and 128 MiB) Micron parts.
+ if (qspi_device_size < 0x4000000)
+ {
+ return ALT_E_SUCCESS;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ uint32_t time_out = timeout;
+ uint32_t stat = 0;
+ bool infinite = (timeout == ALT_QSPI_TIMEOUT_INFINITE);
+
+ do
+ {
+ status = alt_qspi_device_status(&stat);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+ if (!ALT_QSPI_STIG_SR_BUSY_GET(stat))
+ {
+ break;
+ }
+ }
+ while (time_out-- || infinite);
+
+ if (time_out == (uint32_t)-1 && !infinite)
+ {
+ status = ALT_E_TMO;
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ uint32_t flagsr = 0;
+
+ do
+ {
+ status = alt_qspi_N25Q_device_flag(&flagsr);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+ if (ALT_QSPI_STIG_FLAGSR_PROGRAMREADY_GET(flagsr))
+ {
+ break;
+ }
+ }
+ while (timeout-- || infinite);
+
+ if (timeout == (uint32_t)-1 && !infinite)
+ {
+ status = ALT_E_TMO;
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ if (ALT_QSPI_STIG_FLAGSR_PROGRAMERROR_GET(flagsr))
+ {
+ status = ALT_E_ERROR;
+ }
+ }
+ }
+ return status;
+}
+
+static ALT_STATUS_CODE alt_qspi_N25Q_flag_wait_for_erase(uint32_t timeout)
+{
+ // The flag status register is only available on the 512 Mib and 1 Gib
+ // (64 MiB and 128 MiB) Micron parts.
+ if (qspi_device_size < 0x4000000)
+ {
+ return ALT_E_SUCCESS;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ uint32_t time_out = timeout;
+ uint32_t stat = 0;
+ bool infinite = (timeout == ALT_QSPI_TIMEOUT_INFINITE);
+
+ do
+ {
+ status = alt_qspi_device_status(&stat);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+ if (!ALT_QSPI_STIG_SR_BUSY_GET(stat))
+ {
+ break;
+ }
+ }
+ while (time_out-- || infinite);
+
+ if (time_out == (uint32_t)-1 && !infinite)
+ {
+ status = ALT_E_TMO;
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+
+ uint32_t flagsr = 0;
+
+ do
+ {
+ status = alt_qspi_N25Q_device_flag(&flagsr);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+ if (ALT_QSPI_STIG_FLAGSR_ERASEREADY_GET(flagsr))
+ {
+ break;
+ }
+ }
+ while (timeout-- || infinite);
+
+ if (timeout == (uint32_t)-1 && !infinite)
+ {
+ status = ALT_E_TMO;
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ if (ALT_QSPI_STIG_FLAGSR_ERASEERROR_GET(flagsr))
+ {
+ status = ALT_E_ERROR;
+ }
+ }
+ }
+
+ return status;
+}
+#endif
+
+//
+// A helper function which converts a ns interval into a delay interval for a given MHz.
+// The +999 is there to round up the result.
+//
+static inline int alt_qspi_ns_to_multiplier(int ns, int mhz)
+{
+ return ((ns * mhz) + 999) / 1000;
+}
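+
+//
+// For example, alt_qspi_ns_to_multiplier(50, 432) = ((50 * 432) + 999) / 1000
+// = 22599 / 1000 = 22: a 50 ns interval at 432 MHz is 21.6 clock cycles,
+// rounded up to 22.
+//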
+
+ALT_STATUS_CODE alt_qspi_init(void)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+ alt_freq_t qspi_clk_freq = 0;
+
+ // Validate QSPI module input clocks.
+ // - pclk - l4_mp_clk
+ // - hclk - l4_mp_clk
+ // - ref_clk - qspi_clk
+
+ // Check and validate the QSPI ref_clk which is connected to the HPS qspi_clk.
+ if (status == ALT_E_SUCCESS)
+ {
+ if (alt_clk_is_enabled(ALT_CLK_QSPI) != ALT_E_TRUE)
+ {
+ status = ALT_E_BAD_CLK;
+ }
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_clk_freq_get(ALT_CLK_QSPI, &qspi_clk_freq);
+ if (status == ALT_E_SUCCESS)
+ {
+ if (qspi_clk_freq > ALT_QSPI_CLK_FREQ_MAX)
+ {
+ return ALT_E_BAD_CLK;
+ }
+ }
+ }
+
+ int qspi_clk_mhz = qspi_clk_freq / 1000000;
+
+ /////
+
+ // Take QSPI controller out of reset.
+ alt_clrbits_word(ALT_RSTMGR_PERMODRST_ADDR, ALT_RSTMGR_PERMODRST_QSPI_SET_MSK);
+
+ /////
+
+ // Configure the device timing
+
+ if (status == ALT_E_SUCCESS)
+ {
+ ALT_QSPI_TIMING_CONFIG_t timing_cfg =
+ {
+ .clk_phase = (ALT_QSPI_CLK_PHASE_t)ALT_QSPI_CFG_SELCLKPHASE_RESET,
+ .clk_pol = (ALT_QSPI_CLK_POLARITY_t)ALT_QSPI_CFG_SELCLKPOL_RESET,
+ .cs_da = alt_qspi_ns_to_multiplier(ALT_QSPI_TSHSL_NS_DEF, qspi_clk_mhz),
+ .cs_dads = alt_qspi_ns_to_multiplier(ALT_QSPI_TSD2D_NS_DEF, qspi_clk_mhz),
+ .cs_eot = alt_qspi_ns_to_multiplier(ALT_QSPI_TCHSH_NS_DEF, qspi_clk_mhz),
+ .cs_sot = alt_qspi_ns_to_multiplier(ALT_QSPI_TSLCH_NS_DEF, qspi_clk_mhz),
+ .rd_datacap = 1
+ };
+
+ dprintf("DEBUG[QSPI]: cs_da = %" PRIu32 ".\n", timing_cfg.cs_da);
+ dprintf("DEBUG[QSPI]: cs_dads = %" PRIu32 ".\n", timing_cfg.cs_dads);
+ dprintf("DEBUG[QSPI]: cs_eot = %" PRIu32 ".\n", timing_cfg.cs_eot);
+ dprintf("DEBUG[QSPI]: cs_sot = %" PRIu32 ".\n", timing_cfg.cs_sot);
+
+ status = alt_qspi_timing_config_set(&timing_cfg);
+ }
+
+ /////
+
+ // Configure the remap address register, no remap
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_ahb_remap_address_set(0);
+ }
+
+    // Configure the interrupt mask register; disable all interrupts first
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_int_disable(ALT_QSPI_INT_STATUS_ALL);
+ }
+
+ // Configure the baud rate divisor
+ // CASE 157095: Investigate using 108 MHz, and tweaking the rd_datacap param.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ uint32_t device_sclk_mhz = 54;
+ uint32_t div_actual = (qspi_clk_mhz + (device_sclk_mhz - 1)) / device_sclk_mhz;
+ dprintf("DEBUG[QSPI]: div_actual = %" PRIu32 ".\n", div_actual);
+
+ ALT_QSPI_BAUD_DIV_t div_bits = (ALT_QSPI_BAUD_DIV_t)(((div_actual + 1) / 2) - 1);
+ status = alt_qspi_baud_rate_div_set(div_bits);
+ }
+
+ return status;
+}
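+
+//
+// Worked example of the baud divisor computation in alt_qspi_init() above
+// (illustrative numbers only): with qspi_clk at its 432 MHz maximum and the
+// 54 MHz device SCLK target, div_actual = (432 + 53) / 54 = 8 and
+// div_bits = ((8 + 1) / 2) - 1 = 3, i.e. the ALT_QSPI_BAUD_DIV_t encoding for
+// a divide-by-8 baud rate, assuming the usual ((divisor / 2) - 1) encoding
+// implied by the calculation.
+//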
+
+ALT_STATUS_CODE alt_qspi_uninit(void)
+{
+ // Put QSPI controller into reset.
+ alt_setbits_word(ALT_RSTMGR_PERMODRST_ADDR, ALT_RSTMGR_PERMODRST_QSPI_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_disable(void)
+{
+ alt_clrbits_word(ALT_QSPI_CFG_ADDR, ALT_QSPI_CFG_EN_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_enable(void)
+{
+ alt_setbits_word(ALT_QSPI_CFG_ADDR, ALT_QSPI_CFG_EN_SET_MSK);
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ /////
+
+ // Device specific configuration
+
+#if ALT_QSPI_PROVISION_MICRON_N25Q_SUPPORT
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_N25Q_enable();
+ }
+#endif
+
+ uint32_t rdid = 0;
+
+ // Query device capabilities
+ // This requires QSPI to be enabled.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_device_rdid(&rdid);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ // NOTE: The size code seems to be a form of BCD (binary coded decimal).
+ // The first nibble is the 10's digit and the second nibble is the 1's
+ // digit in the number of bytes.
+
+ // Capacity ID samples:
+ // 0x15 : 16 Mb => 2 MiB => 1 << 21 ; BCD=15
+ // 0x16 : 32 Mb => 4 MiB => 1 << 22 ; BCD=16
+ // 0x17 : 64 Mb => 8 MiB => 1 << 23 ; BCD=17
+ // 0x18 : 128 Mb => 16 MiB => 1 << 24 ; BCD=18
+ // 0x19 : 256 Mb => 32 MiB => 1 << 25 ; BCD=19
+ // 0x1a
+ // 0x1b
+ // 0x1c
+ // 0x1d
+ // 0x1e
+ // 0x1f
+ // 0x20 : 512 Mb => 64 MiB => 1 << 26 ; BCD=20
+ // 0x21 : 1024 Mb => 128 MiB => 1 << 27 ; BCD=21
+
+ int cap_code = ALT_QSPI_STIG_RDID_CAPACITYID_GET(rdid);
+
+ if ( ((cap_code >> 4) > 0x9) || ((cap_code & 0xf) > 0x9))
+ {
+ // If a non-valid BCD value is detected at the top or bottom nibble, chances
+ // are that the chip has a problem.
+
+ dprintf("DEBUG[QSPI]: Invalid CapacityID encountered: 0x%02x.\n", cap_code);
+ status = ALT_E_ERROR;
+ }
+ else
+ {
+ int cap_decoded = ((cap_code >> 4) * 10) + (cap_code & 0xf);
+
+ qspi_device_size = 1 << (cap_decoded + 6);
+
+ dprintf("DEBUG[QSPI]: Device size = 0x%" PRIx32 ".\n", qspi_device_size);
+ }
+ }
+
+ // Configure the device size and address bytes
+
+ if (status == ALT_E_SUCCESS)
+ {
+ ALT_QSPI_DEV_SIZE_CONFIG_t size_cfg =
+ {
+ .block_size = ALT_QSPI_DEVSZ_BYTESPERSUBSECTOR_RESET, // 0x10 => 2^16 = 64 KiB
+ .page_size = ALT_QSPI_DEVSZ_BYTESPERDEVICEPAGE_RESET, // 0x100 => 256 B
+ .addr_size = ALT_QSPI_DEVSZ_NUMADDRBYTES_RESET, // 0x2 => 3 bytes or 0x00ffffff mask.
+ .lower_wrprot_block = 0,
+ .upper_wrprot_block = (qspi_device_size - 1) >> 16,
+ .wrprot_enable = ALT_QSPI_WRPROT_EN_RESET
+ };
+
+ status = alt_qspi_device_size_config_set(&size_cfg);
+ }
+
+ /////
+
+ // Configure the DMA parameters
+
+ // This will allow DMA to work well without much intervention by users.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_dma_config_set(4, 32);
+ }
+
+ /////
+
+ return status;
+}
+
+/////
+
+uint32_t alt_qspi_int_status_get(void)
+{
+ // Read and return the value of the QSPI controller Interrupt Status
+ // Register (irqstat).
+ return alt_read_word(ALT_QSPI_IRQSTAT_ADDR);
+}
+
+ALT_STATUS_CODE alt_qspi_int_clear(const uint32_t mask)
+{
+    // Check that the [mask] contains valid interrupt status condition values.
+ if ((ALT_QSPI_INT_STATUS_ALL & mask) == 0)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ // Write 1's to clear the desired interrupt status condition(s).
+ alt_write_word(ALT_QSPI_IRQSTAT_ADDR, mask);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_int_disable(const uint32_t mask)
+{
+ if (alt_qspi_is_idle() == false)
+ {
+ return ALT_E_ERROR;
+ }
+
+    // Check that the [mask] contains valid interrupt status condition values.
+ if ((ALT_QSPI_INT_STATUS_ALL & mask) == 0)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ // Write 0's to disable the desired interrupt status condition(s).
+ alt_clrbits_word(ALT_QSPI_IRQMSK_ADDR, mask);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_int_enable(const uint32_t mask)
+{
+ if (alt_qspi_is_idle() == false)
+ {
+ return ALT_E_ERROR;
+ }
+
+    // Check that the [mask] contains valid interrupt status condition values.
+ if ((ALT_QSPI_INT_STATUS_ALL & mask) == 0)
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ // Write 1's to enable the desired interrupt status condition(s).
+ alt_setbits_word(ALT_QSPI_IRQMSK_ADDR, mask);
+
+ return ALT_E_SUCCESS;
+}
+
+/////
+
+bool alt_qspi_is_idle(void)
+{
+ // If the idle field of the QSPI configuration register is 1 then the serial
+    // interface and QSPI pipeline are idle.
+ return ALT_QSPI_CFG_IDLE_GET(alt_read_word(ALT_QSPI_CFG_ADDR)) == 1;
+}
+
+/////
+
+static ALT_STATUS_CODE alt_qspi_indirect_write_start_bank(uint32_t dst, size_t length);
+
+static ALT_STATUS_CODE alt_qspi_indirect_page_bound_write_helper(uint32_t dst, const char * src, size_t length)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_indirect_write_start_bank(dst, length);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ uint32_t write_count = 0;
+ uint32_t write_capacity = ALT_QSPI_SRAM_FIFO_ENTRY_COUNT - alt_qspi_sram_partition_get();
+
+ while (write_count < length)
+ {
+ uint32_t space = write_capacity - alt_qspi_indirect_write_fill_level();
+ space = MIN(space, (length - write_count)/ sizeof(uint32_t));
+
+ const uint32_t * data = (const uint32_t *)(src + write_count);
+ for (uint32_t i = 0; i < space; ++i)
+ {
+ alt_write_word(ALT_QSPIDATA_ADDR, *data++);
+ }
+
+ write_count += space * sizeof(uint32_t);
+ }
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_indirect_write_finish();
+ }
+
+ return status;
+}
+
+static ALT_STATUS_CODE alt_qspi_indirect_subsector_aligned_write_helper(const char * data, uint32_t subsec_addr)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ for (int i = 0; i < ALT_QSPI_SUBSECTOR_SIZE / ALT_QSPI_PAGE_SIZE; i++)
+ {
+ int offset = i * ALT_QSPI_PAGE_SIZE;
+
+ status = alt_qspi_indirect_page_bound_write_helper(subsec_addr + offset, data + offset, ALT_QSPI_PAGE_SIZE);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+ }
+
+ return status;
+}
+
+static ALT_STATUS_CODE alt_qspi_indirect_read_start_bank(uint32_t src, size_t size);
+
+//
+// This helper function reads a segment of data, which is limited to 1 bank
+// (24 bits of addressing).
+//
+static ALT_STATUS_CODE alt_qspi_read_bank(char * dst, uint32_t src, size_t size)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_indirect_read_start_bank(src, size);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ uint32_t read_count = 0;
+
+ while (!alt_qspi_indirect_read_is_complete())
+ {
+ uint32_t level = alt_qspi_indirect_read_fill_level();
+// level = MIN(level, (size - read_count) / sizeof(uint32_t));
+
+ uint32_t * data = (uint32_t *)(dst + read_count);
+ for (uint32_t i = 0; i < level; ++i)
+ {
+ *data++ = alt_read_word(ALT_QSPIDATA_ADDR);
+ }
+
+ read_count += level * sizeof(uint32_t);
+ }
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_indirect_read_finish();
+ }
+
+ return status;
+}
+
+ALT_STATUS_CODE alt_qspi_read(void * dst, uint32_t src, size_t size)
+{
+ if (src >= qspi_device_size)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (src + size - 1 >= qspi_device_size)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (size == 0)
+ {
+ return ALT_E_SUCCESS;
+ }
+
+ if ((uintptr_t)dst & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (src & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (size & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ /////
+
+ // Verify that there is not already a read in progress.
+ if (ALT_QSPI_INDRD_RD_STAT_GET(alt_read_word(ALT_QSPI_INDRD_ADDR)))
+ {
+ return ALT_E_ERROR;
+ }
+
+ /////
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ //
+ // bank_count : The number of bank(s) affected, including partial banks.
+ // bank_addr : The aligned address of the first affected bank, including partial bank(s).
+ // bank_ofst : The offset of the bank to read. Only used when reading the first bank.
+ //
+ uint32_t bank_count = ((src + size - 1) >> 24) - (src >> 24) + 1;
+ uint32_t bank_addr = src & ALT_QSPI_BANK_ADDR_MSK;
+ uint32_t bank_ofst = src & (ALT_QSPI_BANK_SIZE - 1);
+
+ char * data = (char *)dst;
+
+ uint32_t copy_length = MIN(size, ALT_QSPI_BANK_SIZE - bank_ofst);
+
+ dprintf("DEBUG[QSPI]: read(): bulk: mem_addr = %p; flash_addr = 0x%" PRIx32 ".\n", data, src);
+ dprintf("DEBUG[QSPI]: read(): bulk: bank_count = 0x%" PRIx32 ", bank_ofst = 0x%" PRIx32 ".\n", bank_count, bank_ofst);
+
+ for (uint32_t i = 0; i < bank_count; ++i)
+ {
+ dprintf("DEBUG[QSPI]: read(): bank 0x%" PRIx32 "; copy_length = 0x%" PRIx32 ".\n", bank_addr >> 24, copy_length);
+
+ status = alt_qspi_device_bank_select(bank_addr >> 24);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ // Read into the advancing destination pointer [data]; [dst] itself is not
+ // advanced by this loop.
+ status = alt_qspi_read_bank(data, bank_ofst, copy_length);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ bank_addr += ALT_QSPI_BANK_SIZE;
+ data += copy_length;
+ size -= copy_length;
+
+ copy_length = MIN(size, ALT_QSPI_BANK_SIZE);
+ }
+
+ return status;
+}
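+
+//
+// A worked example of the bank bookkeeping in alt_qspi_read() above. A bank
+// spans 24 bits of addressing (16 MiB), so for src = 0x00FF0000 and
+// size = 0x20000:
+//
+//   bank_count = ((0x0100FFFF >> 24) - (0x00FF0000 >> 24)) + 1 = 2
+//   bank_addr  = 0x00000000 for the first pass, then 0x01000000
+//   bank_ofst  = 0x00FF0000
+//
+// The first pass copies 0x10000 bytes up to the bank boundary; the second
+// pass copies the remaining 0x10000 bytes from the next bank.
+//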
+
+static ALT_STATUS_CODE alt_qspi_write_bank(uint32_t dst, const char * src, size_t size)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ /////
+
+ uint32_t page_ofst = dst & (ALT_QSPI_PAGE_SIZE - 1);
+ uint32_t write_size = MIN(size, ALT_QSPI_PAGE_SIZE - page_ofst);
+
+ while (size)
+ {
+ dprintf("DEBUG[QSPI]: write(): flash dst = 0x%" PRIx32 ", mem src = %p, write size = 0x%" PRIx32 ", size left = 0x%x.\n", dst, src, write_size, size);
+
+ status = alt_qspi_indirect_page_bound_write_helper(dst, src, write_size);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ dst += write_size;
+ src += write_size;
+ size -= write_size;
+
+ write_size = MIN(size, ALT_QSPI_PAGE_SIZE);
+ }
+
+ return status;
+}
+
+ALT_STATUS_CODE alt_qspi_write(uint32_t dst, const void * src, size_t size)
+{
+ if (dst >= qspi_device_size)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (dst + size - 1 >= qspi_device_size)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (size == 0)
+ {
+ return ALT_E_SUCCESS;
+ }
+
+ if ((uintptr_t)src & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (dst & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (size & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ /////
+
+ // Verify that there is not already a write in progress.
+ if (ALT_QSPI_INDWR_RDSTAT_GET(alt_read_word(ALT_QSPI_INDWR_ADDR)))
+ {
+ return ALT_E_ERROR;
+ }
+
+ /////
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ uint32_t bank_count = ((dst + size - 1) >> 24) - (dst >> 24) + 1;
+ uint32_t bank_addr = dst & ALT_QSPI_BANK_ADDR_MSK;
+ uint32_t bank_ofst = dst & (ALT_QSPI_BANK_SIZE - 1);
+
+ const char * data = src;
+
+ uint32_t copy_length = MIN(size, ALT_QSPI_BANK_SIZE - bank_ofst);
+
+ dprintf("DEBUG[QSPI]: write(): bulk: flash_addr = 0x%" PRIx32 "; mem_addr = %p.\n", dst, data);
+ dprintf("DEBUG[QSPI]: write(): bulk: bank_count = 0x%" PRIx32 ", bank_ofst = 0x%" PRIx32 ".\n", bank_count, bank_ofst);
+
+ for (uint32_t i = 0; i < bank_count; ++i)
+ {
+ dprintf("DEBUG[QSPI]: write(): bank 0x%" PRIx32 "; copy_length = 0x%" PRIx32 ".\n", bank_addr >> 24, copy_length);
+
+ status = alt_qspi_device_bank_select(bank_addr >> 24);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ status = alt_qspi_write_bank(bank_ofst, data, copy_length);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ bank_addr += ALT_QSPI_BANK_SIZE;
+ data += copy_length;
+ size -= copy_length;
+
+ copy_length = MIN(size, ALT_QSPI_BANK_SIZE);
+ }
+
+ return status;
+}
+
+static ALT_STATUS_CODE alt_qspi_erase_subsector_bank(uint32_t addr);
+
+static ALT_STATUS_CODE alt_qspi_replace_bank(uint32_t dst, const char * src, size_t size)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ //
+ // subsec_count : The total number of affected subsector(s),
+ // including partial subsector(s).
+ // subsec_addr : The aligned address of the next affected subsector,
+ // including partial subsector(s).
+ // subsec_partial_head : The number of bytes of subsector-unaligned data to
+ // be written out at the start of the flash write
+ // request. This data ends at the end of the first
+ // subsector, or earlier if fewer bytes are to be
+ // written.
+ // subsec_partial_tail : The number of bytes of subsector-unaligned data to
+ // be written out at the end of the flash write
+ // request. This data starts at the start of the last
+ // subsector. If only a single subsector is written
+ // (partial or full), this value will be zero.
+ //
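+ //
+ // As a worked example (assuming the 4 KiB subsector implied by the ">> 12"
+ // arithmetic below), for dst = 0x1800 and size = 0x2000:
+ //
+ //   subsec_count        = ((0x37FF >> 12) - (0x1800 >> 12)) + 1 = 3
+ //   subsec_addr         = 0x1000
+ //   subsec_partial_head = MIN(0x1000 - 0x800, 0x2000) & 0xFFF = 0x800
+ //   subsec_partial_tail = (0x2000 - 0x800) & 0xFFF = 0x800
+ //
+ // That is, half of the first subsector, one full middle subsector, and half
+ // of the last subsector are rewritten.
+ //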
+
+ uint32_t subsec_count = ((dst + size - 1) >> 12) - (dst >> 12) + 1;
+ uint32_t subsec_addr = dst & ALT_QSPI_SUBSECTOR_ADDR_MSK;
+
+ uint32_t subsec_partial_head = MIN(ALT_QSPI_SUBSECTOR_SIZE - (dst & (ALT_QSPI_SUBSECTOR_SIZE - 1)), size) & (ALT_QSPI_SUBSECTOR_SIZE - 1);
+ uint32_t subsec_partial_tail = (size - subsec_partial_head) & (ALT_QSPI_SUBSECTOR_SIZE - 1);
+
+ dprintf("DEBUG[QSPI]: replace(): report: dst = 0x%" PRIx32 "; size = 0x%x.\n",
+ dst, size);
+ dprintf("DEBUG[QSPI]: replace(): report: subsec_count = 0x%" PRIx32 "; subsec_addr = 0x%" PRIx32 ".\n",
+ subsec_count, subsec_addr);
+ dprintf("DEBUG[QSPI]: replace(): report: partial_head = 0x%" PRIx32 "; partial_tail = 0x%" PRIx32 ".\n",
+ subsec_partial_head, subsec_partial_tail);
+
+ // Write the first subsector, partial case.
+
+ if (subsec_partial_head)
+ {
+ // The write request is not aligned to a subsector so we must do the
+ // Read-Modify-Write cycle to preserve the existing data at the head of
+ // the subsector not affected by the write.
+
+ char subsec_buf[ALT_QSPI_SUBSECTOR_SIZE];
+
+ uint32_t subsec_ofst = dst & ~ALT_QSPI_SUBSECTOR_ADDR_MSK;
+
+ // - Read the subsector into buffer
+ // - Erase that subsector
+ // - Copy in the user data into buffer
+ // - Write out buffer to subsector
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_read_bank(subsec_buf, subsec_addr, subsec_ofst);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_erase_subsector_bank(subsec_addr);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ memcpy(subsec_buf + subsec_ofst, src, subsec_partial_head);
+ status = alt_qspi_indirect_subsector_aligned_write_helper(subsec_buf, subsec_addr);
+ }
+
+ // Do some bookkeeping on the user buffer information
+ src += subsec_partial_head;
+ size -= subsec_partial_head;
+
+ // Do some bookkeeping on the subsector tracking
+ subsec_count--;
+ subsec_addr += ALT_QSPI_SUBSECTOR_SIZE;
+
+ dprintf("DEBUG[QSPI]: replace(): partial head: subsec_ofst = 0x%" PRIx32 "; size left = 0x%x; status = %" PRIi32 ".\n",
+ subsec_ofst, size, status);
+ }
+
+ // If there is a partial tail, then take 1 off the subsec_count. This way
+ // the following loop will write out all the complete subsectors. The tail
+ // will be written out afterwards.
+
+ if (subsec_partial_tail)
+ {
+ subsec_count--;
+ }
+
+ // Write the aligned subsectors following any partial subsectors.
+
+ for (uint32_t i = 0; i < subsec_count; ++i)
+ {
+ // - Erase subsector
+ // - Write out buffer to subsector
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_erase_subsector_bank(subsec_addr);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_indirect_subsector_aligned_write_helper(src, subsec_addr);
+ }
+
+ src += ALT_QSPI_SUBSECTOR_SIZE;
+ size -= ALT_QSPI_SUBSECTOR_SIZE;
+
+ // Don't modify subsec_count as it's being used by the loop.
+ subsec_addr += ALT_QSPI_SUBSECTOR_SIZE;
+
+ dprintf("DEBUG[QSPI]: replace(): subsec aligned: size left = 0x%x, status = %" PRIi32 ".\n",
+ size, status);
+ }
+
+ // Write the last subsector, partial case.
+
+ if (subsec_partial_tail)
+ {
+ // The write request is not aligned to a subsector so we must do the
+ // Read-Modify-Write cycle to preserve the existing data at the end of
+ // the subsector not affected by the write.
+
+ char subsec_buf[ALT_QSPI_SUBSECTOR_SIZE];
+
+ // - Read the subsector into buffer
+ // - Erase that subsector
+ // - Copy in the user data into buffer
+ // - Write out buffer to subsector
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_read_bank(subsec_buf + subsec_partial_tail,
+ subsec_addr + subsec_partial_tail,
+ ALT_QSPI_SUBSECTOR_SIZE - subsec_partial_tail);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_erase_subsector_bank(subsec_addr);
+ }
+ if (status == ALT_E_SUCCESS)
+ {
+ memcpy(subsec_buf, src, subsec_partial_tail);
+ status = alt_qspi_indirect_subsector_aligned_write_helper(subsec_buf, subsec_addr);
+ }
+
+ src += subsec_partial_tail;
+ size -= subsec_partial_tail;
+
+ dprintf("DEBUG[QSPI]: replace(): partial tail: size left = 0x%x, status = %" PRIi32 ".\n",
+ size, status);
+ }
+
+ return status;
+}
+
+ALT_STATUS_CODE alt_qspi_replace(uint32_t dst, const void * src, size_t size)
+{
+ if (dst >= qspi_device_size)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (dst + size - 1 >= qspi_device_size)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (size == 0)
+ {
+ return ALT_E_SUCCESS;
+ }
+
+ if ((uintptr_t)src & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (dst & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (size & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ /////
+
+ // Verify that there is not already a read in progress.
+ if (ALT_QSPI_INDRD_RD_STAT_GET(alt_read_word(ALT_QSPI_INDRD_ADDR)))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify that there is not already a write in progress.
+ if (ALT_QSPI_INDWR_RDSTAT_GET(alt_read_word(ALT_QSPI_INDWR_ADDR)))
+ {
+ return ALT_E_ERROR;
+ }
+
+ /////
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ uint32_t bank_count = ((dst + size - 1) >> 24) - (dst >> 24) + 1;
+ uint32_t bank_addr = dst & ALT_QSPI_BANK_ADDR_MSK;
+ uint32_t bank_ofst = dst & (ALT_QSPI_BANK_SIZE - 1);
+
+ const char * data = (const char *)src;
+
+ uint32_t copy_length = MIN(size, ALT_QSPI_BANK_SIZE - bank_ofst);
+
+ dprintf("DEBUG[QSPI]: replace(): bulk: flash_addr = 0x%" PRIx32 "; mem_addr = %p.\n", dst, data);
+ dprintf("DEBUG[QSPI]: replace(): bulk: bank_count = 0x%" PRIx32 ", bank_ofst = 0x%" PRIx32 ".\n", bank_count, bank_ofst);
+
+ for (uint32_t i = 0; i < bank_count; ++i)
+ {
+ dprintf("DEBUG[QSPI]: replace(): bank 0x%" PRIx32 "; copy_length = 0x%" PRIx32 ".\n", bank_addr >> 24, copy_length);
+
+ status = alt_qspi_device_bank_select(bank_addr >> 24);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ status = alt_qspi_replace_bank(bank_ofst, data, copy_length);
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ bank_addr += ALT_QSPI_BANK_SIZE;
+ data += copy_length;
+ size -= copy_length;
+
+ copy_length = MIN(size, ALT_QSPI_BANK_SIZE);
+ }
+
+ return status;
+}
+
+/////
+
+ALT_QSPI_BAUD_DIV_t alt_qspi_baud_rate_div_get(void)
+{
+ uint32_t baud_rate_div = ALT_QSPI_CFG_BAUDDIV_GET(alt_read_word(ALT_QSPI_CFG_ADDR));
+ return (ALT_QSPI_BAUD_DIV_t) baud_rate_div;
+}
+
+ALT_STATUS_CODE alt_qspi_baud_rate_div_set(const ALT_QSPI_BAUD_DIV_t baud_rate_div)
+{
+ if (0xf < (uint32_t)baud_rate_div)
+ {
+ // Invalid baud rate divisor value.
+ return ALT_E_BAD_ARG;
+ }
+
+ // Set the Master Mode Baud Rate Divisor Field of the QSPI Configuration Register.
+ alt_replbits_word(ALT_QSPI_CFG_ADDR,
+ ALT_QSPI_CFG_BAUDDIV_SET_MSK,
+ ALT_QSPI_CFG_BAUDDIV_SET(baud_rate_div));
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_chip_select_config_get(uint32_t* cs,
+ ALT_QSPI_CS_MODE_t* cs_mode)
+{
+ uint32_t cfg = alt_read_word(ALT_QSPI_CFG_ADDR);
+
+ *cs = ALT_QSPI_CFG_PERCSLINES_GET(cfg);
+ *cs_mode = (ALT_QSPI_CS_MODE_t) ALT_QSPI_CFG_PERSELDEC_GET(cfg);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_chip_select_config_set(const uint32_t cs,
+ const ALT_QSPI_CS_MODE_t cs_mode)
+{
+ // Chip select [cs]:
+ // A four bit value; bit 0 = cs0, bit 1 = cs1, bit 2 = cs2, bit 3 = cs3.
+ // Since the chip selects are active low, a bit must be zero to enable the
+ // corresponding chip select.
+ //
+ // Multiple chip select lines may be enabled together.
+
+ if (cs > ((1 << ALT_QSPI_CFG_PERCSLINES_WIDTH) - 1))
+ {
+ // [cs] not within possible 4 bit chip select line value range.
+ return ALT_E_ARG_RANGE;
+ }
+
+ if ((cs_mode != ALT_QSPI_CS_MODE_SINGLE_SELECT) && (cs_mode != ALT_QSPI_CS_MODE_DECODE))
+ {
+ return ALT_E_INV_OPTION;
+ }
+
+ // Update the Peripheral Chip Select Lines and Peripheral Select Decode
+ // Fields of the QSPI Configuration Register value with the chip select
+ // options.
+ uint32_t cfg = alt_read_word(ALT_QSPI_CFG_ADDR);
+ cfg &= ALT_QSPI_CFG_PERCSLINES_CLR_MSK & ALT_QSPI_CFG_PERSELDEC_CLR_MSK;
+ cfg |= ALT_QSPI_CFG_PERCSLINES_SET(cs) | ALT_QSPI_CFG_PERSELDEC_SET(cs_mode);
+ alt_write_word(ALT_QSPI_CFG_ADDR, cfg);
+
+ return ALT_E_SUCCESS;
+}
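+
+//
+// A minimal usage sketch for the chip select configuration above. Per the
+// comment in alt_qspi_chip_select_config_set(), the lines are active low, so
+// selecting only cs0 in single-select mode means clearing bit 0:
+//
+//   ALT_STATUS_CODE status = alt_qspi_chip_select_config_set(0xE, ALT_QSPI_CS_MODE_SINGLE_SELECT);
+//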
+
+ALT_STATUS_CODE alt_qspi_mode_bit_disable(void)
+{
+ // Clear the Mode Bit Enable Field of the Device Read Instruction Register
+ // to disable mode bits from being sent after the address bytes.
+ alt_clrbits_word(ALT_QSPI_DEVRD_ADDR, ALT_QSPI_DEVRD_ENMODBITS_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_mode_bit_enable(void)
+{
+ // Set the Mode Bit Enable Field of the Device Read Instruction Register
+ // to enable mode bits to be sent after the address bytes.
+ alt_setbits_word(ALT_QSPI_DEVRD_ADDR, ALT_QSPI_DEVRD_ENMODBITS_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+uint32_t alt_qspi_mode_bit_config_get(void)
+{
+ // Return the 8 bit value from the Mode Field of the Mode Bit Configuration
+ // Register.
+ return ALT_QSPI_MODBIT_MOD_GET(alt_read_word(ALT_QSPI_MODBIT_ADDR));
+}
+
+ALT_STATUS_CODE alt_qspi_mode_bit_config_set(const uint32_t mode_bits)
+{
+ if (alt_qspi_is_idle() == false)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (mode_bits > ((1 << ALT_QSPI_MODBIT_MOD_WIDTH) - 1))
+ {
+ // 'mode_bits' not within possible 8 bit mode value range.
+ return ALT_E_ARG_RANGE;
+ }
+
+ // Set the 8 bit value in the Mode Field of the Mode Bit Configuration
+ // Register.
+ alt_replbits_word(ALT_QSPI_MODBIT_ADDR,
+ ALT_QSPI_MODBIT_MOD_SET_MSK,
+ ALT_QSPI_MODBIT_MOD_SET(mode_bits));
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_device_size_config_get(ALT_QSPI_DEV_SIZE_CONFIG_t * cfg)
+{
+ // Although not required, it is recommended that the write protect feature
+ // be enabled prior to enabling the QSPI controller. This will block any AHB
+ // writes from taking effect. This also means the write protection registers
+ // (Lower Write Protection, Upper Write Protection, and Write Protection)
+ // should be setup and the number of bytes per device block in the device
+ // size configuration register should be setup prior to enabling the QSPI
+ // controller.
+
+ // Read Device Size Register and get the Number of Bytes per Block, Number
+ // of Bytes per Device, and Number of Address Bytes Fields.
+
+ uint32_t devsz = alt_read_word(ALT_QSPI_DEVSZ_ADDR);
+
+ cfg->block_size = ALT_QSPI_DEVSZ_BYTESPERSUBSECTOR_GET(devsz);
+ cfg->page_size = ALT_QSPI_DEVSZ_BYTESPERDEVICEPAGE_GET(devsz);
+ cfg->addr_size = ALT_QSPI_DEVSZ_NUMADDRBYTES_GET(devsz);
+
+ // Read Lower Write Protection, Upper Write Protection, and Write Protection
+ // Registers.
+
+ cfg->lower_wrprot_block = ALT_QSPI_LOWWRPROT_SUBSECTOR_GET(alt_read_word(ALT_QSPI_LOWWRPROT_ADDR));
+ cfg->upper_wrprot_block = ALT_QSPI_UPPWRPROT_SUBSECTOR_GET(alt_read_word(ALT_QSPI_UPPWRPROT_ADDR));
+ cfg->wrprot_enable = ALT_QSPI_WRPROT_EN_GET(alt_read_word(ALT_QSPI_WRPROT_ADDR));
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_device_size_config_set(const ALT_QSPI_DEV_SIZE_CONFIG_t * cfg)
+{
+ if (cfg->block_size > ((1 << ALT_QSPI_DEVSZ_BYTESPERSUBSECTOR_WIDTH) - 1))
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ if (cfg->page_size > ((1 << ALT_QSPI_DEVSZ_BYTESPERDEVICEPAGE_WIDTH) - 1))
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ if (cfg->addr_size > ((1 << ALT_QSPI_DEVSZ_NUMADDRBYTES_WIDTH) - 1))
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ if (cfg->lower_wrprot_block > cfg->upper_wrprot_block)
+ {
+ // Null write protection regions are not allowed.
+ return ALT_E_ARG_RANGE;
+ }
+
+ /////
+
+ uint32_t value = ALT_QSPI_DEVSZ_BYTESPERSUBSECTOR_SET(cfg->block_size) |
+ ALT_QSPI_DEVSZ_BYTESPERDEVICEPAGE_SET(cfg->page_size) |
+ ALT_QSPI_DEVSZ_NUMADDRBYTES_SET(cfg->addr_size);
+
+ alt_write_word(ALT_QSPI_DEVSZ_ADDR, value);
+
+ if (cfg->wrprot_enable)
+ {
+ alt_write_word(ALT_QSPI_LOWWRPROT_ADDR, cfg->lower_wrprot_block);
+ alt_write_word(ALT_QSPI_UPPWRPROT_ADDR, cfg->upper_wrprot_block);
+ }
+
+ // Read Upper Write Protection Register - uppwrprot.
+ // Set the Write Protection Enable Bit Field of the Write Protection
+ // Register accordingly.
+ if (cfg->wrprot_enable)
+ {
+ alt_setbits_word(ALT_QSPI_WRPROT_ADDR, ALT_QSPI_WRPROT_EN_SET(1));
+ }
+ else
+ {
+ alt_clrbits_word(ALT_QSPI_WRPROT_ADDR, ALT_QSPI_WRPROT_EN_SET(1));
+ }
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_device_read_config_get(ALT_QSPI_DEV_INST_CONFIG_t * cfg)
+{
+ // Read the Device Read Instruction Register - devrd.
+ uint32_t devrd = alt_read_word(ALT_QSPI_DEVRD_ADDR);
+
+ cfg->op_code = ALT_QSPI_DEVRD_RDOPCODE_GET(devrd);
+ cfg->inst_type = (ALT_QSPI_MODE_t) ALT_QSPI_DEVRD_INSTWIDTH_GET(devrd);
+ cfg->addr_xfer_type = (ALT_QSPI_MODE_t) ALT_QSPI_DEVRD_ADDRWIDTH_GET(devrd);
+ cfg->data_xfer_type = (ALT_QSPI_MODE_t) ALT_QSPI_DEVRD_DATAWIDTH_GET(devrd);
+ cfg->dummy_cycles = ALT_QSPI_DEVRD_DUMMYRDCLKS_GET(devrd);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_device_read_config_set(const ALT_QSPI_DEV_INST_CONFIG_t * cfg)
+{
+ if (alt_qspi_is_idle() == false)
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Validate input
+
+ if (cfg->op_code > ((1 << ALT_QSPI_DEVRD_RDOPCODE_WIDTH) - 1))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ switch (cfg->inst_type)
+ {
+ case ALT_QSPI_MODE_SINGLE:
+ case ALT_QSPI_MODE_DUAL:
+ case ALT_QSPI_MODE_QUAD:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ switch (cfg->addr_xfer_type)
+ {
+ case ALT_QSPI_MODE_SINGLE:
+ case ALT_QSPI_MODE_DUAL:
+ case ALT_QSPI_MODE_QUAD:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ switch (cfg->data_xfer_type)
+ {
+ case ALT_QSPI_MODE_SINGLE:
+ case ALT_QSPI_MODE_DUAL:
+ case ALT_QSPI_MODE_QUAD:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ if (cfg->dummy_cycles > ((1 << ALT_QSPI_DEVRD_DUMMYRDCLKS_WIDTH) - 1))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ /////
+
+ // Read the Device Read Instruction Register - devrd.
+ uint32_t devrd = alt_read_word(ALT_QSPI_DEVRD_ADDR);
+
+ devrd &= ALT_QSPI_DEVRD_RDOPCODE_CLR_MSK &
+ ALT_QSPI_DEVRD_INSTWIDTH_CLR_MSK &
+ ALT_QSPI_DEVRD_ADDRWIDTH_CLR_MSK &
+ ALT_QSPI_DEVRD_DATAWIDTH_CLR_MSK &
+ ALT_QSPI_DEVRD_DUMMYRDCLKS_CLR_MSK;
+
+ devrd |= ALT_QSPI_DEVRD_RDOPCODE_SET(cfg->op_code) |
+ ALT_QSPI_DEVRD_INSTWIDTH_SET(cfg->inst_type) |
+ ALT_QSPI_DEVRD_ADDRWIDTH_SET(cfg->addr_xfer_type) |
+ ALT_QSPI_DEVRD_DATAWIDTH_SET(cfg->data_xfer_type) |
+ ALT_QSPI_DEVRD_DUMMYRDCLKS_SET(cfg->dummy_cycles);
+
+ alt_write_word(ALT_QSPI_DEVRD_ADDR, devrd);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_device_write_config_get(ALT_QSPI_DEV_INST_CONFIG_t * cfg)
+{
+ // Device Write Instruction Register - devwr.
+ uint32_t devwr = alt_read_word(ALT_QSPI_DEVWR_ADDR);
+
+ cfg->op_code = ALT_QSPI_DEVWR_WROPCODE_GET(devwr);
+ // The Instruction Type field in the Device READ Instruction Register only appears
+ // once and applies to both READ and WRITE operations. It is not included in the
+ // Device WRITE Instruction Register.
+ cfg->inst_type = (ALT_QSPI_MODE_t) ALT_QSPI_DEVRD_INSTWIDTH_GET(alt_read_word(ALT_QSPI_DEVRD_ADDR));
+ cfg->addr_xfer_type = (ALT_QSPI_MODE_t) ALT_QSPI_DEVWR_ADDRWIDTH_GET(devwr);
+ cfg->data_xfer_type = (ALT_QSPI_MODE_t) ALT_QSPI_DEVWR_DATAWIDTH_GET(devwr);
+ cfg->dummy_cycles = ALT_QSPI_DEVWR_DUMMYWRCLKS_GET(devwr);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_device_write_config_set(const ALT_QSPI_DEV_INST_CONFIG_t * cfg)
+{
+ if (alt_qspi_is_idle() == false)
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Validate input
+
+ if (cfg->op_code > ((1 << ALT_QSPI_DEVWR_WROPCODE_WIDTH) - 1))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ switch (cfg->inst_type)
+ {
+ case ALT_QSPI_MODE_SINGLE:
+ case ALT_QSPI_MODE_DUAL:
+ case ALT_QSPI_MODE_QUAD:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ switch (cfg->addr_xfer_type)
+ {
+ case ALT_QSPI_MODE_SINGLE:
+ case ALT_QSPI_MODE_DUAL:
+ case ALT_QSPI_MODE_QUAD:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ switch (cfg->data_xfer_type)
+ {
+ case ALT_QSPI_MODE_SINGLE:
+ case ALT_QSPI_MODE_DUAL:
+ case ALT_QSPI_MODE_QUAD:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ if (cfg->dummy_cycles > ((1 << ALT_QSPI_DEVWR_DUMMYWRCLKS_WIDTH) - 1))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ /////
+
+ // Read the Device Write Instruction Register - devwr.
+ uint32_t devwr = alt_read_word(ALT_QSPI_DEVWR_ADDR);
+
+ devwr &= ALT_QSPI_DEVWR_WROPCODE_CLR_MSK &
+ ALT_QSPI_DEVWR_ADDRWIDTH_CLR_MSK &
+ ALT_QSPI_DEVWR_DATAWIDTH_CLR_MSK &
+ ALT_QSPI_DEVWR_DUMMYWRCLKS_CLR_MSK;
+
+ devwr |= ALT_QSPI_DEVWR_WROPCODE_SET(cfg->op_code) |
+ ALT_QSPI_DEVWR_ADDRWIDTH_SET(cfg->addr_xfer_type) |
+ ALT_QSPI_DEVWR_DATAWIDTH_SET(cfg->data_xfer_type) |
+ ALT_QSPI_DEVWR_DUMMYWRCLKS_SET(cfg->dummy_cycles);
+
+ alt_write_word(ALT_QSPI_DEVWR_ADDR, devwr);
+
+ // The Instruction Type field in the Device READ Instruction Register only appears
+ // once and applies to both READ and WRITE operations - it is not included in the
+ // Device WRITE Instruction Register. Therefore, modify the Instruction Type
+ // Field in the Device Read Register.
+ alt_replbits_word(ALT_QSPI_DEVRD_ADDR,
+ ALT_QSPI_DEVRD_INSTWIDTH_SET_MSK,
+ ALT_QSPI_DEVRD_INSTWIDTH_SET((uint32_t) cfg->inst_type));
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_timing_config_get(ALT_QSPI_TIMING_CONFIG_t * cfg)
+{
+ // QSPI Configuration Register - cfg
+ uint32_t cfgreg = alt_read_word(ALT_QSPI_CFG_ADDR);
+ cfg->clk_phase = (ALT_QSPI_CLK_PHASE_t) ALT_QSPI_CFG_SELCLKPHASE_GET(cfgreg);
+ cfg->clk_pol = (ALT_QSPI_CLK_POLARITY_t) ALT_QSPI_CFG_SELCLKPOL_GET(cfgreg);
+
+ // QSPI Device Delay Register
+ uint32_t delayreg = alt_read_word(ALT_QSPI_DELAY_ADDR);
+ cfg->cs_sot = ALT_QSPI_DELAY_INIT_GET(delayreg);
+ cfg->cs_eot = ALT_QSPI_DELAY_AFTER_GET(delayreg);
+ cfg->cs_dads = ALT_QSPI_DELAY_BTWN_GET(delayreg);
+ cfg->cs_da = ALT_QSPI_DELAY_NSS_GET(delayreg);
+
+ // Read Data Capture Register
+ cfg->rd_datacap = ALT_QSPI_RDDATACAP_DELAY_GET(alt_read_word(ALT_QSPI_RDDATACAP_ADDR));
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_timing_config_set(const ALT_QSPI_TIMING_CONFIG_t * cfg)
+{
+ if (alt_qspi_is_idle() == false)
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Validate parameter(s)
+
+ switch (cfg->clk_phase)
+ {
+ case ALT_QSPI_CLK_PHASE_ACTIVE:
+ case ALT_QSPI_CLK_PHASE_INACTIVE:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ switch (cfg->clk_pol)
+ {
+ case ALT_QSPI_CLK_POLARITY_LOW:
+ case ALT_QSPI_CLK_POLARITY_HIGH:
+ break;
+ default:
+ return ALT_E_BAD_ARG;
+ }
+
+ if (cfg->cs_da > ((1 << ALT_QSPI_DELAY_NSS_WIDTH) - 1))
+ {
+ return ALT_E_BAD_ARG;
+ }
+ if (cfg->cs_dads > ((1 << ALT_QSPI_DELAY_BTWN_WIDTH) - 1))
+ {
+ return ALT_E_BAD_ARG;
+ }
+ if (cfg->cs_eot > ((1 << ALT_QSPI_DELAY_AFTER_WIDTH) - 1))
+ {
+ return ALT_E_BAD_ARG;
+ }
+ if (cfg->cs_sot > ((1 << ALT_QSPI_DELAY_INIT_WIDTH) - 1))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ if (cfg->rd_datacap > ((1 << ALT_QSPI_RDDATACAP_DELAY_WIDTH) - 1))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ /////
+
+ // QSPI Configuration Register - cfg
+ uint32_t cfgreg = alt_read_word(ALT_QSPI_CFG_ADDR);
+ cfgreg &= ALT_QSPI_CFG_SELCLKPHASE_CLR_MSK &
+ ALT_QSPI_CFG_SELCLKPOL_CLR_MSK;
+ cfgreg |= ALT_QSPI_CFG_SELCLKPHASE_SET(cfg->clk_phase) |
+ ALT_QSPI_CFG_SELCLKPOL_SET(cfg->clk_pol);
+ alt_write_word(ALT_QSPI_CFG_ADDR, cfgreg);
+
+ // QSPI Device Delay Register
+ uint32_t delayreg = ALT_QSPI_DELAY_INIT_SET(cfg->cs_sot) |
+ ALT_QSPI_DELAY_AFTER_SET(cfg->cs_eot) |
+ ALT_QSPI_DELAY_BTWN_SET(cfg->cs_dads) |
+ ALT_QSPI_DELAY_NSS_SET(cfg->cs_da);
+ alt_write_word(ALT_QSPI_DELAY_ADDR, delayreg);
+
+ // Read Data Capture Register
+
+ alt_write_word(ALT_QSPI_RDDATACAP_ADDR,
+ ALT_QSPI_RDDATACAP_BYP_SET(1) |
+ ALT_QSPI_RDDATACAP_DELAY_SET(cfg->rd_datacap));
+
+ return ALT_E_SUCCESS;
+}
+
+/////
+
+ALT_STATUS_CODE alt_qspi_direct_disable(void)
+{
+ // Clear (set to 0) the Enable Direct Access Controller Field of the QSPI
+ // Configuration Register to disable the Direct Access Controller.
+ alt_clrbits_word(ALT_QSPI_CFG_ADDR, ALT_QSPI_CFG_ENDIRACC_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_direct_enable(void)
+{
+ // Set (set to 1) the Enable Direct Access Controller Field of the QSPI
+ // Configuration Register to enable the Direct Access Controller.
+ alt_setbits_word(ALT_QSPI_CFG_ADDR, ALT_QSPI_CFG_ENDIRACC_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+uint32_t alt_qspi_ahb_remap_address_get(void)
+{
+ // Read and return the value of the Remap Address Register.
+ return ALT_QSPI_REMAPADDR_VALUE_GET(alt_read_word(ALT_QSPI_REMAPADDR_ADDR));
+}
+
+ALT_STATUS_CODE alt_qspi_ahb_remap_address_set(const uint32_t ahb_remap_addr)
+{
+ if (alt_qspi_is_idle() == false)
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Set the AHB remap address value in the Remap Address Register.
+ alt_setbits_word(ALT_QSPI_REMAPADDR_ADDR, ALT_QSPI_REMAPADDR_VALUE_SET(ahb_remap_addr));
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_ahb_address_remap_disable(void)
+{
+ // Clear (set to 0) the Enable AHB Address Remapping Field of the QSPI
+ // Configuration Register to disable AHB address remapping.
+ alt_clrbits_word(ALT_QSPI_CFG_ADDR, ALT_QSPI_CFG_ENAHBREMAP_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_ahb_address_remap_enable(void)
+{
+ // Set (set to 1) the Enable AHB Address Remapping Field of the QSPI
+ // Configuration Register to enable AHB address remapping.
+ alt_setbits_word(ALT_QSPI_CFG_ADDR, ALT_QSPI_CFG_ENAHBREMAP_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+/////
+
+static ALT_STATUS_CODE alt_qspi_indirect_read_start_bank(uint32_t flash_addr,
+ size_t num_bytes)
+{
+ alt_write_word(ALT_QSPI_INDRDSTADDR_ADDR, flash_addr);
+ alt_write_word(ALT_QSPI_INDRDCNT_ADDR, num_bytes);
+ alt_write_word(ALT_QSPI_INDRD_ADDR, ALT_QSPI_INDRD_START_SET_MSK |
+ ALT_QSPI_INDRD_IND_OPS_DONE_STAT_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_indirect_read_start(const uint32_t flash_addr,
+ const size_t num_bytes)
+{
+ // flash_addr and num_bytes restriction is to prevent possible unaligned
+ // exceptions.
+
+ if (flash_addr & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (num_bytes & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (num_bytes == 0)
+ {
+ // Do not report this as a success. If an indirect read was not
+ // previously completed, it may be cleared already, at which point
+ // alt_qspi_indirect_read_is_complete() will never report true.
+ return ALT_E_ERROR;
+ }
+
+ if (flash_addr > qspi_device_size)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (flash_addr + num_bytes > qspi_device_size)
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify request does not cross bank boundary.
+ // This limitation is due to the 3-byte addressing limitation.
+ if ((flash_addr & ALT_QSPI_BANK_ADDR_MSK) != ((flash_addr + num_bytes - 1) & ALT_QSPI_BANK_ADDR_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify that there is not already a read in progress.
+ if (ALT_QSPI_INDRD_RD_STAT_GET(alt_read_word(ALT_QSPI_INDRD_ADDR)))
+ {
+ return ALT_E_ERROR;
+ }
+
+ /////
+
+ ALT_STATUS_CODE status;
+ status = alt_qspi_device_bank_select(flash_addr >> 24);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+
+ /////
+
+ return alt_qspi_indirect_read_start_bank(flash_addr,
+ num_bytes);
+
+}
+
+ALT_STATUS_CODE alt_qspi_indirect_read_finish(void)
+{
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_indirect_read_cancel(void)
+{
+ // An indirect operation may be cancelled at any time by setting Indirect
+ // Transfer Control Register bit [1].
+ alt_write_word(ALT_QSPI_INDRD_ADDR, ALT_QSPI_INDRD_CANCEL_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+uint32_t alt_qspi_indirect_read_fill_level(void)
+{
+ // Return the SRAM Fill Level (Indirect Read Partition) Field of the SRAM
+ // Fill Register to get the SRAM Fill Level for the Indirect Read Partition
+ // in units of SRAM Words (4 bytes).
+ return ALT_QSPI_SRAMFILL_INDRDPART_GET(alt_read_word(ALT_QSPI_SRAMFILL_ADDR));
+}
+
+uint32_t alt_qspi_indirect_read_watermark_get(void)
+{
+ // Return the Watermark value in the Indirect Read Transfer Watermark Register.
+ return alt_read_word(ALT_QSPI_INDRDWATER_ADDR);
+}
+
+ALT_STATUS_CODE alt_qspi_indirect_read_watermark_set(const uint32_t watermark)
+{
+ // Verify that there is not already a read in progress.
+ if (ALT_QSPI_INDRD_RD_STAT_GET(alt_read_word(ALT_QSPI_INDRD_ADDR)))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Set the Watermark value in the Indirect Read Transfer Watermark Register.
+ alt_write_word(ALT_QSPI_INDRDWATER_ADDR, watermark);
+
+ return ALT_E_SUCCESS;
+}
+
+bool alt_qspi_indirect_read_is_complete(void)
+{
+ // The value of the Indirect Completion Status Field of the Indirect Read
+ // Transfer Control Register is set by hardware when an indirect read
+ // operation has completed.
+ return (alt_read_word(ALT_QSPI_INDRD_ADDR) & ALT_QSPI_INDRD_IND_OPS_DONE_STAT_SET_MSK) != 0;
+}
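+
+//
+// The typical indirect read sequence using the public API above, mirroring
+// the internal alt_qspi_read_bank() helper. A sketch only: the function name
+// is illustrative, error handling and the word-multiple size restrictions are
+// omitted, and [buf] must be able to hold [size] bytes.
+//
+//   ALT_STATUS_CODE example_indirect_read(uint32_t * buf, uint32_t flash_addr, size_t size)
+//   {
+//       ALT_STATUS_CODE status = alt_qspi_indirect_read_start(flash_addr, size);
+//       if (status != ALT_E_SUCCESS)
+//       {
+//           return status;
+//       }
+//
+//       while (!alt_qspi_indirect_read_is_complete())
+//       {
+//           // Drain however many 4-byte words the read partition holds.
+//           uint32_t level = alt_qspi_indirect_read_fill_level();
+//           for (uint32_t i = 0; i < level; ++i)
+//           {
+//               *buf++ = alt_read_word(ALT_QSPIDATA_ADDR);
+//           }
+//       }
+//
+//       return alt_qspi_indirect_read_finish();
+//   }
+//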
+
+static ALT_STATUS_CODE alt_qspi_indirect_write_start_bank(uint32_t flash_addr,
+ size_t num_bytes)
+{
+ alt_write_word(ALT_QSPI_INDWRSTADDR_ADDR, flash_addr);
+ alt_write_word(ALT_QSPI_INDWRCNT_ADDR, num_bytes);
+ alt_write_word(ALT_QSPI_INDWR_ADDR, ALT_QSPI_INDWR_START_SET_MSK |
+ ALT_QSPI_INDWR_INDDONE_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_indirect_write_start(const uint32_t flash_addr,
+ const size_t num_bytes)
+{
+ // flash_addr and num_bytes restriction is to prevent possible unaligned
+ // exceptions.
+
+ if (flash_addr & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (num_bytes & 0x3)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (num_bytes == 0)
+ {
+ // Do not report this as a success. If an indirect write was not
+ // previously completed, it may be cleared already, at which point
+ // alt_qspi_indirect_write_is_complete() will never report true.
+ return ALT_E_ERROR;
+ }
+
+ if (num_bytes > 256)
+ {
+ // The Micron part can only write up to 256 bytes at a time.
+ return ALT_E_ERROR;
+ }
+
+ if (flash_addr > qspi_device_size)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (flash_addr + num_bytes > qspi_device_size)
+ {
+ return ALT_E_ERROR;
+ }
+
+/*
+ // Verify request does not cross bank boundary.
+ // This limitation is due to the 3-byte addressing limitation.
+ if ((flash_addr & ALT_QSPI_BANK_ADDR_MSK) != ((flash_addr + num_bytes - 1) & ALT_QSPI_BANK_ADDR_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+*/
+ // Verify request does not cross page boundary.
+ // This limitation is in place for the Micron part used.
+ if ((flash_addr & ALT_QSPI_PAGE_ADDR_MSK) != ((flash_addr + num_bytes - 1) & ALT_QSPI_PAGE_ADDR_MSK))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Verify that there is not already a write in progress.
+ if (ALT_QSPI_INDWR_RDSTAT_GET(alt_read_word(ALT_QSPI_INDWR_ADDR)))
+ {
+ return ALT_E_ERROR;
+ }
+
+ /////
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+ status = alt_qspi_device_bank_select(flash_addr >> 24);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+
+ /////
+
+ return alt_qspi_indirect_write_start_bank(flash_addr,
+ num_bytes);
+}
+
+ALT_STATUS_CODE alt_qspi_indirect_write_finish(void)
+{
+#if ALT_QSPI_PROVISION_MICRON_N25Q_SUPPORT
+ return alt_qspi_N25Q_flag_wait_for_program(ALT_QSPI_TIMEOUT_INFINITE);
+#else
+ return ALT_E_SUCCESS;
+#endif
+}
+
+ALT_STATUS_CODE alt_qspi_indirect_write_cancel(void)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+#if ALT_QSPI_PROVISION_MICRON_N25Q_SUPPORT
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_N25Q_flag_wait_for_program(ALT_QSPI_TIMEOUT_INFINITE);
+ }
+#endif
+
+ if (status == ALT_E_SUCCESS)
+ {
+ // An indirect operation may be cancelled at any time by setting Indirect
+ // Transfer Control Register bit [1].
+ alt_write_word(ALT_QSPI_INDWR_ADDR, ALT_QSPI_INDWR_CANCEL_SET_MSK);
+ }
+
+ return status;
+}
+
+uint32_t alt_qspi_indirect_write_fill_level(void)
+{
+ // Return the SRAM Fill Level (Indirect Write Partition) Field of the SRAM
+ // Fill Register to get the SRAM Fill Level for the Indirect Write Partition
+ // in units of SRAM Words (4 bytes).
+ return ALT_QSPI_SRAMFILL_INDWRPART_GET(alt_read_word(ALT_QSPI_SRAMFILL_ADDR));
+}
+
+uint32_t alt_qspi_indirect_write_watermark_get(void)
+{
+ // Return the Watermark value in the Indirect Write Transfer Watermark Register.
+ return alt_read_word(ALT_QSPI_INDWRWATER_ADDR);
+}
+
+ALT_STATUS_CODE alt_qspi_indirect_write_watermark_set(const uint32_t watermark)
+{
+ // Verify that there is not already a write in progress.
+ if (ALT_QSPI_INDWR_RDSTAT_GET(alt_read_word(ALT_QSPI_INDWR_ADDR)))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // Set the Watermark value in the Indirect Write Transfer Watermark Register.
+ alt_write_word(ALT_QSPI_INDWRWATER_ADDR, watermark);
+
+ return ALT_E_SUCCESS;
+}
+
+bool alt_qspi_indirect_write_is_complete(void)
+{
+ // The value of the Indirect Completion Status Field of the Indirect Write
+ // Transfer Control Register is set by hardware when an indirect write
+ // operation has completed.
+ return (alt_read_word(ALT_QSPI_INDWR_ADDR) & ALT_QSPI_INDWR_INDDONE_SET_MSK) != 0;
+}
+
+/////
+
+uint32_t alt_qspi_sram_partition_get(void)
+{
+ // The number of locations allocated to indirect read is equal to the value
+ // of the SRAM partition register. See the documentation for this function in
+ // alt_qspi.h regarding the "+ 1" in the IP documentation; the raw register
+ // value is used here so that get() and set() are symmetrical.
+
+ return ALT_QSPI_SRAMPART_ADDR_GET(alt_read_word(ALT_QSPI_SRAMPART_ADDR));
+}
+
+ALT_STATUS_CODE alt_qspi_sram_partition_set(const uint32_t read_part_size)
+{
+ if (read_part_size > ((1 << ALT_QSPI_SRAMPART_ADDR_WIDTH) - 1))
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ alt_replbits_word(ALT_QSPI_SRAMPART_ADDR,
+ ALT_QSPI_SRAMPART_ADDR_SET_MSK,
+ ALT_QSPI_SRAMPART_ADDR_SET(read_part_size));
+
+ return ALT_E_SUCCESS;
+}
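+
+//
+// A worked example of the partitioning above, assuming the 128-word SRAM
+// implied by the ECC routine below (126 read + 2 write words). Setting the
+// read partition to 96 words leaves 128 - 96 = 32 words for the indirect
+// write partition, which is the capacity used by the write helpers earlier in
+// this file:
+//
+//   ALT_STATUS_CODE status = alt_qspi_sram_partition_set(96);
+//   // alt_qspi_sram_partition_get() now returns 96.
+//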
+
+/////
+
+
+static ALT_STATUS_CODE alt_qspi_erase_subsector_bank(uint32_t addr)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_device_wren();
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_stig_addr_cmd(ALT_QSPI_STIG_OPCODE_SUBSEC_ERASE, 0, addr, 10000);
+ }
+
+#if ALT_QSPI_PROVISION_MICRON_N25Q_SUPPORT
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_N25Q_flag_wait_for_erase(ALT_QSPI_TIMEOUT_INFINITE);
+ }
+#endif
+
+ return status;
+}
+
+ALT_STATUS_CODE alt_qspi_erase_subsector(const uint32_t addr)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_device_bank_select(addr >> 24);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_erase_subsector_bank(addr);
+ }
+
+ return status;
+}
+
+ALT_STATUS_CODE alt_qspi_erase_sector(const uint32_t addr)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_device_bank_select(addr >> 24);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_device_wren();
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_stig_addr_cmd(ALT_QSPI_STIG_OPCODE_SEC_ERASE, 0, addr, ALT_QSPI_TIMEOUT_INFINITE);
+ }
+
+#if ALT_QSPI_PROVISION_MICRON_N25Q_SUPPORT
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_N25Q_flag_wait_for_erase(ALT_QSPI_TIMEOUT_INFINITE);
+ }
+#endif
+
+ return status;
+}
+
+ALT_STATUS_CODE alt_qspi_erase_chip(void)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ if (qspi_device_size >= (2 * ALT_QSPI_N25Q_DIE_SIZE))
+ {
+ // NOTE: This path is specifically for 512 Mib and 1 Gib Micron N25Q
+ // chips only.
+
+ dprintf("DEBUG[QSPI]: erase[chip]: FYI, wait time is ~800s for 128 MiB.\n");
+
+ uint32_t die_count = qspi_device_size / ALT_QSPI_N25Q_DIE_SIZE;
+
+ for (int i = 0; i < die_count; ++i)
+ {
+ if (status != ALT_E_SUCCESS)
+ {
+ break;
+ }
+
+ dprintf("DEBUG[QSPI]: Erase chip: die = %d, total = %" PRIu32 ".\n", i, die_count);
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_device_bank_select(i * (ALT_QSPI_N25Q_DIE_SIZE / ALT_QSPI_BANK_SIZE));
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_device_wren();
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_stig_addr_cmd(ALT_QSPI_STIG_OPCODE_DIE_ERASE, 0,
+ i * ALT_QSPI_N25Q_DIE_SIZE,
+ ALT_QSPI_TIMEOUT_INFINITE);
+ }
+
+#if ALT_QSPI_PROVISION_MICRON_N25Q_SUPPORT
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_N25Q_flag_wait_for_erase(ALT_QSPI_TIMEOUT_INFINITE);
+ }
+#endif
+ }
+ }
+ else
+ {
+ // NOTE: Untested path.
+
+ dprintf("DEBUG[QSPI]: Bulk erase.\n");
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_device_bank_select(0);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_device_wren();
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ // If BULK_ERASE is like other ERASE, it needs the address command.
+ status = alt_qspi_stig_addr_cmd(ALT_QSPI_STIG_OPCODE_BULK_ERASE, 0,
+ 0,
+ ALT_QSPI_TIMEOUT_INFINITE);
+ }
+
+#if ALT_QSPI_PROVISION_MICRON_N25Q_SUPPORT
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_N25Q_flag_wait_for_erase(ALT_QSPI_TIMEOUT_INFINITE);
+ }
+#endif
+ }
+
+ return status;
+}
+
+/////
+
+ALT_STATUS_CODE alt_qspi_dma_disable(void)
+{
+ // Clear (set to 0) the Enable DMA Peripheral Interface Field of the QSPI
+ // Configuration Register to disable the DMA peripheral interface.
+ alt_clrbits_word(ALT_QSPI_CFG_ADDR, ALT_QSPI_CFG_ENDMA_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_dma_enable(void)
+{
+ // Set (set to 1) the Enable DMA Peripheral Interface Field of the QSPI
+ // Configuration Register to enable the DMA peripheral interface.
+ alt_setbits_word(ALT_QSPI_CFG_ADDR, ALT_QSPI_CFG_ENDMA_SET_MSK);
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_dma_config_get(uint32_t * single_type_sz,
+ uint32_t * burst_type_sz)
+{
+ // Get the current value of the DMA Peripheral Register - dmaper
+ uint32_t dmaper = alt_read_word(ALT_QSPI_DMAPER_ADDR);
+
+ // For both values, a programmed value of 0 represents a single byte. The
+ // actual number of bytes used is 2 ** (value in this register field).
+ *single_type_sz = 1 << ALT_QSPI_DMAPER_NUMSGLREQBYTES_GET(dmaper);
+ *burst_type_sz = 1 << ALT_QSPI_DMAPER_NUMBURSTREQBYTES_GET(dmaper);
+
+ return ALT_E_SUCCESS;
+}
+
+//
+// Returns true if [n] is a power of 2 value otherwise returns false.
+//
+static bool is_pow_2(uint32_t n)
+{
+ return ((n > 0) && ((n & (n - 1)) == 0));
+}
+
+//
+// Return the log base 2 value of a number that is known to be a power of 2.
+//
+static uint32_t log2u(uint32_t value)
+{
+ uint32_t exp = 0;
+ while ((exp < 32) && (value != (1 << exp)))
+ {
+ ++exp;
+ }
+ return exp;
+}
+
+ALT_STATUS_CODE alt_qspi_dma_config_set(const uint32_t single_type_sz,
+ const uint32_t burst_type_sz)
+{
+ if (alt_qspi_is_idle() == false)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (single_type_sz < 4)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (burst_type_sz < 4)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (burst_type_sz < single_type_sz)
+ {
+ return ALT_E_ERROR;
+ }
+
+ const uint32_t single_type_sz_max = 1 << ((1 << ALT_QSPI_DMAPER_NUMSGLREQBYTES_WIDTH) - 1);
+ const uint32_t burst_type_sz_max = 1 << ((1 << ALT_QSPI_DMAPER_NUMBURSTREQBYTES_WIDTH) - 1);
+
+ // Both parameter values must be a power of 2 no larger than 32768 (and at
+ // least 4, as checked above).
+ if ( (single_type_sz > single_type_sz_max) || !is_pow_2(single_type_sz)
+ || (burst_type_sz > burst_type_sz_max) || !is_pow_2(burst_type_sz)
+ )
+ {
+ return ALT_E_ARG_RANGE;
+ }
+
+ // Get the current value of the DMA Peripheral Register - dmaper
+ uint32_t dmaper = alt_read_word(ALT_QSPI_DMAPER_ADDR);
+ dmaper &= ALT_QSPI_DMAPER_NUMBURSTREQBYTES_CLR_MSK &
+ ALT_QSPI_DMAPER_NUMSGLREQBYTES_CLR_MSK;
+ dmaper |= ALT_QSPI_DMAPER_NUMBURSTREQBYTES_SET(log2u(burst_type_sz)) |
+ ALT_QSPI_DMAPER_NUMSGLREQBYTES_SET(log2u(single_type_sz));
+ alt_write_word(ALT_QSPI_DMAPER_ADDR, dmaper);
+
+ return ALT_E_SUCCESS;
+}
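+
+//
+// A worked example of the encoding above: the register fields hold the log2
+// of the byte counts, so requesting 4-byte single transfers and 32-byte
+// bursts programs field values of 2 and 5 respectively.
+//
+//   ALT_STATUS_CODE status = alt_qspi_dma_config_set(4, 32);
+//   // alt_qspi_dma_config_get() would report 4 and 32 again,
+//   // reconstructed as 1 << 2 and 1 << 5.
+//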
+
+/////
+
+//
+// Private STIG and device commands
+//
+
+static ALT_STATUS_CODE alt_qspi_stig_cmd_helper(uint32_t reg_value, uint32_t timeout)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+ bool infinite = (timeout == ALT_QSPI_TIMEOUT_INFINITE);
+
+ alt_write_word(ALT_QSPI_FLSHCMD_ADDR, reg_value);
+ alt_write_word(ALT_QSPI_FLSHCMD_ADDR, reg_value | ALT_QSPI_FLSHCMD_EXECCMD_E_EXECUTE);
+
+ do
+ {
+ reg_value = alt_read_word(ALT_QSPI_FLSHCMD_ADDR);
+ if (!(reg_value & ALT_QSPI_FLSHCMD_CMDEXECSTAT_SET_MSK))
+ {
+ break;
+ }
+
+ } while (timeout-- || infinite);
+
+ if (timeout == (uint32_t)-1 && !infinite)
+ {
+ status = ALT_E_TMO;
+ }
+
+ return status;
+}
+
+ALT_STATUS_CODE alt_qspi_stig_cmd(uint32_t opcode, uint32_t dummy, uint32_t timeout)
+{
+ if (dummy > ((1 << ALT_QSPI_FLSHCMD_NUMDUMMYBYTES_WIDTH) - 1))
+ {
+ return ALT_E_ERROR;
+ }
+
+ uint32_t reg = ALT_QSPI_FLSHCMD_CMDOPCODE_SET(opcode) |
+ ALT_QSPI_FLSHCMD_NUMDUMMYBYTES_SET(dummy);
+
+ return alt_qspi_stig_cmd_helper(reg, timeout);
+}
+
+ALT_STATUS_CODE alt_qspi_stig_rd_cmd(uint8_t opcode,
+ uint32_t dummy,
+ uint32_t num_bytes,
+ uint32_t * output,
+ uint32_t timeout)
+{
+ if (dummy > ((1 << ALT_QSPI_FLSHCMD_NUMDUMMYBYTES_WIDTH) - 1))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // STIG read can only return up to 8 bytes.
+ if ((num_bytes > 8) || (num_bytes == 0))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ uint32_t reg_value =
+ ALT_QSPI_FLSHCMD_CMDOPCODE_SET(opcode) |
+ ALT_QSPI_FLSHCMD_ENRDDATA_SET(ALT_QSPI_FLSHCMD_ENRDDATA_E_EN) |
+ ALT_QSPI_FLSHCMD_NUMRDDATABYTES_SET(num_bytes - 1) |
+ ALT_QSPI_FLSHCMD_ENCMDADDR_SET(ALT_QSPI_FLSHCMD_ENCMDADDR_E_DISD) |
+ ALT_QSPI_FLSHCMD_ENMODBIT_SET(ALT_QSPI_FLSHCMD_ENMODBIT_E_DISD) |
+ ALT_QSPI_FLSHCMD_NUMADDRBYTES_SET(0) |
+ ALT_QSPI_FLSHCMD_ENWRDATA_SET(ALT_QSPI_FLSHCMD_ENWRDATA_E_NOACTION) |
+ ALT_QSPI_FLSHCMD_NUMWRDATABYTES_SET(0) |
+ ALT_QSPI_FLSHCMD_NUMDUMMYBYTES_SET(dummy);
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ status = alt_qspi_stig_cmd_helper(reg_value, timeout);
+ if (status != ALT_E_SUCCESS)
+ {
+ return status;
+ }
+
+ output[0] = alt_read_word(ALT_QSPI_FLSHCMDRDDATALO_ADDR);
+
+ if (num_bytes > 4)
+ {
+ output[1] = alt_read_word(ALT_QSPI_FLSHCMDRDDATAUP_ADDR);
+ }
+
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_qspi_stig_wr_cmd(uint8_t opcode,
+ uint32_t dummy,
+ uint32_t num_bytes,
+ const uint32_t * input,
+ uint32_t timeout)
+{
+ if (dummy > ((1 << ALT_QSPI_FLSHCMD_NUMDUMMYBYTES_WIDTH) - 1))
+ {
+ return ALT_E_ERROR;
+ }
+
+ // STIG can only write up to 8 bytes.
+ if ((num_bytes > 8) || (num_bytes == 0))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ uint32_t reg_value =
+ ALT_QSPI_FLSHCMD_CMDOPCODE_SET(opcode) |
+ ALT_QSPI_FLSHCMD_ENRDDATA_SET(ALT_QSPI_FLSHCMD_ENRDDATA_E_NOACTION) |
+ ALT_QSPI_FLSHCMD_NUMRDDATABYTES_SET(0) |
+ ALT_QSPI_FLSHCMD_ENCMDADDR_SET(ALT_QSPI_FLSHCMD_ENCMDADDR_E_DISD) |
+ ALT_QSPI_FLSHCMD_ENMODBIT_SET(ALT_QSPI_FLSHCMD_ENMODBIT_E_DISD) |
+ ALT_QSPI_FLSHCMD_NUMADDRBYTES_SET(0) |
+ ALT_QSPI_FLSHCMD_ENWRDATA_SET(ALT_QSPI_FLSHCMD_ENWRDATA_E_WRDATABYTES) |
+ ALT_QSPI_FLSHCMD_NUMWRDATABYTES_SET(num_bytes - 1) |
+ ALT_QSPI_FLSHCMD_NUMDUMMYBYTES_SET(dummy);
+
+ alt_write_word(ALT_QSPI_FLSHCMDWRDATALO_ADDR, input[0]);
+
+ if (num_bytes > 4)
+ {
+ alt_write_word(ALT_QSPI_FLSHCMDWRDATAUP_ADDR, input[1]);
+ }
+
+ return alt_qspi_stig_cmd_helper(reg_value, timeout);
+}
+
+ALT_STATUS_CODE alt_qspi_stig_addr_cmd(uint8_t opcode,
+ uint32_t dummy,
+ uint32_t address,
+ uint32_t timeout)
+{
+ if (dummy > ((1 << ALT_QSPI_FLSHCMD_NUMDUMMYBYTES_WIDTH) - 1))
+ {
+ return ALT_E_ERROR;
+ }
+
+ uint32_t reg = ALT_QSPI_FLSHCMD_CMDOPCODE_SET(opcode) |
+ ALT_QSPI_FLSHCMD_NUMDUMMYBYTES_SET(dummy);
+
+ reg |= ALT_QSPI_FLSHCMD_ENCMDADDR_SET(ALT_QSPI_FLSHCMD_ENCMDADDR_E_END);
+ reg |= ALT_QSPI_FLSHCMD_NUMADDRBYTES_SET(ALT_QSPI_FLSHCMD_NUMADDRBYTES_E_ADDRBYTE3);
+
+ alt_write_word(ALT_QSPI_FLSHCMDADDR_ADDR, address);
+
+ return alt_qspi_stig_cmd_helper(reg, timeout);
+}
+
+/////
+
+ALT_STATUS_CODE alt_qspi_device_wren(void)
+{
+ // Write enable through STIG (not strictly required; the controller sends it automatically during writes)
+ return alt_qspi_stig_cmd(ALT_QSPI_STIG_OPCODE_WREN, 0, 10000);
+}
+
+ALT_STATUS_CODE alt_qspi_device_wrdis(void)
+{
+ // Write disable through STIG (not strictly required; the controller sends it automatically during writes)
+ return alt_qspi_stig_cmd(ALT_QSPI_STIG_OPCODE_WRDIS, 0, 10000);
+}
+
+ALT_STATUS_CODE alt_qspi_device_rdid(uint32_t * rdid)
+{
+ // Read flash device ID through STIG
+ return alt_qspi_stig_rd_cmd(ALT_QSPI_STIG_OPCODE_RDID, 0, 4, rdid, 10000);
+}
+
+ALT_STATUS_CODE alt_qspi_discovery_parameter(uint32_t * param)
+{
+ // Read flash discovery parameters through STIG
+
+ return alt_qspi_stig_rd_cmd(ALT_QSPI_STIG_OPCODE_DISCVR_PARAM, 8, 8, param, 10000);
+}
+
+ALT_STATUS_CODE alt_qspi_device_bank_select(uint32_t bank)
+{
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+ dprintf("DEBUG[QSPI]: bank_select(): switching to bank 0x%" PRIu32 ".\n", bank);
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_device_wren();
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_stig_wr_cmd(ALT_QSPI_STIG_OPCODE_WR_EXT_REG, 0, 1, &bank, 10000);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ status = alt_qspi_device_wrdis();
+ }
+
+ return status;
+}
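+
+//
+// The [bank] argument above is simply flash address bits [31:24]; callers in
+// this file always pass (addr >> 24). For example, to select the bank that
+// contains flash address 0x01000000 (the second 16 MiB):
+//
+//   ALT_STATUS_CODE status = alt_qspi_device_bank_select(0x01000000 >> 24); // bank 1
+//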
+
+/////
+
+static bool alt_qspi_is_enabled(void)
+{
+ uint32_t cfg = alt_read_word(ALT_QSPI_CFG_ADDR);
+
+ if (cfg & ALT_QSPI_CFG_EN_SET_MSK)
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
+
+ALT_STATUS_CODE alt_qspi_ecc_start(void * block, size_t size)
+{
+ if (size < (ALT_QSPI_PAGE_SIZE * 8))
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (alt_qspi_is_enabled() == false)
+ {
+ return ALT_E_ERROR;
+ }
+
+ if (alt_qspi_is_idle() == false)
+ {
+ return ALT_E_ERROR;
+ }
+
+ ALT_STATUS_CODE status = ALT_E_SUCCESS;
+
+ // 1. Configure SRAM Partition Register to 126 words for read, 2 words for write.
+ // 2. Enable ECC on QSPI RAM
+ // 3. Trigger an indirect read transfer that will fill up 126 words in the FIFO
+ // by monitoring the read FIFO fill level; do not read the data out through
+ // the AHB.
+ // 4. Start an AHB read and start an indirect write operation to write back to
+ // the same device location; this will fill up and initialize the write
+ // partition RAM.
+ // 5. To clear spurious interrupts, reset the QSPI controller.
+
+ // Save the previous partition size
+
+ uint32_t sram_orig = alt_qspi_sram_partition_get();
+ dprintf("DEBUG[QSPI][ECC]: Save original SRAM as %" PRIu32 ".\n", sram_orig);
+
+ // Step 1
+
+ uint32_t sram_fill = (1 << ALT_QSPI_SRAMPART_ADDR_WIDTH) - 2;
+ alt_qspi_sram_partition_set(sram_fill);
+ dprintf("DEBUG[QSPI][ECC]: Set new SRAM as %" PRIu32 ".\n", sram_fill);
+
+ // Step 2
+
+ dprintf("DEBUG[QSPI][ECC]: Enable ECC in SysMgr.\n");
+ alt_write_word(ALT_SYSMGR_ECC_QSPI_ADDR, ALT_SYSMGR_ECC_QSPI_EN_SET_MSK);
+
+ // Step 3
+
+ // Issue a read ~ 2x larger than the read partition. We will read out 1 page,
+ // which will be used as the buffer to write back to QSPI. This way no data
+ // actually changes thus no erase will be needed.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DEBUG[QSPI][ECC]: Start indirect read PAGE * 8.\n");
+ status = alt_qspi_indirect_read_start(0x0, ALT_QSPI_PAGE_SIZE * 8);
+ }
+
+ // Read out 1 page for the write data
+
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DEBUG[QSPI][ECC]: Reading out 1 page ...\n");
+
+ uint32_t read_size = 0;
+ char * buffer = block;
+ while (read_size < ALT_QSPI_PAGE_SIZE)
+ {
+ uint32_t level = alt_qspi_indirect_read_fill_level();
+ level = MIN(level, (ALT_QSPI_PAGE_SIZE - read_size) / sizeof(uint32_t));
+
+ uint32_t * data = (uint32_t *)(&buffer[read_size]);
+ for (uint32_t i = 0; i < level; ++i)
+ {
+ *data = alt_read_word(ALT_QSPIDATA_ADDR);
+ ++data;
+ }
+
+ read_size += level * sizeof(uint32_t);
+ }
+
+ if (read_size != ALT_QSPI_PAGE_SIZE)
+ {
+ status = ALT_E_ERROR;
+ }
+ }
+
+ // Wait for read FIFO to report it is up to the specified fill level.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DEBUG[QSPI][ECC]: Waiting for read fill level ...\n");
+
+ uint32_t timeout = 10000;
+
+ while (alt_qspi_indirect_read_fill_level() < sram_fill)
+ {
+ if (--timeout == 0)
+ {
+ dprintf("DEBUG[QSPI][ECC]: Waiting for read fill timeout !!!\n");
+ status = ALT_E_TMO;
+ break;
+ }
+ }
+ }
+
+ // Step 4
+
+ // Issue a write of 1 page of the same data from 0x0.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DEBUG[QSPI][ECC]: Start indirect write PAGE.\n");
+ status = alt_qspi_indirect_write_start(0x0, ALT_QSPI_PAGE_SIZE);
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DEBUG[QSPI][ECC]: Writing in 1 page ...\n");
+
+ uint32_t write_size = 0;
+ char * buffer = block;
+
+ while (write_size < ALT_QSPI_PAGE_SIZE)
+ {
+ uint32_t space = 2 - alt_qspi_indirect_write_fill_level();
+ if (space == 0)
+ {
+ dprintf("DEBUG[QSPI][ECC]: Write FIFO filled at write_size = %" PRIu32 ".\n", write_size);
+ // space == 0 means that both positions in the write FIFO are filled,
+ // meaning it has been initialized with respect to ECC.
+ break;
+ }
+
+ space = MIN(space, (ALT_QSPI_PAGE_SIZE - write_size) / sizeof(uint32_t));
+
+ uint32_t * data = (uint32_t *)(&buffer[write_size]);
+ for (uint32_t i = 0; i < space; ++i)
+ {
+ alt_write_word(ALT_QSPIDATA_ADDR, *data);
+ ++data;
+ }
+
+ write_size += space * sizeof(uint32_t);
+ }
+
+ if (write_size != ALT_QSPI_PAGE_SIZE)
+ {
+ dprintf("DEBUG[QSPI][ECC]: Cancel indirect write.\n");
+ status = alt_qspi_indirect_write_cancel();
+ }
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DEBUG[QSPI][ECC]: Finish indirect write.\n");
+ status = alt_qspi_indirect_write_finish();
+ }
+
+ // Cancel the indirect read as it has initialized the read FIFO partition.
+
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DEBUG[QSPI][ECC]: Cancel indirect read.\n");
+ status = alt_qspi_indirect_read_cancel();
+ }
+
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DEBUG[QSPI][ECC]: Finish indirect read.\n");
+ status = alt_qspi_indirect_read_finish();
+ }
+
+ // Step 5
+
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DEBUG[QSPI][ECC]: Clear any pending spurious QSPI ECC interrupts.\n");
+
+ alt_write_word(ALT_SYSMGR_ECC_QSPI_ADDR,
+ ALT_SYSMGR_ECC_QSPI_EN_SET_MSK
+ | ALT_SYSMGR_ECC_QSPI_SERR_SET_MSK
+ | ALT_SYSMGR_ECC_QSPI_DERR_SET_MSK);
+ }
+
+ /////
+
+ // Restore original partition
+
+ if (status == ALT_E_SUCCESS)
+ {
+ dprintf("DEBUG[QSPI][ECC]: Restore original SRAM as %" PRIu32 ".\n", sram_orig);
+ status = alt_qspi_sram_partition_set(sram_orig);
+ }
+
+ return status;
+}
diff --git a/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_reset_manager.c b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_reset_manager.c
new file mode 100644
index 0000000000..e37e5a7be3
--- /dev/null
+++ b/bsps/arm/altera-cyclone-v/contrib/hwlib/src/hwmgr/alt_reset_manager.c
@@ -0,0 +1,135 @@
+
+/******************************************************************************
+*
+* alt_reset_manager.c - API for the Altera SoC FPGA reset manager.
+*
+******************************************************************************/
+
+/******************************************************************************
+*
+* Copyright 2013 Altera Corporation. All Rights Reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+* 1. Redistributions of source code must retain the above copyright notice,
+* this list of conditions and the following disclaimer.
+*
+* 2. Redistributions in binary form must reproduce the above copyright notice,
+* this list of conditions and the following disclaimer in the documentation
+* and/or other materials provided with the distribution.
+*
+* 3. The name of the author may not be used to endorse or promote products
+* derived from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO
+* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+* OF SUCH DAMAGE.
+*
+******************************************************************************/
+
+#include <bsp/alt_reset_manager.h>
+#include <bsp/socal/socal.h>
+#include <bsp/socal/hps.h>
+#include <bsp/socal/alt_rstmgr.h>
+
+/////
+
+
+uint32_t alt_reset_event_get(void)
+{
+ return alt_read_word(ALT_RSTMGR_STAT_ADDR);
+}
+
+ALT_STATUS_CODE alt_reset_event_clear(uint32_t event_mask)
+{
+ alt_write_word(ALT_RSTMGR_STAT_ADDR, event_mask);
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_reset_cold_reset(void)
+{
+ alt_write_word(ALT_RSTMGR_CTL_ADDR, ALT_RSTMGR_CTL_SWCOLDRSTREQ_SET_MSK);
+ return ALT_E_SUCCESS;
+}
+
+ALT_STATUS_CODE alt_reset_warm_reset(uint32_t warm_reset_delay,
+ uint32_t nRST_pin_clk_assertion,
+ bool sdram_refresh_enable,
+ bool fpga_mgr_handshake,
+ bool scan_mgr_handshake,
+ bool fpga_handshake,
+ bool etr_stall)
+{
+ // Cached register values
+ uint32_t ctrl_reg = ALT_RSTMGR_CTL_SWWARMRSTREQ_SET_MSK;
+ uint32_t counts_reg = 0;
+
+ /////
+
+ // Validate that warm_reset_delay is at least 16 and fits within the counter field
+ if ((warm_reset_delay < 16) || (warm_reset_delay >= (1 << ALT_RSTMGR_COUNTS_WARMRSTCYCLES_WIDTH)))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ // Validate that the nRST_pin_clk_assertion delay is non-zero and fits within the counter field
+ if (!nRST_pin_clk_assertion)
+ {
+ return ALT_E_ERROR;
+ }
+ if (nRST_pin_clk_assertion >= (1 << ALT_RSTMGR_COUNTS_NRSTCNT_WIDTH))
+ {
+ return ALT_E_BAD_ARG;
+ }
+
+ // Update counts register with warm_reset_delay information
+ counts_reg |= ALT_RSTMGR_COUNTS_WARMRSTCYCLES_SET(warm_reset_delay);
+
+ // Update counts register with nRST_pin_clk_assertion information
+ counts_reg |= ALT_RSTMGR_COUNTS_NRSTCNT_SET(nRST_pin_clk_assertion);
+
+ /////
+
+ // Update ctrl register with the specified option flags
+
+ if (sdram_refresh_enable)
+ {
+ ctrl_reg |= ALT_RSTMGR_CTL_SDRSELFREFEN_SET_MSK;
+ }
+
+ if (fpga_mgr_handshake)
+ {
+ ctrl_reg |= ALT_RSTMGR_CTL_FPGAMGRHSEN_SET_MSK;
+ }
+
+ if (scan_mgr_handshake)
+ {
+ ctrl_reg |= ALT_RSTMGR_CTL_SCANMGRHSEN_SET_MSK;
+ }
+
+ if (fpga_handshake)
+ {
+ ctrl_reg |= ALT_RSTMGR_CTL_FPGAHSEN_SET_MSK;
+ }
+
+ if (etr_stall)
+ {
+ ctrl_reg |= ALT_RSTMGR_CTL_ETRSTALLEN_SET_MSK;
+ }
+
+ /////
+
+ // Commit registers to hardware
+ alt_write_word(ALT_RSTMGR_COUNTS_ADDR, counts_reg);
+ alt_write_word(ALT_RSTMGR_CTL_ADDR, ctrl_reg);
+
+ return ALT_E_SUCCESS;
+}
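+
+//
+// A minimal usage sketch for alt_reset_warm_reset(). The counter values are
+// illustrative only; they merely satisfy the range checks above (a warm reset
+// delay of at least 16 cycles and a non-zero nRST count within the field).
+//
+//   ALT_STATUS_CODE status = alt_reset_warm_reset(128,    /* warm_reset_delay */
+//                                                 64,     /* nRST_pin_clk_assertion */
+//                                                 true,   /* sdram_refresh_enable */
+//                                                 false,  /* fpga_mgr_handshake */
+//                                                 false,  /* scan_mgr_handshake */
+//                                                 false,  /* fpga_handshake */
+//                                                 false); /* etr_stall */
+//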