summaryrefslogtreecommitdiffstats
path: root/bsps/shared/grlib/spw
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2018-12-22 18:31:04 +0100
committerSebastian Huber <sebastian.huber@embedded-brains.de>2019-01-22 12:46:33 +0100
commit7eb606d393306da25fd6e6aa7f8595ffb2e924fc (patch)
tree085befd6fe5e29d229fec9683735516d48e9d41e /bsps/shared/grlib/spw
parentgrlib: Move header files (diff)
downloadrtems-7eb606d393306da25fd6e6aa7f8595ffb2e924fc.tar.bz2
grlib: Move source files
Update #3678.
Diffstat (limited to 'bsps/shared/grlib/spw')
-rw-r--r--bsps/shared/grlib/spw/grspw.c2038
-rw-r--r--bsps/shared/grlib/spw/grspw_pkt.c3295
-rw-r--r--bsps/shared/grlib/spw/grspw_router.c1939
-rw-r--r--bsps/shared/grlib/spw/spwtdp.c991
4 files changed, 8263 insertions, 0 deletions
diff --git a/bsps/shared/grlib/spw/grspw.c b/bsps/shared/grlib/spw/grspw.c
new file mode 100644
index 0000000000..ca0f63edd8
--- /dev/null
+++ b/bsps/shared/grlib/spw/grspw.c
@@ -0,0 +1,2038 @@
+/*
+ * This file contains the GRSPW SpaceWire Driver for LEON2 and LEON3.
+ *
+ * COPYRIGHT (c) 2006
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bsp.h>
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <ctype.h>
+#include <rtems/bspIo.h>
+#include <grlib/ambapp.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/grspw.h>
+
+#include <grlib/grlib_impl.h>
+
/* Debug trace categories; OR the wanted categories into
 * DEBUG_SPACEWIRE_FLAGS to enable them when tracing is compiled in. */
#define DBGSPW_IOCALLS 1
#define DBGSPW_TX 2
#define DBGSPW_RX 4
/* NOTE(review): DBGSPW_IOCTRL shares value 1 with DBGSPW_IOCALLS, so the
 * two categories cannot be enabled independently -- confirm intended. */
#define DBGSPW_IOCTRL 1
#define DBGSPW_DUMP 16
#define DEBUG_SPACEWIRE_FLAGS (DBGSPW_IOCALLS | DBGSPW_TX | DBGSPW_RX )

/* Define DEBUG_SPACEWIRE_ONOFF to compile in the printk trace macros. */
/* #define DEBUG_SPACEWIRE_ONOFF */

#ifdef DEBUG_SPACEWIRE_ONOFF
#define SPACEWIRE_DBG(fmt, args...) do { { printk(" : %03d @ %18s()]:" fmt , __LINE__,__FUNCTION__,## args); }} while(0)
#define SPACEWIRE_DBG2(fmt) do { { printk(" : %03d @ %18s()]:" fmt , __LINE__,__FUNCTION__); }} while(0)
#define SPACEWIRE_DBGC(c,fmt, args...) do { if (DEBUG_SPACEWIRE_FLAGS & c) { printk(" : %03d @ %18s()]:" fmt , __LINE__,__FUNCTION__,## args); }} while(0)
#else
#define SPACEWIRE_DBG(fmt, args...)
#define SPACEWIRE_DBG2(fmt, args...)
#define SPACEWIRE_DBGC(c, fmt, args...)
#endif
+
/* GRSPW APB register block layout. Always accessed through the
 * SPW_READ/SPW_WRITE cache-bypass helpers defined below. */
typedef struct {
	volatile unsigned int ctrl;	/* control register */
	volatile unsigned int status;	/* status / interrupt-source register */
	volatile unsigned int nodeaddr;	/* node address (bits 15:8 = mask on GRSPW2) */
	volatile unsigned int clkdiv;	/* clock divisor (run + startup fields) */
	volatile unsigned int destkey;	/* RMAP destination key */
	volatile unsigned int time;	/* time-code register */
	volatile unsigned int timer;	/* timer/disconnect timing (GRSPW1 only) */
	volatile unsigned int pad;	/* reserved */

	volatile unsigned int dma0ctrl;		/* DMA channel 0 control/status */
	volatile unsigned int dma0rxmax;	/* DMA channel 0 max RX packet length */
	volatile unsigned int dma0txdesc;	/* DMA channel 0 TX descriptor table */
	volatile unsigned int dma0rxdesc;	/* DMA channel 0 RX descriptor table */

	/* For GRSPW core 2 and onwards */
	volatile unsigned int dma0addr;

} LEON3_SPACEWIRE_Regs_Map;
+
/* RX DMA descriptor: control/status word + receive buffer address (8 bytes) */
typedef struct {
	volatile unsigned int ctrl;
	volatile unsigned int addr;
} SPACEWIRE_RXBD;

/* TX DMA descriptor: control/status word, header buffer address,
 * data length and data buffer address (16 bytes) */
typedef struct {
	volatile unsigned int ctrl;
	volatile unsigned int addr_header;
	volatile unsigned int len;
	volatile unsigned int addr_data;
} SPACEWIRE_TXBD;
+
/* Default buffer/descriptor dimensioning; most can be overridden per
 * device via driver-manager bus resources (see grspw_device_init()). */
#define SPACEWIRE_INIT_TIMEOUT 10	/* default link startup timeout */
#define SPACEWIRE_BDTABLE_SIZE 0x400	/* one descriptor table: 1 KiB */
#define SPACEWIRE_TXD_SIZE 1024		/* TX data buffer per descriptor */
#define SPACEWIRE_TXH_SIZE 64		/* TX header buffer per descriptor */
#define SPACEWIRE_RXPCK_SIZE 1024	/* max RX packet (and RX buffer) size */
#define SPACEWIRE_TXBUFS_NR 64		/* number of TX descriptors */
#define SPACEWIRE_RXBUFS_NR 128		/* number of RX descriptors */

/* Total default DMA buffer memory needed per SpaceWire link */
#define BUFMEM_PER_LINK (SPACEWIRE_TXBUFS_NR*(SPACEWIRE_TXD_SIZE+SPACEWIRE_TXH_SIZE) + SPACEWIRE_RXBUFS_NR*SPACEWIRE_RXPCK_SIZE)
+
/* Per-device (per SpaceWire link) driver state. */
typedef struct {
	/* configuration parameters */
	struct drvmgr_dev *dev;	/* Driver manager device */
	char devName[32];	/* Device Name (filesystem node) */
	LEON3_SPACEWIRE_Regs_Map *regs;	/* register base from AMBA PnP */
	spw_config config;	/* user-visible configuration (ioctl) */

	unsigned int tx_all_in_use;	/* set when every TX descriptor is queued */
	unsigned int tx_sent;	/* oldest TX descriptor not yet reaped by the ISR */
	unsigned int tx_cur;	/* next TX descriptor to queue */
	unsigned int rxcur;	/* current RX descriptor index */
	unsigned int rxbufcur;	/* current RX buffer index */
	unsigned int txdbufsize;	/* TX data buffer size per descriptor */
	unsigned int txhbufsize;	/* TX header buffer size per descriptor */
	unsigned int rxbufsize;	/* RX buffer size per descriptor */
	unsigned int txbufcnt;	/* number of TX descriptors/buffers */
	unsigned int rxbufcnt;	/* number of RX descriptors/buffers */

	/* DMA Area set by user: 0 = allocate from heap, LSB set = address is
	 * a remote (DMA unit) address, otherwise a CPU address. */
	unsigned int rx_dma_area;
	unsigned int tx_data_dma_area;
	unsigned int tx_hdr_dma_area;
	unsigned int bd_dma_area;

	/* statistics */
	spw_stats stat;

	unsigned int _ptr_rxbuf0;	/* raw (unaligned) RX heap allocation */
	char *ptr_rxbuf0;	/* 8-byte-aligned RX buffer base (CPU address) */
	char *ptr_txdbuf0;	/* TX data buffer base (CPU address) */
	char *ptr_txhbuf0;	/* TX header buffer base (CPU address) */
	char *_ptr_bd0, *ptr_bd0;	/* raw / aligned descriptor table base */

	/* Same areas as seen from the DMA unit's address space */
	char *ptr_rxbuf0_remote;
	char *ptr_txdbuf0_remote;
	char *ptr_txhbuf0_remote;
	char *ptr_bd0_remote;

	unsigned int irq;	/* interrupt number from AMBA PnP */
	int minor;		/* driver minor number */
	int core_ver;		/* 1=GRSPW, 2=GRSPW2, 3=GRSPW2_DMA */
	int open;		/* non-zero while the device node is open */
	int running;		/* non-zero when the link is up and DMA started */
	unsigned int core_freq_khz;	/* core clock frequency in kHz */
	unsigned int rtimeout;	/* RX blocking timeout (presumably mirrors
				 * config.rtimeout -- confirm) */

	/* semaphores*/
	rtems_id txsp;	/* released by the ISR when a TX descriptor is freed */
	rtems_id rxsp;	/* released by the ISR when a packet is received */

	SPACEWIRE_RXBD *rx;	/* RX descriptor table (CPU address) */
	SPACEWIRE_TXBD *tx;	/* TX descriptor table (CPU address) */

	/* Descriptor tables as seen from the DMA unit */
	unsigned int rx_remote;
	unsigned int tx_remote;
} GRSPW_DEV;
+
/* Optional hook invoked from the interrupt handler when a time-code is
 * received (status TO bit set). Called with the device, its register
 * base, the minor number and the value of the time register.
 * NULL (default) disables time-code forwarding. */
void (*grspw_timecode_callback)
    (void *pDev, void *regs, int minor, unsigned int tc) = NULL;
+
/* Register and DMA-memory read helpers. When GRSPW_DONT_BYPASS_CACHE is
 * defined, plain volatile accesses are used. Otherwise SPARC "lda/lduba
 * ... [addr] 1" loads are used; descriptors and buffers are written by
 * the DMA unit behind the CPU's back, so reads must go to the bus rather
 * than the data cache (ASI 1 is presumably the LEON forced-cache-miss
 * ASI -- confirm against the LEON manual). */
#ifdef GRSPW_DONT_BYPASS_CACHE
#define _SPW_READ(address) (*(volatile unsigned int *)(address))
#define _MEM_READ8(address) (*(volatile unsigned char *)(address))
#define _MEM_READ32(address) (*(volatile unsigned int *)(address))
#else
/* 32-bit cache-bypassing load, used for GRSPW registers */
static inline unsigned int _SPW_READ(volatile void *addr) {
	unsigned int tmp;
	__asm__ (" lda [%1]1, %0 "
		: "=r"(tmp)
		: "r"(addr)
	);
	return tmp;
}

/* 8-bit cache-bypassing load, used for DMA buffer memory */
static inline unsigned int _MEM_READ8(volatile void *addr) {
	unsigned int tmp;
	__asm__ (" lduba [%1]1, %0 "
		: "=r"(tmp)
		: "r"(addr)
	);
	return tmp;
}

/* 32-bit cache-bypassing load, used for DMA descriptor memory */
static inline unsigned int _MEM_READ32(volatile void *addr) {
	unsigned int tmp;
	__asm__ (" lda [%1]1, %0 "
		: "=r"(tmp)
		: "r"(addr)
	);
	return tmp;
}
#endif

/* Convenience wrappers; writes need no cache bypass (write-through bus) */
#define MEM_READ8(addr) _MEM_READ8((volatile void *)(addr))
#define MEM_READ32(addr) _MEM_READ32((volatile void *)(addr))
#define SPW_READ(addr) _SPW_READ((volatile void *)(addr))
#define SPW_WRITE(addr,v) (*(volatile unsigned int *)addr)=v
+
/* Shorthand accessors for the GRSPW register block of a device */
#define SPW_REG(c,r) (c->regs->r)
#define SPW_REG_CTRL(c) SPW_REG(c,ctrl)
#define SPW_REG_STATUS(c) SPW_REG(c,status)
#define SPW_REG_NODEADDR(c) SPW_REG(c,nodeaddr)

#define SPW_CTRL_READ(c) SPW_READ(&SPW_REG_CTRL(c))
#define SPW_CTRL_WRITE(c,v) SPW_WRITE(&SPW_REG_CTRL(c),v)
#define SPW_STATUS_READ(c) SPW_READ(&SPW_REG_STATUS(c))
#define SPW_STATUS_WRITE(c,v) SPW_WRITE(&SPW_REG_STATUS(c),v)

/* Extract the link-state field (bits 23:21) from a status register value */
#define SPW_LINKSTATE(c) (((c) >> 21) & 0x7)

/* Descriptor count/index helpers: RX descriptors are 8 bytes (>>3), TX
 * descriptors 16 bytes (>>4). NOTE(review): the mask keeps the bits
 * *above* the table offset -- verify intended semantics. */
#define SPACEWIRE_RXNR(c) ((c&~(SPACEWIRE_BDTABLE_SIZE-1))>>3)
#define SPACEWIRE_TXNR(c) ((c&~(SPACEWIRE_BDTABLE_SIZE-1))>>4)

/* RX descriptor control/status word */
#define SPW_RXBD_LENGTH 0x1ffffff	/* received packet length mask */
#define SPW_RXBD_EN (1 << 25)		/* descriptor enabled */
#define SPW_RXBD_WR (1 << 26)		/* wrap: last descriptor in table */
#define SPW_RXBD_IE (1 << 27)		/* interrupt enable */

#define SPW_RXBD_EEP (1 << 28)		/* packet ended with EEP */
#define SPW_RXBD_EHC (1 << 29)		/* RMAP header CRC error */
#define SPW_RXBD_EDC (1 << 30)		/* RMAP data CRC error */
#define SPW_RXBD_ETR (1 << 31)		/* packet truncated */

#define SPW_RXBD_ERROR (SPW_RXBD_EEP | \
			SPW_RXBD_ETR)

#define SPW_RXBD_RMAPERROR (SPW_RXBD_EHC | SPW_RXBD_EDC)

/* TX descriptor control/status word */
#define SPW_TXBD_LENGTH 0xffffff	/* header length mask */

#define SPW_TXBD_EN (1 << 12)		/* descriptor enabled (still pending) */
#define SPW_TXBD_WR (1 << 13)		/* wrap: last descriptor in table */
#define SPW_TXBD_IE (1 << 14)		/* interrupt enable */
#define SPW_TXBD_LE (1 << 15)		/* link error during transmission */
#define SPW_TXBD_HC (1 << 16)		/* append header CRC */
#define SPW_TXBD_DC (1 << 17)		/* append data CRC */

#define SPW_TXBD_ERROR (SPW_TXBD_LE)

/* Control register bits */
#define SPW_CTRL_LINKDISABLED (1 << 0)
#define SPW_CTRL_LINKSTART (1 << 1)
#define SPW_CTRL_AUTOSTART (1 << 2)
#define SPW_CTRL_IE (1 << 3)		/* interrupt enable */
#define SPW_CTRL_TI (1 << 4)
#define SPW_CTRL_PM (1 << 5)
#define SPW_CTRL_RESET (1 << 6)
#define SPW_CTRL_TQ (1 << 8)
#define SPW_CTRL_LI (1 << 9)		/* link error interrupt enable */
#define SPW_CTRL_TT (1 << 10)
#define SPW_CTRL_TR (1 << 11)
#define SPW_CTRL_RE (1 << 16)		/* RMAP enable */
#define SPW_CTRL_RD (1 << 17)

#define SPW_CTRL_RC (1 << 29)		/* capability: RMAP CRC */
#define SPW_CTRL_RX (1 << 30)
#define SPW_CTRL_RA (1 << 31)		/* capability: RMAP */

/* Status register bits (error bits are cleared by writing them back) */
#define SPW_STATUS_TO (1 << 0)		/* time-code received */
#define SPW_STATUS_CE (1 << 1)		/* credit error */
#define SPW_STATUS_ER (1 << 2)		/* escape error */
#define SPW_STATUS_DE (1 << 3)		/* disconnect error */
#define SPW_STATUS_PE (1 << 4)		/* parity error */
#define SPW_STATUS_WE (1 << 6)		/* write synchronization error */
#define SPW_STATUS_IA (1 << 7)		/* invalid address */
#define SPW_STATUS_EE (1 << 8)		/* early EOP/EEP */

/* DMA channel control register bits */
#define SPW_DMACTRL_TXEN (1 << 0)	/* transmitter enable */
#define SPW_DMACTRL_RXEN (1 << 1)	/* receiver enable */
#define SPW_DMACTRL_TXIE (1 << 2)	/* TX interrupt enable */
#define SPW_DMACTRL_RXIE (1 << 3)	/* RX interrupt enable */
#define SPW_DMACTRL_AI (1 << 4)
#define SPW_DMACTRL_PS (1 << 5)		/* packet sent */
#define SPW_DMACTRL_PR (1 << 6)		/* packet received */
#define SPW_DMACTRL_TA (1 << 7)		/* TX AHB error */
#define SPW_DMACTRL_RA (1 << 8)		/* RX AHB error */
#define SPW_DMACTRL_AT (1 << 9)
#define SPW_DMACTRL_RX (1 << 10)	/* RX active */
#define SPW_DMACTRL_RD (1 << 11)	/* RX descriptors available */
#define SPW_DMACTRL_NS (1 << 12)	/* no spill */

/* Read-modify-write masks: bits to preserve when updating the other
 * direction's settings in the shared DMA control register */
#define SPW_PREPAREMASK_TX (SPW_DMACTRL_RXEN | SPW_DMACTRL_RXIE | SPW_DMACTRL_PS | SPW_DMACTRL_TA | SPW_DMACTRL_RD | SPW_DMACTRL_NS)
#define SPW_PREPAREMASK_RX (SPW_DMACTRL_TXEN | SPW_DMACTRL_TXIE | SPW_DMACTRL_AI | SPW_DMACTRL_PR | SPW_DMACTRL_RA)
+
/* Hardware-layer helpers (defined later in this file) */
static int grspw_hw_init(GRSPW_DEV *pDev);
static int grspw_hw_send(GRSPW_DEV *pDev, unsigned int hlen, char *hdr, unsigned int dlen, char *data, unsigned int options);
static int grspw_hw_receive(GRSPW_DEV *pDev,char *b,int c);
static int grspw_hw_startup (GRSPW_DEV *pDev, int timeout);
static int grspw_hw_stop (GRSPW_DEV *pDev, int rx, int tx);
static void grspw_hw_wait_rx_inactive(GRSPW_DEV *pDev);
static int grspw_hw_waitlink (GRSPW_DEV *pDev, int timeout);
static void grspw_hw_reset(GRSPW_DEV *pDev);
static void grspw_hw_read_config(GRSPW_DEV *pDev);

/* Interrupt and buffer helpers */
static void check_rx_errors(GRSPW_DEV *pDev, int ctrl);
static void grspw_rxnext(GRSPW_DEV *pDev);
static void grspw_interrupt(void *arg);
static int grspw_buffer_alloc(GRSPW_DEV *pDev);

/* RTEMS I/O driver entry points */
static rtems_device_driver grspw_initialize(
        rtems_device_major_number  major,
        rtems_device_minor_number  minor,
        void                    * arg
        );

static rtems_device_driver grspw_open(
        rtems_device_major_number major,
        rtems_device_minor_number minor,
        void                    * arg
        );

static rtems_device_driver grspw_close(
        rtems_device_major_number major,
        rtems_device_minor_number minor,
        void                    * arg
        );

static rtems_device_driver grspw_read(
        rtems_device_major_number major,
        rtems_device_minor_number minor,
        void                    * arg
        );

static rtems_device_driver grspw_write(
        rtems_device_major_number major,
        rtems_device_minor_number minor,
        void                    * arg
        );

static rtems_device_driver grspw_control(
        rtems_device_major_number major,
        rtems_device_minor_number minor,
        void                    * arg
        );

#define GRSPW_DRIVER_TABLE_ENTRY \
  { grspw_initialize, \
    grspw_open, \
    grspw_close, \
    grspw_read, \
    grspw_write, \
    grspw_control }

/* The RTEMS I/O driver table is registered once for all GRSPW cores */
static rtems_driver_address_table grspw_driver = GRSPW_DRIVER_TABLE_ENTRY;
static int grspw_driver_io_registered = 0;
static rtems_device_major_number grspw_driver_io_major = 0;
+
/******************* Driver manager interface ***********************/

/* Driver prototypes */
int grspw_register_io(rtems_device_major_number *m);
int grspw_device_init(GRSPW_DEV *pDev);

int grspw_init2(struct drvmgr_dev *dev);
int grspw_init3(struct drvmgr_dev *dev);

/* Driver-manager init stages: allocate in init2, configure in init3 */
struct drvmgr_drv_ops grspw_ops =
{
	.init = {NULL, grspw_init2, grspw_init3, NULL},
	.remove = NULL,
	.info = NULL
};

/* AMBA PnP IDs of the cores handled by this driver */
struct amba_dev_id grspw_ids[] =
{
	{VENDOR_GAISLER, GAISLER_SPW},
	{VENDOR_GAISLER, GAISLER_SPW2},
	{VENDOR_GAISLER, GAISLER_SPW2_DMA},
	{0, 0}		/* Mark end of table */
};

struct amba_drv_info grspw_drv_info =
{
	{
		DRVMGR_OBJ_DRV,			/* Driver */
		NULL,				/* Next driver */
		NULL,				/* Device list */
		DRIVER_AMBAPP_GAISLER_GRSPW_ID,	/* Driver ID */
		"GRSPW_DRV",			/* Driver Name */
		DRVMGR_BUS_TYPE_AMBAPP,		/* Bus Type */
		&grspw_ops,
		NULL,				/* Funcs */
		0,				/* No devices yet */
		0,
	},
	&grspw_ids[0]
};
+
/* Register the GRSPW driver with the driver manager; devices are probed
 * and initialized later through grspw_init2()/grspw_init3(). */
void grspw_register_drv (void)
{
	SPACEWIRE_DBG("Registering GRSPW driver\n");
	drvmgr_drv_register(&grspw_drv_info.general);
}
+
+int grspw_init2(struct drvmgr_dev *dev)
+{
+ GRSPW_DEV *priv;
+
+ SPACEWIRE_DBG("GRSPW[%d] on bus %s\n", dev->minor_drv,
+ dev->parent->dev->name);
+ priv = dev->priv = grlib_calloc(1, sizeof(*priv));
+ if ( !priv )
+ return DRVMGR_NOMEM;
+ priv->dev = dev;
+
+ /* This core will not find other cores, so we wait for init2() */
+
+ return DRVMGR_OK;
+}
+
+int grspw_init3(struct drvmgr_dev *dev)
+{
+ GRSPW_DEV *priv;
+ char prefix[32];
+ rtems_status_code status;
+
+ priv = dev->priv;
+
+ /* Do initialization */
+
+ if ( grspw_driver_io_registered == 0) {
+ /* Register the I/O driver only once for all cores */
+ if ( grspw_register_io(&grspw_driver_io_major) ) {
+ /* Failed to register I/O driver */
+ free(dev->priv);
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+
+ grspw_driver_io_registered = 1;
+ }
+
+ /* I/O system registered and initialized
+ * Now we take care of device initialization.
+ */
+
+ /* Get frequency in Hz */
+ if ( drvmgr_freq_get(dev, DEV_APB_SLV, &priv->core_freq_khz) ) {
+ return DRVMGR_FAIL;
+ }
+ /* Convert from Hz -> kHz */
+ priv->core_freq_khz = priv->core_freq_khz / 1000;
+
+ if ( grspw_device_init(priv) ) {
+ return DRVMGR_FAIL;
+ }
+
+ /* Get Filesystem name prefix */
+ prefix[0] = '\0';
+ if ( drvmgr_get_dev_prefix(dev, prefix) ) {
+ /* Failed to get prefix, make sure of a unique FS name
+ * by using the driver minor.
+ */
+ sprintf(priv->devName, "/dev/grspw%d", dev->minor_drv);
+ } else {
+ /* Got special prefix, this means we have a bus prefix
+ * And we should use our "bus minor"
+ */
+ sprintf(priv->devName, "/dev/%sgrspw%d", prefix, dev->minor_bus);
+ }
+
+ /* Register Device */
+ status = rtems_io_register_name(priv->devName, grspw_driver_io_major, dev->minor_drv);
+ if (status != RTEMS_SUCCESSFUL) {
+ return DRVMGR_FAIL;
+ }
+
+ return DRVMGR_OK;
+}
+
+/******************* Driver Implementation ***********************/
+
+int grspw_register_io(rtems_device_major_number *m)
+{
+ rtems_status_code r;
+
+ if ((r = rtems_io_register_driver(0, &grspw_driver, m)) == RTEMS_SUCCESSFUL) {
+ SPACEWIRE_DBG("GRSPW driver successfully registered, major: %d\n", *m);
+ } else {
+ switch(r) {
+ case RTEMS_TOO_MANY:
+ printk("GRSPW rtems_io_register_driver failed: RTEMS_TOO_MANY\n");
+ return -1;
+ case RTEMS_INVALID_NUMBER:
+ printk("GRSPW rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n");
+ return -1;
+ case RTEMS_RESOURCE_IN_USE:
+ printk("GRSPW rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n");
+ return -1;
+ default:
+ printk("GRSPW rtems_io_register_driver failed\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
/* Per-device initialization: extract register base/IRQ/core version from
 * AMBA PnP, apply buffer-sizing defaults and bus-resource overrides,
 * allocate DMA buffers, create the TX/RX wait semaphores and run the
 * low-level hardware init.
 *
 * Returns 0 on success, non-zero on failure (callers test for non-zero
 * only).
 */
int grspw_device_init(GRSPW_DEV *pDev)
{
	struct amba_dev_info *ambadev;
	struct ambapp_core *pnpinfo;
	union drvmgr_key_value *value;

	/* Get device information from AMBA PnP information */
	ambadev = (struct amba_dev_info *)pDev->dev->businfo;
	if ( ambadev == NULL ) {
		return -1;
	}
	pnpinfo = &ambadev->info;
	pDev->irq = pnpinfo->irq;
	pDev->regs = (LEON3_SPACEWIRE_Regs_Map *)pnpinfo->apb_slv->start;
	pDev->minor = pDev->dev->minor_drv;

	/* Get SpaceWire core version */
	switch( pnpinfo->device ) {
		case GAISLER_SPW:
			pDev->core_ver = 1;
			break;
		case GAISLER_SPW2:
			pDev->core_ver = 2;
			break;
		case GAISLER_SPW2_DMA:
			pDev->core_ver = 3;
			break;
		default:
			return -1;
	}

	/* initialize the core with some reasonable values,
	 * actual initialization is done later using ioctl(fd)
	 * on the opened device */
	pDev->config.rxmaxlen = SPACEWIRE_RXPCK_SIZE;
	pDev->txdbufsize = SPACEWIRE_TXD_SIZE;
	pDev->txhbufsize = SPACEWIRE_TXH_SIZE;
	pDev->rxbufsize = SPACEWIRE_RXPCK_SIZE;
	pDev->txbufcnt = SPACEWIRE_TXBUFS_NR;
	pDev->rxbufcnt = SPACEWIRE_RXBUFS_NR;

	pDev->_ptr_rxbuf0 = 0;
	pDev->ptr_rxbuf0 = 0;
	pDev->ptr_txdbuf0 = 0;
	pDev->ptr_txhbuf0 = 0;
	pDev->ptr_bd0 = 0;
	pDev->rx_dma_area = 0;
	pDev->tx_data_dma_area = 0;
	pDev->tx_hdr_dma_area = 0;
	pDev->bd_dma_area = 0;

	/* Get Configuration from Bus resources (Let user override defaults) */

	value = drvmgr_dev_key_get(pDev->dev, "txBdCnt", DRVMGR_KT_INT);
	if ( value )
		pDev->txbufcnt = value->i;

	value = drvmgr_dev_key_get(pDev->dev, "rxBdCnt", DRVMGR_KT_INT);
	if ( value )
		pDev->rxbufcnt = value->i;

	value = drvmgr_dev_key_get(pDev->dev, "txDataSize", DRVMGR_KT_INT);
	if ( value )
		pDev->txdbufsize = value->i;

	value = drvmgr_dev_key_get(pDev->dev, "txHdrSize", DRVMGR_KT_INT);
	if ( value )
		pDev->txhbufsize = value->i;

	/* rxPktSize also caps the configured max RX packet length */
	value = drvmgr_dev_key_get(pDev->dev, "rxPktSize", DRVMGR_KT_INT);
	if ( value ) {
		pDev->rxbufsize = value->i;
		pDev->config.rxmaxlen = pDev->rxbufsize;
	}

	/* DMA area addresses; see grspw_buffer_alloc() for the encoding */
	value = drvmgr_dev_key_get(pDev->dev, "rxDmaArea", DRVMGR_KT_INT);
	if ( value )
		pDev->rx_dma_area = value->i;

	value = drvmgr_dev_key_get(pDev->dev, "txDataDmaArea", DRVMGR_KT_INT);
	if ( value )
		pDev->tx_data_dma_area = value->i;

	value = drvmgr_dev_key_get(pDev->dev, "txHdrDmaArea", DRVMGR_KT_INT);
	if ( value )
		pDev->tx_hdr_dma_area = value->i;

	value = drvmgr_dev_key_get(pDev->dev, "bdDmaArea", DRVMGR_KT_INT);
	if ( value )
		pDev->bd_dma_area = value->i;

	/* NOTE(review): returns RTEMS_NO_MEMORY here but -1 above; both are
	 * non-zero, which is all the caller checks for. */
	if (grspw_buffer_alloc(pDev))
		return RTEMS_NO_MEMORY;

	/* Create semaphores used to block read()/write() callers until the
	 * ISR signals descriptor/packet availability.
	 * NOTE(review): the create status is not checked -- confirm this is
	 * acceptable at init time. */
	rtems_semaphore_create(
		rtems_build_name('T', 'x', 'S', '0' + pDev->minor),
		0,
		RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | RTEMS_NO_INHERIT_PRIORITY | \
		RTEMS_NO_PRIORITY_CEILING,
		0,
		&(pDev->txsp));

	rtems_semaphore_create(
		rtems_build_name('R', 'x', 'S', '0' + pDev->minor),
		0,
		RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | RTEMS_NO_INHERIT_PRIORITY | \
		RTEMS_NO_PRIORITY_CEILING,
		0,
		&(pDev->rxsp));

	grspw_hw_init(pDev);

	return 0;
}
+
+/* Get a value at least 6.4us in number of clock cycles */
+static unsigned int grspw_calc_timer64(int freq_khz){
+ unsigned int timer64 = (freq_khz*64+9999)/10000;
+ return timer64 & 0xfff;
+}
+
/* Number of core clock cycles covering at least 850 ns, minus 3, masked
 * to the 10-bit disconnect register field. */
static unsigned int grspw_calc_disconnect(int freq_khz)
{
	/* ceil(freq_khz * 85 / 100000) - 3; unsigned wrap for tiny
	 * frequencies matches the original int-to-unsigned conversion */
	unsigned int cycles = (freq_khz * 85 + 99999) / 100000;
	return (cycles - 3) & 0x3ff;
}
+
+static int grspw_buffer_alloc(GRSPW_DEV *pDev)
+{
+ /* RX DMA AREA */
+ if (pDev->rx_dma_area & 1) {
+ /* Address given in remote address */
+ pDev->ptr_rxbuf0_remote = (char *)(pDev->rx_dma_area & ~1);
+ drvmgr_translate_check(
+ pDev->dev,
+ DMAMEM_TO_CPU,
+ (void *)pDev->ptr_rxbuf0_remote,
+ (void **)&pDev->ptr_rxbuf0,
+ pDev->rxbufsize * pDev->rxbufcnt);
+
+ } else {
+ if (pDev->rx_dma_area == 0) {
+ if (pDev->_ptr_rxbuf0)
+ free((void *)pDev->_ptr_rxbuf0);
+ pDev->_ptr_rxbuf0 = (unsigned int) grlib_malloc(
+ pDev->rxbufsize * pDev->rxbufcnt+4);
+ pDev->ptr_rxbuf0 = (char *)((pDev->_ptr_rxbuf0+7)&~7);
+ if ( !pDev->ptr_rxbuf0 )
+ return 1;
+ } else {
+ pDev->ptr_rxbuf0 = (char *)pDev->rx_dma_area;
+ }
+ drvmgr_translate_check(
+ pDev->dev,
+ CPUMEM_TO_DMA,
+ (void *)pDev->ptr_rxbuf0,
+ (void **)&pDev->ptr_rxbuf0_remote,
+ pDev->rxbufsize * pDev->rxbufcnt);
+ }
+
+ /* TX-DATA DMA AREA */
+ if (pDev->tx_data_dma_area & 1) {
+ /* Address given in remote address */
+ pDev->ptr_txdbuf0_remote = (char*)(pDev->tx_data_dma_area & ~1);
+ drvmgr_translate_check(
+ pDev->dev,
+ DMAMEM_TO_CPU,
+ (void *)pDev->ptr_txdbuf0_remote,
+ (void **)&pDev->ptr_txdbuf0,
+ pDev->txdbufsize * pDev->txbufcnt);
+ } else {
+ if (pDev->tx_data_dma_area == 0) {
+ if (pDev->ptr_txdbuf0)
+ free(pDev->ptr_txdbuf0);
+ pDev->ptr_txdbuf0 = (char *) grlib_malloc(
+ pDev->txdbufsize * pDev->txbufcnt);
+ if (!pDev->ptr_txdbuf0)
+ return 1;
+ } else {
+ pDev->ptr_txdbuf0 = (char *)pDev->tx_data_dma_area;
+ }
+ drvmgr_translate_check(
+ pDev->dev,
+ CPUMEM_TO_DMA,
+ (void *)pDev->ptr_txdbuf0,
+ (void **)&pDev->ptr_txdbuf0_remote,
+ pDev->txdbufsize * pDev->txbufcnt);
+ }
+
+ /* TX-HEADER DMA AREA */
+ if (pDev->tx_hdr_dma_area & 1) {
+ /* Address given in remote address */
+ pDev->ptr_txhbuf0_remote = (char *)(pDev->tx_hdr_dma_area & ~1);
+ drvmgr_translate_check(
+ pDev->dev,
+ DMAMEM_TO_CPU,
+ (void *)pDev->ptr_txhbuf0_remote,
+ (void **)&pDev->ptr_txhbuf0,
+ pDev->txhbufsize * pDev->txbufcnt);
+ } else {
+ if (pDev->tx_hdr_dma_area == 0) {
+ if (pDev->ptr_txhbuf0)
+ free(pDev->ptr_txhbuf0);
+ pDev->ptr_txhbuf0 = (char *) grlib_malloc(
+ pDev->txhbufsize * pDev->txbufcnt);
+ if (!pDev->ptr_txhbuf0)
+ return 1;
+ } else {
+ pDev->ptr_txhbuf0 = (char *)pDev->tx_hdr_dma_area;
+ }
+ drvmgr_translate_check(
+ pDev->dev,
+ CPUMEM_TO_DMA,
+ (void *)pDev->ptr_txhbuf0,
+ (void **)&pDev->ptr_txhbuf0_remote,
+ pDev->txhbufsize * pDev->txbufcnt);
+ }
+
+ /* DMA DESCRIPTOR TABLES */
+ if (pDev->bd_dma_area & 1) {
+ /* Address given in remote address */
+ pDev->ptr_bd0_remote = (char *)(pDev->bd_dma_area & ~1);
+ drvmgr_translate_check(
+ pDev->dev,
+ DMAMEM_TO_CPU,
+ (void *)pDev->ptr_bd0_remote,
+ (void **)&pDev->ptr_bd0,
+ 2 * SPACEWIRE_BDTABLE_SIZE);
+ } else {
+ if (pDev->bd_dma_area == 0) {
+ if (pDev->_ptr_bd0)
+ free(pDev->_ptr_bd0);
+ pDev->_ptr_bd0 =
+ rtems_heap_allocate_aligned_with_boundary(
+ SPACEWIRE_BDTABLE_SIZE*2, 1024, 0);
+ if (!pDev->_ptr_bd0)
+ return 1;
+ pDev->ptr_bd0 = (char *)pDev->_ptr_bd0;
+ } else {
+ pDev->ptr_bd0 = (char *)pDev->bd_dma_area;
+ }
+ drvmgr_translate_check(
+ pDev->dev,
+ CPUMEM_TO_DMA,
+ (void *)pDev->ptr_bd0,
+ (void **)&pDev->ptr_bd0_remote,
+ 2 * SPACEWIRE_BDTABLE_SIZE);
+ }
+
+ return 0;
+}
+
/* GRSPW interrupt service routine for one core.
 *
 * Handles, in order: received time-codes (first, to minimize latency),
 * link errors, error statistics, reaping of completed TX descriptors and
 * RX packet notification. Blocked readers/writers are woken through the
 * rxsp/txsp semaphores.
 */
static void grspw_interrupt(void *arg)
{
	GRSPW_DEV *pDev = (GRSPW_DEV *)arg;
	int dmactrl;
	int status;
	int ctrl;
	unsigned int timecode;

	/* Read status and acknowledge the handled error bits (presumably
	 * write-one-to-clear -- confirm against the GRSPW manual) */
	status = SPW_STATUS_READ(pDev);
	/*SPW_STATUS_WRITE(pDev, SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE | SPW_STATUS_WE | SPW_STATUS_IA | SPW_STATUS_EE | SPW_STATUS_TO);*/
	SPW_STATUS_WRITE(pDev, status & (SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE | SPW_STATUS_WE | SPW_STATUS_IA | SPW_STATUS_EE));

	/* Make sure to put the timecode handling first in order to get the smallest
	 * possible interrupt latency
	 */
	if ( (status & SPW_STATUS_TO) && (grspw_timecode_callback != NULL) ) {
		/* Timecode received. Let custom function handle this */
		SPW_STATUS_WRITE(pDev, SPW_STATUS_TO);
		timecode = SPW_READ(&pDev->regs->time);
		(grspw_timecode_callback)(pDev,pDev->regs,pDev->minor,timecode);
	}

	/* Clear SPW_DMACTRL_PR if set (writing the read value back) */
	dmactrl = SPW_READ(&pDev->regs->dma0ctrl);
	/*SPW_WRITE(&pDev->regs->dma0ctrl, dmactrl | SPW_DMACTRL_PR);*/
	SPW_WRITE(&pDev->regs->dma0ctrl, dmactrl);

	/* If linkinterrupts are enabled check if it was a linkerror irq and then send an event to the
	   process set in the config */
	if (pDev->config.link_err_irq) {
		if (status & (SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE | SPW_STATUS_WE)) {
			rtems_event_send(pDev->config.event_id, SPW_LINKERR_EVENT);
			if (pDev->config.disable_err) {
				/* disable link: clear start/disable bits then
				 * set link-disabled, and mark the device as
				 * stopped */
				SPW_CTRL_WRITE(pDev, (SPW_CTRL_READ(pDev) & 0xFFFFFFFC) | SPW_CTRL_LINKDISABLED);
				pDev->config.linkdisabled = 1;
				pDev->config.linkstart = 0;
				pDev->running = 0;
			}
		}
	}

	/* Per-error statistics counters */
	if (status & SPW_STATUS_CE) {
		pDev->stat.credit_err++;
	}
	if (status & SPW_STATUS_ER) {
		pDev->stat.escape_err++;
	}
	if (status & SPW_STATUS_DE) {
		pDev->stat.disconnect_err++;
	}
	if (status & SPW_STATUS_PE) {
		pDev->stat.parity_err++;
	}
	if (status & SPW_STATUS_WE) {
		pDev->stat.write_sync_err++;
	}
	if (status & SPW_STATUS_IA) {
		pDev->stat.invalid_address++;
	}
	if (status & SPW_STATUS_EE) {
		pDev->stat.early_ep++;
	}

	/* Check for tx interrupts: walk from the oldest queued descriptor,
	 * stopping at the first one the hardware still owns (EN set) */
	while( (pDev->tx_sent != pDev->tx_cur) || pDev->tx_all_in_use) {
		/* Has this descriptor been sent? */
		ctrl = SPW_READ((volatile void *)&pDev->tx[pDev->tx_sent].ctrl);
		if ( ctrl & SPW_TXBD_EN ) {
			break;
		}
		/* Yes, increment status counters & tx_sent so we can use this descriptor to send more packets with */
		pDev->stat.packets_sent++;

		/* Wake one blocked writer per freed descriptor */
		rtems_semaphore_release(pDev->txsp);

		if ( ctrl & SPW_TXBD_LE ) {
			pDev->stat.tx_link_err++;
		}

		/* step to next descriptor */
		pDev->tx_sent = (pDev->tx_sent + 1) % pDev->txbufcnt;
		pDev->tx_all_in_use = 0; /* not all of the descriptors can be in use since we just freed one. */
	}

	/* Check for rx interrupts: a packet was received, wake a reader */
	if (dmactrl & SPW_DMACTRL_PR) {
		rtems_semaphore_release(pDev->rxsp);
	}
}
+
+static rtems_device_driver grspw_initialize(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void *arg
+)
+{
+ /* Initialize device-common data structures here */
+ return RTEMS_SUCCESSFUL;
+}
+
/* RTEMS I/O "open" entry point: look up the device by minor number,
 * enforce single-open, reset statistics and configuration to defaults,
 * reset the core and read back its hardware configuration.
 *
 * Returns RTEMS_INVALID_NAME for an unknown minor,
 * RTEMS_RESOURCE_IN_USE if already open, otherwise RTEMS_SUCCESSFUL.
 */
static rtems_device_driver grspw_open(
	rtems_device_major_number major,
	rtems_device_minor_number minor,
	void * arg
	)
{
	GRSPW_DEV *pDev;
	struct drvmgr_dev *dev;
	SPACEWIRE_DBGC(DBGSPW_IOCALLS, "open [%i,%i]\n", major, minor);

	if ( drvmgr_get_dev(&grspw_drv_info.general, minor, &dev) ) {
		SPACEWIRE_DBG("Wrong minor %d\n", minor);
		return RTEMS_INVALID_NAME;
	}
	pDev = (GRSPW_DEV *)dev->priv;

	if ( pDev->open )
		return RTEMS_RESOURCE_IN_USE;

	/* Mark device open */
	pDev->open = 1;

	/* Clear statistics */
	pDev->stat.tx_link_err = 0;
	pDev->stat.rx_rmap_header_crc_err = 0;
	pDev->stat.rx_rmap_data_crc_err = 0;
	pDev->stat.rx_eep_err = 0;
	pDev->stat.rx_truncated = 0;
	pDev->stat.parity_err = 0;
	pDev->stat.escape_err = 0;
	pDev->stat.credit_err = 0;
	pDev->stat.write_sync_err = 0;
	pDev->stat.disconnect_err = 0;
	pDev->stat.early_ep = 0;
	pDev->stat.invalid_address = 0;
	pDev->stat.packets_sent = 0;
	pDev->stat.packets_received = 0;

	/* Default configuration: non-blocking I/O, no link-error handling */
	pDev->config.rm_prot_id = 0;
	pDev->config.keep_source = 0;
	pDev->config.check_rmap_err = 0;
	pDev->config.tx_blocking = 0;
	pDev->config.tx_block_on_full = 0;
	pDev->config.rx_blocking = 0;
	pDev->config.disable_err = 0;
	pDev->config.link_err_irq = 0;
	pDev->config.event_id = 0;
	pDev->config.rtimeout = 0;

	pDev->running = 0;
	pDev->core_freq_khz = 0;

	/* Reset Core */
	grspw_hw_reset(pDev);

	/* Read default configuration */
	grspw_hw_read_config(pDev);

	return RTEMS_SUCCESSFUL;
}
+
/* RTEMS I/O "close" entry point: stop RX and TX, reset the core and mark
 * the device closed.
 *
 * NOTE(review): this deletes txsp/rxsp, which were created once in
 * grspw_device_init() and are NOT recreated by grspw_open() -- a
 * close-then-reopen sequence would leave read()/write() blocking on
 * stale semaphore ids. Confirm intended lifecycle.
 */
static rtems_device_driver grspw_close(
	rtems_device_major_number major,
	rtems_device_minor_number minor,
	void * arg
	)
{
	GRSPW_DEV *pDev;
	struct drvmgr_dev *dev;

	if ( drvmgr_get_dev(&grspw_drv_info.general, minor, &dev) ) {
		return RTEMS_INVALID_NAME;
	}
	pDev = (GRSPW_DEV *)dev->priv;

	SPACEWIRE_DBGC(DBGSPW_IOCALLS, "close [%i,%i]\n", major, minor);
	rtems_semaphore_delete(pDev->txsp);
	rtems_semaphore_delete(pDev->rxsp);

	grspw_hw_stop(pDev,1,1);

	grspw_hw_reset(pDev);

	/* Mark device closed - not open */
	pDev->open = 0;

	return RTEMS_SUCCESSFUL;
}
+
/* RTEMS I/O "read" entry point: receive one SpaceWire packet into the
 * caller's buffer.
 *
 * Polls grspw_hw_receive() until data is available. In blocking mode
 * (config.rx_blocking) the caller sleeps on rxsp (signaled by the ISR),
 * optionally bounded by config.rtimeout ticks; in non-blocking mode
 * RTEMS_RESOURCE_IN_USE is returned when no packet is pending.
 *
 * Note: RTEMS_INVALID_NAME is (re)used for link-down and bad-argument
 * errors as well as for an unknown minor.
 */
static rtems_device_driver grspw_read(
	rtems_device_major_number major,
	rtems_device_minor_number minor,
	void * arg
	)
{
	rtems_libio_rw_args_t *rw_args;
	unsigned int count = 0;
	GRSPW_DEV *pDev;
	struct drvmgr_dev *dev;
	int status;

	if ( drvmgr_get_dev(&grspw_drv_info.general, minor, &dev) ) {
		return RTEMS_INVALID_NAME;
	}
	pDev = (GRSPW_DEV *)dev->priv;

	rw_args = (rtems_libio_rw_args_t *) arg;

	/* is link up? */
	if ( !pDev->running ) {
		return RTEMS_INVALID_NAME;
	}

	if ((rw_args->count < 1) || (rw_args->buffer == NULL)) {
		return RTEMS_INVALID_NAME;
	}

	SPACEWIRE_DBGC(DBGSPW_IOCALLS, "read [%i,%i]: buf:0x%x len:%i \n", major, minor, (unsigned int)rw_args->buffer, rw_args->count);

	while ( (count = grspw_hw_receive(pDev, rw_args->buffer, rw_args->count)) == 0) {
		/* wait a moment for any descriptors to get available
		 *
		 * Semaphore is signaled by interrupt handler
		 */
		if (pDev->config.rx_blocking) {
			SPACEWIRE_DBG2("Rx blocking\n");
			if ( pDev->config.rtimeout ) {
				status = rtems_semaphore_obtain(pDev->rxsp, RTEMS_WAIT, pDev->config.rtimeout);
				if ( status == RTEMS_TIMEOUT )
					return RTEMS_TIMEOUT;
			} else {
				rtems_semaphore_obtain(pDev->rxsp, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
			}
		} else {
			SPACEWIRE_DBG2("Rx non blocking\n");
			return RTEMS_RESOURCE_IN_USE;
		}
	}

#ifdef DEBUG_SPACEWIRE_ONOFF
	/* Optional hex/ASCII dump of the received packet */
	if (DEBUG_SPACEWIRE_FLAGS & DBGSPW_DUMP) {
		int k;
		for (k = 0; k < count; k++){
			if (k % 16 == 0) {
				printf ("\n");
			}
			printf ("%.2x(%c) ", rw_args->buffer[k] & 0xff, isprint(rw_args->buffer[k] & 0xff) ? rw_args->buffer[k] & 0xff : ' ');
		}
		printf ("\n");
	}
#endif

	rw_args->bytes_moved = count;
	return RTEMS_SUCCESSFUL;
}
+
/* RTEMS I/O "write" entry point: transmit the caller's buffer as one
 * SpaceWire packet (no separate header).
 *
 * Retries grspw_hw_send() until a TX descriptor is available. With
 * config.tx_block_on_full set the caller sleeps on txsp (signaled by the
 * ISR when a descriptor is freed); otherwise RTEMS_RESOURCE_IN_USE is
 * returned immediately.
 *
 * Note: RTEMS_INVALID_NAME is (re)used for link-down and bad-argument
 * errors (count < 1, count > txdbufsize, NULL buffer) as well as for an
 * unknown minor.
 */
static rtems_device_driver grspw_write(
	rtems_device_major_number major,
	rtems_device_minor_number minor,
	void * arg
)
{
	rtems_libio_rw_args_t *rw_args;
	GRSPW_DEV *pDev;
	struct drvmgr_dev *dev;

	if ( drvmgr_get_dev(&grspw_drv_info.general, minor, &dev) ) {
		return RTEMS_INVALID_NAME;
	}
	pDev = (GRSPW_DEV *)dev->priv;

	rw_args = (rtems_libio_rw_args_t *) arg;
	SPACEWIRE_DBGC(DBGSPW_IOCALLS, "write [%i,%i]: buf:0x%x len:%i\n", major, minor, (unsigned int)rw_args->buffer, rw_args->count);

	/* is link up? */
	if ( !pDev->running ) {
		return RTEMS_INVALID_NAME;
	}

	if ((rw_args->count > pDev->txdbufsize) || (rw_args->count < 1) || (rw_args->buffer == NULL)) {
		return RTEMS_INVALID_NAME;
	}

	while ((rw_args->bytes_moved = grspw_hw_send(pDev, 0, NULL, rw_args->count, rw_args->buffer, 0)) == 0) {
		if (pDev->config.tx_block_on_full == 1) {
			SPACEWIRE_DBG2("Tx Block on full \n");
			rtems_semaphore_obtain(pDev->txsp, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
		} else {
			SPACEWIRE_DBG2("Tx non blocking return when full \n");
			return RTEMS_RESOURCE_IN_USE;
		}
	}
	return RTEMS_SUCCESSFUL;
}
+
+static rtems_device_driver grspw_control(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void * arg
+ )
+{
+ spw_ioctl_pkt_send *args;
+ spw_ioctl_packetsize *ps;
+ int status;
+ unsigned int tmp,mask,nodeaddr,nodemask;
+ int timeout;
+ rtems_device_driver ret;
+ rtems_libio_ioctl_args_t *ioarg = (rtems_libio_ioctl_args_t *) arg;
+ GRSPW_DEV *pDev;
+ struct drvmgr_dev *dev;
+
+ SPACEWIRE_DBGC(DBGSPW_IOCALLS, "ctrl [%i,%i]\n", major, minor);
+
+ if ( drvmgr_get_dev(&grspw_drv_info.general, minor, &dev) ) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev = (GRSPW_DEV *)dev->priv;
+
+ if (!ioarg)
+ return RTEMS_INVALID_NAME;
+
+ ioarg->ioctl_return = 0;
+ switch(ioarg->command) {
+ case SPACEWIRE_IOCTRL_SET_NODEADDR:
+ /*set node address*/
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_NODEADDR %i\n",(unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 255) {
+ return RTEMS_INVALID_NAME;
+ }
+ nodeaddr = ((unsigned int)ioarg->buffer) & 0xff;
+ tmp = SPW_READ(&pDev->regs->nodeaddr);
+ tmp &= 0xffffff00; /* Remove old address */
+ tmp |= nodeaddr;
+ SPW_WRITE(&pDev->regs->nodeaddr, tmp);
+ if ((SPW_READ(&pDev->regs->nodeaddr)&0xff) != nodeaddr) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.nodeaddr = nodeaddr;
+ break;
+ case SPACEWIRE_IOCTRL_SET_NODEMASK:
+ /*set node address*/
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_NODEMASK %i\n",(unsigned int)ioarg->buffer);
+ if ( pDev->core_ver > 1 ){
+ if ((unsigned int)ioarg->buffer > 255) {
+ return RTEMS_INVALID_NAME;
+ }
+ nodemask = ((unsigned int)ioarg->buffer) & 0xff;
+ tmp = SPW_READ(&pDev->regs->nodeaddr);
+ tmp &= 0xffff00ff; /* Remove old mask */
+ tmp |= nodemask<<8;
+ SPW_WRITE(&pDev->regs->nodeaddr, tmp);
+ if (((SPW_READ(&pDev->regs->nodeaddr)>>8)&0xff) != nodemask) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.nodemask = nodemask;
+ }else{
+ SPACEWIRE_DBG("SPACEWIRE_IOCTRL_SET_NODEMASK: not implemented in GRSPW1 HW\n");
+ }
+ break;
+ case SPACEWIRE_IOCTRL_SET_RXBLOCK:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_RXBLOCK %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev->config.rx_blocking = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_DESTKEY:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_DESTKEY %i\n", (unsigned int)ioarg->buffer);
+ if (!pDev->config.is_rmap) {
+ return RTEMS_NOT_IMPLEMENTED;
+ }
+ if ((unsigned int)ioarg->buffer > 255) {
+ return RTEMS_INVALID_NAME;
+ }
+ SPW_WRITE(&pDev->regs->destkey, (unsigned int)ioarg->buffer);
+ if (SPW_READ(&pDev->regs->destkey) != (unsigned int)ioarg->buffer) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.destkey = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_CLKDIV:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_CLKDIV %i\n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 255) {
+ return RTEMS_INVALID_NAME;
+ }
+ if ( pDev->core_ver == 3 )
+ break;
+ tmp = SPW_READ(&pDev->regs->clkdiv);
+ tmp &= ~0xff; /* Remove old Clockdiv Setting */
+ tmp |= ((unsigned int)ioarg->buffer) & 0xff; /* add new clockdiv setting */
+ SPW_WRITE(&pDev->regs->clkdiv, tmp);
+ if (SPW_READ(&pDev->regs->clkdiv) != tmp) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.clkdiv = tmp;
+ break;
+ case SPACEWIRE_IOCTRL_SET_CLKDIVSTART:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_CLKDIVSTART %i\n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 255) {
+ return RTEMS_INVALID_NAME;
+ }
+ if ( pDev->core_ver == 3 )
+ break;
+ tmp = SPW_READ(&pDev->regs->clkdiv);
+ tmp &= ~0xff00; /* Remove old Clockdiv Start Setting */
+ tmp |= (((unsigned int)ioarg->buffer) & 0xff)<<8; /* add new clockdiv start setting */
+ SPW_WRITE(&pDev->regs->clkdiv, tmp);
+ if (SPW_READ(&pDev->regs->clkdiv) != tmp) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.clkdiv = tmp;
+ break;
+ case SPACEWIRE_IOCTRL_SET_TIMER:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_TIMER %i\n", (unsigned int)ioarg->buffer);
+ if ( pDev->core_ver <= 1 ) {
+ if ((unsigned int)ioarg->buffer > 4095) {
+ return RTEMS_INVALID_NAME;
+ }
+ SPW_WRITE(&pDev->regs->timer, (SPW_READ(&pDev->regs->timer) & 0xFFFFF000) | ((unsigned int)ioarg->buffer & 0xFFF));
+ if ((SPW_READ(&pDev->regs->timer) & 0xFFF) != (unsigned int)ioarg->buffer) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.timer = (unsigned int)ioarg->buffer;
+ }else{
+ SPACEWIRE_DBG("SPACEWIRE_IOCTRL_SET_TIMER: removed in GRSPW2 HW\n");
+ }
+ break;
+ case SPACEWIRE_IOCTRL_SET_DISCONNECT:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_DISCONNECT %i\n", (unsigned int)ioarg->buffer);
+ if ( pDev->core_ver <= 1 ) {
+ if ((unsigned int)ioarg->buffer > 1023) {
+ return RTEMS_INVALID_NAME;
+ }
+ SPW_WRITE(&pDev->regs->timer, (SPW_READ(&pDev->regs->timer) & 0xFFC00FFF) | (((unsigned int)ioarg->buffer & 0x3FF) << 12));
+ if (((SPW_READ(&pDev->regs->timer) >> 12) & 0x3FF) != (unsigned int)ioarg->buffer) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.disconnect = (unsigned int)ioarg->buffer;
+ }else{
+ SPACEWIRE_DBG("SPACEWIRE_IOCTRL_SET_DISCONNECT: not implemented for GRSPW2\n");
+ }
+ break;
+ case SPACEWIRE_IOCTRL_SET_PROMISCUOUS:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_PROMISCUOUS %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ SPW_CTRL_WRITE(pDev, SPW_CTRL_READ(pDev) | ((unsigned int)ioarg->buffer << 5));
+ if (((SPW_CTRL_READ(pDev) >> 5) & 1) != (unsigned int)ioarg->buffer) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.promiscuous = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_RMAPEN:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_RMAPEN %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ SPW_CTRL_WRITE(pDev, (SPW_CTRL_READ(pDev) & 0xFFFEFFFF) | ((unsigned int)ioarg->buffer << 16));
+ if (((SPW_CTRL_READ(pDev) >> 16) & 1) != (unsigned int)ioarg->buffer) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.rmapen = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_RMAPBUFDIS:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_RMAPBUFDIS %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ SPW_CTRL_WRITE(pDev, (SPW_CTRL_READ(pDev) & 0xFFFDFFFF) | ((unsigned int)ioarg->buffer << 17));
+ if (((SPW_CTRL_READ(pDev) >> 17) & 1) != (unsigned int)ioarg->buffer) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.rmapbufdis = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_CHECK_RMAP:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_CHECK_RMAP %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev->config.check_rmap_err = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_RM_PROT_ID:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_RM_PROT_ID %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev->config.rm_prot_id = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_KEEP_SOURCE:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_KEEP_SOURCE %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev->config.keep_source = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_TXBLOCK:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_TXBLOCK %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev->config.tx_blocking = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_TXBLOCK_ON_FULL:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_TXBLOCK_ON_FULL %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev->config.tx_block_on_full = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_DISABLE_ERR:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_DISABLE_ERR %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev->config.disable_err = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_LINK_ERR_IRQ:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_LINK_ERR_IRQ %i \n", (unsigned int)ioarg->buffer);
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "CTRL REG: %x\n", SPW_CTRL_READ(pDev));
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ tmp = (SPW_CTRL_READ(pDev) & 0xFFFFFDF7) | ((unsigned int)ioarg->buffer << 9);
+ if (tmp & (SPW_CTRL_LI|SPW_CTRL_TQ))
+ tmp |= SPW_CTRL_IE;
+ SPW_CTRL_WRITE(pDev, tmp);
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "CTRL REG: %x\n", SPW_CTRL_READ(pDev));
+ if (((SPW_CTRL_READ(pDev) >> 9) & 1) != (unsigned int)ioarg->buffer) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.link_err_irq = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_EVENT_ID:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_EVENT_ID %i \n", (unsigned int)ioarg->buffer);
+ pDev->config.event_id = (rtems_id)ioarg->buffer;
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "Event id: %i\n", pDev->config.event_id);
+ break;
+
+ /* Change MAX Packet size by:
+ * - stop RX/TX (if on)
+ * - wait for hw to complete RX DMA (if on)
+ * - reallocate buffers with new size
+ * - tell hw about new size & start RX/TX again (if previously on)
+ */
+ case SPACEWIRE_IOCTRL_SET_PACKETSIZE:
+ if (ioarg->buffer == NULL)
+ return RTEMS_INVALID_NAME;
+ ps = (spw_ioctl_packetsize*) ioarg->buffer;
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_RXPACKETSIZE %i \n", (unsigned int)ioarg->buffer);
+
+ tmp = pDev->running;
+
+ if ( pDev->running ){
+ /* Stop RX */
+ grspw_hw_stop(pDev,1,1);
+
+ /* If packetsize fails it is good to know if in running mode */
+ pDev->running = 0;
+
+ /* Wait for Receiver to finish pending DMA transfers if any */
+ grspw_hw_wait_rx_inactive(pDev);
+ }
+
+ /* Save new buffer sizes */
+ pDev->rxbufsize = ((ps->rxsize+7)&~7);
+ pDev->txdbufsize = ps->txdsize;
+ pDev->txhbufsize = ps->txhsize;
+ pDev->config.rxmaxlen = pDev->rxbufsize;
+
+ /* Free previous buffers & allocate buffers with new size */
+ if (grspw_buffer_alloc(pDev))
+ return RTEMS_NO_MEMORY;
+
+ /* if RX was active before, we reactivate it again */
+ if ( tmp ) {
+ if ( (status = grspw_hw_startup(pDev,-1)) != RTEMS_SUCCESSFUL ) {
+ return status;
+ }
+ pDev->running = 1;
+ }
+#if 0
+ /* Rewrite previous config which was wasted due to reset in hw_startup */
+ SPW_WRITE(&pDev->regs->nodeaddr, pDev->config.nodeaddr);
+ SPW_WRITE(&pDev->regs->destkey, pDev->config.destkey);
+ SPW_WRITE(&pDev->regs->clkdiv, pDev->config.clkdiv);
+ SPW_WRITE(&pDev->regs->timer, pDev->config.timer | ( (pDev->config.disconnect & 0x3FF) << 12) );
+ SPW_CTRL_WRITE(pDev, (SPW_CTRL_READ(pDev) & !(SPW_CTRL_LINKSTART | SPW_CTRL_PM | SPW_CTRL_RE | SPW_CTRL_RD | SPW_CTRL_TT | SPW_CTRL_TR)) | \
+ (pDev->config.promiscuous << 5) | (pDev->config.rmapen << 16) | (pDev->config.rmapbufdis << 17) | \
+ (pDev->config.linkdisabled) | (pDev->config.linkstart << 1));
+#endif
+ break;
+ case SPACEWIRE_IOCTRL_GET_CONFIG:
+ if (ioarg->buffer == NULL)
+ return RTEMS_INVALID_NAME;
+ SPACEWIRE_DBG2("SPACEWIRE_IOCTRL_GET_CONFIG \n");
+ (*(spw_config *)ioarg->buffer).nodeaddr = pDev->config.nodeaddr;
+ (*(spw_config *)ioarg->buffer).nodemask = pDev->config.nodemask;
+ (*(spw_config *)ioarg->buffer).destkey = pDev->config.destkey;
+ (*(spw_config *)ioarg->buffer).clkdiv = pDev->config.clkdiv;
+ (*(spw_config *)ioarg->buffer).rxmaxlen = pDev->config.rxmaxlen;
+ (*(spw_config *)ioarg->buffer).timer = pDev->config.timer;
+ (*(spw_config *)ioarg->buffer).disconnect = pDev->config.disconnect;
+ (*(spw_config *)ioarg->buffer).promiscuous = pDev->config.promiscuous;
+ (*(spw_config *)ioarg->buffer).rmapen = pDev->config.rmapen;
+ (*(spw_config *)ioarg->buffer).rmapbufdis = pDev->config.rmapbufdis;
+ (*(spw_config *)ioarg->buffer).check_rmap_err = pDev->config.check_rmap_err;
+ (*(spw_config *)ioarg->buffer).rm_prot_id = pDev->config.rm_prot_id;
+ (*(spw_config *)ioarg->buffer).tx_blocking = pDev->config.tx_blocking;
+ (*(spw_config *)ioarg->buffer).disable_err = pDev->config.disable_err;
+ (*(spw_config *)ioarg->buffer).link_err_irq = pDev->config.link_err_irq;
+ (*(spw_config *)ioarg->buffer).event_id = pDev->config.event_id;
+ (*(spw_config *)ioarg->buffer).is_rmap = pDev->config.is_rmap;
+ (*(spw_config *)ioarg->buffer).is_rmapcrc = pDev->config.is_rmapcrc;
+ (*(spw_config *)ioarg->buffer).is_rxunaligned = pDev->config.is_rxunaligned;
+ (*(spw_config *)ioarg->buffer).linkdisabled = pDev->config.linkdisabled;
+ (*(spw_config *)ioarg->buffer).linkstart = pDev->config.linkstart;
+ (*(spw_config *)ioarg->buffer).rx_blocking = pDev->config.rx_blocking;
+ (*(spw_config *)ioarg->buffer).tx_block_on_full = pDev->config.tx_block_on_full;
+ (*(spw_config *)ioarg->buffer).keep_source = pDev->config.keep_source;
+ (*(spw_config *)ioarg->buffer).rtimeout = pDev->config.rtimeout;
+ break;
+ case SPACEWIRE_IOCTRL_GET_LINK_STATUS:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_GET_STATUS=%i \n", (unsigned int)((SPW_STATUS_READ(pDev) >> 21) & 0x7));
+ *(unsigned int *)ioarg->buffer = (unsigned int )((SPW_STATUS_READ(pDev) >> 21) & 0x7);
+ break;
+ case SPACEWIRE_IOCTRL_GET_STATISTICS:
+ if (ioarg->buffer == NULL)
+ return RTEMS_INVALID_NAME;
+ SPACEWIRE_DBG2("SPACEWIRE_IOCTRL_GET_STATISTICS \n");
+ (*(spw_stats *)ioarg->buffer).tx_link_err = pDev->stat.tx_link_err;
+ (*(spw_stats *)ioarg->buffer).rx_rmap_header_crc_err = pDev->stat.rx_rmap_header_crc_err;
+ (*(spw_stats *)ioarg->buffer).rx_rmap_data_crc_err = pDev->stat.rx_rmap_data_crc_err;
+ (*(spw_stats *)ioarg->buffer).rx_eep_err = pDev->stat.rx_eep_err;
+ (*(spw_stats *)ioarg->buffer).rx_truncated = pDev->stat.rx_truncated;
+ (*(spw_stats *)ioarg->buffer).parity_err = pDev->stat.parity_err;
+ (*(spw_stats *)ioarg->buffer).escape_err = pDev->stat.escape_err;
+ (*(spw_stats *)ioarg->buffer).credit_err = pDev->stat.credit_err;
+ (*(spw_stats *)ioarg->buffer).write_sync_err = pDev->stat.write_sync_err;
+ (*(spw_stats *)ioarg->buffer).disconnect_err = pDev->stat.disconnect_err;
+ (*(spw_stats *)ioarg->buffer).early_ep = pDev->stat.early_ep;
+ (*(spw_stats *)ioarg->buffer).invalid_address = pDev->stat.invalid_address;
+ (*(spw_stats *)ioarg->buffer).packets_sent = pDev->stat.packets_sent;
+ (*(spw_stats *)ioarg->buffer).packets_received = pDev->stat.packets_received;
+ break;
+ case SPACEWIRE_IOCTRL_CLR_STATISTICS:
+ SPACEWIRE_DBG2("SPACEWIRE_IOCTRL_CLR_STATISTICS \n");
+ pDev->stat.tx_link_err = 0;
+ pDev->stat.rx_rmap_header_crc_err = 0;
+ pDev->stat.rx_rmap_data_crc_err = 0;
+ pDev->stat.rx_eep_err = 0;
+ pDev->stat.rx_truncated = 0;
+ pDev->stat.parity_err = 0;
+ pDev->stat.escape_err = 0;
+ pDev->stat.credit_err = 0;
+ pDev->stat.write_sync_err = 0;
+ pDev->stat.disconnect_err = 0;
+ pDev->stat.early_ep = 0;
+ pDev->stat.invalid_address = 0;
+ pDev->stat.packets_sent = 0;
+ pDev->stat.packets_received = 0;
+ break;
+ case SPACEWIRE_IOCTRL_SEND:
+ if (ioarg->buffer == NULL)
+ return RTEMS_INVALID_NAME;
+ args = (spw_ioctl_pkt_send *)ioarg->buffer;
+ args->sent = 0;
+
+ /* is link up? */
+ if ( !pDev->running ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ SPACEWIRE_DBGC(DBGSPW_IOCALLS, "write [%i,%i]: hlen: %i hbuf:0x%x dlen:%i dbuf:0x%x\n", major, minor,
+ (unsigned int)args->hlen, (int)args->hdr,(unsigned int)args->dlen, (int)args->data);
+
+ if ((args->hlen > pDev->txhbufsize) || (args->dlen > pDev->txdbufsize) ||
+ ((args->hlen+args->dlen) < 1) ||
+ ((args->hdr == NULL) && (args->hlen != 0)) || ((args->data == NULL) && (args->dlen != 0))) {
+ return RTEMS_INVALID_NAME;
+ }
+ while ((args->sent = grspw_hw_send(pDev, args->hlen, args->hdr, args->dlen, args->data, args->options)) == 0) {
+ if (pDev->config.tx_block_on_full == 1) {
+ SPACEWIRE_DBG2("Tx Block on full \n");
+ rtems_semaphore_obtain(pDev->txsp, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ } else {
+ SPACEWIRE_DBG2("Tx non blocking return when full \n");
+ return RTEMS_RESOURCE_IN_USE;
+ }
+ }
+ SPACEWIRE_DBGC(DBGSPW_IOCALLS, "Tx ioctl return: %i \n", args->sent);
+ break;
+
+ case SPACEWIRE_IOCTRL_LINKDISABLE:
+ pDev->config.linkdisabled = 1;
+ pDev->config.linkstart = 0;
+ if ( pDev->core_ver != 3 ) {
+ SPW_CTRL_WRITE(pDev, (SPW_CTRL_READ(pDev) & 0xFFFFFFFC) | 1);
+ if ((SPW_CTRL_READ(pDev) & 3) != 1) {
+ return RTEMS_IO_ERROR;
+ }
+ }
+ break;
+
+ case SPACEWIRE_IOCTRL_LINKSTART:
+ pDev->config.linkdisabled = 0;
+ pDev->config.linkstart = 1;
+ if ( pDev->core_ver != 3 ) {
+ SPW_CTRL_WRITE(pDev, (SPW_CTRL_READ(pDev) & 0xFFFFFFFC) | 2);
+ if ((SPW_CTRL_READ(pDev) & 3) != 2) {
+ return RTEMS_IO_ERROR;
+ }
+ }
+ break;
+
+ /* Calculate timer register from GRSPW Core frequency
+ * Also possible to set disconnect and timer64 from
+ * - SPACEWIRE_IOCTRL_SET_DISCONNECT
+ * - SPACEWIRE_IOCTRL_SET_TIMER
+ */
+ case SPACEWIRE_IOCTRL_SET_COREFREQ:
+ pDev->core_freq_khz = (unsigned int)ioarg->buffer;
+ if ( pDev->core_freq_khz == 0 ){
+ /* Get GRSPW clock frequency from system clock.
+ * System clock has been read from timer inited
+ * by RTEMS loader (mkprom)
+ */
+ drvmgr_freq_get(pDev->dev, DEV_APB_SLV,
+ &pDev->core_freq_khz);
+ /* Convert from Hz -> kHz */
+ pDev->core_freq_khz = pDev->core_freq_khz / 1000;
+ }
+
+ /* Only GRSPW1 needs the Timer64 and Disconnect values
+ * GRSPW2 and onwards doesn't have this register.
+ */
+ if ( pDev->core_ver <= 1 ){
+ /* Calculate Timer64 & Disconnect */
+ pDev->config.timer = grspw_calc_timer64(pDev->core_freq_khz);
+ pDev->config.disconnect = grspw_calc_disconnect(pDev->core_freq_khz);
+
+ /* Set Timer64 & Disconnect Register */
+ SPW_WRITE(&pDev->regs->timer,
+ (SPW_READ(&pDev->regs->timer) & 0xFFC00000) |
+ ((pDev->config.disconnect & 0x3FF)<<12) |
+ (pDev->config.timer & 0xFFF));
+
+ /* Check that the registers were written successfully */
+ tmp = SPW_READ(&pDev->regs->timer) & 0x003fffff;
+ if ( ((tmp & 0xFFF) != pDev->config.timer) ||
+ (((tmp >> 12) & 0x3FF) != pDev->config.disconnect) ) {
+ return RTEMS_IO_ERROR;
+ }
+ }
+ break;
+
+ case SPACEWIRE_IOCTRL_START:
+ if ( pDev->running ){
+ return RTEMS_INVALID_NAME;
+ }
+
+ /* Get timeout from userspace
+ * timeout:
+ * - -1 = Default timeout
+ * - less than -1 = forever
+ * - 0 = no wait, proceed if link is up
+ * - positive = specifies number of system clock ticks that
+ * startup will wait for link to enter ready mode.
+ */
+ timeout = (int)ioarg->buffer;
+
+ if ( (ret=grspw_hw_startup(pDev,timeout)) != RTEMS_SUCCESSFUL ) {
+ return ret;
+ }
+ pDev->running = 1;
+ /* Register interrupt routine and unmask IRQ */
+ drvmgr_interrupt_register(pDev->dev, 0, "grspw", grspw_interrupt, pDev);
+
+ break;
+
+ case SPACEWIRE_IOCTRL_STOP:
+ if ( !pDev->running ){
+ return RTEMS_INVALID_NAME;
+ }
+ /* Disable interrupts */
+ drvmgr_interrupt_unregister(dev, 0, grspw_interrupt, pDev);
+
+ pDev->running = 0;
+
+ /* Stop Receiver and transmitter */
+ grspw_hw_stop(pDev,1,1);
+ break;
+
+ /* Set time-code control register bits, and Enables/Disables
+ * Time code interrupt, make sure to connect the callback
+ * grspw_timecode_callback if using interrupts.
+ */
+ case SPACEWIRE_IOCTRL_SET_TCODE_CTRL:
+ tmp = (unsigned int)ioarg->buffer;
+ mask = tmp & (SPACEWIRE_TCODE_CTRL_IE_MSK|SPACEWIRE_TCODE_CTRL_TT_MSK|SPACEWIRE_TCODE_CTRL_TR_MSK);
+ mask <<= 8;
+ tmp &= mask;
+ tmp = (SPW_CTRL_READ(pDev) & ~(mask | SPW_CTRL_IE)) | tmp;
+ if (tmp & (SPW_CTRL_LI|SPW_CTRL_TQ))
+ tmp |= SPW_CTRL_IE;
+ SPW_CTRL_WRITE(pDev, tmp);
+ break;
+
+ /* Set time register and optionaly send a time code */
+ case SPACEWIRE_IOCTRL_SET_TCODE:
+ tmp = (unsigned int)ioarg->buffer;
+ /* Set timecode register */
+ if (tmp & SPACEWIRE_TCODE_SET) {
+ SPW_WRITE(&pDev->regs->time,
+ ((SPW_READ(&pDev->regs->time) & ~(0xff)) |
+ (tmp & SPACEWIRE_TCODE_TCODE)));
+ }
+ /* Send timecode directly (tick-in) ? */
+ if (tmp & SPACEWIRE_TCODE_TX) {
+ SPW_CTRL_WRITE(pDev,
+ ((SPW_CTRL_READ(pDev) & ~(SPW_CTRL_TI)) | SPW_CTRL_TI));
+ }
+ break;
+
+ /* Read time code register and tick-out status bit */
+ case SPACEWIRE_IOCTRL_GET_TCODE:
+ tmp = (unsigned int)ioarg->buffer;
+ if ( !tmp ){
+ return RTEMS_INVALID_NAME;
+ }
+
+ /* Copy timecode register */
+ if (SPW_READ(&pDev->regs->status) & SPW_STATUS_TO) {
+ *(unsigned int *)tmp = (1 << 8) | SPW_READ(&pDev->regs->time);
+ } else {
+ *(unsigned int *)tmp = SPW_READ(&pDev->regs->time);
+ }
+ break;
+
+ case SPACEWIRE_IOCTRL_SET_READ_TIMEOUT:
+ pDev->config.rtimeout = (unsigned int)ioarg->buffer;
+ break;
+
+ default:
+ return RTEMS_NOT_IMPLEMENTED;
+ }
+
+ SPACEWIRE_DBGC(DBGSPW_IOCALLS, "SPW_IOCTRL Return\n");
+ return RTEMS_SUCCESSFUL;
+}
+
+/* ============================================================================== */
+
+/* Program the DMA channel 0 maximum RX packet length from the cached
+ * configuration and verify the value by reading the register back.
+ * Returns 1 on success, 0 when the hardware did not accept the value.
+ */
+static int grspw_set_rxmaxlen(GRSPW_DEV *pDev) {
+ unsigned int readback;
+
+ SPW_WRITE(&pDev->regs->dma0rxmax, pDev->config.rxmaxlen);
+ readback = SPW_READ(&pDev->regs->dma0rxmax);
+ return (readback == pDev->config.rxmaxlen) ? 1 : 0;
+}
+
+/* One-time hardware/software init: set up the RX/TX descriptor table
+ * pointers (CPU view and remote/DMA view) and record which optional
+ * features the core implements, as advertised by the control register.
+ * Always returns 0.
+ */
+static int grspw_hw_init(GRSPW_DEV *pDev) {
+ unsigned int capbits;
+
+ /* Descriptor tables: RX table first, TX table directly after it */
+ pDev->rx = (SPACEWIRE_RXBD *) pDev->ptr_bd0;
+ pDev->tx = (SPACEWIRE_TXBD *) (pDev->ptr_bd0 + SPACEWIRE_BDTABLE_SIZE);
+
+ /* Same layout as seen from the remote (DMA) address space */
+ pDev->rx_remote = (unsigned int)pDev->ptr_bd0_remote;
+ pDev->tx_remote = pDev->rx_remote + SPACEWIRE_BDTABLE_SIZE;
+
+ SPACEWIRE_DBG("hw_init [minor %i]\n", pDev->minor);
+
+ /* Feature presence bits from the control register */
+ capbits = SPW_CTRL_READ(pDev);
+ pDev->config.is_rmap = capbits & SPW_CTRL_RA;
+ pDev->config.is_rxunaligned = capbits & SPW_CTRL_RX;
+ pDev->config.is_rmapcrc = capbits & SPW_CTRL_RC;
+ return 0;
+}
+
+/* Poll until the link state machine reports run state (state 5).
+ *
+ * timeout (in ticks): -1 selects the default SPACEWIRE_INIT_TIMEOUT,
+ * values below -1 wait forever, 0 and positive values bound the wait.
+ * Returns 0 when the link is up, 1 on timeout.
+ */
+static int grspw_hw_waitlink (GRSPW_DEV *pDev, int timeout)
+{
+ int elapsed = 0;
+
+ /* A DMA-only GRSPW2 connected to the SpW router has no actual link
+ * interface, so there is nothing to wait for.
+ */
+ if (pDev->core_ver == 3)
+ return 0;
+
+ if ( timeout == -1 )
+ timeout = SPACEWIRE_INIT_TIMEOUT; /* default timeout */
+
+ while (SPW_LINKSTATE(SPW_STATUS_READ(pDev)) != 5) {
+ /* a timeout below -1 means wait forever */
+ if ( (timeout >= 0) && (elapsed >= timeout) )
+ return 1; /* link did not come up in time */
+
+ /* Sleep for 10 ticks before polling again */
+ rtems_task_wake_after(10);
+ elapsed += 10;
+ }
+ return 0;
+}
+
+/* Reset the GRSPW core: issue a core reset, acknowledge all pending
+ * write-one-to-clear status bits, and command the link state machine
+ * to start. The repeated status writes are intentional — they act as
+ * a delay so the required number of clock cycles passes after reset
+ * before the core is used again.
+ */
+static void grspw_hw_reset(GRSPW_DEV *pDev)
+{
+ SPW_CTRL_WRITE(pDev, SPW_CTRL_RESET); /*reset core*/
+ SPW_STATUS_WRITE(pDev, SPW_STATUS_TO | SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE |
+ SPW_STATUS_WE | SPW_STATUS_IA | SPW_STATUS_EE); /*clear status*/
+
+ /* Add extra writes to make sure we wait the number of clocks required
+ * after reset
+ */
+ SPW_STATUS_WRITE(pDev, SPW_STATUS_TO | SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE |
+ SPW_STATUS_WE | SPW_STATUS_IA | SPW_STATUS_EE); /*clear status*/
+ SPW_STATUS_WRITE(pDev, SPW_STATUS_TO | SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE |
+ SPW_STATUS_WE | SPW_STATUS_IA | SPW_STATUS_EE); /*clear status*/
+ SPW_STATUS_WRITE(pDev, SPW_STATUS_TO | SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE |
+ SPW_STATUS_WE | SPW_STATUS_IA | SPW_STATUS_EE); /*clear status*/
+ SPW_STATUS_WRITE(pDev, SPW_STATUS_TO | SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE |
+ SPW_STATUS_WE | SPW_STATUS_IA | SPW_STATUS_EE); /*clear status*/
+
+ SPW_CTRL_WRITE(pDev, SPW_CTRL_LINKSTART); /*start link core*/
+}
+
+/* Snapshot the current hardware register settings into pDev->config:
+ * node address/mask, destination key, clock divisor, control register
+ * option bits and (GRSPW1 only) the timer/disconnect values.
+ */
+static void grspw_hw_read_config(GRSPW_DEV *pDev)
+{
+ unsigned int reg;
+
+ reg = SPW_READ(&pDev->regs->nodeaddr);
+ pDev->config.nodeaddr = reg & 0xFF;
+ pDev->config.nodemask = (reg >> 8) & 0xFF;
+ pDev->config.destkey = SPW_READ(&pDev->regs->destkey) & 0xFF;
+ pDev->config.clkdiv = SPW_READ(&pDev->regs->clkdiv) & 0xFFFF;
+
+ reg = SPW_CTRL_READ(pDev);
+ pDev->config.promiscuous = (reg >> 5) & 1;
+ pDev->config.rmapen = (reg >> 16) & 1;
+ pDev->config.rmapbufdis = (reg >> 17) & 1;
+ pDev->config.is_rmap = (reg >> 31) & 1;
+ pDev->config.is_rxunaligned = (reg >> 30) & 1;
+ pDev->config.is_rmapcrc = (reg >> 29) & 1;
+ pDev->config.linkdisabled = reg & 1;
+ pDev->config.linkstart = (reg >> 1) & 1;
+
+ /* The timer/disconnect register only exists on GRSPW1 cores */
+ if ( pDev->core_ver <= 1 ) {
+ reg = SPW_READ(&pDev->regs->timer);
+ pDev->config.timer = reg & 0xFFF;
+ pDev->config.disconnect = (reg >> 12) & 0x3FF;
+ } else {
+ pDev->config.timer = 0;
+ pDev->config.disconnect = 0;
+ }
+}
+
+/* Bring up DMA channel 0: clear status, wait for the link, then build
+ * the TX and RX descriptor rings and enable the receiver.
+ *
+ * timeout is given in ticks and is forwarded to grspw_hw_waitlink().
+ *
+ * Returns RTEMS_SUCCESSFUL, RTEMS_TIMEOUT when the link did not come
+ * up, or RTEMS_IO_ERROR when the DMA control register could not be
+ * cleared.
+ */
+static int grspw_hw_startup (GRSPW_DEV *pDev, int timeout)
+{
+ int i;
+ unsigned int dmactrl;
+
+ SPW_WRITE(&pDev->regs->status, (SPW_STATUS_TO|SPW_STATUS_CE|SPW_STATUS_ER|SPW_STATUS_DE|SPW_STATUS_PE|SPW_STATUS_WE|SPW_STATUS_IA|SPW_STATUS_EE)); /*clear status*/
+
+ if (grspw_hw_waitlink(pDev,timeout)) {
+ SPACEWIRE_DBG2("Device open. Link is not up\n");
+ return RTEMS_TIMEOUT;
+ }
+
+ /* Ack pending DMA status bits; register should then read back zero */
+ SPW_WRITE(&pDev->regs->dma0ctrl, SPW_DMACTRL_PS | SPW_DMACTRL_PR | SPW_DMACTRL_TA | SPW_DMACTRL_RA); /*clear status, set ctrl*/
+
+ if ((dmactrl = SPW_READ(&pDev->regs->dma0ctrl)) != 0) {
+ SPACEWIRE_DBG2("DMACtrl is not cleared\n");
+ return RTEMS_IO_ERROR;
+ }
+
+ /* prepare transmit buffers: each descriptor gets a fixed header and
+ * data buffer slot in the remote (DMA-visible) buffer areas */
+ for (i = 0; i < pDev->txbufcnt; i++) {
+ pDev->tx[i].ctrl = 0;
+ pDev->tx[i].addr_header = ((unsigned int)&pDev->ptr_txhbuf0_remote[0]) + (i * pDev->txhbufsize);
+ pDev->tx[i].addr_data = ((unsigned int)&pDev->ptr_txdbuf0_remote[0]) + (i * pDev->txdbufsize);
+ }
+ pDev->tx_cur = 0;
+ pDev->tx_sent = 0;
+ pDev->tx_all_in_use = 0;
+
+ /* prepare receive buffers: enable all descriptors with IRQ; the last
+ * one gets the wrap bit so the ring starts over */
+ for (i = 0; i < pDev->rxbufcnt; i++) {
+ if (i+1 == pDev->rxbufcnt) {
+ pDev->rx[i].ctrl = SPW_RXBD_IE | SPW_RXBD_EN | SPW_RXBD_WR;
+ } else {
+ pDev->rx[i].ctrl = SPW_RXBD_IE | SPW_RXBD_EN;
+ }
+ pDev->rx[i].addr = ((unsigned int)&pDev->ptr_rxbuf0_remote[0]) + (i * pDev->rxbufsize);
+ }
+ pDev->rxcur = 0;
+ pDev->rxbufcur = -1; /* -1 = no packet currently being consumed */
+ grspw_set_rxmaxlen(pDev);
+
+ SPW_WRITE(&pDev->regs->dma0txdesc, pDev->tx_remote);
+ SPW_WRITE(&pDev->regs->dma0rxdesc, pDev->rx_remote);
+
+ /* start RX */
+ dmactrl = SPW_READ(&pDev->regs->dma0ctrl);
+ SPW_WRITE(&pDev->regs->dma0ctrl, (dmactrl & SPW_PREPAREMASK_RX) | SPW_DMACTRL_RD | SPW_DMACTRL_RXEN | SPW_DMACTRL_NS | SPW_DMACTRL_TXIE | SPW_DMACTRL_RXIE);
+
+ SPACEWIRE_DBGC(DBGSPW_TX,"0x%x: setup complete\n", (unsigned int)pDev->regs);
+ return RTEMS_SUCCESSFUL;
+}
+
+/* Block until the receiver DMA engine reports inactive */
+static void grspw_hw_wait_rx_inactive(GRSPW_DEV *pDev)
+{
+ for (;;) {
+ if ( (SPW_READ(&pDev->regs->dma0ctrl) & SPW_DMACTRL_RX) == 0 )
+ break;
+ /* RX still busy — yield one tick; a low-frequency GRSPW clock or
+ * a very large incoming packet can keep the receiver active for
+ * a while.
+ */
+ rtems_task_wake_after(1);
+ }
+}
+
+/* Stop the rx or/and tx by disabling the receiver/transmitter of
+ * DMA channel 0. Returns RTEMS_SUCCESSFUL.
+ */
+static int grspw_hw_stop (GRSPW_DEV *pDev, int rx, int tx)
+{
+ unsigned int dmactrl;
+ unsigned int clearbits = 0;
+
+ if ( rx )
+ clearbits |= SPW_DMACTRL_RXEN|SPW_DMACTRL_RXIE|SPW_DMACTRL_RD;
+ if ( tx )
+ clearbits |= SPW_DMACTRL_TXEN|SPW_DMACTRL_TXIE;
+
+ /* RA/PR/AI are write-one-to-clear status flags: write them as zero
+ * so pending status is not accidentally acknowledged here.
+ */
+ clearbits |= SPW_DMACTRL_RA|SPW_DMACTRL_PR|SPW_DMACTRL_AI;
+
+ dmactrl = SPW_READ(&pDev->regs->dma0ctrl);
+ SPW_WRITE(&pDev->regs->dma0ctrl, dmactrl & ~clearbits);
+ return RTEMS_SUCCESSFUL;
+}
+
+
+
+/* Enqueue one packet (optional header + data) on the TX descriptor ring
+ * and enable the transmitter.
+ *
+ * hlen/hdr: header length and buffer
+ * dlen/data: data length and buffer
+ * options: GRSPW_PKTSEND_OPTION_* flags controlling header/data CRC
+ * generation and the no-CRC length field
+ *
+ * The caller is expected to have validated the lengths against the
+ * allocated buffer sizes; a NULL buffer is only legal together with a
+ * zero length. Returns hlen+dlen on success, or 0 when the current TX
+ * descriptor is still in use (ring full).
+ *
+ * In blocking mode (config.tx_blocking) the function waits on the TX
+ * semaphore until the transmitter has drained.
+ */
+int grspw_hw_send(GRSPW_DEV *pDev, unsigned int hlen, char *hdr, unsigned int dlen, char *data, unsigned int options)
+{
+ unsigned int dmactrl, ctrl;
+#ifdef DEBUG_SPACEWIRE_ONOFF
+ unsigned int k;
+#endif
+ rtems_interrupt_level level;
+ unsigned int cur = pDev->tx_cur, bdctrl;
+ char *txh = pDev->ptr_txhbuf0 + (cur * pDev->txhbufsize);
+ char *txd = pDev->ptr_txdbuf0 + (cur * pDev->txdbufsize);
+ char *txh_remote = pDev->ptr_txhbuf0_remote + (cur * pDev->txhbufsize);
+ char *txd_remote = pDev->ptr_txdbuf0_remote + (cur * pDev->txdbufsize);
+
+ ctrl = SPW_READ((volatile void *)&pDev->tx[cur].ctrl);
+
+ /* Descriptor still owned by hardware: ring is full */
+ if (ctrl & SPW_TXBD_EN) {
+ return 0;
+ }
+
+ /* Copy payload and header into the descriptor's buffers. Guard the
+ * copies: callers may legally pass hdr==NULL/hlen==0 or
+ * data==NULL/dlen==0, and memcpy() with a NULL pointer argument is
+ * undefined behavior even for a zero length.
+ */
+ if (dlen != 0)
+ memcpy(&txd[0], data, dlen);
+ if (hlen != 0)
+ memcpy(&txh[0], hdr, hlen);
+
+#ifdef DEBUG_SPACEWIRE_ONOFF
+ if (DEBUG_SPACEWIRE_FLAGS & DBGSPW_DUMP) {
+ for (k = 0; k < hlen; k++){
+ if (k % 16 == 0) {
+ printf ("\n");
+ }
+ printf ("%.2x(%c) ",txh[k] & 0xff,isprint(txh[k] & 0xff) ? txh[k] & 0xff : ' ');
+ }
+ printf ("\n");
+ }
+ if (DEBUG_SPACEWIRE_FLAGS & DBGSPW_DUMP) {
+ for (k = 0; k < dlen; k++){
+ if (k % 16 == 0) {
+ printf ("\n");
+ }
+ printf ("%.2x(%c) ",txd[k] & 0xff,isprint(txd[k] & 0xff) ? txd[k] & 0xff : ' ');
+ }
+ printf ("\n");
+ }
+#endif
+
+ /* Fill in descriptor with the remote (DMA-visible) buffer addresses */
+ pDev->tx[cur].addr_header = (unsigned int)txh_remote;
+ pDev->tx[cur].len = dlen;
+ pDev->tx[cur].addr_data = (unsigned int)txd_remote;
+
+ bdctrl = SPW_TXBD_IE | SPW_TXBD_EN | hlen;
+ if ( options & GRSPW_PKTSEND_OPTION_HDR_CRC )
+ bdctrl |= SPW_TXBD_HC;
+ if ( options & GRSPW_PKTSEND_OPTION_DATA_CRC )
+ bdctrl |= SPW_TXBD_DC;
+ bdctrl |= options & GRSPW_PKTSEND_OPTION_NOCRCLEN_MASK;
+
+ /* Update counters; IRQs disabled since the ISR also touches the
+ * ring state */
+ rtems_interrupt_disable(level);
+ if (pDev->tx_cur == (pDev->txbufcnt - 1) ) {
+ bdctrl |= SPW_TXBD_WR; /* last descriptor: wrap to ring start */
+ }
+ pDev->tx[cur].ctrl = bdctrl;
+
+ dmactrl = SPW_READ(&pDev->regs->dma0ctrl);
+ SPW_WRITE(&pDev->regs->dma0ctrl, (dmactrl & SPW_PREPAREMASK_TX) | SPW_DMACTRL_TXEN | SPW_DMACTRL_TXIE);
+
+ pDev->tx_cur = (pDev->tx_cur + 1) % pDev->txbufcnt;
+ if (pDev->tx_cur == pDev->tx_sent) {
+ pDev->tx_all_in_use = 1;
+ }
+ rtems_interrupt_enable(level);
+
+ /* In blocking mode wait until message is sent */
+ if (pDev->config.tx_blocking) {
+ while ( SPW_READ(&pDev->regs->dma0ctrl) & SPW_DMACTRL_TXEN) {
+ /* if changed to blocking mode */
+ SPACEWIRE_DBGC(DBGSPW_TX, "Tx blocking\n");
+ rtems_semaphore_obtain(pDev->txsp, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ }
+ }
+ SPACEWIRE_DBGC(DBGSPW_TX, "0x%x: transmitted <%i> bytes\n", (unsigned int) pDev->regs, dlen+hlen);
+ return hlen + dlen;
+}
+
+/* Copy up to c bytes of the packet in the current RX descriptor into b.
+ *
+ * The leading bytes of each packet (source address and optionally the
+ * protocol id) may be skipped depending on the promiscuous/keep_source/
+ * rm_prot_id configuration. A packet can be consumed over several calls:
+ * pDev->rxbufcur tracks the read offset into the current packet and is
+ * -1 when no packet is partially consumed. The descriptor is recycled
+ * (grspw_rxnext) once the whole packet has been delivered, or
+ * immediately on an errored packet.
+ *
+ * Returns the number of bytes copied (0 when no packet is available).
+ */
+static int grspw_hw_receive(GRSPW_DEV *pDev, char *b, int c) {
+ unsigned int len, rxlen, ctrl;
+ unsigned int cur;
+ unsigned int tmp;
+ unsigned int dump_start_len;
+ int i;
+ char *rxb;
+
+ if ( pDev->config.promiscuous || pDev->config.keep_source ) {
+ dump_start_len = 0; /* make sure address and prot can be read in promiscuous mode */
+ } else if (pDev->config.rm_prot_id) {
+ dump_start_len = 2; /* skip source address and protocol id */
+ } else {
+ dump_start_len = 1; /* default: skip only source address */
+ }
+
+ rxlen = 0;
+ cur = pDev->rxcur;
+ rxb = pDev->ptr_rxbuf0 + (cur * pDev->rxbufsize);
+
+ SPACEWIRE_DBGC(DBGSPW_RX, "0x%x: waitin packet at pos %i\n", (unsigned int) pDev->regs, cur);
+
+ /* Descriptor still enabled means hardware owns it: no packet yet */
+ ctrl = SPW_READ((volatile void *)&pDev->rx[cur].ctrl);
+ if (ctrl & SPW_RXBD_EN) {
+ return rxlen;
+ }
+ SPACEWIRE_DBGC(DBGSPW_RX, "checking packet\n");
+
+ len = SPW_RXBD_LENGTH & ctrl;
+ if (!((ctrl & SPW_RXBD_ERROR) || (pDev->config.check_rmap_err && (ctrl & SPW_RXBD_RMAPERROR)))) {
+ /* First read of this packet: count it and position the read
+ * offset past the bytes to be skipped */
+ if (pDev->rxbufcur == -1) {
+ SPACEWIRE_DBGC(DBGSPW_RX, "incoming packet len %i\n", len);
+ pDev->stat.packets_received++;
+ pDev->rxbufcur = dump_start_len;
+ }
+ /* tmp = bytes remaining in the packet; rxlen = bytes delivered
+ * this call (clamped to the caller's buffer size c) */
+ rxlen = tmp = len - pDev->rxbufcur;
+ SPACEWIRE_DBGC(DBGSPW_RX, "C %i\n", c);
+ SPACEWIRE_DBGC(DBGSPW_RX, "Dump %i\n", dump_start_len);
+ SPACEWIRE_DBGC(DBGSPW_RX, "Bufcur %i\n", pDev->rxbufcur);
+ SPACEWIRE_DBGC(DBGSPW_RX, "Rxlen %i\n", rxlen );
+ if (rxlen > c) {
+ rxlen = c;
+ }
+ /* With cache snooping a plain memcpy is coherent; otherwise read
+ * through MEM_READ* accessors, word-wise when both pointers are
+ * 32-bit aligned, byte-wise for the remainder */
+ if (CPU_SPARC_HAS_SNOOPING) {
+/* if ( 1 ) {*/
+ /*printf("RX_MEMCPY(0x%x, 0x%x, 0x%x)\n", (unsigned int)b, (unsigned int)(rxb+pDev->rxbufcur), (unsigned int)rxlen);*/
+ memcpy(b, rxb+pDev->rxbufcur, rxlen);
+ } else {
+ int left = rxlen;
+ /* Copy word wise if Aligned */
+ if ( (((int)b & 3) == 0) && (((int)(rxb+pDev->rxbufcur) & 3) == 0) ){
+ while(left>=32){
+ *(int *)(b+0) = MEM_READ32(rxb+pDev->rxbufcur+0);
+ *(int *)(b+4) = MEM_READ32(rxb+pDev->rxbufcur+4);
+ *(int *)(b+8) = MEM_READ32(rxb+pDev->rxbufcur+8);
+ *(int *)(b+12) = MEM_READ32(rxb+pDev->rxbufcur+12);
+ *(int *)(b+16) = MEM_READ32(rxb+pDev->rxbufcur+16);
+ *(int *)(b+20) = MEM_READ32(rxb+pDev->rxbufcur+20);
+ *(int *)(b+24) = MEM_READ32(rxb+pDev->rxbufcur+24);
+ *(int *)(b+28) = MEM_READ32(rxb+pDev->rxbufcur+28);
+ rxb+=32;
+ b+=32;
+ left-=32;
+ }
+ while(left>=4){
+ *(int *)b = MEM_READ32(rxb+pDev->rxbufcur);
+ rxb+=4;
+ b+=4;
+ left-=4;
+ }
+ }
+ for(i = 0; i < left; i++) {
+ b[i] = MEM_READ8(rxb+pDev->rxbufcur+i);
+ }
+ }
+
+ pDev->rxbufcur += rxlen;
+ /* Whole packet consumed: hand the descriptor back to hardware */
+ if (c >= tmp) {
+ SPACEWIRE_DBGC(DBGSPW_RX, "Next descriptor\n");
+ grspw_rxnext(pDev);
+ }
+ } else {
+ /* Errored packet: account for it and drop the descriptor */
+ check_rx_errors(pDev, ctrl);
+ grspw_rxnext(pDev);
+ }
+ return rxlen;
+}
+
+/* Re-enable the current RX descriptor (setting the wrap bit on the last
+ * one), advance to the next ring position, reset the partial-read offset
+ * and re-enable the receiver. Runs with interrupts disabled since the
+ * ISR also accesses the ring state.
+ */
+static void grspw_rxnext(GRSPW_DEV *pDev)
+{
+ unsigned int dmactrl;
+ unsigned int cur = pDev->rxcur;
+ unsigned int ctrl = 0;
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+
+ if (cur == (pDev->rxbufcnt - 1)) {
+ pDev->rx[cur].ctrl = ctrl | SPW_RXBD_EN | SPW_RXBD_IE | SPW_RXBD_WR;
+ cur = 0;
+ } else {
+ pDev->rx[cur].ctrl = ctrl | SPW_RXBD_EN | SPW_RXBD_IE;
+ cur++;
+ }
+
+ pDev->rxcur = cur;
+ pDev->rxbufcur = -1; /* -1 = no packet currently being consumed */
+
+ /* start RX */
+ dmactrl = SPW_READ(&pDev->regs->dma0ctrl);
+ SPW_WRITE(&pDev->regs->dma0ctrl, (dmactrl & SPW_PREPAREMASK_RX) | SPW_DMACTRL_RD | SPW_DMACTRL_RXEN | SPW_DMACTRL_RXIE | SPW_DMACTRL_NS);
+
+ rtems_interrupt_enable(level);
+}
+
+static void check_rx_errors(GRSPW_DEV *pDev, int ctrl)
+{
+ if (ctrl & SPW_RXBD_EEP) {
+ pDev->stat.rx_eep_err++;
+ }
+ if (ctrl & SPW_RXBD_EHC) {
+ if (pDev->config.check_rmap_err) {
+ pDev->stat.rx_rmap_header_crc_err++;
+ }
+ }
+ if (ctrl & SPW_RXBD_EDC) {
+ if (pDev->config.check_rmap_err) {
+ pDev->stat.rx_rmap_data_crc_err++;
+ }
+ }
+ if (ctrl & SPW_RXBD_ETR) {
+ pDev->stat.rx_truncated++;
+ }
+}
+
+
+/* Print register state and buffer pointers of one GRSPW device to
+ * stdout. The 'options' argument is currently unused.
+ */
+static void grspw_print_dev(struct drvmgr_dev *dev, int options)
+{
+ GRSPW_DEV *pDev = dev->priv;
+
+ /* Print */
+ printf("--- GRSPW %s ---\n", pDev->devName);
+ printf(" REGS: 0x%x\n", (unsigned int)pDev->regs);
+ printf(" IRQ: %d\n", pDev->irq);
+ printf(" CORE VERSION: %d\n", pDev->core_ver);
+ printf(" CTRL: 0x%x\n", pDev->regs->ctrl);
+ printf(" STATUS: 0x%x\n", pDev->regs->status);
+ printf(" DMA0CTRL: 0x%x\n", pDev->regs->dma0ctrl);
+ printf(" TXBD: 0x%x\n", (unsigned int)pDev->tx);
+ printf(" RXBD: 0x%x\n", (unsigned int)pDev->rx);
+}
+
+/* Print the state of every GRSPW device registered on this driver */
+void grspw_print(int options)
+{
+ struct drvmgr_dev *dev;
+
+ for (dev = grspw_drv_info.general.dev; dev != NULL; dev = dev->next_in_drv) {
+ grspw_print_dev(dev, options);
+ }
+}
diff --git a/bsps/shared/grlib/spw/grspw_pkt.c b/bsps/shared/grlib/spw/grspw_pkt.c
new file mode 100644
index 0000000000..208f5a14f7
--- /dev/null
+++ b/bsps/shared/grlib/spw/grspw_pkt.c
@@ -0,0 +1,3295 @@
+/*
+ * Cobham Gaisler GRSPW/GRSPW2 SpaceWire Kernel Library Interface for RTEMS.
+ *
+ * This driver can be used to implement a standard I/O system "char"-driver
+ * or used directly.
+ *
+ * COPYRIGHT (c) 2011
+ * Cobham Gaisler AB
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <bsp.h>
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <ctype.h>
+#include <rtems/bspIo.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/grspw_pkt.h>
+
+#include <grlib/grlib_impl.h>
+
+/*#define STATIC*/
+#define STATIC static
+
+/*#define GRSPW_DBG(args...) printk(args)*/
+#define GRSPW_DBG(args...)
+
+/* Per-channel DMA register block (0x20 bytes each), located at offset
+ * 0x20 + 0x20*N from the core register base for channel N.
+ */
+struct grspw_dma_regs {
+	volatile unsigned int ctrl;	/* DMA Channel Control */
+	volatile unsigned int rxmax;	/* RX Max Packet Length */
+	volatile unsigned int txdesc;	/* TX Descriptor Base/Current */
+	volatile unsigned int rxdesc;	/* RX Descriptor Base/Current */
+	volatile unsigned int addr;	/* Address Register */
+	volatile unsigned int resv[3];	/* Reserved, pads the block to 0x20 bytes */
+};
+
+/* GRSPW/GRSPW2 core APB register layout. Offset comments are derived from
+ * the structure layout (4 bytes per register, 0x20 per DMA block).
+ */
+struct grspw_regs {
+	volatile unsigned int ctrl;	/* 0x00 Control */
+	volatile unsigned int status;	/* 0x04 Status/Interrupt-source */
+	volatile unsigned int nodeaddr;	/* 0x08 Default node address/mask */
+	volatile unsigned int clkdiv;	/* 0x0C Clock divisor */
+	volatile unsigned int destkey;	/* 0x10 RMAP destination key */
+	volatile unsigned int time;	/* 0x14 Time-code */
+	volatile unsigned int timer;	/* 0x18 Timer - used only in GRSPW1 */
+	volatile unsigned int resv1;	/* 0x1C Reserved */
+
+	/* DMA Registers, ctrl.NCH determines number of ports,
+	 * up to 4 channels are supported
+	 */
+	struct grspw_dma_regs dma[4];	/* 0x20..0x9C DMA channel blocks */
+
+	volatile unsigned int icctrl;	/* 0xA0 Interrupt-code control */
+	volatile unsigned int icrx;	/* 0xA4 Interrupt-code receive */
+	volatile unsigned int icack;	/* 0xA8 Interrupt-code acknowledge */
+	volatile unsigned int ictimeout;	/* 0xAC Interrupt-code timeout */
+	volatile unsigned int ictickomask;	/* 0xB0 Tick-out mask */
+	volatile unsigned int icaamask;	/* 0xB4 Auto-acknowledge mask */
+	volatile unsigned int icrlpresc;	/* 0xB8 Reload prescaler */
+	volatile unsigned int icrlisr;	/* 0xBC ISR reload */
+	volatile unsigned int icrlintack;	/* 0xC0 INT-ack reload */
+	volatile unsigned int resv2;	/* 0xC4 Reserved */
+	volatile unsigned int icisr;	/* 0xC8 Interrupt-code ISR */
+	volatile unsigned int resv3;	/* 0xCC Reserved */
+};
+
+/* GRSPW - Control Register - 0x00 */
+#define GRSPW_CTRL_RA_BIT 31
+#define GRSPW_CTRL_RX_BIT 30
+#define GRSPW_CTRL_RC_BIT 29
+#define GRSPW_CTRL_NCH_BIT 27
+#define GRSPW_CTRL_PO_BIT 26
+#define GRSPW_CTRL_CC_BIT 25
+#define GRSPW_CTRL_ID_BIT 24
+#define GRSPW_CTRL_LE_BIT 22
+#define GRSPW_CTRL_PS_BIT 21
+#define GRSPW_CTRL_NP_BIT 20
+#define GRSPW_CTRL_RD_BIT 17
+#define GRSPW_CTRL_RE_BIT 16
+#define GRSPW_CTRL_TF_BIT 12
+#define GRSPW_CTRL_TR_BIT 11
+#define GRSPW_CTRL_TT_BIT 10
+#define GRSPW_CTRL_LI_BIT 9
+#define GRSPW_CTRL_TQ_BIT 8
+#define GRSPW_CTRL_RS_BIT 6
+#define GRSPW_CTRL_PM_BIT 5
+#define GRSPW_CTRL_TI_BIT 4
+#define GRSPW_CTRL_IE_BIT 3
+#define GRSPW_CTRL_AS_BIT 2
+#define GRSPW_CTRL_LS_BIT 1
+#define GRSPW_CTRL_LD_BIT 0
+
+#define GRSPW_CTRL_RA (1<<GRSPW_CTRL_RA_BIT)
+#define GRSPW_CTRL_RX (1<<GRSPW_CTRL_RX_BIT)
+#define GRSPW_CTRL_RC (1<<GRSPW_CTRL_RC_BIT)
+#define GRSPW_CTRL_NCH (0x3<<GRSPW_CTRL_NCH_BIT)
+#define GRSPW_CTRL_PO (1<<GRSPW_CTRL_PO_BIT)
+#define GRSPW_CTRL_CC (1<<GRSPW_CTRL_CC_BIT)
+#define GRSPW_CTRL_ID (1<<GRSPW_CTRL_ID_BIT)
+#define GRSPW_CTRL_LE (1<<GRSPW_CTRL_LE_BIT)
+#define GRSPW_CTRL_PS (1<<GRSPW_CTRL_PS_BIT)
+#define GRSPW_CTRL_NP (1<<GRSPW_CTRL_NP_BIT)
+#define GRSPW_CTRL_RD (1<<GRSPW_CTRL_RD_BIT)
+#define GRSPW_CTRL_RE (1<<GRSPW_CTRL_RE_BIT)
+#define GRSPW_CTRL_TF (1<<GRSPW_CTRL_TF_BIT)
+#define GRSPW_CTRL_TR (1<<GRSPW_CTRL_TR_BIT)
+#define GRSPW_CTRL_TT (1<<GRSPW_CTRL_TT_BIT)
+#define GRSPW_CTRL_LI (1<<GRSPW_CTRL_LI_BIT)
+#define GRSPW_CTRL_TQ (1<<GRSPW_CTRL_TQ_BIT)
+#define GRSPW_CTRL_RS (1<<GRSPW_CTRL_RS_BIT)
+#define GRSPW_CTRL_PM (1<<GRSPW_CTRL_PM_BIT)
+#define GRSPW_CTRL_TI (1<<GRSPW_CTRL_TI_BIT)
+#define GRSPW_CTRL_IE (1<<GRSPW_CTRL_IE_BIT)
+#define GRSPW_CTRL_AS (1<<GRSPW_CTRL_AS_BIT)
+#define GRSPW_CTRL_LS (1<<GRSPW_CTRL_LS_BIT)
+#define GRSPW_CTRL_LD (1<<GRSPW_CTRL_LD_BIT)
+
+#define GRSPW_CTRL_IRQSRC_MASK \
+ (GRSPW_CTRL_LI | GRSPW_CTRL_TQ)
+#define GRSPW_ICCTRL_IRQSRC_MASK \
+ (GRSPW_ICCTRL_TQ | GRSPW_ICCTRL_AQ | GRSPW_ICCTRL_IQ)
+
+
+/* GRSPW - Status Register - 0x04 */
+#define GRSPW_STS_LS_BIT 21
+#define GRSPW_STS_AP_BIT 9
+#define GRSPW_STS_EE_BIT 8
+#define GRSPW_STS_IA_BIT 7
+#define GRSPW_STS_WE_BIT 6 /* GRSPW1 */
+#define GRSPW_STS_PE_BIT 4
+#define GRSPW_STS_DE_BIT 3
+#define GRSPW_STS_ER_BIT 2
+#define GRSPW_STS_CE_BIT 1
+#define GRSPW_STS_TO_BIT 0
+
+#define GRSPW_STS_LS (0x7<<GRSPW_STS_LS_BIT)
+#define GRSPW_STS_AP (1<<GRSPW_STS_AP_BIT)
+#define GRSPW_STS_EE (1<<GRSPW_STS_EE_BIT)
+#define GRSPW_STS_IA (1<<GRSPW_STS_IA_BIT)
+#define GRSPW_STS_WE (1<<GRSPW_STS_WE_BIT) /* GRSPW1 */
+#define GRSPW_STS_PE (1<<GRSPW_STS_PE_BIT)
+#define GRSPW_STS_DE (1<<GRSPW_STS_DE_BIT)
+#define GRSPW_STS_ER (1<<GRSPW_STS_ER_BIT)
+#define GRSPW_STS_CE (1<<GRSPW_STS_CE_BIT)
+#define GRSPW_STS_TO (1<<GRSPW_STS_TO_BIT)
+
+/* GRSPW - Default Address Register - 0x08 */
+#define GRSPW_DEF_ADDR_BIT 0
+#define GRSPW_DEF_MASK_BIT 8
+#define GRSPW_DEF_ADDR (0xff<<GRSPW_DEF_ADDR_BIT)
+#define GRSPW_DEF_MASK (0xff<<GRSPW_DEF_MASK_BIT)
+
+/* GRSPW - Clock Divisor Register - 0x0C */
+#define GRSPW_CLKDIV_START_BIT 8
+#define GRSPW_CLKDIV_RUN_BIT 0
+#define GRSPW_CLKDIV_START (0xff<<GRSPW_CLKDIV_START_BIT)
+#define GRSPW_CLKDIV_RUN (0xff<<GRSPW_CLKDIV_RUN_BIT)
+#define GRSPW_CLKDIV_MASK (GRSPW_CLKDIV_START|GRSPW_CLKDIV_RUN)
+
+/* GRSPW - Destination key Register - 0x10 */
+#define GRSPW_DK_DESTKEY_BIT 0
+#define GRSPW_DK_DESTKEY (0xff<<GRSPW_DK_DESTKEY_BIT)
+
+/* GRSPW - Time Register - 0x14 */
+#define GRSPW_TIME_CTRL_BIT 6
+#define GRSPW_TIME_CNT_BIT 0
+#define GRSPW_TIME_CTRL (0x3<<GRSPW_TIME_CTRL_BIT)
+#define GRSPW_TIME_TCNT (0x3f<<GRSPW_TIME_CNT_BIT)
+
+/* GRSPW - DMA Control Register - 0x20*N */
+#define GRSPW_DMACTRL_LE_BIT 16
+#define GRSPW_DMACTRL_SP_BIT 15
+#define GRSPW_DMACTRL_SA_BIT 14
+#define GRSPW_DMACTRL_EN_BIT 13
+#define GRSPW_DMACTRL_NS_BIT 12
+#define GRSPW_DMACTRL_RD_BIT 11
+#define GRSPW_DMACTRL_RX_BIT 10
+#define GRSPW_DMACTRL_AT_BIT 9
+#define GRSPW_DMACTRL_RA_BIT 8
+#define GRSPW_DMACTRL_TA_BIT 7
+#define GRSPW_DMACTRL_PR_BIT 6
+#define GRSPW_DMACTRL_PS_BIT 5
+#define GRSPW_DMACTRL_AI_BIT 4
+#define GRSPW_DMACTRL_RI_BIT 3
+#define GRSPW_DMACTRL_TI_BIT 2
+#define GRSPW_DMACTRL_RE_BIT 1
+#define GRSPW_DMACTRL_TE_BIT 0
+
+#define GRSPW_DMACTRL_LE (1<<GRSPW_DMACTRL_LE_BIT)
+#define GRSPW_DMACTRL_SP (1<<GRSPW_DMACTRL_SP_BIT)
+#define GRSPW_DMACTRL_SA (1<<GRSPW_DMACTRL_SA_BIT)
+#define GRSPW_DMACTRL_EN (1<<GRSPW_DMACTRL_EN_BIT)
+#define GRSPW_DMACTRL_NS (1<<GRSPW_DMACTRL_NS_BIT)
+#define GRSPW_DMACTRL_RD (1<<GRSPW_DMACTRL_RD_BIT)
+#define GRSPW_DMACTRL_RX (1<<GRSPW_DMACTRL_RX_BIT)
+#define GRSPW_DMACTRL_AT (1<<GRSPW_DMACTRL_AT_BIT)
+#define GRSPW_DMACTRL_RA (1<<GRSPW_DMACTRL_RA_BIT)
+#define GRSPW_DMACTRL_TA (1<<GRSPW_DMACTRL_TA_BIT)
+#define GRSPW_DMACTRL_PR (1<<GRSPW_DMACTRL_PR_BIT)
+#define GRSPW_DMACTRL_PS (1<<GRSPW_DMACTRL_PS_BIT)
+#define GRSPW_DMACTRL_AI (1<<GRSPW_DMACTRL_AI_BIT)
+#define GRSPW_DMACTRL_RI (1<<GRSPW_DMACTRL_RI_BIT)
+#define GRSPW_DMACTRL_TI (1<<GRSPW_DMACTRL_TI_BIT)
+#define GRSPW_DMACTRL_RE (1<<GRSPW_DMACTRL_RE_BIT)
+#define GRSPW_DMACTRL_TE (1<<GRSPW_DMACTRL_TE_BIT)
+
+/* GRSPW - DMA Channel Max Packet Length Register - (0x20*N + 0x04) */
+#define GRSPW_DMARXLEN_MAX_BIT 0
+#define GRSPW_DMARXLEN_MAX (0xffffff<<GRSPW_DMARXLEN_MAX_BIT)
+
+/* GRSPW - DMA Channel Address Register - (0x20*N + 0x10) */
+#define GRSPW_DMAADR_ADDR_BIT 0
+#define GRSPW_DMAADR_MASK_BIT 8
+#define GRSPW_DMAADR_ADDR (0xff<<GRSPW_DMAADR_ADDR_BIT)
+#define GRSPW_DMAADR_MASK (0xff<<GRSPW_DMAADR_MASK_BIT)
+
+/* GRSPW - Interrupt code receive register - 0xa4 */
+#define GRSPW_ICCTRL_INUM_BIT 27
+#define GRSPW_ICCTRL_IA_BIT 24
+#define GRSPW_ICCTRL_LE_BIT 23
+#define GRSPW_ICCTRL_PR_BIT 22
+#define GRSPW_ICCTRL_DQ_BIT 21 /* never used */
+#define GRSPW_ICCTRL_TQ_BIT 20
+#define GRSPW_ICCTRL_AQ_BIT 19
+#define GRSPW_ICCTRL_IQ_BIT 18
+#define GRSPW_ICCTRL_IR_BIT 17
+#define GRSPW_ICCTRL_IT_BIT 16
+#define GRSPW_ICCTRL_NUMI_BIT 13
+#define GRSPW_ICCTRL_BIRQ_BIT 8
+#define GRSPW_ICCTRL_ID_BIT 7
+#define GRSPW_ICCTRL_II_BIT 6
+#define GRSPW_ICCTRL_TXIRQ_BIT 0
+#define GRSPW_ICCTRL_INUM (0x1f << GRSPW_ICCTRL_INUM_BIT)
+#define GRSPW_ICCTRL_IA (1 << GRSPW_ICCTRL_IA_BIT)
+#define GRSPW_ICCTRL_LE (1 << GRSPW_ICCTRL_LE_BIT)
+#define GRSPW_ICCTRL_PR (1 << GRSPW_ICCTRL_PR_BIT)
+#define GRSPW_ICCTRL_DQ (1 << GRSPW_ICCTRL_DQ_BIT)
+#define GRSPW_ICCTRL_TQ (1 << GRSPW_ICCTRL_TQ_BIT)
+#define GRSPW_ICCTRL_AQ (1 << GRSPW_ICCTRL_AQ_BIT)
+#define GRSPW_ICCTRL_IQ (1 << GRSPW_ICCTRL_IQ_BIT)
+#define GRSPW_ICCTRL_IR (1 << GRSPW_ICCTRL_IR_BIT)
+#define GRSPW_ICCTRL_IT (1 << GRSPW_ICCTRL_IT_BIT)
+#define GRSPW_ICCTRL_NUMI (0x7 << GRSPW_ICCTRL_NUMI_BIT)
+#define GRSPW_ICCTRL_BIRQ (0x1f << GRSPW_ICCTRL_BIRQ_BIT)
+#define GRSPW_ICCTRL_ID (1 << GRSPW_ICCTRL_ID_BIT)
+#define GRSPW_ICCTRL_II (1 << GRSPW_ICCTRL_II_BIT)
+#define GRSPW_ICCTRL_TXIRQ (0x3f << GRSPW_ICCTRL_TXIRQ_BIT)
+
+/* RX Buffer Descriptor (8 bytes, in DMA-accessible memory) */
+struct grspw_rxbd {
+	volatile unsigned int ctrl;	/* Control/status word (GRSPW_RXBD_*) */
+	volatile unsigned int addr;	/* Data buffer address (HW view) */
+};
+
+/* TX Buffer Descriptor (16 bytes, in DMA-accessible memory) */
+struct grspw_txbd {
+	volatile unsigned int ctrl;	/* Control/status word (GRSPW_TXBD_*) */
+	volatile unsigned int haddr;	/* Header buffer address (HW view) */
+	volatile unsigned int dlen;	/* Data length in bytes */
+	volatile unsigned int daddr;	/* Data buffer address (HW view) */
+};
+
+/* GRSPW - DMA RXBD Ctrl */
+#define GRSPW_RXBD_LEN_BIT 0
+#define GRSPW_RXBD_LEN (0x1ffffff<<GRSPW_RXBD_LEN_BIT)
+#define GRSPW_RXBD_EN (1<<25)
+#define GRSPW_RXBD_WR (1<<26)
+#define GRSPW_RXBD_IE (1<<27)
+#define GRSPW_RXBD_EP (1<<28)
+#define GRSPW_RXBD_HC (1<<29)
+#define GRSPW_RXBD_DC (1<<30)
+#define GRSPW_RXBD_TR (1<<31)
+
+#define GRSPW_TXBD_HLEN (0xff<<0)
+#define GRSPW_TXBD_NCL (0xf<<8)
+#define GRSPW_TXBD_EN (1<<12)
+#define GRSPW_TXBD_WR (1<<13)
+#define GRSPW_TXBD_IE (1<<14)
+#define GRSPW_TXBD_LE (1<<15)
+#define GRSPW_TXBD_HC (1<<16)
+#define GRSPW_TXBD_DC (1<<17)
+
+/* NOTE: duplicate definitions of GRSPW_DMAADR_MASK_BIT, GRSPW_DMAADR_ADDR
+ * and GRSPW_DMAADR_MASK removed; the identical definitions in the
+ * "DMA Channel Address Register" section above are authoritative.
+ */
+
+
+/* GRSPW Error Condition */
+#define GRSPW_STAT_ERROR (GRSPW_STS_EE | GRSPW_STS_IA | GRSPW_STS_WE | GRSPW_STS_PE | GRSPW_STS_DE | GRSPW_STS_ER | GRSPW_STS_CE)
+#define GRSPW_DMA_STATUS_ERROR (GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA)
+/* GRSPW Link configuration options */
+#define GRSPW_LINK_CFG (GRSPW_CTRL_LI | GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS)
+#define GRSPW_LINKSTATE(status) ((status & GRSPW_CTRL_LS) >> GRSPW_CTRL_LS_BIT)
+
+/* Software Defaults */
+#define DEFAULT_RXMAX 1024 /* 1 KBytes Max RX Packet Size */
+
+/* GRSPW Constants */
+#define GRSPW_TXBD_NR 64 /* Maximum number of TX Descriptors */
+#define GRSPW_RXBD_NR 128 /* Maximum number of RX Descriptors */
+#define GRSPW_TXBD_SIZE 16 /* Size in bytes of one TX descriptor */
+#define GRSPW_RXBD_SIZE 8 /* Size in bytes of one RX descriptor */
+#define BDTAB_SIZE 0x400 /* BD Table Size (RX or TX) */
+#define BDTAB_ALIGN 0x400 /* BD Table Alignment Requirement */
+
+/* Memory and HW Registers Access routines. All 32-bit access routines */
+#define BD_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
+/*#define BD_READ(addr) (*(volatile unsigned int *)(addr))*/
+#define BD_READ(addr) leon_r32_no_cache((unsigned long)(addr))
+#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
+#define REG_READ(addr) (*(volatile unsigned int *)(addr))
+
+/* Generic descriptor ring entry; the union lets one node type reference
+ * either a TX or an RX descriptor.
+ * NOTE(review): the dedicated grspw_txring/grspw_rxring types below appear
+ * to be what the driver state actually uses - confirm whether grspw_ring
+ * is still referenced.
+ */
+struct grspw_ring {
+	struct grspw_ring *next;	/* Next Descriptor */
+	union {
+		struct grspw_txbd *tx;	/* Descriptor Address */
+		struct grspw_rxbd *rx;	/* Descriptor Address */
+	} bd;
+	struct grspw_pkt *pkt;	/* Packet description associated.NULL if none*/
+};
+
+/* An entry in the TX descriptor Ring */
+struct grspw_txring {
+	struct grspw_txring *next;	/* Next Descriptor */
+	struct grspw_txbd *bd;	/* Descriptor Address */
+	struct grspw_pkt *pkt;	/* Packet description associated.NULL if none*/
+};
+
+/* An entry in the RX descriptor Ring */
+struct grspw_rxring {
+	struct grspw_rxring *next;	/* Next Descriptor */
+	struct grspw_rxbd *bd;	/* Descriptor Address */
+	struct grspw_pkt *pkt;	/* Packet description associated.NULL if none*/
+};
+
+
+/* Per DMA-channel driver state: descriptor rings, the three packet queues
+ * of each direction (READY->SCHED->RECV for RX, SEND->SCHED->SENT for TX)
+ * and blocking/wait state. Presumably the RX side is guarded by sem_rxdma
+ * and the TX side by sem_txdma - TODO confirm against lock usage.
+ */
+struct grspw_dma_priv {
+	struct grspw_priv *core;	/* GRSPW Core */
+	struct grspw_dma_regs *regs;	/* DMA Channel Registers */
+	int index;			/* DMA Channel Index @ GRSPW core */
+	int open;			/* DMA Channel opened by user */
+	int started;			/* DMA Channel activity (start|stop) */
+	rtems_id sem_rxdma;		/* DMA Channel RX Semaphore */
+	rtems_id sem_txdma;		/* DMA Channel TX Semaphore */
+	struct grspw_dma_stats stats;	/* DMA Channel Statistics */
+	struct grspw_dma_config cfg;	/* DMA Channel Configuration */
+
+	/*** RX ***/
+
+	/* RX Descriptor Ring */
+	struct grspw_rxbd *rx_bds;		/* Descriptor Address */
+	struct grspw_rxbd *rx_bds_hwa;		/* Descriptor HW Address */
+	struct grspw_rxring *rx_ring_base;
+	struct grspw_rxring *rx_ring_head;	/* Next descriptor to enable */
+	struct grspw_rxring *rx_ring_tail;	/* Oldest enabled Descriptor */
+	int rx_irq_en_cnt_curr;
+	struct {
+		int waiting;
+		int ready_cnt;
+		int op;
+		int recv_cnt;
+		rtems_id sem_wait;		/* RX Semaphore used to implement RX blocking */
+	} rx_wait;
+
+	/* Queue of Packets READY to be scheduled */
+	struct grspw_list ready;
+	int ready_cnt;
+
+	/* Scheduled RX Packets Queue */
+	struct grspw_list rx_sched;
+	int rx_sched_cnt;
+
+	/* Queue of Packets that has been RECEIVED */
+	struct grspw_list recv;
+	int recv_cnt;
+
+
+	/*** TX ***/
+
+	/* TX Descriptor Ring */
+	struct grspw_txbd *tx_bds;		/* Descriptor Address */
+	struct grspw_txbd *tx_bds_hwa;		/* Descriptor HW Address */
+	struct grspw_txring *tx_ring_base;
+	struct grspw_txring *tx_ring_head;
+	struct grspw_txring *tx_ring_tail;
+	int tx_irq_en_cnt_curr;
+	struct {
+		int waiting;
+		int send_cnt;
+		int op;
+		int sent_cnt;
+		rtems_id sem_wait;		/* TX Semaphore used to implement TX blocking */
+	} tx_wait;
+
+	/* Queue of Packets ready to be scheduled for transmission */
+	struct grspw_list send;
+	int send_cnt;
+
+	/* Scheduled TX Packets Queue */
+	struct grspw_list tx_sched;
+	int tx_sched_cnt;
+
+	/* Queue of Packets that has been SENT */
+	struct grspw_list sent;
+	int sent_cnt;
+};
+
+/* Per GRSPW-core driver state. One instance per probed core, stored in
+ * priv_tab[] and handed to users as the opaque handle from grspw_open().
+ */
+struct grspw_priv {
+	char devname[8];	/* Device name "grspw%d" */
+	struct drvmgr_dev *dev;	/* Device */
+	struct grspw_regs *regs;	/* Virtual Address of APB Registers */
+	int irq;		/* AMBA IRQ number of core */
+	int index;		/* Index in order it was probed */
+	int core_index;		/* Core Bus Index */
+	int open;		/* If Device is already opened (=1) or not (=0) */
+	void *data;		/* User private Data for this device instance, set by grspw_initialize_user */
+
+	/* Features supported by Hardware */
+	struct grspw_hw_sup hwsup;
+
+	/* Pointer to an array of Maximally 4 DMA Channels */
+	struct grspw_dma_priv *dma;
+
+	/* Spin-lock ISR protection */
+	SPIN_DECLARE(devlock);
+
+	/* Descriptor Memory Area for TX & RX and all DMA channels */
+	unsigned int bd_mem;		/* Aligned base used for descriptors */
+	unsigned int bd_mem_alloced;	/* malloc()ed base, 0 if user-provided */
+
+	/*** Time Code Handling ***/
+	void (*tcisr)(void *data, int timecode);
+	void *tcisr_arg;
+
+	/*** Interrupt-code Handling ***/
+	spwpkt_ic_isr_t icisr;
+	void *icisr_arg;
+
+	/* Bit mask representing events which shall cause link disable. */
+	unsigned int dis_link_on_err;
+
+	/* Bit mask for link status bits to clear by ISR */
+	unsigned int stscfg;
+
+	/*** Message Queue Handling ***/
+	struct grspw_work_config wc;
+
+	/* "Core Global" Statistics gathered, not dependent on DMA channel */
+	struct grspw_core_stats stats;
+};
+
+/* Set to 1 once driver initialization has completed; grspw_open() refuses
+ * to run before that. */
+int grspw_initialized = 0;
+/* Number of GRSPW cores probed; valid device numbers are 0..grspw_count-1 */
+int grspw_count = 0;
+/* Driver-global semaphore serializing grspw_open()/grspw_close() */
+rtems_id grspw_sem;
+/* Per-device private state, indexed by probe order (device number) */
+static struct grspw_priv *priv_tab[GRSPW_MAX];
+
+/* callback to upper layer when devices are discovered/removed */
+void *(*grspw_dev_add)(int) = NULL;
+void (*grspw_dev_del)(int,void*) = NULL;
+
+/* Defaults to do nothing - user can override this function.
+ * Called from work-task.
+ */
+void __attribute__((weak)) grspw_work_event(
+	enum grspw_worktask_ev ev,
+	unsigned int msg)
+{
+
+}
+
+/* USER OVERRIDABLE - The work task priority. Set to -1 to disable creating
+ * the work-task and work-queue to save space.
+ */
+int grspw_work_task_priority __attribute__((weak)) = 100;
+/* ID of the common driver work-task (created during initialization) */
+rtems_id grspw_work_task;
+/* Default work/message-queue configuration handed out by grspw_open() */
+static struct grspw_work_config grspw_wc_def;
+
+STATIC void grspw_hw_stop(struct grspw_priv *priv);
+STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma);
+STATIC void grspw_dma_reset(struct grspw_dma_priv *dma);
+STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma);
+STATIC void grspw_isr(void *data);
+
+/* Open GRSPW device 'dev_no' for exclusive use. Sets up the descriptor
+ * table memory for all DMA channels (either the user-supplied "bdDmaArea"
+ * resource or a freshly allocated, BDTAB_ALIGN-aligned area), stops the
+ * hardware and installs the interrupt handler.
+ *
+ * Returns an opaque device handle, or NULL on failure: driver not
+ * initialized, dev_no out of range, device already open, misaligned
+ * user-provided descriptor area, or out of memory.
+ */
+void *grspw_open(int dev_no)
+{
+	struct grspw_priv *priv;
+	unsigned int bdtabsize, hwa;
+	int i;
+	union drvmgr_key_value *value;
+
+	if (grspw_initialized != 1 || (dev_no >= grspw_count))
+		return NULL;
+
+	priv = priv_tab[dev_no];
+
+	/* Take GRSPW lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return NULL;
+
+	/* Only one user at a time per device */
+	if (priv->open) {
+		priv = NULL;
+		goto out;
+	}
+
+	/* Initialize Spin-lock for GRSPW Device. This is to protect
+	 * CTRL and DMACTRL registers from ISR.
+	 */
+	SPIN_INIT(&priv->devlock, priv->devname);
+
+	priv->tcisr = NULL;
+	priv->tcisr_arg = NULL;
+	priv->icisr = NULL;
+	priv->icisr_arg = NULL;
+	priv->stscfg = LINKSTS_MASK;
+
+	/* Default to common work queue and message queue, if not created
+	 * during initialization then its disabled.
+	 */
+	grspw_work_cfg(priv, &grspw_wc_def);
+
+	grspw_stats_clr(priv);
+
+	/* Allocate TX & RX Descriptor memory area for all DMA
+	 * channels. Max-size descriptor area is allocated (or user assigned):
+	 *  - 128 RX descriptors per DMA Channel
+	 *  - 64 TX descriptors per DMA Channel
+	 * Specified address must be in CPU RAM.
+	 */
+	bdtabsize = 2 * BDTAB_SIZE * priv->hwsup.ndma_chans;
+	value = drvmgr_dev_key_get(priv->dev, "bdDmaArea", DRVMGR_KT_INT);
+	if (value) {
+		/* User-supplied area: must meet the alignment requirement */
+		priv->bd_mem = value->i;
+		priv->bd_mem_alloced = 0;
+		if (priv->bd_mem & (BDTAB_ALIGN-1)) {
+			GRSPW_DBG("GRSPW[%d]: user-def DMA-area not aligned",
+			          priv->index);
+			priv = NULL;
+			goto out;
+		}
+	} else {
+		/* Over-allocate so the base can be aligned manually below */
+		priv->bd_mem_alloced = (unsigned int)grlib_malloc(bdtabsize + BDTAB_ALIGN - 1);
+		if (priv->bd_mem_alloced == 0) {
+			priv = NULL;
+			goto out;
+		}
+		/* Align memory */
+		priv->bd_mem = (priv->bd_mem_alloced + (BDTAB_ALIGN - 1)) &
+		               ~(BDTAB_ALIGN-1);
+	}
+
+	/* Translate into DMA address that HW can use to access DMA
+	 * descriptors
+	 */
+	drvmgr_translate_check(
+		priv->dev,
+		CPUMEM_TO_DMA,
+		(void *)priv->bd_mem,
+		(void **)&hwa,
+		bdtabsize);
+
+	GRSPW_DBG("GRSPW%d DMA descriptor table setup: (alloced:%p, bd_mem:%p, size: %d)\n",
+		priv->index, priv->bd_mem_alloced, priv->bd_mem, bdtabsize + BDTAB_ALIGN - 1);
+	for (i=0; i<priv->hwsup.ndma_chans; i++) {
+		/* Do DMA Channel Init, other variables etc. are inited
+		 * when respective DMA channel is opened.
+		 *
+		 * index & core are initialized by probe function.
+		 */
+		priv->dma[i].open = 0;
+		priv->dma[i].rx_bds = (struct grspw_rxbd *)
+			(priv->bd_mem + i*BDTAB_SIZE*2);
+		priv->dma[i].rx_bds_hwa = (struct grspw_rxbd *)
+			(hwa + BDTAB_SIZE*(2*i));
+		priv->dma[i].tx_bds = (struct grspw_txbd *)
+			(priv->bd_mem + BDTAB_SIZE*(2*i+1));
+		priv->dma[i].tx_bds_hwa = (struct grspw_txbd *)
+			(hwa + BDTAB_SIZE*(2*i+1));
+		GRSPW_DBG("  DMA[%i]: RX %p - %p (%p - %p)   TX %p - %p (%p - %p)\n",
+			i,
+			priv->dma[i].rx_bds, (void *)priv->dma[i].rx_bds + BDTAB_SIZE - 1,
+			priv->dma[i].rx_bds_hwa, (void *)priv->dma[i].rx_bds_hwa + BDTAB_SIZE - 1,
+			priv->dma[i].tx_bds, (void *)priv->dma[i].tx_bds + BDTAB_SIZE - 1,
+			priv->dma[i].tx_bds_hwa, (void *)priv->dma[i].tx_bds_hwa + BDTAB_SIZE - 1);
+	}
+
+	/* Basic initialization of hardware, clear some registers but
+	 * keep Link/RMAP/Node-Address registers intact.
+	 */
+	grspw_hw_stop(priv);
+
+	/* Register Interrupt handler and enable IRQ at IRQ ctrl */
+	drvmgr_interrupt_register(priv->dev, 0, priv->devname, grspw_isr, priv);
+
+	/* Take the device */
+	priv->open = 1;
+out:
+	rtems_semaphore_release(grspw_sem);
+	return priv;
+}
+
+/* Close a device previously opened with grspw_open().
+ *
+ * Returns 0 on success, 1 if any DMA channel is still open (device stays
+ * open), and -1 if the driver lock could not be taken.
+ */
+int grspw_close(void *d)
+{
+	struct grspw_priv *priv = d;
+	int i;
+
+	/* Take GRSPW lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return -1;
+
+	/* Check that user has stopped and closed all DMA channels
+	 * appropriately. At this point the Hardware shall not be doing DMA
+	 * or generating Interrupts. We want HW in a "startup-state".
+	 */
+	for (i=0; i<priv->hwsup.ndma_chans; i++) {
+		if (priv->dma[i].open) {
+			rtems_semaphore_release(grspw_sem);
+			return 1;
+		}
+	}
+	grspw_hw_stop(priv);
+
+	/* Uninstall Interrupt handler */
+	drvmgr_interrupt_unregister(priv->dev, 0, grspw_isr, priv);
+
+	/* Free descriptor table memory if allocated using malloc() */
+	if (priv->bd_mem_alloced) {
+		free((void *)priv->bd_mem_alloced);
+		priv->bd_mem_alloced = 0;
+	}
+
+	/* Mark not open */
+	priv->open = 0;
+	rtems_semaphore_release(grspw_sem);
+	return 0;
+}
+
+/* Copy the hardware capability record detected for this device into the
+ * caller-provided structure.
+ */
+void grspw_hw_support(void *d, struct grspw_hw_sup *hw)
+{
+	struct grspw_priv *priv = d;
+
+	memcpy(hw, &priv->hwsup, sizeof(*hw));
+}
+
+/* Set (when cfg->promiscuous != -1) and/or read back the node address
+ * configuration: promiscuous mode, default node address/mask, and the
+ * per-DMA-channel separate node addresses. On return *cfg reflects the
+ * current hardware state; entries for DMA channels not present in
+ * hardware are zeroed.
+ */
+void grspw_addr_ctrl(void *d, struct grspw_addr_config *cfg)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs;
+	unsigned int ctrl, nodeaddr;
+	SPIN_IRQFLAGS(irqflags);
+	int i;
+
+	/* BUGFIX: validate arguments before dereferencing priv. The original
+	 * code read priv->regs in the declarations, before this NULL check,
+	 * which made the check ineffective.
+	 */
+	if (!priv || !cfg)
+		return;
+	regs = priv->regs;
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+	if (cfg->promiscuous != -1) {
+		/* Set Configuration */
+		ctrl = REG_READ(&regs->ctrl);
+		if (cfg->promiscuous)
+			ctrl |= GRSPW_CTRL_PM;
+		else
+			ctrl &= ~GRSPW_CTRL_PM;
+		REG_WRITE(&regs->ctrl, ctrl);
+		REG_WRITE(&regs->nodeaddr, (cfg->def_mask<<8) | cfg->def_addr);
+
+		for (i=0; i<priv->hwsup.ndma_chans; i++) {
+			ctrl = REG_READ(&regs->dma[i].ctrl);
+			/* Avoid writing back the clear-on-write error bits */
+			ctrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
+			if (cfg->dma_nacfg[i].node_en) {
+				ctrl |= GRSPW_DMACTRL_EN;
+				REG_WRITE(&regs->dma[i].addr,
+				          (cfg->dma_nacfg[i].node_addr & 0xff) |
+				          ((cfg->dma_nacfg[i].node_mask & 0xff)<<8));
+			} else {
+				ctrl &= ~GRSPW_DMACTRL_EN;
+			}
+			REG_WRITE(&regs->dma[i].ctrl, ctrl);
+		}
+	}
+
+	/* Read Current Configuration */
+	cfg->promiscuous = REG_READ(&regs->ctrl) & GRSPW_CTRL_PM;
+	nodeaddr = REG_READ(&regs->nodeaddr);
+	cfg->def_addr = (nodeaddr & GRSPW_DEF_ADDR) >> GRSPW_DEF_ADDR_BIT;
+	cfg->def_mask = (nodeaddr & GRSPW_DEF_MASK) >> GRSPW_DEF_MASK_BIT;
+	for (i=0; i<priv->hwsup.ndma_chans; i++) {
+		cfg->dma_nacfg[i].node_en = REG_READ(&regs->dma[i].ctrl) &
+						GRSPW_DMACTRL_EN;
+		ctrl = REG_READ(&regs->dma[i].addr);
+		cfg->dma_nacfg[i].node_addr = (ctrl & GRSPW_DMAADR_ADDR) >>
+						GRSPW_DMAADR_ADDR_BIT;
+		cfg->dma_nacfg[i].node_mask = (ctrl & GRSPW_DMAADR_MASK) >>
+						GRSPW_DMAADR_MASK_BIT;
+	}
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+	/* Zero the entries for DMA channels not implemented in hardware */
+	for (; i<4; i++) {
+		cfg->dma_nacfg[i].node_en = 0;
+		cfg->dma_nacfg[i].node_addr = 0;
+		cfg->dma_nacfg[i].node_mask = 0;
+	}
+}
+
+/* Return Current DMA CTRL/Status Register of a DMA channel handle */
+unsigned int grspw_dma_ctrlsts(void *c)
+{
+	struct grspw_dma_priv *chan = c;
+	return REG_READ(&chan->regs->ctrl);
+}
+
+/* Return the raw contents of the core Status Register */
+unsigned int grspw_link_status(void *d)
+{
+	struct grspw_priv *dev = d;
+	return REG_READ(&dev->regs->status);
+}
+
+/* Clear the given Status Register bits (the bits are clear-on-write) */
+void grspw_link_status_clr(void *d, unsigned int mask)
+{
+	struct grspw_priv *dev = d;
+	REG_WRITE(&dev->regs->status, mask);
+}
+
+/* Return Current Link State, extracted from the LS field of the status
+ * register. */
+spw_link_state_t grspw_link_state(void *d)
+{
+	struct grspw_priv *dev = d;
+	unsigned int sts = REG_READ(&dev->regs->status);
+
+	return (spw_link_state_t)((sts & GRSPW_STS_LS) >> GRSPW_STS_LS_BIT);
+}
+
+/* Nonzero when any interrupt source is enabled in CTRL or ICCTRL; used to
+ * decide whether the global interrupt enable (IE) bit should be set. */
+static inline int grspw_is_irqsource_set(unsigned int ctrl, unsigned int icctrl)
+{
+	if (ctrl & GRSPW_CTRL_IRQSRC_MASK)
+		return 1;
+	return (icctrl & GRSPW_ICCTRL_IRQSRC_MASK) != 0;
+}
+
+
+/* Configure and/or read the link settings. Each of options/stscfg/clkdiv
+ * is in/out: pass -1 to only read the current value, or NULL to skip it.
+ */
+void grspw_link_ctrl(void *d, int *options, int *stscfg, int *clkdiv)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+	unsigned int ctrl;
+	SPIN_IRQFLAGS(irqflags);
+
+	/* Write? */
+	if (clkdiv) {
+		if (*clkdiv != -1)
+			REG_WRITE(&regs->clkdiv, *clkdiv & GRSPW_CLKDIV_MASK);
+		*clkdiv = REG_READ(&regs->clkdiv) & GRSPW_CLKDIV_MASK;
+	}
+	if (options) {
+		/* Lock out the ISR while modifying the shared CTRL register */
+		SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+		ctrl = REG_READ(&regs->ctrl);
+		if (*options != -1) {
+			ctrl = (ctrl & ~GRSPW_LINK_CFG) |
+			       (*options & GRSPW_LINK_CFG);
+
+			/* Enable Global IRQ only if some irq source is set */
+			if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
+				ctrl |= GRSPW_CTRL_IE;
+			else
+				ctrl &= ~GRSPW_CTRL_IE;
+
+			REG_WRITE(&regs->ctrl, ctrl);
+			/* Store the link disable events for use in
+			   ISR. The LINKOPTS_DIS_ON_* options are actually the
+			   corresponding bits in the status register, shifted
+			   by 16. */
+			priv->dis_link_on_err = *options &
+				(LINKOPTS_MASK_DIS_ON | LINKOPTS_DIS_ONERR);
+		}
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+		*options = (ctrl & GRSPW_LINK_CFG) | priv->dis_link_on_err;
+	}
+	if (stscfg) {
+		if (*stscfg != -1) {
+			priv->stscfg = *stscfg & LINKSTS_MASK;
+		}
+		*stscfg = priv->stscfg;
+	}
+}
+
+/* Generate Tick-In (increment Time Counter, Send Time Code) by setting the
+ * TI bit in the control register under the device spin-lock. */
+void grspw_tc_tx(void *d)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+	unsigned int ctrl;
+	SPIN_IRQFLAGS(irqflags);
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+	ctrl = REG_READ(&regs->ctrl);
+	REG_WRITE(&regs->ctrl, ctrl | GRSPW_CTRL_TI);
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+}
+
+/* Configure and/or read Time-code options. *options is in/out; pass -1 to
+ * only read. The 0xd mask selects option bits 0,2,3 which map (shifted by
+ * GRSPW_CTRL_TQ_BIT) onto the TQ, TT and TR control bits.
+ */
+void grspw_tc_ctrl(void *d, int *options)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+	unsigned int ctrl;
+	SPIN_IRQFLAGS(irqflags);
+
+	if (options == NULL)
+		return;
+
+	/* Write? */
+	if (*options != -1) {
+		SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+		ctrl = REG_READ(&regs->ctrl);
+		ctrl &= ~(GRSPW_CTRL_TR|GRSPW_CTRL_TT|GRSPW_CTRL_TQ);
+		ctrl |= (*options & 0xd) << GRSPW_CTRL_TQ_BIT;
+
+		/* Enable Global IRQ only if some irq source is set */
+		if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
+			ctrl |= GRSPW_CTRL_IE;
+		else
+			ctrl &= ~GRSPW_CTRL_IE;
+
+		REG_WRITE(&regs->ctrl, ctrl);
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+	} else
+		/* Read-only: sample CTRL so the read-back below works */
+		ctrl = REG_READ(&regs->ctrl);
+	*options = (ctrl >> GRSPW_CTRL_TQ_BIT) & 0xd;
+}
+
+/* Assign the handler called from the driver ISR on TimeCode reception.
+ * The argument is stored before the handler pointer (same order as the
+ * original implementation). */
+void grspw_tc_isr(void *d, void (*tcisr)(void *data, int tc), void *data)
+{
+	struct grspw_priv *device = d;
+
+	device->tcisr_arg = data;
+	device->tcisr = tcisr;
+}
+
+/* Read/Write TCTRL and TIMECNT. Write if not -1, always read current value
+ * TCTRL = bits 7 and 6
+ * TIMECNT = bits 5 to 0
+ */
+void grspw_tc_time(void *d, int *time)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+	const unsigned int mask = GRSPW_TIME_TCNT | GRSPW_TIME_CTRL;
+
+	if (time == NULL)
+		return;
+	if (*time != -1)
+		REG_WRITE(&regs->time, *time & mask);
+	*time = REG_READ(&regs->time) & mask;
+}
+
+/* Generate Tick-In for the given Interrupt-code and check for generation
+ * error.
+ *
+ * Returns zero on success and non-zero on failure
+ */
+int grspw_ic_tickin(void *d, int ic)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+	SPIN_IRQFLAGS(irqflags);
+	unsigned int icctrl, mask;
+
+	/* Prepare before turning off IRQ */
+	mask = 0x3f << GRSPW_ICCTRL_TXIRQ_BIT;
+	ic = ((ic << GRSPW_ICCTRL_TXIRQ_BIT) & mask) |
+	     GRSPW_ICCTRL_II | GRSPW_ICCTRL_ID;
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+	icctrl = REG_READ(&regs->icctrl);
+	icctrl &= ~mask;
+	icctrl |= ic;
+	REG_WRITE(&regs->icctrl, icctrl); /* Generate SpW Interrupt Tick-In */
+	/* the ID bit is valid after two clocks, so there is no need to wait
+	 * before reading it back here */
+	icctrl = REG_READ(&regs->icctrl); /* Check SpW-Int generation error */
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return icctrl & GRSPW_ICCTRL_ID;
+}
+
+/* ICOPTS_* option bits are split over two registers: one bit lives in CTRL
+ * (flag filter), the rest map one-to-one onto ICCTRL bits. */
+#define ICOPTS_CTRL_MASK ICOPTS_EN_FLAGFILTER
+#define ICOPTS_ICCTRL_MASK \
+	(ICOPTS_INTNUM | ICOPTS_EN_SPWIRQ_ON_EE | ICOPTS_EN_SPWIRQ_ON_IA | \
+	 ICOPTS_EN_PRIO | ICOPTS_EN_TIMEOUTIRQ | ICOPTS_EN_ACKIRQ | \
+	 ICOPTS_EN_TICKOUTIRQ | ICOPTS_EN_RX | ICOPTS_EN_TX | \
+	 ICOPTS_BASEIRQ)
+
+/* Control Interrupt-code settings of core
+ * Write if not pointing to -1, always read current value
+ *
+ * TODO: A lot of code duplication with grspw_tc_ctrl
+ */
+void grspw_ic_ctrl(void *d, unsigned int *options)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+	unsigned int ctrl;
+	unsigned int icctrl;
+	SPIN_IRQFLAGS(irqflags);
+
+	if (options == NULL)
+		return;
+
+	/* Note: *options is unsigned, so -1 here compares against 0xffffffff */
+	if (*options != -1) {
+		SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+		ctrl = REG_READ(&regs->ctrl);
+		ctrl &= ~GRSPW_CTRL_TF; /* Depends on one to one relation between
+					 * irqopts bits and ctrl bits */
+		ctrl |= (*options & ICOPTS_CTRL_MASK) <<
+			(GRSPW_CTRL_TF_BIT - 0);
+
+		icctrl = REG_READ(&regs->icctrl);
+		icctrl &= ~ICOPTS_ICCTRL_MASK; /* Depends on one to one relation between
+						* irqopts bits and icctrl bits */
+		icctrl |= *options & ICOPTS_ICCTRL_MASK;
+
+		/* Enable Global IRQ only if some irq source is set */
+		if (grspw_is_irqsource_set(ctrl, icctrl))
+			ctrl |= GRSPW_CTRL_IE;
+		else
+			ctrl &= ~GRSPW_CTRL_IE;
+
+		REG_WRITE(&regs->ctrl, ctrl);
+		REG_WRITE(&regs->icctrl, icctrl);
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+	}
+	*options = ((REG_READ(&regs->ctrl) & ICOPTS_CTRL_MASK) |
+		    (REG_READ(&regs->icctrl) & ICOPTS_ICCTRL_MASK));
+}
+
+/* Write (rw bit 0) and/or read (rw bit 1) the Interrupt-code configuration
+ * registers: tick-out mask, auto-ack mask, reload prescaler and the two
+ * reload values. A NULL cfg is ignored.
+ */
+void grspw_ic_config(void *d, int rw, struct spwpkt_ic_config *cfg)
+{
+	struct grspw_priv *dev = d;
+	struct grspw_regs *r = dev->regs;
+
+	if (cfg == NULL)
+		return;
+
+	if (rw & 1) {
+		/* Write configuration to hardware */
+		REG_WRITE(&r->ictickomask, cfg->tomask);
+		REG_WRITE(&r->icaamask, cfg->aamask);
+		REG_WRITE(&r->icrlpresc, cfg->scaler);
+		REG_WRITE(&r->icrlisr, cfg->isr_reload);
+		REG_WRITE(&r->icrlintack, cfg->ack_reload);
+	}
+	if (rw & 2) {
+		/* Read configuration back from hardware */
+		cfg->tomask = REG_READ(&r->ictickomask);
+		cfg->aamask = REG_READ(&r->icaamask);
+		cfg->scaler = REG_READ(&r->icrlpresc);
+		cfg->isr_reload = REG_READ(&r->icrlisr);
+		cfg->ack_reload = REG_READ(&r->icrlintack);
+	}
+}
+
+/* Read or Write the Interrupt-code status registers. For each non-NULL
+ * argument: a zero value means "read the register", a non-zero value is
+ * written (the bits are clear-on-write). */
+void grspw_ic_sts(void *d, unsigned int *rxirq, unsigned int *rxack, unsigned int *intto)
+{
+	struct grspw_priv *dev = d;
+	struct grspw_regs *regs = dev->regs;
+
+	/* No locking needed since the status bits are clear-on-write */
+
+	if (rxirq != NULL) {
+		if (*rxirq == 0)
+			*rxirq = REG_READ(&regs->icrx);
+		else
+			REG_WRITE(&regs->icrx, *rxirq);
+	}
+
+	if (rxack != NULL) {
+		if (*rxack == 0)
+			*rxack = REG_READ(&regs->icack);
+		else
+			REG_WRITE(&regs->icack, *rxack);
+	}
+
+	if (intto != NULL) {
+		if (*intto == 0)
+			*intto = REG_READ(&regs->ictimeout);
+		else
+			REG_WRITE(&regs->ictimeout, *intto);
+	}
+}
+
+/* Assign the handler called from the driver ISR on Interrupt-code tick-out.
+ * The argument is stored before the handler pointer (same order as the
+ * original implementation). */
+void grspw_ic_isr(void *d, spwpkt_ic_isr_t handler, void *data)
+{
+	struct grspw_priv *device = d;
+
+	device->icisr_arg = data;
+	device->icisr = handler;
+}
+
+/* Set (not -1) and/or read RMAP options and destination key. Returns 0 on
+ * success, -1 when RMAP enable is requested but the hardware lacks an RMAP
+ * target (note: in that case the destination key may already have been
+ * written). */
+int grspw_rmap_ctrl(void *d, int *options, int *dstkey)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+	unsigned int ctrl;
+	SPIN_IRQFLAGS(irqflags);
+
+	if (dstkey) {
+		if (*dstkey != -1)
+			REG_WRITE(&regs->destkey, *dstkey & GRSPW_DK_DESTKEY);
+		*dstkey = REG_READ(&regs->destkey) & GRSPW_DK_DESTKEY;
+	}
+	if (options) {
+		if (*options != -1) {
+			if ((*options & RMAPOPTS_EN_RMAP) && !priv->hwsup.rmap)
+				return -1;
+
+
+			SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+			ctrl = REG_READ(&regs->ctrl);
+			/* Option bits 0..1 map onto the RE and RD control bits */
+			ctrl &= ~(GRSPW_CTRL_RE|GRSPW_CTRL_RD);
+			ctrl |= (*options & 0x3) << GRSPW_CTRL_RE_BIT;
+			REG_WRITE(&regs->ctrl, ctrl);
+			SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+		}
+		*options = (REG_READ(&regs->ctrl) >> GRSPW_CTRL_RE_BIT) & 0x3;
+	}
+
+	return 0;
+}
+
+/* Report whether the hardware has an RMAP target and RMAP CRC support.
+ * Either output pointer may be NULL to skip that item. */
+void grspw_rmap_support(void *d, char *rmap, char *rmap_crc)
+{
+	struct grspw_priv *priv = d;
+
+	if (rmap != NULL)
+		*rmap = priv->hwsup.rmap;
+	if (rmap_crc != NULL)
+		*rmap_crc = priv->hwsup.rmap_crc;
+}
+
+/* Select port, if
+ * -1=The current selected port is returned
+ * 0=Port 0
+ * 1=Port 1
+ * Others=Both Port0 and Port1
+ *
+ * Returns 0 on success, -1 on bad arguments (NULL pointer, or Port 1
+ * requested on single-port hardware). *port is always updated with the
+ * resulting selection on success.
+ */
+int grspw_port_ctrl(void *d, int *port)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+	unsigned int ctrl;
+	SPIN_IRQFLAGS(irqflags);
+
+	if (port == NULL)
+		return -1;
+
+	if ((*port == 1) || (*port == 0)) {
+		/* Select port user selected */
+		if ((*port == 1) && (priv->hwsup.nports < 2))
+			return -1; /* Changing to Port 1, but only one port available */
+		SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+		ctrl = REG_READ(&regs->ctrl);
+		/* Clear "no port select" and set the PS (port select) bit */
+		ctrl &= ~(GRSPW_CTRL_NP | GRSPW_CTRL_PS);
+		ctrl |= (*port & 1) << GRSPW_CTRL_PS_BIT;
+		REG_WRITE(&regs->ctrl, ctrl);
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+	} else if (*port > 1) {
+		/* Select both ports */
+		SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+		REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_NP);
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+	}
+
+	/* Get current settings */
+	ctrl = REG_READ(&regs->ctrl);
+	if (ctrl & GRSPW_CTRL_NP) {
+		/* Any port, selected by hardware */
+		if (priv->hwsup.nports > 1)
+			*port = 3;
+		else
+			*port = 0; /* Port0 the only port available */
+	} else {
+		*port = (ctrl & GRSPW_CTRL_PS) >> GRSPW_CTRL_PS_BIT;
+	}
+
+	return 0;
+}
+
+/* Returns Number of ports available in hardware */
+int grspw_port_count(void *d)
+{
+	struct grspw_priv *priv = d;
+
+	return priv->hwsup.nports;
+}
+
+/* Current active port: 0 or 1, read from the status register AP field */
+int grspw_port_active(void *d)
+{
+	struct grspw_priv *priv = d;
+	unsigned int status;
+
+	status = REG_READ(&priv->regs->status);
+
+	return (status & GRSPW_STS_AP) >> GRSPW_STS_AP_BIT;
+}
+
+/* Copy a snapshot of the core statistics counters into caller's buffer.
+ * A NULL destination is silently ignored.
+ */
+void grspw_stats_read(void *d, struct grspw_core_stats *sts)
+{
+	struct grspw_priv *priv = d;
+
+	if (sts == NULL)
+		return;
+	memcpy(sts, &priv->stats, sizeof(priv->stats));
+}
+
+/* Reset the core statistics counters to zero */
+void grspw_stats_clr(void *d)
+{
+	struct grspw_priv *priv = d;
+
+	/* Clear most of the statistics */
+	memset(&priv->stats, 0, sizeof(priv->stats));
+}
+
+/*** DMA Interface ***/
+
+/* Initialize the RX and TX Descriptor Ring, empty of packets.
+ * Links each software ring entry circularly to its neighbor, binds it to
+ * its hardware descriptor, and zeroes the hardware descriptor words so no
+ * descriptor is enabled.
+ */
+STATIC void grspw_bdrings_init(struct grspw_dma_priv *dma)
+{
+	struct grspw_ring *r;
+	int i;
+
+	/* Empty BD rings */
+	dma->rx_ring_head = dma->rx_ring_base;
+	dma->rx_ring_tail = dma->rx_ring_base;
+	dma->tx_ring_head = dma->tx_ring_base;
+	dma->tx_ring_tail = dma->tx_ring_base;
+
+	/* Init RX Descriptors */
+	r = (struct grspw_ring *)dma->rx_ring_base;
+	for (i=0; i<GRSPW_RXBD_NR; i++) {
+
+		/* Init Ring Entry */
+		r[i].next = &r[i+1];
+		r[i].bd.rx = &dma->rx_bds[i];
+		r[i].pkt = NULL;
+
+		/* Init HW Descriptor */
+		BD_WRITE(&r[i].bd.rx->ctrl, 0);
+		BD_WRITE(&r[i].bd.rx->addr, 0);
+	}
+	/* Close the circular list */
+	r[GRSPW_RXBD_NR-1].next = &r[0];
+
+	/* Init TX Descriptors */
+	r = (struct grspw_ring *)dma->tx_ring_base;
+	for (i=0; i<GRSPW_TXBD_NR; i++) {
+
+		/* Init Ring Entry */
+		r[i].next = &r[i+1];
+		r[i].bd.tx = &dma->tx_bds[i];
+		r[i].pkt = NULL;
+
+		/* Init HW Descriptor */
+		BD_WRITE(&r[i].bd.tx->ctrl, 0);
+		BD_WRITE(&r[i].bd.tx->haddr, 0);
+		BD_WRITE(&r[i].bd.tx->dlen, 0);
+		BD_WRITE(&r[i].bd.tx->daddr, 0);
+	}
+	/* Close the circular list */
+	r[GRSPW_TXBD_NR-1].next = &r[0];
+}
+
+/* Try to populate descriptor ring with as many as possible READY unused packet
+ * buffers. The packets assigned with to a descriptor are put in the end of
+ * the scheduled list.
+ *
+ * The number of Packets scheduled is returned.
+ *
+ *  - READY List -> RX-SCHED List
+ *  - Descriptors are initialized and enabled for reception
+ *
+ * Caller is expected to hold the RX DMA channel lock.
+ */
+STATIC int grspw_rx_schedule_ready(struct grspw_dma_priv *dma)
+{
+	int cnt;
+	unsigned int ctrl, dmactrl;
+	void *hwaddr;
+	struct grspw_rxring *curr_bd;
+	struct grspw_pkt *curr_pkt, *last_pkt;
+	struct grspw_list lst;
+	SPIN_IRQFLAGS(irqflags);
+
+	/* Is Ready Q empty? */
+	if (grspw_list_is_empty(&dma->ready))
+		return 0;
+
+	cnt = 0;
+	lst.head = curr_pkt = dma->ready.head;
+	curr_bd = dma->rx_ring_head;
+	/* Stop at the first descriptor that is still in use (pkt assigned) */
+	while (!curr_bd->pkt) {
+
+		/* Assign Packet to descriptor */
+		curr_bd->pkt = curr_pkt;
+
+		/* Prepare descriptor address. */
+		hwaddr = curr_pkt->data;
+		if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
+			drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
+					 hwaddr, &hwaddr);
+			if (curr_pkt->data == hwaddr) /* translation needed? */
+				curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
+		}
+		BD_WRITE(&curr_bd->bd->addr, hwaddr);
+
+		ctrl = GRSPW_RXBD_EN;
+		if (curr_bd->next == dma->rx_ring_base) {
+			/* Wrap around (only needed when smaller descriptor
+			 * table)
+			 */
+			ctrl |= GRSPW_RXBD_WR;
+		}
+
+		/* Is this Packet going to be an interrupt Packet? */
+		if ((--dma->rx_irq_en_cnt_curr) <= 0) {
+			if (dma->cfg.rx_irq_en_cnt == 0) {
+				/* IRQ is disabled. A big number to avoid
+				 * equal to zero too often
+				 */
+				dma->rx_irq_en_cnt_curr = 0x3fffffff;
+			} else {
+				dma->rx_irq_en_cnt_curr = dma->cfg.rx_irq_en_cnt;
+				ctrl |= GRSPW_RXBD_IE;
+			}
+		}
+
+		/* Per-packet IRQ request overrides the counter */
+		if (curr_pkt->flags & RXPKT_FLAG_IE)
+			ctrl |= GRSPW_RXBD_IE;
+
+		/* Enable descriptor */
+		BD_WRITE(&curr_bd->bd->ctrl, ctrl);
+
+		last_pkt = curr_pkt;
+		curr_bd = curr_bd->next;
+		cnt++;
+
+		/* Get Next Packet from Ready Queue */
+		if (curr_pkt == dma->ready.tail) {
+			/* Handled all in ready queue. */
+			curr_pkt = NULL;
+			break;
+		}
+		curr_pkt = curr_pkt->next;
+	}
+
+	/* Has Packets been scheduled? */
+	if (cnt > 0) {
+		/* Prepare list for insertion/deletion */
+		lst.tail = last_pkt;
+
+		/* Remove scheduled packets from ready queue */
+		grspw_list_remove_head_list(&dma->ready, &lst);
+		dma->ready_cnt -= cnt;
+		if (dma->stats.ready_cnt_min > dma->ready_cnt)
+			dma->stats.ready_cnt_min = dma->ready_cnt;
+
+		/* Insert scheduled packets into scheduled queue */
+		grspw_list_append_list(&dma->rx_sched, &lst);
+		dma->rx_sched_cnt += cnt;
+		if (dma->stats.rx_sched_cnt_max < dma->rx_sched_cnt)
+			dma->stats.rx_sched_cnt_max = dma->rx_sched_cnt;
+
+		/* Update RX ring position */
+		dma->rx_ring_head = curr_bd;
+
+		/* Make hardware aware of the newly enabled descriptors
+		 * We must protect from ISR which writes RI|TI
+		 */
+		SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
+		dmactrl = REG_READ(&dma->regs->ctrl);
+		dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
+		dmactrl |= GRSPW_DMACTRL_RE | GRSPW_DMACTRL_RD;
+		REG_WRITE(&dma->regs->ctrl, dmactrl);
+		SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
+	}
+
+	return cnt;
+}
+
+/* Scans the RX descriptor table for scheduled Packets that have been received,
+ * and moves these Packets from the head of the scheduled queue to the
+ * tail of the recv queue.
+ *
+ * Also, for all packets the status is updated.
+ *
+ *  - RX-SCHED List -> RECV List
+ *
+ * Return Value
+ * Number of packets moved
+ *
+ * Caller is expected to hold the RX DMA channel lock.
+ */
+STATIC int grspw_rx_process_scheduled(struct grspw_dma_priv *dma)
+{
+	struct grspw_rxring *curr;
+	struct grspw_pkt *last_pkt;
+	int recv_pkt_cnt = 0;
+	unsigned int ctrl;
+	struct grspw_list lst;
+
+	curr = dma->rx_ring_tail;
+
+	/* Step into RX ring to find if packets have been scheduled for
+	 * reception.
+	 */
+	if (!curr->pkt)
+		return 0; /* No scheduled packets, thus no received, abort */
+
+	/* There has been Packets scheduled ==> scheduled Packets may have been
+	 * received and needs to be collected into RECV List.
+	 *
+	 * A temporary list "lst" with all received packets is created.
+	 */
+	lst.head = curr->pkt;
+
+	/* Loop until first enabled "unreceived" SpW Packet is found.
+	 * An unused descriptor is indicated by an unassigned pkt field.
+	 * EN cleared by HW means the descriptor has been consumed.
+	 */
+	while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_RXBD_EN)) {
+		/* Handle one received Packet */
+
+		/* Remember last handled Packet so that insertion/removal from
+		 * Packet lists go fast.
+		 */
+		last_pkt = curr->pkt;
+
+		/* Get Length of Packet in bytes, and reception options */
+		last_pkt->dlen = (ctrl & GRSPW_RXBD_LEN) >> GRSPW_RXBD_LEN_BIT;
+
+		/* Set flags to indicate error(s) and CRC information,
+		 * and Mark Received.
+		 */
+		last_pkt->flags = (last_pkt->flags & ~RXPKT_FLAG_OUTPUT_MASK) |
+				  ((ctrl >> 20) & RXPKT_FLAG_OUTPUT_MASK) |
+				  RXPKT_FLAG_RX;
+
+		/* Packet was Truncated? */
+		if (ctrl & GRSPW_RXBD_TR)
+			dma->stats.rx_err_trunk++;
+
+		/* Error End-Of-Packet? */
+		if (ctrl & GRSPW_RXBD_EP)
+			dma->stats.rx_err_endpkt++;
+		curr->pkt = NULL; /* Mark descriptor unused */
+
+		/* Increment */
+		curr = curr->next;
+		recv_pkt_cnt++;
+	}
+
+	/* 1. Remove all handled packets from scheduled queue
+	 * 2. Put all handled packets into recv queue
+	 */
+	if (recv_pkt_cnt > 0) {
+
+		/* Update Stats, Number of Received Packets */
+		dma->stats.rx_pkts += recv_pkt_cnt;
+
+		/* Save RX ring position */
+		dma->rx_ring_tail = curr;
+
+		/* Prepare list for insertion/deletion */
+		lst.tail = last_pkt;
+
+		/* Remove received Packets from RX-SCHED queue */
+		grspw_list_remove_head_list(&dma->rx_sched, &lst);
+		dma->rx_sched_cnt -= recv_pkt_cnt;
+		if (dma->stats.rx_sched_cnt_min > dma->rx_sched_cnt)
+			dma->stats.rx_sched_cnt_min = dma->rx_sched_cnt;
+
+		/* Insert received Packets into RECV queue */
+		grspw_list_append_list(&dma->recv, &lst);
+		dma->recv_cnt += recv_pkt_cnt;
+		if (dma->stats.recv_cnt_max < dma->recv_cnt)
+			dma->stats.recv_cnt_max = dma->recv_cnt;
+	}
+
+	return recv_pkt_cnt;
+}
+
+/* Try to populate descriptor ring with as many SEND packets as possible. The
+ * packets assigned with to a descriptor are put in the end of
+ * the scheduled list.
+ *
+ * The number of Packets scheduled is returned.
+ *
+ *  - SEND List -> TX-SCHED List
+ *  - Descriptors are initialized and enabled for transmission
+ *
+ * Caller is expected to hold the TX DMA channel lock.
+ */
+STATIC int grspw_tx_schedule_send(struct grspw_dma_priv *dma)
+{
+	int cnt;
+	unsigned int ctrl, dmactrl;
+	void *hwaddr;
+	struct grspw_txring *curr_bd;
+	struct grspw_pkt *curr_pkt, *last_pkt;
+	struct grspw_list lst;
+	SPIN_IRQFLAGS(irqflags);
+
+	/* Is Send Q empty? */
+	if (grspw_list_is_empty(&dma->send))
+		return 0;
+
+	cnt = 0;
+	lst.head = curr_pkt = dma->send.head;
+	curr_bd = dma->tx_ring_head;
+	/* Stop at the first descriptor that is still in use (pkt assigned) */
+	while (!curr_bd->pkt) {
+
+		/* Assign Packet to descriptor */
+		curr_bd->pkt = curr_pkt;
+
+		/* Set up header transmission */
+		if (curr_pkt->hdr && curr_pkt->hlen) {
+			hwaddr = curr_pkt->hdr;
+			if (curr_pkt->flags & PKT_FLAG_TR_HDR) {
+				drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
+						 hwaddr, &hwaddr);
+				/* translation needed? */
+				if (curr_pkt->hdr == hwaddr)
+					curr_pkt->flags &= ~PKT_FLAG_TR_HDR;
+			}
+			BD_WRITE(&curr_bd->bd->haddr, hwaddr);
+			ctrl = GRSPW_TXBD_EN |
+			       (curr_pkt->hlen & GRSPW_TXBD_HLEN);
+		} else {
+			ctrl = GRSPW_TXBD_EN;
+		}
+		/* Enable IRQ generation and CRC options as specified
+		 * by user.
+		 */
+		ctrl |= (curr_pkt->flags & TXPKT_FLAG_INPUT_MASK) << 8;
+
+		if (curr_bd->next == dma->tx_ring_base) {
+			/* Wrap around (only needed when smaller descriptor table) */
+			ctrl |= GRSPW_TXBD_WR;
+		}
+
+		/* Is this Packet going to be an interrupt Packet? */
+		if ((--dma->tx_irq_en_cnt_curr) <= 0) {
+			if (dma->cfg.tx_irq_en_cnt == 0) {
+				/* IRQ is disabled.
+				 * A big number to avoid equal to zero too often
+				 */
+				dma->tx_irq_en_cnt_curr = 0x3fffffff;
+			} else {
+				dma->tx_irq_en_cnt_curr = dma->cfg.tx_irq_en_cnt;
+				ctrl |= GRSPW_TXBD_IE;
+			}
+		}
+
+		/* Prepare descriptor address. Parts of CTRL is written to
+		 * DLEN for debug-only (CTRL is cleared by HW).
+		 */
+		if (curr_pkt->data && curr_pkt->dlen) {
+			hwaddr = curr_pkt->data;
+			if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
+				drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
+						 hwaddr, &hwaddr);
+				/* translation needed? */
+				if (curr_pkt->data == hwaddr)
+					curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
+			}
+			BD_WRITE(&curr_bd->bd->daddr, hwaddr);
+			BD_WRITE(&curr_bd->bd->dlen, curr_pkt->dlen |
+						     ((ctrl & 0x3f000) << 12));
+		} else {
+			BD_WRITE(&curr_bd->bd->daddr, 0);
+			BD_WRITE(&curr_bd->bd->dlen, ((ctrl & 0x3f000) << 12));
+		}
+
+		/* Enable descriptor */
+		BD_WRITE(&curr_bd->bd->ctrl, ctrl);
+
+		last_pkt = curr_pkt;
+		curr_bd = curr_bd->next;
+		cnt++;
+
+		/* Get Next Packet from Send Queue */
+		if (curr_pkt == dma->send.tail) {
+			/* Handled all in send queue. */
+			curr_pkt = NULL;
+			break;
+		}
+		curr_pkt = curr_pkt->next;
+	}
+
+	/* Have Packets been scheduled? */
+	if (cnt > 0) {
+		/* Prepare list for insertion/deletion */
+		lst.tail = last_pkt;
+
+		/* Remove scheduled packets from send queue */
+		grspw_list_remove_head_list(&dma->send, &lst);
+		dma->send_cnt -= cnt;
+		if (dma->stats.send_cnt_min > dma->send_cnt)
+			dma->stats.send_cnt_min = dma->send_cnt;
+
+		/* Insert scheduled packets into scheduled queue */
+		grspw_list_append_list(&dma->tx_sched, &lst);
+		dma->tx_sched_cnt += cnt;
+		if (dma->stats.tx_sched_cnt_max < dma->tx_sched_cnt)
+			dma->stats.tx_sched_cnt_max = dma->tx_sched_cnt;
+
+		/* Update TX ring position */
+		dma->tx_ring_head = curr_bd;
+
+		/* Make hardware aware of the newly enabled descriptors */
+		SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
+		dmactrl = REG_READ(&dma->regs->ctrl);
+		dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
+		dmactrl |= GRSPW_DMACTRL_TE;
+		REG_WRITE(&dma->regs->ctrl, dmactrl);
+		SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
+	}
+	return cnt;
+}
+
+/* Scans the TX descriptor table for transmitted packets, and moves these
+ * packets from the head of the scheduled queue to the tail of the sent queue.
+ *
+ * Also, for all packets the status is updated.
+ *
+ *  - TX-SCHED List -> SENT List
+ *
+ * Return Value
+ * Number of packets moved
+ *
+ * Caller is expected to hold the TX DMA channel lock.
+ */
+STATIC int grspw_tx_process_scheduled(struct grspw_dma_priv *dma)
+{
+	struct grspw_txring *curr;
+	struct grspw_pkt *last_pkt;
+	int sent_pkt_cnt = 0;
+	unsigned int ctrl;
+	struct grspw_list lst;
+
+	curr = dma->tx_ring_tail;
+
+	/* Step into TX ring to find if packets have been scheduled for
+	 * transmission.
+	 */
+	if (!curr->pkt)
+		return 0; /* No scheduled packets, thus no sent, abort */
+
+	/* There has been Packets scheduled ==> scheduled Packets may have been
+	 * transmitted and needs to be collected into SENT List.
+	 *
+	 * A temporary list "lst" with all sent packets is created.
+	 */
+	lst.head = curr->pkt;
+
+	/* Loop until first enabled "un-transmitted" SpW Packet is found.
+	 * An unused descriptor is indicated by an unassigned pkt field.
+	 * EN cleared by HW means the descriptor has been consumed.
+	 */
+	while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_TXBD_EN)) {
+		/* Handle one sent Packet */
+
+		/* Remember last handled Packet so that insertion/removal from
+		 * packet lists go fast.
+		 */
+		last_pkt = curr->pkt;
+
+		/* Set flags to indicate error(s) and Mark Sent.
+		 */
+		last_pkt->flags = (last_pkt->flags & ~TXPKT_FLAG_OUTPUT_MASK) |
+					(ctrl & TXPKT_FLAG_LINKERR) |
+					TXPKT_FLAG_TX;
+
+		/* Sent packet experienced link error? */
+		if (ctrl & GRSPW_TXBD_LE)
+			dma->stats.tx_err_link++;
+
+		curr->pkt = NULL; /* Mark descriptor unused */
+
+		/* Increment */
+		curr = curr->next;
+		sent_pkt_cnt++;
+	}
+
+	/* 1. Remove all handled packets from TX-SCHED queue
+	 * 2. Put all handled packets into SENT queue
+	 */
+	if (sent_pkt_cnt > 0) {
+		/* Update Stats, Number of Transmitted Packets */
+		dma->stats.tx_pkts += sent_pkt_cnt;
+
+		/* Save TX ring position */
+		dma->tx_ring_tail = curr;
+
+		/* Prepare list for insertion/deletion */
+		lst.tail = last_pkt;
+
+		/* Remove sent packets from TX-SCHED queue */
+		grspw_list_remove_head_list(&dma->tx_sched, &lst);
+		dma->tx_sched_cnt -= sent_pkt_cnt;
+		if (dma->stats.tx_sched_cnt_min > dma->tx_sched_cnt)
+			dma->stats.tx_sched_cnt_min = dma->tx_sched_cnt;
+
+		/* Insert sent packets into SENT queue */
+		grspw_list_append_list(&dma->sent, &lst);
+		dma->sent_cnt += sent_pkt_cnt;
+		if (dma->stats.sent_cnt_max < dma->sent_cnt)
+			dma->stats.sent_cnt_max = dma->sent_cnt;
+	}
+
+	return sent_pkt_cnt;
+}
+
+/* Open DMA channel chan_no of device d. Allocates channel semaphores and
+ * the software descriptor rings, applies a default configuration and resets
+ * the channel's software state.
+ * Returns the channel handle, or NULL if the channel number is invalid,
+ * the channel is already open, or resource allocation fails.
+ */
+void *grspw_dma_open(void *d, int chan_no)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_dma_priv *dma;
+	int size;
+
+	if ((chan_no < 0) || (priv->hwsup.ndma_chans <= chan_no))
+		return NULL;
+
+	dma = &priv->dma[chan_no];
+
+	/* Take GRSPW lock */
+	if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return NULL;
+
+	if (dma->open) {
+		dma = NULL;
+		goto out;
+	}
+
+	dma->started = 0;
+
+	/* Set Default Configuration:
+	 *
+	 *  - MAX RX Packet Length =
+	 *  - Disable IRQ generation
+	 *  -
+	 */
+	dma->cfg.rxmaxlen = DEFAULT_RXMAX;
+	dma->cfg.rx_irq_en_cnt = 0;
+	dma->cfg.tx_irq_en_cnt = 0;
+	dma->cfg.flags = DMAFLAG_NO_SPILL;
+
+	/* set to NULL so that error exit works correctly */
+	dma->sem_rxdma = RTEMS_ID_NONE;
+	dma->sem_txdma = RTEMS_ID_NONE;
+	dma->rx_wait.sem_wait = RTEMS_ID_NONE;
+	dma->tx_wait.sem_wait = RTEMS_ID_NONE;
+	dma->rx_ring_base = NULL;
+
+	/* DMA Channel Semaphore created with count = 1 */
+	if (rtems_semaphore_create(
+	    rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2), 1,
+	    RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
+	    RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
+	    RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_rxdma) != RTEMS_SUCCESSFUL) {
+		dma->sem_rxdma = RTEMS_ID_NONE;
+		goto err;
+	}
+	if (rtems_semaphore_create(
+	    rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2+1), 1,
+	    RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
+	    RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
+	    RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_txdma) != RTEMS_SUCCESSFUL) {
+		dma->sem_txdma = RTEMS_ID_NONE;
+		goto err;
+	}
+
+	/* Allocate memory for the two descriptor rings */
+	size = sizeof(struct grspw_ring) * (GRSPW_RXBD_NR + GRSPW_TXBD_NR);
+	dma->rx_ring_base = grlib_malloc(size);
+	/* TX ring is placed directly after the RX ring in the same block */
+	dma->tx_ring_base = (struct grspw_txring *)&dma->rx_ring_base[GRSPW_RXBD_NR];
+	if (dma->rx_ring_base == NULL)
+		goto err;
+
+	/* Create DMA RX and TX Channel semaphore with count = 0 */
+	if (rtems_semaphore_create(
+	    rtems_build_name('S', 'R', '0' + priv->index, '0' + chan_no), 0,
+	    RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
+	    RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
+	    RTEMS_NO_PRIORITY_CEILING, 0, &dma->rx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
+		dma->rx_wait.sem_wait = RTEMS_ID_NONE;
+		goto err;
+	}
+	if (rtems_semaphore_create(
+	    rtems_build_name('S', 'T', '0' + priv->index, '0' + chan_no), 0,
+	    RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
+	    RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
+	    RTEMS_NO_PRIORITY_CEILING, 0, &dma->tx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
+		dma->tx_wait.sem_wait = RTEMS_ID_NONE;
+		goto err;
+	}
+
+	/* Reset software structures */
+	grspw_dma_reset(dma);
+
+	/* Take the device */
+	dma->open = 1;
+out:
+	/* Return GRSPW Lock */
+	rtems_semaphore_release(grspw_sem);
+
+	return dma;
+
+	/* initialization error happened */
+err:
+	if (dma->sem_rxdma != RTEMS_ID_NONE)
+		rtems_semaphore_delete(dma->sem_rxdma);
+	if (dma->sem_txdma != RTEMS_ID_NONE)
+		rtems_semaphore_delete(dma->sem_txdma);
+	if (dma->rx_wait.sem_wait != RTEMS_ID_NONE)
+		rtems_semaphore_delete(dma->rx_wait.sem_wait);
+	if (dma->tx_wait.sem_wait != RTEMS_ID_NONE)
+		rtems_semaphore_delete(dma->tx_wait.sem_wait);
+	if (dma->rx_ring_base)
+		free(dma->rx_ring_base);
+	dma = NULL;
+	goto out;
+}
+
+/* Initialize Software Structures:
+ *  - Clear all Queues
+ *  - init BD ring
+ *  - init IRQ counter
+ *  - clear statistics counters
+ *  - init wait structures and semaphores
+ */
+STATIC void grspw_dma_reset(struct grspw_dma_priv *dma)
+{
+	/* Empty RX and TX queues */
+	grspw_list_clr(&dma->ready);
+	grspw_list_clr(&dma->rx_sched);
+	grspw_list_clr(&dma->recv);
+	grspw_list_clr(&dma->send);
+	grspw_list_clr(&dma->tx_sched);
+	grspw_list_clr(&dma->sent);
+	dma->ready_cnt = 0;
+	dma->rx_sched_cnt = 0;
+	dma->recv_cnt = 0;
+	dma->send_cnt = 0;
+	dma->tx_sched_cnt = 0;
+	dma->sent_cnt = 0;
+
+	/* Restart the IRQ generation counters */
+	dma->rx_irq_en_cnt_curr = 0;
+	dma->tx_irq_en_cnt_curr = 0;
+
+	grspw_bdrings_init(dma);
+
+	dma->rx_wait.waiting = 0;
+	dma->tx_wait.waiting = 0;
+
+	grspw_dma_stats_clr(dma);
+}
+
+/* Close a previously opened DMA channel and free its resources.
+ * Returns 0 on success (or if already closed), -1 if a channel lock can
+ * not be taken, and 1 if the channel is still active (started or has
+ * blocked waiters) and therefore can not be closed.
+ */
+int grspw_dma_close(void *c)
+{
+	struct grspw_dma_priv *dma = c;
+
+	if (!dma->open)
+		return 0;
+
+	/* Take device lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return -1;
+	if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL) {
+		rtems_semaphore_release(dma->sem_rxdma);
+		return -1;
+	}
+
+	/* Can not close active DMA channel. User must stop DMA and make sure
+	 * no threads are active/blocked within driver.
+	 */
+	if (dma->started || dma->rx_wait.waiting || dma->tx_wait.waiting) {
+		rtems_semaphore_release(dma->sem_txdma);
+		rtems_semaphore_release(dma->sem_rxdma);
+		return 1;
+	}
+
+	/* Free resources */
+	rtems_semaphore_delete(dma->rx_wait.sem_wait);
+	rtems_semaphore_delete(dma->tx_wait.sem_wait);
+	/* Release and delete lock. Operations requiring lock will fail */
+	rtems_semaphore_delete(dma->sem_txdma);
+	rtems_semaphore_delete(dma->sem_rxdma);
+	dma->sem_txdma = RTEMS_ID_NONE;
+	dma->sem_rxdma = RTEMS_ID_NONE;
+
+	/* Free memory (TX ring shares the RX ring allocation) */
+	if (dma->rx_ring_base)
+		free(dma->rx_ring_base);
+	dma->rx_ring_base = NULL;
+	dma->tx_ring_base = NULL;
+
+	dma->open = 0;
+	return 0;
+}
+
+/* Re-enable RX (rxtx bit0) and/or TX (rxtx bit1) DMA interrupts, typically
+ * called by a work-task after DMA table processing. When force is non-zero
+ * the interrupt is enabled regardless of the channel's IRQ configuration.
+ * Returns 1 if DMA is stopped, 2 on DMA error, otherwise the previously
+ * pending PR/PS status bits.
+ */
+unsigned int grspw_dma_enable_int(void *c, int rxtx, int force)
+{
+	struct grspw_dma_priv *dma = c;
+	int rc = 0;
+	unsigned int ctrl, ctrl_old;
+	SPIN_IRQFLAGS(irqflags);
+
+	SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
+	if (dma->started == 0) {
+		rc = 1; /* DMA stopped */
+		goto out;
+	}
+	ctrl = REG_READ(&dma->regs->ctrl);
+	ctrl_old = ctrl;
+
+	/* Read/Write DMA error ? */
+	if (ctrl & GRSPW_DMA_STATUS_ERROR) {
+		rc = 2; /* DMA error */
+		goto out;
+	}
+
+	/* DMA has finished a TX/RX packet and user wants work-task to
+	 * take care of DMA table processing.
+	 */
+	ctrl &= ~GRSPW_DMACTRL_AT;
+
+	if ((rxtx & 1) == 0)
+		ctrl &= ~GRSPW_DMACTRL_PR;
+	else if (force || ((dma->cfg.rx_irq_en_cnt != 0) ||
+		 (dma->cfg.flags & DMAFLAG2_RXIE)))
+		ctrl |= GRSPW_DMACTRL_RI;
+
+	if ((rxtx & 2) == 0)
+		ctrl &= ~GRSPW_DMACTRL_PS;
+	else if (force || ((dma->cfg.tx_irq_en_cnt != 0) ||
+		 (dma->cfg.flags & DMAFLAG2_TXIE)))
+		ctrl |= GRSPW_DMACTRL_TI;
+
+	REG_WRITE(&dma->regs->ctrl, ctrl);
+	/* Re-enabled interrupts previously enabled */
+	rc = ctrl_old & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS);
+out:
+	SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
+	return rc;
+}
+
+/* Schedule List of packets for transmission at some point in
+ * future.
+ *
+ * 1. Move transmitted packets to SENT List (SCHED->SENT), unless opts bit0
+ * 2. Add the requested packets to the SEND List (USER->SEND)
+ * 3. Schedule as many packets as possible (SEND->SCHED), unless opts bit1
+ *
+ * Returns 0 on success, 1 if DMA is stopped, -1 if the lock can not be taken.
+ * The caller's count must match the number of packets in pkts.
+ */
+int grspw_dma_tx_send(void *c, int opts, struct grspw_list *pkts, int count)
+{
+	struct grspw_dma_priv *dma = c;
+	int ret;
+
+	/* Take DMA channel lock */
+	if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return -1;
+
+	if (dma->started == 0) {
+		ret = 1; /* signal DMA has been stopped */
+		goto out;
+	}
+	ret = 0;
+
+	/* 1. Move transmitted packets to SENT List (SCHED->SENT) */
+	if ((opts & 1) == 0)
+		grspw_tx_process_scheduled(dma);
+
+	/* 2. Add the requested packets to the SEND List (USER->SEND) */
+	if (pkts && (count > 0)) {
+		grspw_list_append_list(&dma->send, pkts);
+		dma->send_cnt += count;
+		if (dma->stats.send_cnt_max < dma->send_cnt)
+			dma->stats.send_cnt_max = dma->send_cnt;
+	}
+
+	/* 3. Schedule as many packets as possible (SEND->SCHED) */
+	if ((opts & 2) == 0)
+		grspw_tx_schedule_send(dma);
+
+out:
+	/* Unlock DMA channel */
+	rtems_semaphore_release(dma->sem_txdma);
+
+	return ret;
+}
+
+/* Reclaim sent packet buffers back to the caller.
+ *
+ * 1. Move transmitted packets to SENT List (SCHED->SENT), unless opts bit0
+ * 2. Move all (count NULL or -1) or up to *count SENT packets to the
+ *    caller's list (SENT->USER); *count is updated with the number moved
+ * 3. Schedule as many packets as possible (SEND->SCHED), unless opts bit1
+ *
+ * Returns 0 when DMA is started, 1 when stopped, -1 if the lock can not
+ * be taken.
+ */
+int grspw_dma_tx_reclaim(void *c, int opts, struct grspw_list *pkts, int *count)
+{
+	struct grspw_dma_priv *dma = c;
+	struct grspw_pkt *pkt, *lastpkt;
+	int cnt, started;
+
+	/* Take DMA channel lock */
+	if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return -1;
+
+	/* 1. Move transmitted packets to SENT List (SCHED->SENT) */
+	started = dma->started;
+	if ((started > 0) && ((opts & 1) == 0))
+		grspw_tx_process_scheduled(dma);
+
+	/* Move all/count SENT packet to the callers list (SENT->USER) */
+	if (pkts) {
+		if ((count == NULL) || (*count == -1) ||
+		    (*count >= dma->sent_cnt)) {
+			/* Move all SENT Packets */
+			*pkts = dma->sent;
+			grspw_list_clr(&dma->sent);
+			if (count)
+				*count = dma->sent_cnt;
+			dma->sent_cnt = 0;
+		} else {
+			/* Move a number of SENT Packets */
+			pkts->head = pkt = lastpkt = dma->sent.head;
+			cnt = 0;
+			while (cnt < *count) {
+				lastpkt = pkt;
+				pkt = pkt->next;
+				cnt++;
+			}
+			if (cnt > 0) {
+				pkts->tail = lastpkt;
+				grspw_list_remove_head_list(&dma->sent, pkts);
+				dma->sent_cnt -= cnt;
+			} else {
+				grspw_list_clr(pkts);
+			}
+		}
+	} else if (count) {
+		*count = 0;
+	}
+
+	/* 3. Schedule as many packets as possible (SEND->SCHED) */
+	if ((started > 0) && ((opts & 2) == 0))
+		grspw_tx_schedule_send(dma);
+
+	/* Unlock DMA channel */
+	rtems_semaphore_release(dma->sem_txdma);
+
+	return (~started) & 1; /* signal DMA has been stopped */
+}
+
+/* Report TX queue lengths: send, scheduled and sent counts, plus the number
+ * of descriptors already processed by hardware (hw). Any output pointer may
+ * be NULL to skip that counter.
+ */
+void grspw_dma_tx_count(void *c, int *send, int *sched, int *sent, int *hw)
+{
+	struct grspw_dma_priv *dma = c;
+	int sched_cnt, diff;
+	unsigned int hwbd;
+	struct grspw_txbd *tailbd;
+
+	/* Take device lock - Wait until we get semaphore.
+	 * The lock is taken so that the counters are in sync with each other
+	 * and that DMA descriptor table and tx_ring_tail is not being updated
+	 * during HW counter processing in this function.
+	 */
+	if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return;
+
+	if (send)
+		*send = dma->send_cnt;
+	sched_cnt = dma->tx_sched_cnt;
+	if (sched)
+		*sched = sched_cnt;
+	if (sent)
+		*sent = dma->sent_cnt;
+	if (hw) {
+		/* Calculate number of descriptors (processed by HW) between
+		 * HW pointer and oldest SW pointer.
+		 */
+		hwbd = REG_READ(&dma->regs->txdesc);
+		tailbd = dma->tx_ring_tail->bd;
+		diff = ((hwbd - (unsigned int)tailbd) / GRSPW_TXBD_SIZE) &
+			(GRSPW_TXBD_NR - 1);
+		/* Handle special case when HW and SW pointers are equal
+		 * because all TX descriptors have been processed by HW.
+		 */
+		if ((diff == 0) && (sched_cnt == GRSPW_TXBD_NR) &&
+		    ((BD_READ(&tailbd->ctrl) & GRSPW_TXBD_EN) == 0)) {
+			diff = GRSPW_TXBD_NR;
+		}
+		*hw = diff;
+	}
+
+	/* Unlock DMA channel */
+	rtems_semaphore_release(dma->sem_txdma);
+}
+
+/* Evaluate the TX wait condition set up in dma->tx_wait.
+ * send condition: at most send_cnt packets queued in SEND+SCHED.
+ * sent condition: at least sent_cnt packets in SENT.
+ * Returns non-zero when the combined (AND/OR per tx_wait.op) condition holds.
+ */
+static inline int grspw_tx_wait_eval(struct grspw_dma_priv *dma)
+{
+	int send_val, sent_val;
+
+	if (dma->tx_wait.send_cnt >= (dma->send_cnt + dma->tx_sched_cnt))
+		send_val = 1;
+	else
+		send_val = 0;
+
+	if (dma->tx_wait.sent_cnt <= dma->sent_cnt)
+		sent_val = 1;
+	else
+		sent_val = 0;
+
+	/* AND or OR ? */
+	if (dma->tx_wait.op == 0)
+		return send_val & sent_val; /* AND */
+	else
+		return send_val | sent_val; /* OR */
+}
+
+/* Block until send_cnt or fewer packets are Queued in "Send and Scheduled" Q,
+ * op (AND or OR), sent_cnt or more packet "have been sent" (Sent Q) condition
+ * is met.
+ * If a link error occurs and the Stop on Link error is defined, this function
+ * will also return to caller.
+ * timeout is in clock ticks; 0 means wait forever.
+ * Returns 0 when the condition is met, 1 if DMA stopped, 2 on timeout,
+ * 3 if another thread is already waiting, -1 on error. Only one waiter
+ * per channel is supported.
+ */
+int grspw_dma_tx_wait(void *c, int send_cnt, int op, int sent_cnt, int timeout)
+{
+	struct grspw_dma_priv *dma = c;
+	int ret, rc, initialized = 0;
+
+	if (timeout == 0)
+		timeout = RTEMS_NO_TIMEOUT;
+
+check_condition:
+
+	/* Take DMA channel lock */
+	if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return -1;
+
+	/* Check so that no other thread is waiting, this driver only supports
+	 * one waiter at a time.
+	 */
+	if (initialized == 0 && dma->tx_wait.waiting) {
+		ret = 3;
+		goto out_release;
+	}
+
+	/* Stop if link error or similar (DMA stopped), abort */
+	if (dma->started == 0) {
+		ret = 1;
+		goto out_release;
+	}
+
+	/* Set up Condition */
+	dma->tx_wait.send_cnt = send_cnt;
+	dma->tx_wait.op = op;
+	dma->tx_wait.sent_cnt = sent_cnt;
+
+	if (grspw_tx_wait_eval(dma) == 0) {
+		/* Prepare Wait */
+		initialized = 1;
+		dma->tx_wait.waiting = 1;
+
+		/* Release DMA channel lock */
+		rtems_semaphore_release(dma->sem_txdma);
+
+		/* Try to take Wait lock, if this fail link may have gone down
+		 * or user stopped this DMA channel
+		 */
+		rc = rtems_semaphore_obtain(dma->tx_wait.sem_wait, RTEMS_WAIT,
+						timeout);
+		if (rc == RTEMS_TIMEOUT) {
+			ret = 2;
+			goto out;
+		} else if (rc == RTEMS_UNSATISFIED ||
+			   rc == RTEMS_OBJECT_WAS_DELETED) {
+			ret = 1; /* sem was flushed/deleted, means DMA stop */
+			goto out;
+		} else if (rc != RTEMS_SUCCESSFUL) {
+			/* Unknown Error */
+			ret = -1;
+			goto out;
+		} else if (dma->started == 0) {
+			ret = 1;
+			goto out;
+		}
+
+		/* Check condition once more */
+		goto check_condition;
+	}
+
+	ret = 0;
+
+out_release:
+	/* Unlock DMA channel */
+	rtems_semaphore_release(dma->sem_txdma);
+
+out:
+	/* NOTE(review): waiting flag is cleared without holding the channel
+	 * lock here - appears intentional (flag is only set by this single
+	 * permitted waiter), but verify against the ISR/stop paths.
+	 */
+	if (initialized)
+		dma->tx_wait.waiting = 0;
+	return ret;
+}
+
+/* Get received packets from the RECV queue.
+ *
+ * 1. Move received packets to RECV List (SCHED->RECV), unless opts bit0
+ * 2. Move all (count NULL or -1) or up to *count RECV packets to the
+ *    caller's list (RECV->USER); *count is updated with the number moved
+ * 3. Schedule as many free buffers as possible (READY->SCHED), unless
+ *    opts bit1
+ *
+ * Returns 0 when DMA is started, 1 when stopped, -1 if the lock can not
+ * be taken.
+ */
+int grspw_dma_rx_recv(void *c, int opts, struct grspw_list *pkts, int *count)
+{
+	struct grspw_dma_priv *dma = c;
+	struct grspw_pkt *pkt, *lastpkt;
+	int cnt, started;
+
+	/* Take DMA channel lock */
+	if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return -1;
+
+	/* 1. Move Scheduled packets to RECV List (SCHED->RECV) */
+	started = dma->started;
+	if (((opts & 1) == 0) && (started > 0))
+		grspw_rx_process_scheduled(dma);
+
+	/* Move all RECV packet to the callers list */
+	if (pkts) {
+		if ((count == NULL) || (*count == -1) ||
+		    (*count >= dma->recv_cnt)) {
+			/* Move all Received packets */
+			*pkts = dma->recv;
+			grspw_list_clr(&dma->recv);
+			if ( count )
+				*count = dma->recv_cnt;
+			dma->recv_cnt = 0;
+		} else {
+			/* Move a number of RECV Packets */
+			pkts->head = pkt = lastpkt = dma->recv.head;
+			cnt = 0;
+			while (cnt < *count) {
+				lastpkt = pkt;
+				pkt = pkt->next;
+				cnt++;
+			}
+			if (cnt > 0) {
+				pkts->tail = lastpkt;
+				grspw_list_remove_head_list(&dma->recv, pkts);
+				dma->recv_cnt -= cnt;
+			} else {
+				grspw_list_clr(pkts);
+			}
+		}
+	} else if (count) {
+		*count = 0;
+	}
+
+	/* 3. Schedule as many free packet buffers as possible (READY->SCHED) */
+	if (((opts & 2) == 0) && (started > 0))
+		grspw_rx_schedule_ready(dma);
+
+	/* Unlock DMA channel */
+	rtems_semaphore_release(dma->sem_rxdma);
+
+	return (~started) & 1; /* signal DMA has been stopped */
+}
+
+/* Hand free packet buffers to the driver for future reception.
+ *
+ * 1. Move received packets to RECV List (SCHED->RECV), unless opts bit0
+ * 2. Add the free buffers to the READY List (USER->READY)
+ * 3. Schedule as many packets as possible (READY->SCHED), unless opts bit1
+ *
+ * Returns 0 on success, 1 if DMA is stopped, -1 if the lock can not be
+ * taken. The caller's count must match the number of packets in pkts.
+ */
+int grspw_dma_rx_prepare(void *c, int opts, struct grspw_list *pkts, int count)
+{
+	struct grspw_dma_priv *dma = c;
+	int ret;
+
+	/* Take DMA channel lock */
+	if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return -1;
+
+	if (dma->started == 0) {
+		ret = 1;
+		goto out;
+	}
+
+	/* 1. Move Received packets to RECV List (SCHED->RECV) */
+	if ((opts & 1) == 0)
+		grspw_rx_process_scheduled(dma);
+
+	/* 2. Add the "free/ready" packet buffers to the READY List (USER->READY) */
+	if (pkts && (count > 0)) {
+		grspw_list_append_list(&dma->ready, pkts);
+		dma->ready_cnt += count;
+		if (dma->stats.ready_cnt_max < dma->ready_cnt)
+			dma->stats.ready_cnt_max = dma->ready_cnt;
+	}
+
+	/* 3. Schedule as many packets as possible (READY->SCHED) */
+	if ((opts & 2) == 0)
+		grspw_rx_schedule_ready(dma);
+
+	ret = 0;
+out:
+	/* Unlock DMA channel */
+	rtems_semaphore_release(dma->sem_rxdma);
+
+	return ret;
+}
+
+/* Report RX queue lengths: ready, scheduled and received counts, plus the
+ * number of descriptors already processed by hardware (hw). Any output
+ * pointer may be NULL to skip that counter.
+ */
+void grspw_dma_rx_count(void *c, int *ready, int *sched, int *recv, int *hw)
+{
+	struct grspw_dma_priv *dma = c;
+	int sched_cnt, diff;
+	unsigned int hwbd;
+	struct grspw_rxbd *tailbd;
+
+	/* Take device lock - Wait until we get semaphore.
+	 * The lock is taken so that the counters are in sync with each other
+	 * and that DMA descriptor table and rx_ring_tail is not being updated
+	 * during HW counter processing in this function.
+	 */
+	if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return;
+
+	if (ready)
+		*ready = dma->ready_cnt;
+	sched_cnt = dma->rx_sched_cnt;
+	if (sched)
+		*sched = sched_cnt;
+	if (recv)
+		*recv = dma->recv_cnt;
+	if (hw) {
+		/* Calculate number of descriptors (processed by HW) between
+		 * HW pointer and oldest SW pointer.
+		 */
+		hwbd = REG_READ(&dma->regs->rxdesc);
+		tailbd = dma->rx_ring_tail->bd;
+		diff = ((hwbd - (unsigned int)tailbd) / GRSPW_RXBD_SIZE) &
+			(GRSPW_RXBD_NR - 1);
+		/* Handle special case when HW and SW pointers are equal
+		 * because all RX descriptors have been processed by HW.
+		 */
+		if ((diff == 0) && (sched_cnt == GRSPW_RXBD_NR) &&
+		    ((BD_READ(&tailbd->ctrl) & GRSPW_RXBD_EN) == 0)) {
+			diff = GRSPW_RXBD_NR;
+		}
+		*hw = diff;
+	}
+
+	/* Unlock DMA channel */
+	rtems_semaphore_release(dma->sem_rxdma);
+}
+
+static inline int grspw_rx_wait_eval(struct grspw_dma_priv *dma)
+{
+ int ready_val, recv_val;
+
+ if (dma->rx_wait.ready_cnt >= (dma->ready_cnt + dma->rx_sched_cnt))
+ ready_val = 1;
+ else
+ ready_val = 0;
+
+ if (dma->rx_wait.recv_cnt <= dma->recv_cnt)
+ recv_val = 1;
+ else
+ recv_val = 0;
+
+ /* AND or OR ? */
+ if (dma->rx_wait.op == 0)
+ return ready_val & recv_val; /* AND */
+ else
+ return ready_val | recv_val; /* OR */
+}
+
/* Block the calling task until the RX condition is met:
 * - recv_cnt or more packets are queued in the RECV queue, combined by
 *   op (0 = AND, otherwise OR) with
 * - ready_cnt or fewer packet buffers remaining in the READY+SCHED queues.
 *
 * timeout is in clock ticks; 0 means wait forever. If a link error occurs
 * and Stop-on-link-error is configured, this function also returns, with
 * an error code.
 *
 * Returns:
 *   0  condition met
 *   1  DMA channel stopped (e.g. link error while waiting)
 *   2  timeout expired
 *   3  another task is already waiting (driver supports one waiter only)
 *  -1  semaphore error
 */
int grspw_dma_rx_wait(void *c, int recv_cnt, int op, int ready_cnt, int timeout)
{
	struct grspw_dma_priv *dma = c;
	int ret, rc, initialized = 0;

	if (timeout == 0)
		timeout = RTEMS_NO_TIMEOUT;

check_condition:

	/* Take DMA channel lock */
	if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
	    != RTEMS_SUCCESSFUL)
		return -1;

	/* Check so that no other thread is waiting, this driver only supports
	 * one waiter at a time.
	 */
	if (initialized == 0 && dma->rx_wait.waiting) {
		ret = 3;
		goto out_release;
	}

	/* Stop if link error or similar (DMA stopped), abort */
	if (dma->started == 0) {
		ret = 1;
		goto out_release;
	}

	/* Set up Condition */
	dma->rx_wait.recv_cnt = recv_cnt;
	dma->rx_wait.op = op;
	dma->rx_wait.ready_cnt = ready_cnt;

	if (grspw_rx_wait_eval(dma) == 0) {
		/* Condition not met yet - prepare to block */
		initialized = 1;
		dma->rx_wait.waiting = 1;

		/* Release channel lock */
		rtems_semaphore_release(dma->sem_rxdma);

		/* Block on the wait semaphore. It is released by the work
		 * task when the condition becomes true, and flushed/deleted
		 * if the link goes down or the user stops this DMA channel.
		 */
		rc = rtems_semaphore_obtain(dma->rx_wait.sem_wait, RTEMS_WAIT,
					    timeout);
		if (rc == RTEMS_TIMEOUT) {
			ret = 2;
			goto out;
		} else if (rc == RTEMS_UNSATISFIED ||
		           rc == RTEMS_OBJECT_WAS_DELETED) {
			ret = 1; /* sem was flushed/deleted, means DMA stop */
			goto out;
		} else if (rc != RTEMS_SUCCESSFUL) {
			/* Unknown Error */
			ret = -1;
			goto out;
		} else if (dma->started == 0) {
			ret = 1;
			goto out;
		}

		/* Check condition once more */
		goto check_condition;
	}

	ret = 0;

out_release:
	/* Unlock DMA channel */
	rtems_semaphore_release(dma->sem_rxdma);

out:
	if (initialized)
		dma->rx_wait.waiting = 0;
	return ret;
}
+
+int grspw_dma_config(void *c, struct grspw_dma_config *cfg)
+{
+ struct grspw_dma_priv *dma = c;
+
+ if (dma->started || !cfg)
+ return -1;
+
+ if (cfg->flags & ~(DMAFLAG_MASK | DMAFLAG2_MASK))
+ return -1;
+
+ /* Update Configuration */
+ memcpy(&dma->cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
+
+void grspw_dma_config_read(void *c, struct grspw_dma_config *cfg)
+{
+ struct grspw_dma_priv *dma = c;
+
+ /* Copy Current Configuration */
+ memcpy(cfg, &dma->cfg, sizeof(*cfg));
+}
+
+void grspw_dma_stats_read(void *c, struct grspw_dma_stats *sts)
+{
+ struct grspw_dma_priv *dma = c;
+
+ memcpy(sts, &dma->stats, sizeof(dma->stats));
+}
+
+void grspw_dma_stats_clr(void *c)
+{
+ struct grspw_dma_priv *dma = c;
+
+ /* Clear most of the statistics */
+ memset(&dma->stats, 0, sizeof(dma->stats));
+
+ /* Init proper default values so that comparisons will work the
+ * first time.
+ */
+ dma->stats.send_cnt_min = 0x3fffffff;
+ dma->stats.tx_sched_cnt_min = 0x3fffffff;
+ dma->stats.ready_cnt_min = 0x3fffffff;
+ dma->stats.rx_sched_cnt_min = 0x3fffffff;
+}
+
/* Start a DMA channel: reset software state, program descriptor table
 * base addresses and max RX packet length, and enable the receiver and
 * the configured IRQ sources. Returns 0 (also if already started).
 */
int grspw_dma_start(void *c)
{
	struct grspw_dma_priv *dma = c;
	struct grspw_dma_regs *dregs = dma->regs;
	unsigned int ctrl;
	SPIN_IRQFLAGS(irqflags);

	if (dma->started)
		return 0;

	/* Initialize Software Structures:
	 * - Clear all Queues
	 * - init BD ring
	 * - init IRQ counter
	 * - clear statistics counters
	 * - init wait structures and semaphores
	 */
	grspw_dma_reset(dma);

	/* RX&RD and TX is not enabled until user fills SEND and READY Queue
	 * with SpaceWire Packet buffers. So we do not have to worry about
	 * IRQs for this channel just yet. However other DMA channels
	 * may be active.
	 *
	 * Some functionality that is not changed during started mode is set up
	 * once and for all here:
	 *
	 *   - RX MAX Packet length
	 *   - TX Descriptor base address to first BD in TX ring (not enabled)
	 *   - RX Descriptor base address to first BD in RX ring (not enabled)
	 *   - IRQs (TX DMA, RX DMA, DMA ERROR)
	 *   - Strip PID
	 *   - Strip Address
	 *   - No Spill
	 *   - Receiver Enable
	 *   - disable on link error (LE)
	 *
	 * Note that the address register and the address enable bit in DMACTRL
	 * register must be left untouched, they are configured on a GRSPW
	 * core level.
	 *
	 * Note that the receiver is enabled here, but since descriptors are
	 * not enabled the GRSPW core may stop/pause RX (if NS bit set) until
	 * descriptors are enabled or it may ignore RX packets (NS=0) until
	 * descriptors are enabled (writing RD bit).
	 */
	REG_WRITE(&dregs->txdesc, dma->tx_bds_hwa);
	REG_WRITE(&dregs->rxdesc, dma->rx_bds_hwa);

	/* MAX Packet length */
	REG_WRITE(&dma->regs->rxmax, dma->cfg.rxmaxlen);

	ctrl = GRSPW_DMACTRL_AI | GRSPW_DMACTRL_PS | GRSPW_DMACTRL_PR |
		GRSPW_DMACTRL_TA | GRSPW_DMACTRL_RA | GRSPW_DMACTRL_RE |
		(dma->cfg.flags & DMAFLAG_MASK) << GRSPW_DMACTRL_NS_BIT;
	if (dma->core->dis_link_on_err & LINKOPTS_DIS_ONERR)
		ctrl |= GRSPW_DMACTRL_LE;
	if (dma->cfg.rx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_RXIE)
		ctrl |= GRSPW_DMACTRL_RI;
	if (dma->cfg.tx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_TXIE)
		ctrl |= GRSPW_DMACTRL_TI;
	/* Spin-lock: the read-modify-write of DMACTRL (preserving only the
	 * EN bit) must not race with the ISR or other DMA channels.
	 */
	SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
	ctrl |= REG_READ(&dma->regs->ctrl) & GRSPW_DMACTRL_EN;
	REG_WRITE(&dregs->ctrl, ctrl);
	SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);

	dma->started = 1; /* open up other DMA interfaces */

	return 0;
}
+
/* Stop a DMA channel. Caller must hold both the RX and TX channel locks.
 * Stops the hardware, moves all queued/scheduled packets to the
 * user-visible SENT/RECV queues and wakes any blocked waiter tasks.
 */
STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma)
{
	SPIN_IRQFLAGS(irqflags);

	if (dma->started == 0)
		return;
	dma->started = 0;

	SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
	grspw_hw_dma_stop(dma);
	SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);

	/* From here no more packets will be sent, however
	 * there may still exist scheduled packets that has been
	 * sent, and packets in the SEND Queue waiting for free
	 * descriptors. All packets are moved to the SENT Queue
	 * so that the user may get its buffers back, the user
	 * must look at the TXPKT_FLAG_TX in order to determine
	 * if the packet was sent or not.
	 */

	/* Retrieve all scheduled packets that have been sent */
	grspw_tx_process_scheduled(dma);

	/* Move un-sent packets in SEND and SCHED queue to the
	 * SENT Queue. (never marked sent)
	 */
	if (!grspw_list_is_empty(&dma->tx_sched)) {
		grspw_list_append_list(&dma->sent, &dma->tx_sched);
		grspw_list_clr(&dma->tx_sched);
		dma->sent_cnt += dma->tx_sched_cnt;
		dma->tx_sched_cnt = 0;
	}
	if (!grspw_list_is_empty(&dma->send)) {
		grspw_list_append_list(&dma->sent, &dma->send);
		grspw_list_clr(&dma->send);
		dma->sent_cnt += dma->send_cnt;
		dma->send_cnt = 0;
	}

	/* Similar for RX: received packets go to RECV, then the unused
	 * buffers from SCHED and READY are appended after them.
	 */
	grspw_rx_process_scheduled(dma);
	if (!grspw_list_is_empty(&dma->rx_sched)) {
		grspw_list_append_list(&dma->recv, &dma->rx_sched);
		grspw_list_clr(&dma->rx_sched);
		dma->recv_cnt += dma->rx_sched_cnt;
		dma->rx_sched_cnt = 0;
	}
	if (!grspw_list_is_empty(&dma->ready)) {
		grspw_list_append_list(&dma->recv, &dma->ready);
		grspw_list_clr(&dma->ready);
		dma->recv_cnt += dma->ready_cnt;
		dma->ready_cnt = 0;
	}

	/* Throw out blocked threads */
	rtems_semaphore_flush(dma->rx_wait.sem_wait);
	rtems_semaphore_flush(dma->tx_wait.sem_wait);
}
+
+void grspw_dma_stop(void *c)
+{
+ struct grspw_dma_priv *dma = c;
+
+ /* If DMA channel is closed we should not access the semaphore */
+ if (!dma->open)
+ return;
+
+ /* Take DMA Channel lock */
+ if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return;
+ if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ rtems_semaphore_release(dma->sem_rxdma);
+ return;
+ }
+
+ grspw_dma_stop_locked(dma);
+
+ rtems_semaphore_release(dma->sem_txdma);
+ rtems_semaphore_release(dma->sem_rxdma);
+}
+
+/* Do general work, invoked indirectly from ISR */
+static void grspw_work_shutdown_func(struct grspw_priv *priv)
+{
+ int i;
+
+ /* Link is down for some reason, and the user has configured
+ * that we stop all (open) DMA channels and throw out all their
+ * blocked threads.
+ */
+ for (i=0; i<priv->hwsup.ndma_chans; i++)
+ grspw_dma_stop(&priv->dma[i]);
+ grspw_hw_stop(priv);
+}
+
/* Do DMA work on one channel, invoked indirectly from ISR. "msg" carries
 * the event bits (RX, TX, error) the ISR recorded for this channel.
 */
static void grspw_work_dma_func(struct grspw_dma_priv *dma, unsigned int msg)
{
	int tx_cond_true, rx_cond_true, rxtx;

	/* If DMA channel is closed we should not access the semaphore */
	if (dma->open == 0)
		return;

	dma->stats.irq_cnt++;

	/* Look at cause we were woken up and clear source */
	rxtx = 0;
	if (msg & WORK_DMA_RX_MASK)
		rxtx |= 1;
	if (msg & WORK_DMA_TX_MASK)
		rxtx |= 2;
	/* Re-enable the interrupt source(s) the ISR disabled */
	switch (grspw_dma_enable_int(dma, rxtx, 0)) {
	case 1:
		/* DMA stopped */
		return;
	case 2:
		/* DMA error -> Stop DMA channel (both RX and TX) */
		if (msg & WORK_DMA_ER_MASK) {
			/* DMA error and user wants work-task to handle error */
			grspw_dma_stop(dma);
			grspw_work_event(WORKTASK_EV_DMA_STOP, msg);
		}
		return;
	default:
		break;
	}
	if (msg == 0)
		return;

	rx_cond_true = 0;
	tx_cond_true = 0;

	if ((dma->cfg.flags & DMAFLAG2_IRQD_MASK) == DMAFLAG2_IRQD_BOTH) {
		/* In case both interrupt sources are disabled simultaneously
		 * by the ISR the re-enabling of the interrupt source must also
		 * do so to avoid missing interrupts. Both RX and TX process
		 * will be forced.
		 */
		msg |= WORK_DMA_RX_MASK | WORK_DMA_TX_MASK;
	}

	if (msg & WORK_DMA_RX_MASK) {
		/* Do RX Work */

		/* Take DMA channel RX lock */
		if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
		    != RTEMS_SUCCESSFUL)
			return;

		dma->stats.rx_work_cnt++;
		/* Move HW-completed packets to RECV, then refill descriptors */
		grspw_rx_process_scheduled(dma);
		if (dma->started) {
			dma->stats.rx_work_enabled +=
				grspw_rx_schedule_ready(dma);
			/* Check to see if condition for waking blocked
			 * USER task is fulfilled.
			 */
			if (dma->rx_wait.waiting)
				rx_cond_true = grspw_rx_wait_eval(dma);
		}
		rtems_semaphore_release(dma->sem_rxdma);
	}

	if (msg & WORK_DMA_TX_MASK) {
		/* Do TX Work */

		/* Take DMA channel TX lock */
		if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
		    != RTEMS_SUCCESSFUL)
			return;

		dma->stats.tx_work_cnt++;
		/* Reclaim sent packets, then schedule queued ones */
		grspw_tx_process_scheduled(dma);
		if (dma->started) {
			dma->stats.tx_work_enabled +=
				grspw_tx_schedule_send(dma);
			/* Check to see if condition for waking blocked
			 * USER task is fulfilled.
			 */
			if (dma->tx_wait.waiting)
				tx_cond_true = grspw_tx_wait_eval(dma);
		}
		rtems_semaphore_release(dma->sem_txdma);
	}

	/* Wake tasks blocked in the RX/TX wait functions whose conditions
	 * are now satisfied (done after both locks are released).
	 */
	if (rx_cond_true)
		rtems_semaphore_release(dma->rx_wait.sem_wait);

	if (tx_cond_true)
		rtems_semaphore_release(dma->tx_wait.sem_wait);
}
+
/* Work task entry point: receives work messages posted by the ISR (or
 * by grspw_work_free()) from the message queue and dispatches them to
 * the shutdown/DMA handlers. On WORK_QUIT_TASK the task optionally
 * deletes the queue and then terminates itself.
 */
void grspw_work_func(rtems_id msgQ)
{
	unsigned int message = 0, msg;
	size_t size;
	struct grspw_priv *priv;
	int i;

	/* Wait for ISR to schedule work */
	while (rtems_message_queue_receive(msgQ, &message, &size,
	       RTEMS_WAIT, RTEMS_NO_TIMEOUT) == RTEMS_SUCCESSFUL) {
		if (message & WORK_QUIT_TASK)
			break;

		/* Handle work: the message encodes the core index in the
		 * upper bits and per-channel event bits below.
		 */
		priv = priv_tab[message >> WORK_CORE_BIT];
		if (message & WORK_SHUTDOWN) {
			grspw_work_shutdown_func(priv);

			grspw_work_event(WORKTASK_EV_SHUTDOWN, message);
		} else if (message & WORK_DMA_MASK) {
			for (i = 0; i < priv->hwsup.ndma_chans; i++) {
				msg = message &
				      (WORK_CORE_MASK | WORK_DMA_CHAN_MASK(i));
				if (msg)
					grspw_work_dma_func(&priv->dma[i], msg);
			}
		}
		message = 0;
	}

	/* Quit requested: optionally free the queue, notify user, exit */
	if (message & WORK_FREE_MSGQ)
		rtems_message_queue_delete(msgQ);

	grspw_work_event(WORKTASK_EV_QUIT, message);
	rtems_task_exit();
}
+
/* GRSPW interrupt service routine. Handles, in latency order:
 * timecode reception, distributed (SpaceWire) interrupts, link errors,
 * and per-DMA-channel events. Heavy work is deferred to the work task
 * by posting a message via priv->wc.msgisr.
 */
STATIC void grspw_isr(void *data)
{
	struct grspw_priv *priv = data;
	unsigned int dma_stat, stat, stat_clrmsk, ctrl, icctrl, timecode, irqs;
	unsigned int rxirq, rxack, intto;
	int i, handled = 0, call_user_int_isr;
	unsigned int message = WORK_NONE, dma_en;
	SPIN_ISR_IRQFLAGS(irqflags);

	/* Get Status from Hardware */
	stat = REG_READ(&priv->regs->status);
	/* Only clear error bits the user has enabled via stscfg, plus TO */
	stat_clrmsk = stat & (GRSPW_STS_TO | GRSPW_STAT_ERROR) &
			(GRSPW_STS_TO | priv->stscfg);

	/* Make sure to put the timecode handling first in order to get the
	 * smallest possible interrupt latency
	 */
	if ((stat & GRSPW_STS_TO) && (priv->tcisr != NULL)) {
		ctrl = REG_READ(&priv->regs->ctrl);
		if (ctrl & GRSPW_CTRL_TQ) {
			/* Timecode received. Let custom function handle this */
			timecode = REG_READ(&priv->regs->time) &
					(GRSPW_TIME_CTRL | GRSPW_TIME_TCNT);
			(priv->tcisr)(priv->tcisr_arg, timecode);
		}
	}

	/* Get Interrupt status from hardware */
	icctrl = REG_READ(&priv->regs->icctrl);
	if ((icctrl & GRSPW_ICCTRL_IRQSRC_MASK) && (priv->icisr != NULL)) {
		call_user_int_isr = 0;
		rxirq = rxack = intto = 0;

		if ((icctrl & GRSPW_ICCTRL_IQ) &&
		    (rxirq = REG_READ(&priv->regs->icrx)) != 0)
			call_user_int_isr = 1;

		if ((icctrl & GRSPW_ICCTRL_AQ) &&
		    (rxack = REG_READ(&priv->regs->icack)) != 0)
			call_user_int_isr = 1;

		if ((icctrl & GRSPW_ICCTRL_TQ) &&
		    (intto = REG_READ(&priv->regs->ictimeout)) != 0)
			call_user_int_isr = 1;

		/* Let custom functions handle this POTENTIAL SPW interrupt. The
		 * user function is called even if no such IRQ has happened!
		 * User must make sure to clear all interrupts that have been
		 * handled from the three registers by writing a one.
		 */
		if (call_user_int_isr)
			priv->icisr(priv->icisr_arg, rxirq, rxack, intto);
	}

	/* An Error occurred? */
	if (stat & GRSPW_STAT_ERROR) {
		/* Wake Global WorkQ */
		handled = 1;

		if (stat & GRSPW_STS_EE)
			priv->stats.err_eeop++;

		if (stat & GRSPW_STS_IA)
			priv->stats.err_addr++;

		if (stat & GRSPW_STS_PE)
			priv->stats.err_parity++;

		if (stat & GRSPW_STS_DE)
			priv->stats.err_disconnect++;

		if (stat & GRSPW_STS_ER)
			priv->stats.err_escape++;

		if (stat & GRSPW_STS_CE)
			priv->stats.err_credit++;

		if (stat & GRSPW_STS_WE)
			priv->stats.err_wsync++;

		if (((priv->dis_link_on_err >> 16) & stat) &&
		    (REG_READ(&priv->regs->ctrl) & GRSPW_CTRL_IE)) {
			/* Disable the link, no more transfers are expected
			 * on any DMA channel.
			 */
			SPIN_LOCK(&priv->devlock, irqflags);
			ctrl = REG_READ(&priv->regs->ctrl);
			REG_WRITE(&priv->regs->ctrl, GRSPW_CTRL_LD |
				(ctrl & ~(GRSPW_CTRL_IE|GRSPW_CTRL_LS)));
			SPIN_UNLOCK(&priv->devlock, irqflags);
			/* Signal to work-thread to stop DMA and clean up */
			message = WORK_SHUTDOWN;
		}
	}

	/* Clear Status Flags */
	if (stat_clrmsk) {
		handled = 1;
		REG_WRITE(&priv->regs->status, stat_clrmsk);
	}

	/* A DMA transfer or Error occurred? In that case disable more IRQs
	 * from the DMA channel, then invoke the workQ.
	 *
	 * Also the GI interrupt flag may not be available for older
	 * designs where (was added together with multiple DMA channels).
	 */
	SPIN_LOCK(&priv->devlock, irqflags);
	for (i=0; i<priv->hwsup.ndma_chans; i++) {
		dma_stat = REG_READ(&priv->regs->dma[i].ctrl);
		/* Check for Errors and if Packets been sent or received if
		 * respective IRQ are enabled
		 */
		irqs = (((dma_stat << 3) & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS))
			| GRSPW_DMA_STATUS_ERROR) & dma_stat;
		if (!irqs)
			continue;

		handled = 1;

		/* DMA error has priority, if error happens it is assumed that
		 * the common work-queue stops the DMA operation for that
		 * channel and makes the DMA tasks exit from their waiting
		 * functions (both RX and TX tasks).
		 *
		 * Disable Further IRQs (until enabled again)
		 * from this DMA channel. Let the status
		 * bit remain so that they can be handled by
		 * work function.
		 */
		if (irqs & GRSPW_DMA_STATUS_ERROR) {
			REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
				~(GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI |
				GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS |
				GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA |
				GRSPW_DMACTRL_AT));
			message |= WORK_DMA_ER(i);
		} else {
			/* determine if RX/TX interrupt source(s) shall remain
			 * enabled.
			 */
			if (priv->dma[i].cfg.flags & DMAFLAG2_IRQD_SRC) {
				dma_en = ~irqs >> 3;
			} else {
				dma_en = priv->dma[i].cfg.flags >>
					(DMAFLAG2_IRQD_BIT - GRSPW_DMACTRL_TI_BIT);
			}
			dma_en &= (GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI);
			REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
				(~(GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI |
				GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS |
				GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA |
				GRSPW_DMACTRL_AT) | dma_en));
			message |= WORK_DMA(i, irqs >> GRSPW_DMACTRL_PS_BIT);
		}
	}
	SPIN_UNLOCK(&priv->devlock, irqflags);

	if (handled != 0)
		priv->stats.irq_cnt++;

	/* Schedule work by sending message to work thread */
	if (message != WORK_NONE && priv->wc.msgisr) {
		int status;
		message |= WORK_CORE(priv->index);
		/* func interface compatible with msgQSend() on purpose, but
		 * at the same time the user can assign a custom function to
		 * handle DMA RX/TX operations as indicated by the "message"
		 * and clear the handled bits before given to msgQSend().
		 */
		status = priv->wc.msgisr(priv->wc.msgisr_arg, &message, 4);
		if (status != RTEMS_SUCCESSFUL) {
			printk("grspw_isr(%d): message fail %d (0x%x)\n",
				priv->index, status, message);
		}
	}
}
+
+STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma)
+{
+ unsigned int ctrl;
+ struct grspw_dma_regs *dregs = dma->regs;
+
+ ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN |
+ GRSPW_DMACTRL_SP | GRSPW_DMACTRL_SA | GRSPW_DMACTRL_NS);
+ ctrl |= GRSPW_DMACTRL_AT;
+ REG_WRITE(&dregs->ctrl, ctrl);
+}
+
+STATIC void grspw_hw_dma_softreset(struct grspw_dma_priv *dma)
+{
+ unsigned int ctrl;
+ struct grspw_dma_regs *dregs = dma->regs;
+
+ ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN);
+ REG_WRITE(&dregs->ctrl, ctrl);
+
+ REG_WRITE(&dregs->rxmax, DEFAULT_RXMAX);
+ REG_WRITE(&dregs->txdesc, 0);
+ REG_WRITE(&dregs->rxdesc, 0);
+}
+
+/* Hardware Action:
+ * - stop DMA
+ * - do not bring down the link (RMAP may be active)
+ * - RMAP settings untouched (RMAP may be active)
+ * - port select untouched (RMAP may be active)
+ * - timecodes are disabled
+ * - IRQ generation disabled
+ * - status not cleared (let user analyze it if requested later on)
+ * - Node address / First DMA channels Node address
+ * is untouched (RMAP may be active)
+ */
+STATIC void grspw_hw_stop(struct grspw_priv *priv)
+{
+ int i;
+ unsigned int ctrl;
+ SPIN_IRQFLAGS(irqflags);
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ for (i=0; i<priv->hwsup.ndma_chans; i++)
+ grspw_hw_dma_stop(&priv->dma[i]);
+
+ ctrl = REG_READ(&priv->regs->ctrl);
+ REG_WRITE(&priv->regs->ctrl, ctrl & (
+ GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS |
+ GRSPW_CTRL_RE | GRSPW_CTRL_RD |
+ GRSPW_CTRL_NP | GRSPW_CTRL_PS));
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+}
+
+/* Soft reset of GRSPW core registers */
+STATIC void grspw_hw_softreset(struct grspw_priv *priv)
+{
+ int i;
+ unsigned int tmp;
+
+ for (i=0; i<priv->hwsup.ndma_chans; i++)
+ grspw_hw_dma_softreset(&priv->dma[i]);
+
+ REG_WRITE(&priv->regs->status, 0xffffffff);
+ REG_WRITE(&priv->regs->time, 0);
+ /* Clear all but valuable reset values of ICCTRL */
+ tmp = REG_READ(&priv->regs->icctrl);
+ tmp &= GRSPW_ICCTRL_INUM | GRSPW_ICCTRL_BIRQ | GRSPW_ICCTRL_TXIRQ;
+ tmp |= GRSPW_ICCTRL_ID;
+ REG_WRITE(&priv->regs->icctrl, tmp);
+ REG_WRITE(&priv->regs->icrx, 0xffffffff);
+ REG_WRITE(&priv->regs->icack, 0xffffffff);
+ REG_WRITE(&priv->regs->ictimeout, 0xffffffff);
+}
+
/* Return the number of GRSPW cores found by the driver so far */
int grspw_dev_count(void)
{
	return grspw_count;
}
+
+void grspw_initialize_user(void *(*devfound)(int), void (*devremove)(int,void*))
+{
+ int i;
+ struct grspw_priv *priv;
+
+ /* Set new Device Found Handler */
+ grspw_dev_add = devfound;
+ grspw_dev_del = devremove;
+
+ if (grspw_initialized == 1 && grspw_dev_add) {
+ /* Call callback for every previously found device */
+ for (i=0; i<grspw_count; i++) {
+ priv = priv_tab[i];
+ if (priv)
+ priv->data = grspw_dev_add(i);
+ }
+ }
+}
+
/* Get a value at least 6.4us in number of clock cycles:
 * ceil(freq_khz * 64 / 10000), truncated to the 12-bit register field.
 */
static unsigned int grspw1_calc_timer64(int freq_khz)
{
	unsigned int cycles = (unsigned int)(freq_khz * 64 + 9999) / 10000U;

	return cycles & 0xfffU;
}
+
/* Get a value at least 850ns in number of clock cycles, minus 3:
 * ceil(freq_khz * 85 / 100000) - 3, truncated to the 10-bit field.
 * (Very low clock frequencies wrap in the mask, as before.)
 */
static unsigned int grspw1_calc_discon(int freq_khz)
{
	int cycles = (freq_khz * 85 + 99999) / 100000;

	return (unsigned int)(cycles - 3) & 0x3ffU;
}
+
+/******************* Driver manager interface ***********************/
+
+/* Driver prototypes */
+static int grspw_common_init(void);
+static int grspw2_init3(struct drvmgr_dev *dev);
+
/* Driver manager operations: only the third-level init stage is used;
 * the driver cannot be removed at runtime.
 */
static struct drvmgr_drv_ops grspw2_ops =
{
	.init = {NULL, NULL, grspw2_init3, NULL},
	.remove = NULL,
	.info = NULL
};
+
/* AMBA Plug&Play device IDs this driver attaches to */
static struct amba_dev_id grspw2_ids[] =
{
	{VENDOR_GAISLER, GAISLER_SPW}, /* not yet supported */
	{VENDOR_GAISLER, GAISLER_SPW2},
	{VENDOR_GAISLER, GAISLER_SPW2_DMA},
	{0, 0}		/* Mark end of table */
};
+
/* Driver registration record handed to the driver manager; the manager
 * allocates sizeof(struct grspw_priv) of per-device private data.
 */
static struct amba_drv_info grspw2_drv_info =
{
	{
		DRVMGR_OBJ_DRV,			/* Driver */
		NULL,				/* Next driver */
		NULL,				/* Device list */
		DRIVER_AMBAPP_GAISLER_GRSPW2_ID,/* Driver ID */
		"GRSPW_PKT_DRV",		/* Driver Name */
		DRVMGR_BUS_TYPE_AMBAPP,		/* Bus Type */
		&grspw2_ops,
		NULL,				/* Funcs */
		0,				/* No devices yet */
		sizeof(struct grspw_priv),	/* Let DrvMgr alloc priv */
	},
	&grspw2_ids[0]
};
+
/* Register the GRSPW2 packet driver with the driver manager framework */
void grspw2_register_drv (void)
{
	GRSPW_DBG("Registering GRSPW2 packet driver\n");
	drvmgr_drv_register(&grspw2_drv_info.general);
}
+
/* Driver manager init stage 3: probe one GRSPW core. Reads hardware
 * capabilities from the control/ICCTRL registers, allocates the DMA
 * channel structures, resets the core and registers the device with
 * the upper layer. Returns a DRVMGR_* status code.
 */
static int grspw2_init3(struct drvmgr_dev *dev)
{
	struct grspw_priv *priv;
	struct amba_dev_info *ambadev;
	struct ambapp_core *pnpinfo;
	int i;
	unsigned int ctrl, icctrl, numi;
	union drvmgr_key_value *value;

	GRSPW_DBG("GRSPW[%d] on bus %s\n", dev->minor_drv,
		dev->parent->dev->name);

	if (grspw_count >= GRSPW_MAX)
		return DRVMGR_ENORES;

	priv = dev->priv;
	if (priv == NULL)
		return DRVMGR_NOMEM;
	priv->dev = dev;

	/* If first device init common part of driver */
	if (grspw_common_init())
		return DRVMGR_FAIL;

	/*** Now we take care of device initialization ***/

	/* Get device information from AMBA PnP information */
	ambadev = (struct amba_dev_info *)dev->businfo;
	if (ambadev == NULL)
		return -1;
	pnpinfo = &ambadev->info;
	priv->irq = pnpinfo->irq;
	priv->regs = (struct grspw_regs *)pnpinfo->apb_slv->start;

	/* Read Hardware Support from Control Register */
	ctrl = REG_READ(&priv->regs->ctrl);
	priv->hwsup.rmap = (ctrl & GRSPW_CTRL_RA) >> GRSPW_CTRL_RA_BIT;
	priv->hwsup.rmap_crc = (ctrl & GRSPW_CTRL_RC) >> GRSPW_CTRL_RC_BIT;
	priv->hwsup.ccsds_crc = (ctrl & GRSPW_CTRL_CC) >> GRSPW_CTRL_CC_BIT;
	priv->hwsup.rx_unalign = (ctrl & GRSPW_CTRL_RX) >> GRSPW_CTRL_RX_BIT;
	priv->hwsup.nports = 1 + ((ctrl & GRSPW_CTRL_PO) >> GRSPW_CTRL_PO_BIT);
	priv->hwsup.ndma_chans = 1 + ((ctrl & GRSPW_CTRL_NCH) >> GRSPW_CTRL_NCH_BIT);
	priv->hwsup.irq = ((ctrl & GRSPW_CTRL_ID) >> GRSPW_CTRL_ID_BIT);
	icctrl = REG_READ(&priv->regs->icctrl);
	numi = (icctrl & GRSPW_ICCTRL_NUMI) >> GRSPW_ICCTRL_NUMI_BIT;
	if (numi > 0)
		priv->hwsup.irq_num = 1 << (numi - 1);
	else
		priv->hwsup.irq_num = 0;

	/* Construct hardware version identification */
	priv->hwsup.hw_version = pnpinfo->device << 16 | pnpinfo->apb_slv->ver;

	if ((pnpinfo->device == GAISLER_SPW2) ||
	    (pnpinfo->device == GAISLER_SPW2_DMA)) {
		priv->hwsup.strip_adr = 1; /* All GRSPW2 can strip Address */
		priv->hwsup.strip_pid = 1; /* All GRSPW2 can strip PID */
	} else {
		unsigned int apb_hz, apb_khz;

		/* Autodetect GRSPW1 features? */
		priv->hwsup.strip_adr = 0;
		priv->hwsup.strip_pid = 0;

		/* GRSPW1 needs the disconnect/timer64 timing fields
		 * programmed from the APB bus frequency.
		 */
		drvmgr_freq_get(dev, DEV_APB_SLV, &apb_hz);
		apb_khz = apb_hz / 1000;

		REG_WRITE(&priv->regs->timer,
			((grspw1_calc_discon(apb_khz) & 0x3FF) << 12) |
			(grspw1_calc_timer64(apb_khz) & 0xFFF));
	}

	/* Probe width of SpaceWire Interrupt ISR timers. All have the same
	 * width... so only the first is probed, if no timer result will be
	 * zero.
	 */
	REG_WRITE(&priv->regs->icrlpresc, 0x7fffffff);
	ctrl = REG_READ(&priv->regs->icrlpresc);
	REG_WRITE(&priv->regs->icrlpresc, 0);
	priv->hwsup.itmr_width = 0;
	while (ctrl & 1) {
		priv->hwsup.itmr_width++;
		ctrl = ctrl >> 1;
	}

	/* Let user limit the number of DMA channels on this core to save
	 * space. Only the first nDMA channels will be available.
	 */
	value = drvmgr_dev_key_get(priv->dev, "nDMA", DRVMGR_KT_INT);
	if (value && (value->i < priv->hwsup.ndma_chans))
		priv->hwsup.ndma_chans = value->i;

	/* Allocate and init Memory for all DMA channels */
	priv->dma = grlib_calloc(priv->hwsup.ndma_chans, sizeof(*priv->dma));
	if (priv->dma == NULL)
		return DRVMGR_NOMEM;
	for (i=0; i<priv->hwsup.ndma_chans; i++) {
		priv->dma[i].core = priv;
		priv->dma[i].index = i;
		priv->dma[i].regs = &priv->regs->dma[i];
	}

	/* Startup Action:
	 * - stop DMA
	 * - do not bring down the link (RMAP may be active)
	 * - RMAP settings untouched (RMAP may be active)
	 * - port select untouched (RMAP may be active)
	 * - timecodes are disabled
	 * - IRQ generation disabled
	 * - status cleared
	 * - Node address / First DMA channels Node address
	 *   is untouched (RMAP may be active)
	 */
	grspw_hw_stop(priv);
	grspw_hw_softreset(priv);

	/* Register character device in registered region */
	priv->index = grspw_count;
	priv_tab[priv->index] = priv;
	grspw_count++;

	/* Device name */
	sprintf(priv->devname, "grspw%d", priv->index);

	/* Tell above layer about new device */
	if (grspw_dev_add)
		priv->data = grspw_dev_add(priv->index);

	return DRVMGR_OK;
}
+
/******************* Driver Implementation ***********************/
/* Creates a MsgQ (optional) and spawns a worker task associated with the
 * message Q. The task can also be associated with a custom msgQ if *pMsgQ
 * is non-zero.
 *
 * prio < 0 and stack < 0x800 select the driver defaults. Returns the
 * task ID, or OBJECTS_ID_NONE on failure (in which case a message queue
 * created here is deleted again).
 */
rtems_id grspw_work_spawn(int prio, int stack, rtems_id *pMsgQ, int msgMax)
{
	rtems_id tid;
	int created_msgq = 0;
	/* Suffix letter cycles A..Z to give each spawned task/queue pair a
	 * distinct object name.
	 */
	static char work_name = 'A';

	if (pMsgQ == NULL)
		return OBJECTS_ID_NONE;

	if (*pMsgQ == OBJECTS_ID_NONE) {
		if (msgMax <= 0)
			msgMax = 32;

		if (rtems_message_queue_create(
			rtems_build_name('S', 'G', 'Q', work_name),
			msgMax, 4, RTEMS_FIFO, pMsgQ) !=
			RTEMS_SUCCESSFUL)
			return OBJECTS_ID_NONE;
		created_msgq = 1;
	}

	if (prio < 0)
		prio = grspw_work_task_priority; /* default prio */
	if (stack < 0x800)
		stack = RTEMS_MINIMUM_STACK_SIZE; /* default stack size */

	if (rtems_task_create(rtems_build_name('S', 'G', 'T', work_name),
		prio, stack, RTEMS_PREEMPT | RTEMS_NO_ASR,
		RTEMS_NO_FLOATING_POINT, &tid) != RTEMS_SUCCESSFUL)
		tid = OBJECTS_ID_NONE;
	else if (rtems_task_start(tid, (rtems_task_entry)grspw_work_func, *pMsgQ) !=
		    RTEMS_SUCCESSFUL) {
		rtems_task_delete(tid);
		tid = OBJECTS_ID_NONE;
	}

	/* On failure, undo the queue creation; on success, advance the
	 * object-name letter for the next spawn.
	 */
	if (tid == OBJECTS_ID_NONE && created_msgq) {
		rtems_message_queue_delete(*pMsgQ);
		*pMsgQ = OBJECTS_ID_NONE;
	} else {
		if (++work_name > 'Z')
			work_name = 'A';
	}
	return tid;
}
+
+/* Free task associated with message queue and optionally also the message
+ * queue itself. The message queue is deleted by the work task and is therefore
+ * delayed until it the work task resumes its execution.
+ */
+rtems_status_code grspw_work_free(rtems_id msgQ, int freeMsgQ)
+{
+ int msg = WORK_QUIT_TASK;
+ if (freeMsgQ)
+ msg |= WORK_FREE_MSGQ;
+ return rtems_message_queue_send(msgQ, &msg, 4);
+}
+
+void grspw_work_cfg(void *d, struct grspw_work_config *wc)
+{
+ struct grspw_priv *priv = (struct grspw_priv *)d;
+
+ if (wc == NULL)
+ wc = &grspw_wc_def; /* use default config */
+ priv->wc = *wc;
+}
+
#ifdef RTEMS_SMP
/* Set ISR CPU affinity for a device. Not supported at runtime: the BSP
 * only offers statically configured IRQ affinity, so always return -1.
 */
int grspw_isr_affinity(void *d, const cpu_set_t *cpus)
{
	return -1; /* BSP support only static configured IRQ affinity */
}
#endif
+
/* One-time driver-wide initialization: create the device semaphore and,
 * unless disabled by the user (grspw_work_task_priority == -1), the
 * default work task and its message queue. Returns 0 on success,
 * negative on failure. A previous failure is sticky (grspw_initialized
 * stays -1 and all later calls fail too).
 */
static int grspw_common_init(void)
{
	if (grspw_initialized == 1)
		return 0;
	if (grspw_initialized == -1)
		return -1;
	grspw_initialized = -1;

	/* Device Semaphore created with count = 1 */
	if (rtems_semaphore_create(rtems_build_name('S', 'G', 'L', 'S'), 1,
	    RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
	    RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
	    RTEMS_NO_PRIORITY_CEILING, 0, &grspw_sem) != RTEMS_SUCCESSFUL)
		return -1;

	/* Work queue, Work thread. Not created if user disables it.
	 * user can disable it when interrupt is not used to save resources
	 */
	if (grspw_work_task_priority != -1) {
		grspw_work_task = grspw_work_spawn(-1, 0,
			(rtems_id *)&grspw_wc_def.msgisr_arg, 0);
		if (grspw_work_task == OBJECTS_ID_NONE)
			return -2;
		grspw_wc_def.msgisr =
			(grspw_msgqisr_t) rtems_message_queue_send;
	} else {
		grspw_wc_def.msgisr = NULL;
		grspw_wc_def.msgisr_arg = NULL;
	}

	grspw_initialized = 1;
	return 0;
}
diff --git a/bsps/shared/grlib/spw/grspw_router.c b/bsps/shared/grlib/spw/grspw_router.c
new file mode 100644
index 0000000000..d8ba8feef3
--- /dev/null
+++ b/bsps/shared/grlib/spw/grspw_router.c
@@ -0,0 +1,1939 @@
+/* GRSPW ROUTER APB-Register Driver.
+ *
+ * COPYRIGHT (c) 2010-2017.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <rtems/libio.h>
+#include <rtems/bspIo.h>
+#include <stdio.h>
+#include <bsp.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/grspw_router.h>
+
+#include <grlib/grlib_impl.h>
+
+/* STATIC expands to 'static'; define it empty to expose internals while debugging */
+//#define STATIC
+#define STATIC static
+
+#define UNUSED __attribute__((unused))
+
+/* Uncomment to enable DBG() trace output */
+//#define DEBUG 1
+
+#ifdef DEBUG
+#define DBG(x...) printf(x)
+#else
+#define DBG(x...)
+#endif
+
+/* Guard device state with a semaphore and the ports with spin-locks */
+#define THREAD_SAFE 1
+
+/* Volatile register accessors: every access really hits the bus */
+#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
+#define REG_READ(addr) (*(volatile unsigned int *)(addr))
+
+
+/*
+ * ROUTER RTPMAP register fields
+ */
+#define RTPMAP_PE (0x7fffffff << RTPMAP_PE_BIT)
+#define RTPMAP_PD (0x1 << RTPMAP_PD_BIT)
+
+#define RTPMAP_PE_BIT 1
+#define RTPMAP_PD_BIT 0
+
+/*
+ * ROUTER RTACTRL register fields
+ * DEFINED IN HEADER
+ */
+
+/*
+ * ROUTER PCTRL register fields
+ * DEFINED IN HEADER
+ */
+
+/*
+ * ROUTER PSTSCFG register fields
+ * DEFINED IN HEADER
+ */
+
+/*
+ * ROUTER PSTS register fields
+ * DEFINED IN HEADER
+ */
+
+/*
+ * ROUTER PTIMER register fields
+ */
+#define PTIMER_RL (0xffff << PTIMER_RL_BIT)
+
+#define PTIMER_RL_BIT 0
+
+/*
+ * ROUTER PCTRL2 register fields
+ * DEFINED IN HEADER
+ */
+
+/*
+ * ROUTER RTRCFG register fields
+ */
+#define RTRCFG_SP (0x1f << RTRCFG_SP_BIT)
+#define RTRCFG_AP (0x1f << RTRCFG_AP_BIT)
+#define RTRCFG_FP (0x1f << RTRCFG_FP_BIT)
+#define RTRCFG_SR (0x1 << RTRCFG_SR_BIT)
+#define RTRCFG_PE (0x1 << RTRCFG_PE_BIT)
+#define RTRCFG_IC (0x1 << RTRCFG_IC_BIT)
+#define RTRCFG_IS (0x1 << RTRCFG_IS_BIT)
+#define RTRCFG_IP (0x1 << RTRCFG_IP_BIT)
+#define RTRCFG_AI (0x1 << RTRCFG_AI_BIT)
+#define RTRCFG_AT (0x1 << RTRCFG_AT_BIT)
+#define RTRCFG_IE (0x1 << RTRCFG_IE_BIT)
+#define RTRCFG_RE (0x1 << RTRCFG_RE_BIT)
+#define RTRCFG_EE (0x1 << RTRCFG_EE_BIT)
+#define RTRCFG_LS (0x1 << RTRCFG_LS_BIT)
+#define RTRCFG_SA (0x1 << RTRCFG_SA_BIT)
+#define RTRCFG_TF (0x1 << RTRCFG_TF_BIT)
+#define RTRCFG_ME (0x1 << RTRCFG_ME_BIT)
+#define RTRCFG_TA (0x1 << RTRCFG_TA_BIT)
+#define RTRCFG_PP (0x1 << RTRCFG_PP_BIT)
+#define RTRCFG_WCLEAR (RTRCFG_ME)
+
+#define RTRCFG_SP_BIT 27
+#define RTRCFG_AP_BIT 22
+#define RTRCFG_FP_BIT 17
+#define RTRCFG_SR_BIT 15
+#define RTRCFG_PE_BIT 14
+#define RTRCFG_IC_BIT 13
+#define RTRCFG_IS_BIT 12
+#define RTRCFG_IP_BIT 11
+#define RTRCFG_AI_BIT 10
+#define RTRCFG_AT_BIT 9
+#define RTRCFG_IE_BIT 8
+#define RTRCFG_RE_BIT 7
+#define RTRCFG_EE_BIT 6
+#define RTRCFG_LS_BIT 5
+#define RTRCFG_SA_BIT 4
+#define RTRCFG_TF_BIT 3
+#define RTRCFG_ME_BIT 2
+#define RTRCFG_TA_BIT 1
+#define RTRCFG_PP_BIT 0
+
+/*
+ * ROUTER TC register fields
+ */
+#define TC_RE (0x3f << TC_RE_BIT)
+#define TC_EN (0x3f << TC_EN_BIT)
+#define TC_CF (0x3f << TC_CF_BIT)
+#define TC_TC (0x3f << TC_TC_BIT)
+
+#define TC_RE_BIT 9
+#define TC_EN_BIT 8
+#define TC_CF_BIT 6
+#define TC_TC_BIT 0
+
+/*
+ * ROUTER VER register fields
+ */
+#define VER_MA (0xff << VER_MA_BIT)
+#define VER_MI (0xff << VER_MI_BIT)
+#define VER_PA (0xff << VER_PA_BIT)
+#define VER_ID (0xff << VER_ID_BIT)
+
+#define VER_MA_BIT 24
+#define VER_MI_BIT 16
+#define VER_PA_BIT 8
+#define VER_ID_BIT 0
+
+/*
+ * ROUTER IDIV register fields
+ */
+#define IDIV_ID (0xff << IDIV_ID_BIT)
+
+#define IDIV_ID_BIT 0
+
+/*
+ * ROUTER CFGWE register fields
+ */
+#define CFGWE_WE (0x1 << CFGWE_WE_BIT)
+
+#define CFGWE_WE_BIT 0
+
+/*
+ * ROUTER PRESCALER register fields
+ */
+#define PRESCALER_RL (0xffff << PRESCALER_RL_BIT)
+
+#define PRESCALER_RL_BIT 0
+
+/*
+ * ROUTER IMASK register fields
+ * DEFINED IN HEADER
+ */
+
+/*
+ * ROUTER ICODEGEN register fields
+ * DEFINED IN HEADER
+ */
+
+/*
+ * ROUTER ISRTIMER register fields
+ */
+#define ISRTIMER_RL (0xffff << ISRTIMER_RL_BIT)
+
+#define ISRTIMER_RL_BIT 0
+
+/*
+ * ROUTER AITIMER register fields
+ */
+#define AITIMER_RL (0xffff << AITIMER_RL_BIT)
+
+#define AITIMER_RL_BIT 0
+
+/*
+ * ROUTER ISRCTIMER register fields
+ */
+#define ISRCTIMER_RL (0x1f << ISRCTIMER_RL_BIT)
+
+#define ISRCTIMER_RL_BIT 0
+
+/*
+ * ROUTER CAP register fields
+ */
+#define CAP_AF (0x3 << CAP_AF_BIT)
+#define CAP_PF (0x7 << CAP_PF_BIT)
+#define CAP_RM (0x7 << CAP_RM_BIT)
+#define CAP_AS (0x1 << CAP_AS_BIT)
+#define CAP_AX (0x1 << CAP_AX_BIT)
+#define CAP_DP (0x1 << CAP_DP_BIT)
+#define CAP_ID (0x1 << CAP_ID_BIT)
+#define CAP_SD (0x1 << CAP_SD_BIT)
+#define CAP_PC (0x1f << CAP_PC_BIT)
+#define CAP_CC (0x1f << CAP_CC_BIT)
+
+#define CAP_AF_BIT 24
+#define CAP_PF_BIT 29
+#define CAP_RM_BIT 16
+#define CAP_AS_BIT 14
+#define CAP_AX_BIT 13
+#define CAP_DP_BIT 12
+#define CAP_ID_BIT 11
+#define CAP_SD_BIT 10
+#define CAP_PC_BIT 4
+#define CAP_CC_BIT 0
+
+/*
+ * ROUTER PNPVEND register fields
+ */
+#define PNPVEND_VI (0xffff << PNPVEND_VI_BIT)
+#define PNPVEND_PI (0xffff << PNPVEND_PI_BIT)
+
+#define PNPVEND_VI_BIT 16
+#define PNPVEND_PI_BIT 0
+
+/*
+ * ROUTER PNPUVEND register fields
+ */
+#define PNPUVEND_VI (0xffff << PNPUVEND_VI_BIT)
+#define PNPUVEND_PI (0xffff << PNPUVEND_PI_BIT)
+
+#define PNPUVEND_VI_BIT 16
+#define PNPUVEND_PI_BIT 0
+
+/*
+ * ROUTER MAXPLEN register fields
+ */
+#define MAXPLEN_ML (0xffffff << MAXPLEN_ML_BIT)
+
+#define MAXPLEN_ML_BIT 0
+
+/*
+ * ROUTER CHARO register fields
+ */
+#define CHARO_OR (0x1 << CHARO_OR_BIT)
+#define CHARO_CC (0x7fffffff << CHARO_CC_BIT)
+
+#define CHARO_OR_BIT 31
+#define CHARO_CC_BIT 0
+
+/*
+ * ROUTER CHARI register fields
+ */
+#define CHARI_OR (0x1 << CHARI_OR_BIT)
+#define CHARI_CC (0x7fffffff << CHARI_CC_BIT)
+
+#define CHARI_OR_BIT 31
+#define CHARI_CC_BIT 0
+
+/*
+ * ROUTER PKTO register fields
+ */
+#define PKTO_OR (0x1 << PKTO_OR_BIT)
+#define PKTO_CC (0x7fffffff << PKTO_CC_BIT)
+
+#define PKTO_OR_BIT 31
+#define PKTO_CC_BIT 0
+
+/*
+ * ROUTER PKTI register fields
+ */
+#define PKTI_OR (0x1 << PKTI_OR_BIT)
+#define PKTI_CC (0x7fffffff << PKTI_CC_BIT)
+
+#define PKTI_OR_BIT 31
+#define PKTI_CC_BIT 0
+
+/*
+ * ROUTER CRED register fields
+ */
+#define CRED_OC (0x3f << CRED_OC_BIT)
+#define CRED_IC (0x3f << CRED_IC_BIT)
+
+#define CRED_OC_BIT 6
+#define CRED_IC_BIT 0
+
+/*
+ * ROUTER RTRCOMB register fields
+ */
+#define RTRCOMB_SR (0x1 << RTRCOMB_SR_BIT)
+#define RTRCOMB_EN (0x1 << RTRCOMB_EN_BIT)
+#define RTRCOMB_PR (0x1 << RTRCOMB_PR_BIT)
+#define RTRCOMB_HD (0x1 << RTRCOMB_HD_BIT)
+#define RTRCOMB_PE (0x7ffff << RTRCOMB_PE_BIT)
+#define RTRCOMB_PD (0x1 << RTRCOMB_PD_BIT)
+
+#define RTRCOMB_SR_BIT 31
+#define RTRCOMB_EN_BIT 30
+#define RTRCOMB_PR_BIT 29
+#define RTRCOMB_HD_BIT 28
+#define RTRCOMB_PE_BIT 1
+#define RTRCOMB_PD_BIT 0
+
+/* GRSPW router APB register layout. The byte offset of each field within
+ * the register area is noted in the trailing comment. Entry 0 of the
+ * per-address tables (psetup/routes) is reserved; valid addresses start at 1.
+ */
+struct router_regs {
+ unsigned int resv1; /* 0x000 */
+ unsigned int psetup[255]; /* 0x004 */
+ unsigned int resv2; /* 0x400 */
+ unsigned int routes[255]; /* 0x404 */
+ unsigned int pctrl[32]; /* 0x800 */
+ unsigned int psts[32]; /* 0x880 */
+ unsigned int treload[32]; /* 0x900 */
+ unsigned int pctrl2[32]; /* 0x980 */
+ unsigned int cfgsts; /* 0xA00 */
+ unsigned int timecode; /* 0xA04 */
+ unsigned int ver; /* 0xA08 */
+ unsigned int idiv; /* 0xA0C */
+ unsigned int cfgwe; /* 0xA10 */
+ unsigned int tprescaler; /* 0xA14 */
+ unsigned int imask; /* 0xA18 */
+ unsigned int ipmask; /* 0xA1C */
+ unsigned int pip; /* 0xA20 */
+ unsigned int icodegen; /* 0xA24 */
+ unsigned int isr0; /* 0xA28 */
+ unsigned int isr1; /* 0xA2C */
+ unsigned int isrtimer; /* 0xA30 */
+ unsigned int aitimer; /* 0xA34 */
+ unsigned int isrctimer; /* 0xA38 */
+ unsigned int resv4; /* 0xA3C */
+ unsigned int lrunsts; /* 0xA40 */
+ unsigned int cap; /* 0xA44 */
+ unsigned int resv5[111]; /* 0xA48 */
+ unsigned int charo[31]; /* 0xC04 */ /* TODO check GR718 */
+ unsigned int resv6; /* 0xC80 */
+ unsigned int chari[31]; /* 0xC84 */
+ unsigned int resv7; /* 0xD00 */
+ unsigned int pkto[31]; /* 0xD04 */
+ unsigned int resv8; /* 0xD80 */
+ unsigned int pkti[31]; /* 0xD84 */
+ unsigned int maxplen[32]; /* 0xE00 */
+ unsigned int resv9; /* 0xE80 */
+ unsigned int credcnt[31]; /* 0xE84 */
+ unsigned int resv10[64]; /* 0xF00 */
+ unsigned int resv11; /* 0x1000 */
+ unsigned int rtcomb[255]; /* 0x1004 */
+};
+
+/* Per-device private driver state, allocated by the driver manager */
+struct router_priv {
+ struct drvmgr_dev *dev;
+
+ /* ROUTER control registers */
+ struct router_regs *regs;
+
+ #ifdef THREAD_SAFE
+ /* ROUTER semaphore (serializes open/close and configuration calls) */
+ rtems_id sem;
+ #endif
+
+ /* ROUTER driver register */
+ char devname[9]; /* "routerN" device name */
+ int index; /* Index in order it was probed */
+
+ int minor; /* drvmgr minor number */
+ int open; /* non-zero while a user holds the device */
+ struct router_hw_info hwinfo; /* decoded capabilities, see router_hwinfo() */
+ int nports; /* 1 (config port) + SPW + AMBA + FIFO ports */
+ int irq_init;
+
+ /* One spin-lock per port, protects per-port register sequences */
+ SPIN_DECLARE(plock[32])
+
+};
+
+/* Number of router devices successfully probed so far */
+int router_count = 0;
+/* Private state per probed router, indexed by probe order (priv->index) */
+static struct router_priv *priv_tab[ROUTER_MAX];
+
+/* Driver prototypes */
+
+STATIC int router_init(struct router_priv *priv);
+STATIC void router_hwinfo(struct router_priv *priv,
+ struct router_hw_info *hwinfo);
+STATIC int router_acontrol_set(struct router_priv *priv,
+ struct router_route_acontrol *control);
+STATIC int router_acontrol_get(struct router_priv *priv,
+ struct router_route_acontrol *control);
+STATIC int router_portmap_set(struct router_priv *priv,
+ struct router_route_portmap *pmap);
+STATIC int router_portmap_get(struct router_priv *priv,
+ struct router_route_portmap *pmap);
+
+/* -IRQ handler */
+void router_isr(void *arg);
+
+int router_init2(struct drvmgr_dev *dev);
+
+/* Driver manager operations: only second-stage init is implemented */
+struct drvmgr_drv_ops router_ops =
+{
+ .init = {NULL, router_init2, NULL, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+/* AMBA plug&play IDs this driver binds to */
+struct amba_dev_id router_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_SPW_ROUTER},
+ {0, 0} /* Mark end of table */
+};
+
+/* Driver description registered with the driver manager */
+struct amba_drv_info router_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_SPW_ROUTER_ID,/* Driver ID */
+ "ROUTER_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &router_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ sizeof(struct router_priv), /* Let DRVMGR allocate for us */
+ },
+ &router_ids[0],
+};
+
+/* Register the SpaceWire router driver with the driver manager. Must be
+ * called before bus probing for devices to be matched.
+ */
+void router_register_drv (void)
+{
+ DBG("Registering SPW ROUTER driver\n");
+ drvmgr_drv_register(&router_drv_info.general);
+}
+
+/* Sample the router's configuration/status, version and capability
+ * registers and decode the fields into the user-visible hwinfo structure.
+ */
+STATIC void router_hwinfo(struct router_priv *priv,
+ struct router_hw_info *hwinfo)
+{
+ unsigned int tmp;
+
+ /* Find router info */
+ tmp = REG_READ(&priv->regs->cfgsts);
+ hwinfo->nports_spw = (tmp & RTRCFG_SP) >> RTRCFG_SP_BIT;
+ hwinfo->nports_amba = (tmp & RTRCFG_AP) >> RTRCFG_AP_BIT;
+ hwinfo->nports_fifo = (tmp & RTRCFG_FP) >> RTRCFG_FP_BIT;
+ hwinfo->srouting = (tmp & RTRCFG_SR) >> RTRCFG_SR_BIT;
+ hwinfo->pnp_enable = (tmp & RTRCFG_PE) >> RTRCFG_PE_BIT;
+ hwinfo->timers_avail = (tmp & RTRCFG_TA) >> RTRCFG_TA_BIT;
+ hwinfo->pnp_avail = (tmp & RTRCFG_PP) >> RTRCFG_PP_BIT;
+
+ tmp = REG_READ(&priv->regs->ver);
+ hwinfo->ver_major = (tmp & VER_MA) >> VER_MA_BIT;
+ hwinfo->ver_minor = (tmp & VER_MI) >> VER_MI_BIT;
+ hwinfo->ver_patch = (tmp & VER_PA) >> VER_PA_BIT;
+ hwinfo->iid = (tmp & VER_ID) >> VER_ID_BIT;
+
+ /* Find router capabilities. FIFO/RMAP sizes are encoded as powers of
+ * two scaled by a base value.
+ */
+ tmp = REG_READ(&priv->regs->cap);
+ hwinfo->amba_port_fifo_size = 4 << ((tmp & CAP_AF) >> CAP_AF_BIT);
+ hwinfo->spw_port_fifo_size = 16 << ((tmp & CAP_PF) >> CAP_PF_BIT);
+ hwinfo->rmap_maxdlen = 4 << ((tmp & CAP_RM) >> CAP_RM_BIT);
+ hwinfo->aux_async = (tmp & CAP_AS) >> CAP_AS_BIT;
+ hwinfo->aux_dist_int_support = (tmp & CAP_AX) >> CAP_AX_BIT;
+ hwinfo->dual_port_support = (tmp & CAP_ID) >> CAP_ID_BIT;
+ hwinfo->dist_int_support = (tmp & CAP_DP) >> CAP_DP_BIT;
+ hwinfo->spwd_support = (tmp & CAP_SD) >> CAP_SD_BIT;
+ hwinfo->pktcnt_support = (tmp & CAP_PC) >> CAP_PC_BIT;
+ hwinfo->charcnt_support = (tmp & CAP_CC) >> CAP_CC_BIT;
+}
+
+/* Dump a decoded router_hw_info via DBG(). No-op unless DEBUG is defined. */
+STATIC void router_hwinfo_print(struct router_hw_info *hwinfo)
+{
+ DBG(" -PORTS= SPW: %d, AMBA: %d, FIFO: %d\n", hwinfo->nports_spw,
+ hwinfo->nports_amba, hwinfo->nports_fifo);
+ DBG(" -Static routing: %s, Timers: %s\n",
+ (hwinfo->srouting?"Enabled":"Disabled"),
+ (hwinfo->timers_avail?"Available":"N/A"));
+ DBG(" -PnP: %s, %s\n",
+ (hwinfo->pnp_avail?"Available":"N/A"),
+ (hwinfo->pnp_enable?"Enabled":"Disabled"));
+ DBG(" -Version= Major: 0x%02x, Minor: 0x%02x, Patch: 0x%02x, ID: 0x%02x\n",
+ hwinfo->ver_major, hwinfo->ver_minor,
+ hwinfo->ver_patch, hwinfo->iid);
+ DBG(" -Aux: %s, AuxDistInt: %s, DistInt: %s, SPWD: %s, PKTCNT: %s, "
+ "CHARCNT: %s\n",
+ (hwinfo->aux_async?"Async":"Sync"),
+ (hwinfo->aux_dist_int_support?"Supported":"N/A"),
+ (hwinfo->dist_int_support?"Supported":"N/A"),
+ (hwinfo->spwd_support?"Supported":"N/A"),
+ (hwinfo->pktcnt_support?"Supported":"N/A"),
+ (hwinfo->charcnt_support?"Supported":"N/A"));
+}
+
+/* Program the address-control word of every routable address: entries
+ * 0..30 of routes[] are physical ports 1..31, entries 31..254 are the
+ * logical addresses 32..255.
+ */
+STATIC int router_acontrol_set(struct router_priv *priv,
+	struct router_route_acontrol *control)
+{
+	int idx;
+
+	for (idx = 0; idx < 255; idx++) {
+		unsigned int word = (idx < 31) ?
+			control->control[idx] :
+			control->control_logical[idx - 31];
+		REG_WRITE(&priv->regs->routes[idx], word);
+	}
+	return ROUTER_ERR_OK;
+}
+
+/* Read back the address-control word of every routable address; the
+ * first 31 entries are physical ports, the remaining 224 are logical.
+ */
+STATIC int router_acontrol_get(struct router_priv *priv,
+	struct router_route_acontrol *control)
+{
+	int idx;
+
+	for (idx = 0; idx < 255; idx++) {
+		unsigned int word = REG_READ(&priv->regs->routes[idx]);
+
+		if (idx < 31)
+			control->control[idx] = word;
+		else
+			control->control_logical[idx - 31] = word;
+	}
+	return ROUTER_ERR_OK;
+}
+
+/* Program the port-map word of every routable address: entries 0..30 of
+ * psetup[] are physical ports 1..31, entries 31..254 are logical addresses.
+ */
+STATIC int router_portmap_set(struct router_priv *priv,
+	struct router_route_portmap *pmap)
+{
+	int idx;
+
+	for (idx = 0; idx < 255; idx++) {
+		unsigned int word = (idx < 31) ?
+			pmap->pmap[idx] : pmap->pmap_logical[idx - 31];
+		REG_WRITE(&priv->regs->psetup[idx], word);
+	}
+	return ROUTER_ERR_OK;
+}
+
+/* Read back the port-map word of every routable address; the first 31
+ * entries are physical ports, the remaining 224 are logical addresses.
+ */
+STATIC int router_portmap_get(struct router_priv *priv,
+	struct router_route_portmap *pmap)
+{
+	int idx;
+
+	for (idx = 0; idx < 255; idx++) {
+		unsigned int word = REG_READ(&priv->regs->psetup[idx]);
+
+		if (idx < 31)
+			pmap->pmap[idx] = word;
+		else
+			pmap->pmap_logical[idx - 31] = word;
+	}
+	return ROUTER_ERR_OK;
+}
+
+/* First-stage per-device initialization: create the device semaphore,
+ * decode the hardware capabilities and validate the port count, and set
+ * up the per-port spin-locks. Returns a DRVMGR_* status code.
+ *
+ * NOTE(review): the semaphore name uses priv->index, which is assigned in
+ * router_init2() only after this function returns — verify the intended
+ * naming; RTEMS does not require unique semaphore names, so this is
+ * cosmetic at worst.
+ */
+STATIC int router_init(struct router_priv *priv)
+{
+ #ifdef THREAD_SAFE
+ int i;
+
+ /* Device Semaphore created with count = 1 */
+ if (rtems_semaphore_create(
+ rtems_build_name('S', 'R', 'O', '0' + priv->index), 1,
+ RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
+ RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
+ RTEMS_NO_PRIORITY_CEILING, 0, &priv->sem) != RTEMS_SUCCESSFUL) {
+ return DRVMGR_FAIL;
+ }
+ #endif
+
+ /* Find router info */
+ router_hwinfo(priv, &priv->hwinfo);
+
+ priv->open = 0;
+ /* Number of ports has to consider the configuration port (1 + SPW + AMBA + FIFO) */
+ priv->nports = 1 + priv->hwinfo.nports_spw + priv->hwinfo.nports_amba +
+ priv->hwinfo.nports_fifo;
+ if ((priv->nports < 2) || (priv->nports > 32)) {
+ return DRVMGR_EIO;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Init port spin-lock memory structures */
+ for (i=0; i<priv->nports; i++) {
+ SPIN_INIT(&priv->plock[i],"portlock");
+ }
+ #endif
+
+ /* DEBUG print */
+ DBG("SPW ROUTER[%d] with following capabilities:\n", priv->index);
+ router_hwinfo_print(&priv->hwinfo);
+
+ return DRVMGR_OK;
+}
+
+/* Second-stage initialization called by the driver manager for each matched
+ * AMBA device: map the register area, initialize the private state, mask
+ * and clear all router interrupts/errors and register the device in the
+ * driver's internal table. Returns a DRVMGR_* status code.
+ */
+int router_init2(struct drvmgr_dev *dev)
+{
+	struct router_priv *priv = dev->priv;
+	struct amba_dev_info *ambadev;
+	struct ambapp_core *pnpinfo;
+	unsigned int tmp;
+	int i;
+	int status;
+
+	DBG("SPW ROUTER[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+
+	if (router_count >= ROUTER_MAX) {
+		return DRVMGR_ENORES;
+	}
+
+	if (priv == NULL) {
+		return DRVMGR_NOMEM;
+	}
+	priv->dev = dev;
+
+	/* Get device information from AMBA PnP information */
+	ambadev = (struct amba_dev_info *)priv->dev->businfo;
+	if (ambadev == NULL) {
+		return DRVMGR_FAIL;
+	}
+	pnpinfo = &ambadev->info;
+	priv->regs = (struct router_regs *)pnpinfo->ahb_slv->start[0];
+	priv->minor = dev->minor_drv;
+
+	/* Assign the probe index before router_init() so the device
+	 * semaphore created there is named after the right index.
+	 * (Previously index was assigned only after router_init().)
+	 */
+	priv->index = router_count;
+
+	/* Initialize driver struct */
+	status = router_init(priv);
+	if (status != DRVMGR_OK) {
+		return status;
+	}
+
+	/* Startup Action:
+	 * - Clear interrupts
+	 * - Mask interrupts
+	 */
+
+	/* Mask interrupts in ROUTER */
+	REG_WRITE(&priv->regs->imask, 0);
+	REG_WRITE(&priv->regs->ipmask, 0);
+
+	/* Clear interrupts in ROUTER */
+	REG_WRITE(&priv->regs->pip, 0xffffffff);
+
+	/* Clear errors in router and ports. Port 0 (the configuration port)
+	 * has additional write-one-to-clear bits (PSTSCFG_WCLEAR2).
+	 */
+	tmp = REG_READ(&priv->regs->cfgsts);
+	REG_WRITE(&priv->regs->cfgsts, tmp | RTRCFG_WCLEAR);
+	tmp = REG_READ(&priv->regs->psts[0]);
+	REG_WRITE(&priv->regs->psts[0], (tmp & PSTSCFG_WCLEAR) | PSTSCFG_WCLEAR2);
+	for (i = 1; i < priv->nports; i++) {
+		tmp = REG_READ(&priv->regs->psts[i]);
+		REG_WRITE(&priv->regs->psts[i], tmp & PSTS_WCLEAR);
+	}
+
+	/* Register driver internally */
+	priv_tab[priv->index] = priv;
+	router_count++;
+
+	/* Device name. snprintf() bounds the write so a large ROUTER_MAX can
+	 * never overflow the fixed-size devname buffer (sprintf could).
+	 */
+	snprintf(priv->devname, sizeof(priv->devname), "router%d", priv->index);
+
+	return DRVMGR_OK;
+}
+
+/* Open router device by probe index (exclusive access). Returns a device
+ * handle to pass to the other router_* functions, or NULL when the index
+ * is out of range, the device is uninitialized, the lock cannot be taken,
+ * or the device is already opened by someone else.
+ */
+void *router_open(unsigned int dev_no)
+{
+ struct router_priv *priv, *ret;
+
+ if (dev_no >= router_count) {
+ DBG("ROUTER Wrong index %u\n", dev_no);
+ return NULL;
+ }
+
+ priv = priv_tab[dev_no];
+
+ if (priv == NULL) {
+ DBG("ROUTER Device not initialized\n");
+ return NULL;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return NULL;
+ }
+ #endif
+
+ if (priv->open) {
+ DBG("ROUTER Device already opened\n");
+ ret = NULL;
+ } else {
+ /* Take the device */
+ priv->open = 1;
+ ret = priv;
+ }
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ret;
+}
+
+/* Release a router device previously obtained with router_open().
+ * Returns ROUTER_ERR_OK on success, ROUTER_ERR_EINVAL for a NULL handle
+ * and ROUTER_ERR_ERROR if the device was not open or locking failed.
+ */
+int router_close(void *d)
+{
+	struct router_priv *priv = d;
+	int ret = ROUTER_ERR_OK;
+
+	if (priv == NULL) {
+		DBG("ROUTER Device not initialized\n");
+		return ROUTER_ERR_EINVAL;
+	}
+
+	#ifdef THREAD_SAFE
+	/* Serialize against concurrent open/close callers */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL) {
+		DBG("ROUTER Sempahore failed\n");
+		return ROUTER_ERR_ERROR;
+	}
+	#endif
+
+	if (priv->open) {
+		/* Mark not open */
+		priv->open = 0;
+	} else {
+		DBG("ROUTER Device already closed\n");
+		ret = ROUTER_ERR_ERROR;
+	}
+
+	#ifdef THREAD_SAFE
+	/* Unlock dev */
+	rtems_semaphore_release(priv->sem);
+	#endif
+
+	return ret;
+}
+
+/* Validate a device handle: it must be non-NULL and currently opened.
+ * Returns 0 when usable, otherwise a ROUTER_ERR_* code.
+ */
+STATIC int router_check_open(void *d)
+{
+	struct router_priv *priv = d;
+
+	if (priv == NULL) {
+		DBG("ROUTER Device not initialized\n");
+		return ROUTER_ERR_EINVAL;
+	}
+	if (priv->open)
+		return 0;
+
+	DBG("ROUTER Device closed\n");
+	return ROUTER_ERR_ERROR;
+}
+
+/* Validate a device handle and a port number (0 .. nports-1).
+ * Returns 0 when usable, otherwise a ROUTER_ERR_* code.
+ */
+STATIC int router_check_port(void *d, int port)
+{
+	struct router_priv *priv = d;
+	int ret = router_check_open(d);
+
+	if (ret != 0)
+		return ret;
+
+	if (port < 0 || port >= priv->nports) {
+		DBG("ROUTER wrong port\n");
+		return ROUTER_ERR_EINVAL;
+	}
+	return 0;
+}
+
+/* Validate a device handle and require distributed-interrupt support.
+ * Returns 0 when usable, otherwise a ROUTER_ERR_* code.
+ */
+STATIC int router_check_distint_support(void *d)
+{
+	struct router_priv *priv = d;
+	int ret = router_check_open(d);
+
+	if (ret != 0)
+		return ret;
+
+	if (!priv->hwinfo.dist_int_support) {
+		DBG("ROUTER Dist interrupts not supported\n");
+		return ROUTER_ERR_IMPLEMENTED;
+	}
+	return 0;
+}
+
+/* Copy the freshly-sampled hardware information into *hwinfo.
+ * Returns ROUTER_ERR_OK, or an error if the device is not open or the
+ * output pointer is NULL.
+ */
+int router_hwinfo_get(void *d, struct router_hw_info *hwinfo)
+{
+	struct router_priv *priv = d;
+	int err = router_check_open(d);
+
+	if (err != 0)
+		return err;
+
+	if (hwinfo == NULL) {
+		DBG("ROUTER Wrong pointer\n");
+		return ROUTER_ERR_EINVAL;
+	}
+
+	/* Get hwinfo */
+	router_hwinfo(priv, hwinfo);
+	return ROUTER_ERR_OK;
+}
+
+/* Print (via DBG, so only when DEBUG is defined) the number of probed
+ * routers and this device's capabilities. Device must be open.
+ */
+int router_print(void *d)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ /* DEBUG print */
+ DBG("Number of routers: %d\n", router_count);
+ DBG("SPW ROUTER[%d] with following capabilities:\n", priv->index);
+ router_hwinfo_print(&priv->hwinfo);
+
+ return ROUTER_ERR_OK;
+}
+
+/* Configure the router. cfg->flags (ROUTER_FLG_*) selects which fields are
+ * written: configuration word, instance ID, startup clock divisor and the
+ * timer prescaler. Timer-related flags require hardware timer support.
+ *
+ * NOTE(review): ROUTER_FLG_TRLD is accepted by the timers_avail check below
+ * but no reload register is written in this function — confirm whether
+ * per-port reloads are intentionally handled only by router_port_*().
+ */
+int router_config_set(void *d, struct router_config *cfg)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (cfg == NULL) {
+ DBG("ROUTER CFG wrong\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ if ((cfg->flags & (ROUTER_FLG_TPRES|ROUTER_FLG_TRLD)) &&
+ !priv->hwinfo.timers_avail) {
+ return ROUTER_ERR_IMPLEMENTED;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ /* Write only configuration bits in Config register (never the
+ * write-one-to-clear status bits) */
+ if (cfg->flags & ROUTER_FLG_CFG) {
+ REG_WRITE(&priv->regs->cfgsts, cfg->config & ~(RTRCFG_WCLEAR));
+ }
+
+ /* Write Instance ID to Version Register */
+ if (cfg->flags & ROUTER_FLG_IID) {
+ REG_WRITE(&priv->regs->ver, (cfg->iid << VER_ID_BIT) & VER_ID);
+ }
+
+ /* Write startup-clock-divisor Register */
+ if (cfg->flags & ROUTER_FLG_IDIV) {
+ REG_WRITE(&priv->regs->idiv, (cfg->idiv << IDIV_ID_BIT) & IDIV_ID);
+ }
+
+ /* Write Timer Prescaler Register */
+ if (cfg->flags & ROUTER_FLG_TPRES) {
+ REG_WRITE(&priv->regs->tprescaler,
+ (cfg->timer_prescaler << PRESCALER_RL_BIT) & PRESCALER_RL);
+ }
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ROUTER_ERR_OK;
+}
+
+/* Read back the router configuration: configuration word (with read-only
+ * and write-clear fields masked out), instance ID, clock divisor and
+ * timer prescaler. Device must be open and cfg non-NULL.
+ */
+int router_config_get(void *d, struct router_config *cfg)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (cfg == NULL) {
+ DBG("ROUTER CFG wrong\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ /* Mask out the read-only port-count/status fields so the value can be
+ * fed back into router_config_set() unchanged */
+ cfg->config = REG_READ(&priv->regs->cfgsts) &
+ ~(RTRCFG_SP|RTRCFG_AP|RTRCFG_FP|RTRCFG_SR|RTRCFG_PE|RTRCFG_ME|
+ RTRCFG_TA|RTRCFG_PP);
+ cfg->iid = (REG_READ(&priv->regs->ver) & VER_ID) >> VER_ID_BIT;
+ cfg->idiv = (REG_READ(&priv->regs->idiv) & IDIV_ID) >> IDIV_ID_BIT;
+ cfg->timer_prescaler =
+ (REG_READ(&priv->regs->tprescaler) & PRESCALER_RL) >> PRESCALER_RL_BIT;
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ROUTER_ERR_OK;
+}
+
+/* Program the complete routing table. cfg->flags (ROUTER_ROUTE_FLG_*)
+ * selects whether the address-control words, the port maps, or both are
+ * written. Device must be open and cfg non-NULL.
+ */
+int router_routing_table_set(void *d, struct router_routing_table *cfg)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (cfg == NULL) {
+ DBG("ROUTER CFG wrong\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ /* Write Address control */
+ if (cfg->flags & ROUTER_ROUTE_FLG_CTRL) {
+ router_acontrol_set(priv,&cfg->acontrol);
+ }
+
+ /* Write Port map */
+ if (cfg->flags & ROUTER_ROUTE_FLG_MAP) {
+ router_portmap_set(priv,&cfg->portmap);
+ }
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ROUTER_ERR_OK;
+}
+
+/* Read back the complete routing table (address-control words and port
+ * maps) into *cfg. Device must be open and cfg non-NULL.
+ */
+int router_routing_table_get(void *d, struct router_routing_table *cfg)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (cfg == NULL) {
+ DBG("ROUTER CFG wrong\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ /* Read Address control */
+ router_acontrol_get(priv,&cfg->acontrol);
+
+ /* Read Port map */
+ router_portmap_get(priv,&cfg->portmap);
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ROUTER_ERR_OK;
+}
+
+/* Program one routing-table entry: build the destination-port bitmask from
+ * route->to_port[0..count-1], optionally enable packet distribution, and
+ * write the port map and address-control word for route->from_address.
+ * Physical addresses (1..31) are range-checked against the port count;
+ * logical addresses (>= 32) are not.
+ *
+ * NOTE(review): from_address is not checked against an upper bound here —
+ * presumably its type limits it to 255; confirm against the header.
+ */
+int router_route_set(void *d, struct router_route *route)
+{
+ struct router_priv *priv = d;
+ int i;
+ unsigned int mask;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (route == NULL) {
+ DBG("ROUTER route wrong\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ if (route->from_address < 32) {
+ /* Physical address */
+ if ((route->from_address == 0) ||
+ (route->from_address >= priv->nports)) {
+ DBG("ROUTER wrong physical address\n");
+ return ROUTER_ERR_TOOMANY;
+ }
+ }
+
+ /* Compute port map: one bit per destination port */
+ mask=0;
+ for (i=0; i < route->count; i++) {
+ if ((route->to_port[i] == 0) || (route->to_port[i] >= priv->nports)) {
+ DBG("ROUTER route wrong destiny port\n");
+ return ROUTER_ERR_EINVAL;
+ }
+ mask |= (0x1 << route->to_port[i]);
+ }
+ if (route->options & ROUTER_ROUTE_PACKETDISTRIBUTION_ENABLE) {
+ mask |= RTPMAP_PD;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ /* Write port map (table index 0 corresponds to address 1) */
+ REG_WRITE(&priv->regs->psetup[route->from_address-1], mask);
+
+ /* Write Address control (low 4 option bits only) */
+ REG_WRITE(&priv->regs->routes[route->from_address-1],
+ route->options & (0xf));
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ROUTER_ERR_OK;
+}
+
+/* Read one routing-table entry: return the address-control options and
+ * decode the port-map bitmask into route->to_port[]/route->count for
+ * route->from_address. Physical addresses (1..31) are range-checked.
+ */
+int router_route_get(void *d, struct router_route *route)
+{
+ struct router_priv *priv = d;
+ int i,count;
+ unsigned int mask;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (route == NULL) {
+ DBG("ROUTER route wrong\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ if (route->from_address < 32) {
+ /* Physical address */
+ if ((route->from_address == 0) ||
+ (route->from_address >= priv->nports)) {
+ DBG("ROUTER wrong physical address\n");
+ return ROUTER_ERR_TOOMANY;
+ }
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ /* Get Address control (low 4 option bits) */
+ route->options =
+ REG_READ(&priv->regs->routes[route->from_address-1]) & (0xf);
+
+ /* Read port map */
+ mask=REG_READ(&priv->regs->psetup[route->from_address-1]);
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ if (mask & RTPMAP_PD) {
+ route->options |= ROUTER_ROUTE_PACKETDISTRIBUTION_ENABLE;
+ }
+
+ /*DBG("ROUTE from address 0x%02x read, PMAP: 0x%08x, CTRL: 0x%08x\n",
+ * (unsigned int) route->from_address, mask,
+ * (unsigned int) route->options);*/
+
+ /* Decode the port-enable bits (bits 1..31) into a port list */
+ i=0;
+ count=0;
+ mask &= (RTPMAP_PE);
+ while (mask != 0) {
+ if (mask & 0x1) {
+ route->to_port[count] = i;
+ count++;
+ }
+ mask >>= 1;
+ i++;
+ }
+ route->count=count;
+
+ return ROUTER_ERR_OK;
+}
+
+/* Allow configuration-area writes via the configuration port (CFGWE=1) */
+int router_write_enable(void *d)
+{
+	struct router_priv *priv = d;
+	int err = router_check_open(d);
+
+	if (err != 0)
+		return err;
+
+	REG_WRITE(&priv->regs->cfgwe, 0x1);
+	return ROUTER_ERR_OK;
+}
+
+/* Block configuration-area writes via the configuration port (CFGWE=0) */
+int router_write_disable(void *d)
+{
+	struct router_priv *priv = d;
+	int err = router_check_open(d);
+
+	if (err != 0)
+		return err;
+
+	REG_WRITE(&priv->regs->cfgwe, 0x0);
+	return ROUTER_ERR_OK;
+}
+
+/* Combined get/set access to the per-port registers (control, control2,
+ * status, timer reload, maximum packet length). cfg->flag selects which
+ * registers are read and/or written; values returned in *cfg are those
+ * sampled before any write performed by the same call. The sequence runs
+ * under the port spin-lock so it is atomic with respect to the ISR.
+ */
+int router_port_ioc(void *d, int port, struct router_port *cfg)
+{
+	struct router_priv *priv = d;
+	unsigned int ctrl, ctrl2, sts, timer, pktl;
+	SPIN_IRQFLAGS(irqflags);
+	int error = router_check_port(d, port);
+
+	if (error)
+		return error;
+
+	if (cfg == NULL) {
+		DBG("ROUTER Wrong cfg\n");
+		return ROUTER_ERR_EINVAL;
+	}
+
+	SPIN_LOCK_IRQ(&priv->plock[port], irqflags);
+
+	ctrl = cfg->ctrl;
+	if (cfg->flag & ROUTER_PORT_FLG_GET_CTRL) {
+		ctrl = REG_READ(&priv->regs->pctrl[port]);
+	}
+	/* BUGFIX: was seeded from cfg->ctrl instead of cfg->ctrl2 */
+	ctrl2 = cfg->ctrl2;
+	if (cfg->flag & ROUTER_PORT_FLG_GET_CTRL2) {
+		ctrl2 = REG_READ(&priv->regs->pctrl2[port]);
+	}
+	sts = cfg->sts;
+	if (cfg->flag & ROUTER_PORT_FLG_GET_STS) {
+		sts = REG_READ(&priv->regs->psts[port]);
+	}
+	timer = cfg->timer_reload;
+	if (cfg->flag & ROUTER_PORT_FLG_GET_TIMER) {
+		/* BUGFIX: the read result was previously discarded */
+		timer = REG_READ(&priv->regs->treload[port]);
+	}
+	pktl = cfg->packet_length;
+	if (cfg->flag & ROUTER_PORT_FLG_GET_PKTLEN) {
+		/* BUGFIX: the read result was previously discarded */
+		pktl = REG_READ(&priv->regs->maxplen[port]);
+	}
+
+	if (cfg->flag & ROUTER_PORT_FLG_SET_CTRL) {
+		REG_WRITE(&priv->regs->pctrl[port], cfg->ctrl);
+	}
+	if (cfg->flag & ROUTER_PORT_FLG_SET_CTRL2) {
+		REG_WRITE(&priv->regs->pctrl2[port], cfg->ctrl2);
+	}
+	if (cfg->flag & ROUTER_PORT_FLG_SET_STS) {
+		REG_WRITE(&priv->regs->psts[port], cfg->sts);
+	}
+	if (cfg->flag & ROUTER_PORT_FLG_SET_TIMER) {
+		REG_WRITE(&priv->regs->treload[port], cfg->timer_reload & PTIMER_RL);
+	}
+	if (cfg->flag & ROUTER_PORT_FLG_SET_PKTLEN) {
+		REG_WRITE(&priv->regs->maxplen[port], cfg->packet_length & MAXPLEN_ML);
+	}
+
+	SPIN_UNLOCK_IRQ(&priv->plock[port], irqflags);
+
+	/* Return the sampled values to the caller */
+	cfg->ctrl = ctrl;
+	cfg->ctrl2 = ctrl2;
+	cfg->sts = sts;
+	cfg->timer_reload = timer;
+	cfg->packet_length = pktl;
+
+	return ROUTER_ERR_OK;
+}
+
+/* Atomic read-modify-write of the Port Control register: bits selected by
+ * bitmask are replaced with the corresponding bits of value. The previous
+ * register content is returned through oldvalue when non-NULL.
+ */
+int router_port_ctrl_rmw(void *d, int port, uint32_t *oldvalue, uint32_t bitmask, uint32_t value)
+{
+	struct router_priv *priv = d;
+	int error = router_check_port(d, port);
+	unsigned int prev, next;
+	SPIN_IRQFLAGS(irqflags);
+
+	if (error)
+		return error;
+
+	SPIN_LOCK_IRQ(&priv->plock[port], irqflags);
+	prev = REG_READ(&priv->regs->pctrl[port]);
+	next = (prev & ~bitmask) | (value & bitmask);
+	REG_WRITE(&priv->regs->pctrl[port], next);
+	SPIN_UNLOCK_IRQ(&priv->plock[port], irqflags);
+
+	if (oldvalue != NULL)
+		*oldvalue = prev;
+
+	return ROUTER_ERR_OK;
+}
+
+/* Atomic read-modify-write of the Port Control2 register: bits selected by
+ * bitmask are replaced with the corresponding bits of value. The previous
+ * register content is returned through oldvalue when non-NULL.
+ */
+int router_port_ctrl2_rmw(void *d, int port, uint32_t *oldvalue, uint32_t bitmask, uint32_t value)
+{
+	struct router_priv *priv = d;
+	int error = router_check_port(d, port);
+	unsigned int prev, next;
+	SPIN_IRQFLAGS(irqflags);
+
+	if (error)
+		return error;
+
+	SPIN_LOCK_IRQ(&priv->plock[port], irqflags);
+	prev = REG_READ(&priv->regs->pctrl2[port]);
+	next = (prev & ~bitmask) | (value & bitmask);
+	REG_WRITE(&priv->regs->pctrl2[port], next);
+	SPIN_UNLOCK_IRQ(&priv->plock[port], irqflags);
+
+	if (oldvalue != NULL)
+		*oldvalue = prev;
+
+	return ROUTER_ERR_OK;
+}
+
+/* Read the Port Control register into *ctrl.
+ * Returns ROUTER_ERR_OK, or an error for a bad handle/port/pointer.
+ */
+int router_port_ctrl_get(void *d, int port, uint32_t *ctrl)
+{
+	struct router_priv *priv = d;
+	int err = router_check_port(d, port);
+
+	if (err != 0)
+		return err;
+
+	if (ctrl == NULL) {
+		DBG("ROUTER Wrong ctrl\n");
+		return ROUTER_ERR_EINVAL;
+	}
+
+	*ctrl = REG_READ(&priv->regs->pctrl[port]);
+	return ROUTER_ERR_OK;
+}
+
+/* Read Port Status register into *sts and clear the error bits selected by
+ * clrmsk. Port 0 is the configuration port and uses the extended
+ * PSTSCFG_WCLEAR/PSTSCFG_WCLEAR2 write-one-to-clear semantics; other ports
+ * use PSTS_WCLEAR. Read and clear are done under the port spin-lock.
+ */
+int router_port_status(void *d, int port, uint32_t *sts, uint32_t clrmsk)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+ SPIN_IRQFLAGS(irqflags);
+
+ if (error)
+ return error;
+
+ if (sts == NULL) {
+ DBG("ROUTER Wrong sts\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ SPIN_LOCK_IRQ(&priv->plock[port], irqflags);
+ *sts = REG_READ(&priv->regs->psts[port]);
+ if (port == 0) {
+ REG_WRITE(&priv->regs->psts[port], ((*sts) & (PSTSCFG_WCLEAR & clrmsk)) | (PSTSCFG_WCLEAR2 & clrmsk));
+ }else{
+ REG_WRITE(&priv->regs->psts[port], (*sts) & (PSTS_WCLEAR & clrmsk));
+ }
+ SPIN_UNLOCK_IRQ(&priv->plock[port], irqflags);
+ return ROUTER_ERR_OK;
+}
+
+/* Read the Port Control2 register into *ctrl2.
+ * Returns ROUTER_ERR_OK, or an error for a bad handle/port/pointer.
+ */
+int router_port_ctrl2_get(void *d, int port, uint32_t *ctrl2)
+{
+	struct router_priv *priv = d;
+	int err = router_check_port(d, port);
+
+	if (err != 0)
+		return err;
+
+	if (ctrl2 == NULL) {
+		DBG("ROUTER Wrong ctrl2\n");
+		return ROUTER_ERR_EINVAL;
+	}
+
+	*ctrl2 = REG_READ(&priv->regs->pctrl2[port]);
+	return ROUTER_ERR_OK;
+}
+
+/* Write Port Control Register: masked read-modify-write, delegates to
+ * router_port_ctrl_rmw() without returning the old value.
+ */
+int router_port_ctrl_set(void *d, int port, uint32_t mask, uint32_t ctrl)
+{
+ return router_port_ctrl_rmw(d, port, NULL, mask, ctrl);
+}
+
+/* Write Port Control2 Register: masked read-modify-write.
+ * BUGFIX: this previously delegated to router_port_ctrl_rmw() and thus
+ * modified the Port Control register instead of Port Control2.
+ */
+int router_port_ctrl2_set(void *d, int port, uint32_t mask, uint32_t ctrl2)
+{
+	return router_port_ctrl2_rmw(d, port, NULL, mask, ctrl2);
+}
+
+/* Program the per-port timer reload value (masked to the RL field) */
+int router_port_treload_set(void *d, int port, uint32_t reload)
+{
+	struct router_priv *priv = d;
+	int err = router_check_port(d, port);
+
+	if (err != 0)
+		return err;
+
+	REG_WRITE(&priv->regs->treload[port], reload & PTIMER_RL);
+	return ROUTER_ERR_OK;
+}
+
+int router_port_treload_get(void *d, int port, uint32_t *reload)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+
+ if (error)
+ return error;
+
+ if (reload == NULL) {
+ DBG("ROUTER Wrong reload pointer\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ *reload = REG_READ(&priv->regs->treload[port]) & PTIMER_RL;
+
+ return ROUTER_ERR_OK;
+}
+
+int router_port_maxplen_set(void *d, int port, uint32_t length)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+
+ if (error)
+ return error;
+
+ REG_WRITE(&priv->regs->maxplen[port], length & MAXPLEN_ML);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_port_maxplen_get(void *d, int port, uint32_t *length)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+
+ if (error)
+ return error;
+
+ if (length == NULL) {
+ DBG("ROUTER Wrong length pointer\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ *length = REG_READ(&priv->regs->maxplen[port]);
+
+ return ROUTER_ERR_OK;
+}
+
+/* Get Port Link Status.
+ * Returns the PSTS_LS field of the port status register (shifted down to
+ * bit 0), or a negative router error code if the port is invalid.
+ */
+int router_port_link_status(void *d, int port)
+{
+	struct router_priv *priv = d;
+	int error = router_check_port(d, port);
+
+	if (error)
+		return error;
+
+	return ((REG_READ(&priv->regs->psts[port]) & PSTS_LS) >> PSTS_LS_BIT);
+}
+
+/* Disable a port: set the PCTRL disable (DI) bit. */
+int router_port_disable(void *d, int port)
+{
+	return router_port_ctrl_rmw(d, port, NULL, PCTRL_DI, PCTRL_DI);
+}
+
+/* Enable a port: clear the PCTRL disable (DI) bit. */
+int router_port_enable(void *d, int port)
+{
+	return router_port_ctrl_rmw(d, port, NULL, PCTRL_DI, 0);
+}
+
+/* Stop the link: set link-disable (LD) and clear link-start (LS). */
+int router_port_link_stop(void *d, int port)
+{
+	return router_port_ctrl_rmw(d, port, NULL, PCTRL_LD | PCTRL_LS, PCTRL_LD);
+}
+
+/* Start the link: set link-start (LS) and clear link-disable (LD). */
+int router_port_link_start(void *d, int port)
+{
+	return router_port_ctrl_rmw(d, port, NULL, PCTRL_LD | PCTRL_LS, PCTRL_LS);
+}
+
+/* Spill (discard) the port's receive data: set PCTRL_RS and busy-wait
+ * until the hardware clears it again, signalling the spill is complete.
+ * NOTE(review): the poll loop runs with the per-port spinlock held and
+ * interrupts disabled — acceptable only if the hardware completes the
+ * spill quickly; confirm worst-case duration.
+ */
+int router_port_link_receive_spill(void *d, int port)
+{
+	struct router_priv *priv = d;
+	unsigned int ctrl;
+	SPIN_IRQFLAGS(irqflags);
+	int error = router_check_port(d, port);
+
+	if (error)
+		return error;
+
+	SPIN_LOCK_IRQ(&priv->plock[port], irqflags);
+
+	ctrl = REG_READ(&priv->regs->pctrl[port]);
+	REG_WRITE(&priv->regs->pctrl[port], (ctrl| (PCTRL_RS)));
+
+	/* Wait until the spill is done */
+	while(REG_READ(&priv->regs->pctrl[port]) & PCTRL_RS) {};
+
+	SPIN_UNLOCK_IRQ(&priv->plock[port], irqflags);
+
+	return ROUTER_ERR_OK;
+}
+
+/* Reset the port's transmit FIFO: set PCTRL_TF and busy-wait until the
+ * hardware clears it again (same pattern as the receive spill above).
+ * NOTE(review): busy-wait with the spinlock held and interrupts off.
+ */
+int router_port_link_transmit_reset(void *d, int port)
+{
+	struct router_priv *priv = d;
+	unsigned int ctrl;
+	SPIN_IRQFLAGS(irqflags);
+	int error = router_check_port(d, port);
+
+	if (error)
+		return error;
+
+	SPIN_LOCK_IRQ(&priv->plock[port], irqflags);
+
+	ctrl = REG_READ(&priv->regs->pctrl[port]);
+	REG_WRITE(&priv->regs->pctrl[port], (ctrl| (PCTRL_TF)));
+
+	/* Wait until the transmit FIFO reset is done */
+	while(REG_READ(&priv->regs->pctrl[port]) & PCTRL_TF) {};
+
+	SPIN_UNLOCK_IRQ(&priv->plock[port], irqflags);
+
+	return ROUTER_ERR_OK;
+}
+
+/* Read the port's credit counter register into *cred. */
+int router_port_cred_get(void *d, int port, uint32_t *cred)
+{
+	struct router_priv *priv = d;
+	int error = router_check_port(d, port);
+
+	if (error)
+		return error;
+
+	if (cred == NULL) {
+		DBG("ROUTER Wrong cred pointer\n");
+		return ROUTER_ERR_EINVAL;
+	}
+
+	*cred = REG_READ(&priv->regs->credcnt[port]);
+	return ROUTER_ERR_OK;
+}
+
+/* Write the router instance ID field of the version register. */
+int router_instance_set(void *d, uint8_t instance)
+{
+	struct router_priv *priv = d;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	REG_WRITE(&priv->regs->ver, (instance << VER_ID_BIT) & VER_ID);
+	return ROUTER_ERR_OK;
+}
+
+/* Write the initialization clock divisor register. */
+int router_idiv_set(void *d, uint8_t idiv)
+{
+	struct router_priv *priv = d;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	REG_WRITE(&priv->regs->idiv, (idiv << IDIV_ID_BIT) & IDIV_ID);
+	return ROUTER_ERR_OK;
+}
+
+/* Write the timer prescaler register (PRESCALER_RL field). */
+int router_tpresc_set(void *d, uint32_t prescaler)
+{
+	struct router_priv *priv = d;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	REG_WRITE(&priv->regs->tprescaler,
+			(prescaler << PRESCALER_RL_BIT) & PRESCALER_RL);
+	return ROUTER_ERR_OK;
+}
+
+/* Read the version register into *instance.
+ * NOTE(review): the raw register is stored into a uint8_t without masking
+ * or shifting by VER_ID/VER_ID_BIT (unlike the setter) — confirm whether
+ * the instance ID field is intended to land in the low byte here.
+ */
+int router_instance_get(void *d, uint8_t *instance)
+{
+	struct router_priv *priv = d;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	if (instance == NULL) {
+		DBG("ROUTER Wrong instance pointer\n");
+		return ROUTER_ERR_EINVAL;
+	}
+
+	*instance = REG_READ(&priv->regs->ver);
+
+	return ROUTER_ERR_OK;
+}
+
+/* Read the initialization divisor register into *idiv.
+ * NOTE(review): raw register truncated to uint8_t without IDIV_ID masking —
+ * same concern as router_instance_get().
+ */
+int router_idiv_get(void *d, uint8_t *idiv)
+{
+	struct router_priv *priv = d;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	if (idiv == NULL) {
+		DBG("ROUTER Wrong idiv pointer\n");
+		return ROUTER_ERR_EINVAL;
+	}
+
+	*idiv = REG_READ(&priv->regs->idiv);
+
+	return ROUTER_ERR_OK;
+}
+
+/* Read the timer prescaler register into *prescaler (raw value). */
+int router_tpresc_get(void *d, uint32_t *prescaler)
+{
+	struct router_priv *priv = d;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	if (prescaler == NULL) {
+		DBG("ROUTER Wrong prescaler pointer\n");
+		return ROUTER_ERR_EINVAL;
+	}
+
+	*prescaler = REG_READ(&priv->regs->tprescaler);
+
+	return ROUTER_ERR_OK;
+}
+
+/* Write the router configuration/status register (whole register). */
+int router_cfgsts_set(void *d, uint32_t cfgsts)
+{
+	struct router_priv *priv = d;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	REG_WRITE(&priv->regs->cfgsts, cfgsts);
+
+	return ROUTER_ERR_OK;
+}
+
+/* Read the router configuration/status register into *cfgsts. */
+int router_cfgsts_get(void *d, uint32_t *cfgsts)
+{
+	struct router_priv *priv = d;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	if (cfgsts == NULL) {
+		DBG("ROUTER Wrong cfgsts pointer\n");
+		return ROUTER_ERR_EINVAL;
+	}
+
+	*cfgsts = REG_READ(&priv->regs->cfgsts);
+
+	return ROUTER_ERR_OK;
+}
+
+/* Enable time-code handling.
+ * Note: overwrites the whole timecode register with TC_EN, clearing any
+ * other bits that were set.
+ */
+int router_tc_enable(void *d)
+{
+	struct router_priv *priv = d;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	REG_WRITE(&priv->regs->timecode, TC_EN);
+
+	return ROUTER_ERR_OK;
+}
+
+/* Disable time-code handling (clears the whole timecode register). */
+int router_tc_disable(void *d)
+{
+	struct router_priv *priv = d;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	REG_WRITE(&priv->regs->timecode, 0);
+
+	return ROUTER_ERR_OK;
+}
+
+/* Reset the time-code counter by setting TC_RE (read-modify-write). */
+int router_tc_reset(void *d)
+{
+	struct router_priv *priv = d;
+	unsigned int tc;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	tc = REG_READ(&priv->regs->timecode);
+	REG_WRITE(&priv->regs->timecode, tc | TC_RE);
+
+	return ROUTER_ERR_OK;
+}
+
+/* Return the current time-code (control flags + counter), shifted to bit 0,
+ * or a negative router error code.
+ */
+int router_tc_get(void *d)
+{
+	struct router_priv *priv = d;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	return (REG_READ(&priv->regs->timecode) & (TC_CF | TC_TC)) >> TC_TC_BIT;
+}
+
+/* Unmask (enable) the router interrupt sources selected by options.
+ * All pending port interrupts are cleared before the new mask is applied,
+ * so stale events cannot fire immediately on unmask.
+ * The imask read-modify-write is serialized with the device semaphore
+ * when THREAD_SAFE is enabled.
+ */
+int router_interrupt_unmask(void *d, int options)
+{
+	struct router_priv *priv = d;
+	unsigned int mask;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	#ifdef THREAD_SAFE
+	/* Take device lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL) {
+		DBG("ROUTER Sempahore failed\n");
+		return ROUTER_ERR_ERROR;
+	}
+	#endif
+
+	/* Unmask interrupts in ROUTER */
+	/* Get previous mask */
+	mask = REG_READ(&priv->regs->imask);
+
+	/* Clear previous interrupts*/
+	REG_WRITE(&priv->regs->pip, 0xffffffff);
+
+	/* Set new mask */
+	REG_WRITE(&priv->regs->imask, mask | options);
+
+	#ifdef THREAD_SAFE
+	/* Unlock dev */
+	rtems_semaphore_release(priv->sem);
+	#endif
+
+	return ROUTER_ERR_OK;
+}
+
+/* Mask (disable) the router interrupt sources selected by options.
+ * Mirror image of router_interrupt_unmask(); also clears all pending
+ * port interrupts before applying the new mask.
+ */
+int router_interrupt_mask(void *d, int options)
+{
+	struct router_priv *priv = d;
+	unsigned int mask;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	#ifdef THREAD_SAFE
+	/* Take device lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL) {
+		DBG("ROUTER Sempahore failed\n");
+		return ROUTER_ERR_ERROR;
+	}
+	#endif
+
+	/* Mask interrupts in ROUTER */
+	/* Get previous mask */
+	mask = REG_READ(&priv->regs->imask);
+
+	/* Clear previous interrupts*/
+	REG_WRITE(&priv->regs->pip, 0xffffffff);
+
+	/* Set new mask */
+	REG_WRITE(&priv->regs->imask, mask & ~(options));
+
+	#ifdef THREAD_SAFE
+	/* Unlock dev */
+	rtems_semaphore_release(priv->sem);
+	#endif
+
+	return ROUTER_ERR_OK;
+}
+
+/* Unmask (enable) the interrupt of a single port in the per-port
+ * interrupt mask register (ipmask). Only that port's pending bit is
+ * cleared before the mask is updated.
+ */
+int router_port_interrupt_unmask(void *d, int port)
+{
+	struct router_priv *priv = d;
+	unsigned int mask;
+	int error = router_check_port(d, port);
+
+	if (error)
+		return error;
+
+	#ifdef THREAD_SAFE
+	/* Take device lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL) {
+		DBG("ROUTER Sempahore failed\n");
+		return ROUTER_ERR_ERROR;
+	}
+	#endif
+
+	/* Unmask interrupts in ROUTER */
+	/* Get previous mask */
+	mask = REG_READ(&priv->regs->ipmask);
+
+	/* Clear previous interrupts*/
+	REG_WRITE(&priv->regs->pip, (0x1 << port));
+
+	/* Set new mask */
+	REG_WRITE(&priv->regs->ipmask, mask | (0x1 << port));
+
+	#ifdef THREAD_SAFE
+	/* Unlock dev */
+	rtems_semaphore_release(priv->sem);
+	#endif
+
+	return ROUTER_ERR_OK;
+}
+
+/* Mask (disable) the interrupt of a single port in ipmask.
+ * Mirror image of router_port_interrupt_unmask().
+ */
+int router_port_interrupt_mask(void *d, int port)
+{
+	struct router_priv *priv = d;
+	unsigned int mask;
+	int error = router_check_port(d, port);
+
+	if (error)
+		return error;
+
+	#ifdef THREAD_SAFE
+	/* Take device lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL) {
+		DBG("ROUTER Sempahore failed\n");
+		return ROUTER_ERR_ERROR;
+	}
+	#endif
+
+	/* Mask interrupts in ROUTER */
+	/* Get previous mask */
+	mask = REG_READ(&priv->regs->ipmask);
+
+	/* Clear previous interrupts*/
+	REG_WRITE(&priv->regs->pip, (0x1 << port));
+
+	/* Set new mask */
+	REG_WRITE(&priv->regs->ipmask, mask & ~(0x1 << port));
+
+	#ifdef THREAD_SAFE
+	/* Unlock dev */
+	rtems_semaphore_release(priv->sem);
+	#endif
+
+	return ROUTER_ERR_OK;
+}
+
+/* Reset the whole router core by writing the reset bit (RTRCFG_RE) to the
+ * configuration/status register. Does not wait for the reset to complete.
+ */
+int router_reset(void *d)
+{
+	struct router_priv *priv = d;
+	int error = router_check_open(d);
+
+	if (error)
+		return error;
+
+	/* Reset router */
+	REG_WRITE(&priv->regs->cfgsts, RTRCFG_RE);
+
+	return ROUTER_ERR_OK;
+}
+
+/* Enable the interrupt-code generator (requires distributed-interrupt
+ * support in the core, checked by router_check_distint_support()).
+ * intn selects the interrupt number (ICODEGEN_IN field); options supplies
+ * the remaining ICODEGEN control bits. If the timer-enable option
+ * (ICODEGEN_TE) is requested, the auto-increment timer reload value
+ * aitimer is programmed as well.
+ */
+int router_icodegen_enable(void *d, uint8_t intn, uint32_t aitimer,
+			   int options)
+{
+	struct router_priv *priv = d;
+	int error = router_check_distint_support(d);
+
+	if (error)
+		return error;
+
+	#ifdef THREAD_SAFE
+	/* Take device lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL) {
+		DBG("ROUTER Sempahore failed\n");
+		return ROUTER_ERR_ERROR;
+	}
+	#endif
+
+	REG_WRITE(&priv->regs->icodegen, (options & ~(ICODEGEN_IN)) |
+			ICODEGEN_EN | (intn & ICODEGEN_IN));
+
+	if (options & ICODEGEN_TE) {
+		REG_WRITE(&priv->regs->aitimer, (aitimer & AITIMER_RL));
+	}
+
+	#ifdef THREAD_SAFE
+	/* Unlock dev */
+	rtems_semaphore_release(priv->sem);
+	#endif
+
+	return ROUTER_ERR_OK;
+}
+
+/* Disable the interrupt-code generator.
+ * NOTE(review): the register is overwritten with ICODEGEN_TE (timer
+ * enable) rather than 0 — the enable bit ICODEGEN_EN ends up cleared
+ * either way, but confirm against the hardware manual that leaving TE
+ * set is intentional here.
+ */
+int router_icodegen_disable(void *d)
+{
+	struct router_priv *priv = d;
+	int error = router_check_distint_support(d);
+
+	if (error)
+		return error;
+
+	REG_WRITE(&priv->regs->icodegen, ICODEGEN_TE);
+
+	return ROUTER_ERR_OK;
+}
+
+/* Write the ISRC timer reload register (ISRCTIMER_RL field). */
+int router_isrctimer_set(void *d, uint32_t reloadvalue)
+{
+	struct router_priv *priv = d;
+	int error = router_check_distint_support(d);
+
+	if (error)
+		return error;
+
+	/* Set ISRC TIMER */
+	REG_WRITE(&priv->regs->isrctimer, (reloadvalue & (ISRCTIMER_RL)));
+
+	return ROUTER_ERR_OK;
+}
+
+/* Write the ISR timer reload register (ISRTIMER_RL field). */
+int router_isrtimer_set(void *d, uint32_t reloadvalue)
+{
+	struct router_priv *priv = d;
+	int error = router_check_distint_support(d);
+
+	if (error)
+		return error;
+
+	/* Set ISR TIMER */
+	REG_WRITE(&priv->regs->isrtimer, (reloadvalue & (ISRTIMER_RL)));
+
+	return ROUTER_ERR_OK;
+}
+
+/* Read the ISRC timer reload register into *reloadvalue. */
+int router_isrctimer_get(void *d, uint32_t *reloadvalue)
+{
+	struct router_priv *priv = d;
+	int error = router_check_distint_support(d);
+
+	if (error)
+		return error;
+
+	if (reloadvalue == NULL) {
+		DBG("ROUTER Wrong reloadvalue pointer\n");
+		return ROUTER_ERR_EINVAL;
+	}
+
+	/* Read ISRC TIMER */
+	*reloadvalue = REG_READ(&priv->regs->isrctimer) & (ISRCTIMER_RL);
+
+	return ROUTER_ERR_OK;
+}
+
+/* Read the ISR timer reload register into *reloadvalue. */
+int router_isrtimer_get(void *d, uint32_t *reloadvalue)
+{
+	struct router_priv *priv = d;
+	int error = router_check_distint_support(d);
+
+	if (error)
+		return error;
+
+	if (reloadvalue == NULL) {
+		DBG("ROUTER Wrong reloadvalue pointer\n");
+		return ROUTER_ERR_EINVAL;
+	}
+
+	/* Read ISR TIMER */
+	*reloadvalue = REG_READ(&priv->regs->isrtimer) & (ISRTIMER_RL);
+
+	return ROUTER_ERR_OK;
+}
diff --git a/bsps/shared/grlib/spw/spwtdp.c b/bsps/shared/grlib/spw/spwtdp.c
new file mode 100644
index 0000000000..df74675355
--- /dev/null
+++ b/bsps/shared/grlib/spw/spwtdp.c
@@ -0,0 +1,991 @@
+/* SPWTDP - SpaceWire Time Distribution Protocol. The driver provides
+ * device discovery and interrupt management.
+ *
+ * COPYRIGHT (c) 2017.
+ * Cobham Gaisler AB
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <rtems.h>
+#include <rtems/bspIo.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp.h>
+#include <grlib/ambapp_bus.h>
+#include <bsp.h>
+#include <grlib/spwtdp.h>
+
+#include <grlib/grlib_impl.h>
+
+/* Visibility/attribute helpers: uncomment the empty variants for debugging
+ * so internal symbols become externally visible.
+ */
+/*#define STATIC*/
+#define STATIC static
+
+/*#define INLINE*/
+#define INLINE inline
+
+/*#define UNUSED*/
+#define UNUSED __attribute__((unused))
+
+/* NOTE(review): DEBUG is left enabled, so DBG() printf output is compiled
+ * into the driver — confirm this is intended for production builds. */
+#define DEBUG 1
+
+#ifdef DEBUG
+#define DBG(x...) printf(x)
+#else
+#define DBG(x...)
+#endif
+
+/* Memory and HW Registers Access routines. All 32-bit access routines */
+#define REG_WRITE(addr, val) \
+	(*(volatile unsigned int *)(addr) = (unsigned int)(val))
+#define REG_READ(addr) (*(volatile unsigned int *)(addr))
+
+/*
+ * Configuration register definitions
+ * DEFINED in header
+ */
+
+/*
+ * Control register definitions
+ * DEFINED in header
+ */
+
+/*
+ * TSTX Control register definitions
+ */
+#define TSTXCTRL_TSTC (0xff<<TSTXCTRL_TSTC_BIT)
+#define ETCTRL_PF (0xffff<<ETCTRL_PF_BIT)
+
+#define TSTXCTRL_TSTC_BIT 24
+#define ETCTRL_PF_BIT 0
+
+/* Length of the "spwtdpN" device name buffer, including the NUL. */
+#define DEVNAME_LEN 11
+/* Private structure of SPWTDP driver. One instance per discovered core;
+ * handed out as an opaque pointer by spwtdp_open().
+ */
+struct spwtdp_priv {
+	char devname[DEVNAME_LEN];
+	struct drvmgr_dev *dev;		/* Device */
+	struct spwtdp_regs *regs;	/* Memory-mapped register base */
+	int open;			/* Non-zero while a user holds the device */
+	int index;			/* Index into priv_tab[] */
+	int initiator;	/* Initiator configured */
+	int target;	/* Target configured */
+	int freq;	/* Frequency configured */
+
+	/* Spin-lock ISR protection */
+	SPIN_DECLARE(devlock);
+
+	/* Driver semaphore */
+	rtems_id sem;
+	spwtdp_isr_t isr;	/* User interrupt handler, or NULL */
+	void * isr_arg;		/* Argument passed to the user handler */
+};
+/* Number of SPWTDP devices registered so far; bounds priv_tab[]. */
+int spwtdp_count = 0;
+static struct spwtdp_priv *priv_tab[SPWTDP_MAX];
+
+
+/* Forward declarations of driver-internal functions. */
+STATIC void spwtdp_isr(void *data);
+STATIC int spwtdp_hw_reset(struct spwtdp_priv *priv);
+STATIC int spwtdp_init2(struct drvmgr_dev *dev);
+
+/* Driver manager operations: only the second init stage is implemented. */
+struct drvmgr_drv_ops spwtdp_ops =
+{
+	{NULL, spwtdp_init2, NULL, NULL},
+	NULL,
+	NULL
+};
+
+/* AMBA plug&play IDs this driver binds to. */
+struct amba_dev_id spwtdp_ids[] =
+{
+	{VENDOR_GAISLER, GAISLER_SPWTDP},
+	{0, 0}		/* Mark end of table */
+};
+
+/* Driver description registered with the driver manager. */
+struct amba_drv_info spwtdp_drv_info =
+{
+	{
+		DRVMGR_OBJ_DRV,			/* Driver */
+		NULL,				/* Next driver */
+		NULL,				/* Device list */
+		DRIVER_AMBAPP_GAISLER_SPWTDP_ID,/* Driver ID */
+		"SPWTDP_DRV",			/* Driver Name */
+		DRVMGR_BUS_TYPE_AMBAPP,		/* Bus Type */
+		&spwtdp_ops,
+		NULL,				/* Funcs */
+		0,				/* No devices yet */
+		sizeof(struct spwtdp_priv),	/* Let DrvMgr allocate priv */
+	},
+	&spwtdp_ids[0]
+};
+
+/* Register the SPWTDP Driver with the driver manager; call once at boot. */
+void spwtdp_register_drv(void)
+{
+	DBG("Registering SPWTDP driver\n");
+	drvmgr_drv_register(&spwtdp_drv_info.general);
+}
+
+/* First-stage hardware init: locate the core's APB registers from the
+ * AMBA plug&play info, reset the core and verify the counter precision.
+ * Returns 0 on success, -1 on missing bus info or unsupported hardware
+ * (only 56-bit counters, dat_ctrl == 0x2f00, are supported).
+ */
+STATIC int spwtdp_init(struct spwtdp_priv *priv)
+{
+	struct amba_dev_info *ainfo = priv->dev->businfo;
+	struct ambapp_apb_info *apb;
+
+	/* Get device information from AMBA PnP information */
+	if (ainfo == NULL) {
+		return -1;
+	}
+	apb = ainfo->info.apb_slv;
+	priv->regs = (struct spwtdp_regs *)apb->start;
+
+	spwtdp_hw_reset(priv);
+	/* Only support 56 bits counter */
+	if (REG_READ(&priv->regs->dat_ctrl) != 0x2f00) {
+		DBG("SPWTDP only supports 56 bit precission counters.\n");
+		return -1;
+	}
+	DBG("SPWTDP driver initialized\n");
+
+	return 0;
+}
+
+/*** INTERFACE TO DRIVER MANAGER ***/
+/* Second-stage init called by the driver manager for each SPWTDP device:
+ * registers the device in priv_tab[], creates the per-device semaphore
+ * and initializes the hardware. Returns a DRVMGR_* status code.
+ *
+ * Bug fix: a hardware init failure previously returned the bare value -1
+ * (not a driver manager status code) and leaked the semaphore created
+ * just above; now the semaphore is deleted and DRVMGR_FAIL is returned.
+ */
+STATIC int spwtdp_init2(struct drvmgr_dev *dev)
+{
+	int status;
+	struct spwtdp_priv *priv;
+
+	DBG("SPWTDP[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+
+	if (spwtdp_count >= SPWTDP_MAX)
+		return DRVMGR_ENORES;
+
+	priv = dev->priv;
+	if (priv == NULL)
+		return DRVMGR_NOMEM;
+
+	/* Register device */
+	priv->dev = dev;
+	priv->index = spwtdp_count;
+	priv_tab[priv->index] = priv;
+	snprintf(priv->devname, DEVNAME_LEN, "spwtdp%01u", priv->index);
+	spwtdp_count++;
+
+	/* Initialize Semaphore */
+	if (rtems_semaphore_create(
+		rtems_build_name('S', 'T', 'P', '0' + priv->index), 1,
+		RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
+		RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
+		RTEMS_NO_PRIORITY_CEILING, 0, &priv->sem) != RTEMS_SUCCESSFUL) {
+		priv->sem = RTEMS_ID_NONE;
+		return DRVMGR_FAIL;
+	}
+
+	/* Initialize SPWTDP Hardware */
+	status = spwtdp_init(priv);
+	if (status) {
+		printk("Failed to initialize spwtdp driver %d\n", status);
+		/* Release the semaphore created above before failing */
+		rtems_semaphore_delete(priv->sem);
+		priv->sem = RTEMS_ID_NONE;
+		return DRVMGR_FAIL;
+	}
+
+	return DRVMGR_OK;
+}
+
+/* Hardware Reset of SPWTDP: clear all pending interrupt events, assert
+ * the reset bit (CONF0_RS) and poll up to 1000 iterations for the bit to
+ * self-clear. Runs under the device spinlock with interrupts disabled.
+ * Returns SPWTDP_ERR_OK, or SPWTDP_ERR_ERROR if the reset timed out.
+ */
+STATIC int spwtdp_hw_reset(struct spwtdp_priv *priv)
+{
+	int i = 1000;
+	SPIN_IRQFLAGS(irqflags);
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+	/* Clear interrupts */
+	REG_WRITE(&priv->regs->ists, SPWTDP_IRQ_WCLEAR);
+
+	/* Reset the SPWTDP core */
+	REG_WRITE(&priv->regs->conf[0], CONF0_RS);
+
+	/* Wait for reset */
+	while ((REG_READ(&priv->regs->conf[0]) & CONF0_RS) && (i > 0)) {
+		i--;
+	}
+
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return ((i > 0)? SPWTDP_ERR_OK : SPWTDP_ERR_ERROR);
+}
+
+/* Public reset: unregister any user ISR (this also validates the handle),
+ * clear the initiator/target/frequency software state and reset the core.
+ */
+int spwtdp_reset(void *spwtdp)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	/* Check priv and unregister isr */
+	int ret = spwtdp_isr_unregister(spwtdp);
+	if (ret != SPWTDP_ERR_OK)
+		return ret;
+
+	priv->initiator=0;
+	priv->target=0;
+	priv->freq=0;
+
+	return spwtdp_hw_reset(priv);
+}
+
+/* Open SPWTDP device number dev_no and return an opaque handle, or NULL
+ * if the index is out of range or the device is already open.
+ * NOTE(review): the open flag is tested and set without a lock — two
+ * tasks racing on open could both succeed; confirm callers serialize.
+ */
+void *spwtdp_open(int dev_no)
+{
+	struct spwtdp_priv *priv;
+
+	if (dev_no >= spwtdp_count)
+		return NULL;
+
+	/* Get Device */
+	priv = priv_tab[dev_no];
+	if ((priv == NULL)||(priv->open == 1)) {
+		return NULL;
+	}
+
+	/* Set initial state of software */
+	priv->open = 1;
+
+	return priv;
+}
+
+/* Close the device: reset the core (also validates the handle) and mark
+ * it available for a new spwtdp_open().
+ */
+int spwtdp_close(void *spwtdp)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	/* Check priv and reset core */
+	int ret = spwtdp_reset(spwtdp);
+	if (ret != SPWTDP_ERR_OK)
+		return ret;
+
+	priv->open = 0;
+	return SPWTDP_ERR_OK;
+}
+
+/* Program the frequency configuration: fsinc into CONF1 (FSINC field),
+ * cv and etinc into CONF2. Marks the device frequency-configured, which
+ * the enable functions require. Serialized with the device semaphore.
+ */
+int spwtdp_freq_setup(void *spwtdp, uint32_t fsinc, uint32_t cv, uint8_t etinc)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	/* Check priv */
+	if (priv == NULL)
+		return SPWTDP_ERR_NOINIT;
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Take SPWTDP lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return SPWTDP_ERR_ERROR;
+
+	REG_WRITE(&priv->regs->conf[1], fsinc & CONF1_FSINC);
+	REG_WRITE(&priv->regs->conf[2],
+		  ((cv<<CONF2_CV_BIT) & CONF2_CV) |
+		  ((uint32_t)etinc & CONF2_ETINC));
+
+	rtems_semaphore_release(priv->sem);
+	priv->freq = 1;
+
+	return SPWTDP_ERR_OK;
+}
+
+/* CONF0 bits an initiator configuration is allowed to touch. */
+#define CONF0_INI_MASK (CONF0_EP|CONF0_ET|CONF0_SP|CONF0_SE|CONF0_LE| \
+			CONF0_TD)
+/* Configure the core as a TDP initiator: RMW the CONF0 initiator bits and
+ * the mapping field, then mark the device initiator-configured. Fails if
+ * the device was already configured as a target. Semaphore-serialized.
+ */
+int spwtdp_initiator_conf(void *spwtdp, uint8_t mapping, uint32_t options)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	/* Check priv */
+	if (priv == NULL)
+		return SPWTDP_ERR_NOINIT;
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check if configured as target */
+	if (priv->target == 1)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Take SPWTDP lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return SPWTDP_ERR_ERROR;
+
+	unsigned int conf0 = REG_READ(&priv->regs->conf[0]);
+	conf0 &= ~(CONF0_INI_MASK|CONF0_MAP);
+	REG_WRITE(&priv->regs->conf[0],
+		  conf0 | (options & CONF0_INI_MASK) |
+		  (((uint32_t)mapping << CONF0_MAP_BIT) & CONF0_MAP));
+
+	priv->initiator = 1;
+
+	rtems_semaphore_release(priv->sem);
+
+	return SPWTDP_ERR_OK;
+}
+
+/* CONF0 bits a target configuration is allowed to touch. */
+#define CONF0_TAR_MASK (CONF0_JE|CONF0_ST|CONF0_EP|CONF0_ET|CONF0_SP| \
+			CONF0_SE|CONF0_LE|CONF0_TD|CONF0_ME)
+/* Configure the core as a TDP target: RMW the CONF0 target bits and the
+ * mapping field, then mark the device target-configured. Fails if the
+ * device was already configured as an initiator. Semaphore-serialized.
+ *
+ * Bug fix: this function previously set priv->initiator = 1 instead of
+ * priv->target = 1 (copy-paste from spwtdp_initiator_conf), so a
+ * configured target could never pass the "priv->target != 1" checks in
+ * spwtdp_target_int_conf()/spwtdp_target_enable(), and was wrongly
+ * treated as an initiator.
+ */
+int spwtdp_target_conf(void *spwtdp, uint8_t mapping, uint32_t options)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	/* Check priv */
+	if (priv == NULL)
+		return SPWTDP_ERR_NOINIT;
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check if configured as initiator */
+	if (priv->initiator == 1)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Take SPWTDP lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return SPWTDP_ERR_ERROR;
+
+	unsigned int conf0 = REG_READ(&priv->regs->conf[0]);
+	conf0 &= ~(CONF0_TAR_MASK|CONF0_MAP);
+	REG_WRITE(&priv->regs->conf[0],
+		  conf0 | (options & CONF0_TAR_MASK) |
+		  (((uint32_t)mapping << CONF0_MAP_BIT) & CONF0_MAP));
+
+	priv->target = 1;
+
+	rtems_semaphore_release(priv->sem);
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Program the CONF3 interrupt configuration for an initiator: sync-time
+ * mask (stm) and the RX/TX interrupt acknowledge values. Requires the
+ * device to be open and initiator-configured.
+ */
+int spwtdp_initiator_int_conf(void *spwtdp, uint8_t stm, uint8_t inrx,
+			      uint8_t intx)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	/* Check priv */
+	if (priv == NULL)
+		return SPWTDP_ERR_NOINIT;
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check if configured as initiator */
+	if (priv->initiator != 1)
+		return SPWTDP_ERR_EINVAL;
+
+	REG_WRITE(&priv->regs->conf[3],
+		  (((uint32_t)stm << CONF3_STM_BIT) & CONF3_STM) |
+		  (((uint32_t)inrx << CONF3_INRX_BIT) & CONF3_INRX) |
+		  (((uint32_t)intx << CONF3_INTX_BIT) & CONF3_INTX));
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Program the CONF3 interrupt configuration for a target: the DI option
+ * bit plus the RX/TX interrupt acknowledge values. Requires the device
+ * to be open and target-configured.
+ */
+int spwtdp_target_int_conf(void *spwtdp, uint8_t inrx, uint8_t intx,
+			   uint32_t options)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	/* Check priv */
+	if (priv == NULL) {
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check if configured as target */
+	if (priv->target != 1)
+		return SPWTDP_ERR_EINVAL;
+
+	REG_WRITE(&priv->regs->conf[3],
+		  (options & CONF3_DI) |
+		  (((uint32_t)inrx << CONF3_INRX_BIT) & CONF3_INRX) |
+		  (((uint32_t)intx << CONF3_INTX_BIT) & CONF3_INTX));
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Start initiator operation: set the transmit-enable bit (CONF0_TE).
+ * Requires open, initiator-configured and frequency-configured state.
+ * The CONF0 read-modify-write is serialized with the device semaphore.
+ */
+int spwtdp_initiator_enable(void *spwtdp)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	/* Check priv */
+	if (priv == NULL) {
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check if configured as initiator */
+	if (priv->initiator != 1)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check if frequency is configured */
+	if (priv->freq != 1)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Take SPWTDP lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return SPWTDP_ERR_ERROR;
+
+	unsigned int conf0 = REG_READ(&priv->regs->conf[0]);
+	REG_WRITE(&priv->regs->conf[0], conf0 | CONF0_TE);
+
+	rtems_semaphore_release(priv->sem);
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Start target operation: set the receive-enable bit (CONF0_RE).
+ * Requires open, target-configured and frequency-configured state.
+ */
+int spwtdp_target_enable(void *spwtdp)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	/* Check priv */
+	if (priv == NULL)
+		return SPWTDP_ERR_NOINIT;
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check if configured as target */
+	if (priv->target != 1)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check if frequency is configured */
+	if (priv->freq != 1)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Take SPWTDP lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return SPWTDP_ERR_ERROR;
+
+	unsigned int conf0 = REG_READ(&priv->regs->conf[0]);
+	REG_WRITE(&priv->regs->conf[0], conf0 | CONF0_RE);
+
+	rtems_semaphore_release(priv->sem);
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Stop initiator operation: clear CONF0_TE (no role check needed). */
+int spwtdp_initiator_disable(void *spwtdp)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	/* Check priv */
+	if (priv == NULL)
+		return SPWTDP_ERR_NOINIT;
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Take SPWTDP lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return SPWTDP_ERR_ERROR;
+
+	unsigned int conf0 = REG_READ(&priv->regs->conf[0]);
+	REG_WRITE(&priv->regs->conf[0], conf0 & ~(CONF0_TE));
+
+	rtems_semaphore_release(priv->sem);
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Stop target operation: clear CONF0_RE (no role check needed). */
+int spwtdp_target_disable(void *spwtdp)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	/* Check priv */
+	if (priv == NULL)
+		return SPWTDP_ERR_NOINIT;
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Take SPWTDP lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return SPWTDP_ERR_ERROR;
+
+	unsigned int conf0 = REG_READ(&priv->regs->conf[0]);
+	REG_WRITE(&priv->regs->conf[0], conf0 & ~(CONF0_RE));
+
+	rtems_semaphore_release(priv->sem);
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Get and Clear status: read the status register, write back the bits
+ * selected by clrmask to clear them, and optionally return the value in
+ * *sts (sts may be NULL for clear-only use).
+ * NOTE(review): unlike spwtdp_interrupt_status(), the read/clear pair is
+ * not done under the device spinlock — confirm an ISR cannot race here.
+ */
+int spwtdp_status(void *spwtdp, uint32_t *sts, uint32_t clrmask)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	if (priv == NULL) {
+		/* SPWTDP not initialized */
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	unsigned int status = REG_READ(&priv->regs->stat[0]);
+	REG_WRITE(&priv->regs->stat[0], status & clrmask);
+
+	if (sts != NULL)
+		*sts = status;
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Get and Clear interrupts: read the interrupt status register and clear
+ * the bits selected by clrmask, atomically with respect to the ISR
+ * (device spinlock held). sts may be NULL for clear-only use.
+ */
+int spwtdp_interrupt_status(void *spwtdp, uint32_t *sts, uint32_t clrmask)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+	SPIN_IRQFLAGS(irqflags);
+
+	if (priv == NULL) {
+		/* SPWTDP not initialized */
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+	unsigned int status = REG_READ(&priv->regs->ists);
+	REG_WRITE(&priv->regs->ists, status & clrmask);
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	if (sts != NULL)
+		*sts = status;
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Unmask interrupts: OR irqmask into the interrupt enable register. */
+int spwtdp_interrupt_unmask(void *spwtdp, uint32_t irqmask)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	if (priv == NULL) {
+		/* SPWTDP not initialized */
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	unsigned int ctrl = REG_READ(&priv->regs->ien);
+	REG_WRITE(&priv->regs->ien, ctrl | irqmask);
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Mask interrupts: clear the irqmask bits in the interrupt enable reg. */
+int spwtdp_interrupt_mask(void *spwtdp, uint32_t irqmask)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	if (priv == NULL) {
+		/* SPWTDP not initialized */
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	unsigned int ctrl = REG_READ(&priv->regs->ien);
+	REG_WRITE(&priv->regs->ien, ctrl & ~(irqmask));
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Install a user interrupt handler: store func/data, hook the interrupt
+ * at the interrupt controller and set the AMBA interrupt enable bit
+ * (CONF0_AE) under the device spinlock.
+ */
+int spwtdp_isr_register(void *spwtdp, spwtdp_isr_t func, void *data)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+	SPIN_ISR_IRQFLAGS(irqflags);
+
+	if (priv == NULL) {
+		/* SPWTDP not initialized */
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check isr */
+	if (func == NULL) {
+		/* No ISR */
+		return SPWTDP_ERR_EINVAL;
+	}
+
+	priv->isr = func;
+	priv->isr_arg = data;
+
+	/* Register and Enable Interrupt at Interrupt controller */
+	drvmgr_interrupt_register(priv->dev, 0, "spwtdp", spwtdp_isr, priv);
+
+	/* Enable AMBA Interrupts */
+	SPIN_LOCK(&priv->devlock, irqflags);
+	unsigned int cfg0 = REG_READ(&priv->regs->conf[0]);
+	REG_WRITE(&priv->regs->conf[0], cfg0 | CONF0_AE);
+	SPIN_UNLOCK(&priv->devlock, irqflags);
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Remove the user interrupt handler: mask all SPWTDP interrupt sources
+ * (spwtdp_interrupt_mask also validates the handle), clear CONF0_AE,
+ * unhook the controller interrupt and forget the callback.
+ */
+int spwtdp_isr_unregister(void *spwtdp)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+	SPIN_ISR_IRQFLAGS(irqflags);
+
+	/* Disable IRQS (and check for priv!=NULL) */
+	int ret=spwtdp_interrupt_mask(spwtdp, SPWTDP_IRQ_WCLEAR);
+	if (ret != SPWTDP_ERR_OK)
+		return ret;
+
+	/* Disable AMBA Interrupts */
+	SPIN_LOCK(&priv->devlock, irqflags);
+	unsigned int cfg0 = REG_READ(&priv->regs->conf[0]);
+	REG_WRITE(&priv->regs->conf[0], cfg0 & ~(CONF0_AE));
+	SPIN_UNLOCK(&priv->devlock, irqflags);
+
+	/* Disable Interrupt at Interrupt controller */
+	drvmgr_interrupt_unregister(priv->dev, 0, spwtdp_isr, priv);
+
+	/* Unregister isr */
+	priv->isr = NULL;
+	priv->isr_arg = NULL;
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Low-level interrupt service routine: read the pending interrupt events,
+ * acknowledge them in the hardware (under the spinlock) and forward the
+ * event mask to the user handler if one is registered. Returns early when
+ * the interrupt was not generated by this SPWTDP (shared IRQ line).
+ */
+STATIC void spwtdp_isr(void *arg)
+{
+	struct spwtdp_priv *priv = arg;
+	unsigned int ists = REG_READ(&priv->regs->ists);
+	SPIN_ISR_IRQFLAGS(irqflags);
+
+	/* Return if the SPWTDP didn't generate the IRQ */
+	if (ists == 0)
+		return;
+
+	SPIN_LOCK(&priv->devlock, irqflags);
+	REG_WRITE(&priv->regs->ists, ists); /* clear handled interrupt events */
+	SPIN_UNLOCK(&priv->devlock, irqflags);
+
+	/* Let user Handle Interrupt */
+	if (priv->isr!=NULL)
+		priv->isr(ists, priv->isr_arg);
+
+	return;
+}
+
+/* Read the datation elapsed time: preamble field from dat_ctrl plus the
+ * five 32-bit dat_et words copied into val->data.
+ * NOTE(review): val->data is accessed through an unsigned int pointer —
+ * assumes spwtdp_time_t.data is 32-bit aligned; confirm in the header.
+ */
+int spwtdp_dat_et_get(void * spwtdp, spwtdp_time_t * val)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	if (priv == NULL) {
+		/* SPWTDP not initialized */
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check pointer */
+	if (val == NULL) {
+		return SPWTDP_ERR_EINVAL;
+	}
+
+	val->preamble = REG_READ(&priv->regs->dat_ctrl) & ETCTRL_PF;
+	unsigned int * buffer = (unsigned int *) val->data;
+	buffer[0] = REG_READ(&priv->regs->dat_et[0]);
+	buffer[1] = REG_READ(&priv->regs->dat_et[1]);
+	buffer[2] = REG_READ(&priv->regs->dat_et[2]);
+	buffer[3] = REG_READ(&priv->regs->dat_et[3]);
+	buffer[4] = REG_READ(&priv->regs->dat_et[4]);
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Read the time-stamp RX elapsed time (same layout as spwtdp_dat_et_get). */
+int spwtdp_tsrx_et_get(void * spwtdp, spwtdp_time_t * val)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	if (priv == NULL) {
+		/* SPWTDP not initialized */
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check pointer */
+	if (val == NULL) {
+		return SPWTDP_ERR_EINVAL;
+	}
+
+	val->preamble = REG_READ(&priv->regs->ts_rx_ctrl) & ETCTRL_PF;
+	unsigned int * buffer = (unsigned int *) val->data;
+	buffer[0] = REG_READ(&priv->regs->ts_rx_et[0]);
+	buffer[1] = REG_READ(&priv->regs->ts_rx_et[1]);
+	buffer[2] = REG_READ(&priv->regs->ts_rx_et[2]);
+	buffer[3] = REG_READ(&priv->regs->ts_rx_et[3]);
+	buffer[4] = REG_READ(&priv->regs->ts_rx_et[4]);
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Read the time-stamp TX elapsed time (same layout as spwtdp_dat_et_get). */
+int spwtdp_tstx_et_get(void * spwtdp, spwtdp_time_t * val)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	if (priv == NULL) {
+		/* SPWTDP not initialized */
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check pointer */
+	if (val == NULL) {
+		return SPWTDP_ERR_EINVAL;
+	}
+
+	val->preamble = REG_READ(&priv->regs->ts_tx_ctrl) & ETCTRL_PF;
+	unsigned int * buffer = (unsigned int *) val->data;
+	buffer[0] = REG_READ(&priv->regs->ts_tx_et[0]);
+	buffer[1] = REG_READ(&priv->regs->ts_tx_et[1]);
+	buffer[2] = REG_READ(&priv->regs->ts_tx_et[2]);
+	buffer[3] = REG_READ(&priv->regs->ts_tx_et[3]);
+	buffer[4] = REG_READ(&priv->regs->ts_tx_et[4]);
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Read the latency elapsed time (same layout as spwtdp_dat_et_get). */
+int spwtdp_lat_et_get(void * spwtdp, spwtdp_time_t * val)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	if (priv == NULL) {
+		/* SPWTDP not initialized */
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check pointer */
+	if (val == NULL) {
+		return SPWTDP_ERR_EINVAL;
+	}
+
+	val->preamble = REG_READ(&priv->regs->lat_ctrl) & ETCTRL_PF;
+	unsigned int * buffer = (unsigned int *) val->data;
+	buffer[0] = REG_READ(&priv->regs->lat_et[0]);
+	buffer[1] = REG_READ(&priv->regs->lat_et[1]);
+	buffer[2] = REG_READ(&priv->regs->lat_et[2]);
+	buffer[3] = REG_READ(&priv->regs->lat_et[3]);
+	buffer[4] = REG_READ(&priv->regs->lat_et[4]);
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Read the command elapsed time (same layout as spwtdp_dat_et_get). */
+int spwtdp_cmd_et_get(void * spwtdp, spwtdp_time_t * val)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	if (priv == NULL) {
+		/* SPWTDP not initialized */
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check pointer */
+	if (val == NULL) {
+		return SPWTDP_ERR_EINVAL;
+	}
+
+	val->preamble = REG_READ(&priv->regs->cmd_ctrl) & ETCTRL_PF;
+	unsigned int * buffer = (unsigned int *) val->data;
+	buffer[0] = REG_READ(&priv->regs->cmd_et[0]);
+	buffer[1] = REG_READ(&priv->regs->cmd_et[1]);
+	buffer[2] = REG_READ(&priv->regs->cmd_et[2]);
+	buffer[3] = REG_READ(&priv->regs->cmd_et[3]);
+	buffer[4] = REG_READ(&priv->regs->cmd_et[4]);
+
+	return SPWTDP_ERR_OK;
+}
+
+int spwtdp_initiator_tstx_conf(void * spwtdp, uint8_t tstc)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ if (priv == NULL) {
+ /* SPWTDP not initialized */
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check if configured as initiator */
+ if (priv->initiator != 1)
+ return SPWTDP_ERR_EINVAL;
+
+ REG_WRITE(&priv->regs->ts_tx_ctrl,
+ (((uint32_t)tstc) << TSTXCTRL_TSTC_BIT) & TSTXCTRL_TSTC);
+
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_initiator_cmd_et_set(void *spwtdp, spwtdp_time_t val)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ if (priv == NULL) {
+ /* SPWTDP not initialized */
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check if configured as initiator */
+ if (priv->initiator != 1)
+ return SPWTDP_ERR_EINVAL;
+
+ unsigned int * buffer = (unsigned int *) val.data;
+ REG_WRITE(&priv->regs->lat_et[0], buffer[0]);
+ REG_WRITE(&priv->regs->lat_et[1], buffer[1]);
+ REG_WRITE(&priv->regs->lat_et[2], buffer[2]);
+ REG_WRITE(&priv->regs->lat_et[3], buffer[3]);
+ REG_WRITE(&priv->regs->lat_et[4], buffer[4]);
+
+
+ /* Take SPWTDP lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return SPWTDP_ERR_ERROR;
+
+ /* Signal new command */
+ unsigned int ctrl = REG_READ(&priv->regs->cmd_ctrl);
+ REG_WRITE(&priv->regs->cmd_ctrl, ctrl | CTRL_NC);
+
+ rtems_semaphore_release(priv->sem);
+
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_initiator_cmd_spwtc_set(void *spwtdp, uint8_t spwtc)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ if (priv == NULL) {
+ /* SPWTDP not initialized */
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check if configured as initiator */
+ if (priv->initiator != 1)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Take SPWTDP lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return SPWTDP_ERR_ERROR;
+
+ unsigned int ctrl = (REG_READ(&priv->regs->cmd_ctrl) &~ CTRL_SPWTC);
+ REG_WRITE(&priv->regs->cmd_ctrl,
+ ctrl | (((uint32_t)spwtc << CTRL_SPWTC_BIT) & CTRL_SPWTC));
+
+ rtems_semaphore_release(priv->sem);
+
+ return SPWTDP_ERR_OK;
+}
+
+#define CTRL_TAR_MASK (CTRL_NC|CTRL_IS)
+int spwtdp_target_cmd_conf(void *spwtdp, uint8_t spwtc, uint16_t cpf,
+ uint32_t options)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ if (priv == NULL) {
+ /* SPWTDP not initialized */
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check if configured as target */
+ if (priv->target != 1)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Take SPWTDP lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return SPWTDP_ERR_ERROR;
+
+ REG_WRITE(&priv->regs->cmd_ctrl,
+ (options & CTRL_TAR_MASK) |
+ ((cpf << CTRL_CPF_BIT) & CTRL_CPF) |
+ (((uint32_t)spwtc << CTRL_SPWTC_BIT) & CTRL_SPWTC));
+
+ rtems_semaphore_release(priv->sem);
+
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_precision_get(void *spwtdp, uint8_t *fine, uint8_t *coarse)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+ int coarse_precision, fine_precision;
+
+ if (priv == NULL) {
+ /* SPWTDP not initialized */
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ unsigned int preamble = REG_READ(&priv->regs->dat_ctrl);
+
+ if (preamble & 0x80) {
+ DBG("Pfield second extension set: unknown format");
+ return SPWTDP_ERR_ERROR;
+ }
+ if (!((preamble & 0x7000) == 0x2000 || (preamble & 0x7000) == 0x1000)) {
+ DBG(" PField indicates not unsegmented code: unknown format");
+ return SPWTDP_ERR_ERROR;
+ }
+ /*
+ * coarse_precision = 32;
+ * fine_precision = 24;
+ */
+ coarse_precision = ((preamble >> 10) & 0x3) + 1;
+ if (preamble & 0x80)
+ coarse_precision += (preamble >> 5) & 0x3;
+ fine_precision = (preamble >> 8) & 0x3;
+ if (preamble & 0x80)
+ fine_precision += (preamble >> 2) & 0x7;
+ if (coarse!=NULL)
+ *coarse = coarse_precision;
+ if (fine!=NULL)
+ *fine = fine_precision;
+
+ return SPWTDP_ERR_OK;
+}
+