summaryrefslogtreecommitdiffstats
path: root/bsps/shared/grlib
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2018-12-22 18:31:04 +0100
committerSebastian Huber <sebastian.huber@embedded-brains.de>2019-01-22 12:46:33 +0100
commit7eb606d393306da25fd6e6aa7f8595ffb2e924fc (patch)
tree085befd6fe5e29d229fec9683735516d48e9d41e /bsps/shared/grlib
parentgrlib: Move header files (diff)
downloadrtems-7eb606d393306da25fd6e6aa7f8595ffb2e924fc.tar.bz2
grlib: Move source files
Update #3678.
Diffstat (limited to 'bsps/shared/grlib')
-rw-r--r--bsps/shared/grlib/1553/b1553brm.c1549
-rw-r--r--bsps/shared/grlib/1553/b1553rt.c857
-rw-r--r--bsps/shared/grlib/1553/gr1553b.c312
-rw-r--r--bsps/shared/grlib/1553/gr1553bc.c1685
-rw-r--r--bsps/shared/grlib/1553/gr1553bm.c509
-rw-r--r--bsps/shared/grlib/1553/gr1553rt.c1210
-rw-r--r--bsps/shared/grlib/amba/ahbstat.c239
-rw-r--r--bsps/shared/grlib/amba/ambapp.c457
-rw-r--r--bsps/shared/grlib/amba/ambapp_alloc.c25
-rw-r--r--bsps/shared/grlib/amba/ambapp_count.c23
-rw-r--r--bsps/shared/grlib/amba/ambapp_depth.c25
-rw-r--r--bsps/shared/grlib/amba/ambapp_find_by_idx.c39
-rw-r--r--bsps/shared/grlib/amba/ambapp_freq.c109
-rw-r--r--bsps/shared/grlib/amba/ambapp_names.c447
-rw-r--r--bsps/shared/grlib/amba/ambapp_old.c112
-rw-r--r--bsps/shared/grlib/amba/ambapp_parent.c23
-rw-r--r--bsps/shared/grlib/amba/ambapp_show.c68
-rw-r--r--bsps/shared/grlib/analog/gradcdac.c580
-rw-r--r--bsps/shared/grlib/ascs/grascs.c619
-rw-r--r--bsps/shared/grlib/btimer/gptimer.c545
-rw-r--r--bsps/shared/grlib/btimer/tlib.c77
-rw-r--r--bsps/shared/grlib/btimer/tlib_ckinit.c442
-rw-r--r--bsps/shared/grlib/can/canmux.c199
-rw-r--r--bsps/shared/grlib/can/grcan.c1976
-rw-r--r--bsps/shared/grlib/can/occan.c1971
-rw-r--r--bsps/shared/grlib/can/satcan.c716
-rw-r--r--bsps/shared/grlib/drvmgr/ambapp_bus.c840
-rw-r--r--bsps/shared/grlib/drvmgr/ambapp_bus_grlib.c252
-rw-r--r--bsps/shared/grlib/drvmgr/get_resarray_count.c20
-rw-r--r--bsps/shared/grlib/gpio/gpiolib.c272
-rw-r--r--bsps/shared/grlib/gpio/grgpio.c449
-rw-r--r--bsps/shared/grlib/i2c/i2cmst.c416
-rw-r--r--bsps/shared/grlib/iommu/griommu.c1458
-rw-r--r--bsps/shared/grlib/irq/genirq.c244
-rw-r--r--bsps/shared/grlib/l2c/l2c.c2118
-rw-r--r--bsps/shared/grlib/mem/mctrl.c213
-rw-r--r--bsps/shared/grlib/net/README7
-rw-r--r--bsps/shared/grlib/net/greth.c1655
-rw-r--r--bsps/shared/grlib/net/network_interface_add.c62
-rw-r--r--bsps/shared/grlib/pci/gr_701.c618
-rw-r--r--bsps/shared/grlib/pci/gr_rasta_adcdac.c694
-rw-r--r--bsps/shared/grlib/pci/gr_rasta_io.c892
-rw-r--r--bsps/shared/grlib/pci/gr_rasta_spw_router.c696
-rw-r--r--bsps/shared/grlib/pci/gr_rasta_tmtc.c897
-rw-r--r--bsps/shared/grlib/pci/gr_tmtc_1553.c595
-rw-r--r--bsps/shared/grlib/pci/grpci.c722
-rw-r--r--bsps/shared/grlib/pci/grpci2.c970
-rw-r--r--bsps/shared/grlib/pci/grpci2dma.c2026
-rw-r--r--bsps/shared/grlib/pci/pcif.c586
-rw-r--r--bsps/shared/grlib/pwm/grpwm.c854
-rw-r--r--bsps/shared/grlib/scrub/memscrub.c692
-rw-r--r--bsps/shared/grlib/slink/grslink.c664
-rw-r--r--bsps/shared/grlib/spi/spictrl.c1018
-rw-r--r--bsps/shared/grlib/spw/grspw.c2038
-rw-r--r--bsps/shared/grlib/spw/grspw_pkt.c3295
-rw-r--r--bsps/shared/grlib/spw/grspw_router.c1939
-rw-r--r--bsps/shared/grlib/spw/spwtdp.c991
-rw-r--r--bsps/shared/grlib/stat/l4stat.c626
-rw-r--r--bsps/shared/grlib/time/grctm.c411
-rw-r--r--bsps/shared/grlib/time/spwcuc.c371
-rw-r--r--bsps/shared/grlib/tmtc/grtc.c1984
-rw-r--r--bsps/shared/grlib/tmtc/grtm.c1613
-rw-r--r--bsps/shared/grlib/uart/apbuart_cons.c757
-rw-r--r--bsps/shared/grlib/uart/apbuart_polled.c52
-rw-r--r--bsps/shared/grlib/uart/apbuart_termios.c259
-rw-r--r--bsps/shared/grlib/uart/cons.c137
66 files changed, 49217 insertions, 0 deletions
diff --git a/bsps/shared/grlib/1553/b1553brm.c b/bsps/shared/grlib/1553/b1553brm.c
new file mode 100644
index 0000000000..5575abb525
--- /dev/null
+++ b/bsps/shared/grlib/1553/b1553brm.c
@@ -0,0 +1,1549 @@
+/*
+ * BRM driver
+ *
+ * COPYRIGHT (c) 2006.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+/********** Set defaults **********/
+
+/* default to 16K memory layout */
+#define DMA_MEM_128K
+#if !defined(DMA_MEM_128K)
+ #define DMA_MEM_16K
+#endif
+
+#include <bsp.h>
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <ctype.h>
+#include <rtems/bspIo.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/b1553brm.h>
+#include <grlib/ambapp.h>
+#include <grlib/ambapp_bus.h>
+
+#include <grlib/grlib_impl.h>
+
+/* Uncomment for debug output */
+/*#define DEBUG 1
+#define FUNCDEBUG 1*/
+#undef DEBUG
+#undef FUNCDEBUG
+
+/* EVENT_QUEUE_SIZE sets the size of the event queue
+ */
+#define EVENT_QUEUE_SIZE 1024
+
+
+#define INDEX(x) ( x&(EVENT_QUEUE_SIZE-1) )
+
+#if 0
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+#ifdef FUNCDEBUG
+#define FUNCDBG(x...) printk(x)
+#else
+#define FUNCDBG(x...)
+#endif
+
+#define READ_REG(address) (*(volatile unsigned int *)address)
+#define READ_DMA(address) _BRM_REG_READ16((unsigned int)address)
+static __inline__ unsigned short _BRM_REG_READ16(unsigned int addr) {
+ unsigned short tmp;
+ __asm__ (" lduha [%1]1, %0 "
+ : "=r"(tmp)
+ : "r"(addr)
+ );
+ return tmp;
+}
+
+static rtems_device_driver brm_initialize(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver brm_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver brm_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver brm_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver brm_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver brm_control(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+
+#define BRM_DRIVER_TABLE_ENTRY { brm_initialize, brm_open, brm_close, brm_read, brm_write, brm_control }
+
+static rtems_driver_address_table b1553brm_driver = BRM_DRIVER_TABLE_ENTRY;
+
+struct msg {
+ unsigned short miw;
+ unsigned short time;
+ unsigned short data[32];
+};
+#if defined(DMA_MEM_128K)
+struct circ_buf {
+ struct msg msgs[9];
+};
+#elif defined(DMA_MEM_16K)
+/* two message queue */
+struct circ_buf_2 {
+ struct msg msgs[2];
+};
+/* one message queue */
+struct circ_buf_1 {
+ struct msg msgs[1];
+};
+#endif
+
+struct irq_log_list {
+ volatile unsigned short iiw;
+ volatile unsigned short iaw;
+};
+
+typedef struct {
+
+ struct drvmgr_dev *dev; /* Driver manager device */
+ char devName[32]; /* Device Name */
+ struct brm_reg *regs;
+
+ unsigned int memarea_base;
+ unsigned int memarea_base_remote;
+ unsigned int cfg_clksel;
+ unsigned int cfg_clkdiv;
+ unsigned int cfg_freq;
+
+ /* BRM descriptors */
+ struct desc_table {
+ volatile unsigned short ctrl;
+ volatile unsigned short top;
+ volatile unsigned short cur;
+ volatile unsigned short bot;
+ } *desc;
+
+ volatile unsigned short *mem;
+ /* bc mem struct */
+ struct {
+ /* BC Descriptors */
+ struct {
+ unsigned short ctrl; /* control */
+ unsigned short cw1; /* Command word 1*/
+		unsigned short cw2;    /* Command word 2*/
+ unsigned short dptr; /* Data pointer in halfword offset from bcmem */
+ unsigned short tsw[2]; /* status word 1 & 2 */
+ unsigned short ba; /* branch address */
+ unsigned short timer; /* timer value */
+ } descs[128]; /* 2k (1024 half words) */
+
+ /* message data */
+ struct {
+ unsigned short data[32]; /* 1 message's data */
+ } msg_data[128]; /* 8k */
+
+#if defined(DMA_MEM_128K)
+ /* offset to last 64bytes of 128k */
+ unsigned short unused[(64*1024-(128*8+128*32))-16*2];
+#elif defined(DMA_MEM_16K)
+ unsigned short unused[(8*1024-(128*8+128*32))-16*2];
+#endif
+ /* interrupt log at 64 bytes from end */
+ struct irq_log_list irq_logs[16];
+ } *bcmem;
+
+#if defined(DMA_MEM_128K)
+ /* Memory structure of a RT being inited, just used
+ * for RT initialization.
+ *
+ * *mesgs[32] fit each minimally 8 messages per sub address.
+ */
+ struct {
+ /* RX Sub Address descriptors */
+ struct desc_table rxsubs[32];
+ /* TX Sub Address descriptors */
+ struct desc_table txsubs[32];
+ /* RX mode code descriptors */
+ struct desc_table rxmodes[32];
+ /* TX mode code descriptors */
+ struct desc_table txmodes[32];
+
+ /* RX Sub Address messages */
+ struct circ_buf rxsuba_msgs[32];
+ /* TX Sub Address messages */
+ struct circ_buf txsuba_msgs[32];
+ /* RX Mode Code messages */
+ struct circ_buf rxmode_msgs[32];
+		/* TX Mode Code messages */
+ struct circ_buf txmode_msgs[32];
+
+ /* offset to last 64bytes of 128k: tot-used-needed */
+ unsigned short unused[(64*1024-(4*32*4+4*32*9*34))-16*2];
+
+ /* interrupt log at 64 bytes from end */
+ struct irq_log_list irq_logs[16];
+ } *rtmem;
+#elif defined(DMA_MEM_16K)
+ /* Memory structure of a RT being inited, just used
+ * for RT initialization.
+ *
+ * circ_buf_2 *mesgs[32] fit each minimally 2 messages per queue.
+ * circ_buf_1 *mesgs[32] fit each minimally 1 messages per queue.
+ */
+ struct {
+ /* RX Sub Address descriptors */
+ struct desc_table rxsubs[32];
+ /* TX Sub Address descriptors */
+ struct desc_table txsubs[32];
+ /* RX mode code descriptors */
+ struct desc_table rxmodes[32];
+ /* TX mode code descriptors */
+ struct desc_table txmodes[32];
+
+ /* RX Sub Address messages */
+ struct circ_buf_2 rxsuba_msgs[32];
+ /* TX Sub Address messages */
+ struct circ_buf_2 txsuba_msgs[32];
+ /* RX Mode Code messages */
+ struct circ_buf_2 rxmode_msgs[32];
+		/* TX Mode Code messages */
+ struct circ_buf_1 txmode_msgs[32];
+
+ /* offset to last 64bytes of 16k: tot-used-needed */
+ unsigned short unused[8*1024 -(4*32*4 +3*32*2*34 +1*32*1*34) -16*2];
+
+ /* interrupt log at 64 bytes from end */
+ struct irq_log_list irq_logs[16];
+ } *rtmem;
+#else
+ #error You must define one DMA_MEM_???K
+#endif
+
+ /* Interrupt log list */
+ struct irq_log_list *irq_log;
+ unsigned int irq;
+
+ /* Received events waiting to be read */
+ struct rt_msg *rt_event;
+ struct bm_msg *bm_event;
+
+ unsigned int head, tail;
+
+ unsigned int last_read[128];
+ unsigned int written[32];
+
+ struct bc_msg *cur_list;
+
+ int tx_blocking, rx_blocking;
+
+ rtems_id rx_sem, tx_sem, dev_sem;
+ int minor;
+ int irqno;
+ unsigned int mode;
+#ifdef DEBUG
+ unsigned int log[EVENT_QUEUE_SIZE*4];
+ unsigned int log_i;
+#endif
+
+ rtems_id event_id; /* event that may be signalled upon errors, needs to be set through ioctl command BRM_SET_EVENTID */
+ unsigned int status;
+ int bc_list_fail;
+} brm_priv;
+
+static void b1553brm_interrupt(void *arg);
+static rtems_device_driver rt_init(brm_priv *brm);
+
+#define OFS(ofs) (((unsigned int)&ofs & 0x1ffff)>>1)
+
+static int b1553brm_driver_io_registered = 0;
+static rtems_device_major_number b1553brm_driver_io_major = 0;
+
+/******************* Driver manager interface ***********************/
+
+/* Driver prototypes */
+int b1553brm_register_io(rtems_device_major_number *m);
+int b1553brm_device_init(brm_priv *pDev);
+
+int b1553brm_init2(struct drvmgr_dev *dev);
+int b1553brm_init3(struct drvmgr_dev *dev);
+int b1553brm_remove(struct drvmgr_dev *dev);
+
+struct drvmgr_drv_ops b1553brm_ops =
+{
+ .init = {NULL, b1553brm_init2, b1553brm_init3, NULL},
+ .remove = b1553brm_remove,
+ .info = NULL
+};
+
+struct amba_dev_id b1553brm_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_B1553BRM},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info b1553brm_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_B1553BRM_ID, /* Driver ID */
+ "B1553BRM_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &b1553brm_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+ },
+ &b1553brm_ids[0]
+};
+
+void b1553brm_register_drv (void)
+{
+ DBG("Registering B1553BRM driver\n");
+ drvmgr_drv_register(&b1553brm_drv_info.general);
+}
+
+int b1553brm_init2(struct drvmgr_dev *dev)
+{
+ brm_priv *priv;
+
+ DBG("B1553BRM[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+ priv = dev->priv = grlib_calloc(1, sizeof(*priv));
+ if ( !priv )
+ return DRVMGR_NOMEM;
+ priv->dev = dev;
+
+ /* This core will not find other cores, so we wait for init2() */
+
+ return DRVMGR_OK;
+}
+
+int b1553brm_init3(struct drvmgr_dev *dev)
+{
+ brm_priv *priv;
+ char prefix[32];
+ rtems_status_code status;
+
+ priv = dev->priv;
+
+ /* Do initialization */
+
+ if ( b1553brm_driver_io_registered == 0) {
+ /* Register the I/O driver only once for all cores */
+ if ( b1553brm_register_io(&b1553brm_driver_io_major) ) {
+ /* Failed to register I/O driver */
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+
+ b1553brm_driver_io_registered = 1;
+ }
+
+ /* I/O system registered and initialized
+ * Now we take care of device initialization.
+ */
+
+ if ( b1553brm_device_init(priv) ) {
+ return DRVMGR_FAIL;
+ }
+
+ /* Get Filesystem name prefix */
+ prefix[0] = '\0';
+ if ( drvmgr_get_dev_prefix(dev, prefix) ) {
+ /* Failed to get prefix, make sure of a unique FS name
+ * by using the driver minor.
+ */
+ sprintf(priv->devName, "/dev/b1553brm%d", dev->minor_drv);
+ } else {
+ /* Got special prefix, this means we have a bus prefix
+ * And we should use our "bus minor"
+ */
+ sprintf(priv->devName, "/dev/%sb1553brm%d", prefix, dev->minor_bus);
+ }
+
+ /* Register Device */
+ status = rtems_io_register_name(priv->devName, b1553brm_driver_io_major, dev->minor_drv);
+ if (status != RTEMS_SUCCESSFUL) {
+ return DRVMGR_FAIL;
+ }
+
+ return DRVMGR_OK;
+}
+
+int b1553brm_remove(struct drvmgr_dev *dev)
+{
+ /* Stop more tasks to open driver */
+
+ /* Throw out all tasks using this driver */
+
+ /* Unregister I/O node */
+
+ /* Unregister and disable Interrupt */
+
+ /* Free device memory */
+
+	/* Return successfully */
+
+ return DRVMGR_OK;
+}
+
+/******************* Driver Implementation ***********************/
+
+int b1553brm_register_io(rtems_device_major_number *m)
+{
+ rtems_status_code r;
+
+ if ((r = rtems_io_register_driver(0, &b1553brm_driver, m)) == RTEMS_SUCCESSFUL) {
+ DBG("B1553BRM driver successfully registered, major: %d\n", *m);
+ } else {
+ switch(r) {
+ case RTEMS_TOO_MANY:
+ printk("B1553BRM rtems_io_register_driver failed: RTEMS_TOO_MANY\n");
+ return -1;
+ case RTEMS_INVALID_NUMBER:
+ printk("B1553BRM rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n");
+ return -1;
+ case RTEMS_RESOURCE_IN_USE:
+ printk("B1553BRM rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n");
+ return -1;
+ default:
+ printk("B1553BRM rtems_io_register_driver failed\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int b1553brm_device_init(brm_priv *pDev)
+{
+ struct amba_dev_info *ambadev;
+ struct ambapp_core *pnpinfo;
+ union drvmgr_key_value *value;
+ unsigned int mem;
+ int size;
+
+ /* Get device information from AMBA PnP information */
+ ambadev = (struct amba_dev_info *)pDev->dev->businfo;
+ if ( ambadev == NULL ) {
+ return -1;
+ }
+ pnpinfo = &ambadev->info;
+ pDev->irqno = pnpinfo->irq;
+ /* Two versions of the BRM core. One where the registers are accessed using the AHB bus
+ * and one where the APB bus is used
+ */
+ if ( pnpinfo->ahb_slv ) {
+ /* Registers accessed over AHB */
+ pDev->regs = (struct brm_reg *)pnpinfo->ahb_slv->start[0];
+ } else {
+ /* Registers accessed over APB */
+ pDev->regs = (struct brm_reg *)pnpinfo->apb_slv->start;
+ }
+ pDev->minor = pDev->dev->minor_drv;
+#ifdef DEBUG
+ pDev->log_i = 0;
+ memset(pDev->log,0,sizeof(pDev->log));
+#endif
+
+#ifdef DMA_MEM_128K
+ size = 128 * 1024;
+#else
+ size = 16 * 1024;
+#endif
+
+ /* Get memory configuration from bus resources */
+ value = drvmgr_dev_key_get(pDev->dev, "dmaBaseAdr", DRVMGR_KT_POINTER);
+ if (value)
+ mem = (unsigned int)value->ptr;
+
+ if (value && (mem & 1)) {
+ /* Remote address, address as BRM looks at it. */
+
+		/* Translate the base address into an address that the CPU can understand */
+ pDev->memarea_base_remote = mem & ~1;
+ drvmgr_translate_check(pDev->dev, DMAMEM_TO_CPU,
+ (void *)pDev->memarea_base_remote,
+ (void **)&pDev->memarea_base,
+ size);
+ } else {
+ if (!value) {
+ /* Use dynamically allocated memory + 128k for
+ * alignment
+ */
+ mem = (unsigned int)grlib_malloc(size + 128 * 1024);
+ if (!mem){
+ printk("BRM: Failed to allocate HW memory\n\r");
+ return -1;
+ }
+ /* align memory to 128k boundary */
+ pDev->memarea_base = (mem + 0x1ffff) & ~0x1ffff;
+ } else {
+ pDev->memarea_base = mem;
+ }
+
+ /* Translate the base address into an address that the BRM core can understand */
+ drvmgr_translate_check(pDev->dev, CPUMEM_TO_DMA,
+ (void *)pDev->memarea_base,
+ (void **)&pDev->memarea_base_remote,
+ size);
+ }
+
+ /* clear the used memory */
+ memset((char *)pDev->memarea_base, 0, size);
+
+ /* Set base address of all descriptors */
+ pDev->desc = (struct desc_table *) pDev->memarea_base;
+ pDev->mem = (volatile unsigned short *) pDev->memarea_base;
+ pDev->irq_log = (struct irq_log_list *)(pDev->memarea_base + (0xFFE0<<1)); /* last 64byte */
+
+ pDev->bm_event = NULL;
+ pDev->rt_event = NULL;
+
+ pDev->cfg_clksel = 0;
+ pDev->cfg_clkdiv = 0;
+ pDev->cfg_freq = BRM_FREQ_24MHZ;
+
+ value = drvmgr_dev_key_get(pDev->dev, "clkSel", DRVMGR_KT_INT);
+ if ( value ) {
+ pDev->cfg_clksel = value->i & CLKSEL_MASK;
+ }
+
+ value = drvmgr_dev_key_get(pDev->dev, "clkDiv", DRVMGR_KT_INT);
+ if ( value ) {
+ pDev->cfg_clkdiv = value->i & CLKDIV_MASK;
+ }
+
+ value = drvmgr_dev_key_get(pDev->dev, "coreFreq", DRVMGR_KT_INT);
+ if ( value ) {
+ pDev->cfg_freq = value->i & BRM_FREQ_MASK;
+ }
+
+ /* Sel clock so that we can write to BRM's registers */
+ pDev->regs->w_ctrl = (pDev->cfg_clksel<<9) | (pDev->cfg_clkdiv<<5);
+ /* Reset BRM core */
+ pDev->regs->w_ctrl = 1<<10 | READ_REG(&pDev->regs->w_ctrl);
+
+ /* RX Semaphore created with count = 0 */
+ if ( rtems_semaphore_create(rtems_build_name('B', 'M', 'R', '0' + pDev->minor),
+ 0,
+ RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+ 0,
+ &pDev->rx_sem) != RTEMS_SUCCESSFUL ) {
+ printk("BRM: Failed to create rx semaphore\n");
+ return RTEMS_INTERNAL_ERROR;
+ }
+
+ /* TX Semaphore created with count = 1 */
+ if ( rtems_semaphore_create(rtems_build_name('B', 'M', 'T', '0' + pDev->minor),
+ 1,
+ RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+ 0,
+ &pDev->tx_sem) != RTEMS_SUCCESSFUL ){
+ printk("BRM: Failed to create tx semaphore\n");
+ return RTEMS_INTERNAL_ERROR;
+ }
+
+ /* Device Semaphore created with count = 1 */
+ if ( rtems_semaphore_create(rtems_build_name('B', 'M', 'D', '0' + pDev->minor),
+ 1,
+ RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+ 0,
+ &pDev->dev_sem) != RTEMS_SUCCESSFUL ){
+ printk("BRM: Failed to create device semaphore\n");
+ return RTEMS_INTERNAL_ERROR;
+ }
+
+ /* Default to RT-mode */
+ rt_init(pDev);
+
+ return 0;
+}
+
+static int odd_parity(unsigned int data) {
+ unsigned int i=0;
+
+ while(data)
+ {
+ i++;
+ data &= (data - 1);
+ }
+
+ return !(i&1);
+}
+
+static void start_operation(brm_priv *brm) {
+ unsigned int ctrl = READ_REG(&brm->regs->ctrl);
+ brm->regs->ctrl = ctrl | 0x8000;
+}
+
+static void stop_operation(brm_priv *brm) {
+ unsigned int ctrl = READ_REG(&brm->regs->ctrl);
+ brm->regs->ctrl = ctrl & ~0x8000;
+}
+
+static int is_executing(brm_priv *brm) {
+ unsigned int ctrl = READ_REG(&brm->regs->ctrl);
+ return ((ctrl>>15) & 1);
+}
+
+static void clr_int_logs(struct irq_log_list *logs){
+ int i;
+ for(i=0; i<16; i++){
+ logs[i].iiw = 0xffff;
+ logs[i].iaw = 0x0;
+ }
+}
+
+unsigned short b1553brm_rt_cmd_legalize[16] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0xffff,
+ 0xffff,
+ 0xffff,
+ 0xfffd,
+ 0xfe01,
+ 0xfff2,
+ 0xffff,
+ 0xfffd,
+ 0xfe05,
+ 0xffff,
+};
+
+static rtems_device_driver rt_init(brm_priv *brm) {
+ unsigned int i;
+
+ brm->head = brm->tail = 0;
+ brm->rx_blocking = brm->tx_blocking = 1;
+
+ if ( brm->bm_event )
+ free(brm->bm_event);
+ brm->bm_event = NULL;
+
+ if ( brm->rt_event )
+ free(brm->rt_event);
+
+ brm->bcmem = NULL;
+ brm->rtmem = (void *)brm->mem;
+
+ brm->rt_event = grlib_malloc(EVENT_QUEUE_SIZE*sizeof(*brm->rt_event));
+
+ if (brm->rt_event == NULL) {
+ DBG("BRM driver failed to allocated memory.");
+ return RTEMS_NO_MEMORY;
+ }
+
+ brm->irq_log = (struct irq_log_list *)&brm->rtmem->irq_logs[0];
+
+ brm->regs->ctrl = 0x1912; /* enable both buses, circular 1 bufmode, broadcast, interrupt log */
+ brm->regs->oper = 0x0900; /* configure as RT, with addr 1 */
+ brm->regs->imask = BRM_RT_ILLCMD_IRQ|BRM_SUBAD_IRQ|BRM_TAPF_IRQ|BRM_DMAF_IRQ|BRM_WRAPF_IRQ|BRM_MERR_IRQ;
+ brm->regs->dpoint = 0;
+ brm->regs->ipoint = OFS(brm->rtmem->irq_logs[0]);
+ brm->regs->enhanced = 0x0000 | brm->cfg_freq; /* BRM clocked with freq = 12,16,20 or 24MHz */
+ brm->regs->w_ctrl = (brm->cfg_clksel<<9) | (brm->cfg_clkdiv<<5) | 1;
+ brm->regs->w_irqctrl = 6;
+ brm->regs->w_ahbaddr = brm->memarea_base_remote;
+
+ clr_int_logs(brm->irq_log);
+
+ /* Initialize the Legalize register with standard values */
+ for (i = 0; i < 16; i++) {
+ brm->regs->rt_cmd_leg[i] = b1553brm_rt_cmd_legalize[i];
+ }
+
+ /* Init descriptor table
+ *
+ * Each circular buffer has room for 8 messages with up to 34 (32 data + miw + time) words (16b) in each.
+	 * The buffers must be separated by 34 words.
+ */
+
+
+ /* RX Sub-address 0 - 31 */
+ for (i = 0; i < 32; i++) {
+ brm->rtmem->rxsubs[i].ctrl = 0x00E0; /* Interrupt: INTX, IWA, and IBRD */
+ brm->rtmem->rxsubs[i].top = OFS(brm->rtmem->rxsuba_msgs[i]); /* Top address */
+ brm->rtmem->rxsubs[i].cur = OFS(brm->rtmem->rxsuba_msgs[i]); /* Current address */
+ brm->rtmem->rxsubs[i].bot = OFS(brm->rtmem->rxsuba_msgs[i+1]) - sizeof(struct msg)/2; /* Bottom address */
+ brm->last_read[i] = OFS(brm->rtmem->rxsuba_msgs[i]);
+ }
+ /* TX Sub-address 0 - 31 */
+ for (i = 0; i < 32; i++) {
+ brm->rtmem->txsubs[i].ctrl = 0x0060; /* Interrupt: IWA and IBRD */
+ brm->rtmem->txsubs[i].top = OFS(brm->rtmem->txsuba_msgs[i]); /* Top address */
+ brm->rtmem->txsubs[i].cur = OFS(brm->rtmem->txsuba_msgs[i]); /* Current address */
+ brm->rtmem->txsubs[i].bot = OFS(brm->rtmem->txsuba_msgs[i+1]) - sizeof(struct msg)/2; /* Bottom address */
+ brm->last_read[i+32] = OFS(brm->rtmem->txsuba_msgs[i]);
+ brm->written[i] = OFS(brm->rtmem->txsuba_msgs[i]);
+ }
+ /* RX mode code 0 - 31 */
+ for (i = 0; i < 32; i++) {
+ brm->rtmem->rxmodes[i].ctrl = 0x00E0; /* Interrupt: INTX, IWA, and IBRD */
+ brm->rtmem->rxmodes[i].top = OFS(brm->rtmem->rxmode_msgs[i]); /* Top address */
+ brm->rtmem->rxmodes[i].cur = OFS(brm->rtmem->rxmode_msgs[i]); /* Current address */
+ brm->rtmem->rxmodes[i].bot = OFS(brm->rtmem->rxmode_msgs[i+1]) - sizeof(struct msg)/2; /* Bottom address */
+ brm->last_read[i+64] = OFS(brm->rtmem->rxmode_msgs[i]);
+ }
+ /* TX mode code 0 - 31 */
+ for (i = 0; i < 32; i++) {
+ brm->rtmem->txmodes[i].ctrl = 0x0060; /* Interrupt: IWA and IBRD */
+ brm->rtmem->txmodes[i].top = OFS(brm->rtmem->txmode_msgs[i]); /* Top address */
+ brm->rtmem->txmodes[i].cur = OFS(brm->rtmem->txmode_msgs[i]); /* Current address */
+ brm->rtmem->txmodes[i].bot = OFS(brm->rtmem->txmode_msgs[i+1]) - sizeof(struct msg)/2; /* Bottom address */
+ brm->last_read[i+96] = OFS(brm->rtmem->txmode_msgs[i]);
+ }
+
+#ifdef DEBUG
+ printk("b1553BRM DMA_AREA: 0x%x\n", (unsigned int)brm->rtmem);
+ printk("LOG: 0x%x\n", &brm->log[0]);
+ printk("LOG_I: 0x%x\n", &brm->log_i);
+#endif
+
+ brm->mode = BRM_MODE_RT;
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_device_driver bc_init(brm_priv *brm){
+
+ if ( brm->bm_event )
+ free(brm->bm_event);
+ brm->bm_event = NULL;
+
+ if ( brm->rt_event )
+ free(brm->rt_event);
+ brm->rt_event = NULL;
+
+ brm->bcmem = (void *)brm->mem;
+ brm->rtmem = NULL;
+ brm->irq_log = (struct irq_log_list *)&brm->bcmem->irq_logs[0];
+
+ brm->head = brm->tail = 0;
+ brm->rx_blocking = brm->tx_blocking = 1;
+
+ brm->regs->ctrl = 0x0006; /* ping pong enable and enable interrupt log */
+ brm->regs->oper = 0x0800; /* configure as BC */
+ brm->regs->imask = BRM_EOL_IRQ|BRM_BC_ILLCMD_IRQ|BRM_ILLOP_IRQ|BRM_DMAF_IRQ|BRM_WRAPF_IRQ|BRM_MERR_IRQ;
+ brm->regs->dpoint = 0;
+ brm->regs->ipoint = OFS(brm->bcmem->irq_logs[0]);
+ brm->regs->enhanced = 0x0000 | (brm->cfg_freq&BRM_FREQ_MASK); /* freq = 24 */
+ brm->regs->w_ctrl = (brm->cfg_clksel<<9) | (brm->cfg_clkdiv<<5) | 1;
+ brm->regs->w_irqctrl = 6;
+ brm->regs->w_ahbaddr = brm->memarea_base_remote;
+
+ clr_int_logs(brm->irq_log);
+
+ brm->mode = BRM_MODE_BC;
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_device_driver bm_init(brm_priv *brm) {
+
+
+ brm->head = brm->tail = 0;
+ brm->rx_blocking = brm->tx_blocking = 1;
+
+ if ( brm->rt_event )
+ free(brm->rt_event);
+ brm->rt_event = NULL;
+
+ if ( brm->bm_event )
+ free(brm->bm_event);
+
+ brm->bcmem = NULL;
+ brm->rtmem = NULL;
+
+ brm->bm_event = grlib_malloc(EVENT_QUEUE_SIZE*sizeof(*brm->bm_event));
+
+ if (brm->bm_event == NULL) {
+ DBG("BRM driver failed to allocated memory.");
+ return RTEMS_NO_MEMORY;
+ }
+
+ /* end of 16K, fits all current modes (128K, 16K) */
+ brm->irq_log = (struct irq_log_list *)&brm->mem[8*1024-16*2];
+
+ brm->regs->ctrl = 0x0006; /* ping pong enable and enable interrupt log */
+ brm->regs->oper = 0x0A00; /* configure as BM */
+ brm->regs->imask = BRM_MBC_IRQ|BRM_MERR_IRQ|BRM_DMAF_IRQ;
+ brm->regs->dpoint = 0;
+ brm->regs->ipoint = OFS(brm->mem[8*1024-16*2]);
+ brm->regs->mcpoint = 0; /* Command pointer */
+ brm->regs->mdpoint = 0x100; /* Data pointer */
+ brm->regs->mbc = 1; /* Block count */
+ brm->regs->enhanced = 0x0000 | (brm->cfg_freq&BRM_FREQ_MASK); /* freq = 24 */
+ brm->regs->w_ctrl = (brm->cfg_clksel<<9) | (brm->cfg_clkdiv<<5) | 1;
+ brm->regs->w_irqctrl = 6;
+ brm->regs->w_ahbaddr = brm->memarea_base_remote;
+
+ clr_int_logs(brm->irq_log);
+
+ brm->mode = BRM_MODE_BM;
+
+ return RTEMS_SUCCESSFUL;
+}
+
+
+static rtems_device_driver brm_initialize(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_device_driver brm_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg) {
+ brm_priv *brm;
+ struct drvmgr_dev *dev;
+
+ FUNCDBG("brm_open\n");
+
+ if ( drvmgr_get_dev(&b1553brm_drv_info.general, minor, &dev) ) {
+ DBG("Wrong minor %d\n", minor);
+ return RTEMS_UNSATISFIED;
+ }
+ brm = (brm_priv *)dev->priv;
+
+ if (rtems_semaphore_obtain(brm->dev_sem, RTEMS_NO_WAIT, RTEMS_NO_TIMEOUT) != RTEMS_SUCCESSFUL) {
+ DBG("brm_open: resource in use\n");
+ return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+ }
+
+ /* Set defaults */
+ brm->event_id = 0;
+
+ start_operation(brm);
+
+ /* Register interrupt routine */
+ if ( drvmgr_interrupt_register(brm->dev, 0, "b1553brm", b1553brm_interrupt, brm) ) {
+ rtems_semaphore_release(brm->dev_sem);
+ return RTEMS_UNSATISFIED;
+ }
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_device_driver brm_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ brm_priv *brm;
+ struct drvmgr_dev *dev;
+
+ FUNCDBG("brm_close");
+
+ if ( drvmgr_get_dev(&b1553brm_drv_info.general, minor, &dev) ) {
+ return RTEMS_UNSATISFIED;
+ }
+ brm = (brm_priv *)dev->priv;
+
+ drvmgr_interrupt_unregister(brm->dev, 0, b1553brm_interrupt, brm);
+
+ stop_operation(brm);
+ rtems_semaphore_release(brm->dev_sem);
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static int get_rt_messages(brm_priv *brm, void *buf, unsigned int msg_count)
+{
+ struct rt_msg *dest = (struct rt_msg *) buf;
+ int count = 0;
+
+ if (brm->head == brm->tail) {
+ return 0;
+ }
+
+ do {
+
+ DBG("rt read - head: %d, tail: %d\n", brm->head, brm->tail);
+ dest[count++] = brm->rt_event[INDEX(brm->tail++)];
+ } while (brm->head != brm->tail && count < msg_count);
+
+ return count;
+}
+
+static int get_bm_messages(brm_priv *brm, void *buf, unsigned int msg_count)
+{
+ struct bm_msg *dest = (struct bm_msg *) buf;
+ int count = 0;
+
+ if (brm->head == brm->tail) {
+ return 0;
+ }
+
+ do {
+
+ DBG("bm read - head: %d, tail: %d\n", brm->head, brm->tail);
+ dest[count++] = brm->bm_event[INDEX(brm->tail++)];
+
+ } while (brm->head != brm->tail && count < msg_count);
+
+ return count;
+}
+
+static rtems_device_driver brm_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ rtems_libio_rw_args_t *rw_args;
+ int count = 0;
+ brm_priv *brm;
+ struct drvmgr_dev *dev;
+ int (*get_messages)(brm_priv *brm, void *buf, unsigned int count);
+
+ if ( drvmgr_get_dev(&b1553brm_drv_info.general, minor, &dev) ) {
+ return RTEMS_UNSATISFIED;
+ }
+ brm = (brm_priv *)dev->priv;
+
+ if ( ! (brm->mode & (BRM_MODE_RT | BRM_MODE_BM)) ){
+ return RTEMS_INVALID_NAME;
+ }
+
+ rw_args = (rtems_libio_rw_args_t *) arg;
+
+ if ( ((READ_REG(&brm->regs->oper)>>8) & 3) == 1 ) { /* RT */
+ get_messages = get_rt_messages;
+ } else { /* BM */
+ get_messages = get_bm_messages;
+ }
+
+ FUNCDBG("brm_read [%i,%i]: buf: 0x%x len: %i\n",major, minor, (unsigned int)rw_args->buffer,rw_args->count);
+
+ while ( (count=get_messages(brm,rw_args->buffer, rw_args->count)) == 0 ) {
+ if (brm->rx_blocking) {
+ rtems_semaphore_obtain(brm->rx_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ } else {
+ /* Translates to EBUSY */
+ return RTEMS_RESOURCE_IN_USE;
+ }
+ }
+
+ rw_args->bytes_moved = count;
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_device_driver brm_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ rtems_libio_rw_args_t *rw_args;
+ struct rt_msg *source;
+ unsigned int count=0, current, next, descriptor, wc, suba;
+ brm_priv *brm;
+ struct drvmgr_dev *dev;
+
+ if ( drvmgr_get_dev(&b1553brm_drv_info.general, minor, &dev) ) {
+ return RTEMS_UNSATISFIED;
+ }
+ brm = (brm_priv *)dev->priv;
+
+ if ( ! (brm->mode & BRM_MODE_RT) ){
+ return RTEMS_INVALID_NAME;
+ }
+
+ rw_args = (rtems_libio_rw_args_t *) arg;
+ source = (struct rt_msg *) rw_args->buffer;
+
+ FUNCDBG("brm_write [%i,%i]: buf: 0x%x len: %i\n",major, minor, (unsigned int)rw_args->buffer,rw_args->count);
+
+ do {
+
+ descriptor = source[count].desc & 0x7F;
+ suba = descriptor-32;
+ wc = source[count].miw >> 11;
+ wc = wc ? wc : 32;
+
+ /* Only subaddress transmission is allowed with write */
+ if (descriptor < 32 || descriptor >= 64)
+ return RTEMS_INVALID_NAME;
+
+ current = brm->desc[descriptor].cur;
+ next = brm->written[suba] + 2 + wc;
+
+ if (brm->written[suba] < current) {
+
+ if (next > current) {
+
+ /* No room in transmission buffer */
+ if (brm->tx_blocking && count == 0) {
+ rtems_semaphore_obtain(brm->tx_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ } else if ( count > 0 ) {
+ /* return the number of messages sent so far */
+ break;
+ } else {
+ /* Translates to posix EBUSY */
+ return RTEMS_RESOURCE_IN_USE;
+ }
+ }
+ }
+
+ memcpy((void *)&brm->mem[brm->written[suba]], &source[count], (2+wc)*2);
+
+ count++;
+
+ if (next >= brm->desc[descriptor].bot) {
+ next = brm->desc[descriptor].top;
+ }
+ brm->written[suba] = next;
+
+ } while (count < rw_args->count);
+
+ rw_args->bytes_moved = count;
+
+ if (count >= 0) {
+ return RTEMS_SUCCESSFUL;
+ }
+ return RTEMS_UNSATISFIED;
+}
+
+static rtems_device_driver brm_control(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+
+ unsigned int i=0;
+ unsigned short ctrl, oper, cw1, cw2;
+ rtems_libio_ioctl_args_t *ioarg = (rtems_libio_ioctl_args_t *) arg;
+ unsigned int *data = ioarg->buffer;
+ struct bc_msg *cmd_list = (struct bc_msg *) ioarg->buffer;
+ brm_priv *brm;
+ struct drvmgr_dev *dev;
+ rtems_device_driver ret;
+ int len, msglen;
+
+ FUNCDBG("brm_control[%d]: [%i,%i]\n", minor, major, minor);
+
+ if ( drvmgr_get_dev(&b1553brm_drv_info.general, minor, &dev) ) {
+ return RTEMS_UNSATISFIED;
+ }
+ brm = (brm_priv *)dev->priv;
+
+ if (!ioarg) {
+ DBG("brm_control: invalid argument\n");
+ return RTEMS_INVALID_NAME;
+ }
+
+ ioarg->ioctl_return = 0;
+ switch (ioarg->command) {
+
+ case BRM_SET_MODE:
+ if ( data[0] > 2 )
+ return RTEMS_INVALID_NAME;
+ stop_operation(brm);
+ if (data[0] == 0) {
+ ret = bc_init(brm);
+ } else if (data[0] == 1) {
+ ret = rt_init(brm);
+ } else if (data[0] == 2) {
+ ret = bm_init(brm);
+ } else {
+ ret = RTEMS_INVALID_NAME;
+ }
+ if ( ret != RTEMS_SUCCESSFUL)
+ return ret;
+
+ if ( brm->mode & (BRM_MODE_RT | BRM_MODE_BM ) )
+ start_operation(brm);
+ break;
+
+ case BRM_SET_BUS:
+ stop_operation(brm);
+ ctrl = READ_REG(&brm->regs->ctrl);
+ ctrl &= 0xE7FF; /* Clear bit 12-11 ... */
+ ctrl |= (data[0]&0x3)<<11; /* ... OR in new bus status */
+ brm->regs->ctrl = ctrl;
+ start_operation(brm);
+ break;
+
+ case BRM_SET_MSGTO:
+ stop_operation(brm);
+ ctrl = READ_REG(&brm->regs->ctrl);
+ ctrl &= 0xFDFF; /* Clear bit 9 ... */
+ ctrl |= (data[0]&1)<<9; /* ... OR in new MSGTO */
+ brm->regs->ctrl = ctrl;
+ start_operation(brm);
+ break;
+
+ case BRM_SET_RT_ADDR:
+ stop_operation(brm);
+ oper = READ_REG(&brm->regs->oper);
+ oper &= 0x03FF; /* Clear bit 15-10 ... */
+ oper |= (data[0]&0x1f)<<11; /* ... OR in new address */
+ oper |= odd_parity(data[0]&0x1f)<<10; /* ... OR in parity */
+ brm->regs->oper = oper;
+ start_operation(brm);
+ break;
+
+ case BRM_SET_STD:
+ stop_operation(brm);
+ ctrl = READ_REG(&brm->regs->ctrl);
+ ctrl &= 0xFF7F; /* Clear bit 7 ... */
+ ctrl |= (data[0]&1)<<7; /* ... OR in new ABSTD (1=A) */
+ brm->regs->ctrl = ctrl;
+ start_operation(brm);
+ break;
+
+ case BRM_SET_BCE:
+ stop_operation(brm);
+ ctrl = READ_REG(&brm->regs->ctrl);
+ ctrl &= 0xFFEF; /* Clear bit 4 ... */
+ ctrl |= (data[0]&1)<<4; /* ... OR in new BCE */
+ brm->regs->ctrl = ctrl;
+ start_operation(brm);
+ break;
+
+ case BRM_TX_BLOCK:
+ brm->tx_blocking = data[0];
+ break;
+
+ case BRM_RX_BLOCK:
+ brm->rx_blocking = data[0];
+ break;
+
+ case BRM_DO_LIST:
+ if ( brm->mode != BRM_MODE_BC ){
+ return RTEMS_INVALID_NAME;
+ }
+
+ /* Check if we are bus controller */
+ if ( ((READ_REG(&brm->regs->oper)>>8) & 3) != 0 ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ /* Already processing list? */
+ if (is_executing(brm)) {
+ return RTEMS_RESOURCE_IN_USE;
+ }
+
+ /* clear any earlier releases */
+ rtems_semaphore_obtain(brm->tx_sem, RTEMS_NO_WAIT, RTEMS_NO_TIMEOUT);
+
+ brm->bc_list_fail = 0;
+ brm->cur_list = cmd_list;
+ brm->regs->dpoint = 0;
+
+ i = 0;
+ while ( (cmd_list[i].ctrl & BC_EOL) == 0) {
+
+ ctrl = (4<<12) | (((cmd_list[i].ctrl&BC_BUSA)==BC_BUSA)<<9) | (((cmd_list[i].ctrl&BC_RTRT)==BC_RTRT)<<8);
+
+ if (cmd_list[i].ctrl&BC_RTRT) {
+ cw1 = (cmd_list[i].rtaddr[0]<<11) | (0<<10) | (cmd_list[i].subaddr[0]<<5) | (cmd_list[i].wc & 0x1f); /* receive cw */
+ cw2 = (cmd_list[i].rtaddr[1]<<11) | (1<<10) | (cmd_list[i].subaddr[1]<<5) | (cmd_list[i].wc & 0x1f); /* transmit cw */
+ } else {
+ cw1 = (cmd_list[i].rtaddr[0]<<11) | (((cmd_list[i].ctrl&BC_TR)==BC_TR)<<10) | (cmd_list[i].subaddr[0]<<5) | (cmd_list[i].wc&0x1f);
+ cw2 = 0;
+ }
+
+ /* Set up command block */
+ brm->bcmem->descs[i].ctrl = ctrl;
+ brm->bcmem->descs[i].cw1 = cw1;
+ brm->bcmem->descs[i].cw2 = cw2;
+ /* data pointer:
+ * (&brm->bcmem->msg_data[i].data[0] & 0x1ffff) / 2
+ */
+ brm->bcmem->descs[i].dptr = 1024+i*32; /* data pointer */
+ brm->bcmem->descs[i].tsw[0] = 0;
+ brm->bcmem->descs[i].tsw[1] = 0;
+ brm->bcmem->descs[i].ba = 0;
+ brm->bcmem->descs[i].timer = 0;
+
+ msglen = cmd_list[i].wc;
+ if ( msglen == 0 )
+ msglen = 32;
+ memcpy((void *)&brm->bcmem->msg_data[i].data[0], &cmd_list[i].data[0], msglen*2);
+
+ i++;
+ }
+
+ brm->bcmem->descs[i].ctrl = 0; /* end of list */
+
+ start_operation(brm);
+ break;
+
+ case BRM_LIST_DONE:
+
+ if ( brm->mode != BRM_MODE_BC ){
+ return RTEMS_INVALID_NAME;
+ }
+
+ /* Check if we are bus controller */
+ if ( ((READ_REG(&brm->regs->oper)>>8) & 3) != 0 ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ if (is_executing(brm)) {
+
+ data[0] = 0;
+ if (brm->tx_blocking) {
+ rtems_semaphore_obtain(brm->tx_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ data[0] = 1;
+ if ( brm->bc_list_fail ){
+ return RTEMS_INVALID_NAME;
+ }
+ } else {
+ return RTEMS_RESOURCE_IN_USE;
+ }
+ } else {
+ data[0] = 1; /* done */
+ }
+
+ /* copy finished list results back into bc_msg array */
+ i = 0;
+ while ( (brm->cur_list[i].ctrl & BC_EOL) == 0) {
+ if (READ_DMA(&brm->bcmem->descs[i].ctrl) & 1) {
+ brm->cur_list[i].ctrl |= 0x8000; /* Set BAME */
+ }
+ if (brm->cur_list[i].ctrl & BC_TR) {
+ /* RT Transmit command, copy received data */
+ len = brm->cur_list[i].wc;
+ if ( len == 0 )
+ len = 32;
+ while ( len-- > 0) {
+ brm->cur_list[i].data[len] = READ_DMA(&brm->bcmem->msg_data[i].data[len]);
+ }
+ }
+ brm->cur_list[i].tsw[0] = READ_DMA(&brm->bcmem->descs[i].tsw[0]);
+ brm->cur_list[i].tsw[1] = READ_DMA(&brm->bcmem->descs[i].tsw[1]);
+
+ i++;
+ }
+ break;
+
+ case BRM_CLR_STATUS:
+ brm->status = 0;
+ break;
+
+ case BRM_GET_STATUS: /* copy status */
+ if ( !ioarg->buffer )
+ return RTEMS_INVALID_NAME;
+
+ *(unsigned int *)ioarg->buffer = brm->status;
+ break;
+
+ case BRM_SET_EVENTID:
+ brm->event_id = (rtems_id)ioarg->buffer;
+ break;
+
+ default:
+ return RTEMS_NOT_DEFINED;
+ }
+ return RTEMS_SUCCESSFUL;
+}
+
+static void b1553brm_interrupt(void *arg)
+{
+ brm_priv *brm = arg;
+ unsigned short descriptor, current, pending, miw, wc, tmp, ctrl;
+ unsigned short msgadr, iaw, iiw;
+ int len;
+ int signal_event=0, wake_rx_task=0, wake_tx_task=0;
+ unsigned int event_status=0;
+ int accessed;
+ #define SET_ERROR_DESCRIPTOR(descriptor) (event_status = (event_status & 0x0000ffff) | descriptor<<16)
+
+ while( (iiw=READ_DMA(&brm->irq_log[brm->irq].iiw)) != 0xffff ){
+ iaw=READ_DMA(&brm->irq_log[brm->irq].iaw);
+
+ /* indicate that the interrupt log entry has been processed */
+ brm->irq_log[brm->irq].iiw = 0xffff;
+
+ /* Interpret interrupt log entry */
+ descriptor = iaw >> 2;
+ pending = iiw;
+ brm->irq = (brm->irq + 1) % 16;
+
+ /* Clear the log so that we */
+
+
+ /* Subaddress accessed irq (RT only)
+ *
+ * Can be either a receive or transmit command
+ * as well as a mode code.
+ */
+ if (pending & BRM_SUBAD_IRQ) {
+
+ /* Pointer to next free message in circular buffer */
+ current = READ_DMA(&brm->desc[descriptor].cur);
+ ctrl = READ_DMA(&brm->desc[descriptor].ctrl);
+#ifdef DEBUG
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = (0xff<<16);
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = current;
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = ctrl;
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = 0;
+#endif
+ accessed = ctrl & 0x10;
+			/* Note that current may be equal to bot and top when
+			 * the circular buffer can only hold one message.
+			 */
+ if ( accessed )
+ do {
+ msgadr = brm->last_read[descriptor];
+
+ /* Get word count */
+ miw = READ_DMA(&brm->mem[msgadr]);
+ wc = miw >> 11;
+
+ /* Data received */
+ if (descriptor < 32) {
+ wc = wc ? wc : 32;
+ }
+ /* Data transmitted */
+ else if (descriptor < 64) {
+ wc = wc ? wc : 32;
+ wake_tx_task=1;
+ }
+ /* RX Mode code */
+ else if (descriptor < 96) {
+ wc = (wc>>4);
+ }
+ /* TX Mode code */
+ else if (descriptor < 128) {
+ wc = (wc>>4);
+ }
+
+#ifdef DEBUG
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = (descriptor << 16) | wc;
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = current;
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = msgadr;
+#endif
+
+ /* If there is room in the event queue, copy the event there */
+ if (brm->head - brm->tail != EVENT_QUEUE_SIZE) {
+
+ /* Copy to event queue */
+ brm->rt_event[INDEX(brm->head)].miw = READ_DMA(&brm->mem[msgadr]);
+ brm->rt_event[INDEX(brm->head)].time = READ_DMA(&brm->mem[msgadr+1]);
+ len = wc;
+ while( len-- > 0){
+ brm->rt_event[INDEX(brm->head)].data[len] = READ_DMA(&brm->mem[msgadr+2+len]);
+ }
+ brm->rt_event[INDEX(brm->head)].desc = descriptor;
+ brm->head++;
+ }
+ else {
+ /* Indicate overrun */
+ brm->rt_event[INDEX(brm->head)].desc |= 0x8000;
+ }
+
+ msgadr += (2+wc);
+
+ if (msgadr >= READ_DMA(&brm->desc[descriptor].bot)) {
+ msgadr = READ_DMA(&brm->desc[descriptor].top);
+ }
+ brm->last_read[descriptor] = msgadr;
+
+#ifdef DEBUG
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = msgadr;
+#endif
+ wake_rx_task = 1;
+ } while ( (msgadr=brm->last_read[descriptor]) != current );
+ }
+
+ if (pending & BRM_EOL_IRQ) {
+ wake_tx_task = 1;
+ }
+
+ if (pending & BRM_BC_ILLCMD_IRQ) {
+ brm->bc_list_fail = 1;
+ wake_tx_task = 1;
+ SET_ERROR_DESCRIPTOR(descriptor);
+ FUNCDBG("BRM: ILLCMD IRQ\n\r");
+ }
+
+ /* Monitor irq */
+ if (pending & BRM_MBC_IRQ) {
+
+ stop_operation(brm);
+ brm->regs->mbc = 1;
+ start_operation(brm);
+
+ /* If there is room in the event queue, copy the event there */
+ if (brm->head - brm->tail != EVENT_QUEUE_SIZE) {
+
+ /* Copy to event queue */
+
+ brm->bm_event[INDEX(brm->head)].miw = READ_DMA(&brm->mem[0]);
+ brm->bm_event[INDEX(brm->head)].cw1 = READ_DMA(&brm->mem[1]);
+ brm->bm_event[INDEX(brm->head)].cw2 = READ_DMA(&brm->mem[2]);
+ brm->bm_event[INDEX(brm->head)].sw1 = READ_DMA(&brm->mem[4]);
+ brm->bm_event[INDEX(brm->head)].sw2 = READ_DMA(&brm->mem[5]);
+ brm->bm_event[INDEX(brm->head)].time = READ_DMA(&brm->mem[6]);
+
+ len = 32;
+ while ( len-- ){
+ brm->bm_event[INDEX(brm->head)].data[len] = READ_DMA(&brm->mem[0x100+len]);
+ len--;
+ brm->bm_event[INDEX(brm->head)].data[len] = READ_DMA(&brm->mem[0x100+len]);
+ len--;
+ brm->bm_event[INDEX(brm->head)].data[len] = READ_DMA(&brm->mem[0x100+len]);
+ len--;
+ brm->bm_event[INDEX(brm->head)].data[len] = READ_DMA(&brm->mem[0x100+len]);
+ }
+/* memcpy((void *)brm->bm_event[INDEX(brm->head)].data, &brm->mem[0x100], 32);*/
+
+#ifdef DEBUG
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = READ_REG(&brm->regs->mbc) & 0xffff;
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = READ_DMA(&brm->mem[0]);
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = READ_DMA(&brm->mem[1]);
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = READ_DMA(&brm->mem[4]);
+#endif
+
+ brm->head++;
+
+ }
+ else {
+ /* Indicate overrun */
+ brm->bm_event[INDEX(brm->head)].miw |= 0x8000;
+ }
+
+ /* Wake any blocking thread */
+ wake_rx_task = 1;
+ }
+
+		/* The rest of the interrupts
+		 * cause an event to be signalled
+		 * so that the user can handle the error.
+		 */
+ if ( pending & BRM_RT_ILLCMD_IRQ){
+ FUNCDBG("BRM: BRM_RT_ILLCMD_IRQ\n\r");
+ brm->status |= BRM_RT_ILLCMD_IRQ;
+ event_status |= BRM_RT_ILLCMD_IRQ;
+ SET_ERROR_DESCRIPTOR(descriptor);
+ signal_event=1;
+ }
+
+ if ( pending & BRM_ILLOP_IRQ){
+ FUNCDBG("BRM: BRM_ILLOP_IRQ\n\r");
+ brm->bc_list_fail = 1;
+ wake_tx_task = 1;
+ event_status |= BRM_ILLOP_IRQ;
+ SET_ERROR_DESCRIPTOR(descriptor);
+ signal_event=1;
+ }
+
+ if ( pending & BRM_MERR_IRQ){
+ FUNCDBG("BRM: BRM_MERR_IRQ\n\r");
+ event_status |= BRM_MERR_IRQ;
+ SET_ERROR_DESCRIPTOR(descriptor);
+ signal_event=1;
+ }
+ /* Clear Block Accessed Bit */
+ tmp = READ_DMA(&brm->desc[descriptor].ctrl);
+ brm->desc[descriptor].ctrl = tmp & ~0x10;
+#ifdef DEBUG
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = (0xfe<<16);
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = 0;
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = tmp & ~0x10;
+ brm->log[brm->log_i++ % EVENT_QUEUE_SIZE] = tmp;
+#endif
+ } /* While */
+
+ /* clear interrupt flags & handle Hardware errors */
+ pending = READ_REG(&brm->regs->ipend);
+
+ if ( pending & BRM_DMAF_IRQ){
+ FUNCDBG("BRM: BRM_DMAF_IRQ\n\r");
+ event_status |= BRM_DMAF_IRQ;
+ signal_event=1;
+ }
+
+ if ( pending & BRM_WRAPF_IRQ){
+ FUNCDBG("BRM: BRM_WRAPF_IRQ\n\r");
+ event_status |= BRM_WRAPF_IRQ;
+ signal_event=1;
+ }
+
+ if ( pending & BRM_TAPF_IRQ){
+ FUNCDBG("BRM: BRM_TAPF_IRQ\n\r");
+ event_status |= BRM_TAPF_IRQ;
+ signal_event=1;
+ }
+
+ /* Copy current mask to status mask */
+ if ( event_status ){
+ if ( event_status & 0xffff0000 )
+ brm->status &= 0x0000ffff;
+ brm->status |= event_status;
+ }
+
+ /* Wake any blocked rx thread only on receive interrupts */
+ if ( wake_rx_task ) {
+ rtems_semaphore_release(brm->rx_sem);
+ }
+
+ /* Wake any blocked tx thread only on transmit interrupts */
+ if ( wake_tx_task ) {
+ rtems_semaphore_release(brm->tx_sem);
+ }
+
+ /* signal event once */
+ if ( signal_event && (brm->event_id!=0) ){
+ rtems_event_send(brm->event_id, event_status);
+ }
+
+}
+
+void b1553brm_print_dev(struct drvmgr_dev *dev, int options)
+{
+ brm_priv *pDev = dev->priv;
+ struct brm_reg *regs = pDev->regs;
+
+ /* Print */
+ printf("--- B1553BRM[%d] %s ---\n", pDev->minor, pDev->devName);
+ printf(" REGS: 0x%x\n", (unsigned int)pDev->regs);
+ printf(" IRQ: %d\n", pDev->irqno);
+ switch (pDev->mode) {
+ case BRM_MODE_BC:
+ printf(" MODE: BC\n");
+ printf(" DESCS: 0x%x\n", (unsigned int)&pDev->bcmem->descs[0]);
+ printf(" DATA: 0x%x\n", (unsigned int)&pDev->bcmem->msg_data[0].data[0]);
+ printf(" IRQLOG: 0x%x\n", (unsigned int)&pDev->bcmem->irq_logs[0]);
+ break;
+ case BRM_MODE_BM:
+ printf(" MODE: BM\n");
+ break;
+ case BRM_MODE_RT:
+ printf(" MODE: RT\n");
+ printf(" RXSUBS: 0x%x\n", (unsigned int)&pDev->rtmem->rxsubs[0]);
+ printf(" TXSUBS: 0x%x\n", (unsigned int)&pDev->rtmem->txsubs[0]);
+ printf(" RXMODES: 0x%x\n", (unsigned int)&pDev->rtmem->rxmodes[0]);
+ printf(" TXOMODES: 0x%x\n", (unsigned int)&pDev->rtmem->txmodes[0]);
+ printf(" RXSUBS MSGS: 0x%x\n", (unsigned int)&pDev->rtmem->rxsuba_msgs[0]);
+ printf(" TXSUBS MSGS: 0x%x\n", (unsigned int)&pDev->rtmem->txsuba_msgs[0]);
+ printf(" RXMODES MSGS: 0x%x\n", (unsigned int)&pDev->rtmem->rxmode_msgs[0]);
+ printf(" TXMODES MSGS: 0x%x\n", (unsigned int)&pDev->rtmem->txmode_msgs[0]);
+ printf(" IRQLOG: 0x%x\n", (unsigned int)&pDev->rtmem->irq_logs[0]);
+ break;
+ }
+ printf(" CTRL: 0x%x\n", regs->ctrl);
+ printf(" OPER: 0x%x\n", regs->oper);
+ printf(" CUR_CMD: 0x%x\n", regs->cur_cmd);
+ printf(" IMASK: 0x%x\n", regs->imask);
+ printf(" IPEND: 0x%x\n", regs->ipend);
+ printf(" IPOINT: 0x%x\n", regs->ipoint);
+ printf(" BIT_REG: 0x%x\n", regs->bit_reg);
+ printf(" TTAG: 0x%x\n", regs->ttag);
+ printf(" DPOINT: 0x%x\n", regs->dpoint);
+ printf(" SW: 0x%x\n", regs->sw);
+ printf(" INITCOUNT: 0x%x\n", regs->initcount);
+ printf(" MCPOINT: 0x%x\n", regs->mcpoint);
+ printf(" MDPOINT: 0x%x\n", regs->mdpoint);
+ printf(" MBC: 0x%x\n", regs->mbc);
+ printf(" MFILTA: 0x%x\n", regs->mfilta);
+ printf(" MFILTB: 0x%x\n", regs->mfiltb);
+ printf(" ENHANCED: 0x%x\n", regs->enhanced);
+ printf(" W_CTRL: 0x%x\n", regs->w_ctrl);
+ printf(" W_IRQCTRL: 0x%x\n", regs->w_irqctrl);
+ printf(" W_AHBADDR: 0x%x\n", regs->w_ahbaddr);
+}
+
+void b1553brm_print(int options)
+{
+ struct amba_drv_info *drv = &b1553brm_drv_info;
+ struct drvmgr_dev *dev;
+
+ dev = drv->general.dev;
+ while(dev) {
+ b1553brm_print_dev(dev, options);
+ dev = dev->next_in_drv;
+ }
+}
diff --git a/bsps/shared/grlib/1553/b1553rt.c b/bsps/shared/grlib/1553/b1553rt.c
new file mode 100644
index 0000000000..35afd901c8
--- /dev/null
+++ b/bsps/shared/grlib/1553/b1553rt.c
@@ -0,0 +1,857 @@
+/*
+ * B1553RT driver implementation
+ *
+ * COPYRIGHT (c) 2009.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bsp.h>
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <ctype.h>
+#include <rtems/bspIo.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/b1553rt.h>
+#include <grlib/ambapp.h>
+#include <grlib/ambapp_bus.h>
+
+#include <grlib/grlib_impl.h>
+
+/* Uncomment for debug output */
+/*#define DEBUG 1*/
+
+/*
+ #define FUNCDEBUG 1*/
+/*#undef DEBUG*/
+#undef FUNCDEBUG
+
+/* EVENT_QUEUE_SIZE sets the size of the event queue
+ */
+#define EVENT_QUEUE_SIZE 1024
+
+
+#define INDEX(x) ( x&(EVENT_QUEUE_SIZE-1) )
+
+#if 0
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+#ifdef FUNCDEBUG
+#define FUNCDBG(x...) printk(x)
+#else
+#define FUNCDBG(x...)
+#endif
+
+#define READ_DMA(address) _READ16((unsigned int)address)
+
+static __inline__ unsigned short _READ16(unsigned int addr) {
+ unsigned short tmp;
+ asm(" lduha [%1]1, %0 "
+ : "=r"(tmp)
+ : "r"(addr)
+ );
+ return tmp;
+}
+
+static rtems_device_driver rt_initialize(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver rt_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver rt_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver rt_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver rt_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver rt_control(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+
+#define RT_DRIVER_TABLE_ENTRY { rt_initialize, rt_open, rt_close, rt_read, rt_write, rt_control }
+
+static rtems_driver_address_table b1553rt_driver = RT_DRIVER_TABLE_ENTRY;
+
+typedef struct {
+
+ struct drvmgr_dev *dev; /* Driver manager device */
+ char devName[32]; /* Device Name */
+
+ struct rt_reg *regs;
+ unsigned int ctrl_copy; /* Local copy of config register */
+
+ unsigned int cfg_freq;
+
+ unsigned int memarea_base;
+ unsigned int memarea_base_remote;
+
+ volatile unsigned short *mem;
+
+ /* Received events waiting to be read */
+ struct rt_msg *rt_event;
+ unsigned int head, tail;
+
+ int rx_blocking;
+
+ rtems_id rx_sem, tx_sem, dev_sem;
+ int minor;
+ int irqno;
+
+#ifdef DEBUG
+ unsigned int log[EVENT_QUEUE_SIZE*4];
+ unsigned int log_i;
+#endif
+
+ unsigned int status;
+ rtems_id event_id; /* event that may be signalled upon errors, needs to be set through ioctl command RT_SET_EVENTID */
+
+} rt_priv;
+
+static void b1553rt_interrupt(void *arg);
+static rtems_device_driver rt_init(rt_priv *rt);
+
+#define OFS(ofs) (((unsigned int)&ofs & 0x1ffff)>>1)
+
+static int b1553rt_driver_io_registered = 0;
+static rtems_device_major_number b1553rt_driver_io_major = 0;
+
+/******************* Driver manager interface ***********************/
+
+/* Driver prototypes */
+int b1553rt_register_io(rtems_device_major_number *m);
+int b1553rt_device_init(rt_priv *pDev);
+
+int b1553rt_init2(struct drvmgr_dev *dev);
+int b1553rt_init3(struct drvmgr_dev *dev);
+int b1553rt_remove(struct drvmgr_dev *dev);
+
+struct drvmgr_drv_ops b1553rt_ops =
+{
+ .init = {NULL, b1553rt_init2, b1553rt_init3, NULL},
+ .remove = b1553rt_remove,
+ .info = NULL
+};
+
+struct amba_dev_id b1553rt_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_B1553RT},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info b1553rt_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_B1553RT_ID, /* Driver ID */
+ "B1553RT_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &b1553rt_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+
+ },
+ &b1553rt_ids[0]
+};
+
+void b1553rt_register_drv (void)
+{
+ DBG("Registering B1553RT driver\n");
+ drvmgr_drv_register(&b1553rt_drv_info.general);
+}
+
+int b1553rt_init2(struct drvmgr_dev *dev)
+{
+ rt_priv *priv;
+
+ DBG("B1553RT[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+ priv = dev->priv = grlib_calloc(1, sizeof(*priv));
+ if ( !priv )
+ return DRVMGR_NOMEM;
+ priv->dev = dev;
+
+ /* This core will not find other cores, so we wait for init2() */
+
+ return DRVMGR_OK;
+}
+
+int b1553rt_init3(struct drvmgr_dev *dev)
+{
+ rt_priv *priv;
+ char prefix[32];
+ rtems_status_code status;
+
+ priv = dev->priv;
+
+ /* Do initialization */
+
+ if ( b1553rt_driver_io_registered == 0) {
+ /* Register the I/O driver only once for all cores */
+ if ( b1553rt_register_io(&b1553rt_driver_io_major) ) {
+ /* Failed to register I/O driver */
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+
+ b1553rt_driver_io_registered = 1;
+ }
+
+ /* I/O system registered and initialized
+ * Now we take care of device initialization.
+ */
+
+ if ( b1553rt_device_init(priv) ) {
+ return DRVMGR_FAIL;
+ }
+
+ /* Get Filesystem name prefix */
+ prefix[0] = '\0';
+ if ( drvmgr_get_dev_prefix(dev, prefix) ) {
+ /* Failed to get prefix, make sure of a unique FS name
+ * by using the driver minor.
+ */
+ sprintf(priv->devName, "/dev/b1553rt%d", dev->minor_drv);
+ } else {
+ /* Got special prefix, this means we have a bus prefix
+ * And we should use our "bus minor"
+ */
+ sprintf(priv->devName, "/dev/%sb1553rt%d", prefix, dev->minor_bus);
+ }
+
+ /* Register Device */
+ status = rtems_io_register_name(priv->devName, b1553rt_driver_io_major, dev->minor_drv);
+ if (status != RTEMS_SUCCESSFUL) {
+ return DRVMGR_FAIL;
+ }
+
+ return DRVMGR_OK;
+}
+
+int b1553rt_remove(struct drvmgr_dev *dev)
+{
+ /* Stop more tasks to open driver */
+
+ /* Throw out all tasks using this driver */
+
+ /* Unregister I/O node */
+
+ /* Unregister and disable Interrupt */
+
+ /* Free device memory */
+
+	/* Return successfully */
+
+ return DRVMGR_FAIL;
+}
+
+/******************* Driver Implementation ***********************/
+
+int b1553rt_register_io(rtems_device_major_number *m)
+{
+ rtems_status_code r;
+
+ if ((r = rtems_io_register_driver(0, &b1553rt_driver, m)) == RTEMS_SUCCESSFUL) {
+ DBG("B1553RT driver successfully registered, major: %d\n", *m);
+ } else {
+ switch(r) {
+ case RTEMS_TOO_MANY:
+ printk("B1553RT rtems_io_register_driver failed: RTEMS_TOO_MANY\n");
+ return -1;
+ case RTEMS_INVALID_NUMBER:
+ printk("B1553RT rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n");
+ return -1;
+ case RTEMS_RESOURCE_IN_USE:
+ printk("B1553RT rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n");
+ return -1;
+ default:
+ printk("B1553RT rtems_io_register_driver failed\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int b1553rt_device_init(rt_priv *pDev)
+{
+ struct amba_dev_info *ambadev;
+ struct ambapp_core *pnpinfo;
+ union drvmgr_key_value *value;
+ unsigned int mem;
+ unsigned int sys_freq_hz;
+
+ /* Get device information from AMBA PnP information */
+ ambadev = (struct amba_dev_info *)pDev->dev->businfo;
+ if ( ambadev == NULL ) {
+ return -1;
+ }
+ pnpinfo = &ambadev->info;
+ pDev->irqno = pnpinfo->irq;
+ pDev->regs = (struct rt_reg *)pnpinfo->apb_slv->start;
+ pDev->minor = pDev->dev->minor_drv;
+
+#ifdef DEBUG
+ pDev->log_i = 0;
+ memset(pDev->log,0,sizeof(pDev->log));
+ printf("LOG: 0x%x\n", &pDev->log[0]);
+ printf("LOG_I: 0x%x\n", &pDev->log_i);
+#endif
+
+ /* Get memory configuration from bus resources */
+ value = drvmgr_dev_key_get(pDev->dev, "dmaBaseAdr", DRVMGR_KT_POINTER);
+ if (value)
+ mem = (unsigned int)value->ptr;
+
+ if (value && (mem & 1)) {
+ /* Remote address, address as RT looks at it. */
+
+		/* Translate the base address into an address that the CPU can understand */
+ pDev->memarea_base = mem & ~1;
+ drvmgr_translate_check(pDev->dev, DMAMEM_TO_CPU,
+ (void *)pDev->memarea_base_remote,
+ (void **)&pDev->memarea_base,
+ 4 * 1024);
+ } else {
+ if (!value) {
+ /* Use dynamically allocated memory,
+ * 4k DMA memory + 4k for alignment
+ */
+ mem = (unsigned int)grlib_malloc(4 * 1024 * 2);
+ if ( !mem ){
+ printk("RT: Failed to allocate HW memory\n\r");
+ return -1;
+ }
+ /* align memory to 4k boundary */
+ pDev->memarea_base = (mem + 0xfff) & ~0xfff;
+ } else {
+ pDev->memarea_base = mem;
+ }
+
+ /* Translate the base address into an address that the RT core can understand */
+ drvmgr_translate_check(pDev->dev, CPUMEM_TO_DMA,
+ (void *)pDev->memarea_base,
+ (void **)&pDev->memarea_base_remote,
+ 4 * 1024);
+ }
+
+ /* clear the used memory */
+ memset((char *)pDev->memarea_base, 0, 4 * 1024);
+
+ /* Set base address of all descriptors */
+ pDev->memarea_base = (unsigned int)mem;
+ pDev->mem = (volatile unsigned short *)pDev->memarea_base;
+
+ pDev->rt_event = NULL;
+
+ /* The RT is always clocked at the same frequency as the bus
+	 * If the frequency doesn't match it is defaulted to 24MHz,
+ * user can always override it.
+ */
+ pDev->cfg_freq = RT_FREQ_24MHZ;
+
+ /* Get frequency in Hz */
+ if ( drvmgr_freq_get(pDev->dev, DEV_APB_SLV, &sys_freq_hz) == 0 ) {
+ if ( sys_freq_hz == 20000000 ) {
+ pDev->cfg_freq = RT_FREQ_20MHZ;
+ } else if ( sys_freq_hz == 16000000 ) {
+ pDev->cfg_freq = RT_FREQ_16MHZ;
+ } else if ( sys_freq_hz == 12000000 ) {
+ pDev->cfg_freq = RT_FREQ_12MHZ;
+ }
+ }
+
+ value = drvmgr_dev_key_get(pDev->dev, "coreFreq", DRVMGR_KT_INT);
+ if ( value ) {
+ pDev->cfg_freq = value->i & RT_FREQ_MASK;
+ }
+
+ /* RX Semaphore created with count = 0 */
+ if ( rtems_semaphore_create(rtems_build_name('R', 'T', '0', '0' + pDev->minor),
+ 0,
+ RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+ 0,
+ &pDev->rx_sem) != RTEMS_SUCCESSFUL ) {
+ printk("RT: Failed to create rx semaphore\n");
+ return RTEMS_INTERNAL_ERROR;
+ }
+
+ /* Device Semaphore created with count = 1 */
+ if ( rtems_semaphore_create(rtems_build_name('R', 'T', '0', '0' + pDev->minor),
+ 1,
+ RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+ 0,
+ &pDev->dev_sem) != RTEMS_SUCCESSFUL ){
+ printk("RT: Failed to create device semaphore\n");
+ return RTEMS_INTERNAL_ERROR;
+ }
+
+ /* Default to RT-mode */
+ rt_init(pDev);
+
+ return 0;
+}
+
+static int odd_parity(unsigned int data)
+{
+ unsigned int i=0;
+
+ while(data)
+ {
+ i++;
+ data &= (data - 1);
+ }
+
+ return !(i&1);
+}
+
+static void start_operation(rt_priv *rt)
+{
+
+}
+
+static void stop_operation(rt_priv *rt)
+{
+
+}
+
+static void set_extmdata_en(rt_priv *rt, int extmdata)
+{
+ if ( extmdata )
+ extmdata = 1;
+ rt->ctrl_copy = (rt->ctrl_copy & ~(1<<16)) | (extmdata<<16);
+ rt->regs->ctrl = rt->ctrl_copy;
+}
+
+static void set_vector_word(rt_priv *rt, unsigned short vword)
+{
+ rt->regs->vword = vword;
+}
+
+/* Set clock speed */
+static void set_clkspd(rt_priv *rt, int spd)
+{
+ rt->ctrl_copy = (rt->ctrl_copy & ~0xC0) | (spd<<6);
+ rt->regs->ctrl = rt->ctrl_copy;
+ asm volatile("nop"::);
+ rt->regs->ctrl = rt->ctrl_copy | (1<<20);
+}
+
+static void set_rtaddr(rt_priv *rt, int addr)
+{
+ rt->ctrl_copy = (rt->ctrl_copy & ~0x3F00) | (addr << 8) | (odd_parity(addr)<<13);
+ rt->regs->ctrl = rt->ctrl_copy;
+}
+
+static void set_broadcast_en(rt_priv *rt, int data)
+{
+ rt->ctrl_copy = (rt->ctrl_copy & ~0x40000) | (data<<18);
+ rt->regs->ctrl = rt->ctrl_copy;
+}
+
+static rtems_device_driver rt_init(rt_priv *rt)
+{
+ rt->rx_blocking = 1;
+
+ if ( rt->rt_event )
+ free(rt->rt_event);
+ rt->rt_event = NULL;
+
+ rt->rt_event = grlib_malloc(EVENT_QUEUE_SIZE*sizeof(*rt->rt_event));
+
+ if (rt->rt_event == NULL) {
+ DBG("RT driver failed to allocated memory.");
+ return RTEMS_NO_MEMORY;
+ }
+
+ rt->ctrl_copy = rt->regs->ctrl & 0x3F00; /* Keep rtaddr and rtaddrp */
+ rt->ctrl_copy |= 0x3C0D0; /* broadcast disabled, extmdata=1, writetsw = writecmd = 1 */
+ rt->regs->ctrl = rt->ctrl_copy;
+
+ /* Set Clock speed */
+ set_clkspd(rt, rt->cfg_freq);
+
+ rt->regs->addr = rt->memarea_base_remote;
+ rt->regs->ipm = 0x70000; /* Enable RT RX, MEM Failure and AHB Error interrupts */
+
+ DBG("B1553RT DMA_AREA: 0x%x\n", (unsigned int)rt->mem);
+
+ return RTEMS_SUCCESSFUL;
+}
+
+
+static rtems_device_driver rt_initialize(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_device_driver rt_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg) {
+ rt_priv *rt;
+ struct drvmgr_dev *dev;
+
+ FUNCDBG("rt_open\n");
+
+ if ( drvmgr_get_dev(&b1553rt_drv_info.general, minor, &dev) ) {
+ DBG("Wrong minor %d\n", minor);
+ return RTEMS_UNSATISFIED;
+ }
+ rt = (rt_priv *)dev->priv;
+
+ if (rtems_semaphore_obtain(rt->dev_sem, RTEMS_NO_WAIT, RTEMS_NO_TIMEOUT) != RTEMS_SUCCESSFUL) {
+ DBG("rt_open: resource in use\n");
+ return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+ }
+
+ /* Set defaults */
+ rt->event_id = 0;
+
+ start_operation(rt);
+
+ /* Register interrupt routine */
+ if (drvmgr_interrupt_register(rt->dev, 0, "b1553rt", b1553rt_interrupt, rt)) {
+ rtems_semaphore_release(rt->dev_sem);
+ return -1;
+ }
+
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_device_driver rt_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ rt_priv *rt;
+ struct drvmgr_dev *dev;
+
+ FUNCDBG("rt_close");
+
+ if ( drvmgr_get_dev(&b1553rt_drv_info.general, minor, &dev) ) {
+ return RTEMS_UNSATISFIED;
+ }
+ rt = (rt_priv *)dev->priv;
+
+ drvmgr_interrupt_unregister(rt->dev, 0, b1553rt_interrupt, rt);
+
+ stop_operation(rt);
+ rtems_semaphore_release(rt->dev_sem);
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static int get_messages(rt_priv *rt, void *buf, unsigned int msg_count)
+{
+
+ struct rt_msg *dest = (struct rt_msg *) buf;
+ int count = 0;
+
+ if (rt->head == rt->tail) {
+ return 0;
+ }
+
+ do {
+
+ DBG("rt read - head: %d, tail: %d\n", rt->head, rt->tail);
+ dest[count++] = rt->rt_event[INDEX(rt->tail++)];
+
+ } while (rt->head != rt->tail && count < msg_count);
+
+ return count;
+
+}
+static rtems_device_driver rt_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ rtems_libio_rw_args_t *rw_args;
+ int count = 0;
+ rt_priv *rt;
+ struct drvmgr_dev *dev;
+
+ if ( drvmgr_get_dev(&b1553rt_drv_info.general, minor, &dev) ) {
+ return RTEMS_UNSATISFIED;
+ }
+ rt = (rt_priv *)dev->priv;
+
+ rw_args = (rtems_libio_rw_args_t *) arg;
+
+ FUNCDBG("rt_read [%i,%i]: buf: 0x%x, len: %i\n",major, minor, (unsigned int)rw_args->buffer, rw_args->count);
+
+ while ( (count = get_messages(rt,rw_args->buffer, rw_args->count)) == 0 ) {
+
+ if (rt->rx_blocking) {
+ rtems_semaphore_obtain(rt->rx_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ } else {
+ /* Translates to EBUSY */
+ return RTEMS_RESOURCE_IN_USE;
+ }
+ }
+
+ rw_args->bytes_moved = count;
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_device_driver rt_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ rtems_libio_rw_args_t *rw_args;
+ struct rt_msg *source;
+ rt_priv *rt;
+ struct drvmgr_dev *dev;
+ unsigned int descriptor, suba, wc;
+
+ if ( drvmgr_get_dev(&b1553rt_drv_info.general, minor, &dev) ) {
+ return RTEMS_UNSATISFIED;
+ }
+ rt = (rt_priv *)dev->priv;
+
+ rw_args = (rtems_libio_rw_args_t *) arg;
+
+ if ( rw_args->count != 1 ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ source = (struct rt_msg *) rw_args->buffer;
+
+ descriptor = source[0].desc & 0x7F;
+ suba = descriptor-32;
+ wc = source[0].miw >> 11;
+ wc = wc ? wc : 32;
+
+ FUNCDBG("rt_write [%i,%i]: buf: 0x%x\n",major, minor, (unsigned int)rw_args->buffer);
+
+ memcpy((void *)&rt->mem[0x400 + suba*32], &source[0].data[0], wc*2);
+
+ rw_args->bytes_moved = 1;
+
+ return RTEMS_SUCCESSFUL;
+
+}
+
+static rtems_device_driver rt_control(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ rtems_libio_ioctl_args_t *ioarg = (rtems_libio_ioctl_args_t *) arg;
+ unsigned int *data = ioarg->buffer;
+
+ rt_priv *rt;
+ struct drvmgr_dev *dev;
+
+ FUNCDBG("rt_control[%d]: [%i,%i]\n", minor, major, minor);
+
+ if ( drvmgr_get_dev(&b1553rt_drv_info.general, minor, &dev) ) {
+ return RTEMS_UNSATISFIED;
+ }
+ rt = (rt_priv *)dev->priv;
+
+ if (!ioarg) {
+ DBG("rt_control: invalid argument\n");
+ return RTEMS_INVALID_NAME;
+ }
+
+ ioarg->ioctl_return = 0;
+ switch (ioarg->command) {
+
+ case RT_SET_ADDR:
+ set_rtaddr(rt, data[0]);
+ break;
+
+ case RT_SET_BCE:
+ set_broadcast_en(rt, data[0]);
+ break;
+
+ case RT_SET_VECTORW:
+ set_vector_word(rt, data[0]);
+ break;
+
+ case RT_SET_EXTMDATA:
+ set_extmdata_en(rt, data[0]);
+ break;
+
+ case RT_RX_BLOCK:
+ rt->rx_blocking = data[0];
+ break;
+
+ case RT_CLR_STATUS:
+ rt->status = 0;
+ break;
+
+ case RT_GET_STATUS: /* copy status */
+ if ( !ioarg->buffer )
+ return RTEMS_INVALID_NAME;
+
+ *(unsigned int *)ioarg->buffer = rt->status;
+ break;
+
+ case RT_SET_EVENTID:
+ rt->event_id = (rtems_id)ioarg->buffer;
+ break;
+
+ default:
+ return RTEMS_NOT_IMPLEMENTED;
+ }
+
+ return RTEMS_SUCCESSFUL;
+}
+
+/* B1553RT interrupt service routine.
+ *
+ * Handles three IRQ sources reported in the ipm register:
+ *  - bit 0: RT transfer IRQ — decode the transfer, copy data words into the
+ *    software event queue (rt->rt_event ring, head/tail indices);
+ *  - bit 1: memory failure;
+ *  - bit 2: AHB error.
+ * ipm == 0 is interpreted as "core was reset behind our back": control
+ * registers are restored and a reset mode-code event is queued.
+ * Finally the RX semaphore is released and, on error, the registered
+ * RTEMS event (rt->event_id) is sent with the error status.
+ */
+static void b1553rt_interrupt(void *arg)
+{
+    rt_priv *rt = arg;
+    unsigned short descriptor;
+    int signal_event=0, wake_rx_task=0;
+    unsigned int event_status=0;
+    unsigned int wc, irqv, cmd, tsw, suba, tx, miw, i;
+    unsigned int ipend;
+
+    /* Stash the failing descriptor number in the upper 16 bits of the
+     * event status word. */
+    #define SET_ERROR_DESCRIPTOR(descriptor) (event_status = (event_status & 0x0000ffff) | descriptor<<16)
+    ipend = rt->regs->ipm;
+
+    if (ipend == 0) {
+        /* IRQ mask has been cleared, we must have been reset */
+        /* Restore ctrl registers */
+        rt->regs->ctrl = rt->ctrl_copy;
+        rt->regs->addr = rt->memarea_base_remote;
+        rt->regs->ipm = 0x70000;
+        /* Send reset mode code event */
+        if (rt->head - rt->tail != EVENT_QUEUE_SIZE) {
+            miw = (8<<11);
+            descriptor = 64 + 32 + 8; /* TX mode code 8 = reset remote terminal */
+            rt->rt_event[INDEX(rt->head)].miw = miw;
+            rt->rt_event[INDEX(rt->head)].time = 0;
+            rt->rt_event[INDEX(rt->head)].desc = descriptor;
+            rt->head++;
+        }
+    }
+
+    if ( ipend & 0x1 ) {
+        /* RT IRQ */
+        if (rt->head - rt->tail != EVENT_QUEUE_SIZE) {
+
+            irqv = rt->regs->irq;
+            cmd = irqv >> 7;
+            wc = cmd & 0x1F; /* word count / mode code */
+            suba = irqv & 0x1F; /* sub address (0-31) */
+            tx = (irqv >> 5) & 1;
+
+            /* read status word */
+            tsw = READ_DMA(&rt->mem[tx*0x3E0+suba]);
+
+            /* Build Message Information Word (B1553BRM-style) */
+            /* NOTE(review): '!' binds tighter than '>>', so
+             * !(tsw&RT_TSW_OK)>>7 evaluates to (0 or 1)>>7 == 0 always;
+             * the message-error flag never reaches the MIW — TODO confirm
+             * intended encoding against the B1553BRM MIW layout. */
+            miw = (wc<<11) | (tsw&RT_TSW_BUS)>>4 | !(tsw&RT_TSW_OK)>>7 | (tsw&RT_TSW_ILL)>>5 |
+                (tsw&RT_TSW_PAR)>>5 | (tsw&RT_TSW_MAN)>>7;
+
+            descriptor = (tx << 5) | suba;
+
+            /* Mode codes (subaddress 0 and 31 carry mode codes) */
+            if (suba == 0 || suba == 31) {
+                descriptor = 64 + (tx*32) + wc;
+            }
+
+            /* Data received or transmitted */
+            if (descriptor < 64) {
+                wc = wc ? wc : 32; /* wc = 0 means 32 words transmitted */
+            }
+            /* RX Mode code */
+            else if (descriptor < 96) {
+                wc = (wc>>4);
+            }
+            /* TX Mode code */
+            else if (descriptor < 128) {
+                wc = (wc>>4);
+            }
+
+            /* Copy to event queue */
+            rt->rt_event[INDEX(rt->head)].miw = miw;
+            rt->rt_event[INDEX(rt->head)].time = 0;
+
+            for (i = 0; i < wc; i++) {
+                rt->rt_event[INDEX(rt->head)].data[i] = READ_DMA(&rt->mem[tx*0x400 + suba*32 + i]);
+            }
+            rt->rt_event[INDEX(rt->head)].desc = descriptor;
+            rt->head++;
+
+
+            /* Handle errors */
+            if ( tsw & RT_TSW_ILL){
+                FUNCDBG("RT: RT_ILLCMD\n\r");
+                rt->status |= RT_ILLCMD_IRQ;
+                event_status |= RT_ILLCMD_IRQ;
+                SET_ERROR_DESCRIPTOR(descriptor);
+                signal_event=1;
+            }
+
+            if ( !(tsw & RT_TSW_OK) ) {
+                FUNCDBG("RT: RT_MERR_IRQ\n\r");
+                rt->status |= RT_MERR_IRQ;
+                event_status |= RT_MERR_IRQ;
+                SET_ERROR_DESCRIPTOR(descriptor);
+                signal_event=1;
+            }
+
+        }
+        else {
+            /* Indicate overrun */
+            rt->rt_event[INDEX(rt->head)].desc |= 0x8000;
+        }
+    }
+
+    if ( ipend & 0x2 ) {
+        /* Memory failure IRQ */
+        FUNCDBG("B1553RT: Memory failure\n");
+        event_status |= RT_DMAF_IRQ;
+        signal_event=1;
+    }
+
+    if ( ipend & 0x4 ) {
+        /* AHB Error */
+        FUNCDBG("B1553RT: AHB ERROR\n");
+        event_status |= RT_DMAF_IRQ;
+        signal_event=1;
+    }
+
+#ifdef DEBUG
+    /* NOTE(review): descriptor/cmd/miw/tsw are only assigned on the
+     * (ipend & 0x1) path above; on pure error IRQs this logs
+     * indeterminate values — debug-only, but worth confirming. */
+    rt->log[rt->log_i++ % EVENT_QUEUE_SIZE] = descriptor;
+    rt->log[rt->log_i++ % EVENT_QUEUE_SIZE] = cmd;
+    rt->log[rt->log_i++ % EVENT_QUEUE_SIZE] = miw;
+    rt->log[rt->log_i++ % EVENT_QUEUE_SIZE] = tsw;
+#endif
+
+    /* NOTE(review): set unconditionally, so the semaphore is released on
+     * every IRQ, not only receive interrupts as the comment below says. */
+    wake_rx_task = 1;
+
+    /* Wake any blocked rx thread only on receive interrupts */
+    if ( wake_rx_task ) {
+        rtems_semaphore_release(rt->rx_sem);
+    }
+
+    /* Copy current mask to status mask */
+    if ( event_status ) {
+        if ( event_status & 0xffff0000 )
+            rt->status &= 0x0000ffff;
+        rt->status |= event_status;
+    }
+
+    /* signal event once */
+    if ( signal_event && (rt->event_id != 0) ) {
+        rtems_event_send(rt->event_id, event_status);
+    }
+
+}
+
+/* Debug aid: print name, register base and IRQ number of one B1553RT core.
+ * 'options' is currently unused. */
+void b1553rt_print_dev(struct drvmgr_dev *dev, int options)
+{
+    rt_priv *pDev = dev->priv;
+
+    /* Print */
+    printf("--- B1553RT[%d] %s ---\n", pDev->minor, pDev->devName);
+    printf(" REGS: 0x%x\n", (unsigned int)pDev->regs);
+    printf(" IRQ: %d\n", pDev->irqno);
+
+}
+
+/* Debug aid: print information about every B1553RT device bound to this
+ * driver by walking the driver's device list. */
+void b1553rt_print(int options)
+{
+    struct amba_drv_info *drv = &b1553rt_drv_info;
+    struct drvmgr_dev *dev;
+
+    dev = drv->general.dev;
+    while(dev) {
+        b1553rt_print_dev(dev, options);
+        dev = dev->next_in_drv;
+    }
+}
diff --git a/bsps/shared/grlib/1553/gr1553b.c b/bsps/shared/grlib/1553/gr1553b.c
new file mode 100644
index 0000000000..777b6dc743
--- /dev/null
+++ b/bsps/shared/grlib/1553/gr1553b.c
@@ -0,0 +1,312 @@
+/* GR1553B driver, used by BC, RT and/or BM driver
+ *
+ * COPYRIGHT (c) 2010.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdlib.h>
+#include <grlib/ambapp_bus.h>
+
+#include <grlib/gr1553b.h>
+
+#include <grlib/grlib_impl.h>
+
+/* Driver Manager interface for BC, RT, BM, BRM, BC-BM and RT-BM */
+
+#define GR1553B_WRITE_REG(adr, val) *(volatile uint32_t *)(adr) = (val)
+#define GR1553B_READ_REG(adr) (*(volatile uint32_t *)(adr))
+
+#define FEAT_BC 0x1
+#define FEAT_RT 0x2
+#define FEAT_BM 0x4
+
+#define ALLOC_BC 0x1
+#define ALLOC_RT 0x2
+#define ALLOC_BM 0x4
+
+/* One GR1553B core. 'dev' must remain the first member: the open functions
+ * hand out &gr1553_device.dev and the close functions cast that pointer
+ * back to the enclosing structure. */
+struct gr1553_device {
+	struct drvmgr_dev *dev;	/* Driver manager device node */
+	int features;		/* FEAT_BC/RT/BM bits supported by the HW */
+	int alloc;		/* ALLOC_BC/RT/BM bits currently taken */
+};
+
+/* Node in one of the per-feature (BC/RT/BM) linked lists; each node maps
+ * a minor number to the core that provides the feature. */
+struct gr1553_device_feature {
+	struct gr1553_device_feature *next;	/* Next node in feature list */
+	struct gr1553_device *dev;		/* Core providing the feature */
+	int minor;				/* Minor within this feature list */
+};
+
+/* Device lists */
+static struct gr1553_device_feature *gr1553_bm_root = NULL;
+static struct gr1553_device_feature *gr1553_rt_root = NULL;
+static struct gr1553_device_feature *gr1553_bc_root = NULL;
+
+/* Driver registered */
+static int gr1553_driver_registerd = 0;
+
+/* Add 'feat' to linked list pointed to by 'root'. A minor is also assigned. */
+/* Add 'feat' to linked list pointed to by 'root'. A minor is also assigned:
+ * the lowest number not used by an already-listed node.
+ *
+ * NOTE(review): the collision scan below stops at the tail node without
+ * comparing its minor, so it looks like a second core can be assigned the
+ * same minor as the first (list of one node: loop body never runs) —
+ * TODO confirm against multi-core configurations.
+ */
+static void gr1553_list_add
+	(
+	struct gr1553_device_feature **root,
+	struct gr1553_device_feature *feat
+	)
+{
+	int minor;
+	struct gr1553_device_feature *curr;
+
+	/* Empty list: first node gets minor 0 */
+	if ( *root == NULL ) {
+		*root = feat;
+		feat->next = NULL;
+		feat->minor = 0;
+		return;
+	}
+
+	/* Find lowest free minor; restart the scan on every collision */
+	minor = 0;
+retry_new_minor:
+	curr = *root;
+	while ( curr->next ) {
+		if ( curr->minor == minor ) {
+			minor++;
+			goto retry_new_minor;
+		}
+		curr = curr->next;
+	}
+
+	/* Append at tail */
+	feat->next = NULL;
+	feat->minor = minor;
+	curr->next = feat;
+}
+
+/* Linear search of a feature list for the node with the given minor.
+ * Returns the node, or NULL if no such minor exists. */
+static struct gr1553_device_feature *gr1553_list_find
+	(
+	struct gr1553_device_feature *root,
+	int minor
+	)
+{
+	struct gr1553_device_feature *curr = root;
+	while ( curr ) {
+		if ( curr->minor == minor ) {
+			return curr;
+		}
+		curr = curr->next;
+	}
+	return NULL;
+}
+
+/* Allocate the BC function of core 'minor' for exclusive use by the BC
+ * driver. Returns a handle (pointer to the core's drvmgr device pointer,
+ * the first member of struct gr1553_device) or NULL if the minor does not
+ * exist or BC/RT is already taken. Release with gr1553_bc_close(). */
+struct drvmgr_dev **gr1553_bc_open(int minor)
+{
+	struct gr1553_device_feature *feat;
+
+	feat = gr1553_list_find(gr1553_bc_root, minor);
+	if ( feat == NULL )
+		return NULL;
+
+	/* Only possible to allocate if both RT and BC are free, this is
+	 * because it is not possible to use the RT and the BC at the
+	 * same time.
+	 */
+	if ( feat->dev->alloc & (ALLOC_BC|ALLOC_RT) )
+		return NULL;
+
+	/* Alloc BC device */
+	feat->dev->alloc |= ALLOC_BC;
+
+	return &feat->dev->dev;
+}
+
+/* Release a BC allocation made by gr1553_bc_open(). The cast is valid
+ * because 'dev' points at the first member of struct gr1553_device. */
+void gr1553_bc_close(struct drvmgr_dev **dev)
+{
+	struct gr1553_device *d = (struct gr1553_device *)dev;
+
+	d->alloc &= ~ALLOC_BC;
+}
+
+/* Allocate the RT function of core 'minor' for exclusive use by the RT
+ * driver. Returns a handle as for gr1553_bc_open(), or NULL if the minor
+ * does not exist or BC/RT is already taken. */
+struct drvmgr_dev **gr1553_rt_open(int minor)
+{
+	struct gr1553_device_feature *feat;
+
+	feat = gr1553_list_find(gr1553_rt_root, minor);
+	if ( feat == NULL )
+		return NULL;
+
+	/* Only possible to allocate if both RT and BC are free, this is
+	 * because it is not possible to use the RT and the BC at the
+	 * same time.
+	 */
+	if ( feat->dev->alloc & (ALLOC_BC|ALLOC_RT) )
+		return NULL;
+
+	/* Alloc RT device */
+	feat->dev->alloc |= ALLOC_RT;
+
+	return &feat->dev->dev;
+}
+
+/* Release an RT allocation made by gr1553_rt_open(). */
+void gr1553_rt_close(struct drvmgr_dev **dev)
+{
+	struct gr1553_device *d = (struct gr1553_device *)dev;
+
+	d->alloc &= ~ALLOC_RT;
+}
+
+/* Allocate the BM (bus monitor) function of core 'minor'. Returns a handle
+ * as for gr1553_bc_open(), or NULL if the minor does not exist or the BM
+ * is already taken. The BM may be used together with BC or RT. */
+struct drvmgr_dev **gr1553_bm_open(int minor)
+{
+	struct gr1553_device_feature *feat;
+
+	feat = gr1553_list_find(gr1553_bm_root, minor);
+	if ( feat == NULL )
+		return NULL;
+
+	/* The BM only needs the BM part of the core to be free, unlike
+	 * BC/RT which exclude each other.
+	 */
+	if ( feat->dev->alloc & ALLOC_BM )
+		return NULL;
+
+	/* Alloc BM device */
+	feat->dev->alloc |= ALLOC_BM;
+
+	return &feat->dev->dev;
+}
+
+/* Release a BM allocation made by gr1553_bm_open(). */
+void gr1553_bm_close(struct drvmgr_dev **dev)
+{
+	struct gr1553_device *d = (struct gr1553_device *)dev;
+
+	d->alloc &= ~ALLOC_BM;
+}
+
+/* Driver manager init stage 2: quiesce the core. Masks and clears all
+ * interrupts and stops BC schedule, RT reception and BM logging so that
+ * higher-level drivers start from a known-idle state.
+ * Returns DRVMGR_OK, DRVMGR_FAIL (no bus info) or DRVMGR_EIO (no APB slave).
+ */
+static int gr1553_init2(struct drvmgr_dev *dev)
+{
+	struct amba_dev_info *ambadev;
+	struct ambapp_core *pnpinfo;
+	struct gr1553b_regs *regs;
+
+	/* Get device information from AMBA PnP information */
+	ambadev = (struct amba_dev_info *)dev->businfo;
+	if ( ambadev == NULL ) {
+		return DRVMGR_FAIL;
+	}
+	pnpinfo = &ambadev->info;
+	if ( pnpinfo->apb_slv == NULL )
+		return DRVMGR_EIO;
+	regs = (struct gr1553b_regs *)pnpinfo->apb_slv->start;
+
+	/* Stop IRQ */
+	GR1553B_WRITE_REG(&regs->imask, 0);
+	GR1553B_WRITE_REG(&regs->irq, 0xffffffff);
+	/* Stop BC if not already stopped (just in case).
+	 * 0x1552xxxx is the BC register write key. */
+	GR1553B_WRITE_REG(&regs->bc_ctrl, 0x15520204);
+	/* Stop RT rx (just in case). 0x1553xxxx is the RT write key. */
+	GR1553B_WRITE_REG(&regs->rt_cfg, 0x15530000);
+	/* Stop BM logging (just in case) */
+	GR1553B_WRITE_REG(&regs->bm_ctrl, 0);
+
+	return DRVMGR_OK;
+}
+
+/* Register the different functionalities that the
+ * core supports.
+ */
+/* Driver manager init stage 3: probe which functions (BM, BC, RT) the
+ * hardware supports by reading the status registers, and register one
+ * feature-list node per supported function so the BC/RT/BM drivers can
+ * find the core by minor number.
+ * Returns DRVMGR_OK, DRVMGR_NOMEM or DRVMGR_EIO (no feature found).
+ */
+static int gr1553_init3(struct drvmgr_dev *dev)
+{
+	struct amba_dev_info *ambadev;
+	struct ambapp_core *pnpinfo;
+	struct gr1553_device *priv;
+	struct gr1553_device_feature *feat;
+	struct gr1553b_regs *regs;
+
+	priv = grlib_malloc(sizeof(*priv));
+	if ( priv == NULL )
+		return DRVMGR_NOMEM;
+	priv->dev = dev;
+	priv->alloc = 0;
+	priv->features = 0;
+	dev->priv = NULL; /* Let higher level driver handle this */
+
+	/* Get device information from AMBA PnP information.
+	 * businfo/apb_slv were already validated in gr1553_init2. */
+	ambadev = (struct amba_dev_info *)dev->businfo;
+	pnpinfo = &ambadev->info;
+	regs = (struct gr1553b_regs *)pnpinfo->apb_slv->start;
+
+	/* NOTE(review): the three grlib_malloc(sizeof(*feat)) calls below are
+	 * not checked for NULL before use — TODO confirm allocation policy. */
+	if ( GR1553B_READ_REG(&regs->bm_stat) & GR1553B_BM_STAT_BMSUP ) {
+		priv->features |= FEAT_BM;
+		feat = grlib_malloc(sizeof(*feat));
+		feat->dev = priv;
+		/* Init Minor and Next */
+		gr1553_list_add(&gr1553_bm_root, feat);
+	}
+
+	if ( GR1553B_READ_REG(&regs->bc_stat) & GR1553B_BC_STAT_BCSUP ) {
+		priv->features |= FEAT_BC;
+		feat = grlib_malloc(sizeof(*feat));
+		feat->dev = priv;
+		/* Init Minor and Next */
+		gr1553_list_add(&gr1553_bc_root, feat);
+	}
+
+	if ( GR1553B_READ_REG(&regs->rt_stat) & GR1553B_RT_STAT_RTSUP ) {
+		priv->features |= FEAT_RT;
+		feat = grlib_malloc(sizeof(*feat));
+		feat->dev = priv;
+		/* Init Minor and Next */
+		gr1553_list_add(&gr1553_rt_root, feat);
+	}
+
+	if ( priv->features == 0 ) {
+		/* no features in HW should never happen.. an I/O error? */
+		free(priv);
+		return DRVMGR_EIO;
+	}
+
+	return DRVMGR_OK;
+}
+
+/* Driver operations: only init stages 2 and 3 are used; no remove/info. */
+struct drvmgr_drv_ops gr1553_ops =
+{
+	{NULL, gr1553_init2, gr1553_init3, NULL},
+	NULL,
+	NULL
+};
+
+/* AMBA PnP device IDs this driver binds to. */
+struct amba_dev_id gr1553_ids[] =
+{
+	{VENDOR_GAISLER, GAISLER_GR1553B},
+	{0, 0}	/* Mark end of table */
+};
+
+/* Driver manager registration record for the GR1553B base driver. */
+struct amba_drv_info gr1553_drv_info =
+{
+	{
+		DRVMGR_OBJ_DRV,			/* Driver */
+		NULL,				/* Next driver */
+		NULL,				/* Device list */
+		DRIVER_AMBAPP_GAISLER_GR1553B_ID,/* Driver ID */
+		"GR1553_DRV",			/* Driver Name */
+		DRVMGR_BUS_TYPE_AMBAPP,		/* Bus Type */
+		&gr1553_ops,
+		NULL,				/* Funcs */
+		0,				/* No devices yet */
+		0,
+	},
+	&gr1553_ids[0]
+};
+
+/* Multiple drivers may call this function. The drivers that depends on
+ * this driver:
+ * - BM driver
+ * - BC driver
+ * - RT driver
+ */
+/* Register the base driver with the driver manager. Idempotent: the
+ * gr1553_driver_registerd flag makes repeated calls (one per dependent
+ * BC/RT/BM driver) harmless. Not thread-safe; expected to run during
+ * single-threaded driver registration. */
+void gr1553_register(void)
+{
+	if ( gr1553_driver_registerd == 0 ) {
+		gr1553_driver_registerd = 1;
+		drvmgr_drv_register(&gr1553_drv_info.general);
+	}
+}
diff --git a/bsps/shared/grlib/1553/gr1553bc.c b/bsps/shared/grlib/1553/gr1553bc.c
new file mode 100644
index 0000000000..a22e2d8007
--- /dev/null
+++ b/bsps/shared/grlib/1553/gr1553bc.c
@@ -0,0 +1,1685 @@
+/* GR1553B BC driver
+ *
+ * COPYRIGHT (c) 2010.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <rtems.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+
+#include <grlib/gr1553b.h>
+#include <grlib/gr1553bc.h>
+
+#include <grlib/grlib_impl.h>
+
+#define GR1553BC_WRITE_MEM(adr, val) *(volatile uint32_t *)(adr) = (uint32_t)(val)
+#define GR1553BC_READ_MEM(adr) (*(volatile uint32_t *)(adr))
+
+#define GR1553BC_WRITE_REG(adr, val) *(volatile uint32_t *)(adr) = (uint32_t)(val)
+#define GR1553BC_READ_REG(adr) (*(volatile uint32_t *)(adr))
+
+/* Needed by list for data pointer and BD translation */
+/* Per-core private state of the GR1553B BC driver. */
+struct gr1553bc_priv {
+	struct drvmgr_dev **pdev;	/* Handle from gr1553_bc_open() */
+	struct gr1553b_regs *regs;	/* Core register base */
+	struct gr1553bc_list *list;	/* Synchronous descriptor list */
+	struct gr1553bc_list *alist;	/* Asynchronous descriptor list */
+	int started;			/* Non-zero when BC schedule runs */
+	SPIN_DECLARE(devlock);		/* Protects register/list access */
+
+	/* IRQ log management */
+	void *irq_log_p;		/* Unaligned allocation backing the log */
+	uint32_t *irq_log_base;		/* Aligned log start (CPU address) */
+	uint32_t *irq_log_curr;		/* Next log entry to process */
+	uint32_t *irq_log_end;		/* Last entry of the ring */
+	uint32_t *irq_log_base_hw;	/* Log start as seen by hardware */
+
+	/* Standard IRQ handler function */
+	bcirq_func_t irq_func;		/* User IRQ callback */
+	void *irq_data;			/* Argument passed to irq_func */
+};
+
+
+/*************** LIST HANDLING ROUTINES ***************/
+
+/* This marks that the jump is a jump to next Minor.
+ * It is important that it sets one of the two LSB
+ * so that we can separate it from a JUMP-IRQ function,
+ * function pointers must be aligned to 4bytes.
+ *
+ * This marker is used to optimize the INDICATION process,
+ * from a descriptor pointer we can step to next Jump that
+ * has this MARKER set, then we know that the MID is stored
+ * there.
+ *
+ * The marker is limited to 1 byte.
+ */
+#define NEXT_MINOR_MARKER 0x01
+
+/* To separate ASYNC list from SYNC list we mark them differently, but with
+ * LSB always set. This can be used to get the list the descriptor is a part
+ * of.
+ */
+#define NEXT_MINOR_MARKER_ASYNC 0x80
+
+/* Default list configuration used by gr1553bc_list_alloc(): 20us RT
+ * timeout tolerance for all 31 RT addresses, 30us BC timeout, no IRQ or
+ * pause on transfer error, synchronous list. */
+struct gr1553bc_list_cfg gr1553bc_def_cfg =
+{
+	.rt_timeout =
+	{
+		20, 20, 20, 20,
+		20, 20, 20, 20,
+		20, 20, 20, 20,
+		20, 20, 20, 20,
+		20, 20, 20, 20,
+		20, 20, 20, 20,
+		20, 20, 20, 20,
+		20, 20, 20
+	},
+	.bc_timeout = 30,
+	.tropt_irq_on_err = 0,
+	.tropt_pause_on_err = 0,
+	.async_list = 0,
+};
+
+/* Allocate a BC list head with room for 'max_major' major-frame pointers
+ * and apply the default configuration (gr1553bc_def_cfg, no BC device).
+ * On success *list is set and 0 returned; on failure -1. Free with
+ * gr1553bc_list_free(). */
+int gr1553bc_list_alloc(struct gr1553bc_list **list, int max_major)
+{
+	size_t size;
+	struct gr1553bc_list *l;
+
+	/* Trailing array of major-frame pointers follows the list header */
+	size = sizeof(*l) + max_major * sizeof(void *);
+	l = grlib_calloc(1, size);
+	if ( l == NULL )
+		return -1;
+
+	l->major_cnt = max_major;
+	*list = l;
+
+	/* Set default options:
+	 *  - RT timeout tolerance 20us
+	 *  - Global transfer options used when generating transfer descriptors
+	 *  - No BC device, note that this only works when no translation is
+	 *    required
+	 */
+	if ( gr1553bc_list_config(l, &gr1553bc_def_cfg, NULL) ) {
+		free(l);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Free a list allocated by gr1553bc_list_alloc(), including any internally
+ * allocated descriptor table. */
+void gr1553bc_list_free(struct gr1553bc_list *list)
+{
+	gr1553bc_list_table_free(list);
+	free(list);
+}
+
+/* Apply user configuration to a list: per-RT timeout tolerances (indices
+ * 0..30), BC timeout (stored in rt_timeout[31]), transfer-descriptor
+ * options and the owning BC device. Timeouts are encoded for hardware as
+ * 0=14us .. 0xf=74us in 4us steps, rounded upwards.
+ * Returns 0 on success, -1 if a timeout is out of range.
+ *
+ * NOTE(review): the BC timeout is range-checked only against > 0xf, not
+ * < 0 like the RT timeouts — TODO confirm cfg->bc_timeout cannot be < 13.
+ */
+int gr1553bc_list_config
+	(
+	struct gr1553bc_list *list,
+	struct gr1553bc_list_cfg *cfg,
+	void *bc
+	)
+{
+	int timeout, i, tropts;
+
+	/* RT Time Tolerances */
+	for (i=0; i<31; i++) {
+		/* 0=14us, 1=18us ... 0xf=74us
+		 * round upwards: 15us will be 18us
+		 */
+		timeout = ((cfg->rt_timeout[i] + 1) - 14) / 4;
+		if ( (timeout > 0xf) || (timeout < 0) )
+			return -1;
+		list->rt_timeout[i] = timeout;
+	}
+	/* BC timeout goes into the last slot (i == 31 after the loop) */
+	timeout = ((cfg->bc_timeout + 1) - 14) / 4;
+	if ( timeout > 0xf )
+		return -1;
+	list->rt_timeout[i] = timeout;
+
+	/* Transfer descriptor generation options */
+	tropts = 0;
+	if ( cfg->tropt_irq_on_err )
+		tropts |= 1<<28;
+	if ( cfg->tropt_pause_on_err )
+		tropts |= 1<<26;
+	list->tropts = tropts;
+
+	list->async_list = cfg->async_list;
+	list->bc = bc;
+
+	return 0;
+}
+
+/* Chain 'major' to 'next': set the software next pointer and make the last
+ * minor frame of 'major' continue into the first minor frame of 'next'
+ * (or terminate if next is NULL). A NULL 'major' is ignored. */
+void gr1553bc_list_link_major(
+	struct gr1553bc_major *major,
+	struct gr1553bc_major *next
+	)
+{
+	if ( major ) {
+		major->next = next;
+		if ( next ) {
+			major->minors[major->cfg->minor_cnt-1]->next =
+				next->minors[0];
+		} else {
+			major->minors[major->cfg->minor_cnt-1]->next = NULL;
+		}
+	}
+}
+
+/* Install 'major' as major frame number 'no' of the list and (re)link it
+ * circularly with its neighbours: the previous frame jumps into this one
+ * and this one jumps into the next. First and last frames wrap around.
+ * Neighbour slots that are still NULL are tolerated (link_major ignores
+ * NULL). Returns 0, or -1 if 'no' is out of range. */
+int gr1553bc_list_set_major(
+	struct gr1553bc_list *list,
+	struct gr1553bc_major *major,
+	int no)
+{
+	struct gr1553bc_major *prev, *next;
+
+	if ( no >= list->major_cnt )
+		return -1;
+
+	list->majors[no] = major;
+
+	/* Link previous Major frame with this one */
+	if ( no > 0 ) {
+		prev = list->majors[no-1];
+	} else {
+		/* First Major is linked with last major */
+		prev = list->majors[list->major_cnt-1];
+	}
+
+	/* Link to next Major if not the last one and if there is
+	 * a next major
+	 */
+	if ( no == list->major_cnt-1 ) {
+		/* The last major, assume that it is connected with the first */
+		next = list->majors[0];
+	} else {
+		next = list->majors[no+1];
+	}
+
+	/* Link previous frame to jump into this */
+	gr1553bc_list_link_major(prev, major);
+
+	/* Link This frame to jump into the next */
+	gr1553bc_list_link_major(major, next);
+
+	return 0;
+}
+
+/* Translate Descriptor address from CPU-address to Hardware Address by
+ * rebasing the offset within the table from table_cpu onto table_hw. */
+static inline union gr1553bc_bd *gr1553bc_bd_cpu2hw
+	(
+	struct gr1553bc_list *list,
+	union gr1553bc_bd *bd
+	)
+{
+	return (union gr1553bc_bd *)(((unsigned int)bd - list->table_cpu) +
+		list->table_hw);
+}
+
+/* Translate Descriptor address from HW-address to CPU Address; inverse of
+ * gr1553bc_bd_cpu2hw(). */
+static inline union gr1553bc_bd *gr1553bc_bd_hw2cpu
+	(
+	struct gr1553bc_list *list,
+	union gr1553bc_bd *bd
+	)
+{
+	return (union gr1553bc_bd *)(((unsigned int)bd - list->table_hw) +
+		list->table_cpu);
+}
+
+/* Bytes of descriptor memory one minor frame needs: its message slots,
+ * one trailing jump descriptor, and — when time-slot management is
+ * enabled — one extra descriptor holding the unused time. */
+int gr1553bc_minor_table_size(struct gr1553bc_minor *minor)
+{
+	struct gr1553bc_minor_cfg *mincfg = minor->cfg;
+	int slot_cnt;
+
+	/* SLOTS + JUMP */
+	slot_cnt = mincfg->slot_cnt + 1;
+	if ( mincfg->timeslot ) {
+		/* time management requires 1 extra slot */
+		slot_cnt++;
+	}
+
+	return slot_cnt * GR1553BC_BD_SIZE;
+}
+
+/* Total bytes of descriptor memory required by all minor frames of all
+ * major frames, including per-minor alignment padding.
+ *
+ * NOTE(review): the alignment expression below adds GR1553BC_BD_ALIGN when
+ * 'size' is already aligned and adds 0 when it is misaligned — the inverse
+ * of the usual round-up. Since gr1553bc_list_table_alloc() re-aligns each
+ * minor with the standard formula and descriptor sizes appear to be
+ * multiples of the alignment, this looks like a harmless over-estimate —
+ * TODO confirm.
+ */
+int gr1553bc_list_table_size(struct gr1553bc_list *list)
+{
+	struct gr1553bc_major *major;
+	int i, j, minor_cnt, size;
+
+	size = 0;
+	for (i=0; i<list->major_cnt; i++) {
+		major = list->majors[i];
+		minor_cnt = major->cfg->minor_cnt;
+		for (j=0; j<minor_cnt; j++) {
+			/* 128-bit Alignment required by HW */
+			size += (GR1553BC_BD_ALIGN -
+				(size & (GR1553BC_BD_ALIGN-1))) &
+				~(GR1553BC_BD_ALIGN-1);
+
+			/* Size required by descriptors */
+			size += gr1553bc_minor_table_size(major->minors[j]);
+		}
+	}
+
+	return size;
+}
+
+/* Allocate (or adopt) the descriptor table of a list and hand out aligned
+ * per-minor descriptor arrays.
+ *
+ * bdtab_custom selects the memory source:
+ *  - NULL: table is malloc'ed internally;
+ *  - even pointer: caller-provided CPU-accessible memory;
+ *  - pointer with bit0 set: caller-provided HW (DMA) address, translated
+ *    to a CPU address via the driver manager.
+ * Returns 0 on success, -1 on allocation failure.
+ *
+ * NOTE(review): the HW-address branch dereferences *bcpriv->pdev without
+ * checking bcpriv for NULL, while the CPU branch does check — so a
+ * HW-addressed table presumably requires a configured BC device; confirm.
+ */
+int gr1553bc_list_table_alloc
+	(
+	struct gr1553bc_list *list,
+	void *bdtab_custom
+	)
+{
+	struct gr1553bc_major *major;
+	int i, j, minor_cnt, size;
+	unsigned int table;
+	struct gr1553bc_priv *bcpriv = list->bc;
+
+	/* Free previous allocated descriptor table */
+	gr1553bc_list_table_free(list);
+
+	/* Remember user's settings for uninitialization */
+	list->_table_custom = bdtab_custom;
+
+	/* Get Size required for descriptors */
+	size = gr1553bc_list_table_size(list);
+
+	if ((unsigned int)bdtab_custom & 0x1) {
+		/* Address given in Hardware accessible address, we
+		 * convert it into CPU-accessible address.
+		 */
+		list->table_hw = (unsigned int)bdtab_custom & ~0x1;
+		list->_table = bdtab_custom;
+		drvmgr_translate_check(
+			*bcpriv->pdev,
+			DMAMEM_TO_CPU,
+			(void *)list->table_hw,
+			(void **)&list->table_cpu,
+			size);
+	} else {
+		if (bdtab_custom == NULL) {
+			/* Allocate descriptors; over-allocate so the base
+			 * can be aligned below. */
+			list->_table = grlib_malloc(size + (GR1553BC_BD_ALIGN-1));
+			if ( list->_table == NULL )
+				return -1;
+		} else {
+			/* Custom address, given in CPU-accessible address */
+			list->_table = bdtab_custom;
+		}
+		/* 128-bit Alignment required by HW */
+		list->table_cpu =
+			(((unsigned int)list->_table + (GR1553BC_BD_ALIGN-1)) &
+			~(GR1553BC_BD_ALIGN-1));
+
+		/* We got CPU accessible descriptor table address, now we
+		 * translate that into an address that the Hardware can
+		 * understand
+		 */
+		if (bcpriv) {
+			drvmgr_translate_check(
+				*bcpriv->pdev,
+				CPUMEM_TO_DMA,
+				(void *)list->table_cpu,
+				(void **)&list->table_hw,
+				size
+				);
+		} else {
+			list->table_hw = list->table_cpu;
+		}
+	}
+
+	/* Write End-Of-List all over the descriptor table here,
+	 * For debugging/safety?
+	 */
+
+	/* Assign descriptors to all minor frames. The addresses is
+	 * CPU-accessible addresses.
+	 */
+	table = list->table_cpu;
+	for (i=0; i<list->major_cnt; i++) {
+		major = list->majors[i];
+		minor_cnt = major->cfg->minor_cnt;
+		for (j=0; j<minor_cnt; j++) {
+			/* 128-bit Alignment required by HW */
+			table = (table + (GR1553BC_BD_ALIGN-1)) &
+				~(GR1553BC_BD_ALIGN-1);
+			major->minors[j]->bds = (union gr1553bc_bd *)table;
+
+			/* Calc size required by descriptors */
+			table += gr1553bc_minor_table_size(major->minors[j]);
+		}
+	}
+
+	return 0;
+}
+
+/* Release the descriptor table. Memory is only freed when it was allocated
+ * internally (_table_custom == NULL); caller-provided tables are just
+ * forgotten. Safe to call multiple times. */
+void gr1553bc_list_table_free(struct gr1553bc_list *list)
+{
+	if ( (list->_table_custom == NULL) && list->_table ) {
+		free(list->_table);
+	}
+	list->_table = NULL;
+	list->_table_custom = NULL;
+	list->table_cpu = 0;
+	list->table_hw = 0;
+}
+
+/* Init descriptor table provided by each minor frame,
+ * we link them together using unconditional JUMP.
+ *
+ * Each minor frame is laid out as:
+ *   BD[0..slot_cnt-1]  dummy message slots,
+ *   BD[slot_cnt]       optional free-time descriptor (when timeslot > 0),
+ *   BD[last]           unconditional jump to the next minor frame, or an
+ *                      end-of-list descriptor when there is no next frame.
+ * The jump/EOL descriptor carries the minor's MID plus a marker byte
+ * (NEXT_MINOR_MARKER, optionally NEXT_MINOR_MARKER_ASYNC) in its padding
+ * so the IRQ/indication code can recover the position from a BD pointer.
+ * Always returns 0.
+ */
+int gr1553bc_list_table_build(struct gr1553bc_list *list)
+{
+	struct gr1553bc_major *major;
+	struct gr1553bc_minor *minor;
+	struct gr1553bc_minor_cfg *mincfg;
+	int i, j, k, minor_cnt, marker;
+	union gr1553bc_bd *bds, *hwbd;
+
+	marker = NEXT_MINOR_MARKER;
+	if ( list->async_list )
+		marker |= NEXT_MINOR_MARKER_ASYNC;
+
+	/* Create Major linking */
+	for (i=0; i<list->major_cnt; i++) {
+		major = list->majors[i];
+		minor_cnt = major->cfg->minor_cnt;
+		for (j=0; j<minor_cnt; j++) {
+			minor = major->minors[j];
+			mincfg = minor->cfg;
+			bds = minor->bds;
+
+			/* BD[0..SLOTCNT-1] = message slots
+			 * BD[SLOTCNT+0] = END
+			 * BD[SLOTCNT+1] = JUMP
+			 *
+			 * or if no optional time slot handling:
+			 *
+			 * BD[0..SLOTCNT-1] = message slots
+			 * BD[SLOTCNT] = JUMP
+			 */
+
+			/* BD[0..SLOTCNT-1] */
+			for (k=0; k<mincfg->slot_cnt; k++) {
+				gr1553bc_bd_tr_init(
+					&bds[k].tr,
+					GR1553BC_TR_DUMMY_0,
+					GR1553BC_TR_DUMMY_1,
+					0,
+					0);
+			}
+
+			/* BD[SLOTCNT] (OPTIONAL)
+			 * If a minor frame is configured to be executed in
+			 * certain time (given a time slot), this descriptor
+			 * sums up all unused time. The time slot is
+			 * decremented when messages are inserted into the
+			 * minor frame and increased when messages are removed.
+			 */
+			if ( mincfg->timeslot > 0 ) {
+				gr1553bc_bd_tr_init(
+					&bds[k].tr,
+					GR1553BC_TR_DUMMY_0 | (mincfg->timeslot >> 2),
+					GR1553BC_TR_DUMMY_1,
+					0,
+					0);
+				k++;
+			}
+
+			/* Last descriptor is a jump to next minor frame, to a
+			 * synchronization point. If chain ends here, the list
+			 * is marked with a "end-of-list" marker.
+			 *
+			 */
+			if ( minor->next ) {
+				/* Translate CPU address of BD into HW address */
+				hwbd = gr1553bc_bd_cpu2hw(
+					list,
+					&minor->next->bds[0]
+					);
+				gr1553bc_bd_init(
+					&bds[k],
+					0xf,
+					GR1553BC_UNCOND_JMP,
+					(uint32_t)hwbd,
+					((GR1553BC_ID(i,j,k) << 8) | marker),
+					0
+					);
+			} else {
+				gr1553bc_bd_init(
+					&bds[k],
+					0xf,
+					GR1553BC_TR_EOL,
+					0,
+					((GR1553BC_ID(i,j,k) << 8) | marker),
+					0);
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* Write the four words of a descriptor selectively: bit N of 'flags'
+ * (0x1..0x8) enables writing wordN. With KEEP_TIMESLOT set and a transfer
+ * descriptor in place, the previously allocated time-slot bits of word0
+ * are preserved. Writes go through GR1553BC_WRITE_MEM (volatile, DMA
+ * visible). */
+void gr1553bc_bd_init(
+	union gr1553bc_bd *bd,
+	unsigned int flags,
+	uint32_t word0,
+	uint32_t word1,
+	uint32_t word2,
+	uint32_t word3
+	)
+{
+	struct gr1553bc_bd_raw *raw = &bd->raw;
+
+	if ( flags & 0x1 ) {
+		if ( (flags & KEEP_TIMESLOT) &&
+		     ((word0 & GR1553BC_BD_TYPE) == 0) ) {
+			/* Don't touch timeslot previously allocated */
+			word0 &= ~GR1553BC_TR_TIME;
+			word0 |= GR1553BC_READ_MEM(&raw->words[0]) &
+				 GR1553BC_TR_TIME;
+		}
+		GR1553BC_WRITE_MEM(&raw->words[0], word0);
+	}
+	if ( flags & 0x2 )
+		GR1553BC_WRITE_MEM(&raw->words[1], word1);
+	if ( flags & 0x4 )
+		GR1553BC_WRITE_MEM(&raw->words[2], word2);
+	if ( flags & 0x8 )
+		GR1553BC_WRITE_MEM(&raw->words[3], word3);
+}
+
+/* Alloc a Major frame according to the configuration structure.
+ * One allocation holds the major header, its minor-pointer array and all
+ * minor frame structures; descriptors are NOT allocated here (see
+ * gr1553bc_list_table_alloc). On success *major is set and 0 returned;
+ * -1 on bad arguments or out of memory. */
+int gr1553bc_major_alloc_skel
+	(
+	struct gr1553bc_major **major,
+	struct gr1553bc_major_cfg *cfg
+	)
+{
+	struct gr1553bc_major *maj;
+	struct gr1553bc_minor *minor;
+	size_t size;
+	int i;
+
+	if ( (cfg == NULL) || (major == NULL) || (cfg->minor_cnt <= 0) )
+		return -1;
+
+	/* Allocate Major Frame description, but no descriptors */
+	size = sizeof(*maj) + cfg->minor_cnt *
+		(sizeof(*minor) + sizeof(void *));
+	maj = grlib_malloc(size);
+	if ( maj == NULL )
+		return -1;
+
+	maj->cfg = cfg;
+	maj->next = NULL;
+
+	/* Create links between minor frames, and from minor frames
+	 * to configuration structure. The minor structures start right
+	 * after the minor-pointer array.
+	 */
+	minor = (struct gr1553bc_minor *)&maj->minors[cfg->minor_cnt];
+	for (i=0; i<cfg->minor_cnt; i++, minor++) {
+		maj->minors[i] = minor;
+		minor->next = minor + 1;
+		minor->cfg = &cfg->minor_cfgs[i];
+		minor->alloc = 0;
+		minor->bds = NULL;
+	}
+	/* last Minor should point to next Major frame's first minor,
+	 * we do that somewhere else.
+	 */
+	(minor - 1)->next = NULL;
+
+	*major = maj;
+
+	return 0;
+}
+
+/* Look up the major frame addressed by the major part of a MID.
+ * Returns NULL when the major number is out of range. */
+struct gr1553bc_major *gr1553bc_major_from_id
+	(
+	struct gr1553bc_list *list,
+	int mid
+	)
+{
+	int major_no;
+
+	/* Find Minor Frame from MID */
+	major_no = GR1553BC_MAJID_FROM_ID(mid);
+
+	if ( major_no >= list->major_cnt )
+		return NULL;
+	return list->majors[major_no];
+}
+
+/* Look up the minor frame addressed by the major+minor parts of a MID.
+ * Returns NULL when either part is out of range. */
+struct gr1553bc_minor *gr1553bc_minor_from_id
+	(
+	struct gr1553bc_list *list,
+	int mid
+	)
+{
+	int minor_no;
+	struct gr1553bc_major *major;
+
+	/* Get Major from ID */
+	major = gr1553bc_major_from_id(list, mid);
+	if ( major == NULL )
+		return NULL;
+
+	/* Find Minor Frame from MID */
+	minor_no = GR1553BC_MINID_FROM_ID(mid);
+
+	if ( minor_no >= major->cfg->minor_cnt )
+		return NULL;
+	return major->minors[minor_no];
+}
+
+/* Resolve a MID to the CPU address of its descriptor. A slot id of 0xff
+ * ("any slot") maps to slot 0. Returns NULL when the major/minor part of
+ * the MID is invalid. */
+union gr1553bc_bd *gr1553bc_slot_bd
+	(
+	struct gr1553bc_list *list,
+	int mid
+	)
+{
+	struct gr1553bc_minor *minor;
+	int slot_no;
+
+	/*** look up BD ***/
+
+	/* Get minor */
+	minor = gr1553bc_minor_from_id(list, mid);
+	if ( minor == NULL )
+		return NULL;
+
+	/* Get Slot */
+	slot_no = GR1553BC_SLOTID_FROM_ID(mid);
+	if ( slot_no >= 0xff )
+		slot_no = 0;
+
+	/* Get BD address */
+	return &minor->bds[slot_no];
+}
+
+/* Find the lowest unallocated slot of a minor frame by scanning the
+ * 'alloc' bitmask from bit 0. Returns the slot number, or -1 when all
+ * slots (or all 32 mask bits) are taken. */
+static int gr1553bc_minor_first_avail(struct gr1553bc_minor *minor)
+{
+	int slot_num;
+	uint32_t alloc;
+
+	alloc = minor->alloc;
+	if ( alloc == 0xffffffff ) {
+		/* No free */
+		return -1;
+	}
+	slot_num = 0;
+	while ( alloc & 1 ) {
+		alloc = alloc >> 1;
+		slot_num++;
+	}
+	if ( slot_num >= minor->cfg->slot_cnt ) {
+		/* no free */
+		return -1;
+	}
+	return slot_num;
+}
+
+/* Allocate a slot addressed by *mid within a list; convenience wrapper
+ * that resolves the minor frame and defers to gr1553bc_slot_alloc2(). */
+int gr1553bc_slot_alloc(
+	struct gr1553bc_list *list,
+	int *mid,
+	int timeslot,
+	union gr1553bc_bd **bd
+	)
+{
+	struct gr1553bc_minor *minor = gr1553bc_minor_from_id(list, *mid);
+
+	return gr1553bc_slot_alloc2(minor, mid, timeslot, bd);
+}
+
+/* Same as gr1553bc_slot_alloc but identifies a minor instead of list.
+ * The major/minor part of MID is ignored.
+ *
+ * Slot id 0xff in *mid means "first available slot"; any other id requests
+ * that exact slot. When 'timeslot' (microseconds) is positive and the
+ * minor frame manages time, the time is taken from the frame's free-time
+ * descriptor (fails if not enough remains). The slot is initialized as a
+ * dummy transfer carrying the allocated time, its bit is set in the alloc
+ * mask, *mid is updated with the chosen slot, and on request *bd receives
+ * the descriptor address. Returns 0 on success, -1 on any failure.
+ */
+int gr1553bc_slot_alloc2(
+	struct gr1553bc_minor *minor,
+	int *mid,
+	int timeslot,
+	union gr1553bc_bd **bd
+	)
+{
+	int slot_no;
+	uint32_t set0;
+	int timefree;
+	struct gr1553bc_bd_tr *trbd;
+	struct gr1553bc_minor_cfg *mincfg;
+
+	if ( minor == NULL )
+		return -1;
+
+	mincfg = minor->cfg;
+
+	/* Find first free slot if not a certain slot is requested */
+	slot_no = GR1553BC_SLOTID_FROM_ID(*mid);
+	if ( slot_no == 0xff ) {
+		slot_no = gr1553bc_minor_first_avail(minor);
+		if ( slot_no < 0 )
+			return -1;
+	} else {
+		/* Allocate a certain slot, check that it is free */
+		if ( slot_no >= mincfg->slot_cnt )
+			return -1;
+		if ( (1<<slot_no) & minor->alloc )
+			return -1;
+	}
+
+	/* Ok, we got our slot. Lets allocate time for slot if requested by user
+	 * and time management is enabled for this Minor Frame.
+	 */
+	if ( timeslot > 0 ) {
+		/* Make timeslot on a 4us boundary (time resolution of core) */
+		timeslot = (timeslot + 0x3) >> 2;
+
+		if ( mincfg->timeslot ) {
+			/* Subtract requested time from free time */
+			trbd = &minor->bds[mincfg->slot_cnt].tr;
+			set0 = GR1553BC_READ_MEM(&trbd->settings[0]);
+			timefree = set0 & GR1553BC_TR_TIME;
+			if ( timefree < timeslot ) {
+				/* Not enough time left to schedule slot in minor */
+				return -1;
+			}
+			/* Store back the time left */
+			timefree -= timeslot;
+			set0 = (set0 & ~GR1553BC_TR_TIME) | timefree;
+			GR1553BC_WRITE_MEM(&trbd->settings[0], set0);
+			/* Note: at the moment the minor frame can be executed faster
+			 * than expected, we hurry up writing requested
+			 * descriptor.
+			 */
+		}
+	}
+
+	/* Make the allocated descriptor be an empty slot with the
+	 * timeslot requested.
+	 */
+	trbd = &minor->bds[slot_no].tr;
+	gr1553bc_bd_tr_init(
+		trbd,
+		GR1553BC_TR_DUMMY_0 | timeslot,
+		GR1553BC_TR_DUMMY_1,
+		0,
+		0);
+
+	/* Allocate slot */
+	minor->alloc |= 1<<slot_no;
+
+	if ( bd )
+		*bd = (union gr1553bc_bd *)trbd;
+	*mid = GR1553BC_ID_SET_SLOT(*mid, slot_no);
+
+	return 0;
+}
+
+/* Return time slot freed (if time is managed by driver), negative on error.
+ * Convenience wrapper resolving the minor frame from the MID. */
+int gr1553bc_slot_free(struct gr1553bc_list *list, int mid)
+{
+	struct gr1553bc_minor *minor = gr1553bc_minor_from_id(list, mid);
+
+	return gr1553bc_slot_free2(minor, mid);
+}
+
+/* Return time slot freed (if time is managed by driver), negative on error.
+ *
+ * Undoes gr1553bc_slot_alloc2(): when the slot held a transfer descriptor
+ * with allocated time, that time is credited back to the minor frame's
+ * free-time descriptor; the slot is rewritten as a dummy transfer and its
+ * bit cleared from the alloc mask. Returns the freed time in microseconds
+ * (0 when none was held), or -1 if the slot was not allocated.
+ */
+int gr1553bc_slot_free2(struct gr1553bc_minor *minor, int mid)
+{
+	union gr1553bc_bd *bd;
+	struct gr1553bc_bd_tr *endbd;
+	struct gr1553bc_minor_cfg *mincfg;
+	int slot_no, timeslot, timefree;
+	uint32_t word0, set0;
+
+	if ( minor == NULL )
+		return -1;
+
+	slot_no = GR1553BC_SLOTID_FROM_ID(mid);
+
+	if ( (minor->alloc & (1<<slot_no)) == 0 )
+		return -1;
+
+	bd = &minor->bds[slot_no];
+
+	/* If the driver handles time for this minor frame, return
+	 * time if previuosly requested.
+	 */
+	timeslot = 0;
+	mincfg = minor->cfg;
+	if ( mincfg->timeslot > 0 ) {
+		/* Find out if message slot had time allocated */
+		word0 = GR1553BC_READ_MEM(&bd->raw.words[0]);
+		if ( word0 & GR1553BC_BD_TYPE ) {
+			/* Condition ==> no time slot allocated */
+		} else {
+			/* Transfer descriptor, may have time slot */
+			timeslot = word0 & GR1553BC_TR_TIME;
+			if ( timeslot > 0 ) {
+				/* Return previously allocated time to END
+				 * TIME descriptor.
+				 */
+				endbd = &minor->bds[mincfg->slot_cnt].tr;
+				set0 = GR1553BC_READ_MEM(&endbd->settings[0]);
+				timefree = set0 & GR1553BC_TR_TIME;
+				timefree += timeslot;
+				set0 = (set0 & ~GR1553BC_TR_TIME) | timefree;
+				GR1553BC_WRITE_MEM(&endbd->settings[0], set0);
+				/* Note: at the moment the minor frame can be
+				 * executed slower than expected, the
+				 * timeslot is at two locations.
+				 */
+			}
+		}
+	}
+
+	/* Make slot an empty message */
+	gr1553bc_bd_tr_init(
+		&bd->tr,
+		GR1553BC_TR_DUMMY_0,
+		GR1553BC_TR_DUMMY_1,
+		0,
+		0);
+
+	/* unallocate descriptor */
+	minor->alloc &= ~(1<<slot_no);
+
+	/* Return time freed in microseconds (HW unit is 4us) */
+	return timeslot << 2;
+}
+
+/* Report the unused time of the minor frame addressed by 'mid'; wrapper
+ * around gr1553bc_minor_freetime(). */
+int gr1553bc_list_freetime(struct gr1553bc_list *list, int mid)
+{
+	struct gr1553bc_minor *minor = gr1553bc_minor_from_id(list, mid);
+
+	return gr1553bc_minor_freetime(minor);
+}
+
+/* Report the unallocated time (microseconds) of a time-managed minor
+ * frame, read from its free-time descriptor. Returns 0 when the frame
+ * does not manage time, -1 when minor is NULL. */
+int gr1553bc_minor_freetime(struct gr1553bc_minor *minor)
+{
+	struct gr1553bc_bd_tr *endbd;
+	struct gr1553bc_minor_cfg *mincfg;
+	int timefree;
+	uint32_t set0;
+
+	if ( minor == NULL )
+		return -1;
+
+	/* If the driver handles time for this minor frame, return
+	 * time if previuosly requested.
+	 */
+	timefree = 0;
+	mincfg = minor->cfg;
+	if ( mincfg->timeslot > 0 ) {
+		/* Read remaining time from the END TIME descriptor;
+		 * HW stores it in 4us units, hence the << 2.
+		 */
+		endbd = &minor->bds[mincfg->slot_cnt].tr;
+		set0 = GR1553BC_READ_MEM(&endbd->settings[0]);
+		timefree = (set0 & GR1553BC_TR_TIME) << 2;
+	}
+
+	/* Return time freed */
+	return timefree;
+}
+
+/* Write raw descriptor words into the slot addressed by 'mid'. 'flags'
+ * selects which of word0..word3 are written (see gr1553bc_bd_init, which
+ * also honors KEEP_TIMESLOT). Unlike gr1553bc_slot_bd(), the slot id must
+ * address a real slot — 0xff is rejected by the range check.
+ * Returns 0 on success, -1 on a bad MID. */
+int gr1553bc_slot_raw
+	(
+	struct gr1553bc_list *list,
+	int mid,
+	unsigned int flags,
+	uint32_t word0,
+	uint32_t word1,
+	uint32_t word2,
+	uint32_t word3
+	)
+{
+	struct gr1553bc_minor *minor;
+	union gr1553bc_bd *bd;
+	int slot_no;
+
+	minor = gr1553bc_minor_from_id(list, mid);
+	if ( minor == NULL )
+		return -1;
+
+	/* Get Slot */
+	slot_no = GR1553BC_SLOTID_FROM_ID(mid);
+	if ( slot_no >= minor->cfg->slot_cnt ) {
+		return -1;
+	}
+
+	/* Get descriptor */
+	bd = &minor->bds[slot_no];
+
+	/* Build empty descriptor. */
+	gr1553bc_bd_init(
+		bd,
+		flags,
+		word0,
+		word1,
+		word2,
+		word3);
+
+	return 0;
+}
+
+/* Create unconditional IRQ at a custom defined location.
+ * The IRQ is disabled, enable it with gr1553bc_slot_irq_enable().
+ *
+ * The slot at 'mid' is written as an unconditional jump to the following
+ * slot; the callback 'func' and its argument 'data' are stored in the
+ * descriptor's padding words so the IRQ handler can invoke them once the
+ * jump is switched to an IRQ (word0 change only).
+ * Returns 0 on success, -1 on a bad MID.
+ */
+int gr1553bc_slot_irq_prepare
+	(
+	struct gr1553bc_list *list,
+	int mid,
+	bcirq_func_t func,
+	void *data
+	)
+{
+	union gr1553bc_bd *bd;
+	int slot_no, to_mid;
+
+	/* Build unconditional IRQ descriptor. The padding is used
+	 * for identifying the MINOR frame and function and custom data.
+	 *
+	 * The IRQ is disabled at first, a unconditional jump to next
+	 * descriptor in table.
+	 */
+
+	/* Get BD address of jump destination (the slot after 'mid') */
+	slot_no = GR1553BC_SLOTID_FROM_ID(mid);
+	to_mid = GR1553BC_ID_SET_SLOT(mid, slot_no + 1);
+	bd = gr1553bc_slot_bd(list, to_mid);
+	if ( bd == NULL )
+		return -1;
+	bd = gr1553bc_bd_cpu2hw(list, bd);
+
+	return gr1553bc_slot_raw(
+		list,
+		mid,
+		0xF,
+		GR1553BC_UNCOND_JMP,
+		(uint32_t)bd,
+		(uint32_t)func,
+		(uint32_t)data
+		);
+}
+
+/* Enable previously prepared unconditional IRQ */
+int gr1553bc_slot_irq_enable(struct gr1553bc_list *list, int mid)
+{
+ /* Leave word1..3 untouched:
+ * 1. Unconditional Jump address
+ * 2. Function
+ * 3. Custom Data
+ *
+ * Since only one bit is changed in word0 (Condition word),
+ * no hardware/software races will exist ==> it is safe
+ * to enable/disable IRQ at any time independent of where
+ * hardware is in table.
+ */
+ return gr1553bc_slot_raw(
+ list,
+ mid,
+ 0x1, /* change only WORD0 */
+ GR1553BC_UNCOND_IRQ,
+ 0,
+ 0,
+ 0);
+}
+
+/* Disable unconditional IRQ point, changed to unconditional JUMP
+ * to descriptor following.
+ * After disabling it it can be enabled again, or freed.
+ */
+int gr1553bc_slot_irq_disable(struct gr1553bc_list *list, int mid)
+{
+ return gr1553bc_slot_raw(
+ list,
+ mid,
+ 0x1, /* change only WORD0, JUMP address already set */
+ GR1553BC_UNCOND_JMP,
+ 0,
+ 0,
+ 0);
+}
+
+int gr1553bc_slot_empty(struct gr1553bc_list *list, int mid)
+{
+ return gr1553bc_slot_raw(
+ list,
+ mid,
+ 0xF | KEEP_TIMESLOT,
+ GR1553BC_TR_DUMMY_0,
+ GR1553BC_TR_DUMMY_1,
+ 0,
+ 0);
+}
+
+int gr1553bc_slot_exttrig(struct gr1553bc_list *list, int mid)
+{
+ return gr1553bc_slot_raw(
+ list,
+ mid,
+ 0xF | KEEP_TIMESLOT,
+ GR1553BC_TR_DUMMY_0 | GR1553BC_TR_EXTTRIG,
+ GR1553BC_TR_DUMMY_1,
+ 0,
+ 0);
+}
+
+int gr1553bc_slot_jump
+ (
+ struct gr1553bc_list *list,
+ int mid,
+ uint32_t condition,
+ int to_mid
+ )
+{
+ union gr1553bc_bd *bd;
+
+ /* Get BD address */
+ bd = gr1553bc_slot_bd(list, to_mid);
+ if ( bd == NULL )
+ return -1;
+ /* Convert into an address that the HW understand */
+ bd = gr1553bc_bd_cpu2hw(list, bd);
+
+ return gr1553bc_slot_raw(
+ list,
+ mid,
+ 0xF,
+ condition,
+ (uint32_t)bd,
+ 0,
+ 0);
+}
+
/* Program a 1553 transfer into the slot addressed by MID.
 *
 * options: transfer options merged into the descriptor settings words
 * tt:      transfer type word; encodes RX RT address (bits 26..22) and
 *          TX RT address (bits 16..12) among the other transfer fields
 * dptr:    data buffer pointer; if bit0 is set it is taken as a
 *          CPU-local address and translated to a 1553-core accessible
 *          address (useful for AMBA-over-PCI)
 *
 * Returns 0 on success, -1 when the slot cannot be resolved.
 */
int gr1553bc_slot_transfer(
	struct gr1553bc_list *list,
	int mid,
	int options,
	int tt,
	uint16_t *dptr)
{
	uint32_t set0, set1;
	union gr1553bc_bd *bd;
	int rx_rtadr, tx_rtadr, timeout;

	/* Get BD address */
	bd = gr1553bc_slot_bd(list, mid);
	if ( bd == NULL )
		return -1;

	/* Translate Data pointer from CPU-local to 1553-core accessible
	 * address if user wants that (bit0 of dptr set). This may be
	 * useful for AMBA-over-PCI cores.
	 */
	if ( (unsigned int)dptr & 0x1 ) {
		struct gr1553bc_priv *bcpriv = list->bc;

		drvmgr_translate(
			*bcpriv->pdev,
			CPUMEM_TO_DMA,
			(void *)((unsigned int)dptr & ~0x1),
			(void **)&dptr);
	}

	/* It is assumed that the descriptor has already been initialized
	 * as a empty slot (Dummy bit set), so to avoid races the dummy
	 * bit is cleared last.
	 *
	 * If we knew that the write would do a burst (for example over SpW)
	 * it would be safe to write in order.
	 */

	/* Preserve timeslot already allocated to this slot */
	set0 = GR1553BC_READ_MEM(&bd->tr.settings[0]);
	set0 &= GR1553BC_TR_TIME;
	set0 |= options & 0x61f00000;
	set0 |= list->tropts; /* Global options */

	/* Set transfer type, bus and let RT tolerance table descide
	 * responce tolerance.
	 *
	 * If a destination address is specified the longest timeout
	 * tolerance is taken.
	 */
	rx_rtadr = (tt >> 22) & 0x1f;
	tx_rtadr = (tt >> 12) & 0x1f;
	if ( (tx_rtadr != 0x1f) &&
	     (list->rt_timeout[rx_rtadr] < list->rt_timeout[tx_rtadr]) ) {
		timeout = list->rt_timeout[tx_rtadr];
	} else {
		timeout = list->rt_timeout[rx_rtadr];
	}
	set1 = ((timeout & 0xf) << 27) | (tt & 0x27ffffff) | ((options & 0x3)<<30);

	/* Write order matters: settings[1] carries the Dummy bit and is
	 * therefore written last, activating the transfer atomically.
	 */
	GR1553BC_WRITE_MEM(&bd->tr.settings[0], set0);
	GR1553BC_WRITE_MEM(&bd->tr.dptr, (uint32_t)dptr);
	/* Write UNUSED BIT, when cleared it Indicates that BC has written it */
	GR1553BC_WRITE_MEM(&bd->tr.status, 0x80000000);
	GR1553BC_WRITE_MEM(&bd->tr.settings[1], set1);

	return 0;
}
+
/* Update the data pointer and/or collect+clear the status of a transfer
 * descriptor.
 *
 * dptr: new data buffer pointer, or NULL to leave it unchanged. If bit0
 *       is set the address is translated from CPU-local to 1553-core
 *       accessible (AMBA-over-PCI).
 * stat: in/out. On entry *stat selects which status bits to clear
 *       (0 = read-only); on return *stat holds the descriptor status.
 *
 * Returns 0 on success, -1 when the slot cannot be resolved.
 */
int gr1553bc_slot_update
	(
	struct gr1553bc_list *list,
	int mid,
	uint16_t *dptr,
	unsigned int *stat
	)
{
	union gr1553bc_bd *bd;
	unsigned int status;
	unsigned int dataptr = (unsigned int)dptr;

	/* Get BD address */
	bd = gr1553bc_slot_bd(list, mid);
	if ( bd == NULL )
		return -1;

	/* Write new Data Pointer if needed */
	if ( dataptr ) {
		struct gr1553bc_priv *bcpriv = list->bc;

		/* Translate Data pointer from CPU-local to 1553-core accessible
		 * address if user wants that. This may be useful for AMBA-over-PCI
		 * cores.
		 *
		 * NOTE(review): the translated address is returned through
		 * dptr while the untranslated 'dataptr' (with bit0 cleared
		 * by the hardware ignoring it or not) is what gets written
		 * below - confirm against the wanted translation semantics.
		 */
		if ( dataptr & 0x1 ) {
			drvmgr_translate(
				*bcpriv->pdev,
				CPUMEM_TO_DMA,
				(void *)(dataptr & ~0x1),
				(void **)&dptr
				);
		}

		/* Update Data Pointer */
		GR1553BC_WRITE_MEM(&bd->tr.dptr, dataptr);
	}

	/* Get status of transfer descriptor */
	if ( stat ) {
		status = *stat;	/* caller's clear-mask */
		*stat = GR1553BC_READ_MEM(&bd->tr.status);
		if ( status ) {
			/* Clear status fields user selects, then
			 * or bit31 if user wants that. The bit31
			 * may be used to indicate if the BC has
			 * performed the access.
			 */
			status = (*stat & (status & 0xffffff)) |
				 (status & (1<<31));
			GR1553BC_WRITE_MEM(&bd->tr.status, status);
		}
	}

	return 0;
}
+
+int gr1553bc_slot_dummy(
+ struct gr1553bc_list *list,
+ int mid,
+ unsigned int *dummy)
+{
+ union gr1553bc_bd *bd;
+ unsigned int set1, new_set1;
+
+ /* Get BD address */
+ bd = gr1553bc_slot_bd(list, mid);
+ if ( bd == NULL )
+ return -1;
+ /* Update the Dummy Bit */
+ set1 = GR1553BC_READ_MEM(&bd->tr.settings[1]);
+ new_set1 = (set1 & ~GR1553BC_TR_DUMMY_1) | (*dummy & GR1553BC_TR_DUMMY_1);
+ GR1553BC_WRITE_MEM(&bd->tr.settings[1], new_set1);
+
+ *dummy = set1;
+
+ return 0;
+}
+
/* Find the MID (message slot ID) of a descriptor from its pointer.
 *
 * Works by scanning forward from 'bd' until a descriptor is found that
 * carries an embedded MID marker (the jump placed at the end of every
 * minor frame, or the End-Of-List descriptor), then subtracting the
 * scan distance to recover the MID of the original descriptor.
 *
 * mid:   output, MID of 'bd' (may be NULL)
 * async: output, non-zero if the descriptor belongs to the async list
 *        (may be NULL)
 *
 * Returns 0 on success, -1 when no marker is found within range.
 */
int gr1553bc_mid_from_bd(
	union gr1553bc_bd *bd,
	int *mid,
	int *async
	)
{
	int i, bdmid, slot_no;
	uint32_t word0, word2;

	/* Find Jump to next Minor Frame or End-Of-List,
	 * at those locations we have stored a MID
	 *
	 * GR1553BC_SLOT_MAX+2 = Worst case, BD is max distance from jump
	 * descriptor. 2=END and Jump descriptors.
	 */
	for (i=0; i<GR1553BC_SLOT_MAX+2; i++) {
		word0 = GR1553BC_READ_MEM(&bd->raw.words[0]);
		if ( word0 & GR1553BC_BD_TYPE ) {
			if ( word0 == GR1553BC_UNCOND_JMP ) {
				/* May be a unconditional IRQ set by user. In
				 * that case the function is stored in WORD3,
				 * functions must be aligned to 4 byte boudary.
				 */
				word2 = GR1553BC_READ_MEM(&bd->raw.words[2]);
				if ( word2 & NEXT_MINOR_MARKER ) {
					goto found_mid;
				}
			} else if ( word0 == GR1553BC_TR_EOL ) {
				/* End-Of-List, does contain a MID */
				word2 = GR1553BC_READ_MEM(&bd->raw.words[2]);
				goto found_mid;
			}
		}
		bd++;
	}

	/* No MID marker within the worst-case distance */
	return -1;

found_mid:
	/* Get MID of JUMP descriptor (stored in the upper bits of WORD2) */
	bdmid = word2 >> 8;
	/* Subtract distance from JUMP descriptor to find MID
	 * of requested BD.
	 */
	slot_no = GR1553BC_SLOTID_FROM_ID(bdmid);
	slot_no -= i;
	bdmid = GR1553BC_ID_SET_SLOT(bdmid, slot_no);

	if ( mid )
		*mid = bdmid;

	/* Determine which list BD belongs to: async or sync */
	if ( async )
		*async = word2 & NEXT_MINOR_MARKER_ASYNC;

	return 0;
}
+
+/*************** END OF LIST HANDLING ROUTINES ***************/
+
+/*************** DEVICE HANDLING ROUTINES ***************/
+
+void gr1553bc_device_init(struct gr1553bc_priv *priv);
+void gr1553bc_device_uninit(struct gr1553bc_priv *priv);
+void gr1553bc_isr(void *data);
+
+/*** GR1553BC driver ***/
+
/* Register the GR1553B BC driver with the driver manager. */
void gr1553bc_register(void)
{
	/* The BC driver relies on the common GR1553B driver, make sure
	 * it is registered as well.
	 */
	gr1553_register();
}
+
/* Default descriptor-IRQ callback, installed when the user has not set
 * one via gr1553bc_irq_setup(). Intentionally a no-op.
 */
static void gr1553bc_isr_std(union gr1553bc_bd *bd, void *data)
{
	/* Do nothing */
}
+
/* Take a GR1553BC hardware device identified by minor.
 * A pointer is returned that is used internally by the GR1553BC
 * driver, it is used as an input parameter 'bc' to all other
 * functions that manipulate the hardware.
 *
 * Returns NULL on failure (device busy/unknown, out of memory, or IRQ
 * registration failure).
 */
void *gr1553bc_open(int minor)
{
	struct drvmgr_dev **pdev = NULL;
	struct gr1553bc_priv *priv = NULL;
	struct amba_dev_info *ambadev;
	struct ambapp_core *pnpinfo;
	void *irq_log_p = NULL;

	/* Allocate requested device */
	pdev = gr1553_bc_open(minor);
	if ( pdev == NULL )
		goto fail;

	/* Over-allocate IRQ log so gr1553bc_device_init() can align the
	 * log base to GR1553BC_IRQLOG_SIZE.
	 */
	irq_log_p = grlib_malloc(GR1553BC_IRQLOG_SIZE*2);
	if ( irq_log_p == NULL )
		goto fail;

	priv = grlib_calloc(1, sizeof(*priv));
	if ( priv == NULL )
		goto fail;

	/* Init BC device */
	priv->pdev = pdev;
	(*pdev)->priv = priv;
	priv->irq_log_p = irq_log_p;
	priv->started = 0;

	/* Get device information from AMBA PnP information */
	ambadev = (struct amba_dev_info *)(*pdev)->businfo;
	pnpinfo = &ambadev->info;
	priv->regs = (struct gr1553b_regs *)pnpinfo->apb_slv->start;

	SPIN_INIT(&priv->devlock, "gr1553bc");

	/* Reset hardware registers and software state */
	gr1553bc_device_init(priv);

	/* Register ISR handler (unmask at IRQ controller) */
	if ( drvmgr_interrupt_register(*priv->pdev, 0, "gr1553bc",
	     gr1553bc_isr, priv) ) {
		goto fail;
	}

	return priv;

fail:
	/* NOTE(review): on the IRQ-registration failure path (*pdev)->priv
	 * still points at the freed priv and the spinlock is not freed -
	 * verify whether gr1553_bc_close() is expected to clean that up.
	 */
	if ( pdev )
		gr1553_bc_close(pdev);
	if ( irq_log_p )
		free(irq_log_p);
	if ( priv )
		free(priv);
	return NULL;
}
+
/* Close a BC device previously opened with gr1553bc_open().
 * Stops both lists, resets the core, unregisters the ISR and frees all
 * driver resources. The order matters: hardware must be quiet before
 * the ISR is removed and memory released.
 */
void gr1553bc_close(void *bc)
{
	struct gr1553bc_priv *priv = bc;

	/* Stop Hardware (both sync and async lists) */
	gr1553bc_stop(bc, 0x3);

	/* Reset core registers and mask BC IRQ sources */
	gr1553bc_device_uninit(priv);

	/* Remove interrupt handler (mask IRQ at IRQ controller) */
	drvmgr_interrupt_unregister(*priv->pdev, 0, gr1553bc_isr, priv);

	/* Free device */
	gr1553_bc_close(priv->pdev);
	SPIN_FREE(&priv->devlock);
	free(priv->irq_log_p);
	free(priv);
}
+
/* Return the MID of the slot the BC is currently executing.
 *
 * async: non-zero selects the asynchronous list, zero the synchronous
 *        list
 * mid:   output, current MID
 *
 * Returns 0 on success, -1 if the MID cannot be derived from the
 * current descriptor pointer.
 */
int gr1553bc_indication(void *bc, int async, int *mid)
{
	struct gr1553bc_priv *priv = bc;
	union gr1553bc_bd *bd;

	/* Get current descriptor pointer from hardware and convert it
	 * into a CPU accessible address within the selected list.
	 */
	if ( async ) {
		bd = (union gr1553bc_bd *)
			GR1553BC_READ_REG(&priv->regs->bc_aslot);
		bd = gr1553bc_bd_hw2cpu(priv->alist, bd);
	} else {
		bd = (union gr1553bc_bd *)
			GR1553BC_READ_REG(&priv->regs->bc_slot);
		bd = gr1553bc_bd_hw2cpu(priv->list, bd);
	}

	return gr1553bc_mid_from_bd(bd, mid, NULL);
}
+
/* Start (or hot-swap) major frame processing.
 *
 * list:       synchronous list to install, or NULL to leave unchanged
 * list_async: asynchronous list to install, or NULL to leave unchanged
 *
 * Installs the list base pointers, starts the respective BC engines and
 * enables the BC IRQ sources on first start. Returns 0 on success, -1
 * when a list's first descriptor cannot be resolved.
 */
int gr1553bc_start(void *bc, struct gr1553bc_list *list, struct gr1553bc_list *list_async)
{
	struct gr1553bc_priv *priv = bc;
	union gr1553bc_bd *bd = NULL, *bd_async = NULL;
	uint32_t ctrl, irqmask;
	SPIN_IRQFLAGS(irqflags);

	/* Nothing to do when no list is given */
	if ( (list == NULL) && (list_async == NULL) )
		return 0;

	/* Find first descriptor in list, the descriptor
	 * first to be executed.
	 */
	ctrl = GR1553BC_KEY;
	if ( list ) {
		bd = gr1553bc_slot_bd(list, GR1553BC_ID(0,0,0));
		if ( bd == NULL )
			return -1;
		bd = gr1553bc_bd_cpu2hw(list, bd);
		ctrl |= GR1553B_BC_ACT_SCSRT;	/* start sync list */
	}
	if ( list_async ) {
		bd_async = gr1553bc_slot_bd(list_async, GR1553BC_ID(0,0,0));
		if ( bd_async == NULL )
			return -1;
		bd_async = gr1553bc_bd_cpu2hw(list_async, bd_async);
		ctrl |= GR1553B_BC_ACT_ASSRT;	/* start async list */
	}

	/* Do "hot-swapping" of lists under the device lock so ISR and
	 * other callers observe a consistent list/base-register pair.
	 */
	SPIN_LOCK_IRQ(&priv->devlock, irqflags);

	if ( list ) {
		priv->list = list;
		GR1553BC_WRITE_REG(&priv->regs->bc_bd, (uint32_t)bd);
	}
	if ( list_async ) {
		priv->alist = list_async;
		GR1553BC_WRITE_REG(&priv->regs->bc_abd, (uint32_t)bd_async);
	}

	/* If not enabled before, we enable it now. */
	GR1553BC_WRITE_REG(&priv->regs->bc_ctrl, ctrl);

	/* Enable IRQ on first start only */
	if ( priv->started == 0 ) {
		priv->started = 1;
		irqmask = GR1553BC_READ_REG(&priv->regs->imask);
		irqmask |= GR1553B_IRQEN_BCEVE|GR1553B_IRQEN_BCDE|GR1553B_IRQEN_BCWKE;
		GR1553BC_WRITE_REG(&priv->regs->imask, irqmask);
	}

	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);

	return 0;
}
+
+/* Pause GR1553 BC transfers */
+int gr1553bc_pause(void *bc)
+{
+ struct gr1553bc_priv *priv = bc;
+ uint32_t ctrl;
+ SPIN_IRQFLAGS(irqflags);
+
+ /* Do "hot-swapping" of lists */
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+ ctrl = GR1553BC_KEY | GR1553B_BC_ACT_SCSUS;
+ GR1553BC_WRITE_REG(&priv->regs->bc_ctrl, ctrl);
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return 0;
+}
+
+/* Restart GR1553 BC transfers, after being paused */
+int gr1553bc_restart(void *bc)
+{
+ struct gr1553bc_priv *priv = bc;
+ uint32_t ctrl;
+ SPIN_IRQFLAGS(irqflags);
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+ ctrl = GR1553BC_KEY | GR1553B_BC_ACT_SCSRT;
+ GR1553BC_WRITE_REG(&priv->regs->bc_ctrl, ctrl);
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return 0;
+}
+
+/* Stop BC transmission */
+int gr1553bc_stop(void *bc, int options)
+{
+ struct gr1553bc_priv *priv = bc;
+ uint32_t ctrl;
+ SPIN_IRQFLAGS(irqflags);
+
+ ctrl = GR1553BC_KEY;
+ if ( options & 0x1 )
+ ctrl |= GR1553B_BC_ACT_SCSTP;
+ if ( options & 0x2 )
+ ctrl |= GR1553B_BC_ACT_ASSTP;
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+ GR1553BC_WRITE_REG(&priv->regs->bc_ctrl, ctrl);
+ priv->started = 0;
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return 0;
+}
+
/* Reset software and BC hardware into a known "unused/init" state */
void gr1553bc_device_init(struct gr1553bc_priv *priv)
{
/* RESET HARDWARE REGISTERS */
	/* Stop BC if not already stopped.
	 * (0x0204 presumably stops both sync and async lists - TODO
	 * confirm the action bits against the GR1553B manual.)
	 */
	GR1553BC_WRITE_REG(&priv->regs->bc_ctrl, GR1553BC_KEY | 0x0204);

	/* Since RT can not be used at the same time as BC, we stop
	 * RT rx, it should already be stopped...
	 */
	GR1553BC_WRITE_REG(&priv->regs->rt_cfg, GR1553RT_KEY);

	/* Clear some registers */
	GR1553BC_WRITE_REG(&priv->regs->bc_bd, 0);
	GR1553BC_WRITE_REG(&priv->regs->bc_abd, 0);
	GR1553BC_WRITE_REG(&priv->regs->bc_timer, 0);
	GR1553BC_WRITE_REG(&priv->regs->bc_wake, 0);
	GR1553BC_WRITE_REG(&priv->regs->bc_irqptr, 0);
	GR1553BC_WRITE_REG(&priv->regs->bc_busmsk, 0);

/* PUT SOFTWARE INTO INITIAL STATE */
	priv->list = NULL;
	priv->alist = NULL;

	/* Align the IRQ log to its own size within the over-allocated
	 * buffer (see gr1553bc_open()).
	 */
	priv->irq_log_base = (uint32_t *)
		(((uint32_t)priv->irq_log_p + (GR1553BC_IRQLOG_SIZE-1)) &
		~(GR1553BC_IRQLOG_SIZE-1));
	/* Translate into a hardware accessible address */
	drvmgr_translate_check(
		*priv->pdev,
		CPUMEM_TO_DMA,
		(void *)priv->irq_log_base,
		(void **)&priv->irq_log_base_hw,
		GR1553BC_IRQLOG_SIZE);
	priv->irq_log_curr = priv->irq_log_base;
	priv->irq_log_end = &priv->irq_log_base[GR1553BC_IRQLOG_CNT-1];
	priv->irq_func = gr1553bc_isr_std;	/* no-op default callback */
	priv->irq_data = NULL;

	GR1553BC_WRITE_REG(&priv->regs->bc_irqptr,(uint32_t)priv->irq_log_base_hw);
}
+
+void gr1553bc_device_uninit(struct gr1553bc_priv *priv)
+{
+ uint32_t irqmask;
+
+ /* Stop BC if not already stopped */
+ GR1553BC_WRITE_REG(&priv->regs->bc_ctrl, GR1553BC_KEY | 0x0204);
+
+ /* Since RT can not be used at the same time as BC, we stop
+ * RT rx, it should already be stopped...
+ */
+ GR1553BC_WRITE_REG(&priv->regs->rt_cfg, GR1553RT_KEY);
+
+ /* Turn off IRQ generation */
+ irqmask=GR1553BC_READ_REG(&priv->regs->imask);
+ irqmask&=~(GR1553B_IRQEN_BCEVE|GR1553B_IRQEN_BCDE|GR1553B_IRQEN_BCWKE);
+ GR1553BC_WRITE_REG(&priv->regs->irq, irqmask);
+}
+
/* Interrupt handler (shared with the other GR1553B drivers).
 *
 * Acknowledges BC IRQ causes, then walks the hardware IRQ log from the
 * last handled entry up to the hardware's current write position. For
 * each logged descriptor: dispatches to the per-descriptor callback
 * stored in an unconditional-IRQ descriptor, or falls back to the
 * user's standard IRQ callback (gr1553bc_irq_setup()).
 */
void gr1553bc_isr(void *arg)
{
	struct gr1553bc_priv *priv = arg;
	uint32_t *curr, *pos, word0, word2;
	union gr1553bc_bd *bd;
	bcirq_func_t func;
	void *data;
	int handled, irq;
	SPIN_ISR_IRQFLAGS(irqflags);

	/* Did core make IRQ */
	irq = GR1553BC_READ_REG(&priv->regs->irq);
	irq &= (GR1553B_IRQEN_BCEVE|GR1553B_IRQEN_BCDE|GR1553B_IRQEN_BCWKE);
	if ( irq == 0 )
		return; /* Shared IRQ: some one else may have caused the IRQ */

	/* Clear handled IRQs */
	GR1553BC_WRITE_REG(&priv->regs->irq, irq);

	/* DMA error. This IRQ does not affect the IRQ log.
	 * We let standard IRQ handle handle it (bd==NULL marks the DMA
	 * error case for the callback).
	 */
	if ( irq & GR1553B_IRQEN_BCDE ) {
		priv->irq_func(NULL, priv->irq_data);
	}

	/* Get current posistion in hardware */
	pos = (uint32_t *)GR1553BC_READ_REG(&priv->regs->bc_irqptr);
	/* Converting into CPU address */
	pos = priv->irq_log_base +
		((unsigned int)pos - (unsigned int)priv->irq_log_base_hw)/4;

	/* Step in IRQ log until we reach the end. */
	handled = 0;
	curr = priv->irq_log_curr;
	while ( curr != pos ) {
		/* Bit0 of a log entry is stripped off to get the BD address */
		bd = (union gr1553bc_bd *)(GR1553BC_READ_MEM(curr) & ~1);
		GR1553BC_WRITE_MEM(curr, 0x2); /* Mark Handled */

		/* Convert Descriptor in IRQ log into CPU address. In order
		 * to convert we must know which list the descriptor belongs
		 * to, we compare the address of the bd to the ASYNC list
		 * descriptor table area. Lock held so the lists cannot be
		 * hot-swapped underneath us while comparing/converting.
		 */
		SPIN_LOCK(&priv->devlock, irqflags);
		if ( priv->alist && ((unsigned int)bd>=priv->alist->table_hw) &&
		     ((unsigned int)bd <
		     (priv->alist->table_hw + priv->alist->table_size))) {
			/* BD in async list */
			bd = gr1553bc_bd_hw2cpu(priv->alist, bd);
		} else if (priv->list &&
			   ((unsigned int)bd >= priv->list->table_hw) &&
			   ((unsigned int)bd <
			   (priv->list->table_hw + priv->list->table_size))) {
			/* BD in sync list */
			bd = gr1553bc_bd_hw2cpu(priv->list, bd);
		} else {
			/* error - unknown BD. Should not happen but could
			 * if user has switched list. Ignore IRQ entry and
			 * continue to next entry.
			 */
			bd = NULL;
		}

		/* Handle Descriptor that cased IRQ
		 *
		 * If someone have inserted an IRQ descriptor and tied
		 * that to a custom function we call that function, otherwise
		 * we let the standard IRQ handle handle it.
		 */
		if ( bd ) {
			word0 = GR1553BC_READ_MEM(&bd->raw.words[0]);
			word2 = GR1553BC_READ_MEM(&bd->raw.words[2]);
			SPIN_UNLOCK(&priv->devlock, irqflags);
			if ( word0 == GR1553BC_UNCOND_IRQ ) {
				/* Low two bits clear => WORD2 holds a
				 * 4-byte-aligned callback pointer rather
				 * than a minor-frame marker.
				 */
				if ( (word2 & 0x3) == 0 ) {
					func = (bcirq_func_t)(word2 & ~0x3);
					data = (void *)
					GR1553BC_READ_MEM(&bd->raw.words[3]);
					func(bd, data);
					handled = 1;
				}
			}

			if ( handled == 0 ) {
				/* Let standard IRQ handle handle it */
				priv->irq_func(bd, priv->irq_data);
			} else {
				handled = 0;
			}
		} else {
			SPIN_UNLOCK(&priv->devlock, irqflags);
		}

		/* Increment to next entry in IRQ LOG (circular) */
		if ( curr == priv->irq_log_end )
			curr = priv->irq_log_base;
		else
			curr++;
	}
	/* Remember how far we got for the next invocation */
	priv->irq_log_curr = curr;
}
+
+int gr1553bc_irq_setup
+ (
+ void *bc,
+ bcirq_func_t func,
+ void *data
+ )
+{
+ struct gr1553bc_priv *priv = bc;
+
+ if ( func == NULL )
+ priv->irq_func = gr1553bc_isr_std;
+ else
+ priv->irq_func = func;
+ priv->irq_data = data;
+
+ return 0;
+}
+
+void gr1553bc_ext_trig(void *bc, int trig)
+{
+ struct gr1553bc_priv *priv = bc;
+ unsigned int trigger;
+
+ if ( trig )
+ trigger = GR1553B_BC_ACT_SETT;
+ else
+ trigger = GR1553B_BC_ACT_CLRT;
+
+ GR1553BC_WRITE_REG(&priv->regs->bc_ctrl, GR1553BC_KEY | trigger);
+}
+
/* Snapshot the BC status register and current 1553 timer value into
 * the caller supplied status structure.
 */
void gr1553bc_status(void *bc, struct gr1553bc_status *status)
{
	struct gr1553bc_priv *priv = bc;

	status->status = GR1553BC_READ_REG(&priv->regs->bc_stat);
	status->time = GR1553BC_READ_REG(&priv->regs->bc_timer);
}
+
+/*** DEBUGGING HELP FUNCTIONS ***/
+
+#include <stdio.h>
+
+void gr1553bc_show_list(struct gr1553bc_list *list, int options)
+{
+ struct gr1553bc_major *major;
+ struct gr1553bc_minor *minor;
+ int i, j, minor_cnt, timefree;
+
+ printf("LIST\n");
+ printf(" major cnt: %d\n", list->major_cnt);
+ for (i=0; i<32; i++) {
+ printf(" RT[%d] timeout: %d\n", i, 14+(list->rt_timeout[i]*4));
+ }
+
+ for (i=0; i<list->major_cnt; i++) {
+ major = list->majors[i];
+ minor_cnt = major->cfg->minor_cnt;
+ printf(" MAJOR[%d]\n", i);
+ printf(" minor count: %d\n", minor_cnt);
+
+ for (j=0; j<minor_cnt; j++) {
+ minor = major->minors[j];
+
+ printf(" MINOR[%d]\n", j);
+ printf(" bd: 0x%08x (HW:0x%08x)\n",
+ (unsigned int)&minor->bds[0],
+ (unsigned int)gr1553bc_bd_cpu2hw(list,
+ &minor->bds[0]));
+ printf(" slot cnt: %d\n", minor->cfg->slot_cnt);
+ if ( minor->cfg->timeslot ) {
+ timefree = gr1553bc_minor_freetime(minor);
+ printf(" timefree: %d\n", timefree);
+ printf(" timetotal: %d\n",
+ minor->cfg->timeslot);
+ } else {
+ printf(" no time mgr\n");
+ }
+ }
+ }
+}
diff --git a/bsps/shared/grlib/1553/gr1553bm.c b/bsps/shared/grlib/1553/gr1553bm.c
new file mode 100644
index 0000000000..482e574d78
--- /dev/null
+++ b/bsps/shared/grlib/1553/gr1553bm.c
@@ -0,0 +1,509 @@
+/* GR1553B BM driver
+ *
+ * COPYRIGHT (c) 2010.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+
+#include <grlib/gr1553b.h>
+#include <grlib/gr1553bm.h>
+
+#include <grlib/grlib_impl.h>
+
+#define GR1553BM_WRITE_MEM(adr, val) *(volatile uint32_t *)(adr) = (uint32_t)(val)
+#define GR1553BM_READ_MEM(adr) (*(volatile uint32_t *)(adr))
+
+#define GR1553BM_WRITE_REG(adr, val) *(volatile uint32_t *)(adr) = (uint32_t)(val)
+#define GR1553BM_READ_REG(adr) (*(volatile uint32_t *)(adr))
+
/* Per-device driver state for one GR1553B BM (Bus Monitor) core */
struct gr1553bm_priv {
	struct drvmgr_dev **pdev;	/* handle from gr1553_bm_open() */
	struct gr1553b_regs *regs;	/* core register base */
	SPIN_DECLARE(devlock);		/* protects HW register accesses */

	void *buffer;			/* raw (unaligned) log buffer */
	unsigned int buffer_base_hw;	/* aligned base, HW/DMA address */
	unsigned int buffer_base;	/* aligned base, CPU address */
	unsigned int buffer_end;	/* one past last entry, CPU address */
	unsigned int buffer_size;	/* usable size, truncated to 8 bytes */
	unsigned int read_pos;		/* next unread entry, CPU address */
	int started;			/* non-zero while logging is active */
	struct gr1553bm_config cfg;	/* active configuration (copy) */

	/* Time updated by IRQ when 24-bit Time counter overflows */
	volatile uint64_t time;
};
+
+void gr1553bm_isr(void *data);
+
/* Default Driver configuration, copied into each device by
 * gr1553bm_open(); override with gr1553bm_config().
 */
struct gr1553bm_config gr1553bm_default_config =
{
	/* Highest resolution, use Time overflow IRQ to track */
	.time_resolution = 0,
	.time_ovf_irq = 1,

	/* No filtering, log all */
	.filt_error_options = GR1553BM_ERROPTS_ALL,
	.filt_rtadr = 0xffffffff,
	.filt_subadr = 0xffffffff,
	.filt_mc = 0x0007ffff,

	/* 128Kbyte dynamically allocated buffer. */
	.buffer_size = 128*1024,
	.buffer_custom = NULL,
};
+
/* Register the GR1553B BM driver with the driver manager. */
void gr1553bm_register(void)
{
	/* The BM driver relies on the common GR1553B driver, make sure
	 * it is registered as well.
	 */
	gr1553_register();
}
+
/* Enable BM logging in hardware: unmask IRQ sources, set the configured
 * error-logging filter options and mark the driver as started.
 */
static void gr1553bm_hw_start(struct gr1553bm_priv *priv)
{
	SPIN_IRQFLAGS(irqflags);

	/* Enable IRQ source and mark running state */
	SPIN_LOCK_IRQ(&priv->devlock, irqflags);

	priv->started = 1;

	/* Clear old IRQ flags */
	priv->regs->irq = GR1553B_IRQ_BMD | GR1553B_IRQ_BMTOF;

	/* Unmask IRQ sources: DMA error always; time-tag overflow only
	 * when the configuration tracks 64-bit time via IRQ.
	 */
	if ( priv->cfg.time_ovf_irq ) {
		priv->regs->imask |= GR1553B_IRQEN_BMDE | GR1553B_IRQEN_BMTOE;
	} else {
		priv->regs->imask |= GR1553B_IRQEN_BMDE;
	}

	/* Start logging */
	priv->regs->bm_ctrl =
		(priv->cfg.filt_error_options &
		(GR1553B_BM_CTRL_MANL|GR1553B_BM_CTRL_UDWL|GR1553B_BM_CTRL_IMCL))
		| GR1553B_BM_CTRL_BMEN;

	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
}
+
/* Disable BM logging in hardware: stop the logger, mask its IRQ sources,
 * acknowledge any pending flags and mark the driver as stopped.
 */
static void gr1553bm_hw_stop(struct gr1553bm_priv *priv)
{
	SPIN_IRQFLAGS(irqflags);

	SPIN_LOCK_IRQ(&priv->devlock, irqflags);

	/* Stop Logging */
	priv->regs->bm_ctrl = 0;

	/* Stop IRQ source */
	priv->regs->imask &= ~(GR1553B_IRQEN_BMDE|GR1553B_IRQEN_BMTOE);

	/* Clear IRQ flags */
	priv->regs->irq = GR1553B_IRQ_BMD | GR1553B_IRQ_BMTOF;

	priv->started = 0;

	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
}
+
/* Open a GR1553B BM device by number. Returns an opaque handle used as
 * the 'bm' argument by the other gr1553bm_* functions, or NULL on
 * failure (device busy/unknown, or out of memory).
 */
void *gr1553bm_open(int minor)
{
	struct drvmgr_dev **pdev = NULL;
	struct gr1553bm_priv *priv = NULL;
	struct amba_dev_info *ambadev;
	struct ambapp_core *pnpinfo;

	/* Allocate requested device */
	pdev = gr1553_bm_open(minor);
	if ( pdev == NULL )
		goto fail;

	priv = grlib_calloc(1, sizeof(*priv));
	if ( priv == NULL )
		goto fail;

	/* Init BM device state */
	priv->pdev = pdev;
	(*pdev)->priv = priv;

	/* Get device information from AMBA PnP information */
	ambadev = (struct amba_dev_info *)(*pdev)->businfo;
	pnpinfo = &ambadev->info;
	priv->regs = (struct gr1553b_regs *)pnpinfo->apb_slv->start;
	SPIN_INIT(&priv->devlock, "gr1553bm");

	/* Start with default configuration */
	priv->cfg = gr1553bm_default_config;

	/* Ensure the core is stopped and its IRQ sources are masked */
	gr1553bm_hw_stop(priv);

	return priv;

fail:
	if ( pdev )
		gr1553_bm_close(pdev);
	if ( priv )
		free(priv);
	return NULL;
}
+
/* Close a previously opened BM device: stop logging if running, free a
 * dynamically allocated log buffer, and release the device.
 */
void gr1553bm_close(void *bm)
{
	struct gr1553bm_priv *priv = bm;

	if ( priv->started ) {
		gr1553bm_stop(bm);
	}

	/* Only buffers allocated by the driver itself are freed here;
	 * user supplied buffers (buffer_custom) belong to the caller.
	 */
	if ( (priv->cfg.buffer_custom == NULL) && priv->buffer )
		free(priv->buffer);

	gr1553_bm_close(priv->pdev);
	free(priv);
}
+
/* Configure the BM driver and (re)establish the log buffer.
 *
 * cfg->buffer_custom selects the buffer source:
 *   NULL          - buffer is allocated dynamically by the driver
 *   bit0 set      - remote (hardware/DMA) address supplied by the user,
 *                   translated into a CPU address
 *   otherwise     - CPU address supplied by the user, translated into a
 *                   hardware address
 *
 * Must not be called while logging is started. Returns 0 on success,
 * -1 on failure (started, or out of memory).
 */
int gr1553bm_config(void *bm, struct gr1553bm_config *cfg)
{
	struct gr1553bm_priv *priv = bm;

	if ( priv->started )
		return -1;

	/* Check Config validity? */
/*#warning IMPLEMENT.*/

	/* Free old buffer if dynamically allocated */
	if ( (priv->cfg.buffer_custom == NULL) && priv->buffer ) {
		free(priv->buffer);
		priv->buffer = NULL;
	}
	priv->buffer_size = cfg->buffer_size & ~0x7; /* truncate to 8 byte boundary */
	if ((unsigned int)cfg->buffer_custom & 1) {
		/* Custom address given in remote (hardware/DMA) address
		 * space. Translate it into a CPU accessible address.
		 */
		priv->buffer_base_hw = (unsigned int)cfg->buffer_custom & ~1;
		priv->buffer = cfg->buffer_custom;
		drvmgr_translate_check(
			*priv->pdev,
			DMAMEM_TO_CPU,
			(void *)priv->buffer_base_hw,
			(void **)&priv->buffer_base,
			priv->buffer_size);
	} else {
		if (cfg->buffer_custom == NULL) {
			/* Allocate new buffer dynamically; +8 leaves room
			 * for the alignment below.
			 */
			priv->buffer = grlib_malloc(priv->buffer_size + 8);
			if (priv->buffer == NULL)
				return -1;
		} else {
			/* Address given in CPU accessible address, no
			 * translation required.
			 */
			priv->buffer = cfg->buffer_custom;
		}
		/* Align to 8 bytes */
		priv->buffer_base = ((unsigned int)priv->buffer + (8-1)) &
					~(8-1);
		/* Translate address of buffer base into address that Hardware must
		 * use to access the buffer.
		 */
		drvmgr_translate_check(
			*priv->pdev,
			CPUMEM_TO_DMA,
			(void *)priv->buffer_base,
			(void **)&priv->buffer_base_hw,
			priv->buffer_size);

	}

	/* Copy valid config */
	priv->cfg = *cfg;

	return 0;
}
+
/* Start logging: program time tag resolution, filters and the log
 * buffer into hardware, register the ISR and enable the BM.
 *
 * Returns 0 on success, -1 when already started, -2 when no buffer has
 * been configured, -3 when the ISR cannot be registered.
 */
int gr1553bm_start(void *bm)
{
	struct gr1553bm_priv *priv = bm;

	if ( priv->started )
		return -1;
	if ( priv->buffer == NULL )
		return -2;

	/* Start at Time = 0 */
	priv->regs->bm_ttag =
		priv->cfg.time_resolution << GR1553B_BM_TTAG_RES_BIT;

	/* Configure Filters */
	priv->regs->bm_adr = priv->cfg.filt_rtadr;
	priv->regs->bm_subadr = priv->cfg.filt_subadr;
	priv->regs->bm_mc = priv->cfg.filt_mc;

	/* Set up buffer.
	 * NOTE(review): end/wrap addresses use the raw cfg.buffer_size
	 * while gr1553bm_config() truncated priv->buffer_size to an
	 * 8-byte boundary - harmless when the configured size is already
	 * 8-byte aligned; verify for unaligned sizes.
	 */
	priv->regs->bm_start = priv->buffer_base_hw;
	priv->regs->bm_end = priv->buffer_base_hw + priv->cfg.buffer_size - 4;
	priv->regs->bm_pos = priv->buffer_base_hw;
	priv->read_pos = priv->buffer_base;
	priv->buffer_end = priv->buffer_base + priv->cfg.buffer_size;

	/* Register ISR handler and unmask IRQ source at IRQ controller */
	if (drvmgr_interrupt_register(*priv->pdev, 0, "gr1553bm", gr1553bm_isr, priv))
		return -3;

	/* Start hardware and set priv->started */
	gr1553bm_hw_start(priv);

	return 0;
}
+
/* Stop logging: disable the BM core and remove the ISR. */
void gr1553bm_stop(void *bm)
{
	struct gr1553bm_priv *priv = bm;

	/* Stop Hardware */
	gr1553bm_hw_stop(priv);

	/* At this point the hardware must be stopped and its IRQ
	 * sources masked at the core.
	 */

	/* Unregister ISR handler and mask 1553 IRQ source at IRQ ctrl */
	drvmgr_interrupt_unregister(*priv->pdev, 0, gr1553bm_isr, priv);
}
+
+int gr1553bm_started(void *bm)
+{
+ return ((struct gr1553bm_priv *)bm)->started;
+}
+
/* Get 64-bit 1553 Time.
 *
 * Update software time counters and return the current time. The 64-bit
 * value is composed of priv->time (upper bits, advanced on 24-bit
 * hardware time-tag overflow) OR'ed with the current hardware time tag.
 */
void gr1553bm_time(void *bm, uint64_t *time)
{
	struct gr1553bm_priv *priv = bm;
	unsigned int hwtime, hwtime2;

resample:
	if ( priv->started && (priv->cfg.time_ovf_irq == 0) ) {
		/* Update Time overflow counter. The carry bit from Time counter
		 * is located in IRQ Flag.
		 *
		 * When IRQ is not used this function must be called often
		 * enough to avoid that the Time overflows and the carry
		 * bit is already set. The frequency depends on the Time
		 * resolution.
		 */
		if ( priv->regs->irq & GR1553B_IRQ_BMTOF ) {
			/* Clear carry bit */
			priv->regs->irq = GR1553B_IRQ_BMTOF;
			priv->time += (GR1553B_BM_TTAG_VAL + 1);
		}
	}

	/* Report current Time, even if stopped */
	hwtime = priv->regs->bm_ttag & GR1553B_BM_TTAG_VAL;
	if ( time )
		*time = priv->time | hwtime;

	if ( priv->cfg.time_ovf_irq ) {
		/* Detect wrap around by sampling the tag a second time */
		hwtime2 = priv->regs->bm_ttag & GR1553B_BM_TTAG_VAL;
		if ( hwtime > hwtime2 ) {
			/* priv->time and hwtime may be out of sync if
			 * IRQ updated priv->time just after bm_ttag was read
			 * here, we resample if we detect inconsistancy.
			 */
			goto resample;
		}
	}
}
+
/* Number of unread entries available in the DMA log buffer.
 * Accounts for ring-buffer wrap: 'top' counts entries from the read
 * position to the end, 'bot' from buffer start to the hardware write
 * position. Returns 0 on success, -1 when not started.
 */
int gr1553bm_available(void *bm, int *nentries)
{
	struct gr1553bm_priv *priv = bm;
	unsigned int top, bot, pos;

	if ( !priv->started )
		return -1;

	/* Get BM posistion in log */
	pos = priv->regs->bm_pos;

	/* Convert into CPU accessible address */
	pos = priv->buffer_base + (pos - priv->buffer_base_hw);

	if ( pos >= priv->read_pos ) {
		/* No wrap: all unread entries are contiguous */
		top = (pos - priv->read_pos)/sizeof(struct gr1553bm_entry);
		bot = 0;
	} else {
		/* Wrapped: unread entries in two regions */
		top = (priv->buffer_end - priv->read_pos)/sizeof(struct gr1553bm_entry);
		bot = (pos - priv->buffer_base)/sizeof(struct gr1553bm_entry);
	}

	if ( nentries )
		*nentries = top+bot;

	return 0;
}
+
/* Read a maximum number of entries from the LOG buffer.
 *
 * dst: destination array for the copied entries
 * max: in/out; on entry the maximum number of entries to read, on
 *      return the number actually read
 *
 * Handles ring-buffer wrap by copying at most two regions (top = from
 * read position to buffer end, bottom = from buffer start to the
 * hardware write position). An optional user copy routine
 * (cfg.copy_func) may replace memcpy, e.g. for DMA-space access.
 * Returns 0 on success, -1 when not started.
 */
int gr1553bm_read(void *bm, struct gr1553bm_entry *dst, int *max)
{
	struct gr1553bm_priv *priv = bm;
	unsigned int dest, pos, left, newPos, len;
	unsigned int topAdr, botAdr, topLen, botLen;

	if ( !priv || !priv->started )
		return -1;

	left = *max;
	/* Hardware write position, truncated to whole 8-byte entries */
	pos = priv->regs->bm_pos & ~0x7;

	/* Convert into CPU accessible address */
	pos = priv->buffer_base + (pos - priv->buffer_base_hw);

	if ( (pos == priv->read_pos) || (left < 1) ) {
		/* No data available */
		*max = 0;
		return 0;
	}
	newPos = 0;

	/* Addresses and lengths of BM log buffer */
	if ( pos >= priv->read_pos ) {
		/* Read Top only */
		topAdr = priv->read_pos;
		botAdr = 0;
		topLen = (pos - priv->read_pos)/sizeof(struct gr1553bm_entry);
		botLen = 0;
	} else {
		/* Read Top and Bottom */
		topAdr = priv->read_pos;
		botAdr = priv->buffer_base;
		topLen = (priv->buffer_end - priv->read_pos)/sizeof(struct gr1553bm_entry);
		botLen = (pos - priv->buffer_base)/sizeof(struct gr1553bm_entry);
	}

	dest = (unsigned int)dst;
	if ( topLen > 0 ) {
		/* Copy from top area first, limited by caller's budget */
		if ( topLen > left ) {
			len = left;
			left = 0;
		} else {
			len = topLen;
			left -= topLen;
		}
		newPos = topAdr + (len * sizeof(struct gr1553bm_entry));
		if ( newPos >= priv->buffer_end )
			newPos -= priv->buffer_size;
		if ( priv->cfg.copy_func ) {
			dest += priv->cfg.copy_func(
				dest, /*Optional Destination*/
				(void *)topAdr, /* DMA start address */
				len, /* Number of entries */
				priv->cfg.copy_func_arg /* Custom ARG */
				);
		} else {
			memcpy( (void *)dest,
				(void *)topAdr,
				len * sizeof(struct gr1553bm_entry));
			dest += len * sizeof(struct gr1553bm_entry);
		}
	}

	if ( (botLen > 0) && (left > 0) ) {
		/* Copy bottom area last */
		if ( botLen > left ) {
			len = left;
			left = 0;
		} else {
			len = botLen;
			left -= botLen;
		}
		newPos = botAdr + (len * sizeof(struct gr1553bm_entry));

		if ( priv->cfg.copy_func ) {
			priv->cfg.copy_func(
				dest, /*Optional Destination*/
				(void *)botAdr, /* DMA start address */
				len, /* Number of entries */
				priv->cfg.copy_func_arg /* Custom ARG */
				);
		} else {
			memcpy( (void *)dest,
				(void *)botAdr,
				len * sizeof(struct gr1553bm_entry));
		}
	}

	/* Remember last read posistion in buffer */
	/*printf("New pos: 0x%08x (0x%08x), %d\n", newPos, priv->read_pos, *max - left);*/
	priv->read_pos = newPos;

	/* Return number of entries read */
	*max = *max - left;

	return 0;
}
+
/* BM interrupt handler.
 *
 * Note: This is a shared interrupt handler, with BC/RT driver
 * we must determine the cause of IRQ before handling it. Handles two
 * causes: 24-bit time-tag overflow (extends the software 64-bit time)
 * and BM DMA error (fatal: notifies the user and stops the BM).
 */
void gr1553bm_isr(void *data)
{
	struct gr1553bm_priv *priv = data;
	uint32_t irqflag;

	/* Get Causes */
	irqflag = priv->regs->irq & (GR1553B_IRQ_BMD | GR1553B_IRQ_BMTOF);

	/* Check spurious IRQs */
	if ( (irqflag == 0) || (priv->started == 0) )
		return;

	if ( (irqflag & GR1553B_IRQ_BMTOF) && priv->cfg.time_ovf_irq ) {
		/* 1553 Time Over flow. Time is 24-bits */
		priv->time += (GR1553B_BM_TTAG_VAL + 1);

		/* Clear cause handled */
		priv->regs->irq = GR1553B_IRQ_BMTOF;
	}

	if ( irqflag & GR1553B_IRQ_BMD ) {
		/* BM DMA ERROR. Fatal error, we stop BM hardware and let
		 * user take care of it. From now on all calls will result
		 * in an error because the BM is stopped (priv->started=0).
		 */

		/* Clear cause handled */
		priv->regs->irq = GR1553B_IRQ_BMD;

		/* Notify user before the core is stopped */
		if ( priv->cfg.dma_error_isr )
			priv->cfg.dma_error_isr(data, priv->cfg.dma_error_arg);

		gr1553bm_hw_stop(priv);
	}
}
diff --git a/bsps/shared/grlib/1553/gr1553rt.c b/bsps/shared/grlib/1553/gr1553rt.c
new file mode 100644
index 0000000000..339e856c76
--- /dev/null
+++ b/bsps/shared/grlib/1553/gr1553rt.c
@@ -0,0 +1,1210 @@
+/* GR1553B RT driver
+ *
+ * COPYRIGHT (c) 2010.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <grlib/gr1553b.h>
+#include <grlib/gr1553rt.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+
+#include <grlib/grlib_impl.h>
+
+#define GR1553RT_WRITE_MEM(adr, val) *(volatile uint32_t *)(adr) = (uint32_t)(val)
+#define GR1553RT_READ_MEM(adr) (*(volatile uint32_t *)(adr))
+
+#define GR1553RT_WRITE_REG(adr, val) *(volatile uint32_t *)(adr) = (uint32_t)(val)
+#define GR1553RT_READ_REG(adr) (*(volatile uint32_t *)(adr))
+
+/* Software representation of one hardware descriptor */
+struct gr1553rt_sw_bd {
+	unsigned short this_next;/* Next entry or this entry. 0xffff: no next */
+	unsigned char listid;	/* ListID of List the descriptor is attached */
+	char unused;
+} __attribute__((packed));
+
+/* Software description of a subaddress */
+struct gr1553rt_subadr {
+	/* RX LIST */
+	unsigned char rxlistid;
+	/* TX LIST */
+	unsigned char txlistid;
+};
+
+/* User callback + argument invoked on RT error IRQs (DMA/table error) */
+struct gr1553rt_irqerr {
+	gr1553rt_irqerr_t func;
+	void *data;
+};
+
+/* User callback + argument invoked on mode-code event IRQs */
+struct gr1553rt_irqmc {
+	gr1553rt_irqmc_t func;
+	void *data;
+};
+
+/* User callback + argument invoked on RX/TX transfer IRQs, one
+ * instance per sub address and direction.
+ */
+struct gr1553rt_irq {
+	gr1553rt_irq_t func;
+	void *data;
+};
+
+struct gr1553rt_priv {
+	/* Pointer to Hardware registers */
+	struct gr1553b_regs *regs;
+
+	/* Software State */
+	int started;
+	struct gr1553rt_cfg cfg;
+	SPIN_DECLARE(devlock);
+
+	/* Handle to GR1553B RT device layer */
+	struct drvmgr_dev **pdev;
+
+	/* Each Index represents one RT Subaddress. 31 = Broadcast */
+	struct gr1553rt_subadr subadrs[32];
+
+	/* Pointer to array of Software's description of a hardware
+	 * descriptor.
+	 */
+#if (RTBD_MAX == 0)
+	struct gr1553rt_sw_bd *swbds;
+#else
+	struct gr1553rt_sw_bd swbds[RTBD_MAX];
+#endif
+
+	/* List of Free descriptors */
+	/* Index of first free swbd; 0xffff marks an empty free-list */
+	unsigned short swbd_free;
+	int swbd_free_cnt;
+
+	/* Hardware SubAddress descriptors given for CPU and Hardware */
+	void *satab_buffer;
+	struct gr1553rt_sa *sas_cpu;	/* Translated for CPU */
+	struct gr1553rt_sa *sas_hw;	/* Translated for Hardware */
+
+	/* Hardware descriptors address given for CPU and hardware */
+	void *bd_buffer;
+	int bds_cnt;			/* Number of descriptors */
+	struct gr1553rt_bd *bds_cpu;	/* Translated for CPU */
+	struct gr1553rt_bd *bds_hw;	/* Translated for Hardware */
+
+
+	/* Event Log buffer in */
+	void *evlog_buffer;
+	unsigned int *evlog_cpu_next;	/* Next LOG entry to be handled */
+	unsigned int *evlog_cpu_base;	/* First Entry in LOG */
+	unsigned int *evlog_cpu_end;	/* Last+1 Entry in LOG */
+	unsigned int *evlog_hw_base;	/* Translated for Hardware */
+
+	/* Each Index represents a LIST ID */
+	struct gr1553rt_list *lists[RTLISTID_MAX];
+
+	/* IRQ handlers, one per SUBADDRESS */
+	struct gr1553rt_irq irq_rx[32];
+	struct gr1553rt_irq irq_tx[32];
+
+	/* ISR called when an ERROR IRQ is received */
+	struct gr1553rt_irqerr irq_err;
+
+	/* ISR called when an Mode Code is received */
+	struct gr1553rt_irqmc irq_mc;
+};
+
+void gr1553rt_sw_init(struct gr1553rt_priv *priv);
+void gr1553rt_sw_free(struct gr1553rt_priv *priv);
+void gr1553rt_isr(void *data);
+
+/* Assign and ID to the list. An LIST ID is needed before scheduling list
+ * on an RT subaddress.
+ *
+ * Only 64 lists can be registered at a time on the same device.
+ */
+static int gr1553rt_list_reg(struct gr1553rt_list *list)
+{
+	struct gr1553rt_priv *priv = list->rt;
+	int id;
+
+	/* Grab the lowest unused LIST ID slot on the device */
+	for (id = 0; id < RTLISTID_MAX; id++) {
+		if ( priv->lists[id] != NULL )
+			continue;
+		priv->lists[id] = list;
+		list->listid = id;
+		return id;
+	}
+
+	/* All LIST IDs are taken */
+	list->listid = -1;
+	return -1;
+}
+
+#if 0 /* unused for now */
+/* Unregister List from device */
+static void gr1553rt_list_unreg(struct gr1553rt_list *list)
+{
+	struct gr1553rt_priv *priv = list->rt;
+
+	/* Release the LIST ID slot and mark the list unregistered */
+	priv->lists[list->listid] = NULL;
+	list->listid = -1;
+}
+#endif
+
+/* Convert a software BD pointer into its index within the swbds table */
+static int gr1553rt_bdid(void *rt, struct gr1553rt_sw_bd *bd)
+{
+	struct gr1553rt_priv *priv = rt;
+	unsigned short id;
+
+	/* Index = byte offset from table start divided by entry size */
+	id = ((unsigned int)bd - (unsigned int)&priv->swbds[0]) /
+		sizeof(struct gr1553rt_sw_bd);
+
+	return id;
+}
+
+/* Build the initial free-list: all 'count' descriptors chained in order */
+static void gr1553rt_bd_alloc_init(void *rt, int count)
+{
+	struct gr1553rt_priv *priv = rt;
+	int i;
+
+	/* Link each descriptor to the following one; terminate the chain */
+	for (i = 0; i < count - 1; i++)
+		priv->swbds[i].this_next = i + 1;
+	priv->swbds[count-1].this_next = 0xffff;
+
+	/* Free-list starts at descriptor zero and contains all of them */
+	priv->swbd_free = 0;
+	priv->swbd_free_cnt = count;
+}
+
+/* Allocate a Chain of descriptors
+ *
+ * On success *bd points to the first descriptor of the chain, the
+ * last descriptor of the chain is terminated with 0xffff. On failure
+ * -1 is returned and *bd is set to NULL.
+ */
+static int gr1553rt_bd_alloc(void *rt, struct gr1553rt_sw_bd **bd, int cnt)
+{
+	struct gr1553rt_priv *priv = rt;
+	struct gr1553rt_sw_bd *curr;
+	int i;
+
+	/* Reject invalid counts and requests larger than the free-list */
+	if ((priv->swbd_free_cnt < cnt) || (cnt <= 0)) {
+		*bd = NULL;
+		return -1;
+	}
+
+	/* Walk the free-list to the last descriptor of the new chain.
+	 * Only the first cnt-1 links are followed; the last chain entry
+	 * may legally be the final free-list entry (this_next==0xffff).
+	 * The old code checked the end marker on the last entry too,
+	 * which made allocating exactly all remaining descriptors fail
+	 * even though the free counter said enough were available.
+	 */
+	curr = &priv->swbds[priv->swbd_free];
+	*bd = curr;
+	for (i = 1; i < cnt; i++) {
+		if ( curr->this_next == 0xffff ) {
+			/* Free-list shorter than swbd_free_cnt claims;
+			 * internal state is inconsistent.
+			 */
+			*bd = NULL;
+			return -1;
+		}
+		curr = &priv->swbds[curr->this_next];
+	}
+
+	/* Unlink chain from free-list and terminate it */
+	priv->swbd_free = curr->this_next;
+	priv->swbd_free_cnt -= cnt;
+	curr->this_next = 0xffff; /* Mark end of chain on last entry */
+
+	return 0;
+}
+
+#if 0 /* unused for now */
+/* Return a single descriptor to the head of the device free-list */
+static void gr1553rt_bd_free(void *rt, struct gr1553rt_sw_bd *bd)
+{
+	struct gr1553rt_priv *priv = rt;
+	unsigned short index;
+
+	/* Get Index of Software BD */
+	index = gr1553rt_bdid(priv, bd);
+
+	/* Insert first in list */
+	bd->this_next = priv->swbd_free;
+	priv->swbd_free = index;
+	priv->swbd_free_cnt++;
+}
+#endif
+
+/* Initialize a descriptor list according to cfg. If *plist is NULL a
+ * list object is allocated here and returned through plist; otherwise
+ * the caller-provided list is used. Registers the list with the
+ * device (assigns a LIST ID) and allocates cfg->bd_cnt descriptors.
+ *
+ * Returns 0 on success, -1 on out-of-memory, -2 when no LIST ID is
+ * free, -3 when too few descriptors are available.
+ *
+ * NOTE(review): on the -2 and -3 error paths a list allocated here is
+ * not freed (the caller still holds it via *plist) and on -3 the
+ * registered LIST ID is not released - confirm this is intended.
+ */
+int gr1553rt_list_init
+	(
+	void *rt,
+	struct gr1553rt_list **plist,
+	struct gr1553rt_list_cfg *cfg
+	)
+{
+	struct gr1553rt_priv *priv = rt;
+	size_t size;
+	int i;
+	struct gr1553rt_sw_bd *swbd;
+	unsigned short index;
+	struct gr1553rt_list *list;
+
+	/* The user may provide a pre allocated LIST, or
+	 * let the driver handle allocation by using malloc()
+	 *
+	 * If the IN/OUT plist argument points to NULL a list
+	 * dynamically allocated here.
+	 */
+	list = *plist;
+	if ( list == NULL ) {
+		/* Dynamically allocate LIST */
+		size = sizeof(*list) +
+			(cfg->bd_cnt * sizeof(list->bds[0]));
+		list = grlib_malloc(size);
+		if ( list == NULL )
+			return -1;
+		*plist = list;
+	}
+
+	list->rt = rt;
+	list->subadr = -1;
+	list->listid = gr1553rt_list_reg(list);
+	if ( list->listid == -1 )
+		return -2; /* Too many lists */
+	list->cfg = cfg;
+	list->bd_cnt = cfg->bd_cnt;
+
+	/* Allocate all BDs needed by list */
+	if ( gr1553rt_bd_alloc(rt, &swbd, list->bd_cnt) ) {
+		return -3; /* Too few descriptors */
+	}
+
+	/* Get ID/INDEX of Software BDs */
+	index = gr1553rt_bdid(rt, swbd);
+	list->bds[0] = index;
+	for (i=1; i<list->bd_cnt; i++) {
+		list->bds[i] = priv->swbds[list->bds[i-1]].this_next;
+	}
+
+	/* Now that the next pointer has fullfilled it's job and not
+	 * needed anymore, we use it as list entry pointer instead.
+	 * The this_next pointer is a list entry number.
+	 */
+	for (i=0; i<list->bd_cnt; i++) {
+		priv->swbds[list->bds[i]].this_next = i;
+	}
+
+	return 0;
+}
+
+/* Initialize one transfer descriptor of a list.
+ *
+ * entry_no  Index of descriptor within the list
+ * flags     GR1553RT_BD_FLAGS_IRQEN enables IRQ on transfer completed
+ * dptr      Data buffer pointer. If bit0 is set the address is a
+ *           CPU-local address that is translated into DMA address
+ *           space before being written to hardware
+ * next      Next entry: 0xfffe selects the following entry (wrapping
+ *           to entry 0), 0xffff terminates the list
+ *
+ * Returns 0 on success, -1 on invalid entry_no/next.
+ */
+int gr1553rt_bd_init(
+	struct gr1553rt_list *list,
+	unsigned short entry_no,
+	unsigned int flags,
+	uint16_t *dptr,
+	unsigned short next
+	)
+{
+	struct gr1553rt_priv *priv;
+	unsigned short bdid;
+	struct gr1553rt_bd *bd;
+	unsigned int nextbd, dataptr;
+	SPIN_IRQFLAGS(irqflags);
+
+	if ( entry_no >= list->bd_cnt )
+		return -1;
+
+	/* Find Descriptor */
+	bdid = list->bds[entry_no];
+	priv = list->rt;
+	bd = &priv->bds_cpu[bdid];
+
+	if ( next == 0xfffe ) {
+		next = entry_no + 1;
+		if ( next >= list->bd_cnt )
+			next = 0;
+	}
+
+	/* Find next descriptor in address space that the
+	 * Hardware understand.
+	 */
+	if ( next >= 0xffff ) {
+		nextbd = 0x3; /* End of list */
+	} else if ( next >= list->bd_cnt ) {
+		return -1;
+	} else {
+		bdid = list->bds[next];
+		nextbd = (unsigned int)&priv->bds_hw[bdid];
+	}
+
+	dataptr = (unsigned int)dptr;
+	if ( dataptr & 1 ) {
+		/* Translate address from CPU-local into remote */
+		dataptr &= ~1;
+		drvmgr_translate(
+			*priv->pdev,
+			CPUMEM_TO_DMA,
+			(void *)dataptr,
+			(void **)&dataptr
+			);
+	}
+
+	/* Init BD. Write the translated data pointer (dataptr) rather
+	 * than the raw dptr argument: the old code stored dptr, which
+	 * discarded the address translation above and left the bit0
+	 * translation marker set in the hardware descriptor.
+	 */
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+	bd->ctrl = flags & GR1553RT_BD_FLAGS_IRQEN;
+	bd->dptr = dataptr;
+	bd->next = nextbd;
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return 0;
+}
+
+/* Read and/or update the control/status word and data pointer of a
+ * descriptor in a list.
+ *
+ * status  In: new ctrl word (0 = read only). Out: previous ctrl word.
+ * dptr    In: new data pointer (NULL = read only; bit0 set requests
+ *         CPU-to-DMA address translation). Out: previous pointer.
+ *
+ * Returns 0 on success, -1 on invalid entry_no.
+ */
+int gr1553rt_bd_update(
+	struct gr1553rt_list *list,
+	int entry_no,
+	unsigned int *status,
+	uint16_t **dptr
+	)
+{
+	struct gr1553rt_priv *priv;
+	unsigned short bdid;
+	struct gr1553rt_bd *bd;
+	unsigned int tmp, dataptr = 0;
+	SPIN_IRQFLAGS(irqflags);
+
+	if ( entry_no >= list->bd_cnt )
+		return -1;
+
+	/* Find Descriptor */
+	bdid = list->bds[entry_no];
+	priv = list->rt;
+	bd = &priv->bds_cpu[bdid];
+
+	/* Prepare translation if needed */
+	if ( dptr && (dataptr=(unsigned int)*dptr) ) {
+		if ( dataptr & 1 ) {
+			/* Translate address from CPU-local into remote. May
+			 * be used when RT core is accessed over the PCI bus.
+			 */
+			dataptr &= ~1;
+			drvmgr_translate(
+				*priv->pdev,
+				CPUMEM_TO_DMA,
+				(void *)dataptr,
+				(void **)&dataptr
+				);
+		}
+	}
+
+	/* Get current values and then set new values in BD */
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+	/* READ/WRITE Status/Control word */
+	if ( status ) {
+		tmp = bd->ctrl;
+		if ( *status ) {
+			bd->ctrl = *status;
+		}
+		*status = tmp;
+	}
+	/* READ/WRITE Data-Pointer word */
+	if ( dptr ) {
+		tmp = bd->dptr;
+		if ( dataptr ) {
+			bd->dptr = dataptr;
+		}
+		*dptr = (uint16_t *)tmp;
+	}
+	/* Fix: release the lock. The original code acquired the spin-lock
+	 * a second time here instead of unlocking, leaving the lock held
+	 * and interrupts disabled on return.
+	 */
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return 0;
+}
+
+int gr1553rt_irq_err
+ (
+ void *rt,
+ gr1553rt_irqerr_t func,
+ void *data
+ )
+{
+ struct gr1553rt_priv *priv = rt;
+ SPIN_IRQFLAGS(irqflags);
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+ priv->irq_err.func = func;
+ priv->irq_err.data = data;
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return 0;
+}
+
+/* Install the user callback invoked on mode-code interrupts */
+int gr1553rt_irq_mc
+	(
+	void *rt,
+	gr1553rt_irqmc_t func,
+	void *data
+	)
+{
+	struct gr1553rt_priv *priv = rt;
+	SPIN_IRQFLAGS(irqflags);
+
+	/* Update func/arg atomically with respect to the ISR */
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+	priv->irq_mc.func = func;
+	priv->irq_mc.data = data;
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return 0;
+}
+
+/* Install the user callback invoked on data-transfer interrupts for
+ * one sub address (0..31). tx selects the transmit (non-zero) or
+ * receive handler table. Returns 0 on success, -1 on invalid subadr.
+ */
+int gr1553rt_irq_sa
+	(
+	void *rt,
+	int subadr,
+	int tx,
+	gr1553rt_irq_t func,
+	void *data
+	)
+{
+	struct gr1553rt_priv *priv = rt;
+	SPIN_IRQFLAGS(irqflags);
+
+	/* Validate sub address: an out-of-range index would previously
+	 * write outside the irq_rx/irq_tx tables.
+	 */
+	if ( (subadr < 0) || (subadr > 31) )
+		return -1;
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+	if ( tx ) {
+		priv->irq_tx[subadr].func = func;
+		priv->irq_tx[subadr].data = data;
+	} else {
+		priv->irq_rx[subadr].func = func;
+		priv->irq_rx[subadr].data = data;
+	}
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return 0;
+}
+
+/* GR1553-RT Interrupt Service Routine.
+ *
+ * Dispatches RT error IRQs to the user error handler (and stops the
+ * RT), then walks the event log from the first IRQ-generating entry
+ * up to the current hardware write position, calling the per-subaddress
+ * RX/TX handlers and the mode-code handler for each IRQ-marked entry.
+ */
+void gr1553rt_isr(void *data)
+{
+	struct gr1553rt_priv *priv = data;
+	unsigned int firstirq, lastpos;
+	int index;
+	unsigned int *last, *curr, entry, hwbd;
+	int type, samc, mcode, subadr;
+	int listid;
+	struct gr1553rt_irq *pisr, isr;
+	struct gr1553rt_irqerr isrerr;
+	struct gr1553rt_irqmc isrmc;
+	unsigned int irq;
+	SPIN_ISR_IRQFLAGS(irqflags);
+
+	/* Ack IRQ before reading current write pointer, but after
+	 * reading current IRQ pointer. This is because RT_EVIRQ
+	 * may be updated after we ACK the IRQ source.
+	 */
+	irq = priv->regs->irq &
+		(GR1553B_IRQ_RTTE|GR1553B_IRQ_RTD|GR1553B_IRQ_RTEV);
+	if ( irq == 0 )
+		return;
+
+	firstirq = priv->regs->rt_evirq;
+	priv->regs->irq = irq;
+	lastpos = priv->regs->rt_evlog;
+
+	/* Quit if nothing has been added to the log */
+	if ( lastpos == firstirq )
+		return;
+
+	if ( irq & (GR1553B_IRQ_RTTE|GR1553B_IRQ_RTD) ) {
+		/* copy func and arg while owning lock */
+		SPIN_LOCK(&priv->devlock, irqflags);
+		isrerr = priv->irq_err;
+		SPIN_UNLOCK(&priv->devlock, irqflags);
+		if ( isrerr.func ) {
+			isrerr.func(irq, isrerr.data);
+		}
+
+		/* Stop Hardware and enter non-started mode. This will
+		 * make all future calls to driver result in an error.
+		 */
+		gr1553rt_stop(priv);
+	}
+
+	/* Step between first log entry causing an IRQ to last
+	 * entry. Each entry that has caused an IRQ will be handled
+	 * by calling user-defined function.
+	 *
+	 * We convert hardware addresses into CPU accessable addresses
+	 * first.
+	 */
+	index = (firstirq - (unsigned int)priv->evlog_hw_base) /
+		sizeof(unsigned int);
+	curr = priv->evlog_cpu_base + index;
+	index = (lastpos - (unsigned int)priv->evlog_hw_base) /
+		sizeof(unsigned int);
+	last = priv->evlog_cpu_base + index;
+
+	do {
+		/* Process one entry. Bit31 flags an IRQ-generating entry,
+		 * bits 30:29 hold the transfer type, bits 28:24 the
+		 * sub address or mode code.
+		 */
+		entry = *curr;
+
+		if ( entry & 0x80000000 ) {
+			/* Entry caused IRQ */
+			type = (entry >> 29) & 0x3;
+			samc = (entry >> 24) & 0x1f;
+			if ( (type & 0x2) == 0 ) {
+				/* Transmit/Receive Data */
+				subadr = samc;
+				if ( type ) {
+					/* Receive */
+					listid = priv->subadrs[subadr].rxlistid;
+					hwbd = priv->sas_cpu[subadr].rxptr;
+					pisr = &priv->irq_rx[subadr];
+				} else {
+					/* Transmit */
+					listid = priv->subadrs[subadr].txlistid;
+					hwbd = priv->sas_cpu[subadr].txptr;
+					pisr = &priv->irq_tx[subadr];
+				}
+
+				/* Hardware BD address -> descriptor index */
+				index = ((unsigned int)hwbd - (unsigned int)
+					priv->bds_hw)/sizeof(struct gr1553rt_bd);
+
+				/* copy func and arg while owning lock */
+				SPIN_LOCK(&priv->devlock, irqflags);
+				isr = *pisr;
+				SPIN_UNLOCK(&priv->devlock, irqflags);
+
+				/* Call user ISR of RX/TX transfer */
+				if ( isr.func ) {
+					isr.func(
+						priv->lists[listid],
+						entry,
+						priv->swbds[index].this_next,
+						isr.data
+						);
+				}
+			} else if ( type == 0x2) {
+				/* Modecode */
+				mcode = samc;
+
+				/* copy func and arg while owning lock */
+				SPIN_LOCK(&priv->devlock, irqflags);
+				isrmc = priv->irq_mc;
+				SPIN_UNLOCK(&priv->devlock, irqflags);
+
+				/* Call user ISR of ModeCodes RX/TX */
+				if ( isrmc.func ) {
+					isrmc.func(
+						mcode,
+						entry,
+						isrmc.data
+						);
+				}
+			} else {
+				/* ERROR OF SOME KIND, EVLOG OVERWRITTEN? */
+				rtems_fatal_error_occurred(RTEMS_IO_ERROR);
+			}
+		}
+
+		/* Calc next entry posistion (the log wraps around) */
+		curr++;
+		if ( curr == priv->evlog_cpu_end )
+			curr = priv->evlog_cpu_base;
+
+	} while ( curr != last );
+}
+
+/* Report which list entry the hardware will use next for TX and/or RX
+ * on a sub address. Either output pointer may be NULL. Returns 0 on
+ * success, -1 on invalid sub address.
+ */
+int gr1553rt_indication(void *rt, int subadr, int *txeno, int *rxeno)
+{
+	struct gr1553rt_priv *priv = rt;
+	struct gr1553rt_sa *sa;
+	unsigned int hwbd, idx;
+
+	/* Validate sub address range */
+	if ( (subadr < 0) || (subadr > 31) )
+		return -1;
+
+	/* Sub address descriptor as seen by the CPU */
+	sa = &priv->sas_cpu[subadr];
+
+	/* TX indication requested? */
+	if ( txeno ) {
+		hwbd = sa->txptr;
+		/* Hardware BD address -> descriptor table index */
+		idx = ((unsigned int)hwbd - (unsigned int)&priv->bds_hw[0]) /
+			sizeof(struct gr1553rt_bd);
+		*txeno = priv->swbds[idx].this_next;
+	}
+
+	/* RX indication requested? */
+	if ( rxeno ) {
+		hwbd = sa->rxptr;
+		idx = ((unsigned int)hwbd - (unsigned int)&priv->bds_hw[0]) /
+			sizeof(struct gr1553rt_bd);
+		*rxeno = priv->swbds[idx].this_next;
+	}
+
+	return 0;
+}
+
+void gr1553rt_hw_stop(struct gr1553rt_priv *priv);
+
+void gr1553rt_register(void)
+{
+	/* Pull in the GR1553B core driver that the RT layer depends on */
+	gr1553_register();
+}
+
+/* Open a GR1553B RT device by minor number. Returns a driver handle
+ * used in all other RT calls, or NULL on failure.
+ */
+void *gr1553rt_open(int minor)
+{
+	struct drvmgr_dev **pdev = NULL;
+	struct gr1553rt_priv *priv = NULL;
+	struct amba_dev_info *ambadev;
+	struct ambapp_core *pnpinfo;
+
+	/* Allocate requested device */
+	pdev = gr1553_rt_open(minor);
+	if ( pdev == NULL )
+		goto fail;
+
+	priv = grlib_calloc(1, sizeof(*priv));
+	if ( priv == NULL )
+		goto fail;
+
+	/* Assign a device private to RT device */
+	priv->pdev = pdev;
+	(*pdev)->priv = priv;
+
+	/* Get device information from AMBA PnP information */
+	ambadev = (struct amba_dev_info *)(*pdev)->businfo;
+	pnpinfo = &ambadev->info;
+	priv->regs = (struct gr1553b_regs *)pnpinfo->apb_slv->start;
+
+	SPIN_INIT(&priv->devlock, "gr1553rt");
+
+	/* Start with default configuration */
+	/*priv->cfg = gr1553rt_default_config;*/
+
+	/* Unmask IRQs and so */
+	gr1553rt_hw_stop(priv);
+
+	/* Register ISR handler. hardware mask IRQ, so it is safe to unmask
+	 * at IRQ controller.
+	 */
+	if (drvmgr_interrupt_register(*priv->pdev, 0, "gr1553rt", gr1553rt_isr, priv))
+		goto fail;
+
+	return priv;
+
+fail:
+	if ( pdev ) {
+		/* Fix: clear the device private pointer before freeing
+		 * priv below, so the returned device does not keep a
+		 * dangling reference to freed memory.
+		 */
+		(*pdev)->priv = NULL;
+		gr1553_rt_close(pdev);
+	}
+	if ( priv ) {
+		/* The lock is always initialized before any failure that
+		 * reaches here with priv != NULL.
+		 */
+		SPIN_FREE(&priv->devlock);
+		free(priv);
+	}
+	return NULL;
+}
+
+/* Close an RT device handle returned by gr1553rt_open(): stops the RT
+ * if running, unregisters the ISR, frees driver-owned buffers and
+ * returns the device to the GR1553B layer.
+ */
+void gr1553rt_close(void *rt)
+{
+	struct gr1553rt_priv *priv = rt;
+
+	if ( priv->started ) {
+		gr1553rt_stop(priv);
+	}
+
+	/* Remove ISR handler */
+	drvmgr_interrupt_unregister(*priv->pdev, 0, gr1553rt_isr, priv);
+
+	/* Free dynamically allocated buffers if any */
+	gr1553rt_sw_free(priv);
+	SPIN_FREE(&priv->devlock);
+
+	/* Return RT/BC device */
+	gr1553_rt_close(priv->pdev);
+}
+
+/* Stop Hardware and disable IRQ */
+void gr1553rt_hw_stop(struct gr1553rt_priv *priv)
+{
+	uint32_t irqmask;
+
+	/* Disable RT */
+	GR1553RT_WRITE_REG(&priv->regs->rt_cfg, GR1553RT_KEY);
+
+	/* Stop BC if not already stopped: BC can not be used simultaneously
+	 * as the RT anyway
+	 */
+	GR1553RT_WRITE_REG(&priv->regs->bc_ctrl, GR1553BC_KEY | 0x0204);
+
+	/* Turn off RT IRQ generation */
+	irqmask=GR1553RT_READ_REG(&priv->regs->imask);
+	irqmask&=~(GR1553B_IRQEN_RTEVE|GR1553B_IRQEN_RTDE);
+	/* NOTE(review): the cleared mask is written to the IRQ (pending/
+	 * ack) register, not back to the imask register that was read
+	 * above - verify against the GR1553B manual whether
+	 * &priv->regs->imask was intended as destination here.
+	 */
+	GR1553RT_WRITE_REG(&priv->regs->irq, irqmask);
+}
+
+/* Release driver-owned buffers. Buffers supplied by the user through
+ * the configuration (non-NULL cfg pointers) are left untouched.
+ */
+void gr1553rt_sw_free(struct gr1553rt_priv *priv)
+{
+	/* Event log buffer */
+	if ( priv->evlog_buffer && (priv->cfg.evlog_buffer == NULL) ) {
+		free(priv->evlog_buffer);
+		priv->evlog_buffer = NULL;
+	}
+
+	/* Transfer descriptor table */
+	if ( priv->bd_buffer && (priv->cfg.bd_buffer == NULL) ) {
+		free(priv->bd_buffer);
+		priv->bd_buffer = NULL;
+	}
+
+#if (RTBD_MAX == 0)
+	/* Software descriptor table is always driver-allocated */
+	if ( priv->swbds ) {
+		free(priv->swbds);
+		priv->swbds = NULL;
+	}
+#endif
+
+	/* Sub address table */
+	if ( priv->satab_buffer && (priv->cfg.satab_buffer == NULL) ) {
+		free(priv->satab_buffer);
+		priv->satab_buffer = NULL;
+	}
+}
+
+/* Allocate and translate the driver buffers (event log, transfer
+ * descriptors, software descriptor table, sub address table)
+ * according to priv->cfg. Each cfg buffer pointer may be NULL
+ * (driver allocates), a CPU-local address, or a hardware/remote
+ * address marked by bit0 set. Returns 0 on success, -1 on OOM.
+ */
+static int gr1553rt_sw_alloc(struct gr1553rt_priv *priv)
+{
+	int size;
+
+	/* Allocate Event log */
+	if ((unsigned int)priv->cfg.evlog_buffer & 1) {
+		/* Translate Address from HARDWARE (REMOTE) to CPU-LOCAL */
+		priv->evlog_hw_base = (unsigned int *)
+			((unsigned int)priv->cfg.evlog_buffer & ~0x1);
+		priv->evlog_buffer = priv->cfg.evlog_buffer;
+		drvmgr_translate_check(
+			*priv->pdev,
+			DMAMEM_TO_CPU,
+			(void *)priv->evlog_hw_base,
+			(void **)&priv->evlog_cpu_base,
+			priv->cfg.evlog_size
+			);
+	} else {
+		if (priv->cfg.evlog_buffer == NULL) {
+			/* Twice the size so the log can be aligned below */
+			priv->evlog_buffer = grlib_malloc(
+				priv->cfg.evlog_size * 2);
+			if (priv->evlog_buffer == NULL)
+				return -1;
+		} else {
+			/* Addess already CPU-LOCAL */
+			priv->evlog_buffer = priv->cfg.evlog_buffer;
+		}
+		/* Align to SIZE bytes boundary */
+		priv->evlog_cpu_base = (unsigned int *)
+			(((unsigned int)priv->evlog_buffer +
+			(priv->cfg.evlog_size-1)) & ~(priv->cfg.evlog_size-1));
+
+		drvmgr_translate_check(
+			*priv->pdev,
+			CPUMEM_TO_DMA,
+			(void *)priv->evlog_cpu_base,
+			(void **)&priv->evlog_hw_base,
+			priv->cfg.evlog_size
+			);
+	}
+	/* NOTE(review): divisor is sizeof(unsigned int *) - correct only
+	 * where pointers and unsigned int have the same size (true on
+	 * 32-bit LEON targets); sizeof(unsigned int) looks intended.
+	 */
+	priv->evlog_cpu_end = priv->evlog_cpu_base +
+		priv->cfg.evlog_size/sizeof(unsigned int *);
+
+	/* Allocate Transfer Descriptors */
+	priv->bds_cnt = priv->cfg.bd_count;
+	size = priv->bds_cnt * sizeof(struct gr1553rt_bd);
+	if ((unsigned int)priv->cfg.bd_buffer & 1) {
+		/* Translate Address from HARDWARE (REMOTE) to CPU-LOCAL */
+		priv->bds_hw = (struct gr1553rt_bd *)
+			((unsigned int)priv->cfg.bd_buffer & ~0x1);
+		priv->bd_buffer = priv->cfg.bd_buffer;
+		drvmgr_translate_check(
+			*priv->pdev,
+			DMAMEM_TO_CPU,
+			(void *)priv->bds_hw,
+			(void **)&priv->bds_cpu,
+			size
+			);
+	} else {
+		if ( priv->cfg.bd_buffer == NULL ) {
+			/* Extra 15 bytes for 16-byte alignment below */
+			priv->bd_buffer = grlib_malloc(size + 0xf);
+			if (priv->bd_buffer == NULL)
+				return -1;
+		} else {
+			/* Addess already CPU-LOCAL */
+			priv->bd_buffer = priv->cfg.bd_buffer;
+		}
+		/* Align to 16 bytes boundary */
+		priv->bds_cpu = (struct gr1553rt_bd *)
+			(((unsigned int)priv->bd_buffer + 0xf) & ~0xf);
+
+		/* Translate from CPU address to hardware address */
+		drvmgr_translate_check(
+			*priv->pdev,
+			CPUMEM_TO_DMA,
+			(void *)priv->bds_cpu,
+			(void **)&priv->bds_hw,
+			size
+			);
+	}
+
+#if (RTBD_MAX == 0)
+	/* Allocate software description of the hardware descriptors */
+	priv->swbds = grlib_malloc(priv->cfg.bd_count * sizeof(*priv->swbds));
+	if ( priv->swbds == NULL ) {
+		return -1;
+	}
+#endif
+
+	/* Allocate Sub address table (32 entries of 16 bytes each) */
+	if ((unsigned int)priv->cfg.satab_buffer & 1) {
+		/* Translate Address from HARDWARE (REMOTE) to CPU-LOCAL */
+		priv->sas_hw = (struct gr1553rt_sa *)
+			((unsigned int)priv->cfg.satab_buffer & ~0x1);
+		priv->satab_buffer = priv->cfg.satab_buffer;
+		drvmgr_translate_check(
+			*priv->pdev,
+			DMAMEM_TO_CPU,
+			(void *)priv->sas_hw,
+			(void **)&priv->sas_cpu,
+			16 * 32);
+	} else {
+		if (priv->cfg.satab_buffer == NULL) {
+			/* Twice the size for 512-byte alignment below */
+			priv->satab_buffer = grlib_malloc((16 * 32) * 2);
+			if (priv->satab_buffer == NULL)
+				return -1;
+		} else {
+			/* Addess already CPU-LOCAL */
+			priv->satab_buffer = priv->cfg.satab_buffer;
+		}
+		/* Align to 512 bytes boundary */
+		priv->sas_cpu = (struct gr1553rt_sa *)
+			(((unsigned int)priv->satab_buffer + 0x1ff) &
+			~0x1ff);
+
+		/* Translate Address from CPU-LOCAL to HARDWARE (REMOTE) */
+		drvmgr_translate_check(
+			*priv->pdev,
+			CPUMEM_TO_DMA,
+			(void *)priv->sas_cpu,
+			(void **)&priv->sas_hw,
+			16 * 32);
+	}
+
+	return 0;
+}
+
+/* Reset the software state after (re)configuration: clears the sub
+ * address table, descriptors and event log, rebuilds the descriptor
+ * free-list and removes all user IRQ handlers and list bindings.
+ */
+void gr1553rt_sw_init(struct gr1553rt_priv *priv)
+{
+	int i;
+
+	/* Clear Sub Address table */
+	memset(priv->sas_cpu, 0, 512);
+
+	/* Clear Transfer descriptors */
+	memset(priv->bds_cpu, 0, priv->bds_cnt * 16);
+
+	/* Clear the Event log */
+	memset(priv->evlog_cpu_base, 0, priv->cfg.evlog_size);
+
+	/* Init descriptor allocation algorithm */
+	gr1553rt_bd_alloc_init(priv, priv->bds_cnt);
+
+	/* Init table used to convert from sub address to list.
+	 * Currently non assigned.
+	 */
+	for (i=0; i<32; i++) {
+		priv->subadrs[i].rxlistid = 0xff;
+		priv->subadrs[i].txlistid = 0xff;
+	}
+
+	/* Clear all previous IRQ handlers. Fix: clear both func and data
+	 * of both tables - the old loop only cleared irq_rx[].func and
+	 * irq_tx[].data, leaving stale TX handlers and RX arguments
+	 * installed after a reconfiguration.
+	 */
+	for (i=0; i<32; i++) {
+		priv->irq_rx[i].func = NULL;
+		priv->irq_rx[i].data = NULL;
+		priv->irq_tx[i].func = NULL;
+		priv->irq_tx[i].data = NULL;
+	}
+	priv->irq_err.func = NULL;
+	priv->irq_err.data = NULL;
+	priv->irq_mc.func = NULL;
+	priv->irq_mc.data = NULL;
+
+	/* Clear LIST to LISTID table */
+	for (i=0; i<RTLISTID_MAX; i++) {
+		priv->lists[i] = NULL;
+	}
+}
+
+/* Configure the RT core. Only allowed when the RT is stopped.
+ * Frees old driver buffers, validates cfg, allocates new buffers and
+ * resets the software state. Returns 0 on success, -1 on error.
+ */
+int gr1553rt_config(void *rt, struct gr1553rt_cfg *cfg)
+{
+	struct gr1553rt_priv *priv = rt;
+
+	if ( priv->started )
+		return -1;
+
+	/*** Free dynamically allocated buffers ***/
+
+	gr1553rt_sw_free(priv);
+
+	/*** Check new config ***/
+	if ( cfg->rtaddress > 30 )
+		return -1;
+	if ( (cfg->evlog_size & (cfg->evlog_size-1)) != 0)
+		return -1; /* SIZE: Not aligned to a power of 2 */
+	/* Fix: validate the buffer of the NEW configuration - the old
+	 * code checked priv->cfg.evlog_buffer, i.e. the previous
+	 * configuration's buffer, before the new cfg was copied in.
+	 * Bit0 is masked off since it only marks a hardware (remote)
+	 * address that is translated later.
+	 */
+	if ( (((unsigned int)cfg->evlog_buffer & ~0x1) &
+	     (cfg->evlog_size-1)) != 0 )
+		return -1; /* Buffer: Not aligned to size */
+#if (RTBD_MAX > 0)
+	if ( cfg->bd_count > RTBD_MAX )
+		return -1;
+#endif
+
+	/*** Make new config current ***/
+	priv->cfg = *cfg;
+
+	/*** Adapt to new config ***/
+
+	if ( gr1553rt_sw_alloc(priv) != 0 )
+		return -1;
+
+	gr1553rt_sw_init(priv);
+
+	return 0;
+}
+
+/* Start RT operation: programs the sub address table, mode code
+ * configuration, time tag resolution and event log registers, then
+ * enables RT interrupts and the RT core itself.
+ *
+ * Returns 0 on success, -1 if already started, -2 if not configured
+ * (buffers missing).
+ */
+int gr1553rt_start(void *rt)
+{
+	struct gr1553rt_priv *priv = rt;
+	SPIN_IRQFLAGS(irqflags);
+
+	if ( priv->started )
+		return -1;
+
+	/*** Initialize software Pointers and stuff ***/
+
+	if ( !priv->satab_buffer || !priv->bd_buffer || !priv->evlog_buffer )
+		return -2;
+
+	priv->evlog_cpu_next = priv->evlog_cpu_base;
+
+	/*** Initialize Registers ***/
+
+	/* Subaddress table base */
+	priv->regs->rt_tab = (unsigned int)priv->sas_hw;
+
+	/* Mode code configuration */
+	priv->regs->rt_mcctrl = priv->cfg.modecode;
+
+	/* RT Time Tag resolution */
+	priv->regs->rt_ttag = priv->cfg.time_res << 16;
+
+	/* Event LOG base and size (size mask in one's complement form) */
+	priv->regs->rt_evsz = ~(priv->cfg.evlog_size - 1);
+	priv->regs->rt_evlog = (unsigned int)priv->evlog_hw_base;
+	priv->regs->rt_evirq = 0;
+
+	/* Clear and old IRQ flag and Enable IRQ */
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+	priv->regs->irq = GR1553B_IRQ_RTEV|GR1553B_IRQ_RTD|GR1553B_IRQ_RTTE;
+	priv->regs->imask |= GR1553B_IRQEN_RTEVE | GR1553B_IRQEN_RTDE |
+				GR1553B_IRQEN_RTTEE;
+
+	/* Enable and Set RT address */
+	priv->regs->rt_cfg = GR1553RT_KEY |
+		(priv->cfg.rtaddress << GR1553B_RT_CFG_RTADDR_BIT) |
+		GR1553B_RT_CFG_RTEN;
+
+	/* Tell software RT is started */
+	priv->started = 1;
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return 0;
+}
+
+/* Stop the RT: disable the hardware and mark the driver not-started */
+void gr1553rt_stop(void *rt)
+{
+	struct gr1553rt_priv *priv = rt;
+	SPIN_IRQFLAGS(irqflags);
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+	gr1553rt_hw_stop(priv);	/* Hardware off */
+	priv->started = 0;	/* Software state */
+
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+}
+
+/* Schedule a descriptor list on a RT sub address, RX or TX direction.
+ * The list must have been set up with gr1553rt_list_init() first.
+ */
+void gr1553rt_sa_schedule(
+	void *rt,
+	int subadr,
+	int tx,
+	struct gr1553rt_list *list
+	)
+{
+	struct gr1553rt_priv *priv = rt;
+	unsigned short bdid;
+	struct gr1553rt_bd *bd;
+
+	if ( !list || (list->listid == -1) )
+		return;
+
+	/* Validate sub address: an out-of-range index would previously
+	 * write outside the subadrs/sas_cpu tables.
+	 */
+	if ( (subadr < 0) || (subadr > 31) )
+		return;
+
+	/* Get Hardware address of first descriptor in list */
+	bdid = list->bds[0];
+	if ( bdid == 0xffff )
+		return;
+	bd = &priv->bds_hw[bdid];
+
+	list->subadr = subadr;
+
+	/* Update Sub address table; TX is flagged in bit8 of list->subadr */
+	if ( tx ) {
+		list->subadr |= 0x100;
+		priv->subadrs[subadr].txlistid = list->listid;
+		priv->sas_cpu[subadr].txptr = (unsigned int)bd;
+	} else {
+		priv->subadrs[subadr].rxlistid = list->listid;
+		priv->sas_cpu[subadr].rxptr = (unsigned int)bd;
+	}
+}
+
+/* Update the sub address control word: bits selected by 'mask' are
+ * replaced with the corresponding bits of 'options'.
+ */
+void gr1553rt_sa_setopts(
+	void *rt,
+	int subadr,
+	unsigned int mask,
+	unsigned int options
+	)
+{
+	struct gr1553rt_priv *priv = rt;
+	unsigned int ctrl;
+
+	/* Fix: also reject negative sub addresses, which would index
+	 * before the sas_cpu table; only subadr > 31 was checked before.
+	 */
+	if ( (subadr < 0) || (subadr > 31) || (priv->sas_cpu == NULL) )
+		return;
+
+	ctrl = priv->sas_cpu[subadr].ctrl;
+	priv->sas_cpu[subadr].ctrl = (ctrl & ~mask) | options;
+}
+
+/* Read-modify-write the RT vector word register: bits selected by
+ * mask are replaced with the corresponding bits of words. A zero
+ * mask leaves the register untouched.
+ */
+void gr1553rt_set_vecword(void *rt, unsigned int mask, unsigned int words)
+{
+	struct gr1553rt_priv *priv = rt;
+	unsigned int old;
+
+	if ( mask == 0 )
+		return;
+
+	old = priv->regs->rt_statw;
+	priv->regs->rt_statw = (old & ~mask) | (words & mask);
+}
+
+/* Update the bits of the RT bus status register selected by mask */
+void gr1553rt_set_bussts(void *rt, unsigned int mask, unsigned int sts)
+{
+	struct gr1553rt_priv *priv = rt;
+	unsigned int old = priv->regs->rt_stat2;
+
+	priv->regs->rt_stat2 = (old & ~mask) | (sts & mask);
+}
+
+/* Sample the RT status registers into a coherent snapshot */
+void gr1553rt_status(void *rt, struct gr1553rt_status *status)
+{
+	struct gr1553rt_priv *priv = rt;
+	struct gr1553b_regs *regs = priv->regs;
+	unsigned int val;
+	SPIN_IRQFLAGS(irqflags);
+
+	/* Hold the device lock so all fields belong to one point in time */
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+	status->status = regs->rt_stat;
+	status->bus_status = regs->rt_stat2;
+
+	/* Sync register: time in upper half, sync word in lower half */
+	val = regs->rt_sync;
+	status->synctime = val >> 16;
+	status->syncword = val & 0xffff;
+
+	/* Time tag register: resolution upper half, time lower half */
+	val = regs->rt_ttag;
+	status->time_res = val >> 16;
+	status->time = val & 0xffff;
+
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+}
+
+/* Report which sub address (and direction) a list is scheduled on.
+ * Both outputs are set to -1 for an unscheduled list; either output
+ * pointer may be NULL.
+ */
+void gr1553rt_list_sa(struct gr1553rt_list *list, int *subadr, int *tx)
+{
+	int sa = -1, dir = -1;
+
+	if ( list->subadr != -1 ) {
+		sa = list->subadr & 0xff;		/* Sub address in low byte */
+		dir = (list->subadr & 0x100) >> 8;	/* TX flag in bit8 */
+	}
+
+	if ( subadr )
+		*subadr = sa;
+	if ( tx )
+		*tx = dir;
+}
+
+/* Copy up to 'max' unread event log entries (32-bit words) into dst.
+ * The log is a ring buffer written by hardware; this reads from the
+ * software read pointer (evlog_cpu_next) up to the hardware write
+ * position, possibly in two parts because of wrap-around.
+ *
+ * Returns the number of entries copied (0 when the log is empty).
+ */
+int gr1553rt_evlog_read(void *rt, unsigned int *dst, int max)
+{
+	struct gr1553rt_priv *priv = rt;
+	int cnt, top, bot, left;
+	unsigned int *hwpos;
+
+	/* Get address of hardware's current working entry */
+	hwpos = (unsigned int *)priv->regs->rt_evlog;
+
+	/* Convert into CPU address */
+	hwpos = (unsigned int *)
+		((unsigned int)hwpos - (unsigned int)priv->evlog_hw_base +
+		(unsigned int)priv->evlog_cpu_base);
+
+	if ( priv->evlog_cpu_next == hwpos )
+		return 0;
+
+	/* Unread data either lies in one piece (next..hwpos) or wraps:
+	 * top = words from next to end of buffer,
+	 * bot = words from buffer start to hwpos.
+	 */
+	if ( priv->evlog_cpu_next > hwpos ) {
+		top = (unsigned int)priv->evlog_cpu_end -
+			(unsigned int)priv->evlog_cpu_next;
+		bot = (unsigned int)hwpos - (unsigned int)priv->evlog_cpu_base;
+	} else {
+		top = (unsigned int)hwpos - (unsigned int)priv->evlog_cpu_next;
+		bot = 0;
+	}
+	/* Byte counts -> 32-bit word counts */
+	top = top / 4;
+	bot = bot / 4;
+
+	left = max;
+	if ( top > 0 ) {
+		if ( top > left ) {
+			cnt = left;
+		} else {
+			cnt = top;
+		}
+		memcpy(dst, priv->evlog_cpu_next, cnt*4);
+		dst += cnt;
+		left -= cnt;
+	}
+
+	if ( (bot > 0) && (left > 0) ) {
+		if ( bot > left ) {
+			cnt = left;
+		} else {
+			cnt = bot;
+		}
+		memcpy(dst, priv->evlog_cpu_base, cnt*4);
+		left -= cnt;
+	}
+
+	/* Advance the read pointer, wrapping back into the buffer */
+	cnt = max - left;
+	priv->evlog_cpu_next += cnt;
+	if ( priv->evlog_cpu_next >= priv->evlog_cpu_end ) {
+		priv->evlog_cpu_next = (unsigned int *)
+			((unsigned int)priv->evlog_cpu_base +
+			((unsigned int)priv->evlog_cpu_next -
+			 (unsigned int)priv->evlog_cpu_end ));
+	}
+
+	return max - left;
+}
diff --git a/bsps/shared/grlib/amba/ahbstat.c b/bsps/shared/grlib/amba/ahbstat.c
new file mode 100644
index 0000000000..af3d778feb
--- /dev/null
+++ b/bsps/shared/grlib/amba/ahbstat.c
@@ -0,0 +1,239 @@
+/* AHB Status register driver
+ *
+ * COPYRIGHT (c) 2009 - 2017.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <inttypes.h>
+#include <string.h>
+#include <rtems.h>
+#include <rtems/bspIo.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+
+#include <grlib/ahbstat.h>
+
+#include <grlib/grlib_impl.h>
+
+#define REG_WRITE(addr, val) (*(volatile uint32_t *)(addr) = (uint32_t)(val))
+#define REG_READ(addr) (*(volatile uint32_t *)(addr))
+
+void ahbstat_isr(void *arg);
+
+/* AHB fail interrupt callback to user. This function is declared weak so that
+ * the user can define a function pointer variable containing the address
+ * responsible for handling errors
+ *
+ * minor Index of AHBSTAT hardware
+ * regs Register address of AHBSTAT
+ * status AHBSTAT status register at IRQ
+ * failing_address AHBSTAT Failing address register at IRQ
+ *
+ * * User return
+ * 0: print error onto terminal with printk and reenable AHBSTAT
+ * 1: just re-enable AHBSTAT
+ * 2: just print error
+ * 3: do nothing, let user do custom handling
+ */
+int (*ahbstat_error)(
+ int minor,
+ struct ahbstat_regs *regs,
+ uint32_t status,
+ uint32_t failing_address
+ ) __attribute__((weak)) = NULL;
+
+#define AHBSTAT_STS_CE_BIT 9
+#define AHBSTAT_STS_NE_BIT 8
+#define AHBSTAT_STS_HW_BIT 7
+#define AHBSTAT_STS_HM_BIT 3
+#define AHBSTAT_STS_HS_BIT 0
+
+#define AHBSTAT_STS_CE (1 << AHBSTAT_STS_CE_BIT)
+#define AHBSTAT_STS_NE (1 << AHBSTAT_STS_NE_BIT)
+#define AHBSTAT_STS_HW (1 << AHBSTAT_STS_HW_BIT)
+#define AHBSTAT_STS_HM (0xf << AHBSTAT_STS_HM_BIT)
+#define AHBSTAT_STS_HS (0x7 << AHBSTAT_STS_HS_BIT)
+
+enum { DEVNAME_LEN = 9 };	/* Fits "ahbstatN" + NUL */
+/* Per-device state of one AHBSTAT core instance */
+struct ahbstat_priv {
+	struct drvmgr_dev *dev;
+	struct ahbstat_regs *regs;
+	char devname[DEVNAME_LEN];
+	int minor;
+	/* Cached error */
+	uint32_t last_status;
+	uint32_t last_address;
+	/* Spin-lock ISR protection */
+	SPIN_DECLARE(devlock);
+};
+
+static int ahbstat_init2(struct drvmgr_dev *dev);
+
+/* Driver operations: only second-stage init is implemented */
+struct drvmgr_drv_ops ahbstat_ops =
+{
+	.init = {NULL, ahbstat_init2, NULL, NULL},
+	.remove = NULL,
+	.info = NULL
+};
+
+/* AMBA PnP IDs this driver binds to */
+struct amba_dev_id ahbstat_ids[] =
+{
+	{VENDOR_GAISLER, GAISLER_AHBSTAT},
+	{0, 0}		/* Mark end of table */
+};
+
+/* Driver description registered with the driver manager */
+struct amba_drv_info ahbstat_drv_info =
+{
+	{
+		DRVMGR_OBJ_DRV,			/* Driver */
+		NULL,				/* Next driver */
+		NULL,				/* Device list */
+		DRIVER_AMBAPP_GAISLER_AHBSTAT_ID,/* Driver ID */
+		"AHBSTAT_DRV",			/* Driver Name */
+		DRVMGR_BUS_TYPE_AMBAPP,		/* Bus Type */
+		&ahbstat_ops,
+		NULL,				/* Funcs */
+		0,				/* No devices yet */
+		sizeof(struct ahbstat_priv),
+	},
+	&ahbstat_ids[0]
+};
+
+void ahbstat_register_drv (void)
+{
+	/* Hand the AHBSTAT driver description to the driver manager */
+	drvmgr_drv_register(&ahbstat_drv_info.general);
+}
+
+/* Second-stage driver-manager init: map registers from AMBA PnP info,
+ * set up the device lock, clear the status register and install the
+ * interrupt handler.
+ */
+static int ahbstat_init2(struct drvmgr_dev *dev)
+{
+	struct ahbstat_priv *priv;
+	struct amba_dev_info *ambadev;
+
+	priv = dev->priv;
+	if (!priv)
+		return DRVMGR_NOMEM;
+	priv->dev = dev;
+
+	/* Get device information from AMBA PnP information */
+	ambadev = (struct amba_dev_info *)dev->businfo;
+	if (ambadev == NULL)
+		return DRVMGR_FAIL;
+	priv->regs = (struct ahbstat_regs *)ambadev->info.apb_slv->start;
+	priv->minor = dev->minor_drv;
+
+	/* Name is "ahbstat" + minor digit.
+	 * NOTE(review): adding minor to '0' only yields a digit for
+	 * minor 0..9 - confirm more than 10 instances cannot occur.
+	 */
+	strncpy(&priv->devname[0], "ahbstat0", DEVNAME_LEN);
+	priv->devname[7] += priv->minor;
+	/*
+	 * Initialize spinlock for AHBSTAT Device. It is used to protect user
+	 * API calls involivng priv structure from updates in ISR.
+	 */
+	SPIN_INIT(&priv->devlock, priv->devname);
+
+	/* Initialize hardware */
+	REG_WRITE(&priv->regs->status, 0);
+
+	/* Install IRQ handler */
+	drvmgr_interrupt_register(dev, 0, priv->devname, ahbstat_isr, priv);
+
+	return DRVMGR_OK;
+}
+
+/* AHBSTAT interrupt handler: caches status and failing address for
+ * ahbstat_last_error(), then lets the optional user hook decide
+ * whether to print the error and/or re-arm the core.
+ */
+void ahbstat_isr(void *arg)
+{
+	struct ahbstat_priv *priv = arg;
+	uint32_t fadr, status;
+	int rc;
+	SPIN_ISR_IRQFLAGS(lock_context);
+
+	/* Get hardware status; NE (new error) must be set for a valid IRQ */
+	status = REG_READ(&priv->regs->status);
+	if ((status & AHBSTAT_STS_NE) == 0)
+		return;
+
+	/* IRQ generated by AHBSTAT core... handle it here */
+
+	/* Get Failing address */
+	fadr = REG_READ(&priv->regs->failing);
+
+	/* Cache the error for later retrieval by the user API */
+	SPIN_LOCK(&priv->devlock, lock_context);
+	priv->last_status = status;
+	priv->last_address = fadr;
+	SPIN_UNLOCK(&priv->devlock, lock_context);
+
+	/* Let user handle error, default to print the error and reenable HW
+	 *
+	 * User return
+	 * 0: print error and reenable AHBSTAT
+	 * 1: just reenable AHBSTAT
+	 * 2: just print error
+	 * 3: do nothing
+	 */
+	rc = 0;
+	if (ahbstat_error != NULL)
+		rc = ahbstat_error(priv->minor, priv->regs, status, fadr);
+
+	if ((rc & 0x1) == 0) {
+		printk("\n### AHBSTAT: %s %s error of size %" PRId32
+		       " by master %" PRId32 " at 0x%08" PRIx32 "\n",
+			status & AHBSTAT_STS_CE ? "single" : "non-correctable",
+			status & AHBSTAT_STS_HW ? "write" : "read",
+			(status & AHBSTAT_STS_HS) >> AHBSTAT_STS_HS_BIT,
+			(status & AHBSTAT_STS_HM) >> AHBSTAT_STS_HM_BIT,
+			fadr);
+	}
+
+	if ((rc & 0x2) == 0) {
+		/* Clearing status re-arms the core to trigger new IRQs */
+		REG_WRITE(&priv->regs->status, 0);
+	}
+}
+
+/* Get Last received AHB Error
+ *
+ * Return
+ * 0: No error received
+ * 1: Error Received, last status and address stored into argument pointers
+ * -1: No such AHBSTAT device
+ */
+int ahbstat_last_error(int minor, uint32_t *status, uint32_t *address)
+{
+	struct drvmgr_dev *dev;
+	struct ahbstat_priv *priv;
+	uint32_t sts, adr;
+	SPIN_IRQFLAGS(lock_context);
+
+	/* Look up the AHBSTAT instance; fail if minor does not exist */
+	if (drvmgr_get_dev(&ahbstat_drv_info.general, minor, &dev))
+		return -1;
+	priv = (struct ahbstat_priv *)dev->priv;
+
+	/* Snapshot the values cached by the ISR under the device lock */
+	SPIN_LOCK_IRQ(&priv->devlock, lock_context);
+	sts = REG_READ(&priv->last_status);
+	adr = REG_READ(&priv->last_address);
+	SPIN_UNLOCK_IRQ(&priv->devlock, lock_context);
+
+	*status = sts;
+	*address = adr;
+
+	/* NE bit set means an error has been captured */
+	return (sts & AHBSTAT_STS_NE) >> AHBSTAT_STS_NE_BIT;
+}
+
+/* Get AHBSTAT registers address from minor. NULL returned if no such device */
+struct ahbstat_regs *ahbstat_get_regs(int minor)
+{
+	struct drvmgr_dev *dev;
+
+	if (drvmgr_get_dev(&ahbstat_drv_info.general, minor, &dev) != 0)
+		return NULL;
+
+	return ((struct ahbstat_priv *)dev->priv)->regs;
+}
diff --git a/bsps/shared/grlib/amba/ambapp.c b/bsps/shared/grlib/amba/ambapp.c
new file mode 100644
index 0000000000..69018f47e7
--- /dev/null
+++ b/bsps/shared/grlib/amba/ambapp.c
@@ -0,0 +1,457 @@
+/*
+ * AMBA Plug & Play routines
+ *
+ * COPYRIGHT (c) 2011.
+ * Aeroflex Gaisler.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <grlib/ambapp.h>
+#include <bsp.h>
+
+#include <grlib/grlib_impl.h>
+
+#define AMBA_CONF_AREA 0xff000
+#define AMBA_AHB_SLAVE_CONF_AREA (1 << 11)
+#define AMBA_APB_SLAVES 16
+
+/* Allocate one AMBA device
+ *
+ * The generic struct ambapp_dev is allocated together with the trailing
+ * type-specific info (struct ambapp_apb_info for APB slaves, struct
+ * ambapp_ahb_info for AHB masters/slaves) in a single zeroed allocation,
+ * so the device can be freed as one unit. Returns NULL on allocation
+ * failure.
+ */
+static struct ambapp_dev *ambapp_alloc_dev_struct(int dev_type)
+{
+ struct ambapp_dev *dev;
+ size_t size = sizeof(*dev);
+
+ if (dev_type == DEV_APB_SLV)
+ size += sizeof(struct ambapp_apb_info);
+ else
+ size += sizeof(struct ambapp_ahb_info); /* AHB */
+ dev = grlib_calloc(1, size);
+ if (dev != NULL)
+ dev->dev_type = dev_type;
+ return dev;
+}
+
+/* Translate an address from a remote bus address space into the local
+ * (CPU-visible) address space using the bus memory map table. A NULL
+ * mmaps table means identity mapping.
+ *
+ * NOTE(review): when no map window covers the address, the sentinel
+ * value 1 is returned instead of the input address — presumably callers
+ * interpret this as "unmapped"; confirm against the call sites.
+ */
+static unsigned int
+ambapp_addr_from (struct ambapp_mmap *mmaps, unsigned int address)
+{
+ /* no translation? */
+ if (!mmaps)
+ return address;
+
+ /* mmaps table is terminated by an entry with size == 0 */
+ while (mmaps->size) {
+ if ((address >= mmaps->remote_adr) &&
+ (address <= (mmaps->remote_adr + (mmaps->size - 1)))) {
+ return (address - mmaps->remote_adr) + mmaps->local_adr;
+ }
+ mmaps++;
+ }
+ return 1;
+}
+
+/* Initialize an AHB master/slave device node from its raw plug&play
+ * record.
+ *
+ * ioarea: I/O area base of the bus the device sits on (used to resolve
+ *         AHB I/O BARs which are relative to the I/O area)
+ * mmaps:  address translation table for the bus (may be NULL)
+ * ahb:    raw PnP configuration record read from the config area
+ * dev:    device node to fill in (allocated by caller)
+ * ahbidx: index of the AHB bus this device belongs to
+ */
+static void ambapp_ahb_dev_init(
+ unsigned int ioarea,
+ struct ambapp_mmap *mmaps,
+ struct ambapp_pnp_ahb *ahb,
+ struct ambapp_dev *dev,
+ int ahbidx
+ )
+{
+ int bar;
+ struct ambapp_ahb_info *ahb_info;
+ unsigned int addr, mask, mbar;
+
+ /* Setup device struct */
+ dev->vendor = ambapp_pnp_vendor(ahb->id);
+ dev->device = ambapp_pnp_device(ahb->id);
+ ahb_info = DEV_TO_AHB(dev);
+ ahb_info->ver = ambapp_pnp_ver(ahb->id);
+ ahb_info->irq = ambapp_pnp_irq(ahb->id);
+ ahb_info->ahbidx = ahbidx;
+ ahb_info->custom[0] = (unsigned int)ahb->custom[0];
+ ahb_info->custom[1] = (unsigned int)ahb->custom[1];
+ ahb_info->custom[2] = (unsigned int)ahb->custom[2];
+
+ /* Memory BARs: decode start address, size mask and type of each of
+  * the four bank address registers. An mbar of 0 means the BAR is
+  * unused.
+  */
+ for (bar=0; bar<4; bar++) {
+ mbar = ahb->mbar[bar];
+ if (mbar == 0) {
+ addr = 0;
+ mask = 0;
+ } else {
+ addr = ambapp_pnp_start(mbar);
+ if (ambapp_pnp_mbar_type(mbar) == AMBA_TYPE_AHBIO) {
+ /* AHB I/O area is relative to IO_AREA */
+ addr = AMBA_TYPE_AHBIO_ADDR(addr, ioarea);
+ mask = (((unsigned int)(ambapp_pnp_mbar_mask(~mbar) << 8) | 0xff)) + 1;
+ } else {
+ /* AHB memory area, absolute address */
+ addr = ambapp_addr_from(mmaps, addr);
+ mask = (~((unsigned int)(ambapp_pnp_mbar_mask(mbar) << 20))) + 1;
+ }
+ }
+ ahb_info->start[bar] = addr;
+ ahb_info->mask[bar] = mask;
+ ahb_info->type[bar] = ambapp_pnp_mbar_type(mbar);
+ }
+}
+
+/* Initialize an APB slave device node from its raw plug&play record.
+ *
+ * base:   APB bridge base address (APB I/O BARs are relative to it)
+ * mmaps:  address translation table (unused here, kept for symmetry
+ *         with ambapp_ahb_dev_init)
+ * apb:    raw PnP configuration record read from the config area
+ * dev:    device node to fill in (allocated by caller)
+ * ahbidx: index of the AHB bus the APB bridge sits on
+ */
+static void ambapp_apb_dev_init(
+ unsigned int base,
+ struct ambapp_mmap *mmaps,
+ struct ambapp_pnp_apb *apb,
+ struct ambapp_dev *dev,
+ int ahbidx
+ )
+{
+ struct ambapp_apb_info *apb_info;
+
+ /* Setup device struct */
+ dev->vendor = ambapp_pnp_vendor(apb->id);
+ dev->device = ambapp_pnp_device(apb->id);
+ apb_info = DEV_TO_APB(dev);
+ apb_info->ver = ambapp_pnp_ver(apb->id);
+ apb_info->irq = ambapp_pnp_irq(apb->id);
+ apb_info->ahbidx = ahbidx;
+ apb_info->start = ambapp_pnp_apb_start(apb->iobar, base);
+ apb_info->mask = ambapp_pnp_apb_mask(apb->iobar);
+}
+
+/* Register a new AHB bus, identified by its I/O area address, in the
+ * bus description. Returns the allocated bus index, or -1 when the bus
+ * was already registered or all AHB_BUS_MAX slots are in use.
+ */
+static int ambapp_add_ahbbus(
+ struct ambapp_bus *abus,
+ unsigned int ioarea
+ )
+{
+ int i;
+ for (i=0; i<AHB_BUS_MAX; i++) {
+ if (abus->ahbs[i].ioarea == 0) {
+ /* First free slot: claim it for this bus */
+ abus->ahbs[i].ioarea = ioarea;
+ return i;
+ } else if (abus->ahbs[i].ioarea == ioarea) {
+ /* Bus already added */
+ return -1;
+ }
+ }
+ return -1;
+}
+
+/* Internal AMBA Scanning Function
+ *
+ * Scans the plug&play configuration area of the AHB bus whose I/O area
+ * is 'ioarea' and links all found devices into the device graph rooted
+ * at *root. AHB->AHB bridges (AHB2AHB, L2CACHE, GRIOMMU) are followed
+ * recursively; AHB->APB bridges (APBMST) get their APB slaves added as
+ * children. All PnP reads go through 'memfunc' so buses behind e.g.
+ * PCI can be scanned as well.
+ *
+ * Returns 0 on success (also when the bus was already scanned) and -1
+ * on allocation failure.
+ */
+static int ambapp_scan2(
+ struct ambapp_bus *abus,
+ unsigned int ioarea,
+ ambapp_memcpy_t memfunc,
+ struct ambapp_dev *parent,
+ struct ambapp_dev **root
+ )
+{
+ struct ambapp_pnp_ahb *ahb, ahb_buf;
+ struct ambapp_pnp_apb *apb, apb_buf;
+ struct ambapp_dev *dev, *prev, *prevapb, *apbdev;
+ struct ambapp_ahb_info *ahb_info;
+ int maxloops = 64;
+ unsigned int apbbase, bridge_adr;
+ int i, j, ahbidx;
+
+ *root = NULL;
+
+ if (parent) {
+ /* scan first bus for 64 devices, rest for 16 devices */
+ maxloops = 16;
+ }
+
+ ahbidx = ambapp_add_ahbbus(abus, ioarea);
+ if (ahbidx < 0) {
+ /* Bus already scanned, stop */
+ return 0;
+ }
+
+ /* 'prev' tracks the tail of the sibling list being built; it starts
+  * at the parent so the first device's ->prev links back to the
+  * bridge that leads to this bus.
+  */
+ prev = parent;
+
+ /* AHB MASTERS */
+ ahb = (struct ambapp_pnp_ahb *) (ioarea | AMBA_CONF_AREA);
+ for (i = 0; i < maxloops; i++, ahb++) {
+ memfunc(&ahb_buf, ahb, sizeof(struct ambapp_pnp_ahb), abus);
+ if (ahb_buf.id == 0)
+ continue;
+
+ /* An AHB device present here */
+ dev = ambapp_alloc_dev_struct(DEV_AHB_MST);
+ if (!dev)
+ return -1;
+
+ ambapp_ahb_dev_init(ioarea, abus->mmaps, &ahb_buf, dev, ahbidx);
+
+ if (*root == NULL)
+ *root = dev;
+
+ if (prev != parent)
+ prev->next = dev;
+ dev->prev = prev;
+ prev = dev;
+ }
+
+ /* AHB SLAVES */
+ ahb = (struct ambapp_pnp_ahb *)
+ (ioarea | AMBA_CONF_AREA | AMBA_AHB_SLAVE_CONF_AREA);
+ for (i = 0; i < maxloops; i++, ahb++) {
+ memfunc(&ahb_buf, ahb, sizeof(struct ambapp_pnp_ahb), abus);
+ if (ahb_buf.id == 0)
+ continue;
+
+ /* An AHB device present here */
+ dev = ambapp_alloc_dev_struct(DEV_AHB_SLV);
+ if (!dev)
+ return -1;
+
+ ambapp_ahb_dev_init(ioarea, abus->mmaps, &ahb_buf, dev, ahbidx);
+
+ if (*root == NULL)
+ *root = dev;
+
+ if (prev != parent)
+ prev->next = dev;
+ dev->prev = prev;
+ prev = dev;
+
+ ahb_info = DEV_TO_AHB(dev);
+
+ /* Is it a AHB/AHB Bridge ? */
+ if (((dev->device == GAISLER_AHB2AHB) &&
+ (dev->vendor == VENDOR_GAISLER) && (ahb_info->ver > 0)) ||
+ ((dev->device == GAISLER_L2CACHE) &&
+ (dev->vendor == VENDOR_GAISLER)) ||
+ ((dev->device == GAISLER_GRIOMMU) &&
+ (dev->vendor == VENDOR_GAISLER))) {
+ /* AHB/AHB Bridge Found, recurse down the
+ * Bridge
+ */
+ if (ahb_info->custom[1] != 0) {
+ /* custom[1] holds the remote bus I/O area;
+  * translate it to a local address first */
+ bridge_adr = ambapp_addr_from(abus->mmaps,
+ ahb_info->custom[1]);
+ /* Scan next bus if not already scanned */
+ if (ambapp_scan2(abus, bridge_adr, memfunc, dev,
+ &dev->children))
+ return -1;
+ }
+ } else if ((dev->device == GAISLER_APBMST) &&
+ (dev->vendor == VENDOR_GAISLER)) {
+ /* AHB/APB Bridge Found, add the APB devices to this
+ * AHB Slave's children
+ */
+ prevapb = dev;
+ /* APB config area is relative to the bridge's first BAR */
+ apbbase = ahb_info->start[0];
+
+ /* APB SLAVES */
+ apb = (struct ambapp_pnp_apb *)
+ (apbbase | AMBA_CONF_AREA);
+ for (j=0; j<AMBA_APB_SLAVES; j++, apb++) {
+ memfunc(&apb_buf, apb, sizeof(*apb), abus);
+ if (apb_buf.id == 0)
+ continue;
+
+ apbdev = ambapp_alloc_dev_struct(DEV_APB_SLV);
+ if (!apbdev)
+ return -1;
+
+ ambapp_apb_dev_init(apbbase, abus->mmaps,
+ &apb_buf, apbdev, ahbidx);
+
+ if (prevapb != dev)
+ prevapb->next = apbdev;
+ else
+ dev->children = apbdev;
+ apbdev->prev = prevapb;
+ prevapb = apbdev;
+ }
+ }
+ }
+
+ /* Remember first AHB MST/SLV device on bus and Parent Bridge */
+ abus->ahbs[ahbidx].dev = *root;
+ abus->ahbs[ahbidx].bridge = parent;
+
+ return 0;
+}
+
+/* Build AMBA Plug & Play device graph
+ *
+ * abus:    bus description to initialize and fill in (fully reset here)
+ * ioarea:  I/O area address of the top-level (root) AHB bus
+ * memfunc: access routine for reading PnP records; NULL selects plain
+ *          memcpy() for directly addressable buses
+ * mmaps:   optional address translation table, terminated by size == 0
+ *
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int ambapp_scan(
+ struct ambapp_bus *abus,
+ unsigned int ioarea,
+ ambapp_memcpy_t memfunc,
+ struct ambapp_mmap *mmaps
+ )
+{
+ memset(abus, 0, sizeof(*abus));
+ abus->mmaps = mmaps;
+
+ /* Default to memcpy() */
+ if (!memfunc)
+ memfunc = (ambapp_memcpy_t)memcpy;
+
+ return ambapp_scan2(abus, ioarea, memfunc, NULL, &abus->root);
+}
+
+/* Match search options against device
+ *
+ * A device matches when all three criteria hold:
+ *  - its type (AHB MST / AHB SLV / APB SLV) is selected in 'options'
+ *  - vendor and device IDs match, where -1 acts as a wildcard
+ *  - its allocation state (free/allocated) is selected in 'options'
+ * Returns 1 on match, 0 otherwise.
+ */
+static int ambapp_dev_match_options(struct ambapp_dev *dev, unsigned int options, int vendor, int device)
+{
+ if ((((options & (OPTIONS_ALL_DEVS)) == OPTIONS_ALL_DEVS) || /* TYPE */
+ ((options & OPTIONS_AHB_MSTS) && (dev->dev_type == DEV_AHB_MST)) ||
+ ((options & OPTIONS_AHB_SLVS) && (dev->dev_type == DEV_AHB_SLV)) ||
+ ((options & OPTIONS_APB_SLVS) && (dev->dev_type == DEV_APB_SLV))) &&
+ ((vendor == -1) || (vendor == dev->vendor)) && /* VENDOR/DEV ID */
+ ((device == -1) || (device == dev->device)) &&
+ (((options & OPTIONS_ALL) == OPTIONS_ALL) || /* Allocated State */
+ ((options & OPTIONS_FREE) && DEV_IS_FREE(dev)) ||
+ ((options & OPTIONS_ALLOCATED) && DEV_IS_ALLOCATED(dev)))) {
+ return 1;
+ }
+ return 0;
+}
+
+/* If device is an APB bridge all devices on the APB bridge are processed:
+ * each matching APB slave child is passed to 'func' together with its
+ * index on the APB bus. A non-zero return from 'func' stops the
+ * iteration and is propagated to the caller; 0 means the whole list was
+ * visited (or the device was not an APB bridge).
+ */
+static int ambapp_for_each_apb(
+ struct ambapp_dev *dev,
+ unsigned int options,
+ int vendor,
+ int device,
+ ambapp_func_t func,
+ void *arg)
+{
+ int index, ret;
+ struct ambapp_dev *apbslv;
+
+ ret = 0;
+ /* An APB bridge is recognized by having APB-slave children */
+ if (dev->children && (dev->children->dev_type == DEV_APB_SLV)) {
+ /* Found a APB Bridge */
+ index = 0;
+ apbslv = dev->children;
+ while (apbslv) {
+ if (ambapp_dev_match_options(apbslv, options,
+ vendor, device) == 1) {
+ ret = func(apbslv, index, arg);
+ if (ret != 0)
+ break; /* Signalled stopped */
+ }
+ index++;
+ apbslv = apbslv->next;
+ }
+ }
+
+ return ret;
+}
+
+/* Traverse the prescanned device information
+ *
+ * Calls 'func' for every device matching options/vendor/device starting
+ * at 'root'. Without OPTIONS_DEPTH_FIRST the traversal is breadth
+ * first (see order below); with OPTIONS_DEPTH_FIRST each bridge is
+ * descended into as soon as it is encountered. A non-zero return from
+ * 'func' aborts the traversal and is returned to the caller.
+ */
+static int ambapp_for_each_dev(
+ struct ambapp_dev *root,
+ unsigned int options,
+ int vendor,
+ int device,
+ ambapp_func_t func,
+ void *arg)
+{
+ struct ambapp_dev *dev;
+ int ahb_slave = 0;
+ int index, ret;
+
+ /* Start at device 'root' and process downwards.
+ *
+ * Breadth first search, search order
+ * 1. AHB MSTS
+ * 2. AHB SLVS
+ * 3. APB SLVS on primary bus
+ * 4. AHB/AHB secondary... -> step to 1.
+ */
+
+ /* AHB MST / AHB SLV */
+ if (options & (OPTIONS_AHB_MSTS|OPTIONS_AHB_SLVS|OPTIONS_DEPTH_FIRST)) {
+ index = 0;
+ dev = root;
+ while (dev) {
+ /* The sibling list holds masters first, then slaves; the
+  * per-type index restarts at the first slave.
+  */
+ if ((dev->dev_type == DEV_AHB_SLV) && !ahb_slave) {
+ /* First AHB Slave */
+ ahb_slave = 1;
+ index = 0;
+ }
+
+ /* Conditions must be fullfilled for function to be
+ * called
+ */
+ if (ambapp_dev_match_options(dev, options, vendor, device) == 1) {
+ /* Correct device and vendor ID */
+ ret = func(dev, index, arg);
+ if (ret != 0)
+ return ret; /* Signalled stopped */
+ }
+
+ if ((options & OPTIONS_DEPTH_FIRST) && (options & OPTIONS_APB_SLVS)) {
+ /* Check is APB bridge, and process all APB
+ * Slaves in that case
+ */
+ ret = ambapp_for_each_apb(dev, options, vendor, device, func, arg);
+ if (ret != 0)
+ return ret; /* Signalled stopped */
+ }
+
+ if (options & OPTIONS_DEPTH_FIRST) {
+ if (dev->children && (dev->children->dev_type != DEV_APB_SLV)) {
+ /* Found AHB Bridge, recurse */
+ ret = ambapp_for_each_dev(dev->children, options, vendor, device,
+ func, arg);
+ if (ret != 0)
+ return ret;
+ }
+ }
+
+ index++;
+ dev = dev->next;
+ }
+ }
+
+ /* Find APB Bridges (breadth-first pass 3) */
+ if ((options & OPTIONS_APB_SLVS) && !(options & OPTIONS_DEPTH_FIRST)) {
+ dev = root;
+ while (dev) {
+ /* Check is APB bridge, and process all APB Slaves in
+ * that case
+ */
+ ret = ambapp_for_each_apb(dev, options, vendor, device, func, arg);
+ if (ret != 0)
+ return ret; /* Signalled stopped */
+ dev = dev->next;
+ }
+ }
+
+ /* Find AHB Bridges (breadth-first pass 4) */
+ if (!(options & OPTIONS_DEPTH_FIRST)) {
+ dev = root;
+ while (dev) {
+ if (dev->children && (dev->children->dev_type != DEV_APB_SLV)) {
+ /* Found AHB Bridge, recurse */
+ ret = ambapp_for_each_dev(dev->children, options, vendor, device,
+ func, arg);
+ if (ret != 0)
+ return ret;
+ }
+ dev = dev->next;
+ }
+ }
+
+ return 0;
+}
+
+/* Public entry point: traverse the whole device graph of 'abus',
+ * calling 'func' for each device matching options/vendor/device.
+ * Returns the first non-zero value returned by 'func', or 0.
+ */
+int ambapp_for_each(
+ struct ambapp_bus *abus,
+ unsigned int options,
+ int vendor,
+ int device,
+ ambapp_func_t func,
+ void *arg)
+{
+ return ambapp_for_each_dev(abus->root, options, vendor, device, func, arg);
+}
diff --git a/bsps/shared/grlib/amba/ambapp_alloc.c b/bsps/shared/grlib/amba/ambapp_alloc.c
new file mode 100644
index 0000000000..96fcb7961b
--- /dev/null
+++ b/bsps/shared/grlib/amba/ambapp_alloc.c
@@ -0,0 +1,25 @@
+/*
+ * AMBA Plug & Play routines
+ *
+ * COPYRIGHT (c) 2011
+ * Aeroflex Gaisler
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <grlib/ambapp.h>
+
+/* Claim a device for 'owner'. Fails with -1 if the device is already
+ * owned by another driver, returns 0 on success.
+ */
+int ambapp_alloc_dev(struct ambapp_dev *dev, void *owner)
+{
+ if (dev->owner)
+ return -1;
+ dev->owner = owner;
+ return 0;
+}
+
+/* Release a previously claimed device so it can be allocated again */
+void ambapp_free_dev(struct ambapp_dev *dev)
+{
+ dev->owner = 0;
+}
diff --git a/bsps/shared/grlib/amba/ambapp_count.c b/bsps/shared/grlib/amba/ambapp_count.c
new file mode 100644
index 0000000000..9da4d93a19
--- /dev/null
+++ b/bsps/shared/grlib/amba/ambapp_count.c
@@ -0,0 +1,23 @@
+/*
+ * AMBA Plug & Play routines
+ *
+ * COPYRIGHT (c) 2011
+ * Aeroflex Gaisler
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <grlib/ambapp.h>
+
+/* Get number of devices matching search options
+ *
+ * Implemented by letting ambapp_find_by_idx() decrement the counter for
+ * every match; the start value 10000 is an upper bound on the number of
+ * devices, so the count is the difference from that bound.
+ */
+int ambapp_dev_count(struct ambapp_bus *abus, unsigned int options,
+ int vendor, int device)
+{
+ int count = 10000;
+
+ ambapp_for_each(abus, options, vendor, device, ambapp_find_by_idx, &count);
+
+ return 10000 - count;
+}
diff --git a/bsps/shared/grlib/amba/ambapp_depth.c b/bsps/shared/grlib/amba/ambapp_depth.c
new file mode 100644
index 0000000000..2fe0b142e9
--- /dev/null
+++ b/bsps/shared/grlib/amba/ambapp_depth.c
@@ -0,0 +1,25 @@
+/*
+ * AMBA Plug & Play routines
+ *
+ * COPYRIGHT (c) 2011
+ * Aeroflex Gaisler
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <grlib/ambapp.h>
+
+/* Get bus depth a device is located at
+ *
+ * Walks the parent chain upwards; a device on the root bus has depth 0,
+ * a device behind one bridge has depth 1, and so on.
+ */
+int ambapp_depth(struct ambapp_dev *dev)
+{
+ int depth = 0;
+
+ do {
+ dev = ambapp_find_parent(dev);
+ depth++;
+ } while (dev);
+
+ /* Loop counts one extra step for the root (NULL) parent */
+ return depth - 1;
+}
diff --git a/bsps/shared/grlib/amba/ambapp_find_by_idx.c b/bsps/shared/grlib/amba/ambapp_find_by_idx.c
new file mode 100644
index 0000000000..55d9022881
--- /dev/null
+++ b/bsps/shared/grlib/amba/ambapp_find_by_idx.c
@@ -0,0 +1,39 @@
+/*
+ * AMBA Plug & Play routines
+ *
+ * COPYRIGHT (c) 2011
+ * Aeroflex Gaisler
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <grlib/ambapp.h>
+
+/* AMBAPP helper routine to find a device by index. The function is given to
+ * ambapp_for_each, the argument may be NULL (find first device) or a pointer
+ * to an index which is downcounted until 0 is reached. If the int-pointer
+ * points to a value of:
+ * 0 - first device is returned
+ * 1 - second device is returned
+ * ...
+ *
+ * The matching device is returned, which will stop the ambapp_for_each search.
+ * If zero is returned from ambapp_for_each no device matching the index was
+ * found
+ *
+ * NOTE(review): the device pointer is returned through an int; this
+ * relies on pointers fitting in an int, which holds on the 32-bit
+ * LEON/SPARC targets this driver stack supports.
+ */
+int ambapp_find_by_idx(struct ambapp_dev *dev, int index, void *pcount)
+{
+ int *pi = pcount;
+
+ if (pi) {
+ if ((*pi)-- == 0)
+ return (int)dev;
+ else
+ return 0;
+ } else {
+ /* Satisfied with first matching device, stop search */
+ return (int)dev;
+ }
+}
diff --git a/bsps/shared/grlib/amba/ambapp_freq.c b/bsps/shared/grlib/amba/ambapp_freq.c
new file mode 100644
index 0000000000..9e6e9c1765
--- /dev/null
+++ b/bsps/shared/grlib/amba/ambapp_freq.c
@@ -0,0 +1,109 @@
+/*
+ * AMBA Plug & Play routines
+ *
+ * COPYRIGHT (c) 2011
+ * Aeroflex Gaisler
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <grlib/ambapp.h>
+
+/* Calculate AHB Bus frequency of
+ * - Bus[0] (inverse=1), relative to the frequency of Bus[ahbidx]
+ *   NOTE: set freq_hz to frequency of Bus[ahbidx].
+ * or
+ * - Bus[ahbidx] (inverse=0), relative to the frequency of Bus[0]
+ *   NOTE: set freq_hz to frequency of Bus[0].
+ *
+ * If an unsupported bridge is found the invalid frequency of 0Hz is
+ * returned.
+ */
+static unsigned int ambapp_freq_calc(
+ struct ambapp_bus *abus,
+ int ahbidx,
+ unsigned int freq_hz,
+ int inverse)
+{
+ struct ambapp_ahb_info *ahb;
+ struct ambapp_dev *bridge;
+ unsigned char ffact;
+ int dir;
+
+ /* Found Bus0? The root bus has no parent bridge, which terminates
+  * the recursion.
+  */
+ bridge = abus->ahbs[ahbidx].bridge;
+ if (!bridge)
+ return freq_hz;
+
+ /* Find this bus frequency relative to freq_hz */
+ if ((bridge->vendor == VENDOR_GAISLER) &&
+ ((bridge->device == GAISLER_AHB2AHB) ||
+ (bridge->device == GAISLER_L2CACHE))) {
+ ahb = DEV_TO_AHB(bridge);
+ /* Frequency scaling factor from the bridge's PnP custom word */
+ ffact = (ahb->custom[0] & AMBAPP_FLAG_FFACT) >> 4;
+ if (ffact != 0) {
+ dir = ahb->custom[0] & AMBAPP_FLAG_FFACT_DIR;
+
+ /* Calculate frequency by dividing or
+ * multiplying system frequency
+ */
+ if ((dir && !inverse) || (!dir && inverse))
+ freq_hz = freq_hz * ffact;
+ else
+ freq_hz = freq_hz / ffact;
+ }
+ /* Continue towards the root via the bridge's host bus */
+ return ambapp_freq_calc(abus, ahb->ahbidx, freq_hz, inverse);
+ } else {
+ /* Unknown bridge, impossible to calc frequency */
+ return 0;
+ }
+}
+
+/* Find the frequency of all AHB Buses from knowing the frequency of one
+ * particular APB/AHB Device.
+ *
+ * abus:    scanned bus description whose per-bus freq_hz fields to fill
+ * dev:     device whose bus frequency is known; NULL means freq_hz is
+ *          the frequency of the root bus (Bus 0)
+ * freq_hz: the known frequency in Hz
+ */
+void ambapp_freq_init(
+ struct ambapp_bus *abus,
+ struct ambapp_dev *dev,
+ unsigned int freq_hz)
+{
+ struct ambapp_common_info *info;
+ int i;
+
+ /* Invalidate all bus frequencies before recomputing them */
+ for (i=0; i<AHB_BUS_MAX; i++)
+ abus->ahbs[i].freq_hz = 0;
+
+ /* Register Frequency at the AHB bus that the device the user gave us
+ * is located at.
+ */
+ if (dev) {
+ info = DEV_TO_COMMON(dev);
+ abus->ahbs[info->ahbidx].freq_hz = freq_hz;
+
+ /* Find Frequency of Bus 0 */
+ abus->ahbs[0].freq_hz = ambapp_freq_calc(abus, info->ahbidx, freq_hz, 1);
+ } else {
+ abus->ahbs[0].freq_hz = freq_hz;
+ }
+
+ /* Find Frequency of all except for Bus0 and the bus which frequency
+ * was reported at
+ */
+ for (i=1; i<AHB_BUS_MAX; i++) {
+ /* ioarea == 0 marks the end of the registered buses */
+ if (abus->ahbs[i].ioarea == 0)
+ break;
+ if (abus->ahbs[i].freq_hz != 0)
+ continue;
+ abus->ahbs[i].freq_hz = ambapp_freq_calc(abus, i, abus->ahbs[0].freq_hz, 0);
+ }
+}
+
+/* Get the frequency of the AHB bus that a particular AHB/APB device is
+ * attached to. Requires ambapp_freq_init() to have been called first.
+ */
+unsigned int ambapp_freq_get(struct ambapp_bus *abus, struct ambapp_dev *dev)
+{
+ struct ambapp_common_info *info = DEV_TO_COMMON(dev);
+ return abus->ahbs[info->ahbidx].freq_hz;
+}
diff --git a/bsps/shared/grlib/amba/ambapp_names.c b/bsps/shared/grlib/amba/ambapp_names.c
new file mode 100644
index 0000000000..8d168f283b
--- /dev/null
+++ b/bsps/shared/grlib/amba/ambapp_names.c
@@ -0,0 +1,447 @@
+/*
+ * AMBA Plug & Play Device and Vendor name database: Created from GRLIB 3386.
+ *
+ * COPYRIGHT (c) 2009.
+ * Aeroflex Gaisler.
+ *
+ * The device and vendor definitions are extracted with a script from
+ * GRLIB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <grlib/ambapp.h>
+#include <grlib/ambapp_ids.h>
+#include <string.h>
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+/* Maps one PnP device ID to its printable name */
+typedef struct {
+ int device_id;
+ char *name;
+} ambapp_device_name;
+
+/* Maps one PnP vendor ID to its name and its device name table */
+typedef struct {
+ unsigned int vendor_id;
+ char *name;
+ ambapp_device_name *devices;
+} ambapp_vendor_devnames;
+
+/**************** AUTO GENERATED FROM devices.vhd ****************/
+static ambapp_device_name GAISLER_devices[] =
+{
+ {GAISLER_LEON2DSU, "LEON2DSU"},
+ {GAISLER_LEON3, "LEON3"},
+ {GAISLER_LEON3DSU, "LEON3DSU"},
+ {GAISLER_ETHAHB, "ETHAHB"},
+ {GAISLER_APBMST, "APBMST"},
+ {GAISLER_AHBUART, "AHBUART"},
+ {GAISLER_SRCTRL, "SRCTRL"},
+ {GAISLER_SDCTRL, "SDCTRL"},
+ {GAISLER_SSRCTRL, "SSRCTRL"},
+ {GAISLER_I2C2AHB, "I2C2AHB"},
+ {GAISLER_APBUART, "APBUART"},
+ {GAISLER_IRQMP, "IRQMP"},
+ {GAISLER_AHBRAM, "AHBRAM"},
+ {GAISLER_AHBDPRAM, "AHBDPRAM"},
+ {GAISLER_GRIOMMU2, "GRIOMMU2"},
+ {GAISLER_GPTIMER, "GPTIMER"},
+ {GAISLER_PCITRG, "PCITRG"},
+ {GAISLER_PCISBRG, "PCISBRG"},
+ {GAISLER_PCIFBRG, "PCIFBRG"},
+ {GAISLER_PCITRACE, "PCITRACE"},
+ {GAISLER_DMACTRL, "DMACTRL"},
+ {GAISLER_AHBTRACE, "AHBTRACE"},
+ {GAISLER_DSUCTRL, "DSUCTRL"},
+ {GAISLER_CANAHB, "CANAHB"},
+ {GAISLER_GPIO, "GPIO"},
+ {GAISLER_AHBROM, "AHBROM"},
+ {GAISLER_AHBJTAG, "AHBJTAG"},
+ {GAISLER_ETHMAC, "ETHMAC"},
+ {GAISLER_SWNODE, "SWNODE"},
+ {GAISLER_SPW, "SPW"},
+ {GAISLER_AHB2AHB, "AHB2AHB"},
+ {GAISLER_USBDC, "USBDC"},
+ {GAISLER_USB_DCL, "USB_DCL"},
+ {GAISLER_DDRMP, "DDRMP"},
+ {GAISLER_ATACTRL, "ATACTRL"},
+ {GAISLER_DDRSP, "DDRSP"},
+ {GAISLER_EHCI, "EHCI"},
+ {GAISLER_UHCI, "UHCI"},
+ {GAISLER_I2CMST, "I2CMST"},
+ {GAISLER_SPW2, "SPW2"},
+ {GAISLER_AHBDMA, "AHBDMA"},
+ {GAISLER_NUHOSP3, "NUHOSP3"},
+ {GAISLER_CLKGATE, "CLKGATE"},
+ {GAISLER_SPICTRL, "SPICTRL"},
+ {GAISLER_DDR2SP, "DDR2SP"},
+ {GAISLER_SLINK, "SLINK"},
+ {GAISLER_GRTM, "GRTM"},
+ {GAISLER_GRTC, "GRTC"},
+ {GAISLER_GRPW, "GRPW"},
+ {GAISLER_GRCTM, "GRCTM"},
+ {GAISLER_GRHCAN, "GRHCAN"},
+ {GAISLER_GRFIFO, "GRFIFO"},
+ {GAISLER_GRADCDAC, "GRADCDAC"},
+ {GAISLER_GRPULSE, "GRPULSE"},
+ {GAISLER_GRTIMER, "GRTIMER"},
+ {GAISLER_AHB2PP, "AHB2PP"},
+ {GAISLER_GRVERSION, "GRVERSION"},
+ {GAISLER_APB2PW, "APB2PW"},
+ {GAISLER_PW2APB, "PW2APB"},
+ {GAISLER_GRCAN, "GRCAN"},
+ {GAISLER_I2CSLV, "I2CSLV"},
+ {GAISLER_U16550, "U16550"},
+ {GAISLER_AHBMST_EM, "AHBMST_EM"},
+ {GAISLER_AHBSLV_EM, "AHBSLV_EM"},
+ {GAISLER_GRTESTMOD, "GRTESTMOD"},
+ {GAISLER_ASCS, "ASCS"},
+ {GAISLER_IPMVBCTRL, "IPMVBCTRL"},
+ {GAISLER_SPIMCTRL, "SPIMCTRL"},
+ {GAISLER_L4STAT, "L4STAT"},
+ {GAISLER_LEON4, "LEON4"},
+ {GAISLER_LEON4DSU, "LEON4DSU"},
+ {GAISLER_PWM, "PWM"},
+ {GAISLER_L2CACHE, "L2CACHE"},
+ {GAISLER_SDCTRL64, "SDCTRL64"},
+ {GAISLER_GR1553B, "GR1553B"},
+ {GAISLER_1553TST, "1553TST"},
+ {GAISLER_GRIOMMU, "GRIOMMU"},
+ {GAISLER_FTAHBRAM, "FTAHBRAM"},
+ {GAISLER_FTSRCTRL, "FTSRCTRL"},
+ {GAISLER_AHBSTAT, "AHBSTAT"},
+ {GAISLER_LEON3FT, "LEON3FT"},
+ {GAISLER_FTMCTRL, "FTMCTRL"},
+ {GAISLER_FTSDCTRL, "FTSDCTRL"},
+ {GAISLER_FTSRCTRL8, "FTSRCTRL8"},
+ {GAISLER_MEMSCRUB, "MEMSCRUB"},
+ {GAISLER_FTSDCTRL64, "FTSDCTRL64"},
+ {GAISLER_NANDFCTRL, "NANDFCTRL"},
+ {GAISLER_N2DLLCTRL, "N2DLLCTRL"},
+ {GAISLER_N2PLLCTRL, "N2PLLCTRL"},
+ {GAISLER_SPI2AHB, "SPI2AHB"},
+ {GAISLER_DDRSDMUX, "DDRSDMUX"},
+ {GAISLER_AHBFROM, "AHBFROM"},
+ {GAISLER_PCIEXP, "PCIEXP"},
+ {GAISLER_APBPS2, "APBPS2"},
+ {GAISLER_VGACTRL, "VGACTRL"},
+ {GAISLER_LOGAN, "LOGAN"},
+ {GAISLER_SVGACTRL, "SVGACTRL"},
+ {GAISLER_T1AHB, "T1AHB"},
+ {GAISLER_MP7WRAP, "MP7WRAP"},
+ {GAISLER_GRSYSMON, "GRSYSMON"},
+ {GAISLER_GRACECTRL, "GRACECTRL"},
+ {GAISLER_ATAHBSLV, "ATAHBSLV"},
+ {GAISLER_ATAHBMST, "ATAHBMST"},
+ {GAISLER_ATAPBSLV, "ATAPBSLV"},
+ {GAISLER_MIGDDR2, "MIGDDR2"},
+ {GAISLER_LCDCTRL, "LCDCTRL"},
+ {GAISLER_SWITCHOVER, "SWITCHOVER"},
+ {GAISLER_FIFOUART, "FIFOUART"},
+ {GAISLER_MUXCTRL, "MUXCTRL"},
+ {GAISLER_B1553BC, "B1553BC"},
+ {GAISLER_B1553RT, "B1553RT"},
+ {GAISLER_B1553BRM, "B1553BRM"},
+ {GAISLER_AES, "AES"},
+ {GAISLER_ECC, "ECC"},
+ {GAISLER_PCIF, "PCIF"},
+ {GAISLER_CLKMOD, "CLKMOD"},
+ {GAISLER_HAPSTRAK, "HAPSTRAK"},
+ {GAISLER_TEST_1X2, "TEST_1X2"},
+ {GAISLER_WILD2AHB, "WILD2AHB"},
+ {GAISLER_BIO1, "BIO1"},
+ {GAISLER_AESDMA, "AESDMA"},
+ {GAISLER_GRPCI2, "GRPCI2"},
+ {GAISLER_GRPCI2_DMA, "GRPCI2_DMA"},
+ {GAISLER_GRPCI2_TB, "GRPCI2_TB"},
+ {GAISLER_MMA, "MMA"},
+ {GAISLER_SATCAN, "SATCAN"},
+ {GAISLER_CANMUX, "CANMUX"},
+ {GAISLER_GRTMRX, "GRTMRX"},
+ {GAISLER_GRTCTX, "GRTCTX"},
+ {GAISLER_GRTMDESC, "GRTMDESC"},
+ {GAISLER_GRTMVC, "GRTMVC"},
+ {GAISLER_GEFFE, "GEFFE"},
+ {GAISLER_GPREG, "GPREG"},
+ {GAISLER_GRTMPAHB, "GRTMPAHB"},
+ {GAISLER_SPWCUC, "SPWCUC"},
+ {GAISLER_SPW2_DMA, "SPW2_DMA"},
+ {GAISLER_SPWROUTER, "SPWROUTER"},
+ {GAISLER_EDCLMST, "EDCLMST"},
+ {GAISLER_GRPWTX, "GRPWTX"},
+ {GAISLER_GRPWRX, "GRPWRX"},
+ {GAISLER_GPREGBANK, "GPREGBANK"},
+ {GAISLER_MIG_7SERIES, "MIG_7SERIES"},
+ {GAISLER_GRSPW2_SIST, "GRSPW2_SIST"},
+ {GAISLER_SGMII, "SGMII"},
+ {GAISLER_RGMII, "RGMII"},
+ {GAISLER_IRQGEN, "IRQGEN"},
+ {GAISLER_GRDMAC, "GRDMAC"},
+ {GAISLER_AHB2AVLA, "AHB2AVLA"},
+ {GAISLER_SPWTDP, "SPWTDP"},
+ {GAISLER_L3STAT, "L3STAT"},
+ {GAISLER_GR740THS, "GR740THS"},
+ {GAISLER_GRRM, "GRRM"},
+ {GAISLER_CMAP, "CMAP"},
+ {GAISLER_CPGEN, "CPGEN"},
+ {GAISLER_AMBAPROT, "AMBAPROT"},
+ {GAISLER_IGLOO2_BRIDGE, "IGLOO2_BRIDGE"},
+ {GAISLER_AHB2AXI, "AHB2AXI"},
+ {GAISLER_AXI2AHB, "AXI2AHB"},
+ {GAISLER_FDIR_RSTCTRL, "FDIR_RSTCTRL"},
+ {GAISLER_APB3MST, "APB3MST"},
+ {GAISLER_LRAM, "LRAM"},
+ {GAISLER_BOOTSEQ, "BOOTSEQ"},
+ {GAISLER_TCCOP, "TCCOP"},
+ {GAISLER_SPIMASTER, "SPIMASTER"},
+ {GAISLER_SPISLAVE, "SPISLAVE"},
+ {GAISLER_GRSRIO, "GRSRIO"},
+ {0, NULL}
+};
+
+static ambapp_device_name PENDER_devices[] =
+{
+ {0, NULL}
+};
+
+static ambapp_device_name ESA_devices[] =
+{
+ {ESA_LEON2, "LEON2"},
+ {ESA_LEON2APB, "LEON2APB"},
+ {ESA_IRQ, "IRQ"},
+ {ESA_TIMER, "TIMER"},
+ {ESA_UART, "UART"},
+ {ESA_CFG, "CFG"},
+ {ESA_IO, "IO"},
+ {ESA_MCTRL, "MCTRL"},
+ {ESA_PCIARB, "PCIARB"},
+ {ESA_HURRICANE, "HURRICANE"},
+ {ESA_SPW_RMAP, "SPW_RMAP"},
+ {ESA_AHBUART, "AHBUART"},
+ {ESA_SPWA, "SPWA"},
+ {ESA_BOSCHCAN, "BOSCHCAN"},
+ {ESA_IRQ2, "IRQ2"},
+ {ESA_AHBSTAT, "AHBSTAT"},
+ {ESA_WPROT, "WPROT"},
+ {ESA_WPROT2, "WPROT2"},
+ {ESA_PDEC3AMBA, "PDEC3AMBA"},
+ {ESA_PTME3AMBA, "PTME3AMBA"},
+ {0, NULL}
+};
+
+static ambapp_device_name ASTRIUM_devices[] =
+{
+ {0, NULL}
+};
+
+static ambapp_device_name OPENCHIP_devices[] =
+{
+ {OPENCHIP_APBGPIO, "APBGPIO"},
+ {OPENCHIP_APBI2C, "APBI2C"},
+ {OPENCHIP_APBSPI, "APBSPI"},
+ {OPENCHIP_APBCHARLCD, "APBCHARLCD"},
+ {OPENCHIP_APBPWM, "APBPWM"},
+ {OPENCHIP_APBPS2, "APBPS2"},
+ {OPENCHIP_APBMMCSD, "APBMMCSD"},
+ {OPENCHIP_APBNAND, "APBNAND"},
+ {OPENCHIP_APBLPC, "APBLPC"},
+ {OPENCHIP_APBCF, "APBCF"},
+ {OPENCHIP_APBSYSACE, "APBSYSACE"},
+ {OPENCHIP_APB1WIRE, "APB1WIRE"},
+ {OPENCHIP_APBJTAG, "APBJTAG"},
+ {OPENCHIP_APBSUI, "APBSUI"},
+ {0, NULL}
+};
+
+static ambapp_device_name OPENCORES_devices[] =
+{
+ {0, NULL}
+};
+
+static ambapp_device_name CONTRIB_devices[] =
+{
+ {CONTRIB_CORE1, "CORE1"},
+ {CONTRIB_CORE2, "CORE2"},
+ {0, NULL}
+};
+
+static ambapp_device_name EONIC_devices[] =
+{
+ {0, NULL}
+};
+
+static ambapp_device_name RADIONOR_devices[] =
+{
+ {0, NULL}
+};
+
+static ambapp_device_name GLEICHMANN_devices[] =
+{
+ {GLEICHMANN_CUSTOM, "CUSTOM"},
+ {GLEICHMANN_GEOLCD01, "GEOLCD01"},
+ {GLEICHMANN_DAC, "DAC"},
+ {GLEICHMANN_HPI, "HPI"},
+ {GLEICHMANN_SPI, "SPI"},
+ {GLEICHMANN_HIFC, "HIFC"},
+ {GLEICHMANN_ADCDAC, "ADCDAC"},
+ {GLEICHMANN_SPIOC, "SPIOC"},
+ {GLEICHMANN_AC97, "AC97"},
+ {0, NULL}
+};
+
+static ambapp_device_name MENTA_devices[] =
+{
+ {0, NULL}
+};
+
+static ambapp_device_name SUN_devices[] =
+{
+ {SUN_T1, "SUN_T1"},
+ {SUN_S1, "SUN_S1"},
+ {0, NULL}
+};
+
+static ambapp_device_name MOVIDIA_devices[] =
+{
+ {0, NULL}
+};
+
+static ambapp_device_name ORBITA_devices[] =
+{
+ {ORBITA_1553B, "1553B"},
+ {ORBITA_429, "429"},
+ {ORBITA_SPI, "SPI"},
+ {ORBITA_I2C, "I2C"},
+ {ORBITA_SMARTCARD, "SMARTCARD"},
+ {ORBITA_SDCARD, "SDCARD"},
+ {ORBITA_UART16550, "UART16550"},
+ {ORBITA_CRYPTO, "CRYPTO"},
+ {ORBITA_SYSIF, "SYSIF"},
+ {ORBITA_PIO, "PIO"},
+ {ORBITA_RTC, "RTC"},
+ {ORBITA_COLORLCD, "COLORLCD"},
+ {ORBITA_PCI, "PCI"},
+ {ORBITA_DSP, "DSP"},
+ {ORBITA_USBHOST, "USBHOST"},
+ {ORBITA_USBDEV, "USBDEV"},
+ {0, NULL}
+};
+
+static ambapp_device_name SYNOPSYS_devices[] =
+{
+ {0, NULL}
+};
+
+static ambapp_device_name NASA_devices[] =
+{
+ {NASA_EP32, "EP32"},
+ {0, NULL}
+};
+
+static ambapp_device_name CAL_devices[] =
+{
+ {CAL_DDRCTRL, "DDRCTRL"},
+ {0, NULL}
+};
+
+static ambapp_device_name EMBEDDIT_devices[] =
+{
+ {0, NULL}
+};
+
+static ambapp_device_name CETON_devices[] =
+{
+ {0, NULL}
+};
+
+static ambapp_device_name ACTEL_devices[] =
+{
+ {ACTEL_COREMP7, "COREMP7"},
+ {0, NULL}
+};
+
+static ambapp_vendor_devnames vendors[] =
+{
+ {VENDOR_GAISLER, "GAISLER", GAISLER_devices},
+ {VENDOR_PENDER, "PENDER", PENDER_devices},
+ {VENDOR_ESA, "ESA", ESA_devices},
+ {VENDOR_ASTRIUM, "ASTRIUM", ASTRIUM_devices},
+ {VENDOR_OPENCHIP, "OPENCHIP", OPENCHIP_devices},
+ {VENDOR_OPENCORES, "OPENCORES", OPENCORES_devices},
+ {VENDOR_CONTRIB, "CONTRIB", CONTRIB_devices},
+ {VENDOR_EONIC, "EONIC", EONIC_devices},
+ {VENDOR_RADIONOR, "RADIONOR", RADIONOR_devices},
+ {VENDOR_GLEICHMANN, "GLEICHMANN", GLEICHMANN_devices},
+ {VENDOR_MENTA, "MENTA", MENTA_devices},
+ {VENDOR_SUN, "SUN", SUN_devices},
+ {VENDOR_MOVIDIA, "MOVIDIA", MOVIDIA_devices},
+ {VENDOR_ORBITA, "ORBITA", ORBITA_devices},
+ {VENDOR_SYNOPSYS, "SYNOPSYS", SYNOPSYS_devices},
+ {VENDOR_NASA, "NASA", NASA_devices},
+ {VENDOR_CAL, "CAL", CAL_devices},
+ {VENDOR_EMBEDDIT, "EMBEDDIT", EMBEDDIT_devices},
+ {VENDOR_CETON, "CETON", CETON_devices},
+ {VENDOR_ACTEL, "ACTEL", ACTEL_devices},
+ {0, NULL, NULL}
+};
+
+/*****************************************************************/
+
+/* Linear search of one vendor's device name table; the table is
+ * terminated by a device_id of 0. Returns NULL when the id is unknown.
+ */
+static char *ambapp_get_devname(ambapp_device_name *devs, int id)
+{
+ while (devs->device_id > 0) {
+ if (devs->device_id == id)
+ return devs->name;
+ devs++;
+ }
+ return NULL;
+}
+
+/* Look up the printable name of a vendor:device ID pair. Returns NULL
+ * when the vendor or device is not in the name database.
+ */
+char *ambapp_device_id2str(int vendor, int id)
+{
+ ambapp_vendor_devnames *ven = &vendors[0];
+
+ while (ven->vendor_id > 0) {
+ if (ven->vendor_id == vendor)
+ return ambapp_get_devname(ven->devices, id);
+ ven++;
+ }
+ return NULL;
+}
+
+/* Look up the printable name of a vendor ID. Returns NULL when the
+ * vendor is not in the name database.
+ */
+char *ambapp_vendor_id2str(int vendor)
+{
+ ambapp_vendor_devnames *ven = &vendors[0];
+
+ while (ven->vendor_id > 0) {
+ if (ven->vendor_id == vendor)
+ return ven->name;
+ ven++;
+ }
+ return NULL;
+}
+
+/* Format "VENDOR_DEVICE" into 'buf' for a vendor:device ID pair.
+ * Returns the resulting string length, or 0 (with *buf set to the
+ * empty string) when either ID is unknown.
+ *
+ * NOTE(review): buf must be large enough for the longest combined
+ * name — no bound is passed in; confirm caller buffer sizes.
+ */
+int ambapp_vendev_id2str(int vendor, int id, char *buf)
+{
+ char *dstr, *vstr;
+
+ *buf = '\0';
+
+ vstr = ambapp_vendor_id2str(vendor);
+ if (vstr == NULL)
+ return 0;
+
+ dstr = ambapp_device_id2str(vendor, id);
+ if (dstr == NULL)
+ return 0;
+
+ strcpy(buf, vstr);
+ strcat(buf, "_");
+ strcat(buf, dstr);
+
+ return strlen(buf);
+}
diff --git a/bsps/shared/grlib/amba/ambapp_old.c b/bsps/shared/grlib/amba/ambapp_old.c
new file mode 100644
index 0000000000..a51e692fbf
--- /dev/null
+++ b/bsps/shared/grlib/amba/ambapp_old.c
@@ -0,0 +1,112 @@
+/*
+ * Old AMBA scanning Interface provided for backwards compatibility
+ *
+ * COPYRIGHT (c) 2011
+ * Aeroflex Gaisler
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <grlib/ambapp.h>
+
+/* State for ambapp_dev_find_match():
+ *  index - number of matches still to skip before copying starts
+ *  count - number of device infos left to copy out
+ *  type  - DEV_APB_SLV or an AHB type; selects the info struct copied
+ *  dev   - output cursor, advanced by one info struct per match
+ */
+struct ambapp_dev_find_match_arg {
+ int index;
+ int count;
+ int type;
+ void *dev;
+};
+
+/* AMBA PP find routines
+ *
+ * ambapp_for_each() callback: skips 'index' matches, then copies the
+ * APB/AHB info of each following match into the caller's array until
+ * 'count' entries have been written. Returning 1 stops the search.
+ */
+static int ambapp_dev_find_match(struct ambapp_dev *dev, int index, void *arg)
+{
+ struct ambapp_dev_find_match_arg *p = arg;
+
+ if (p->index == 0) {
+ /* Found controller, stop */
+ if (p->type == DEV_APB_SLV) {
+ *(struct ambapp_apb_info *)p->dev = *DEV_TO_APB(dev);
+ p->dev = ((struct ambapp_apb_info *)p->dev)+1;
+ } else {
+ *(struct ambapp_ahb_info *)p->dev = *DEV_TO_AHB(dev);
+ p->dev = ((struct ambapp_ahb_info *)p->dev)+1;
+ }
+ p->count--;
+ if (p->count < 1)
+ return 1;
+ } else {
+ p->index--;
+ }
+ return 0;
+}
+
+/* Copy up to 'maxno' APB slave infos matching vendor:device into dev[],
+ * skipping the first 'index' matches. Returns the number copied.
+ */
+int ambapp_find_apbslvs_next(struct ambapp_bus *abus, int vendor, int device, struct ambapp_apb_info *dev, int index, int maxno)
+{
+ struct ambapp_dev_find_match_arg arg;
+
+ arg.index = index;
+ arg.count = maxno;
+ arg.type = DEV_APB_SLV; /* APB */
+ arg.dev = dev;
+
+ ambapp_for_each(abus, (OPTIONS_ALL|OPTIONS_APB_SLVS), vendor, device,
+ ambapp_dev_find_match, &arg);
+
+ return maxno - arg.count;
+}
+
+/* Find first APB slave matching vendor:device. Returns 1 if found, 0 if not */
+int ambapp_find_apbslv(struct ambapp_bus *abus, int vendor, int device, struct ambapp_apb_info *dev)
+{
+ return ambapp_find_apbslvs_next(abus, vendor, device, dev, 0, 1);
+}
+
+/* Find the index'th APB slave matching vendor:device. Returns 1 if found */
+int ambapp_find_apbslv_next(struct ambapp_bus *abus, int vendor, int device, struct ambapp_apb_info *dev, int index)
+{
+ return ambapp_find_apbslvs_next(abus, vendor, device, dev, index, 1);
+}
+
+/* Find up to maxno APB slaves matching vendor:device. Returns number found */
+int ambapp_find_apbslvs(struct ambapp_bus *abus, int vendor, int device, struct ambapp_apb_info *dev, int maxno)
+{
+ return ambapp_find_apbslvs_next(abus, vendor, device, dev, 0, maxno);
+}
+
+/* Count APB slaves matching vendor:device (-1 acts as wildcard) */
+int ambapp_get_number_apbslv_devices(struct ambapp_bus *abus, int vendor, int device)
+{
+ return ambapp_dev_count(abus, (OPTIONS_ALL|OPTIONS_APB_SLVS), vendor, device);
+}
+
+/* Copy up to 'maxno' AHB slave infos matching vendor:device into dev[],
+ * skipping the first 'index' matches. Returns the number copied.
+ */
+int ambapp_find_ahbslvs_next(struct ambapp_bus *abus, int vendor, int device, struct ambapp_ahb_info *dev, int index, int maxno)
+{
+ struct ambapp_dev_find_match_arg arg;
+
+ arg.index = index;
+ arg.count = maxno;
+ arg.type = DEV_AHB_SLV; /* AHB SLV */
+ arg.dev = dev;
+
+ ambapp_for_each(abus, (OPTIONS_ALL|OPTIONS_AHB_SLVS), vendor, device,
+ ambapp_dev_find_match, &arg);
+
+ return maxno - arg.count;
+}
+
+/* Find the index'th AHB slave matching vendor:device. Returns 1 if found */
+int ambapp_find_ahbslv_next(struct ambapp_bus *abus, int vendor, int device, struct ambapp_ahb_info *dev, int index)
+{
+ return ambapp_find_ahbslvs_next(abus, vendor, device, dev, index, 1);
+}
+
+/* Find first AHB slave matching vendor:device. Returns 1 if found, 0 if not */
+int ambapp_find_ahbslv(struct ambapp_bus *abus, int vendor, int device, struct ambapp_ahb_info *dev)
+{
+ return ambapp_find_ahbslvs_next(abus, vendor, device, dev, 0, 1);
+}
+
+/* Find up to maxno AHB slaves matching vendor:device. Returns number found */
+int ambapp_find_ahbslvs(struct ambapp_bus *abus, int vendor, int device, struct ambapp_ahb_info *dev, int maxno)
+{
+ return ambapp_find_ahbslvs_next(abus, vendor, device, dev, 0, maxno);
+}
+
+/* Count AHB slaves matching vendor:device (-1 acts as wildcard) */
+int ambapp_get_number_ahbslv_devices(struct ambapp_bus *abus, int vendor, int device)
+{
+ return ambapp_dev_count(abus, (OPTIONS_ALL|OPTIONS_AHB_SLVS), vendor, device);
+}
diff --git a/bsps/shared/grlib/amba/ambapp_parent.c b/bsps/shared/grlib/amba/ambapp_parent.c
new file mode 100644
index 0000000000..b77b6eec68
--- /dev/null
+++ b/bsps/shared/grlib/amba/ambapp_parent.c
@@ -0,0 +1,23 @@
+/*
+ * AMBA Plug & Play routines
+ *
+ * COPYRIGHT (c) 2011
+ * Aeroflex Gaisler
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdlib.h>
+#include <grlib/ambapp.h>
+
+struct ambapp_dev *ambapp_find_parent(struct ambapp_dev *dev)
+{
+ while (dev->prev) {
+ if (dev == dev->prev->children)
+ return dev->prev;
+ dev = dev->prev;
+ }
+ return NULL;
+}
diff --git a/bsps/shared/grlib/amba/ambapp_show.c b/bsps/shared/grlib/amba/ambapp_show.c
new file mode 100644
index 0000000000..12cefa2c0f
--- /dev/null
+++ b/bsps/shared/grlib/amba/ambapp_show.c
@@ -0,0 +1,68 @@
+/*
+ * AMBA Plug & Play routines: device information printing.
+ *
+ * COPYRIGHT (c) 2009.
+ * Aeroflex Gaisler.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdio.h>
+#include <grlib/ambapp.h>
+
+struct ambapp_dev_print_arg {
+ int show_depth;
+};
+
+static char *unknown = "unknown";
+
+static int ambapp_dev_print(struct ambapp_dev *dev, int index, void *arg)
+{
+ char *dev_str, *ven_str, *type_str;
+ struct ambapp_dev_print_arg *p = arg;
+ char dp[32];
+ int i=0;
+ unsigned int basereg;
+
+ if (p->show_depth) {
+ for (i=0; i<ambapp_depth(dev)*2; i+=2) {
+ dp[i] = ' ';
+ dp[i+1] = ' ';
+ }
+ }
+ dp[i] = '\0';
+
+ ven_str = ambapp_vendor_id2str(dev->vendor);
+ if (!ven_str) {
+ ven_str = unknown;
+ dev_str = unknown;
+ } else {
+ dev_str = ambapp_device_id2str(dev->vendor, dev->device);
+ if (!dev_str)
+ dev_str = unknown;
+ }
+ if (dev->dev_type == DEV_APB_SLV) {
+ /* APB */
+ basereg = DEV_TO_APB(dev)->start;
+ type_str = "apb";
+ } else {
+ /* AHB */
+ basereg = DEV_TO_AHB(dev)->start[0];
+ type_str = "ahb";
+ }
+ printf("%s |-> 0x%x:0x%x:0x%x: %s_%s, %s: 0x%x, 0x%x (OWNER: 0x%x)\n",
+ dp, index, dev->vendor, dev->device, ven_str, dev_str, type_str,
+ basereg, (unsigned int)dev, (unsigned int)dev->owner);
+
+ return 0;
+}
+
+void ambapp_print(struct ambapp_bus *abus, int show_depth)
+{
+ struct ambapp_dev_print_arg arg;
+ arg.show_depth = show_depth;
+ ambapp_for_each(abus, (OPTIONS_ALL_DEVS|OPTIONS_ALL|OPTIONS_DEPTH_FIRST), -1,
+ -1, ambapp_dev_print, &arg);
+}
diff --git a/bsps/shared/grlib/analog/gradcdac.c b/bsps/shared/grlib/analog/gradcdac.c
new file mode 100644
index 0000000000..02939e58f0
--- /dev/null
+++ b/bsps/shared/grlib/analog/gradcdac.c
@@ -0,0 +1,580 @@
+/* ADC / DAC (GRADCDAC) interface implementation
+ *
+ * COPYRIGHT (c) 2009.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/gradcdac.h>
+
+/****************** DEBUG Definitions ********************/
+#define DBG_IOCTRL 1
+#define DBG_TX 2
+#define DBG_RX 4
+
+#define DEBUG_FLAGS (DBG_IOCTRL | DBG_RX | DBG_TX )
+/* Uncomment for debug output */
+/*
+#define DEBUG
+#define DEBUGFUNCS
+*/
+#include <grlib/debug_defs.h>
+
+#include <grlib/grlib_impl.h>
+
+struct gradcdac_priv {
+ struct gradcdac_regs *regs; /* Must be first */
+ struct drvmgr_dev *dev;
+ char devName[48];
+
+ unsigned int freq;
+ int irqno;
+ int minor;
+
+ void (*isr_adc)(void *cookie, void *arg);
+ void (*isr_dac)(void *cookie, void *arg);
+ void *isr_adc_arg;
+ void *isr_dac_arg;
+
+ int open;
+};
+
+/* Global variables */
+
+/* Print Info routines */
+void gradcdac_print(void *cookie);
+
+int gradcdac_init2(struct drvmgr_dev *dev);
+int gradcdac_init3(struct drvmgr_dev *dev);
+int gradcadc_device_init(struct gradcdac_priv *pDev);
+void gradcdac_adc_interrupt(void *arg);
+void gradcdac_dac_interrupt(void *arg);
+
+struct drvmgr_drv_ops gradcdac_ops =
+{
+ .init = {NULL, gradcdac_init2, gradcdac_init3, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+struct amba_dev_id gradcdac_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_GRADCDAC},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info gradcdac_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_GRADCDAC_ID, /* Driver ID */
+ "GRADCDAC_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &gradcdac_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+ },
+ &gradcdac_ids[0]
+};
+
+void gradcdac_register_drv (void)
+{
+ DBG("Registering GRADCDAC driver\n");
+ drvmgr_drv_register(&gradcdac_drv_info.general);
+}
+
+int gradcdac_init2(struct drvmgr_dev *dev)
+{
+ struct gradcdac_priv *priv;
+
+ DBG("GRADCDAC[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+
+ priv = dev->priv = grlib_calloc(1, sizeof(*priv));
+ if ( !priv )
+ return DRVMGR_NOMEM;
+ priv->dev = dev;
+
+ /* This core will not find other cores, so we wait for init2() */
+
+ return DRVMGR_OK;
+}
+
+
+int gradcdac_init3(struct drvmgr_dev *dev)
+{
+ struct gradcdac_priv *priv = dev->priv;
+ char prefix[32];
+
+ if ( !priv )
+ return DRVMGR_FAIL;
+
+ if ( gradcadc_device_init(priv) ) {
+ free(dev->priv);
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+
+ /* Get Filesystem name prefix */
+ prefix[0] = '\0';
+ if ( drvmgr_get_dev_prefix(dev, prefix) ) {
+ /* Failed to get prefix, make sure of a unique FS name
+ * by using the driver minor.
+ */
+ sprintf(priv->devName, "/dev/gradcdac%d", dev->minor_drv);
+ } else {
+ /* Got special prefix, this means we have a bus prefix
+ * And we should use our "bus minor"
+ */
+ sprintf(priv->devName, "/dev/%sgradcdac%d", prefix, dev->minor_bus);
+ }
+
+ return DRVMGR_OK;
+}
+
+static void gradcdac_print_dev(struct gradcdac_priv *pDev)
+{
+ printf("======= GRADCDAC %p =======\n", pDev->regs);
+ printf(" Minor: %d\n", pDev->minor);
+ printf(" Dev Name: %s\n", pDev->devName);
+ printf(" RegBase: %p\n", pDev->regs);
+ printf(" IRQ: %d and %d\n", pDev->irqno, pDev->irqno+1);
+ printf(" Core Freq: %d kHz\n", pDev->freq / 1000);
+ printf(" Opened: %s\n", pDev->open ? "YES" : "NO");
+
+ printf(" CONFIG: 0x%x\n", pDev->regs->config);
+ printf(" STATUS: 0x%x\n", pDev->regs->status);
+}
+
+void gradcdac_print(void *cookie)
+{
+ struct drvmgr_dev *dev;
+ struct gradcdac_priv *pDev;
+
+ if ( cookie ) {
+ gradcdac_print_dev(cookie);
+ return;
+ }
+
+ /* Show all */
+ dev = gradcdac_drv_info.general.dev;
+ while (dev) {
+ pDev = (struct gradcdac_priv *)dev->priv;
+ gradcdac_print_dev(pDev);
+ dev = dev->next_in_drv;
+ }
+}
+
+static void gradcdac_hw_reset(struct gradcdac_regs *regs)
+{
+ /* Reset core */
+ regs->config = 0;
+ regs->adrdir = 0;
+ regs->adrout = 0;
+ regs->data_dir = 0;
+ regs->data_out = 0;
+}
+
+/* Device initialization called once on startup */
+int gradcadc_device_init(struct gradcdac_priv *pDev)
+{
+ struct amba_dev_info *ambadev;
+ struct ambapp_core *pnpinfo;
+
+ /* Get device information from AMBA PnP information */
+ ambadev = (struct amba_dev_info *)pDev->dev->businfo;
+ if ( ambadev == NULL ) {
+ return -1;
+ }
+ pnpinfo = &ambadev->info;
+ pDev->irqno = pnpinfo->irq;
+ pDev->regs = (struct gradcdac_regs *)pnpinfo->apb_slv->start;
+ pDev->minor = pDev->dev->minor_drv;
+
+ /* Reset Hardware before attaching IRQ handler */
+ gradcdac_hw_reset(pDev->regs);
+
+ pDev->open = 0;
+
+ /* Get frequency in Hz */
+ if ( drvmgr_freq_get(pDev->dev, DEV_APB_SLV, &pDev->freq) ) {
+ return -1;
+ }
+
+ DBG("GRADCDAC frequency: %d Hz\n", pDev->freq);
+
+ return 0;
+}
+
+void gradcdac_dac_interrupt(void *arg)
+{
+ struct gradcdac_priv *pDev = arg;
+ if ( pDev->isr_dac )
+ pDev->isr_dac(pDev, pDev->isr_dac_arg);
+}
+
+void gradcdac_adc_interrupt(void *arg)
+{
+ struct gradcdac_priv *pDev = arg;
+ if ( pDev->isr_adc )
+ pDev->isr_adc(pDev, pDev->isr_adc_arg);
+}
+
+void *gradcdac_open(char *devname)
+{
+ struct gradcdac_priv *pDev;
+ struct drvmgr_dev *dev;
+
+ /* Find device by name */
+ dev = gradcdac_drv_info.general.dev;
+ while ( dev ) {
+ pDev = (struct gradcdac_priv *)dev->priv;
+ if ( pDev ) {
+ if ( strncmp(pDev->devName, devname, sizeof(pDev->devName)) == 0 ) {
+ /* Found matching device name */
+ break;
+ }
+ }
+ dev = dev->next_in_drv;
+ }
+
+ if ( !dev )
+ return NULL;
+
+ /* is device busy/taken? */
+ if ( pDev->open )
+ return NULL;
+
+ /* Mark device taken */
+ pDev->open = 1;
+
+ return pDev;
+}
+
+void gradcdac_set_config(void *cookie, struct gradcdac_config *cfg)
+{
+ struct gradcdac_priv *pDev = cookie;
+ unsigned int config=0;
+
+ config = (cfg->dac_ws<<GRADCDAC_CFG_DACWS_BIT)&GRADCDAC_CFG_DACWS;
+
+ if ( cfg->wr_pol )
+ config |= GRADCDAC_CFG_WRPOL;
+
+ config |= (cfg->dac_dw<<GRADCDAC_CFG_DACDW_BIT)&GRADCDAC_CFG_DACDW;
+
+ config |= (cfg->adc_ws<<GRADCDAC_CFG_ADCWS_BIT)&GRADCDAC_CFG_ADCWS;
+
+ if ( cfg->rc_pol )
+ config |= GRADCDAC_CFG_RCPOL;
+
+ config |= (cfg->cs_mode<<GRADCDAC_CFG_CSMODE_BIT)&GRADCDAC_CFG_CSMODE;
+
+ if ( cfg->cs_pol )
+ config |= GRADCDAC_CFG_CSPOL;
+
+ if ( cfg->ready_mode )
+ config |= GRADCDAC_CFG_RDYMODE;
+
+ if ( cfg->ready_pol )
+ config |= GRADCDAC_CFG_RDYPOL;
+
+ if ( cfg->trigg_pol )
+ config |= GRADCDAC_CFG_TRIGPOL;
+
+ config |= (cfg->trigg_mode<<GRADCDAC_CFG_TRIGMODE_BIT)&GRADCDAC_CFG_TRIGMODE;
+
+ config |= (cfg->adc_dw<<GRADCDAC_CFG_ADCDW_BIT)&GRADCDAC_CFG_ADCDW;
+
+ /* Write config */
+ pDev->regs->config = config;
+}
+
+void gradcdac_get_config(void *cookie, struct gradcdac_config *cfg)
+{
+ struct gradcdac_priv *pDev = cookie;
+ unsigned int config;
+
+ if ( !cfg )
+ return;
+
+ /* Get config */
+ config = pDev->regs->config;
+
+ cfg->dac_ws = (config&GRADCDAC_CFG_DACWS)>>GRADCDAC_CFG_DACWS_BIT;
+
+ cfg->wr_pol = (config&GRADCDAC_CFG_WRPOL)>>GRADCDAC_CFG_WRPOL_BIT;
+
+ cfg->dac_dw = (config&GRADCDAC_CFG_DACDW)>>GRADCDAC_CFG_DACDW_BIT;
+
+ cfg->adc_ws = (config&GRADCDAC_CFG_ADCWS)>>GRADCDAC_CFG_ADCWS_BIT;
+
+ cfg->rc_pol = (config&GRADCDAC_CFG_RCPOL)>>GRADCDAC_CFG_RCPOL_BIT;
+
+ cfg->cs_mode = (config&GRADCDAC_CFG_CSMODE)>>GRADCDAC_CFG_CSMODE_BIT;
+
+ cfg->cs_pol = (config&GRADCDAC_CFG_CSPOL)>>GRADCDAC_CFG_CSPOL_BIT;
+
+ cfg->ready_mode = (config&GRADCDAC_CFG_RDYMODE)>>GRADCDAC_CFG_RDYMODE_BIT;
+
+ cfg->ready_pol = (config&GRADCDAC_CFG_RDYPOL)>>GRADCDAC_CFG_RDYPOL_BIT;
+
+ cfg->trigg_pol = (config&GRADCDAC_CFG_TRIGPOL)>>GRADCDAC_CFG_TRIGPOL_BIT;
+
+ cfg->trigg_mode = (config&GRADCDAC_CFG_TRIGMODE)>>GRADCDAC_CFG_TRIGMODE_BIT;
+
+ cfg->adc_dw = (config&GRADCDAC_CFG_ADCDW)>>GRADCDAC_CFG_ADCDW_BIT;
+}
+
+void gradcdac_set_cfg(void *cookie, unsigned int config)
+{
+ struct gradcdac_priv *pDev = cookie;
+ pDev->regs->config = config;
+}
+
+unsigned int gradcdac_get_cfg(void *cookie)
+{
+ struct gradcdac_priv *pDev = cookie;
+ return pDev->regs->config;
+}
+
+unsigned int gradcdac_get_status(void *cookie)
+{
+ struct gradcdac_priv *pDev = cookie;
+ return pDev->regs->status;
+}
+
+/* Install IRQ handler for ADC and/or DAC interrupt.
+ * The installed IRQ handler(ISR) must read the status
+ * register to clear the pending interrupt avoiding multiple
+ * entries to the ISR caused by the same IRQ.
+ *
+ * \param adc 1=ADC interrupt, 2=DAC interrupt, 3=ADC and DAC interrupt
+ * \param isr Interrupt service routine called when IRQ is fired
+ * \param arg custom argument passed to ISR when called.
+ */
+int gradcdac_install_irq_handler(void *cookie, int adc, void (*isr)(void *cookie, void *arg), void *arg)
+{
+ struct gradcdac_priv *pDev = cookie;
+
+ if ( (adc > 3) || !adc )
+ return -1;
+
+ if ( adc & GRADCDAC_ISR_ADC ){
+ pDev->isr_adc_arg = arg;
+ pDev->isr_adc = isr;
+ drvmgr_interrupt_register(pDev->dev, GRADCDAC_IRQ_ADC, "gradcdac_adc", gradcdac_adc_interrupt, pDev);
+ }
+
+ if ( adc & GRADCDAC_ISR_DAC ){
+ pDev->isr_dac_arg = arg;
+ pDev->isr_dac = isr;
+ drvmgr_interrupt_register(pDev->dev, GRADCDAC_IRQ_DAC, "gradcdac_dac", gradcdac_dac_interrupt, pDev);
+ }
+
+ return 0;
+}
+
+void gradcdac_uninstall_irq_handler(void *cookie, int adc)
+{
+ struct gradcdac_priv *pDev = cookie;
+
+ if ( (adc > 3) || !adc )
+ return;
+
+ if ( adc & GRADCDAC_ISR_ADC ){
+ drvmgr_interrupt_unregister(pDev->dev, GRADCDAC_IRQ_ADC, gradcdac_adc_interrupt, pDev);
+ pDev->isr_adc = NULL;
+ pDev->isr_adc_arg = NULL;
+ }
+
+ if ( adc & GRADCDAC_ISR_DAC ){
+ drvmgr_interrupt_unregister(pDev->dev, GRADCDAC_IRQ_DAC, gradcdac_dac_interrupt, pDev);
+ pDev->isr_dac = NULL;
+ pDev->isr_dac_arg = NULL;
+ }
+}
+
+/* Make the ADC circuitry initialize an analogue to digital
+ * conversion. The result can be read out by gradcdac_adc_convert_try
+ * or gradcdac_adc_convert.
+ */
+void gradcdac_adc_convert_start(void *cookie)
+{
+ struct gradcdac_priv *pDev = cookie;
+
+ /* Write to ADC Data Input register to start a conversion */
+ pDev->regs->adc_din = 0;
+}
+
+/* Tries to read the conversion result. If the circuitry is busy
+ * converting the function return a non-zero value, if the conversion
+ * has successfully finished the function return zero.
+ *
+ * \param digital_value the resulting converted value is placed here
+ * \return zero = ADC conversion complete, digital_value contain current conversion result
+ * positive = ADC busy, digital_value contain previous conversion result
+ * negative = Conversion request failed.
+ */
+int gradcdac_adc_convert_try(void *cookie, unsigned short *digital_value)
+{
+ struct gradcdac_priv *pDev = cookie;
+ unsigned int status;
+
+ status = pDev->regs->status;
+
+ if ( digital_value ){
+ *digital_value = pDev->regs->adc_din;
+ }
+
+ if ( gradcdac_ADC_isOngoing(status) )
+ return 1;
+
+ if ( gradcdac_ADC_isCompleted(status) )
+ return 0;
+
+ /* Failure */
+ return -1;
+}
+
+/* Waits until the ADC circuitry has finished an analogue to digital
+ * conversion. The Waiting is implemented as a busy loop utilizing
+ * 100% CPU load.
+ */
+int gradcdac_adc_convert(void *cookie, unsigned short *digital_value)
+{
+ struct gradcdac_priv *pDev = cookie;
+ unsigned int status;
+
+ do {
+ status=gradcdac_get_status(pDev);
+ }while ( gradcdac_ADC_isOngoing(status) );
+
+ if ( digital_value )
+ *digital_value = pDev->regs->adc_din;
+
+ if ( gradcdac_ADC_isCompleted(status) )
+ return 0;
+
+ return -1;
+}
+
+/* Try to make the DAC circuitry initialize a digital to analogue
+ * conversion. If the circuitry is busy by a previous conversion
+ * the function return a non-zero value, if the conversion is
+ * successfully initialized the function return zero.
+ */
+int gradcdac_dac_convert_try(void *cookie, unsigned short digital_value)
+{
+ struct gradcdac_priv *pDev = cookie;
+ unsigned int status = pDev->regs->status;
+
+ if ( gradcdac_DAC_isOngoing(status) )
+ return -1;
+
+ /* Force a new conversion */
+ pDev->regs->dac_dout = digital_value;
+
+ /* Return success */
+ return 0;
+}
+
+/* Initializes a digital to analogue conversion by waiting until
+ * previous conversions is finished before proceeding with the
+ * conversion. The Waiting is implemented as a busy loop utilizing
+ * 100% CPU load.
+ */
+void gradcdac_dac_convert(void *cookie, unsigned short digital_value)
+{
+ struct gradcdac_priv *pDev = cookie;
+ unsigned int status;
+
+ do {
+ status = gradcdac_get_status(pDev);
+ }while( gradcdac_DAC_isOngoing(status) );
+
+ pDev->regs->dac_dout = digital_value;
+}
+
+unsigned int gradcdac_get_adrinput(void *cookie)
+{
+ struct gradcdac_priv *pDev = cookie;
+ return pDev->regs->adrin;
+}
+
+void gradcdac_set_adrinput(void *cookie, unsigned int input)
+{
+ struct gradcdac_priv *pDev = cookie;
+ pDev->regs->adrin = input;
+}
+
+unsigned int gradcdac_get_adroutput(void *cookie)
+{
+ struct gradcdac_priv *pDev = cookie;
+ return pDev->regs->adrout;
+}
+
+void gradcdac_set_adroutput(void *cookie, unsigned int output)
+{
+ struct gradcdac_priv *pDev = cookie;
+ pDev->regs->adrout = output;
+}
+
+unsigned int gradcdac_get_adrdir(void *cookie)
+{
+ struct gradcdac_priv *pDev = cookie;
+ return pDev->regs->adrdir;
+}
+
+void gradcdac_set_adrdir(void *cookie, unsigned int dir)
+{
+ struct gradcdac_priv *pDev = cookie;
+ pDev->regs->adrdir = dir;
+}
+
+unsigned int gradcdac_get_datainput(void *cookie)
+{
+ struct gradcdac_priv *pDev = cookie;
+ return pDev->regs->data_in;
+}
+
+void gradcdac_set_datainput(void *cookie, unsigned int input)
+{
+ struct gradcdac_priv *pDev = cookie;
+ pDev->regs->data_in = input;
+}
+
+unsigned int gradcdac_get_dataoutput(void *cookie)
+{
+ struct gradcdac_priv *pDev = cookie;
+ return pDev->regs->data_out;
+}
+
+void gradcdac_set_dataoutput(void *cookie, unsigned int output)
+{
+ struct gradcdac_priv *pDev = cookie;
+ pDev->regs->data_out = output;
+}
+
+unsigned int gradcdac_get_datadir(void *cookie)
+{
+ struct gradcdac_priv *pDev = cookie;
+ return pDev->regs->data_dir;
+}
+
+void gradcdac_set_datadir(void *cookie, unsigned int dir)
+{
+ struct gradcdac_priv *pDev = cookie;
+ pDev->regs->data_dir = dir;
+}
diff --git a/bsps/shared/grlib/ascs/grascs.c b/bsps/shared/grlib/ascs/grascs.c
new file mode 100644
index 0000000000..1d9541ce85
--- /dev/null
+++ b/bsps/shared/grlib/ascs/grascs.c
@@ -0,0 +1,619 @@
+/* This file contains the GRASCS RTEMS driver
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdlib.h>
+#include <bsp.h>
+#include <grlib/ambapp.h>
+#include <grlib/grascs.h>
+#include <grlib/grlib.h>
+
+#include <grlib/grlib_impl.h>
+
+#ifndef GAISLER_ASCS
+#define GAISLER_ASCS 0x043
+#endif
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+typedef struct {
+ volatile unsigned int cmd;
+ volatile unsigned int clk;
+ volatile unsigned int sts;
+ volatile unsigned int tcd;
+ volatile unsigned int tmd;
+} GRASCS_regs;
+
+typedef struct {
+ unsigned char tmconf;
+ unsigned char usconf;
+ unsigned char nslaves;
+ unsigned char dbits;
+ int clkfreq;
+} GRASCS_caps;
+
+typedef struct {
+ GRASCS_regs *regs; /* Pointer to core registers */
+ GRASCS_caps *caps; /* Pointer to capability struct */
+ rtems_id tcsem1, tcsem2;
+ rtems_id tmsem1, tmsem2;
+ volatile char running;
+ int tcptr;
+ int tmptr;
+ int tcwords;
+ int tmwords;
+} GRASCS_cfg;
+
+static GRASCS_cfg *cfg = NULL;
+
+/*------------------------------------*/
+/* Start of internal helper functions */
+/*------------------------------------*/
+
+/* Function: ASCS_getaddr
+ Arguments: base: Core's register base address
+ irq: Core's irq
+ Return values: 0 if successful, -1 if core is not found
+ Description: Assigns core's register base address and
+ irq to arguments. Uses AMBA plug and play to find the
+ core.
+*/
+static int ASCS_get_addr(int *base, int *irq) {
+
+ struct ambapp_apb_info core;
+
+ if(ambapp_find_apbslv(&ambapp_plb, VENDOR_GAISLER, GAISLER_ASCS, &core) == 1) {
+ *base = core.start;
+ *irq = core.irq;
+ DBG("ASCS_get_addr: Registerd ASCS core at 0x%x with irq %i\n",core.start, core.irq);
+ return 0;
+ }
+ DBG("ASCS_get_addr: Failed to detect core\n");
+ return -1;
+}
+
+/* Function: ASCS_calc_clkreg
+ Arguments: sysfreq: System clock frequency in kHz
+ etrfreq: ETR frequency in Hz
+ Return values: Value of core's CLK-register
+ Description: Calculates value of core's CLK-register. See
+ GRASCS IP core documentation for details.
+*/
+static int ASCS_calc_clkreg(int sysfreq, int etrfreq) {
+
+ if(cfg->caps->usconf)
+ return 1000000/etrfreq;
+ else
+ return sysfreq*1000/etrfreq;
+}
+
+/* Function: ASCS_get_sysfreq
+ Arguments: -
+ Return values: System clock frequency in kHz, -1 if failed
+ Description: Uses AMBA plug and play to lookup system frequency
+*/
+static int ASCS_get_sysfreq(void) {
+
+ struct ambapp_apb_info gpt;
+ struct gptimer_regs *tregs;
+ int tmp;
+
+ if(ambapp_find_apbslv(&ambapp_plb, VENDOR_GAISLER, GAISLER_GPTIMER, &gpt) == 1) {
+ tregs = (struct gptimer_regs *) gpt.start;
+ tmp = (tregs->scaler_reload + 1)*1000;
+ DBG("ASCS_get_sysfreq: Detected system frequency %i kHz\n",tmp);
+ if((tmp < GRASCS_MIN_SFREQ) || (tmp > GRASCS_MAX_SFREQ)) {
+ DBG("ASCS_get_sysfreq: System frequency is invalid for ASCS core\n");
+ return -1;
+ }
+ else
+ return (tregs->scaler_reload + 1)*1000;
+ }
+ DBG("ASCS_get_sysfreq: Failed to detect system frequency\n");
+ return -1;
+}
+
+/* Function: ASCS_irqhandler
+ Arguments: v: not used
+ Return values: -
+ Description: Determines the source of the interrupt, clears the
+ appropriate bits in the core's STS register and releases
+ the associated semaphore
+*/
+static rtems_isr ASCS_irqhandler(rtems_vector_number v) {
+
+ if(cfg->regs->sts & GRASCS_STS_TCDONE) {
+ /* Clear TC done bit */
+ cfg->regs->sts |= GRASCS_STS_TCDONE;
+
+ if(--cfg->tcwords == 0)
+ /* No more TCs to perform right now */
+ rtems_semaphore_release(cfg->tcsem2);
+ else {
+ /* Block not sent yet, start next TC */
+ if(cfg->caps->dbits == 8) {
+ cfg->tcptr++;
+ cfg->regs->tcd = *((unsigned char*)cfg->tcptr);
+ }
+ else if(cfg->caps->dbits == 16) {
+ cfg->tcptr += 2;
+ cfg->regs->tcd = *((unsigned short int*)cfg->tcptr);
+ }
+ else {
+ cfg->tcptr += 4;
+ cfg->regs->tcd = *((unsigned int*)cfg->tcptr);
+ }
+ }
+ }
+
+ if(cfg->regs->sts & GRASCS_STS_TMDONE) {
+ /* Clear TM done bit */
+ cfg->regs->sts |= GRASCS_STS_TMDONE;
+
+ /* Store received data */
+ if(cfg->caps->dbits == 8) {
+ *((unsigned char*)cfg->tmptr) = (unsigned char)(cfg->regs->tmd & 0xFF);
+ cfg->tmptr++;
+ }
+ else if(cfg->caps->dbits == 16) {
+ *((unsigned short int*)cfg->tmptr) = (unsigned short int)(cfg->regs->tmd & 0xFFFF);
+ cfg->tmptr += 2;
+ }
+ else {
+ *((unsigned int*)cfg->tmptr) = cfg->regs->tmd;
+ cfg->tmptr += 4;
+ }
+
+ if(--cfg->tmwords == 0)
+ /* No more TMs to perform right now */
+ rtems_semaphore_release(cfg->tmsem2);
+ else
+ /* Block not received yet, start next TM */
+ cfg->regs->cmd |= GRASCS_CMD_SENDTM;
+ }
+}
+
+/*---------------------------*/
+/* Start of driver interface */
+/*---------------------------*/
+
+/* Function: ASCS_init
+ Arguments: -
+ Return values: 0 if successful, -1 if unsuccessful
+ Description: Initializes the ASCS core
+*/
+int ASCS_init(void) {
+
+ int base, irq, tmp;
+
+ DBG("ASCS_init: Starting initialization of ASCS core\n");
+
+ /* Allocate memory for config, status and capability struct */
+ if((cfg = grlib_malloc(sizeof(*cfg))) == NULL) {
+ DBG("ASCS_init: Could not allocate memory for cfg struc\n");
+ return -1;
+ }
+
+ if((cfg->caps = grlib_calloc(1,sizeof(*cfg->caps))) == NULL) {
+ DBG("ASCS_init: Could not allocate memory for caps struc\n");
+ goto init_error1;
+ }
+
+ /* Create semaphores for blocking ASCS_TC/TM functions */
+ if(rtems_semaphore_create(rtems_build_name('A','S','C','0'),1,
+ (RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|
+ RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|
+ RTEMS_NO_PRIORITY_CEILING), 0,
+ &cfg->tcsem1) != RTEMS_SUCCESSFUL) {
+ DBG("ASCS_init: Failed to create semaphore ASC0\n");
+ goto init_error2;
+ }
+ if(rtems_semaphore_create(rtems_build_name('A','S','C','1'),1,
+ (RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|
+ RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|
+ RTEMS_NO_PRIORITY_CEILING), 0,
+ &cfg->tmsem1) != RTEMS_SUCCESSFUL) {
+ DBG("ASCS_init: Failed to create semaphore ASC1\n");
+ goto init_error2;
+ }
+ /* Create semaphores for waiting on ASCS_TC/TM interrupt */
+ if(rtems_semaphore_create(rtems_build_name('A','S','C','2'),0,
+ (RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|
+ RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|
+ RTEMS_NO_PRIORITY_CEILING), 0,
+ &cfg->tcsem2) != RTEMS_SUCCESSFUL) {
+ DBG("ASCS_init: Failed to create semaphore ASC2\n");
+ goto init_error2;
+ }
+ if(rtems_semaphore_create(rtems_build_name('A','S','C','3'),0,
+ (RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|
+ RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|
+ RTEMS_NO_PRIORITY_CEILING), 0,
+ &cfg->tmsem2) != RTEMS_SUCCESSFUL) {
+ DBG("ASCS_init: Failed to create semaphore ASC3\n");
+ goto init_error2;
+ }
+
+ /* Set pointer to core registers */
+ if(ASCS_get_addr(&base, &irq) == -1)
+ goto init_error2;
+
+ cfg->regs = (GRASCS_regs*)base;
+
+ /* Read core capabilities */
+ tmp = cfg->regs->sts;
+ cfg->caps->dbits = ((tmp >> GRASCS_STS_DBITS_BITS) & 0x1F) + 1;
+ cfg->caps->nslaves = ((tmp >> GRASCS_STS_NSLAVES_BITS) & 0xF) + 1;
+ cfg->caps->tmconf = (tmp >> GRASCS_STS_TMCONF_BITS) & 0x1;
+ cfg->caps->usconf = (tmp >> GRASCS_STS_USCONF_BITS) & 0x1;
+
+ /* Reset and configure core */
+ cfg->running = 0;
+ cfg->regs->cmd |= GRASCS_CMD_RESET;
+ if((tmp = ASCS_get_sysfreq()) == -1)
+ goto init_error2;
+ cfg->caps->clkfreq = tmp;
+ while(ASCS_iface_status())
+ ;
+ cfg->regs->clk = ASCS_calc_clkreg(tmp, GRASCS_DEFAULT_ETRFREQ);
+ cfg->regs->cmd = GRASCS_CMD_US1C;
+ cfg->regs->cmd |= (tmp/1000 << GRASCS_CMD_US1_BITS) | GRASCS_CMD_US1C |
+ GRASCS_CMD_TCDONE | GRASCS_CMD_TMDONE;
+
+ /* Register interrupt routine */
+ set_vector(ASCS_irqhandler,irq+0x10,2);
+
+ return 0;
+
+ init_error2:
+ free(cfg->caps);
+ init_error1:
+ free(cfg);
+ return -1;
+}
+
+/* Function: ASCS_input_select
+ Arguments: slave: The number of the slave that is active,
+ numbered from 0-15
+ Return values: 0 if successful, -GRASCS_ERROR_CAPFAULT if slave value
+ is negative or too big, -GRASCS_ERROR_TRANSACTIVE if
+ a TM is active.
+ Description: Sets the slave_sel bits in the core's CMD register.
+ they are used to choose which slave the core listens
+ to when performing a TM. The bits can't be set
+ during a TM, and the function will in such a case fail.
+*/
+int ASCS_input_select(int slave) {
+
+ if((slave < 0) || (slave > cfg->caps->nslaves)) {
+ /* Slave number is negative or too big */
+ DBG("ASCS_input_select: Wrong slave number\n");
+ return -GRASCS_ERROR_CAPFAULT;
+ }
+
+ if(rtems_semaphore_obtain(cfg->tmsem1,RTEMS_NO_WAIT,RTEMS_NO_TIMEOUT) !=
+ RTEMS_SUCCESSFUL) {
+ /* Can't change active slave during a TM */
+ DBG("ASCS_input_select: Transaction active\n");
+ return -GRASCS_ERROR_TRANSACTIVE;
+ }
+
+ cfg->regs->cmd = ((cfg->regs->cmd &= ~GRASCS_CMD_SLAVESEL) |
+ (slave << GRASCS_CMD_SLAVESEL_BITS));
+
+ rtems_semaphore_release(cfg->tmsem1);
+ return 0;
+}
+
+/* Function: ASCS_etr_select
+ Arguments: src: The source of the ETR signal, valid values are
+ 0-GRASCS_MAX_TMS (0 = internal source, 1-GRASCS_MAX_TMS =
+ external time markers 1-GRASCS_MAX_TMS).
+ freq: ETR frequency in Hz. Valid values are
+ GRASCS_MIN_ETRFREQ-GRASCS_MAX_ETRFREQ
+ Return values: 0 if successful, -GRASCS_ERROR_CAPFAULT if src or freq values
+ are invalid, -GRASCS_ERROR_STARTSTOP if synchronization interface
+ isn't stopped.
+ Description: Changes the source for the ETR signal. The frequency of the source signal
+ is assumed to be the same as the frequency of the freq input
+*/
+int ASCS_etr_select(int etr, int freq) {
+
+ if((etr < 0) || (etr > GRASCS_MAX_TMS) || ((cfg->caps->tmconf == 0) && (etr > 0)) ||
+ (freq < GRASCS_MIN_ETRFREQ) || (freq > GRASCS_MAX_ETRFREQ)) {
+ /* ETR source value or frequency is invalid */
+ DBG("ASCS_etr_select: Wrong etr src number or wrong frequency\n");
+ return -GRASCS_ERROR_CAPFAULT;
+ }
+
+ if(cfg->regs->sts & GRASCS_STS_ERUNNING) {
+ /* Synchronization interface is running */
+ DBG("ASCS_etr_select: Synch interface is running\n");
+ return -GRASCS_ERROR_STARTSTOP;
+ }
+
+ cfg->regs->clk = ASCS_calc_clkreg(cfg->caps->clkfreq,freq);
+ cfg->regs->cmd = ((cfg->regs->cmd &= ~GRASCS_CMD_ETRCTRL) |
+ (etr << GRASCS_CMD_ETRCTRL_BITS));
+
+ return 0;
+}
+
+/* Function: ASCS_start
+ Arguments: -
+ Return values: -
+ Description: Enables the serial interface.
+*/
+void ASCS_start(void) {
+
+ /* Set register and internal status to running */
+ cfg->regs->cmd |= GRASCS_CMD_STARTSTOP;
+ cfg->running = 1;
+}
+
+/* Function: ASCS_stop
+ Arguments: -
+ Return values: -
+ Description: Disables the serial interface. This function will
+ block until possible calls to TC_send(_block) and
+ * TM_recv(_block) have returned in order to be sure
+ that started transactions will be performed.
+*/
+void ASCS_stop(void) {
+
+ /* Set internal status to stopped */
+ cfg->running = 0;
+
+ /* Obtain semaphores to avoid possible situation where a
+ TC_send(_block) or TM_recv(_block) is aborted and driver is
+ waiting forever for an interrupt */
+ rtems_semaphore_obtain(cfg->tcsem1,RTEMS_WAIT,RTEMS_NO_TIMEOUT);
+ rtems_semaphore_obtain(cfg->tmsem1,RTEMS_WAIT,RTEMS_NO_TIMEOUT);
+
+ /* Change actual register value */
+ cfg->regs->cmd &= ~GRASCS_CMD_STARTSTOP;
+
+ /* Release the semaphores */
+ rtems_semaphore_release(cfg->tcsem1);
+ rtems_semaphore_release(cfg->tmsem1);
+}
+
+/* Function: ASCS_iface_status
+ Arguments: -
+ Return values: 0 if both serial interface and synch interface is stopped,
+ 1 if serial interface is running but synch interface is
+ stopped, 2 if serial interface is stopped but synch interface
+ is running, 3 if both serial and synch interface is running
+ Description: Reads the core's STS register and reports the status of the
+ serial and synch interfaces
+*/
+int ASCS_iface_status(void) {
+
+ return ((cfg->regs->sts & 0x3) & (0x2 | cfg->running));
+}
+
+/* Function: ASCS_TC_send
+ Arguments: word: Pointer to a word that should be sent
+ Return values: 0 on success
+ -GRASCS_ERROR_STARTSTOP if serial interface is stopped,
+ -GRASCS_ERROR_TRANSACTIVE if another TC is in progress.
+ Description: Start a TC and sends the data that word points to.
+*/
+int ASCS_TC_send(int *word) {
+
+ int retval;
+
+ if(rtems_semaphore_obtain(cfg->tcsem1,RTEMS_NO_WAIT,RTEMS_NO_TIMEOUT) !=
+ RTEMS_SUCCESSFUL) {
+ /* Can't start a TC_send if another TC_send or TC_send_block is
+ in progress */
+ DBG("ASCS_TC_send: Could not obtain semaphore, transcation probably in progress\n");
+ return -GRASCS_ERROR_TRANSACTIVE;
+ }
+
+ if(!cfg->running) {
+ /* Can't start a TC if serial interface isn't started */
+ DBG("ASCS_TC_send: Serial interface is not started\n");
+ retval = -GRASCS_ERROR_STARTSTOP;
+ }
+ else {
+ /* Start the transfer */
+ cfg->tcwords = 1;
+ if(cfg->caps->dbits == 8)
+ cfg->regs->tcd = *((unsigned char*)word);
+ else if(cfg->caps->dbits == 16)
+ cfg->regs->tcd = *((unsigned short int*)((int)word & ~1));
+ else
+ cfg->regs->tcd = *((unsigned int*)((int)word & ~3));
+
+ /* Wait until transfer is complete */
+ rtems_semaphore_obtain(cfg->tcsem2,RTEMS_WAIT,RTEMS_NO_TIMEOUT);
+ retval = 0;
+ }
+
+ rtems_semaphore_release(cfg->tcsem1);
+
+ return retval;
+}
+
+/* Function: ASCS_TC_send_block
+ Arguments: block: Pointer to the start of a datablock that
+ should be sent.
+ ntrans: Number of transfers needed to transfer
+ the block.
+ Return values: 0 if successful, -GRASCS_ERROR_STARTSTOP if TC
+ couldn't be started because serial interface is
+ stopped, -GRASCS_ERROR_TRANSACTIVE if TC couldn't
+ be started because another TC isn't done yet.
+ Description: Starts ntrans TCs and sends the data that starts at the
+ address that block points to. The size of each
+ transaction will vary depending on whether the core is
+ configured for 8, 16, or 32 bits data transfers.
+*/
+int ASCS_TC_send_block(int *block, int ntrans) {
+
+ int retval;
+
+ if(rtems_semaphore_obtain(cfg->tcsem1,RTEMS_NO_WAIT,RTEMS_NO_TIMEOUT) !=
+ RTEMS_SUCCESSFUL) {
+ /* Can't start a TC_send_block if another TC_send or TC_send_block is
+ in progress */
+ DBG("ASCS_TC_send_block: Could not obtain semaphore, transcation probably in progress\n");
+ return -GRASCS_ERROR_TRANSACTIVE;
+ }
+
+ if(!cfg->running) {
+ /* Can't start a TC if serial interface isn't started */
+ DBG("ASCS_TC_send_block: Serial interface is not started\n");
+ retval = -GRASCS_ERROR_STARTSTOP;
+ }
+ else {
+ /* Start the first transfer */
+ cfg->tcwords = ntrans;
+ if(cfg->caps->dbits == 8) {
+ cfg->tcptr = (int)block;
+ cfg->regs->tcd = *((unsigned char*)cfg->tcptr);
+ }
+ else if(cfg->caps->dbits == 16) {
+ cfg->tcptr = (int)block & ~1;
+ cfg->regs->tcd = *((unsigned short int*)cfg->tcptr);
+ }
+ else {
+ cfg->tcptr = (int)block & ~3;
+ cfg->regs->tcd = *((unsigned int*)cfg->tcptr);
+ }
+
+ /* Wait until all transfers are complete */
+ rtems_semaphore_obtain(cfg->tcsem2,RTEMS_WAIT,RTEMS_NO_TIMEOUT);
+ retval = 0;
+ }
+
+ rtems_semaphore_release(cfg->tcsem1);
+
+ return retval;
+}
+
+/* Function: ASCS_TC_sync_start
+ Arguments: -
+ Return values: -
+ Description: Starts synchronization interface. Might
+ be delayed if a TM is in progress. SW can poll
+ ASCS_iface_status() to find out when synch interface is
+ started. First ETR pulse can be delayed up to one ETR
+ period depending on the source of the ETR and
+ activity on the TM line.
+*/
+void ASCS_TC_sync_start(void) {
+
+ cfg->regs->cmd |= GRASCS_CMD_ESTARTSTOP;
+}
+
+/* Function: ASCS_TC_sync_stop
+ Arguments: -
+ Return values: -
+ Description: Stops the synchronization interface. Might
+ be delayed for 1 us if a ETR pulse is being generated. SW
+ can determine when synch interface has stopped by polling
+ ASCS_iface_status().
+*/
+void ASCS_TC_sync_stop(void) {
+
+ cfg->regs->cmd &= ~GRASCS_CMD_ESTARTSTOP;
+}
+
+/* Function: ASCS_TM_recv
+ Arguments: word: Pointer to where the received word should be
+ placed
+ Return values: 0 if successful, -GRASCS_ERROR_STARTSTOP if serial
+ interface isn't started, -GRASCS_ERROR_TRANSACTIVE
+ if another TM is in progress
+ Description: Starts a TM and stores the incoming data in word.
+*/
+int ASCS_TM_recv(int *word) {
+
+ int retval;
+
+ if(rtems_semaphore_obtain(cfg->tmsem1,RTEMS_NO_WAIT,RTEMS_NO_TIMEOUT) !=
+ RTEMS_SUCCESSFUL) {
+ /* Can't start a TM_recv if another TM_recv or TM_recv_block is
+ in progress */
+ DBG("ASCS_TM_recv: Could not obtain semaphore, transaction probably in progress\n");
+ return -GRASCS_ERROR_TRANSACTIVE;
+ }
+
+ if(!cfg->running) {
+ /* Can't start a TM if serial interface isn't started */
+ DBG("ASCS_TM_recv: Serial interface is not started\n");
+ retval = -GRASCS_ERROR_STARTSTOP;
+ }
+ else {
+ /* Start transfer */
+ cfg->tmwords = 1;
+ cfg->tmptr = (int)word;
+ cfg->regs->cmd |= GRASCS_CMD_SENDTM;
+
+ /* Wait until transfer finishes */
+ rtems_semaphore_obtain(cfg->tmsem2,RTEMS_WAIT,RTEMS_NO_TIMEOUT);
+ retval = 0;
+ }
+
+ rtems_semaphore_release(cfg->tmsem1);
+
+ return retval;
+}
+
+/* Function: ASCS_TM_recv_block
+ Arguments: block: Pointer to where the received datablock
+ should be stored.
+ ntrans: Number of transfers needed to transfer
+ the block.
+ Return values: 0 if successful, -GRASCS_ERROR_STARTSTOP if serial
+ interface isn't started, -GRASCS_ERROR_TRANSACTIVE if
+ a performed TM hasn't been processed yet
+ Description: Starts ntrans TMs and stores the data at the address
+ that block points to. The size of each transaction
+ will vary depending on whether the core is
+ configured for 8, 16, or 32 bits data transfers.
+*/
+int ASCS_TM_recv_block(int *block, int ntrans) {
+
+ int retval;
+
+ if(rtems_semaphore_obtain(cfg->tmsem1,RTEMS_NO_WAIT,RTEMS_NO_TIMEOUT) !=
+ RTEMS_SUCCESSFUL) {
+ /* Can't start a TM_recv_block if another TM_recv or TM_recv_block is
+ in progress */
+ DBG("ASCS_TM_recv_block: Could not obtain semaphore, transaction probably in progress\n");
+ return -GRASCS_ERROR_TRANSACTIVE;
+ }
+
+ if(!cfg->running) {
+ /* Can't start a TM if serial interface isn't started */
+ DBG("ASCS_TM_recv_block: Serial interface is not started\n");
+ retval = -GRASCS_ERROR_STARTSTOP;
+ }
+ else {
+ /* Start transfer */
+ cfg->tmwords = ntrans;
+ cfg->tmptr = (int)block;
+ cfg->regs->cmd |= GRASCS_CMD_SENDTM;
+
+ /* Wait until transfer finishes */
+ rtems_semaphore_obtain(cfg->tmsem2,RTEMS_WAIT,RTEMS_NO_TIMEOUT);
+ retval = 0;
+ }
+
+ rtems_semaphore_release(cfg->tmsem1);
+
+ return retval;
+}
diff --git a/bsps/shared/grlib/btimer/gptimer.c b/bsps/shared/grlib/btimer/gptimer.c
new file mode 100644
index 0000000000..4b3ec8c4b8
--- /dev/null
+++ b/bsps/shared/grlib/btimer/gptimer.c
@@ -0,0 +1,545 @@
+/* This file contains the driver for the GRLIB GPTIMER timers port. The driver
+ * is implemented by using the tlib.c simple timer layer and the Driver
+ * Manager.
+ *
+ * The Driver can be configured using driver resources:
+ *
+ * - timerStart Timer Index of first Timer, this parameter is typically used
+ *              in AMP systems for resource allocation. The Timers before
+ *              timerStart will not be accessed.
+ * - timerCnt Number of timers that the driver will use, this parameter is
+ *              typically used in AMP systems for resource allocation between
+ *              OS instances.
+ * - prescaler Base prescaler, normally set by bootloader but can be
+ * overridden. The default scaler reload value set by bootloader
+ * is so that Timers operate in 1MHz. Setting the prescaler to a
+ * lower value increase the accuracy of the timers but shortens
+ * the time until underflow happens.
+ * - clockTimer Used to select a particular timer to be the system clock
+ * timer. This is useful when multiple GPTIMERs cores are
+ * available, or in AMP systems. By default the TLIB selects the
+ * first timer registered as system clock timer.
+ *
+ * The BSP should define GPTIMER_INFO_AVAIL in order to add the info routine
+ * used for debugging.
+ *
+ * COPYRIGHT (c) 2010.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <bsp.h>
+#include <stdlib.h>
+#include <string.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/grlib.h>
+#include <grlib/gptimer.h>
+#include <grlib/tlib.h>
+
+#if defined(LEON3)
+#include <leon.h>
+#endif
+
+#ifdef GPTIMER_INFO_AVAIL
+#include <stdio.h>
+#endif
+
+#ifdef RTEMS_SMP
+#include <rtems/score/processormask.h>
+#include <rtems/score/smpimpl.h>
+#endif
+
+#include <grlib/grlib_impl.h>
+
+/* GPTIMER Core Configuration Register (READ-ONLY) */
+#define GPTIMER_CFG_TIMERS_BIT 0
+#define GPTIMER_CFG_IRQ_BIT 3
+#define GPTIMER_CFG_SI_BIT 8
+#define GPTIMER_CFG_DF_BIT 9
+
+#define GPTIMER_CFG_TIMERS (0x7<<GPTIMER_CFG_TIMERS_BIT)
+#define GPTIMER_CFG_IRQ (0x1f<<GPTIMER_CFG_IRQ_BIT)
+#define GPTIMER_CFG_SI (1<<GPTIMER_CFG_SI_BIT)
+#define GPTIMER_CFG_DF (1<<GPTIMER_CFG_DF_BIT)
+
+/* GPTIMER Timer Control Register */
+#define GPTIMER_CTRL_EN_BIT 0
+#define GPTIMER_CTRL_RS_BIT 1
+#define GPTIMER_CTRL_LD_BIT 2
+#define GPTIMER_CTRL_IE_BIT 3
+#define GPTIMER_CTRL_IP_BIT 4
+#define GPTIMER_CTRL_CH_BIT 5
+#define GPTIMER_CTRL_DH_BIT 6
+
+#define GPTIMER_CTRL_EN (1<<GPTIMER_CTRL_EN_BIT)
+#define GPTIMER_CTRL_RS (1<<GPTIMER_CTRL_RS_BIT)
+#define GPTIMER_CTRL_LD (1<<GPTIMER_CTRL_LD_BIT)
+#define GPTIMER_CTRL_IE (1<<GPTIMER_CTRL_IE_BIT)
+#define GPTIMER_CTRL_IP (1<<GPTIMER_CTRL_IP_BIT)
+#define GPTIMER_CTRL_CH (1<<GPTIMER_CTRL_CH_BIT)
+#define GPTIMER_CTRL_DH (1<<GPTIMER_CTRL_DH_BIT)
+
+#define DBG(x...)
+
+/* GPTIMER timer private */
+struct gptimer_timer {
+ struct tlib_dev tdev; /* Must be first in struct */
+ struct gptimer_timer_regs *tregs;
+ char index; /* Timer Index in this driver */
+ char tindex; /* Timer Index In Hardware */
+ unsigned char irq_ack_mask;
+};
+
+/* GPTIMER Core private */
+struct gptimer_priv {
+ struct drvmgr_dev *dev;
+ struct gptimer_regs *regs;
+ unsigned int base_clk;
+ unsigned int base_freq;
+ unsigned int widthmask;
+ char separate_interrupt;
+ char isr_installed;
+
+ /* Structure per Timer unit, the core supports up to 8 timers */
+ int timer_cnt;
+ struct gptimer_timer timers[0];
+};
+
+void gptimer_isr(void *data);
+
+#if 0
+void gptimer_tlib_irq_register(struct tlib_drv *tdrv, tlib_isr_t func, void *data)
+{
+ struct gptimer_priv *priv = (struct gptimer_priv *)tdrv;
+
+ if ( SHARED ...)
+
+
+ drvmgr_interrupt_register();
+}
+#endif
+
+/******************* Driver manager interface ***********************/
+
+/* Driver prototypes */
+static struct tlib_drv gptimer_tlib_drv;
+int gptimer_device_init(struct gptimer_priv *priv);
+
+int gptimer_init1(struct drvmgr_dev *dev);
+#ifdef GPTIMER_INFO_AVAIL
+static int gptimer_info(
+ struct drvmgr_dev *dev,
+ void (*print_line)(void *p, char *str),
+ void *p, int, char *argv[]);
+#define GTIMER_INFO_FUNC gptimer_info
+#else
+#define GTIMER_INFO_FUNC NULL
+#endif
+
+struct drvmgr_drv_ops gptimer_ops =
+{
+ .init = {gptimer_init1, NULL, NULL, NULL},
+ .remove = NULL,
+ .info = GTIMER_INFO_FUNC,
+};
+
+struct amba_dev_id gptimer_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_GPTIMER},
+ {VENDOR_GAISLER, GAISLER_GRTIMER},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info gptimer_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_GPTIMER_ID,/* Driver ID */
+ "GPTIMER_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &gptimer_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+ },
+ &gptimer_ids[0]
+};
+
+void gptimer_register_drv (void)
+{
+ DBG("Registering GPTIMER driver\n");
+ drvmgr_drv_register(&gptimer_drv_info.general);
+}
+
+int gptimer_init1(struct drvmgr_dev *dev)
+{
+ struct gptimer_priv *priv;
+ struct gptimer_regs *regs;
+ struct amba_dev_info *ambadev;
+ struct ambapp_core *pnpinfo;
+ int timer_hw_cnt, timer_cnt, timer_start;
+ int i, size;
+ struct gptimer_timer *timer;
+ union drvmgr_key_value *value;
+ unsigned char irq_ack_mask;
+
+ /* Get device information from AMBA PnP information */
+ ambadev = (struct amba_dev_info *)dev->businfo;
+ if ( ambadev == NULL ) {
+ return -1;
+ }
+ pnpinfo = &ambadev->info;
+ regs = (struct gptimer_regs *)pnpinfo->apb_slv->start;
+
+ DBG("GPTIMER[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+
+ /* Get number of Timers */
+ timer_hw_cnt = regs->cfg & GPTIMER_CFG_TIMERS;
+
+	/* Let user select a range of timers to be used. In AMP systems
+	 * it is sometimes necessary to leave timers for other CPU instances.
+	 *
+	 * The default operation in AMP is to share the timers within the
+	 * first GPTIMER core as below. This can of course be overridden by
+	 * driver resources.
+	 */
+ timer_cnt = timer_hw_cnt;
+ timer_start = 0;
+#if defined(RTEMS_MULTIPROCESSING) && defined(LEON3)
+ if ((dev->minor_drv == 0) && drvmgr_on_rootbus(dev)) {
+ timer_cnt = 1;
+ timer_start = LEON3_Cpu_Index;
+ }
+#endif
+ value = drvmgr_dev_key_get(dev, "timerStart", DRVMGR_KT_INT);
+ if ( value) {
+ timer_start = value->i;
+ timer_cnt = timer_hw_cnt - timer_start;
+ }
+ value = drvmgr_dev_key_get(dev, "timerCnt", DRVMGR_KT_INT);
+ if ( value && (value->i < timer_cnt) ) {
+ timer_cnt = value->i;
+ }
+
+ /* Allocate Common Timer Description, size depends on how many timers
+ * are present.
+ */
+ size = sizeof(struct gptimer_priv) +
+ timer_cnt*sizeof(struct gptimer_timer);
+ priv = dev->priv = grlib_calloc(1, size);
+ if ( !priv )
+ return DRVMGR_NOMEM;
+ priv->dev = dev;
+ priv->regs = regs;
+
+ /* The Base Frequency of the GPTIMER core is the same as the
+ * frequency of the AMBA bus it is situated on.
+ */
+ drvmgr_freq_get(dev, DEV_APB_SLV, &priv->base_clk);
+
+	/* This core may provide important Timer functionality
+	 * to other drivers and the RTEMS kernel, the Clock driver
+	 * may for example use this device. So the Timer driver must be
+	 * initialized in the first initialization stage.
+	 */
+
+ /*** Initialize Hardware ***/
+
+ /* If user request to set prescaler, we will do that. However, note
+ * that doing so for the Root-Bus GPTIMER may affect the RTEMS Clock
+ * so that Clock frequency is wrong.
+ */
+ value = drvmgr_dev_key_get(priv->dev, "prescaler", DRVMGR_KT_INT);
+ if ( value )
+ regs->scaler_reload = value->i;
+
+ /* Get Frequency that the timers are operating in (after prescaler) */
+ priv->base_freq = priv->base_clk / (priv->regs->scaler_reload + 1);
+
+	/* Stop Timer and probe Pending bit. In newer hardware the
+	 * timer pending bit is cleared by writing a one to it,
+	 * whereas in older versions it is cleared with a zero.
+	 */
+ priv->regs->timer[timer_start].ctrl = GPTIMER_CTRL_IP;
+ if ((priv->regs->timer[timer_start].ctrl & GPTIMER_CTRL_IP) != 0)
+ irq_ack_mask = ~GPTIMER_CTRL_IP;
+ else
+ irq_ack_mask = ~0;
+
+ /* Probe timer register width mask */
+ priv->regs->timer[timer_start].value = 0xffffffff;
+ priv->widthmask = priv->regs->timer[timer_start].value;
+
+ priv->timer_cnt = timer_cnt;
+ for (i=0; i<timer_cnt; i++) {
+ timer = &priv->timers[i];
+ timer->index = i;
+ timer->tindex = i + timer_start;
+ timer->tregs = &regs->timer[(int)timer->tindex];
+ timer->tdev.drv = &gptimer_tlib_drv;
+ timer->irq_ack_mask = irq_ack_mask;
+
+ /* Register Timer at Timer Library */
+ tlib_dev_reg(&timer->tdev);
+ }
+
+ /* Check Interrupt support implementation, two cases:
+ * A. All Timers share one IRQ
+ * B. Each Timer have an individual IRQ. The number is:
+ * BASE_IRQ + timer_index
+ */
+ priv->separate_interrupt = (regs->cfg & GPTIMER_CFG_SI) != 0;
+
+ return DRVMGR_OK;
+}
+
+#ifdef GPTIMER_INFO_AVAIL
+static int gptimer_info(
+ struct drvmgr_dev *dev,
+ void (*print_line)(void *p, char *str),
+ void *p, int argc, char *argv[])
+{
+ struct gptimer_priv *priv = dev->priv;
+ struct gptimer_timer *timer;
+ char buf[64];
+ int i;
+
+ if (priv == NULL || argc != 0)
+ return -DRVMGR_EINVAL;
+
+ sprintf(buf, "Timer Count: %d", priv->timer_cnt);
+ print_line(p, buf);
+ sprintf(buf, "REGS: 0x%08x", (unsigned int)priv->regs);
+ print_line(p, buf);
+ sprintf(buf, "BASE SCALER: %d", priv->regs->scaler_reload);
+ print_line(p, buf);
+ sprintf(buf, "BASE FREQ: %dkHz", priv->base_freq / 1000);
+ print_line(p, buf);
+ sprintf(buf, "SeparateIRQ: %s", priv->separate_interrupt ? "YES":"NO");
+ print_line(p, buf);
+
+ for (i=0; i<priv->timer_cnt; i++) {
+ timer = &priv->timers[i];
+ sprintf(buf, " - TIMER HW Index %d -", timer->tindex);
+ print_line(p, buf);
+ sprintf(buf, " TLIB Index: %d", timer->index);
+ print_line(p, buf);
+ sprintf(buf, " RELOAD REG: %d", timer->tregs->reload);
+ print_line(p, buf);
+ sprintf(buf, " CTRL REG: %d", timer->tregs->ctrl);
+ print_line(p, buf);
+ }
+
+ return DRVMGR_OK;
+}
+#endif
+
+static inline struct gptimer_priv *priv_from_timer(struct gptimer_timer *t)
+{
+ return (struct gptimer_priv *)
+ ((unsigned int)t -
+ sizeof(struct gptimer_priv) -
+ t->index * sizeof(struct gptimer_timer));
+}
+
+static int gptimer_tlib_int_pend(struct tlib_dev *hand, int ack)
+{
+ struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+ unsigned int ctrl = timer->tregs->ctrl;
+
+ if ((ctrl & (GPTIMER_CTRL_IP | GPTIMER_CTRL_IE)) ==
+ (GPTIMER_CTRL_IP | GPTIMER_CTRL_IE)) {
+ /* clear Pending IRQ ? */
+ if (ack)
+ timer->tregs->ctrl = ctrl & timer->irq_ack_mask;
+ return 1; /* timer generated IRQ */
+ } else
+ return 0; /* was not timer causing IRQ */
+}
+
+void gptimer_isr(void *data)
+{
+ struct gptimer_priv *priv = data;
+ int i;
+
+ /* Check all timers for IRQ */
+ for (i=0;i<priv->timer_cnt; i++) {
+ if (gptimer_tlib_int_pend((void *)&priv->timers[i], 0)) {
+ /* IRQ Was generated by Timer and Pending flag has *not*
+ * yet been cleared, this is to allow ISR to look at
+ * pending bit. Call ISR registered. Clear pending bit.
+ */
+ if (priv->timers[i].tdev.isr_func) {
+ priv->timers[i].tdev.isr_func(
+ priv->timers[i].tdev.isr_data);
+ }
+ gptimer_tlib_int_pend((void *)&priv->timers[i], 1);
+ }
+ }
+}
+
+static void gptimer_tlib_reset(struct tlib_dev *hand)
+{
+ struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+
+ timer->tregs->ctrl = (timer->tregs->ctrl & timer->irq_ack_mask) &
+ GPTIMER_CTRL_IP;
+ timer->tregs->reload = 0xffffffff;
+ timer->tregs->ctrl = GPTIMER_CTRL_LD;
+}
+
+static void gptimer_tlib_get_freq(
+ struct tlib_dev *hand,
+ unsigned int *basefreq,
+ unsigned int *tickrate)
+{
+ struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+ struct gptimer_priv *priv = priv_from_timer(timer);
+
+ /* Calculate base frequency from Timer Clock and Prescaler */
+ if ( basefreq )
+ *basefreq = priv->base_freq;
+ if ( tickrate )
+ *tickrate = timer->tregs->reload + 1;
+}
+
+static int gptimer_tlib_set_freq(struct tlib_dev *hand, unsigned int tickrate)
+{
+ struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+
+ timer->tregs->reload = tickrate - 1;
+
+ /*Check that value was allowed (Timer may not be as wide as expected)*/
+ if ( timer->tregs->reload != (tickrate - 1) )
+ return -1;
+ else
+ return 0;
+}
+
+static void gptimer_tlib_irq_reg(struct tlib_dev *hand, tlib_isr_t func, void *data, int flags)
+{
+ struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+ struct gptimer_priv *priv = priv_from_timer(timer);
+
+ if ( priv->separate_interrupt ) {
+ drvmgr_interrupt_register(priv->dev, timer->tindex,
+ "gptimer", func, data);
+ } else {
+ if (priv->isr_installed == 0) {
+ /* Shared IRQ handler */
+ drvmgr_interrupt_register(
+ priv->dev,
+ 0,
+ "gptimer_shared",
+ gptimer_isr,
+ priv);
+ }
+ priv->isr_installed++;
+ }
+
+#if RTEMS_SMP
+ if (flags & TLIB_FLAGS_BROADCAST) {
+ int tindex = 0;
+
+ if (priv->separate_interrupt) {
+ /* Offset interrupt number with HW subtimer index */
+ tindex = timer->tindex;
+ }
+ drvmgr_interrupt_set_affinity(priv->dev, tindex,
+ _SMP_Get_online_processors());
+ }
+#endif
+
+ timer->tregs->ctrl |= GPTIMER_CTRL_IE;
+}
+
+static void gptimer_tlib_irq_unreg(struct tlib_dev *hand, tlib_isr_t func, void *data)
+{
+ struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+ struct gptimer_priv *priv = priv_from_timer(timer);
+
+ /* Turn off IRQ at source, unregister IRQ handler */
+ timer->tregs->ctrl &= ~GPTIMER_CTRL_IE;
+
+ if ( priv->separate_interrupt ) {
+ drvmgr_interrupt_unregister(priv->dev, timer->tindex,
+ func, data);
+ } else {
+ timer->tdev.isr_func = NULL;
+ priv->isr_installed--;
+ if (priv->isr_installed == 0) {
+ drvmgr_interrupt_unregister(priv->dev, 0,
+ gptimer_isr, priv);
+ }
+ }
+}
+
+static void gptimer_tlib_start(struct tlib_dev *hand, int once)
+{
+ struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+ unsigned int ctrl;
+
+ /* Load the selected frequency before starting Frequency */
+ ctrl = GPTIMER_CTRL_LD | GPTIMER_CTRL_EN;
+ if ( once == 0 )
+ ctrl |= GPTIMER_CTRL_RS; /* Restart Timer */
+ timer->tregs->ctrl = ctrl | (timer->tregs->ctrl & timer->irq_ack_mask &
+ ~GPTIMER_CTRL_RS);
+}
+
+static void gptimer_tlib_stop(struct tlib_dev *hand)
+{
+ struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+
+ /* Load the selected Frequency */
+ timer->tregs->ctrl &= ~(GPTIMER_CTRL_EN|GPTIMER_CTRL_IP);
+}
+
+static void gptimer_tlib_restart(struct tlib_dev *hand)
+{
+ struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+
+ timer->tregs->ctrl |= GPTIMER_CTRL_LD | GPTIMER_CTRL_EN;
+}
+
+static void gptimer_tlib_get_counter(
+ struct tlib_dev *hand,
+ unsigned int *counter)
+{
+ struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+
+ *counter = timer->tregs->value;
+}
+
+static void gptimer_tlib_get_widthmask(
+ struct tlib_dev *hand,
+ unsigned int *widthmask)
+{
+ struct gptimer_timer *timer = (struct gptimer_timer *)hand;
+ struct gptimer_priv *priv = priv_from_timer(timer);
+
+ *widthmask = priv->widthmask;
+}
+
+static struct tlib_drv gptimer_tlib_drv =
+{
+ .reset = gptimer_tlib_reset,
+ .get_freq = gptimer_tlib_get_freq,
+ .set_freq = gptimer_tlib_set_freq,
+ .irq_reg = gptimer_tlib_irq_reg,
+ .irq_unreg = gptimer_tlib_irq_unreg,
+ .start = gptimer_tlib_start,
+ .stop = gptimer_tlib_stop,
+ .restart = gptimer_tlib_restart,
+ .get_counter = gptimer_tlib_get_counter,
+ .custom = NULL,
+ .int_pend = gptimer_tlib_int_pend,
+ .get_widthmask = gptimer_tlib_get_widthmask,
+};
diff --git a/bsps/shared/grlib/btimer/tlib.c b/bsps/shared/grlib/btimer/tlib.c
new file mode 100644
index 0000000000..d66a472fe9
--- /dev/null
+++ b/bsps/shared/grlib/btimer/tlib.c
@@ -0,0 +1,77 @@
+/*
+ * Timer Library (TLIB)
+ *
+ * COPYRIGHT (c) 2011.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <grlib/tlib.h>
+
+struct tlib_dev *tlib_dev_head = NULL;
+struct tlib_dev *tlib_dev_tail = NULL;
+static int tlib_dev_cnt = 0;
+
+/* Register Timer device to Timer Library */
+int tlib_dev_reg(struct tlib_dev *newdev)
+{
+ /* Reset device */
+ newdev->status = 0;
+ newdev->isr_func = NULL;
+ newdev->index = tlib_dev_cnt;
+
+ /* Insert last in queue */
+ newdev->next = NULL;
+ if ( tlib_dev_tail == NULL ) {
+ tlib_dev_head = newdev;
+ } else {
+ tlib_dev_tail->next = newdev;
+ }
+ tlib_dev_tail = newdev;
+
+ /* Return Index of Registered Timer */
+ return tlib_dev_cnt++;
+}
+
+void *tlib_open(int timer_no)
+{
+ struct tlib_dev *dev;
+
+ if ( timer_no < 0 )
+ return NULL;
+
+ dev = tlib_dev_head;
+ while ( (timer_no > 0) && dev ) {
+ timer_no--;
+ dev = dev->next;
+ }
+ if ( dev ) {
+ if ( dev->status )
+ return NULL;
+ dev->status = 1;
+ /* Reset Timer to initial state */
+ tlib_reset(dev);
+ }
+ return dev;
+}
+
+void tlib_close(void *hand)
+{
+ struct tlib_dev *dev = hand;
+
+ /* Stop any ongoing timer operation and unregister IRQ if registered */
+ tlib_stop(dev);
+ tlib_irq_unregister(dev);
+
+ /* Mark not open */
+ dev->status = 0;
+}
+
+int tlib_ntimer(void)
+{
+ return tlib_dev_cnt;
+}
diff --git a/bsps/shared/grlib/btimer/tlib_ckinit.c b/bsps/shared/grlib/btimer/tlib_ckinit.c
new file mode 100644
index 0000000000..e43c8fdd8e
--- /dev/null
+++ b/bsps/shared/grlib/btimer/tlib_ckinit.c
@@ -0,0 +1,442 @@
+/*
+ * Clock Tick Device Driver using Timer Library implemented
+ * by the GRLIB GPTIMER / LEON2 Timer drivers.
+ *
+ * COPYRIGHT (c) 2010 - 2017.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ */
+
+/*
+ * This is an implementation of the RTEMS "clockdrv_shell" interface for
+ * LEON2/3/4 systems using the Driver Manager. It is clock hardware agnostic
+ * and compatible with SMP and UP. Availability of free running counters is
+ * probed and selected as needed.
+ */
+#include <rtems.h>
+#include <rtems/timecounter.h>
+#include <rtems/clockdrv.h>
+#include <stdlib.h>
+#include <bsp.h>
+#include <grlib/tlib.h>
+
+#ifdef RTEMS_DRVMGR_STARTUP
+
+#if defined(LEON3)
+#include <leon.h>
+#endif
+
+struct ops {
+ /*
+ * Set up the free running counter using the Timecounter or Simple
+ * Timecounter interface.
+ */
+ rtems_device_driver (*initialize_counter)(void);
+
+ /*
+ * Hardware-specific support at tick interrupt which runs early in Clock_isr.
+ * It can for example be used to check if interrupt was actually caused by
+ * the timer hardware. If return value is not RTEMS_SUCCESSFUL then Clock_isr
+ * returns immediately. at_tick can be initialized with NULL.
+ */
+ rtems_device_driver (*at_tick)(void);
+
+ /*
+ * Typically calls rtems_timecounter_tick(). A specialized clock driver may
+ * use for example rtems_timecounter_tick_simple() instead.
+ */
+ void (*timecounter_tick)(void);
+
+ /*
+ * Called when the clock driver exits. It can be used to stop functionality
+ * started by initialize_counter. The tick timer is stopped by default.
+ * shutdown_hardware can be initialized with NULL
+ */
+ void (*shutdown_hardware)(void);
+};
+
+/*
+ * Different implementation depending on available free running counter for the
+ * timecounter.
+ *
+ * NOTE: The clock interface is not compatible with shared interrupts on the
+ * clock (tick) timer in SMP configuration.
+ */
+
+#ifndef RTEMS_SMP
+/* "simple timecounter" interface. Only for non-SMP. */
+static const struct ops ops_simple;
+#else
+/* Hardware support up-counter using LEON3 %asr23. */
+static const struct ops ops_timetag;
+/* Timestamp counter available in some IRQ(A)MP instantiations. */
+static const struct ops ops_irqamp;
+/* Separate GPTIMER subtimer as timecounter */
+static const struct ops ops_subtimer;
+#endif
+
+struct clock_priv {
+ const struct ops *ops;
+ /*
+ * Timer number in Timer Library for tick timer used by this interface.
+ * Defaults to the first Timer in the System.
+ */
+ int tlib_tick_index;
+ /* Timer number for timecounter timer if separate GPTIMER subtimer is used */
+ int tlib_counter_index;
+ void *tlib_tick;
+ void *tlib_counter;
+ rtems_timecounter_simple tc_simple;
+ struct timecounter tc;
+};
+static struct clock_priv priv;
+
+/** Common interface **/
+
+/* Set system clock timer instance */
+void Clock_timer_register(int timer_number)
+{
+ priv.tlib_tick_index = timer_number;
+ priv.tlib_counter_index = timer_number + 1;
+}
+
+static rtems_device_driver tlib_clock_find_timer(void)
+{
+ /* Take Timer that should be used as system timer. */
+ priv.tlib_tick = tlib_open(priv.tlib_tick_index);
+ if (priv.tlib_tick == NULL) {
+ /* System Clock Timer not found */
+ return RTEMS_NOT_DEFINED;
+ }
+
+ /* Select which operation set to use */
+#ifndef RTEMS_SMP
+ priv.ops = &ops_simple;
+#else
+ /* When on LEON3 try to use dedicated hardware free running counter. */
+ leon3_up_counter_enable();
+ if (leon3_up_counter_is_available()) {
+ priv.ops = &ops_timetag;
+ return RTEMS_SUCCESSFUL;
+ } else {
+ volatile struct irqmp_timestamp_regs *irqmp_ts;
+
+ irqmp_ts = &LEON3_IrqCtrl_Regs->timestamp[0];
+ if (leon3_irqmp_has_timestamp(irqmp_ts)) {
+ priv.ops = &ops_irqamp;
+ return RTEMS_SUCCESSFUL;
+ }
+ }
+
+ /* Take another subtimer as the final option. */
+ priv.ops = &ops_subtimer;
+#endif
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_device_driver tlib_clock_initialize_hardware(void)
+{
+ /* Set tick rate in number of "Base-Frequency ticks" */
+ tlib_set_freq(priv.tlib_tick, rtems_configuration_get_microseconds_per_tick());
+ priv.ops->initialize_counter();
+ tlib_start(priv.tlib_tick, 0);
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_device_driver tlib_clock_at_tick(void)
+{
+ if (priv.ops->at_tick) {
+ return priv.ops->at_tick();
+ }
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static void tlib_clock_timecounter_tick(void)
+{
+ priv.ops->timecounter_tick();
+}
+
+/* Return a value not equal to RTEMS_SUCCESSFUL to make Clock_initialize fail. */
+static rtems_device_driver tlib_clock_install_isr(rtems_isr *isr)
+{
+ int flags = 0;
+
+#ifdef RTEMS_SMP
+ /* We shall broadcast the clock interrupt to all processors. */
+ flags = TLIB_FLAGS_BROADCAST;
+#endif
+ tlib_irq_register(priv.tlib_tick, isr, NULL, flags);
+
+ return RTEMS_SUCCESSFUL;
+}
+
+#ifndef RTEMS_SMP
+/** Simple counter **/
+static uint32_t simple_tlib_tc_get(rtems_timecounter_simple *tc)
+{
+ unsigned int clicks = 0;
+
+ if (priv.tlib_tick != NULL) {
+ tlib_get_counter(priv.tlib_tick, &clicks);
+ }
+
+ return clicks;
+}
+
+static bool simple_tlib_tc_is_pending(rtems_timecounter_simple *tc)
+{
+ bool pending = false;
+
+ if (priv.tlib_tick != NULL) {
+ pending = tlib_interrupt_pending(priv.tlib_tick, 0) != 0;
+ }
+
+ return pending;
+}
+
+static uint32_t simple_tlib_tc_get_timecount(struct timecounter *tc)
+{
+ return rtems_timecounter_simple_downcounter_get(
+ tc,
+ simple_tlib_tc_get,
+ simple_tlib_tc_is_pending
+ );
+}
+
+static rtems_device_driver simple_initialize_counter(void)
+{
+ uint64_t frequency;
+ unsigned int tick_hz;
+
+ frequency = 1000000;
+ tick_hz = rtems_configuration_get_microseconds_per_tick();
+
+ rtems_timecounter_simple_install(
+ &priv.tc_simple,
+ frequency,
+ tick_hz,
+ simple_tlib_tc_get_timecount
+ );
+
+ return RTEMS_NOT_DEFINED;
+}
+
+static void simple_tlib_tc_at_tick(rtems_timecounter_simple *tc)
+{
+ /* Nothing to do */
+}
+
+/*
+ * Support for shared interrupts. Ack IRQ at source, only handle interrupts
+ * generated from the tick-timer. This is called early in Clock_isr.
+ */
+static rtems_device_driver simple_at_tick(void)
+{
+ if (tlib_interrupt_pending(priv.tlib_tick, 1) == 0) {
+ return RTEMS_NOT_DEFINED;
+ }
+ return RTEMS_SUCCESSFUL;
+}
+
+static void simple_timecounter_tick(void)
+{
+ rtems_timecounter_simple_downcounter_tick(
+ &priv.tc_simple,
+ simple_tlib_tc_get,
+ simple_tlib_tc_at_tick
+ );
+}
+
+static const struct ops ops_simple = {
+ .initialize_counter = simple_initialize_counter,
+ .at_tick = simple_at_tick,
+ .timecounter_tick = simple_timecounter_tick,
+ .shutdown_hardware = NULL,
+};
+
+#else
+
+/** Subtimer as counter **/
+static uint32_t subtimer_get_timecount(struct timecounter *tc)
+{
+ unsigned int counter;
+
+ tlib_get_counter(priv.tlib_counter, &counter);
+
+ return 0xffffffff - counter;
+}
+
+static rtems_device_driver subtimer_initialize_counter(void)
+{
+ unsigned int mask;
+ unsigned int basefreq;
+
+ if (priv.tlib_counter_index == priv.tlib_tick_index) {
+ priv.tlib_counter_index = priv.tlib_tick_index + 1;
+ }
+ /* Take Timer that should be used as timecounter upcounter timer. */
+ priv.tlib_counter = tlib_open(priv.tlib_counter_index);
+ if (priv.tlib_counter == NULL) {
+ /* Timecounter timer not found */
+ return RTEMS_NOT_DEFINED;
+ }
+
+ /* Configure free running counter: GPTIMER */
+ tlib_get_freq(priv.tlib_counter, &basefreq, NULL);
+ tlib_get_widthmask(priv.tlib_counter, &mask);
+
+ priv.tc.tc_get_timecount = subtimer_get_timecount;
+ priv.tc.tc_counter_mask = mask;
+ priv.tc.tc_frequency = basefreq;
+ priv.tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER;
+ rtems_timecounter_install(&priv.tc);
+ /* Start free running counter */
+ tlib_start(priv.tlib_counter, 0);
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static void subtimer_timecounter_tick(void)
+{
+ rtems_timecounter_tick();
+}
+
+static void subtimer_shutdown_hardware(void)
+{
+ if (priv.tlib_counter) {
+ tlib_stop(priv.tlib_counter);
+ priv.tlib_counter = NULL;
+ }
+}
+
+static const struct ops ops_subtimer = {
+ .initialize_counter = subtimer_initialize_counter,
+ .timecounter_tick = subtimer_timecounter_tick,
+ .shutdown_hardware = subtimer_shutdown_hardware,
+};
+
+/** DSU timetag as counter **/
+static uint32_t timetag_get_timecount(struct timecounter *tc)
+{
+ return leon3_up_counter_low();
+}
+
+static rtems_device_driver timetag_initialize_counter(void)
+{
+ /* Configure free running counter: timetag */
+ priv.tc.tc_get_timecount = timetag_get_timecount;
+ priv.tc.tc_counter_mask = 0xffffffff;
+ priv.tc.tc_frequency = leon3_up_counter_frequency();
+ priv.tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER;
+ rtems_timecounter_install(&priv.tc);
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static void timetag_timecounter_tick(void)
+{
+ rtems_timecounter_tick();
+}
+
+static const struct ops ops_timetag = {
+ .initialize_counter = timetag_initialize_counter,
+ .at_tick = NULL,
+ .timecounter_tick = timetag_timecounter_tick,
+ .shutdown_hardware = NULL,
+};
+
+/** IRQ(A)MP timestamp as counter **/
+static uint32_t irqamp_get_timecount(struct timecounter *tc)
+{
+ return LEON3_IrqCtrl_Regs->timestamp[0].counter;
+}
+
+static rtems_device_driver irqamp_initialize_counter(void)
+{
+ volatile struct irqmp_timestamp_regs *irqmp_ts;
+ static const uint32_t A_TSISEL_FIELD = 0xf;
+
+ /* Configure free running counter: timetag */
+ priv.tc.tc_get_timecount = irqamp_get_timecount;
+ priv.tc.tc_counter_mask = 0xffffffff;
+ priv.tc.tc_frequency = leon3_up_counter_frequency();
+ priv.tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER;
+ rtems_timecounter_install(&priv.tc);
+
+ /*
+ * The counter increments whenever a TSISEL field in a Timestamp Control
+ * Register is non-zero.
+ */
+ irqmp_ts = &LEON3_IrqCtrl_Regs->timestamp[0];
+ irqmp_ts->control = A_TSISEL_FIELD;
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static void irqamp_timecounter_tick(void)
+{
+ rtems_timecounter_tick();
+}
+
+static const struct ops ops_irqamp = {
+ .initialize_counter = irqamp_initialize_counter,
+ .at_tick = NULL,
+ .timecounter_tick = irqamp_timecounter_tick,
+ .shutdown_hardware = NULL,
+};
+#endif
+
+/** Interface to the Clock Driver Shell (dev/clock/clockimpl.h) **/
+#define Clock_driver_support_find_timer() \
+ do { \
+ rtems_device_driver ret; \
+ ret = tlib_clock_find_timer(); \
+ if (RTEMS_SUCCESSFUL != ret) { \
+ return ret; \
+ } \
+ } while (0)
+
+#define Clock_driver_support_install_isr( isr ) \
+ do { \
+ rtems_device_driver ret; \
+ ret = tlib_clock_install_isr( isr ); \
+ if (RTEMS_SUCCESSFUL != ret) { \
+ return ret; \
+ } \
+ } while (0)
+
+#define Clock_driver_support_set_interrupt_affinity(online_processors) \
+ /* Done by tlib_clock_install_isr() */
+
+#define Clock_driver_support_initialize_hardware() \
+ do { \
+ rtems_device_driver ret; \
+ ret = tlib_clock_initialize_hardware(); \
+ if (RTEMS_SUCCESSFUL != ret) { \
+ return ret; \
+ } \
+ } while (0)
+
+#define Clock_driver_timecounter_tick() \
+ tlib_clock_timecounter_tick()
+
+#define Clock_driver_support_at_tick() \
+ do { \
+ rtems_device_driver ret; \
+ ret = tlib_clock_at_tick(); \
+ if (RTEMS_SUCCESSFUL != ret) { \
+ return; \
+ } \
+ } while (0)
+
+#include "../../../shared/dev/clock/clockimpl.h"
+
+#endif /* RTEMS_DRVMGR_STARTUP */
+
diff --git a/bsps/shared/grlib/can/canmux.c b/bsps/shared/grlib/can/canmux.c
new file mode 100644
index 0000000000..369cb3dd0a
--- /dev/null
+++ b/bsps/shared/grlib/can/canmux.c
@@ -0,0 +1,199 @@
+/*
+ * CAN_MUX driver. Present in GR712RC.
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <bsp.h>
+#include <rtems/bspIo.h> /* printk */
+
+#include <grlib/canmux.h>
+#include <grlib/ambapp.h>
+
+#include <grlib/grlib_impl.h>
+
+/* Fallback PnP device ID for the CAN_MUX core if the ambapp ID table
+ * does not provide it.
+ */
+#ifndef GAISLER_CANMUX
+#define GAISLER_CANMUX 0x081
+#endif
+
+/* Default filesystem node name; may be overridden at compile time.
+ * NOTE(review): the #undef inside the !defined() branch is redundant --
+ * CANMUX_DEVNAME cannot be defined on that path. Kept as-is.
+ */
+#if !defined(CANMUX_DEVNAME)
+ #undef CANMUX_DEVNAME
+ #define CANMUX_DEVNAME "/dev/canmux"
+#endif
+
+/* Enable debug output? */
+/* #define DEBUG */
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/* Bit positions in the CAN_MUX routing register: set = OC-CAN core,
+ * cleared = SatCAN core routed to the respective bus.
+ */
+#define BUSA_SELECT (1 << 0)
+#define BUSB_SELECT (1 << 1)
+
+/* Driver state for the single CAN_MUX instance */
+struct canmux_priv {
+ volatile unsigned int *muxreg; /* CAN_MUX routing register (APB) */
+ rtems_id devsem; /* serializes open/close bookkeeping */
+ int open; /* non-zero while the device node is open */
+};
+
+static struct canmux_priv *priv;
+
+static rtems_device_driver canmux_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver canmux_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver canmux_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver canmux_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver canmux_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver canmux_initialize(rtems_device_major_number major, rtems_device_minor_number unused, void *arg);
+
+
+/* Select which CAN core (SatCAN or OC-CAN) is routed to bus A and bus B
+ * by setting or clearing the corresponding select bit in the mux register.
+ * Returns RTEMS_NOT_DEFINED for unknown commands.
+ */
+static rtems_device_driver canmux_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ rtems_libio_ioctl_args_t *io = (rtems_libio_ioctl_args_t*)arg;
+
+ DBG("CAN_MUX: IOCTL %d\n\r", io->command);
+
+ io->ioctl_return = 0;
+
+ if (io->command == CANMUX_IOC_BUSA_SATCAN)
+  *priv->muxreg &= ~BUSA_SELECT;
+ else if (io->command == CANMUX_IOC_BUSA_OCCAN1)
+  *priv->muxreg |= BUSA_SELECT;
+ else if (io->command == CANMUX_IOC_BUSB_SATCAN)
+  *priv->muxreg &= ~BUSB_SELECT;
+ else if (io->command == CANMUX_IOC_BUSB_OCCAN2)
+  *priv->muxreg |= BUSB_SELECT;
+ else
+  return RTEMS_NOT_DEFINED;
+
+ return RTEMS_SUCCESSFUL;
+}
+
+/* The mux has no data stream; a write always moves zero bytes. */
+static rtems_device_driver canmux_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ rtems_libio_rw_args_t *io = (rtems_libio_rw_args_t*)arg;
+ io->bytes_moved = 0;
+ return RTEMS_SUCCESSFUL;
+}
+
+/* The mux has no data stream; a read always returns zero bytes. */
+static rtems_device_driver canmux_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ rtems_libio_rw_args_t *io = (rtems_libio_rw_args_t*)arg;
+ io->bytes_moved = 0;
+ return RTEMS_SUCCESSFUL;
+}
+
+
+/* Release the single-user device: clear the open flag so a subsequent
+ * open() may succeed. No hardware state is touched.
+ */
+static rtems_device_driver canmux_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ DBG("CAN_MUX: Closing %d\n\r", minor);
+ priv->open = 0;
+ return RTEMS_SUCCESSFUL;
+}
+
+
+/* Exclusive open of the single CAN_MUX device. The device semaphore
+ * serializes concurrent open attempts; a second open while the node is
+ * already held fails with RTEMS_RESOURCE_IN_USE (EBUSY).
+ */
+static rtems_device_driver canmux_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ rtems_device_driver result = RTEMS_SUCCESSFUL;
+
+ DBG("CAN_MUX: Opening %d\n\r",minor);
+
+ rtems_semaphore_obtain(priv->devsem,RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ if (priv->open)
+  result = RTEMS_RESOURCE_IN_USE; /* EBUSY */
+ else
+  priv->open = 1;
+ rtems_semaphore_release(priv->devsem);
+
+ if (result == RTEMS_SUCCESSFUL)
+  DBG("CAN_MUX: Opening %d success\n\r",minor);
+
+ return result;
+}
+
+/* Locate the CAN_MUX APB slave via AMBA PnP, register the filesystem node
+ * and allocate driver state.
+ *
+ * Fixes vs. original: the function returns rtems_device_driver
+ * (an rtems_status_code) yet returned -1 on two error paths, which is not
+ * a valid status value -- replaced with RTEMS_UNSATISFIED/RTEMS_NO_MEMORY.
+ * The device-name copy is bounded (CANMUX_DEVNAME is user-overridable and
+ * could exceed fs_name), and priv is nulled after free to avoid a dangling
+ * global on the failure path.
+ */
+static rtems_device_driver canmux_initialize(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ struct ambapp_apb_info d;
+ char fs_name[20];
+ rtems_status_code status;
+
+ DBG("CAN_MUX: Initialize..\n\r");
+
+ /* Bounded copy; snprintf always NUL-terminates */
+ snprintf(fs_name, sizeof(fs_name), "%s", CANMUX_DEVNAME);
+
+ /* Find core and initialize register pointer */
+ if (!ambapp_find_apbslv(&ambapp_plb, VENDOR_GAISLER, GAISLER_CANMUX, &d)) {
+  printk("CAN_MUX: Failed to find CAN_MUX core\n\r");
+  return RTEMS_UNSATISFIED; /* was -1: not a valid rtems_status_code */
+ }
+
+ status = rtems_io_register_name(fs_name, major, minor);
+ if (RTEMS_SUCCESSFUL != status)
+  rtems_fatal_error_occurred(status);
+
+ /* Create private structure */
+ if ((priv = grlib_malloc(sizeof(*priv))) == NULL) {
+  printk("CAN_MUX driver could not allocate memory for priv structure\n\r");
+  return RTEMS_NO_MEMORY; /* was -1: not a valid rtems_status_code */
+ }
+
+ priv->muxreg = (unsigned int*)d.start;
+
+ /* Binary semaphore (count 1) guarding the open flag */
+ status = rtems_semaphore_create(
+  rtems_build_name('M', 'd', 'v', '0'),
+  1,
+  RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | RTEMS_NO_INHERIT_PRIORITY | \
+  RTEMS_NO_PRIORITY_CEILING,
+  0,
+  &priv->devsem);
+ if (status != RTEMS_SUCCESSFUL) {
+  printk("CAN_MUX: Failed to create dev semaphore (%d)\n\r", status);
+  free(priv);
+  priv = NULL; /* avoid dangling global after free */
+  return RTEMS_UNSATISFIED;
+ }
+
+ priv->open = 0;
+
+ return RTEMS_SUCCESSFUL;
+}
+
+
+#define CANMUX_DRIVER_TABLE_ENTRY { canmux_initialize, canmux_open, canmux_close, canmux_read, canmux_write, canmux_ioctl }
+
+static rtems_driver_address_table canmux_driver = CANMUX_DRIVER_TABLE_ENTRY;
+
+/* Register the CAN_MUX driver with the RTEMS I/O manager.
+ * Returns 0 on success, 1 on failure (after printing the reason).
+ */
+int canmux_register(void)
+{
+ rtems_status_code status;
+ rtems_device_major_number major;
+
+ DBG("CAN_MUX: canmux_register called\n\r");
+
+ status = rtems_io_register_driver(0, &canmux_driver, &major);
+ if (status == RTEMS_SUCCESSFUL) {
+  DBG("CAN_MUX driver successfully registered, major: %d\n\r", major);
+  return 0;
+ }
+
+ /* Report the specific failure cause */
+ switch (status) {
+ case RTEMS_TOO_MANY:
+  printk("CAN_MUX rtems_io_register_driver failed: RTEMS_TOO_MANY\n\r");
+  break;
+ case RTEMS_INVALID_NUMBER:
+  printk("CAN_MUX rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n\r");
+  break;
+ case RTEMS_RESOURCE_IN_USE:
+  printk("CAN_MUX rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n\r");
+  break;
+ default:
+  printk("CAN_MUX rtems_io_register_driver failed\n\r");
+ }
+ return 1;
+}
diff --git a/bsps/shared/grlib/can/grcan.c b/bsps/shared/grlib/can/grcan.c
new file mode 100644
index 0000000000..55154d823a
--- /dev/null
+++ b/bsps/shared/grlib/can/grcan.c
@@ -0,0 +1,1976 @@
+/*
+ * GRCAN driver
+ *
+ * COPYRIGHT (c) 2007.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bsp.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <ctype.h>
+#include <rtems/bspIo.h>
+
+#include <grlib/grcan.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/ambapp.h>
+
+#include <grlib/grlib_impl.h>
+
+/* Maximum number of GRCAN devices supported by driver */
+#define GRCAN_COUNT_MAX 8
+
+#define WRAP_AROUND_TX_MSGS 1
+#define WRAP_AROUND_RX_MSGS 2
+#define GRCAN_MSG_SIZE sizeof(struct grcan_msg)
+#define BLOCK_SIZE (16*4)
+
+/* grcan needs to have it buffers aligned to 1k boundaries */
+#define BUFFER_ALIGNMENT_NEEDS 1024
+
+/* Default Maximium buffer size for statically allocated buffers */
+#ifndef TX_BUF_SIZE
+ #define TX_BUF_SIZE (BLOCK_SIZE*16)
+#endif
+
+/* Make receiver buffers bigger than transmitt */
+#ifndef RX_BUF_SIZE
+ #define RX_BUF_SIZE ((3*BLOCK_SIZE)*16)
+#endif
+
+#ifndef IRQ_CLEAR_PENDING
+ #define IRQ_CLEAR_PENDING(irqno)
+#endif
+
+#ifndef IRQ_UNMASK
+ #define IRQ_UNMASK(irqno)
+#endif
+
+#ifndef IRQ_MASK
+ #define IRQ_MASK(irqno)
+#endif
+
+#ifndef GRCAN_DEFAULT_BAUD
+ /* default to 500kbits/s */
+ #define GRCAN_DEFAULT_BAUD 500000
+#endif
+
+#ifndef GRCAN_SAMPLING_POINT
+ #define GRCAN_SAMPLING_POINT 80
+#endif
+
+/* Uncomment for debug output */
+/****************** DEBUG Definitions ********************/
+#define DBG_TX 2
+#define DBG_RX 4
+#define DBG_STATE 8
+
+#define DEBUG_FLAGS (DBG_STATE | DBG_RX | DBG_TX )
+/*
+#define DEBUG
+#define DEBUGFUNCS
+*/
+#include <grlib/debug_defs.h>
+
+/*********************************************************/
+
+/* Map controller state (STATE_STOPPED/STARTED/BUSOFF/AHBERR, used as index)
+ * to the GRCAN_RET_* code returned to the user API.
+ * NOTE(review): not static and not const -- verify there are no external
+ * users before narrowing linkage.
+ */
+int state2err[4] = {
+ /* STATE_STOPPED */ GRCAN_RET_NOTSTARTED,
+ /* STATE_STARTED */ GRCAN_RET_OK,
+ /* STATE_BUSOFF */ GRCAN_RET_BUSOFF,
+ /* STATE_AHBERR */ GRCAN_RET_AHBERR
+};
+
+struct grcan_msg {
+ unsigned int head[2];
+ unsigned char data[8];
+};
+
+struct grcan_config {
+ struct grcan_timing timing;
+ struct grcan_selection selection;
+ int abort;
+ int silent;
+};
+
+struct grcan_priv {
+ struct drvmgr_dev *dev; /* Driver manager device */
+ char devName[32]; /* Device Name */
+ unsigned int baseaddr, ram_base;
+ struct grcan_regs *regs;
+ int irq;
+ int minor;
+ int open;
+ int started;
+ unsigned int channel;
+ int flushing;
+ unsigned int corefreq_hz;
+
+ /* Circular DMA buffers */
+ void *_rx, *_rx_hw;
+ void *_tx, *_tx_hw;
+ void *txbuf_adr;
+ void *rxbuf_adr;
+ struct grcan_msg *rx;
+ struct grcan_msg *tx;
+ unsigned int rxbuf_size; /* requested RX buf size in bytes */
+ unsigned int txbuf_size; /* requested TX buf size in bytes */
+
+ int txblock, rxblock;
+ int txcomplete, rxcomplete;
+
+ struct grcan_filter sfilter;
+ struct grcan_filter afilter;
+ int config_changed; /* 0=no changes, 1=changes ==> a Core reset is needed */
+ struct grcan_config config;
+ struct grcan_stats stats;
+
+ rtems_id rx_sem, tx_sem, txempty_sem, dev_sem;
+ SPIN_DECLARE(devlock);
+};
+
+static void __inline__ grcan_hw_reset(struct grcan_regs *regs);
+
+static int grcan_hw_read_try(
+ struct grcan_priv *pDev,
+ struct grcan_regs *regs,
+ CANMsg *buffer,
+ int max);
+
+static int grcan_hw_write_try(
+ struct grcan_priv *pDev,
+ struct grcan_regs *regs,
+ CANMsg *buffer,
+ int count);
+
+static void grcan_hw_config(
+ struct grcan_regs *regs,
+ struct grcan_config *conf);
+
+static void grcan_hw_accept(
+ struct grcan_regs *regs,
+ struct grcan_filter *afilter);
+
+static void grcan_hw_sync(
+ struct grcan_regs *regs,
+ struct grcan_filter *sfilter);
+
+static void grcan_interrupt(void *arg);
+
+#ifdef GRCAN_REG_BYPASS_CACHE
+#define READ_REG(address) _grcan_read_nocache((unsigned int)(address))
+#else
+#define READ_REG(address) (*(volatile unsigned int *)(address))
+#endif
+
+#ifdef GRCAN_DMA_BYPASS_CACHE
+#define READ_DMA_WORD(address) _grcan_read_nocache((unsigned int)(address))
+#define READ_DMA_BYTE(address) _grcan_read_nocache_byte((unsigned int)(address))
+static unsigned char __inline__ _grcan_read_nocache_byte(unsigned int address)
+{
+ unsigned char tmp;
+ __asm__ (" lduba [%1]1, %0 "
+ : "=r"(tmp)
+ : "r"(address)
+ );
+ return tmp;
+}
+#else
+#define READ_DMA_WORD(address) (*(volatile unsigned int *)(address))
+#define READ_DMA_BYTE(address) (*(volatile unsigned char *)(address))
+#endif
+
+#if defined(GRCAN_REG_BYPASS_CACHE) || defined(GRCAN_DMA_BYPASS_CACHE)
+static unsigned int __inline__ _grcan_read_nocache(unsigned int address)
+{
+ unsigned int tmp;
+ __asm__ (" lda [%1]1, %0 "
+ : "=r"(tmp)
+ : "r"(address)
+ );
+ return tmp;
+}
+#endif
+
+#define NELEM(a) ((int) (sizeof (a) / sizeof (a[0])))
+
+static int grcan_count = 0;
+static struct grcan_priv *priv_tab[GRCAN_COUNT_MAX];
+
+/******************* Driver manager interface ***********************/
+
+/* Driver prototypes */
+int grcan_device_init(struct grcan_priv *pDev);
+
+int grcan_init2(struct drvmgr_dev *dev);
+int grcan_init3(struct drvmgr_dev *dev);
+
+struct drvmgr_drv_ops grcan_ops =
+{
+ .init = {NULL, grcan_init2, grcan_init3, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+struct amba_dev_id grcan_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_GRCAN},
+ {VENDOR_GAISLER, GAISLER_GRHCAN},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info grcan_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_GRCAN_ID, /* Driver ID */
+ "GRCAN_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &grcan_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+ },
+ &grcan_ids[0]
+};
+
+/* Register the GRCAN driver with the driver manager; devices are probed
+ * and attached later via the init2/init3 stages in grcan_ops.
+ */
+void grcan_register_drv (void)
+{
+ DBG("Registering GRCAN driver\n");
+ drvmgr_drv_register(&grcan_drv_info.general);
+}
+
+/* Driver-manager init stage 2: allocate and attach zeroed per-device state.
+ * Fails with DRVMGR_ENORES when more than GRCAN_COUNT_MAX cores are found.
+ */
+int grcan_init2(struct drvmgr_dev *dev)
+{
+ struct grcan_priv *priv;
+
+ DBG("GRCAN[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+ if (GRCAN_COUNT_MAX <= grcan_count)
+  return DRVMGR_ENORES;
+ priv = dev->priv = grlib_calloc(1, sizeof(*priv));
+ if ( !priv )
+  return DRVMGR_NOMEM;
+ priv->dev = dev;
+
+ /* This core will not find other cores, so hardware setup and device
+  * naming are deferred to init3(). (Original comment said "init2()",
+  * which is this very stage -- that was stale.)
+  */
+
+ return DRVMGR_OK;
+}
+
+/* Driver-manager init stage 3: initialize the hardware/semaphores via
+ * grcan_device_init(), register the device in the global table and build
+ * the filesystem device name.
+ *
+ * Fix vs. original: the name was built with unbounded sprintf() into
+ * priv->devName[32]; with a full 32-byte bus prefix plus "grcan%d" this
+ * could overflow. snprintf() bounds the write (name may be truncated,
+ * never overflowed).
+ */
+int grcan_init3(struct drvmgr_dev *dev)
+{
+ struct grcan_priv *priv;
+ char prefix[32];
+
+ priv = dev->priv;
+
+ /*
+  * Now we take care of device initialization.
+  */
+ if ( grcan_device_init(priv) ) {
+  return DRVMGR_FAIL;
+ }
+
+ priv_tab[grcan_count] = priv;
+ grcan_count++;
+
+ /* Get Filesystem name prefix */
+ prefix[0] = '\0';
+ if ( drvmgr_get_dev_prefix(dev, prefix) ) {
+  /* Failed to get prefix, make sure of a unique FS name
+   * by using the driver minor.
+   */
+  snprintf(priv->devName, sizeof(priv->devName), "grcan%d", dev->minor_drv);
+ } else {
+  /* Got special prefix, this means we have a bus prefix
+   * And we should use our "bus minor"
+   */
+  snprintf(priv->devName, sizeof(priv->devName), "%sgrcan%d", prefix, dev->minor_bus);
+ }
+
+ return DRVMGR_OK;
+}
+
+/* Gather AMBA PnP information (IRQ line, register base, core frequency)
+ * for one GRCAN core, reset the hardware and create the four driver
+ * semaphores (RX data, TX space, TX empty, device lock).
+ *
+ * Returns 0 on success, -1 when PnP info or the bus frequency is
+ * unavailable, RTEMS_INTERNAL_ERROR when a semaphore cannot be created.
+ * NOTE(review): semaphores created before a later create fails are not
+ * deleted; tolerable only because a failure here aborts driver attach.
+ */
+int grcan_device_init(struct grcan_priv *pDev)
+{
+ struct amba_dev_info *ambadev;
+ struct ambapp_core *pnpinfo;
+
+ /* Get device information from AMBA PnP information */
+ ambadev = (struct amba_dev_info *)pDev->dev->businfo;
+ if ( ambadev == NULL ) {
+ return -1;
+ }
+ pnpinfo = &ambadev->info;
+ pDev->irq = pnpinfo->irq;
+ pDev->regs = (struct grcan_regs *)pnpinfo->apb_slv->start;
+ pDev->minor = pDev->dev->minor_drv;
+
+ /* Get frequency in Hz */
+ if ( drvmgr_freq_get(pDev->dev, DEV_APB_SLV, &pDev->corefreq_hz) ) {
+ return -1;
+ }
+
+ DBG("GRCAN frequency: %d Hz\n", pDev->corefreq_hz);
+
+ /* Reset Hardware before attaching IRQ handler */
+ grcan_hw_reset(pDev->regs);
+
+ /* RX Semaphore created with count = 0 */
+ if ( rtems_semaphore_create(rtems_build_name('G', 'C', 'R', '0' + pDev->minor),
+ 0,
+ RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|\
+ RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+ 0,
+ &pDev->rx_sem) != RTEMS_SUCCESSFUL ) {
+ return RTEMS_INTERNAL_ERROR;
+ }
+
+ /* TX Semaphore created with count = 0 */
+ if ( rtems_semaphore_create(rtems_build_name('G', 'C', 'T', '0' + pDev->minor),
+ 0,
+ RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|\
+ RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+ 0,
+ &pDev->tx_sem) != RTEMS_SUCCESSFUL ) {
+ return RTEMS_INTERNAL_ERROR;
+ }
+
+ /* TX Empty Semaphore created with count = 0 */
+ if ( rtems_semaphore_create(rtems_build_name('G', 'C', 'E', '0' + pDev->minor),
+ 0,
+ RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|\
+ RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+ 0,
+ &pDev->txempty_sem) != RTEMS_SUCCESSFUL ) {
+ return RTEMS_INTERNAL_ERROR;
+ }
+
+ /* Device Semaphore created with count = 1 */
+ if ( rtems_semaphore_create(rtems_build_name('G', 'C', 'A', '0' + pDev->minor),
+ 1,
+ RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|\
+ RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+ 0,
+ &pDev->dev_sem) != RTEMS_SUCCESSFUL ) {
+ return RTEMS_INTERNAL_ERROR;
+ }
+
+ return 0;
+}
+
+/* Put the GRCAN core in reset; configuration must be re-programmed before
+ * the core is used again.
+ */
+static void __inline__ grcan_hw_reset(struct grcan_regs *regs)
+{
+ regs->ctrl = GRCAN_CTRL_RESET;
+}
+
+/* Program DMA buffer addresses/sizes, acceptance and sync filters and the
+ * IRQ routing, clear pending status, then enable the RX/TX channels and
+ * the HurriCANe core.
+ *
+ * Returns RTEMS_SUCCESSFUL, or RTEMS_NO_MEMORY when the DMA buffers have
+ * not been allocated yet.
+ */
+static rtems_device_driver grcan_hw_start(struct grcan_priv *pDev)
+{
+ /*
+  * tmp is set but never used. GCC gives a warning for this
+  * and we need to tell GCC not to complain.
+  */
+ unsigned int tmp RTEMS_UNUSED;
+
+ SPIN_IRQFLAGS(oldLevel);
+
+ FUNCDBG();
+
+ /* Check that memory has been allocated successfully */
+ if (!pDev->tx || !pDev->rx)
+  return RTEMS_NO_MEMORY;
+
+ /* Configure FIFO configuration register
+  * and Setup timing
+  */
+ if (pDev->config_changed) {
+  grcan_hw_config(pDev->regs, &pDev->config);
+  pDev->config_changed = 0;
+ }
+
+ /* Setup receiver */
+ pDev->regs->rx0addr = (unsigned int)pDev->_rx_hw;
+ pDev->regs->rx0size = pDev->rxbuf_size;
+
+ /* Setup Transmitter */
+ pDev->regs->tx0addr = (unsigned int)pDev->_tx_hw;
+ pDev->regs->tx0size = pDev->txbuf_size;
+
+ /* Setup acceptance filters */
+ grcan_hw_accept(pDev->regs, &pDev->afilter);
+
+ /* Sync filters */
+ grcan_hw_sync(pDev->regs, &pDev->sfilter);
+
+ /* Clear status bits; the dummy read clears read-sensitive state */
+ tmp = READ_REG(&pDev->regs->stat);
+ pDev->regs->stat = 0;
+
+ /* Setup IRQ handling */
+
+ /* Clear all IRQs */
+ tmp = READ_REG(&pDev->regs->pir);
+ pDev->regs->picr = 0x1ffff;
+
+ /* unmask TxLoss|TxErrCntr|RxErrCntr|TxAHBErr|RxAHBErr|OR|OFF|PASS */
+ pDev->regs->imr = 0x1601f;
+
+ /* Enable routing of the IRQs */
+ SPIN_LOCK_IRQ(&pDev->devlock, oldLevel);
+ IRQ_UNMASK(pDev->irq + GRCAN_IRQ_TXSYNC);
+ IRQ_UNMASK(pDev->irq + GRCAN_IRQ_RXSYNC);
+ IRQ_UNMASK(pDev->irq + GRCAN_IRQ_IRQ);
+ SPIN_UNLOCK_IRQ(&pDev->devlock, oldLevel);
+
+ /* Enable receiver/transmitter */
+ pDev->regs->rx0ctrl = GRCAN_RXCTRL_ENABLE;
+ pDev->regs->tx0ctrl = GRCAN_TXCTRL_ENABLE;
+
+ /* Enable HurriCANe core */
+ pDev->regs->ctrl = GRCAN_CTRL_ENABLE;
+
+ /* NOTE(review): the original trailing comment claimed the transmitter is
+  * left disabled until a send, but tx0ctrl is enabled above -- that
+  * comment was stale and has been removed.
+  */
+ return RTEMS_SUCCESSFUL;
+}
+
+/* Mask all GRCAN interrupt sources and disable the RX/TX DMA channels.
+ * The core itself is not reset here.
+ */
+static void grcan_hw_stop(struct grcan_priv *pDev)
+{
+ FUNCDBG();
+
+ /* Mask all IRQs */
+ pDev->regs->imr = 0;
+ IRQ_MASK(pDev->irq + GRCAN_IRQ_TXSYNC);
+ IRQ_MASK(pDev->irq + GRCAN_IRQ_RXSYNC);
+ IRQ_MASK(pDev->irq + GRCAN_IRQ_IRQ);
+
+ /* Disable receiver & transmitter */
+ pDev->regs->rx0ctrl = 0;
+ pDev->regs->tx0ctrl = 0;
+}
+
+/* Wake every thread blocked on a driver semaphore so it can observe the
+ * stopped/error state and return to user space.
+ */
+static void grcan_sw_stop(struct grcan_priv *pDev)
+{
+ /*
+  * Release semaphores to wake all threads waiting for an IRQ.
+  * The threads that
+  * get woken up must check started state in
+  * order to determine that they should return to
+  * user space with error status.
+  *
+  * Entering into started mode again will reset the
+  * semaphore count.
+  */
+ rtems_semaphore_release(pDev->rx_sem);
+ rtems_semaphore_release(pDev->tx_sem);
+ rtems_semaphore_release(pDev->txempty_sem);
+}
+
+/* Write the configuration register (mode flags and bit timing) from conf.
+ * The HurriCANe core is held in disabled state (ctrl = 0) while the
+ * configuration register is written, then re-enabled.
+ */
+static void grcan_hw_config(struct grcan_regs *regs, struct grcan_config *conf)
+{
+ unsigned int config = 0;
+
+ /* Reset HurriCANe Core */
+ regs->ctrl = 0;
+
+ if (conf->silent)
+  config |= GRCAN_CFG_SILENT;
+
+ if (conf->abort)
+  config |= GRCAN_CFG_ABORT;
+
+ if (conf->selection.selection)
+  config |= GRCAN_CFG_SELECTION;
+
+ if (conf->selection.enable0)
+  config |= GRCAN_CFG_ENABLE0;
+
+ if (conf->selection.enable1)
+  config |= GRCAN_CFG_ENABLE1;
+
+ /* Timing */
+ config |= (conf->timing.bpr << GRCAN_CFG_BPR_BIT) & GRCAN_CFG_BPR;
+ config |= (conf->timing.rsj << GRCAN_CFG_RSJ_BIT) & GRCAN_CFG_RSJ;
+ config |= (conf->timing.ps1 << GRCAN_CFG_PS1_BIT) & GRCAN_CFG_PS1;
+ config |= (conf->timing.ps2 << GRCAN_CFG_PS2_BIT) & GRCAN_CFG_PS2;
+ config |=
+     (conf->timing.scaler << GRCAN_CFG_SCALER_BIT) & GRCAN_CFG_SCALER;
+
+ /* Write configuration */
+ regs->conf = config;
+
+ /* Enable HurriCANe Core */
+ regs->ctrl = GRCAN_CTRL_ENABLE;
+}
+
+/* Program the RX acceptance filter. The mask is first widened to
+ * all-ones so the intermediate code/mask combination cannot match
+ * unintentionally while the two registers are updated.
+ */
+static void grcan_hw_accept(
+ struct grcan_regs *regs,
+ struct grcan_filter *afilter
+)
+{
+ /* Disable Sync mask totaly (if we change scode or smask
+  * in an unfortunate way we may trigger a sync match)
+  */
+ regs->rx0mask = 0xffffffff;
+
+ /* Set Sync Filter in a controlled way */
+ regs->rx0code = afilter->code;
+ regs->rx0mask = afilter->mask;
+}
+
+/* Program the sync-message filter; same mask-first update ordering as
+ * grcan_hw_accept() to avoid a transient spurious match.
+ */
+static void grcan_hw_sync(struct grcan_regs *regs, struct grcan_filter *sfilter)
+{
+ /* Disable Sync mask totaly (if we change scode or smask
+  * in an unfortunate way we may trigger a sync match)
+  */
+ regs->smask = 0xffffffff;
+
+ /* Set Sync Filter in a controlled way */
+ regs->scode = sfilter->code;
+ regs->smask = sfilter->mask;
+}
+
+/* Number of complete CAN messages waiting in the RX circular buffer,
+ * computed from the hardware read pointer rp, write pointer wp and the
+ * buffer size in bytes. rp == wp means the buffer is empty.
+ */
+static unsigned int grcan_hw_rxavail(
+ unsigned int rp,
+ unsigned int wp, unsigned int size
+)
+{
+ unsigned int used_bytes;
+
+ /* Pointers are equal only for an empty RX buffer */
+ if (rp == wp)
+  return 0;
+
+ used_bytes = (wp > rp) ? (wp - rp) : (size - (rp - wp));
+ return used_bytes / GRCAN_MSG_SIZE;
+}
+
+/* Number of CAN messages that can still be queued in the TX circular
+ * buffer. WRAP_AROUND_TX_MSGS slots are always reserved so the hardware
+ * write pointer can never catch up with the read pointer.
+ */
+static unsigned int grcan_hw_txspace(
+ unsigned int rp,
+ unsigned int wp,
+ unsigned int size
+)
+{
+ unsigned int used_bytes;
+
+ /* Pointers are equal only for an empty TX buffer */
+ if (rp == wp)
+  return size / GRCAN_MSG_SIZE - WRAP_AROUND_TX_MSGS;
+
+ used_bytes = (wp > rp) ? (wp - rp) : (size - (rp - wp));
+ return (size - used_bytes) / GRCAN_MSG_SIZE - WRAP_AROUND_TX_MSGS;
+}
+
+#define MIN_TSEG1 1
+#define MIN_TSEG2 2
+#define MAX_TSEG1 14
+#define MAX_TSEG2 8
+
+/* Compute GRCAN bit-timing parameters (scaler, BPR, PS1, PS2, RSJ) for the
+ * requested baud rate by searching all feasible tseg/prescaler pairs.
+ *
+ * baud:     requested bitrate, valid range 5000..1000000 bits/s
+ * core_hz:  GRCAN core clock frequency in Hz
+ * sampl_pt: requested sampling point in percent; values outside 50..99
+ *           fall back to GRCAN_SAMPLING_POINT (and are forced to 75 for
+ *           very high/low rates by the heuristics below)
+ * timing:   result storage; may be NULL to only test feasibility
+ *
+ * Returns 0 on success, -1 for an out-of-range baud rate, -2 when the best
+ * achievable rate deviates too much (roughly >10%) from the request.
+ */
+static int grcan_calc_timing(
+ unsigned int baud, /* The requested BAUD to calculate timing for */
+ unsigned int core_hz, /* Frequency in Hz of GRCAN Core */
+ unsigned int sampl_pt,
+ struct grcan_timing *timing /* result is placed here */
+)
+{
+ int best_error = 1000000000;
+ int error;
+ int best_tseg = 0, best_brp = 0, brp = 0;
+ int tseg = 0, tseg1 = 0, tseg2 = 0;
+ int sjw = 1;
+
+ /* Default to 90% */
+ if ((sampl_pt < 50) || (sampl_pt > 99)) {
+  sampl_pt = GRCAN_SAMPLING_POINT;
+ }
+
+ if ((baud < 5000) || (baud > 1000000)) {
+  /* invalid speed mode */
+  return -1;
+ }
+
+ /* find best match, return -2 if no good reg
+  * combination is available for this frequency
+  */
+
+ /* some heuristic specials */
+ if (baud > ((1000000 + 500000) / 2))
+  sampl_pt = 75;
+
+ if (baud < ((12500 + 10000) / 2))
+  sampl_pt = 75;
+
+ /* tseg even = round down, odd = round up */
+ for (
+  tseg = (MIN_TSEG1 + MIN_TSEG2 + 2) * 2;
+  tseg <= (MAX_TSEG2 + MAX_TSEG1 + 2) * 2 + 1;
+  tseg++
+ ) {
+  brp = core_hz / ((1 + tseg / 2) * baud) + tseg % 2;
+  /* The pseudo-BRP must be representable: exact up to 256, then only
+   * in steps of 2/4/8 for the higher BPR ranges, max 256*8.
+   */
+  if (
+   (brp <= 0) ||
+   ((brp > 256 * 1) && (brp <= 256 * 2) && (brp & 0x1)) ||
+   ((brp > 256 * 2) && (brp <= 256 * 4) && (brp & 0x3)) ||
+   ((brp > 256 * 4) && (brp <= 256 * 8) && (brp & 0x7)) ||
+   (brp > 256 * 8)
+  )
+   continue;
+
+  error = baud - core_hz / (brp * (1 + tseg / 2));
+  if (error < 0) {
+   error = -error;
+  }
+
+  if (error <= best_error) {
+   best_error = error;
+   best_tseg = tseg / 2;
+   best_brp = brp - 1;
+  }
+ }
+
+ if (best_error && (baud / best_error < 10)) {
+  return -2;
+ } else if (!timing)
+  return 0; /* nothing to store result in, but a valid bitrate can be calculated */
+
+ /* Split best_tseg into tseg1/tseg2 at the requested sampling point,
+  * clamped to the hardware limits.
+  */
+ tseg2 = best_tseg - (sampl_pt * (best_tseg + 1)) / 100;
+
+ if (tseg2 < MIN_TSEG2) {
+  tseg2 = MIN_TSEG2;
+ }
+
+ if (tseg2 > MAX_TSEG2) {
+  tseg2 = MAX_TSEG2;
+ }
+
+ tseg1 = best_tseg - tseg2 - 2;
+
+ if (tseg1 > MAX_TSEG1) {
+  tseg1 = MAX_TSEG1;
+  tseg2 = best_tseg - tseg1 - 2;
+ }
+
+ /* Get scaler and BRP from pseudo BRP */
+ if (best_brp <= 256) {
+  timing->scaler = best_brp;
+  timing->bpr = 0;
+ } else if (best_brp <= 256 * 2) {
+  timing->scaler = ((best_brp + 1) >> 1) - 1;
+  timing->bpr = 1;
+ } else if (best_brp <= 256 * 4) {
+  timing->scaler = ((best_brp + 1) >> 2) - 1;
+  timing->bpr = 2;
+ } else {
+  timing->scaler = ((best_brp + 1) >> 3) - 1;
+  timing->bpr = 3;
+ }
+
+ timing->ps1 = tseg1 + 1;
+ timing->ps2 = tseg2;
+ timing->rsj = sjw;
+
+ return 0;
+}
+
+/* Copy up to max received messages from the circular RX DMA buffer into
+ * buffer, converting the hardware message layout to CANMsg. The hardware
+ * read pointer is only advanced while the device is still STATE_STARTED.
+ *
+ * Returns the number of messages copied (0 if the buffer is empty), or a
+ * negative state2err[] code if the device left the started state.
+ */
+static int grcan_hw_read_try(
+ struct grcan_priv *pDev,
+ struct grcan_regs *regs,
+ CANMsg * buffer,
+ int max
+)
+{
+ int i, j;
+ CANMsg *dest;
+ struct grcan_msg *source, tmp;
+ unsigned int wp, rp, size, rxmax, addr;
+ int trunk_msg_cnt;
+
+ FUNCDBG();
+
+ wp = READ_REG(&regs->rx0wr);
+ rp = READ_REG(&regs->rx0rd);
+
+ /*
+  * Due to hardware wrap around simplification write pointer will
+  * never reach the read pointer, at least a gap of 8 bytes.
+  * The only time they are equal is when the read pointer has
+  * reached the write pointer (empty buffer)
+  *
+  */
+ if (wp != rp) {
+  /* Not empty, we have received chars...
+   * Read as much as possible from DMA buffer
+   */
+  size = READ_REG(&regs->rx0size);
+
+  /* Get number of bytes available in RX buffer */
+  trunk_msg_cnt = grcan_hw_rxavail(rp, wp, size);
+
+  /* truncate size if user space buffer hasn't room for
+   * all received chars.
+   */
+  if (trunk_msg_cnt > max)
+   trunk_msg_cnt = max;
+
+  /* Read until i is 0 */
+  i = trunk_msg_cnt;
+
+  addr = (unsigned int)pDev->rx;
+  source = (struct grcan_msg *)(addr + rp);
+  dest = buffer;
+  rxmax = addr + (size - GRCAN_MSG_SIZE);
+
+  /* Read as many can messages as possible */
+  while (i > 0) {
+   /* Read CAN message from DMA buffer */
+   tmp.head[0] = READ_DMA_WORD(&source->head[0]);
+   tmp.head[1] = READ_DMA_WORD(&source->head[1]);
+   if (tmp.head[1] & 0x4) {
+    DBGC(DBG_RX, "overrun\n");
+   }
+   if (tmp.head[1] & 0x2) {
+    DBGC(DBG_RX, "bus-off mode\n");
+   }
+   if (tmp.head[1] & 0x1) {
+    DBGC(DBG_RX, "error-passive mode\n");
+   }
+   /* Convert one grcan CAN message to one "software" CAN message */
+   dest->extended = tmp.head[0] >> 31;
+   dest->rtr = (tmp.head[0] >> 30) & 0x1;
+   if (dest->extended) {
+    dest->id = tmp.head[0] & 0x3fffffff;
+   } else {
+    /* NOTE(review): 0xfff admits 12 bits but a standard CAN
+     * ID is 11 bits -- verify against the GRCAN register
+     * layout (bit 29 may belong to another field).
+     */
+    dest->id = (tmp.head[0] >> 18) & 0xfff;
+   }
+   dest->len = tmp.head[1] >> 28;
+   for (j = 0; j < dest->len; j++)
+    dest->data[j] = READ_DMA_BYTE(&source->data[j]);
+
+   /* wrap around if neccessary */
+   source =
+       ((unsigned int)source >= rxmax) ?
+       (struct grcan_msg *)addr : source + 1;
+   dest++; /* straight user buffer */
+   i--;
+  }
+  {
+   /* A bus off interrupt may have occured after checking pDev->started */
+   SPIN_IRQFLAGS(oldLevel);
+
+   SPIN_LOCK_IRQ(&pDev->devlock, oldLevel);
+   if (pDev->started == STATE_STARTED) {
+    regs->rx0rd = (unsigned int) source - addr;
+    regs->rx0ctrl = GRCAN_RXCTRL_ENABLE;
+   } else {
+    DBGC(DBG_STATE, "cancelled due to a BUS OFF error\n");
+    trunk_msg_cnt = state2err[pDev->started];
+   }
+   SPIN_UNLOCK_IRQ(&pDev->devlock, oldLevel);
+  }
+  return trunk_msg_cnt;
+ }
+ return 0;
+}
+
+/* Convert and copy up to count messages from buffer into the circular TX
+ * DMA buffer. The hardware write pointer is only advanced while the
+ * device is still STATE_STARTED.
+ *
+ * Returns the number of messages queued (0 if the TX buffer is full), or
+ * a negative state2err[] code if the device left the started state.
+ */
+static int grcan_hw_write_try(
+ struct grcan_priv *pDev,
+ struct grcan_regs *regs,
+ CANMsg * buffer,
+ int count
+)
+{
+ unsigned int rp, wp, size, txmax, addr;
+ int ret;
+ struct grcan_msg *dest;
+ CANMsg *source;
+ int space_left;
+ unsigned int tmp;
+ int i;
+
+ DBGC(DBG_TX, "\n");
+ /*FUNCDBG(); */
+
+ rp = READ_REG(&regs->tx0rd);
+ wp = READ_REG(&regs->tx0wr);
+ size = READ_REG(&regs->tx0size);
+
+ space_left = grcan_hw_txspace(rp, wp, size);
+
+ /* is circular fifo full? */
+ if (space_left < 1)
+  return 0;
+
+ /* Truncate size */
+ if (space_left > count)
+  space_left = count;
+ ret = space_left;
+
+ addr = (unsigned int)pDev->tx;
+
+ dest = (struct grcan_msg *)(addr + wp);
+ source = (CANMsg *) buffer;
+ txmax = addr + (size - GRCAN_MSG_SIZE);
+
+ while (space_left > 0) {
+  /* Convert and write CAN message to DMA buffer */
+  if (source->extended) {
+   tmp = (1 << 31) | (source->id & 0x3fffffff);
+  } else {
+   /* NOTE(review): 0xfff admits 12 bits but a standard CAN ID
+    * is 11 bits -- verify against the GRCAN register layout.
+    */
+   tmp = (source->id & 0xfff) << 18;
+  }
+  if (source->rtr)
+   tmp |= (1 << 30);
+  dest->head[0] = tmp;
+  dest->head[1] = source->len << 28;
+  for (i = 0; i < source->len; i++)
+   dest->data[i] = source->data[i];
+  source++; /* straight user buffer */
+  dest =
+      ((unsigned int)dest >= txmax) ?
+      (struct grcan_msg *)addr : dest + 1;
+  space_left--;
+ }
+
+ {
+  /* A bus off interrupt may have occured after checking pDev->started */
+  SPIN_IRQFLAGS(oldLevel);
+
+  SPIN_LOCK_IRQ(&pDev->devlock, oldLevel);
+  if (pDev->started == STATE_STARTED) {
+   regs->tx0wr = (unsigned int) dest - addr;
+   regs->tx0ctrl = GRCAN_TXCTRL_ENABLE;
+  } else {
+   DBGC(DBG_STATE, "cancelled due to a BUS OFF error\n");
+   ret = state2err[pDev->started];
+  }
+  SPIN_UNLOCK_IRQ(&pDev->devlock, oldLevel);
+ }
+ return ret;
+}
+
+/* Block the caller until at least min messages are available in the RX
+ * buffer. The RX IRQ pointer is programmed so the interrupt fires once
+ * enough data has arrived; if the data is already there the sleep is
+ * skipped entirely (and the IRQ stays masked).
+ *
+ * Returns 0 on success or a state2err[] code when the device was stopped,
+ * went bus-off or hit an AHB error.
+ */
+static int grcan_wait_rxdata(struct grcan_priv *pDev, int min)
+{
+ unsigned int wp, rp, size, irq;
+ unsigned int irq_trunk, dataavail;
+ int wait, state;
+ SPIN_IRQFLAGS(oldLevel);
+
+ FUNCDBG();
+
+ /*** block until receive IRQ received
+  * Set up a valid IRQ point so that an IRQ is received
+  * when one or more messages are received
+  */
+ SPIN_LOCK_IRQ(&pDev->devlock, oldLevel);
+ state = pDev->started;
+
+ /* A bus off interrupt may have occured after checking pDev->started */
+ if (state != STATE_STARTED) {
+  SPIN_UNLOCK_IRQ(&pDev->devlock, oldLevel);
+  if (state == STATE_BUSOFF) {
+   DBGC(DBG_STATE, "cancelled due to a BUS OFF error\n");
+  } else if (state == STATE_AHBERR) {
+   DBGC(DBG_STATE, "cancelled due to a AHB error\n");
+  } else {
+   DBGC(DBG_STATE, "cancelled due to STOP (unexpected) \n");
+  }
+  return state2err[state];
+ }
+
+ size = READ_REG(&pDev->regs->rx0size);
+ rp = READ_REG(&pDev->regs->rx0rd);
+ wp = READ_REG(&pDev->regs->rx0wr);
+
+ /**** Calculate IRQ Pointer ****/
+ irq = wp + min * GRCAN_MSG_SIZE;
+ /* wrap irq around */
+ if (irq >= size) {
+  irq_trunk = irq - size;
+ } else
+  irq_trunk = irq;
+
+ /* init IRQ HW */
+ pDev->regs->rx0irq = irq_trunk;
+
+ /* Clear pending Rx IRQ */
+ pDev->regs->picr = GRCAN_RXIRQ_IRQ;
+
+ /* Re-read wp after arming the IRQ pointer to close the race with
+  * messages arriving in between.
+  */
+ wp = READ_REG(&pDev->regs->rx0wr);
+
+ /* Calculate messages available */
+ dataavail = grcan_hw_rxavail(rp, wp, size);
+
+ if (dataavail < min) {
+  /* Still empty, proceed with sleep - Turn on IRQ (unmask irq) */
+  pDev->regs->imr = READ_REG(&pDev->regs->imr) | GRCAN_RXIRQ_IRQ;
+  wait = 1;
+ } else {
+  /* enough message has been received, abort sleep - don't unmask interrupt */
+  wait = 0;
+ }
+ SPIN_UNLOCK_IRQ(&pDev->devlock, oldLevel);
+
+ /* Wait for IRQ to fire only if has been triggered */
+ if (wait) {
+  rtems_semaphore_obtain(pDev->rx_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  /*
+   * The semaphore is released either due to the expected IRQ
+   * condition or by BUSOFF, AHBERROR or another thread calling
+   * grcan_stop(). In either case, state2err[] has the correnct
+   * return value.
+   */
+  return state2err[pDev->started];
+ }
+
+ return 0;
+}
+
+/* Wait for TX circular buffer to have room for min CAN messagges. TXIRQ is used to pin
+ * point the location of the CAN message corresponding to min.
+ *
+ * min must be at least WRAP_AROUND_TX_MSGS less than max buffer capacity
+ * (pDev->txbuf_size/GRCAN_MSG_SIZE) for this algo to work.
+ *
+ * Returns 0 on success or a state2err[] code when the device was stopped,
+ * went bus-off or hit an AHB error while waiting.
+ */
+static int grcan_wait_txspace(struct grcan_priv *pDev, int min)
+{
+ int wait, state;
+ unsigned int irq, rp, wp, size, space_left;
+ unsigned int irq_trunk;
+ SPIN_IRQFLAGS(oldLevel);
+
+ DBGC(DBG_TX, "\n");
+ /*FUNCDBG(); */
+
+ SPIN_LOCK_IRQ(&pDev->devlock, oldLevel);
+ state = pDev->started;
+ /* A bus off interrupt may have occured after checking pDev->started */
+ if (state != STATE_STARTED) {
+  SPIN_UNLOCK_IRQ(&pDev->devlock, oldLevel);
+  if (state == STATE_BUSOFF) {
+   DBGC(DBG_STATE, "cancelled due to a BUS OFF error\n");
+  } else if (state == STATE_AHBERR) {
+   DBGC(DBG_STATE, "cancelled due to a AHB error\n");
+  } else {
+   DBGC(DBG_STATE, "cancelled due to STOP (unexpected)\n");
+  }
+  return state2err[state];
+ }
+
+ pDev->regs->tx0ctrl = GRCAN_TXCTRL_ENABLE;
+
+ size = READ_REG(&pDev->regs->tx0size);
+ wp = READ_REG(&pDev->regs->tx0wr);
+
+ rp = READ_REG(&pDev->regs->tx0rd);
+
+ /**** Calculate IRQ Pointer ****/
+ irq = rp + min * GRCAN_MSG_SIZE;
+ /* wrap irq around */
+ if (irq >= size) {
+  irq_trunk = irq - size;
+ } else
+  irq_trunk = irq;
+
+ /* trigger HW to do a IRQ when enough room in buffer */
+ pDev->regs->tx0irq = irq_trunk;
+
+ /* Clear pending Tx IRQ */
+ pDev->regs->picr = GRCAN_TXIRQ_IRQ;
+
+ /* One problem, if HW already gone past IRQ place the IRQ will
+  * never be received resulting in a thread hang. We check if so
+  * before proceeding.
+  *
+  * has the HW already gone past the IRQ generation place?
+  * == does min fit info tx buffer?
+  */
+ rp = READ_REG(&pDev->regs->tx0rd);
+
+ space_left = grcan_hw_txspace(rp, wp, size);
+
+ if (space_left < min) {
+  /* Still too full, proceed with sleep - Turn on IRQ (unmask irq) */
+  pDev->regs->imr = READ_REG(&pDev->regs->imr) | GRCAN_TXIRQ_IRQ;
+  wait = 1;
+ } else {
+  /* There are enough room in buffer, abort wait - don't unmask interrupt */
+  wait = 0;
+ }
+ SPIN_UNLOCK_IRQ(&pDev->devlock, oldLevel);
+
+ /* Wait for IRQ to fire only if it has been triggered */
+ if (wait) {
+  rtems_semaphore_obtain(pDev->tx_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  return state2err[pDev->started];
+ }
+
+ /* At this point the TxIRQ has been masked, we ned not to mask it */
+ return 0;
+}
+
+/* Block until the hardware has drained the TX circular buffer, i.e. until
+ * the read pointer catches up with the write pointer. Sleeps on the
+ * TX-empty semaphore between checks.
+ *
+ * Returns 0 on success or a state2err[] code when the device was stopped,
+ * went bus-off or hit an AHB error while flushing.
+ */
+static int grcan_tx_flush(struct grcan_priv *pDev)
+{
+ int wait, state;
+ unsigned int rp, wp;
+ SPIN_IRQFLAGS(oldLevel);
+ FUNCDBG();
+
+ /* loop until all data in circular buffer has been read by hw.
+  * (write pointer != read pointer )
+  *
+  * Hardware doesn't update write pointer - we do
+  */
+ while (
+  (wp = READ_REG(&pDev->regs->tx0wr)) !=
+  (rp = READ_REG(&pDev->regs->tx0rd))
+ ) {
+  /* Wait for TX empty IRQ */
+  SPIN_LOCK_IRQ(&pDev->devlock, oldLevel);
+  state = pDev->started;
+
+  /* A bus off interrupt may have occured after checking pDev->started */
+  if (state != STATE_STARTED) {
+   SPIN_UNLOCK_IRQ(&pDev->devlock, oldLevel);
+   if (state == STATE_BUSOFF) {
+    DBGC(DBG_STATE, "cancelled due to a BUS OFF error\n");
+   } else if (state == STATE_AHBERR) {
+    DBGC(DBG_STATE, "cancelled due to a AHB error\n");
+   } else {
+    DBGC(DBG_STATE, "cancelled due to STOP (unexpected)\n");
+   }
+   return state2err[state];
+  }
+
+  /* Clear pending TXEmpty IRQ */
+  pDev->regs->picr = GRCAN_TXEMPTY_IRQ;
+
+  if (wp != READ_REG(&pDev->regs->tx0rd)) {
+   /* Still not empty, proceed with sleep - Turn on IRQ (unmask irq) */
+   pDev->regs->imr =
+       READ_REG(&pDev->regs->imr) | GRCAN_TXEMPTY_IRQ;
+   wait = 1;
+  } else {
+   /* TX fifo is empty */
+   wait = 0;
+  }
+  SPIN_UNLOCK_IRQ(&pDev->devlock, oldLevel);
+  if (!wait)
+   break;
+
+  /* Wait for IRQ to wake us */
+  rtems_semaphore_obtain(pDev->txempty_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  state = pDev->started;
+  if (state != STATE_STARTED) {
+   return state2err[state];
+  }
+ }
+ return 0;
+}
+
+/* Set up the TX and/or RX DMA message buffers for a device.
+ *
+ * The buffer source is selected by pDev->txbuf_adr / pDev->rxbuf_adr:
+ *   - LSB set:  "remote" (DMA-side) address, translated DMAMEM_TO_CPU
+ *   - zero:     dynamically allocated here (with alignment head-room)
+ *   - other:    user-provided CPU-local address
+ * In the two latter cases the buffer is aligned up to
+ * BUFFER_ALIGNMENT_NEEDS and translated CPUMEM_TO_DMA so hardware can
+ * access it.
+ *
+ * Returns 0 on success, -1 if dynamic allocation fails.
+ */
+static int grcan_alloc_buffers(struct grcan_priv *pDev, int rx, int tx)
+{
+ unsigned int adr;
+ FUNCDBG();
+
+ if ( tx ) {
+  adr = (unsigned int)pDev->txbuf_adr;
+  if (adr & 0x1) {
+   /* User defined "remote" address. Translate it into
+    * a CPU accessible address
+    */
+   pDev->_tx_hw = (void *)(adr & ~0x1);
+   drvmgr_translate_check(
+    pDev->dev,
+    DMAMEM_TO_CPU,
+    (void *)pDev->_tx_hw,
+    (void **)&pDev->_tx,
+    pDev->txbuf_size);
+   pDev->tx = (struct grcan_msg *)pDev->_tx;
+  } else {
+   if (adr == 0) {
+    pDev->_tx = grlib_malloc(pDev->txbuf_size +
+                BUFFER_ALIGNMENT_NEEDS);
+    if (!pDev->_tx)
+     return -1;
+   } else {
+    /* User defined "cpu-local" address. Translate
+     * it into a CPU accessible address
+     */
+    pDev->_tx = (void *)adr;
+   }
+   /* Align TX buffer */
+   pDev->tx = (struct grcan_msg *)
+              (((unsigned int)pDev->_tx +
+              (BUFFER_ALIGNMENT_NEEDS-1)) &
+              ~(BUFFER_ALIGNMENT_NEEDS-1));
+
+   /* Translate address into an hardware accessible
+    * address
+    */
+   drvmgr_translate_check(
+    pDev->dev,
+    CPUMEM_TO_DMA,
+    (void *)pDev->tx,
+    (void **)&pDev->_tx_hw,
+    pDev->txbuf_size);
+  }
+ }
+
+ if ( rx ) {
+  adr = (unsigned int)pDev->rxbuf_adr;
+  if (adr & 0x1) {
+   /* User defined "remote" address. Translate it into
+    * a CPU accessible address
+    */
+   pDev->_rx_hw = (void *)(adr & ~0x1);
+   drvmgr_translate_check(
+    pDev->dev,
+    DMAMEM_TO_CPU,
+    (void *)pDev->_rx_hw,
+    (void **)&pDev->_rx,
+    pDev->rxbuf_size);
+   pDev->rx = (struct grcan_msg *)pDev->_rx;
+  } else {
+   if (adr == 0) {
+    pDev->_rx = grlib_malloc(pDev->rxbuf_size +
+                BUFFER_ALIGNMENT_NEEDS);
+    if (!pDev->_rx)
+     return -1;
+   } else {
+    /* User defined "cpu-local" address. Translate
+     * it into a CPU accessible address
+     */
+    pDev->_rx = (void *)adr;
+   }
+   /* Align RX buffer */
+   pDev->rx = (struct grcan_msg *)
+              (((unsigned int)pDev->_rx +
+              (BUFFER_ALIGNMENT_NEEDS-1)) &
+              ~(BUFFER_ALIGNMENT_NEEDS-1));
+
+   /* Translate address into an hardware accessible
+    * address
+    */
+   drvmgr_translate_check(
+    pDev->dev,
+    CPUMEM_TO_DMA,
+    (void *)pDev->rx,
+    (void **)&pDev->_rx_hw,
+    pDev->rxbuf_size);
+  }
+ }
+ return 0;
+}
+
+/* Release the TX and/or RX DMA buffers allocated by grcan_alloc_buffers().
+ * Note: free() is called on the unaligned allocation pointer (_tx/_rx);
+ * for user-provided buffers _tx/_rx also ends up here — presumably the
+ * caller only frees buffers this driver malloc'ed (TODO confirm against
+ * grcan_alloc_buffers() usage).
+ */
+static void grcan_free_buffers(struct grcan_priv *pDev, int rx, int tx)
+{
+ FUNCDBG();
+
+ if (tx && pDev->_tx) {
+  free(pDev->_tx);
+  pDev->_tx = NULL;
+  pDev->tx = NULL;
+ }
+
+ if (rx && pDev->_rx) {
+  free(pDev->_rx);
+  pDev->_rx = NULL;
+  pDev->rx = NULL;
+ }
+}
+
+/* Return the number of GRCAN devices registered with the driver. */
+int grcan_dev_count(void)
+{
+ return grcan_count;
+}
+
+/* Open a GRCAN device by its file-system name.
+ *
+ * Scans the private-device table for a matching devName; on a match the
+ * device index is stored in *dev_no (if non-NULL) and grcan_open() is
+ * called for it. Returns the opened device handle or NULL when no device
+ * matches (or grcan_open() fails).
+ */
+void *grcan_open_by_name(char *name, int *dev_no)
+{
+ int i;
+ for (i = 0; i < grcan_count; i++){
+  struct grcan_priv *pDev;
+
+  pDev = priv_tab[i];
+  if (NULL == pDev) {
+   continue;
+  }
+  if (strncmp(pDev->devName, name, NELEM(pDev->devName)) == 0) {
+   if (dev_no)
+    *dev_no = i;
+   return grcan_open(i);
+  }
+ }
+ return NULL;
+}
+
+/* Open GRCAN device number dev_no and return an opaque handle for the
+ * other grcan_* calls, or NULL on failure (bad index, already open, or
+ * buffer allocation failure).
+ *
+ * Resets the per-device state to defaults (blocking RX/TX, non-complete
+ * mode, accept-all filter, sync filter disabled, default buffer sizes),
+ * then lets bus resources override buffer sizes/addresses, computes the
+ * default timing and allocates the DMA buffers. Exclusive access is
+ * serialized with the per-device dev_sem semaphore.
+ */
+void *grcan_open(int dev_no)
+{
+ struct grcan_priv *pDev;
+ void *ret;
+ union drvmgr_key_value *value;
+
+ FUNCDBG();
+
+ if (grcan_count == 0 || (grcan_count <= dev_no)) {
+  return NULL;
+ }
+
+ pDev = priv_tab[dev_no];
+
+ /* Wait until we get semaphore */
+ if (rtems_semaphore_obtain(pDev->dev_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+     != RTEMS_SUCCESSFUL) {
+  return NULL;
+ }
+
+ /* is device busy/taken? */
+ if ( pDev->open ) {
+  ret = NULL;
+  goto out;
+ }
+
+ SPIN_INIT(&pDev->devlock, pDev->devName);
+
+ /* Mark device taken */
+ pDev->open = 1;
+
+ pDev->txblock = pDev->rxblock = 1;
+ pDev->txcomplete = pDev->rxcomplete = 0;
+ pDev->started = STATE_STOPPED;
+ pDev->config_changed = 1;
+ pDev->config.silent = 0;
+ pDev->config.abort = 0;
+ pDev->config.selection.selection = 0;
+ pDev->config.selection.enable0 = 0;
+ pDev->config.selection.enable1 = 1;
+ pDev->flushing = 0;
+ pDev->rx = pDev->_rx = NULL;
+ pDev->tx = pDev->_tx = NULL;
+ pDev->txbuf_adr = 0;
+ pDev->rxbuf_adr = 0;
+ pDev->txbuf_size = TX_BUF_SIZE;
+ pDev->rxbuf_size = RX_BUF_SIZE;
+
+ /* Override default buffer sizes if available from bus resource */
+ value = drvmgr_dev_key_get(pDev->dev, "txBufSize", DRVMGR_KT_INT);
+ if ( value )
+  pDev->txbuf_size = value->i;
+
+ value = drvmgr_dev_key_get(pDev->dev, "rxBufSize", DRVMGR_KT_INT);
+ if ( value )
+  pDev->rxbuf_size = value->i;
+
+ value = drvmgr_dev_key_get(pDev->dev, "txBufAdr", DRVMGR_KT_POINTER);
+ if ( value )
+  pDev->txbuf_adr = value->ptr;
+
+ value = drvmgr_dev_key_get(pDev->dev, "rxBufAdr", DRVMGR_KT_POINTER);
+ if ( value )
+  pDev->rxbuf_adr = value->ptr;
+
+ DBG("Defaulting to rxbufsize: %d, txbufsize: %d\n",RX_BUF_SIZE,TX_BUF_SIZE);
+
+ /* Default to accept all messages */
+ pDev->afilter.mask = 0x00000000;
+ pDev->afilter.code = 0x00000000;
+
+ /* Default to disable sync messages (only trigger when id is set to all ones) */
+ pDev->sfilter.mask = 0xffffffff;
+ pDev->sfilter.code = 0x00000000;
+
+ /* Calculate default timing register values */
+ grcan_calc_timing(GRCAN_DEFAULT_BAUD,pDev->corefreq_hz,GRCAN_SAMPLING_POINT,&pDev->config.timing);
+
+ if ( grcan_alloc_buffers(pDev,1,1) ) {
+  ret = NULL;
+  goto out;
+ }
+
+ /* Clear statistics */
+ memset(&pDev->stats,0,sizeof(struct grcan_stats));
+
+ ret = pDev;
+out:
+ rtems_semaphore_release(pDev->dev_sem);
+ return ret;
+}
+
+/* Close a device handle returned by grcan_open(): stop the device,
+ * reset the hardware core, free the DMA buffers and mark the device
+ * available again. Always returns 0.
+ */
+int grcan_close(void *d)
+{
+ struct grcan_priv *pDev = d;
+
+ FUNCDBG();
+
+ grcan_stop(d);
+
+ grcan_hw_reset(pDev->regs);
+
+ grcan_free_buffers(pDev,1,1);
+
+ /* Mark Device as closed */
+ pDev->open = 0;
+
+ return 0;
+}
+
+/* Read up to ucount CAN messages from the device into msg.
+ *
+ * Returns the number of messages read (>0), or a negative GRCAN_RET_*
+ * code: INVARG for bad arguments, NOTSTARTED if the device is not
+ * started, TIMEOUT in non-blocking mode when nothing is available, or
+ * the error propagated from a wait aborted by stop/bus-off.
+ *
+ * Blocking behaviour depends on pDev->rxblock/rxcomplete: in blocking
+ * mode the call sleeps until at least one message (or, with rxcomplete,
+ * all requested messages) has arrived.
+ */
+int grcan_read(void *d, CANMsg *msg, size_t ucount)
+{
+ struct grcan_priv *pDev = d;
+ CANMsg *dest;
+ unsigned int count, left;
+ int nread;
+ int req_cnt;
+
+ FUNCDBG();
+
+ dest = msg;
+ req_cnt = ucount;
+
+ if ( (!dest) || (req_cnt<1) )
+  return GRCAN_RET_INVARG;
+
+ if (pDev->started != STATE_STARTED) {
+  return GRCAN_RET_NOTSTARTED;
+ }
+
+ DBGC(DBG_RX, "grcan_read [%p]: buf: %p len: %u\n", d, msg, (unsigned int) ucount);
+
+ nread = grcan_hw_read_try(pDev,pDev->regs,dest,req_cnt);
+ if (nread < 0) {
+  return nread;
+ }
+ count = nread;
+ if ( !( pDev->rxblock && pDev->rxcomplete && (count!=req_cnt) ) ){
+  if ( count > 0 ) {
+   /* Successfully received messages (at least one) */
+   return count;
+  }
+
+  /* nothing read, shall we block? */
+  if ( !pDev->rxblock ) {
+   /* non-blocking mode */
+   return GRCAN_RET_TIMEOUT;
+  }
+ }
+
+ while (count == 0 || (pDev->rxcomplete && (count!=req_cnt))) {
+  if (!pDev->rxcomplete) {
+   left = 1; /* return as soon as there is one message available */
+  } else {
+   left = req_cnt - count; /* return as soon as all data are available */
+
+   /* never wait for more than the half the maximum size of the receive buffer
+    * Why? We need some time to copy buffer before to catch up with hw,
+    * otherwise we would have to copy everything when the data has been
+    * received.
+    */
+   if (left > ((pDev->rxbuf_size/GRCAN_MSG_SIZE) / 2)){
+    left = (pDev->rxbuf_size/GRCAN_MSG_SIZE) / 2;
+   }
+  }
+
+  nread = grcan_wait_rxdata(pDev, left);
+  if (nread) {
+   /* The wait has been aborted, probably due to
+    * the device driver has been closed by another
+    * thread or a bus-off. Return error code.
+    */
+   return nread;
+  }
+
+  /* Try read bytes from circular buffer */
+  nread = grcan_hw_read_try(
+   pDev,
+   pDev->regs,
+   dest+count,
+   req_cnt-count);
+
+  if (nread < 0) {
+   /* The read was aborted by bus-off. */
+   return nread;
+  }
+  count += nread;
+ }
+ /* no need to unmask IRQ as IRQ Handler do that for us. */
+ return count;
+}
+
+/* Write up to ucount CAN messages from msg to the device TX buffer.
+ *
+ * Returns the number of messages queued (>0), or a negative GRCAN_RET_*
+ * code: INVARG for bad arguments, NOTSTARTED if the device is stopped,
+ * in silent mode or currently flushing, TIMEOUT in non-blocking mode
+ * when the buffer is full, or the error propagated from a wait aborted
+ * by stop/bus-off.
+ *
+ * Blocking behaviour depends on pDev->txblock/txcomplete: with
+ * txcomplete set, the call sleeps until all requested messages fit.
+ */
+int grcan_write(void *d, CANMsg *msg, size_t ucount)
+{
+ struct grcan_priv *pDev = d;
+ CANMsg *source;
+ unsigned int count, left;
+ int nwritten;
+ int req_cnt;
+
+ DBGC(DBG_TX,"\n");
+
+ if ((pDev->started != STATE_STARTED) || pDev->config.silent || pDev->flushing)
+  return GRCAN_RET_NOTSTARTED;
+
+ req_cnt = ucount;
+ source = (CANMsg *) msg;
+
+ /* check proper length and buffer pointer */
+ if (( req_cnt < 1) || (source == NULL) ){
+  return GRCAN_RET_INVARG;
+ }
+
+ nwritten = grcan_hw_write_try(pDev,pDev->regs,source,req_cnt);
+ if (nwritten < 0) {
+  return nwritten;
+ }
+ count = nwritten;
+ if ( !(pDev->txblock && pDev->txcomplete && (count!=req_cnt)) ) {
+  if ( count > 0 ) {
+   /* Successfully transmitted chars (at least one char) */
+   return count;
+  }
+
+  /* nothing written, shall we block? */
+  if ( !pDev->txblock ) {
+   /* non-blocking mode */
+   return GRCAN_RET_TIMEOUT;
+  }
+ }
+
+ /* if in txcomplete mode we need to transmit all chars */
+ while((count == 0) || (pDev->txcomplete && (count!=req_cnt)) ){
+  /*** block until room to fit all or as much of transmit buffer as possible
+   * IRQ comes. Set up a valid IRQ point so that an IRQ is received
+   * when we can put a chunk of data into transmit fifo
+   */
+  if ( !pDev->txcomplete ){
+   left = 1; /* wait for anything to fit buffer */
+  }else{
+   left = req_cnt - count; /* wait for all data to fit in buffer */
+
+   /* never wait for more than the half the maximum size of the transmit
+    * buffer
+    * Why? We need some time to fill buffer before hw catches up.
+    */
+   if ( left > ((pDev->txbuf_size/GRCAN_MSG_SIZE)/2) ){
+    left = (pDev->txbuf_size/GRCAN_MSG_SIZE)/2;
+   }
+  }
+
+  nwritten = grcan_wait_txspace(pDev,left);
+  /* Wait until more room in transmit buffer */
+  if ( nwritten ) {
+   /* The wait has been aborted, probably due to
+    * the device driver has been closed by another
+    * thread. To avoid deadlock we return directly
+    * with error status.
+    */
+   return nwritten;
+  }
+
+  /* Try to write more messages to the circular buffer */
+  nwritten = grcan_hw_write_try(
+   pDev,
+   pDev->regs,
+   source+count,
+   req_cnt-count);
+
+  if (nwritten < 0) {
+   /* Write was aborted by bus-off. */
+   return nwritten;
+  }
+  count += nwritten;
+ }
+ /* no need to unmask IRQ as IRQ Handler do that for us. */
+
+ return count;
+}
+
+/* Start the device: bring the hardware up, drain stale semaphore counts,
+ * enter STATE_STARTED and register the interrupt handler.
+ *
+ * Returns 0 on success, -1 if already started, -2 if the hardware start
+ * fails.
+ */
+int grcan_start(void *d)
+{
+ struct grcan_priv *pDev = d;
+
+ FUNCDBG();
+
+ if (grcan_get_state(d) == STATE_STARTED) {
+  return -1;
+ }
+
+ if ( (grcan_hw_start(pDev)) != RTEMS_SUCCESSFUL ){
+  return -2;
+ }
+
+ /* Clear semaphore state. This is to avoid effects from previous
+  * bus-off/stop where semahpores where flushed() but the count remained.
+  */
+ rtems_semaphore_obtain(pDev->rx_sem, RTEMS_NO_WAIT, 0);
+ rtems_semaphore_obtain(pDev->tx_sem, RTEMS_NO_WAIT, 0);
+ rtems_semaphore_obtain(pDev->txempty_sem, RTEMS_NO_WAIT, 0);
+
+ /* Read and write are now open... */
+ pDev->started = STATE_STARTED;
+ DBGC(DBG_STATE, "STOPPED|BUSOFF|AHBERR->STARTED\n");
+
+ /* Register interrupt routine and enable IRQ at IRQ ctrl */
+ drvmgr_interrupt_register(pDev->dev, 0, pDev->devName,
+    grcan_interrupt, pDev);
+
+ return 0;
+}
+
+/* Stop the device and wake any blocked readers/writers.
+ *
+ * If the device is STARTED the hardware is stopped under the spin-lock;
+ * if it is already in BUSOFF/AHBERR the ISR has stopped the hardware and
+ * only the software state transitions to STOPPED. The interrupt handler
+ * is always unregistered. Returns 0, or -1 if already stopped.
+ */
+int grcan_stop(void *d)
+{
+ struct grcan_priv *pDev = d;
+ SPIN_IRQFLAGS(oldLevel);
+ int do_sw_stop;
+
+ FUNCDBG();
+
+ if (pDev->started == STATE_STOPPED)
+  return -1;
+
+ SPIN_LOCK_IRQ(&pDev->devlock, oldLevel);
+ if (pDev->started == STATE_STARTED) {
+  grcan_hw_stop(pDev);
+  do_sw_stop = 1;
+  DBGC(DBG_STATE, "STARTED->STOPPED\n");
+ } else {
+  /*
+   * started == STATE_[STOPPED|BUSOFF|AHBERR] so grcan_hw_stop()
+   * might already been called from ISR.
+   */
+  DBGC(DBG_STATE, "[STOPPED|BUSOFF|AHBERR]->STOPPED\n");
+  do_sw_stop = 0;
+ }
+ pDev->started = STATE_STOPPED;
+ SPIN_UNLOCK_IRQ(&pDev->devlock, oldLevel);
+
+ if (do_sw_stop)
+  grcan_sw_stop(pDev);
+
+ /* Disable interrupts */
+ drvmgr_interrupt_unregister(pDev->dev, 0, grcan_interrupt, pDev);
+
+ return 0;
+}
+
+/* Return the current driver state: STATE_STOPPED, STATE_STARTED,
+ * STATE_BUSOFF or STATE_AHBERR.
+ */
+int grcan_get_state(void *d)
+{
+ struct grcan_priv *pDev = d;
+
+ FUNCDBG();
+
+ return pDev->started;
+}
+
+/* Block until all queued TX messages have been sent by hardware.
+ * While flushing, grcan_write() is rejected (pDev->flushing).
+ * Returns 0 on success, -1 if not started / already flushing / in
+ * silent mode, or if the flush wait was aborted (stop or bus-off).
+ */
+int grcan_flush(void *d)
+{
+ struct grcan_priv *pDev = d;
+ int tmp;
+
+ FUNCDBG();
+
+ if ((pDev->started != STATE_STARTED) || pDev->flushing || pDev->config.silent)
+  return -1;
+
+ pDev->flushing = 1;
+ tmp = grcan_tx_flush(pDev);
+ pDev->flushing = 0;
+ if ( tmp ) {
+  /* The wait has been aborted, probably due to
+   * the device driver has been closed by another
+   * thread.
+   */
+  return -1;
+ }
+
+ return 0;
+}
+
+/* Enable/disable silent (listen-only) mode. Only allowed while stopped;
+ * takes effect on next start (config_changed). Returns 0, -1 if started.
+ */
+int grcan_set_silent(void* d, int silent)
+{
+ struct grcan_priv *pDev = d;
+
+ FUNCDBG();
+
+ if (pDev->started == STATE_STARTED)
+  return -1;
+
+ pDev->config.silent = silent;
+ pDev->config_changed = 1;
+
+ return 0;
+}
+
+/* Enable/disable the abort-on-error configuration bit. Only allowed
+ * while stopped. Returns 0, -1 if started.
+ */
+int grcan_set_abort(void* d, int abort)
+{
+ struct grcan_priv *pDev = d;
+
+ FUNCDBG();
+
+ if (pDev->started == STATE_STARTED)
+  return -1;
+
+ pDev->config.abort = abort;
+ /* This Configuration parameter doesn't need HurriCANe reset
+  * ==> no pDev->config_changed = 1;
+  */
+
+ return 0;
+}
+
+/* Select the active CAN channel/transceiver configuration. Only allowed
+ * while stopped. Returns 0, -1 if started, -2 on NULL argument.
+ */
+int grcan_set_selection(void *d, const struct grcan_selection *selection)
+{
+ struct grcan_priv *pDev = d;
+
+ FUNCDBG();
+
+ if (pDev->started == STATE_STARTED)
+  return -1;
+
+ if ( !selection )
+  return -2;
+
+ pDev->config.selection = *selection;
+ pDev->config_changed = 1;
+
+ return 0;
+}
+
+/* Set blocking (1) or non-blocking (0) mode for grcan_read(). Returns 0. */
+int grcan_set_rxblock(void *d, int block)
+{
+ struct grcan_priv *pDev = d;
+
+ FUNCDBG();
+
+ pDev->rxblock = block;
+
+ return 0;
+}
+
+/* Set blocking (1) or non-blocking (0) mode for grcan_write(). Returns 0. */
+int grcan_set_txblock(void *d, int block)
+{
+ struct grcan_priv *pDev = d;
+
+ FUNCDBG();
+
+ pDev->txblock = block;
+
+ return 0;
+}
+
+/* When set, blocking grcan_write() returns only after all requested
+ * messages were queued. Returns 0.
+ */
+int grcan_set_txcomplete(void *d, int complete)
+{
+ struct grcan_priv *pDev = d;
+
+ FUNCDBG();
+
+ pDev->txcomplete = complete;
+
+ return 0;
+}
+
+/* When set, blocking grcan_read() returns only after all requested
+ * messages were received. Returns 0.
+ */
+int grcan_set_rxcomplete(void *d, int complete)
+{
+ struct grcan_priv *pDev = d;
+
+ FUNCDBG();
+
+ pDev->rxcomplete = complete;
+
+ return 0;
+}
+
+/* Copy the driver statistics counters into *stats under the device
+ * spin-lock (consistent snapshot vs. the ISR). Returns 0, -1 on NULL.
+ */
+int grcan_get_stats(void *d, struct grcan_stats *stats)
+{
+ struct grcan_priv *pDev = d;
+ SPIN_IRQFLAGS(oldLevel);
+
+ FUNCDBG();
+
+ if ( !stats )
+  return -1;
+
+ SPIN_LOCK_IRQ(&pDev->devlock, oldLevel);
+ *stats = pDev->stats;
+ SPIN_UNLOCK_IRQ(&pDev->devlock, oldLevel);
+
+ return 0;
+}
+
+/* Zero the driver statistics counters under the device spin-lock.
+ * Returns 0.
+ */
+int grcan_clr_stats(void *d)
+{
+ struct grcan_priv *pDev = d;
+ SPIN_IRQFLAGS(oldLevel);
+
+ FUNCDBG();
+
+ SPIN_LOCK_IRQ(&pDev->devlock, oldLevel);
+ memset(&pDev->stats,0,sizeof(struct grcan_stats));
+ SPIN_UNLOCK_IRQ(&pDev->devlock, oldLevel);
+
+ return 0;
+}
+
+/* Set the CAN bit-rate in Hz; timing registers are derived with
+ * grcan_calc_timing() from the core frequency. Only allowed while
+ * stopped. Returns 0, -1 if started, -2 if no valid timing exists for
+ * the requested rate.
+ */
+int grcan_set_speed(void *d, unsigned int speed)
+{
+ struct grcan_priv *pDev = d;
+ struct grcan_timing timing;
+ int ret;
+
+ FUNCDBG();
+
+ /* cannot change speed during run mode */
+ if (pDev->started == STATE_STARTED)
+  return -1;
+
+ /* get speed rate from argument */
+ ret = grcan_calc_timing(speed, pDev->corefreq_hz, GRCAN_SAMPLING_POINT, &timing);
+ if ( ret )
+  return -2;
+
+ /* save timing/speed */
+ pDev->config.timing = timing;
+ pDev->config_changed = 1;
+
+ return 0;
+}
+
+/* Set the bit-timing registers directly (expert interface, see the
+ * GRCAN/HurriCANe manual). Only allowed while stopped. Returns 0,
+ * -1 if started, -2 on NULL argument.
+ */
+int grcan_set_btrs(void *d, const struct grcan_timing *timing)
+{
+ struct grcan_priv *pDev = d;
+
+ FUNCDBG();
+
+ /* Set BTR registers manually
+  * Read GRCAN/HurriCANe Manual.
+  */
+ if (pDev->started == STATE_STARTED)
+  return -1;
+
+ if ( !timing )
+  return -2;
+
+ pDev->config.timing = *timing;
+ pDev->config_changed = 1;
+
+ return 0;
+}
+
+/* Set the hardware acceptance filter. filter == NULL disables filtering
+ * (mask/code zero, all messages pass). The filter is written to hardware
+ * immediately. Returns 0.
+ */
+int grcan_set_afilter(void *d, const struct grcan_filter *filter)
+{
+ struct grcan_priv *pDev = d;
+
+ FUNCDBG();
+
+ if ( !filter ){
+  /* Disable filtering - let all messages pass */
+  pDev->afilter.mask = 0x0;
+  pDev->afilter.code = 0x0;
+ }else{
+  /* Save filter */
+  pDev->afilter = *filter;
+ }
+ /* Set hardware acceptance filter */
+ grcan_hw_accept(pDev->regs,&pDev->afilter);
+
+ return 0;
+}
+
+/* Set the RX/TX SYNC message filter.
+ *
+ * filter == NULL disables SYNC handling: the filter is reset to the
+ * grcan_open() default (mask all ones, code zero, i.e. only an all-ones
+ * ID matches) and the sync interrupts are masked. A non-NULL filter is
+ * stored and the sync interrupts are unmasked. In both cases the filter
+ * is written to hardware. Returns 0.
+ */
+int grcan_set_sfilter(void *d, const struct grcan_filter *filter)
+{
+ struct grcan_priv *pDev = d;
+ SPIN_IRQFLAGS(oldLevel);
+
+ FUNCDBG();
+
+ if ( !filter ){
+  /* disable TX/RX SYNC filtering.
+   * BUGFIX: previously "mask" was assigned twice (0xffffffff then 0)
+   * and "code" was never cleared, leaving mask==0 (match-all) instead
+   * of the documented disabled state.
+   */
+  pDev->sfilter.mask = 0xffffffff;
+  pDev->sfilter.code = 0x00000000;
+
+  /* disable Sync interrupt */
+  SPIN_LOCK_IRQ(&pDev->devlock, oldLevel);
+  pDev->regs->imr = READ_REG(&pDev->regs->imr) & ~(GRCAN_RXSYNC_IRQ|GRCAN_TXSYNC_IRQ);
+  SPIN_UNLOCK_IRQ(&pDev->devlock, oldLevel);
+ }else{
+  /* Save filter */
+  pDev->sfilter = *filter;
+
+  /* Enable Sync interrupt */
+  SPIN_LOCK_IRQ(&pDev->devlock, oldLevel);
+  pDev->regs->imr = READ_REG(&pDev->regs->imr) | (GRCAN_RXSYNC_IRQ|GRCAN_TXSYNC_IRQ);
+  SPIN_UNLOCK_IRQ(&pDev->devlock, oldLevel);
+ }
+ /* Set Sync RX/TX filter */
+ grcan_hw_sync(pDev->regs,&pDev->sfilter);
+
+ return 0;
+}
+
+/* Read the raw GRCAN status register into data[0]. Returns 0, -1 on
+ * NULL argument.
+ */
+int grcan_get_status(void* d, unsigned int *data)
+{
+ struct grcan_priv *pDev = d;
+
+ FUNCDBG();
+
+ if ( !data )
+  return -1;
+
+ /* Read out the status register from the GRCAN core */
+ data[0] = READ_REG(&pDev->regs->stat);
+
+ return 0;
+}
+
+/* Error indicators */
+#define GRCAN_IRQ_ERRORS \
+ (GRCAN_RXAHBERR_IRQ | GRCAN_TXAHBERR_IRQ | GRCAN_OFF_IRQ)
+#define GRCAN_STAT_ERRORS (GRCAN_STAT_AHBERR | GRCAN_STAT_OFF)
+/* Warning & RX/TX sync indicators */
+#define GRCAN_IRQ_WARNS \
+ (GRCAN_ERR_IRQ | GRCAN_OR_IRQ | GRCAN_TXLOSS_IRQ | \
+ GRCAN_RXSYNC_IRQ | GRCAN_TXSYNC_IRQ)
+#define GRCAN_STAT_WARNS (GRCAN_STAT_OR | GRCAN_STAT_PASS)
+
+/* GRCAN interrupt service routine.
+ *
+ * Fatal errors (bus-off, AHB error) stop the hardware, record the new
+ * state and wake all blocked threads via grcan_sw_stop(). Warning
+ * conditions only bump statistics counters. RX/TX/TXEMPTY pointer
+ * interrupts are masked here (re-enabled by the waiting thread) and the
+ * corresponding semaphore is released. All pending bits are acknowledged
+ * through picr on exit.
+ */
+static void grcan_interrupt(void *arg)
+{
+ struct grcan_priv *pDev = arg;
+ unsigned int status = READ_REG(&pDev->regs->pimsr);
+ unsigned int canstat = READ_REG(&pDev->regs->stat);
+ unsigned int imr_clear;
+ SPIN_ISR_IRQFLAGS(irqflags);
+
+ /* Spurious IRQ call? */
+ if ( !status && !canstat )
+  return;
+
+ if (pDev->started != STATE_STARTED) {
+  DBGC(DBG_STATE, "not STARTED (unexpected interrupt)\n");
+  pDev->regs->picr = status;
+  return;
+ }
+
+ FUNCDBG();
+
+ if ( (status & GRCAN_IRQ_ERRORS) || (canstat & GRCAN_STAT_ERRORS) ) {
+  /* Bus-off condition interrupt
+   * The link is brought down by hardware, we wake all threads
+   * that is blocked in read/write calls and stop futher calls
+   * to read/write until user has called ioctl(fd,START,0).
+   */
+  SPIN_LOCK(&pDev->devlock, irqflags);
+  DBGC(DBG_STATE, "STARTED->BUSOFF|AHBERR\n");
+  pDev->stats.ints++;
+  if ((status & GRCAN_OFF_IRQ) || (canstat & GRCAN_STAT_OFF)) {
+   /* CAN Bus-off interrupt */
+   DBGC(DBG_STATE, "BUSOFF: status: 0x%x, canstat: 0x%x\n",
+    status, canstat);
+   pDev->started = STATE_BUSOFF;
+   pDev->stats.busoff_cnt++;
+  } else {
+   /* RX or Tx AHB Error interrupt */
+   printk("AHBERROR: status: 0x%x, canstat: 0x%x\n",
+    status, canstat);
+   pDev->started = STATE_AHBERR;
+   pDev->stats.ahberr_cnt++;
+  }
+  grcan_hw_stop(pDev); /* this mask all IRQ sources */
+  pDev->regs->picr = 0x1ffff; /* clear all interrupts */
+  /*
+   * Prevent driver from affecting bus. Driver can be started
+   * again with grcan_start().
+   */
+  SPIN_UNLOCK(&pDev->devlock, irqflags);
+
+  /* Release semaphores to wake blocked threads. */
+  grcan_sw_stop(pDev);
+
+  /*
+   * NOTE: Another interrupt may be pending now so ISR could be
+   * executed one more time aftert this (first) return.
+   */
+  return;
+ }
+
+ /* Mask interrupts in one place under spin-lock. */
+ imr_clear = status & (GRCAN_RXIRQ_IRQ | GRCAN_TXIRQ_IRQ | GRCAN_TXEMPTY_IRQ);
+
+ SPIN_LOCK(&pDev->devlock, irqflags);
+
+ /* Increment number of interrupts counter */
+ pDev->stats.ints++;
+ if ((status & GRCAN_IRQ_WARNS) || (canstat & GRCAN_STAT_WARNS)) {
+
+  if ( (status & GRCAN_ERR_IRQ) || (canstat & GRCAN_STAT_PASS) ) {
+   /* Error-Passive interrupt */
+   pDev->stats.passive_cnt++;
+  }
+
+  if ( (status & GRCAN_OR_IRQ) || (canstat & GRCAN_STAT_OR) ) {
+   /* Over-run during reception interrupt */
+   pDev->stats.overrun_cnt++;
+  }
+
+  if ( status & GRCAN_TXLOSS_IRQ ) {
+   pDev->stats.txloss_cnt++;
+  }
+
+  if ( status & GRCAN_TXSYNC_IRQ ) {
+   /* TxSync message transmitted interrupt */
+   pDev->stats.txsync_cnt++;
+  }
+
+  if ( status & GRCAN_RXSYNC_IRQ ) {
+   /* RxSync message received interrupt */
+   pDev->stats.rxsync_cnt++;
+  }
+ }
+
+ if (imr_clear) {
+  pDev->regs->imr = READ_REG(&pDev->regs->imr) & ~imr_clear;
+
+  SPIN_UNLOCK(&pDev->devlock, irqflags);
+
+  if ( status & GRCAN_RXIRQ_IRQ ) {
+   /* RX IRQ pointer interrupt */
+   rtems_semaphore_release(pDev->rx_sem);
+  }
+
+  if ( status & GRCAN_TXIRQ_IRQ ) {
+   /* TX IRQ pointer interrupt */
+   rtems_semaphore_release(pDev->tx_sem);
+  }
+
+  if (status & GRCAN_TXEMPTY_IRQ ) {
+   rtems_semaphore_release(pDev->txempty_sem);
+  }
+ } else {
+  SPIN_UNLOCK(&pDev->devlock, irqflags);
+ }
+
+ /* Clear IRQs */
+ pDev->regs->picr = status;
+}
diff --git a/bsps/shared/grlib/can/occan.c b/bsps/shared/grlib/can/occan.c
new file mode 100644
index 0000000000..59b4f234f6
--- /dev/null
+++ b/bsps/shared/grlib/can/occan.c
@@ -0,0 +1,1971 @@
+/* OC_CAN driver
+ *
+ * COPYRIGHT (c) 2007.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <bsp.h>
+#include <rtems/bspIo.h> /* printk */
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/occan.h>
+
+#include <grlib/grlib_impl.h>
+
+/* RTEMS -> ERRNO decoding table
+
+rtems_assoc_t errno_assoc[] = {
+ { "OK", RTEMS_SUCCESSFUL, 0 },
+ { "BUSY", RTEMS_RESOURCE_IN_USE, EBUSY },
+ { "INVALID NAME", RTEMS_INVALID_NAME, EINVAL },
+ { "NOT IMPLEMENTED", RTEMS_NOT_IMPLEMENTED, ENOSYS },
+ { "TIMEOUT", RTEMS_TIMEOUT, ETIMEDOUT },
+ { "NO MEMORY", RTEMS_NO_MEMORY, ENOMEM },
+ { "NO DEVICE", RTEMS_UNSATISFIED, ENODEV },
+ { "INVALID NUMBER", RTEMS_INVALID_NUMBER, EBADF},
+ { "NOT RESOURCE OWNER", RTEMS_NOT_OWNER_OF_RESOURCE, EPERM},
+ { "IO ERROR", RTEMS_IO_ERROR, EIO},
+ { 0, 0, 0 },
+};
+
+*/
+
+/*
+#undef DEBUG
+#undef DEBUG_EXTRA
+#undef DEBUG_PRINT_REGMAP
+*/
+
+/* default to byte regs */
+#ifndef OCCAN_WORD_REGS
+ #define OCCAN_BYTE_REGS
+#else
+ #undef OCCAN_BYTE_REGS
+#endif
+
+/* Enable Fixup code older OCCAN with a TX IRQ-FLAG bug */
+#define OCCAN_TX_IRQ_FLAG_FIXUP 1
+
+#define OCCAN_WORD_REG_OFS 0x80
+#define OCCAN_NCORE_OFS 0x100
+#define DEFAULT_CLKDIV 0x7
+#define DEFAULT_EXTENDED_MODE 1
+#define DEFAULT_RX_FIFO_LEN 64
+#define DEFAULT_TX_FIFO_LEN 64
+
+/* not implemented yet */
+#undef REDUNDANT_CHANNELS
+
+/* Define common debug macros */
+#ifdef DEBUG
+ #define DBG(fmt, vargs...) printk(fmt, ## vargs )
+#else
+ #define DBG(fmt, vargs...)
+#endif
+
+/* fifo interface */
+typedef struct {
+ int cnt;
+ int ovcnt; /* overwrite count */
+ int full; /* 1 = base contain cnt CANMsgs, tail==head */
+ CANMsg *tail, *head;
+ CANMsg *base;
+ CANMsg fifoarea[0];
+} occan_fifo;
+
+/* PELICAN */
+
+typedef struct {
+ unsigned char
+ mode,
+ cmd,
+ status,
+ intflags,
+ inten,
+ resv0,
+ bustim0,
+ bustim1,
+ unused0[2],
+ resv1,
+ arbcode,
+ errcode,
+ errwarn,
+ rx_err_cnt,
+ tx_err_cnt,
+ rx_fi_xff; /* this is also acceptance code 0 in reset mode */
+ union{
+ struct {
+ unsigned char id[2];
+ unsigned char data[8];
+ unsigned char next_in_fifo[2];
+ } rx_sff;
+ struct {
+ unsigned char id[4];
+ unsigned char data[8];
+ } rx_eff;
+ struct {
+ unsigned char id[2];
+ unsigned char data[8];
+ unsigned char unused[2];
+ } tx_sff;
+ struct {
+ unsigned char id[4];
+ unsigned char data[8];
+ } tx_eff;
+ struct {
+ unsigned char code[3];
+ unsigned char mask[4];
+ } rst_accept;
+ } msg;
+ unsigned char rx_msg_cnt;
+ unsigned char unused1;
+ unsigned char clkdiv;
+} pelican8_regs;
+
+typedef struct {
+ unsigned char
+ mode, unused0[3],
+ cmd, unused1[3],
+ status, unused2[3],
+ intflags, unused3[3],
+ inten, unused4[3],
+ resv0, unused5[3],
+ bustim0, unused6[3],
+ bustim1, unused7[3],
+ unused8[8],
+ resv1,unused9[3],
+ arbcode,unused10[3],
+ errcode,unused11[3],
+ errwarn,unused12[3],
+ rx_err_cnt,unused13[3],
+ tx_err_cnt,unused14[3],
+ rx_fi_xff, unused15[3]; /* this is also acceptance code 0 in reset mode */
+ /* make sure to use pointers when writing (byte access) to these registers */
+ union{
+ struct {
+ unsigned int id[2];
+ unsigned int data[8];
+ unsigned int next_in_fifo[2];
+ } rx_sff;
+ struct {
+ unsigned int id[4];
+ unsigned int data[8];
+ } rx_eff;
+ struct {
+ unsigned int id[2];
+ unsigned int data[8];
+ } tx_sff;
+ struct {
+ unsigned int id[4];
+ unsigned int data[8];
+ } tx_eff;
+ struct {
+ unsigned int code[3];
+ unsigned int mask[4];
+ } rst_accept;
+ } msg;
+ unsigned char rx_msg_cnt,unused16[3];
+ unsigned char unused17[4];
+ unsigned char clkdiv,unused18[3];
+} pelican32_regs;
+
+#ifdef OCCAN_BYTE_REGS
+#define pelican_regs pelican8_regs
+#else
+#define pelican_regs pelican32_regs
+#endif
+
+
+#define MAX_TSEG2 7
+#define MAX_TSEG1 15
+
+#if 0
+typedef struct {
+ unsigned char brp;
+ unsigned char sjw;
+ unsigned char tseg1;
+ unsigned char tseg2;
+ unsigned char sam;
+} occan_speed_regs;
+#endif
+typedef struct {
+ unsigned char btr0;
+ unsigned char btr1;
+} occan_speed_regs;
+
+typedef struct {
+ struct drvmgr_dev *dev;
+ char devName[32];
+ SPIN_DECLARE(devlock);
+
+ /* hardware shortcuts */
+ pelican_regs *regs;
+ int byte_regs;
+ int irq;
+ occan_speed_regs timing;
+ int channel; /* 0=default, 1=second bus */
+ int single_mode;
+ unsigned int sys_freq_hz;
+
+ /* driver state */
+ rtems_id devsem;
+ rtems_id txsem;
+ rtems_id rxsem;
+ int open;
+ int started;
+ int rxblk;
+ int txblk;
+ int sending;
+ unsigned int status;
+ occan_stats stats;
+
+ /* rx&tx fifos */
+ occan_fifo *rxfifo;
+ occan_fifo *txfifo;
+
+ /* Config */
+ unsigned int speed; /* speed in HZ */
+ unsigned char acode[4];
+ unsigned char amask[4];
+} occan_priv;
+
+/********** FIFO INTERFACE **********/
+static void occan_fifo_put(occan_fifo *fifo);
+static CANMsg *occan_fifo_put_claim(occan_fifo *fifo, int force);
+static occan_fifo *occan_fifo_create(int cnt);
+static void occan_fifo_free(occan_fifo *fifo);
+static int occan_fifo_full(occan_fifo *fifo);
+static int occan_fifo_empty(occan_fifo *fifo);
+static void occan_fifo_get(occan_fifo *fifo);
+static CANMsg *occan_fifo_claim_get(occan_fifo *fifo);
+static void occan_fifo_clr(occan_fifo *fifo);
+
+/**** Hardware related Interface ****/
+static int occan_calc_speedregs(unsigned int clock_hz, unsigned int rate, occan_speed_regs *result);
+static int occan_set_speedregs(occan_priv *priv, occan_speed_regs *timing);
+static void pelican_init(occan_priv *priv);
+static void pelican_open(occan_priv *priv);
+static int pelican_start(occan_priv *priv);
+static void pelican_stop(occan_priv *priv);
+static int pelican_send(occan_priv *can, CANMsg *msg);
+static void pelican_set_accept(occan_priv *priv, unsigned char *acode, unsigned char *amask);
+void occan_interrupt(void *arg);
+#ifdef DEBUG_PRINT_REGMAP
+static void pelican_regadr_print(pelican_regs *regs);
+#endif
+
+/***** Driver related interface *****/
+static rtems_device_driver occan_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver occan_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver occan_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver occan_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver occan_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver occan_initialize(rtems_device_major_number major, rtems_device_minor_number unused, void *arg);
+
+#define OCCAN_DRIVER_TABLE_ENTRY { occan_initialize, occan_open, occan_close, occan_read, occan_write, occan_ioctl }
+static rtems_driver_address_table occan_driver = OCCAN_DRIVER_TABLE_ENTRY;
+
+
+/* Read byte bypassing */
+
+
+/* Bypass cache */
+#define READ_REG(priv, address) occan_reg_read(priv, (unsigned int)address)
+#define WRITE_REG(priv, address, data) occan_reg_write(priv, (unsigned int)address, data)
+
+/* Read one PELICAN register, bypassing the data cache.
+ *
+ * For byte-mapped cores the address is used as-is; for word-mapped
+ * cores the register offset (low 7 bits) is scaled by 4 to skip the
+ * padding bytes between registers. Access is always a single byte.
+ */
+static unsigned int occan_reg_read(occan_priv *priv, unsigned int address)
+{
+ unsigned int adr;
+ if ( priv->byte_regs ) {
+  adr = address;
+ } else {
+  /* Word accessed registers */
+  adr = (address & (~0x7f)) | ((address & 0x7f)<<2);
+ }
+ return *(volatile unsigned char *)adr;
+}
+
+/* Write one PELICAN register, bypassing the data cache.
+ *
+ * Mirrors occan_reg_read(): byte-mapped cores use the address directly,
+ * word-mapped cores scale the register offset (low 7 bits) by 4. The
+ * store is always a single byte through a volatile pointer.
+ */
+static void occan_reg_write(
+ occan_priv *priv,
+ unsigned int address,
+ unsigned char value)
+{
+ unsigned int adr;
+ if ( priv->byte_regs ) {
+  adr = address;
+ } else {
+  /* Word accessed registers */
+  adr = (address & (~0x7f)) | ((address & 0x7f)<<2);
+ }
+ *(volatile unsigned char *)adr = value; /* fixed stray ";;" */
+}
+
+/* Mode register bit definitions */
+#define PELICAN_MOD_RESET 0x1
+#define PELICAN_MOD_LISTEN 0x2
+#define PELICAN_MOD_SELFTEST 0x4
+#define PELICAN_MOD_ACCEPT 0x8
+
+/* Command register bit definitions */
+#define PELICAN_CMD_TXREQ 0x1
+#define PELICAN_CMD_ABORT 0x2
+#define PELICAN_CMD_RELRXBUF 0x4
+#define PELICAN_CMD_CLRDOVR 0x8
+#define PELICAN_CMD_SELFRXRQ 0x10
+
+/* Status register bit definitions */
+#define PELICAN_STAT_RXBUF 0x1
+#define PELICAN_STAT_DOVR 0x2
+#define PELICAN_STAT_TXBUF 0x4
+#define PELICAN_STAT_TXOK 0x8
+#define PELICAN_STAT_RX 0x10
+#define PELICAN_STAT_TX 0x20
+#define PELICAN_STAT_ERR 0x40
+#define PELICAN_STAT_BUS 0x80
+
+/* Interrupt register bit definitions */
+#define PELICAN_IF_RX 0x1
+#define PELICAN_IF_TX 0x2
+#define PELICAN_IF_ERRW 0x4
+#define PELICAN_IF_DOVR 0x8
+#define PELICAN_IF_ERRP 0x20
+#define PELICAN_IF_ARB 0x40
+#define PELICAN_IF_BUS 0x80
+
+/* Interrupt Enable register bit definitions */
+#define PELICAN_IE_RX 0x1
+#define PELICAN_IE_TX 0x2
+#define PELICAN_IE_ERRW 0x4
+#define PELICAN_IE_DOVR 0x8
+#define PELICAN_IE_ERRP 0x20
+#define PELICAN_IE_ARB 0x40
+#define PELICAN_IE_BUS 0x80
+
+/* Arbitration lost capture register bit definitions */
+#define PELICAN_ARB_BITS 0x1f
+
+/* register bit definitions */
+#define PELICAN_ECC_CODE_BIT 0x00
+#define PELICAN_ECC_CODE_FORM 0x40
+#define PELICAN_ECC_CODE_STUFF 0x80
+#define PELICAN_ECC_CODE_OTHER 0xc0
+#define PELICAN_ECC_CODE 0xc0
+
+#define PELICAN_ECC_DIR 0x20
+#define PELICAN_ECC_SEG 0x1f
+
+/* Clock divider register bit definitions */
+#define PELICAN_CDR_DIV 0x7
+#define PELICAN_CDR_OFF 0x8
+#define PELICAN_CDR_MODE 0x80
+#define PELICAN_CDR_MODE_PELICAN 0x80
+#define PELICAN_CDR_MODE_BITS 7
+#define PELICAN_CDR_MODE_BASICAN 0x00
+
+
+/* register bit definitions */
+#define OCCAN_BUSTIM_SJW 0xc0
+#define OCCAN_BUSTIM_BRP 0x3f
+#define OCCAN_BUSTIM_SJW_BIT 6
+
+#define OCCAN_BUSTIM_SAM 0x80
+#define OCCAN_BUSTIM_TSEG2 0x70
+#define OCCAN_BUSTIM_TSEG2_BIT 4
+#define OCCAN_BUSTIM_TSEG1 0x0f
+
+/* register bit definitions */
+/*
+#define PELICAN_S_ 0x1
+#define PELICAN_S_ 0x2
+#define PELICAN_S_ 0x4
+#define PELICAN_S_ 0x8
+#define PELICAN_S_ 0x10
+#define PELICAN_S_ 0x20
+#define PELICAN_S_ 0x40
+#define PELICAN_S_ 0x80
+*/
+
+/* Non-zero once the OCCAN I/O driver has been registered with the RTEMS
+ * I/O manager. Registration must happen only once even when several
+ * OCCAN cores are found (see occan_init3()).
+ */
+static int occan_driver_io_registered = 0;
+/* Major number handed back by the I/O manager at registration time */
+static rtems_device_major_number occan_driver_io_major = 0;
+
+/******************* Driver manager interface ***********************/
+
+/* Driver prototypes */
+int occan_register_io(rtems_device_major_number *m);
+int occan_device_init(occan_priv *pDev);
+
+int occan_init2(struct drvmgr_dev *dev);
+int occan_init3(struct drvmgr_dev *dev);
+
+/* Driver manager operations: only init levels 2 and 3 are implemented,
+ * there is no remove or info support.
+ */
+struct drvmgr_drv_ops occan_ops =
+{
+	.init = {NULL, occan_init2, occan_init3, NULL},
+	.remove = NULL,
+	.info = NULL
+};
+
+/* AMBA Plug&Play device IDs this driver binds to */
+struct amba_dev_id occan_ids[] =
+{
+	{VENDOR_GAISLER, GAISLER_CANAHB},
+	{0, 0}		/* Mark end of table */
+};
+
+/* Driver description registered with the driver manager.
+ * NOTE(review): positional initializer — field meanings are given by the
+ * trailing comments; keep in sync with struct amba_drv_info.
+ */
+struct amba_drv_info occan_drv_info =
+{
+	{
+		DRVMGR_OBJ_DRV,			/* Driver */
+		NULL,				/* Next driver */
+		NULL,				/* Device list */
+		DRIVER_AMBAPP_GAISLER_OCCAN_ID,	/* Driver ID */
+		"OCCAN_DRV",			/* Driver Name */
+		DRVMGR_BUS_TYPE_AMBAPP,		/* Bus Type */
+		&occan_ops,
+		NULL,				/* Funcs */
+		0,				/* No devices yet */
+		0,
+	},
+	&occan_ids[0]
+};
+
+/* Register the OCCAN driver with the driver manager. Called by the BSP
+ * driver configuration; devices are probed and set up later through the
+ * occan_ops init levels.
+ */
+void occan_register_drv (void)
+{
+	DBG("Registering OCCAN driver\n");
+	drvmgr_drv_register(&occan_drv_info.general);
+}
+
+/* Driver manager init level 2: allocate zeroed per-device private data
+ * and link it to the device. The hardware is not touched here.
+ *
+ * \return DRVMGR_OK on success, DRVMGR_NOMEM if allocation fails.
+ */
+int occan_init2(struct drvmgr_dev *dev)
+{
+	occan_priv *priv;
+
+	DBG("OCCAN[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+	priv = dev->priv = grlib_calloc(1, sizeof(*priv));
+	if ( !priv )
+		return DRVMGR_NOMEM;
+	priv->dev = dev;
+
+	return DRVMGR_OK;
+}
+
+/* Driver manager init level 3: register the I/O driver (once for all
+ * cores), initialize this device (registers, semaphores) and create its
+ * /dev node. The node name is prefixed by the bus prefix when available
+ * so devices behind different buses get unique names.
+ *
+ * \return DRVMGR_OK on success, DRVMGR_FAIL on any failure.
+ */
+int occan_init3(struct drvmgr_dev *dev)
+{
+	occan_priv *priv;
+	char prefix[32];
+	rtems_status_code status;
+
+	priv = dev->priv;
+
+	/* Do initialization */
+
+	if ( occan_driver_io_registered == 0) {
+		/* Register the I/O driver only once for all cores */
+		if ( occan_register_io(&occan_driver_io_major) ) {
+			/* Failed to register I/O driver */
+			dev->priv = NULL;
+			return DRVMGR_FAIL;
+		}
+
+		occan_driver_io_registered = 1;
+	}
+
+	/* I/O system registered and initialized
+	 * Now we take care of device initialization.
+	 */
+
+	if ( occan_device_init(priv) ) {
+		return DRVMGR_FAIL;
+	}
+
+	/* Get Filesystem name prefix */
+	prefix[0] = '\0';
+	if ( drvmgr_get_dev_prefix(dev, prefix) ) {
+		/* Failed to get prefix, make sure of a unique FS name
+		 * by using the driver minor.
+		 */
+		/* NOTE(review): assumes priv->devName is large enough for the
+		 * formatted name — TODO confirm against occan_priv declaration */
+		sprintf(priv->devName, "/dev/occan%d", dev->minor_drv);
+	} else {
+		/* Got special prefix, this means we have a bus prefix
+		 * And we should use our "bus minor"
+		 */
+		sprintf(priv->devName, "/dev/%soccan%d", prefix, dev->minor_bus);
+	}
+
+	/* Register Device */
+	DBG("OCCAN[%d]: Registering %s\n", dev->minor_drv, priv->devName);
+	status = rtems_io_register_name(priv->devName, occan_driver_io_major, dev->minor_drv);
+	if (status != RTEMS_SUCCESSFUL) {
+		return DRVMGR_FAIL;
+	}
+
+	return DRVMGR_OK;
+}
+
+/******************* Driver Implementation ***********************/
+
+/* Register the OCCAN I/O driver with the RTEMS I/O manager and return
+ * the assigned major number through *m.
+ *
+ * \return 0 on success, -1 on failure (cause printed on the console).
+ */
+int occan_register_io(rtems_device_major_number *m)
+{
+	rtems_status_code rc;
+
+	rc = rtems_io_register_driver(0, &occan_driver, m);
+	if (rc == RTEMS_SUCCESSFUL) {
+		DBG("OCCAN driver successfully registered, major: %d\n", *m);
+		return 0;
+	}
+
+	/* Registration failed: report the specific cause */
+	if (rc == RTEMS_TOO_MANY)
+		printk("OCCAN rtems_io_register_driver failed: RTEMS_TOO_MANY\n");
+	else if (rc == RTEMS_INVALID_NUMBER)
+		printk("OCCAN rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n");
+	else if (rc == RTEMS_RESOURCE_IN_USE)
+		printk("OCCAN rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n");
+	else
+		printk("OCCAN rtems_io_register_driver failed\n");
+
+	return -1;
+}
+
+/* Initialize one OCCAN device: extract register base and IRQ from the
+ * AMBA Plug&Play information, read the core frequency, create the
+ * device/TX/RX semaphores and reset the hardware.
+ *
+ * Fix: on partial failure the semaphores created so far are deleted so
+ * they are not leaked (previously devsem leaked if txsem creation
+ * failed, and devsem+txsem leaked if rxsem creation failed). Failure
+ * paths now consistently return -1 (the caller only tests non-zero).
+ *
+ * \return 0 on success, -1 on failure.
+ */
+int occan_device_init(occan_priv *pDev)
+{
+	struct amba_dev_info *ambadev;
+	struct ambapp_core *pnpinfo;
+	rtems_status_code status;
+	int minor;
+
+	/* Get device information from AMBA PnP information */
+	ambadev = (struct amba_dev_info *)pDev->dev->businfo;
+	if ( ambadev == NULL ) {
+		return -1;
+	}
+	pnpinfo = &ambadev->info;
+	pDev->irq = pnpinfo->irq;
+	/* Several cores may share one AHB slave area, each core is
+	 * OCCAN_NCORE_OFS bytes apart.
+	 */
+	pDev->regs = (pelican_regs *)(pnpinfo->ahb_slv->start[0] + OCCAN_NCORE_OFS*pnpinfo->index);
+	pDev->byte_regs = 1;
+	minor = pDev->dev->minor_drv;
+
+	/* Get frequency in Hz */
+	if ( drvmgr_freq_get(pDev->dev, DEV_AHB_SLV, &pDev->sys_freq_hz) ) {
+		return -1;
+	}
+
+	DBG("OCCAN frequency: %d Hz\n", pDev->sys_freq_hz);
+
+	/* initialize software */
+	pDev->open = 0;
+	pDev->started = 0; /* Needed for spurious interrupts */
+	pDev->rxfifo = NULL;
+	pDev->txfifo = NULL;
+	status = rtems_semaphore_create(
+		rtems_build_name('C', 'd', 'v', '0'+minor),
+		1,
+		RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | RTEMS_NO_INHERIT_PRIORITY | \
+		RTEMS_NO_PRIORITY_CEILING,
+		0,
+		&pDev->devsem);
+	if ( status != RTEMS_SUCCESSFUL ){
+		printk("OCCAN[%d]: Failed to create dev semaphore, (%d)\n\r",minor, status);
+		return -1;
+	}
+	status = rtems_semaphore_create(
+		rtems_build_name('C', 't', 'x', '0'+minor),
+		0,
+		RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | RTEMS_NO_INHERIT_PRIORITY | \
+		RTEMS_NO_PRIORITY_CEILING,
+		0,
+		&pDev->txsem);
+	if ( status != RTEMS_SUCCESSFUL ){
+		printk("OCCAN[%d]: Failed to create tx semaphore, (%d)\n\r",minor, status);
+		rtems_semaphore_delete(pDev->devsem);
+		return -1;
+	}
+	status = rtems_semaphore_create(
+		rtems_build_name('C', 'r', 'x', '0'+minor),
+		0,
+		RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | RTEMS_NO_INHERIT_PRIORITY | \
+		RTEMS_NO_PRIORITY_CEILING,
+		0,
+		&pDev->rxsem);
+	if ( status != RTEMS_SUCCESSFUL ){
+		printk("OCCAN[%d]: Failed to create rx semaphore, (%d)\n\r",minor, status);
+		rtems_semaphore_delete(pDev->txsem);
+		rtems_semaphore_delete(pDev->devsem);
+		return -1;
+	}
+
+	/* hardware init/reset */
+	pelican_init(pDev);
+
+#ifdef DEBUG_PRINT_REGMAP
+	pelican_regadr_print(pDev->regs);
+#endif
+
+	return 0;
+}
+
+
+#ifdef DEBUG
+/* Debug helper: dump the contents of all readable PELICAN registers.
+ *
+ * Fixes: pointers were printed with %lx but cast to (unsigned int)
+ * (format/argument mismatch, undefined behavior per the C standard) and
+ * one pointer was passed with no cast at all; the three acceptance code
+ * registers were all labelled "ACR1" — now ACR1/ACR2/ACR3 as in
+ * pelican_regadr_print().
+ */
+static void pelican_regs_print(occan_priv *pDev){
+	pelican_regs *regs = pDev->regs;
+	printk("--- PELICAN 0x%lx ---\n\r",(unsigned long)regs);
+	printk(" MODE: 0x%02x\n\r",READ_REG(pDev, &regs->mode));
+	printk(" CMD: 0x%02x\n\r",READ_REG(pDev, &regs->cmd));
+	printk(" STATUS: 0x%02x\n\r",READ_REG(pDev, &regs->status));
+	/*printk(" INTFLG: 0x%02x\n\r",READ_REG(pDev, &regs->intflags));*/
+	printk(" INTEN: 0x%02x\n\r",READ_REG(pDev, &regs->inten));
+	printk(" BTR0: 0x%02x\n\r",READ_REG(pDev, &regs->bustim0));
+	printk(" BTR1: 0x%02x\n\r",READ_REG(pDev, &regs->bustim1));
+	printk(" ARBCODE: 0x%02x\n\r",READ_REG(pDev, &regs->arbcode));
+	printk(" ERRCODE: 0x%02x\n\r",READ_REG(pDev, &regs->errcode));
+	printk(" ERRWARN: 0x%02x\n\r",READ_REG(pDev, &regs->errwarn));
+	printk(" RX_ERR_CNT: 0x%02x\n\r",READ_REG(pDev, &regs->rx_err_cnt));
+	printk(" TX_ERR_CNT: 0x%02x\n\r",READ_REG(pDev, &regs->tx_err_cnt));
+	if ( READ_REG(pDev, &regs->mode) & PELICAN_MOD_RESET ){
+		/* in reset mode it is possible to read acceptance filters */
+		printk(" ACR0: 0x%02x (0x%lx)\n\r",READ_REG(pDev, &regs->rx_fi_xff),(unsigned long)&regs->rx_fi_xff);
+		printk(" ACR1: 0x%02x (0x%lx)\n\r",READ_REG(pDev, &regs->msg.rst_accept.code[0]),(unsigned long)&regs->msg.rst_accept.code[0]);
+		printk(" ACR2: 0x%02x (0x%lx)\n\r",READ_REG(pDev, &regs->msg.rst_accept.code[1]),(unsigned long)&regs->msg.rst_accept.code[1]);
+		printk(" ACR3: 0x%02x (0x%lx)\n\r",READ_REG(pDev, &regs->msg.rst_accept.code[2]),(unsigned long)&regs->msg.rst_accept.code[2]);
+		printk(" AMR0: 0x%02x (0x%lx)\n\r",READ_REG(pDev, &regs->msg.rst_accept.mask[0]),(unsigned long)&regs->msg.rst_accept.mask[0]);
+		printk(" AMR1: 0x%02x (0x%lx)\n\r",READ_REG(pDev, &regs->msg.rst_accept.mask[1]),(unsigned long)&regs->msg.rst_accept.mask[1]);
+		printk(" AMR2: 0x%02x (0x%lx)\n\r",READ_REG(pDev, &regs->msg.rst_accept.mask[2]),(unsigned long)&regs->msg.rst_accept.mask[2]);
+		printk(" AMR3: 0x%02x (0x%lx)\n\r",READ_REG(pDev, &regs->msg.rst_accept.mask[3]),(unsigned long)&regs->msg.rst_accept.mask[3]);
+
+	}else{
+		printk(" RXFI_XFF: 0x%02x\n\r",READ_REG(pDev, &regs->rx_fi_xff));
+	}
+	printk(" RX_MSG_CNT: 0x%02x\n\r",READ_REG(pDev, &regs->rx_msg_cnt));
+	printk(" CLKDIV: 0x%02x\n\r",READ_REG(pDev, &regs->clkdiv));
+	printk("-------------------\n\r");
+}
+#endif
+
+#ifdef DEBUG_PRINT_REGMAP
+/* Debug helper: print the ADDRESS of every PELICAN register, useful to
+ * verify the register map layout (byte vs word spaced cores).
+ *
+ * Fixes: all pointers were printed with %lx but cast to (unsigned int)
+ * (format/argument mismatch, undefined behavior per the C standard) —
+ * now cast to (unsigned long); the SFF (standard frame) sections were
+ * mislabelled "Extended" by copy-paste.
+ */
+static void pelican_regadr_print(pelican_regs *regs){
+	printk("--- PELICAN 0x%lx ---\n\r",(unsigned long)regs);
+	printk(" MODE: 0x%lx\n\r",(unsigned long)&regs->mode);
+	printk(" CMD: 0x%lx\n\r",(unsigned long)&regs->cmd);
+	printk(" STATUS: 0x%lx\n\r",(unsigned long)&regs->status);
+	/*printk(" INTFLG: 0x%lx\n\r",&regs->intflags);*/
+	printk(" INTEN: 0x%lx\n\r",(unsigned long)&regs->inten);
+	printk(" BTR0: 0x%lx\n\r",(unsigned long)&regs->bustim0);
+	printk(" BTR1: 0x%lx\n\r",(unsigned long)&regs->bustim1);
+	printk(" ARBCODE: 0x%lx\n\r",(unsigned long)&regs->arbcode);
+	printk(" ERRCODE: 0x%lx\n\r",(unsigned long)&regs->errcode);
+	printk(" ERRWARN: 0x%lx\n\r",(unsigned long)&regs->errwarn);
+	printk(" RX_ERR_CNT: 0x%lx\n\r",(unsigned long)&regs->rx_err_cnt);
+	printk(" TX_ERR_CNT: 0x%lx\n\r",(unsigned long)&regs->tx_err_cnt);
+
+	/* in reset mode it is possible to read acceptance filters */
+	printk(" RXFI_XFF: 0x%lx\n\r",(unsigned long)&regs->rx_fi_xff);
+
+	/* reset registers */
+	printk(" ACR0: 0x%lx\n\r",(unsigned long)&regs->rx_fi_xff);
+	printk(" ACR1: 0x%lx\n\r",(unsigned long)&regs->msg.rst_accept.code[0]);
+	printk(" ACR2: 0x%lx\n\r",(unsigned long)&regs->msg.rst_accept.code[1]);
+	printk(" ACR3: 0x%lx\n\r",(unsigned long)&regs->msg.rst_accept.code[2]);
+	printk(" AMR0: 0x%lx\n\r",(unsigned long)&regs->msg.rst_accept.mask[0]);
+	printk(" AMR1: 0x%lx\n\r",(unsigned long)&regs->msg.rst_accept.mask[1]);
+	printk(" AMR2: 0x%lx\n\r",(unsigned long)&regs->msg.rst_accept.mask[2]);
+	printk(" AMR3: 0x%lx\n\r",(unsigned long)&regs->msg.rst_accept.mask[3]);
+
+	/* TX Extended */
+	printk(" EFFTX_ID[0]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_eff.id[0]);
+	printk(" EFFTX_ID[1]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_eff.id[1]);
+	printk(" EFFTX_ID[2]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_eff.id[2]);
+	printk(" EFFTX_ID[3]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_eff.id[3]);
+
+	printk(" EFFTX_DATA[0]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_eff.data[0]);
+	printk(" EFFTX_DATA[1]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_eff.data[1]);
+	printk(" EFFTX_DATA[2]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_eff.data[2]);
+	printk(" EFFTX_DATA[3]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_eff.data[3]);
+	printk(" EFFTX_DATA[4]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_eff.data[4]);
+	printk(" EFFTX_DATA[5]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_eff.data[5]);
+	printk(" EFFTX_DATA[6]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_eff.data[6]);
+	printk(" EFFTX_DATA[7]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_eff.data[7]);
+
+	/* RX Extended */
+	printk(" EFFRX_ID[0]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_eff.id[0]);
+	printk(" EFFRX_ID[1]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_eff.id[1]);
+	printk(" EFFRX_ID[2]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_eff.id[2]);
+	printk(" EFFRX_ID[3]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_eff.id[3]);
+
+	printk(" EFFRX_DATA[0]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_eff.data[0]);
+	printk(" EFFRX_DATA[1]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_eff.data[1]);
+	printk(" EFFRX_DATA[2]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_eff.data[2]);
+	printk(" EFFRX_DATA[3]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_eff.data[3]);
+	printk(" EFFRX_DATA[4]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_eff.data[4]);
+	printk(" EFFRX_DATA[5]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_eff.data[5]);
+	printk(" EFFRX_DATA[6]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_eff.data[6]);
+	printk(" EFFRX_DATA[7]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_eff.data[7]);
+
+
+	/* RX Standard */
+	printk(" SFFRX_ID[0]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_sff.id[0]);
+	printk(" SFFRX_ID[1]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_sff.id[1]);
+
+	printk(" SFFRX_DATA[0]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_sff.data[0]);
+	printk(" SFFRX_DATA[1]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_sff.data[1]);
+	printk(" SFFRX_DATA[2]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_sff.data[2]);
+	printk(" SFFRX_DATA[3]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_sff.data[3]);
+	printk(" SFFRX_DATA[4]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_sff.data[4]);
+	printk(" SFFRX_DATA[5]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_sff.data[5]);
+	printk(" SFFRX_DATA[6]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_sff.data[6]);
+	printk(" SFFRX_DATA[7]: 0x%lx\n\r",(unsigned long)&regs->msg.rx_sff.data[7]);
+
+	/* TX Standard */
+	printk(" SFFTX_ID[0]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_sff.id[0]);
+	printk(" SFFTX_ID[1]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_sff.id[1]);
+
+	printk(" SFFTX_DATA[0]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_sff.data[0]);
+	printk(" SFFTX_DATA[1]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_sff.data[1]);
+	printk(" SFFTX_DATA[2]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_sff.data[2]);
+	printk(" SFFTX_DATA[3]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_sff.data[3]);
+	printk(" SFFTX_DATA[4]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_sff.data[4]);
+	printk(" SFFTX_DATA[5]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_sff.data[5]);
+	printk(" SFFTX_DATA[6]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_sff.data[6]);
+	printk(" SFFTX_DATA[7]: 0x%lx\n\r",(unsigned long)&regs->msg.tx_sff.data[7]);
+
+	printk(" RX_MSG_CNT: 0x%lx\n\r",(unsigned long)&regs->rx_msg_cnt);
+	printk(" CLKDIV: 0x%lx\n\r",(unsigned long)&regs->clkdiv);
+	printk("-------------------\n\r");
+}
+#endif
+
+#ifdef DEBUG
+/* Debug helper: print the software statistics counters kept by the
+ * driver (message counts, error event counts, interrupt count).
+ */
+static void occan_stat_print(occan_stats *stats){
+	printk("----Stats----\n\r");
+	printk("rx_msgs: %d\n\r",stats->rx_msgs);
+	printk("tx_msgs: %d\n\r",stats->tx_msgs);
+	printk("err_warn: %d\n\r",stats->err_warn);
+	printk("err_dovr: %d\n\r",stats->err_dovr);
+	printk("err_errp: %d\n\r",stats->err_errp);
+	printk("err_arb: %d\n\r",stats->err_arb);
+	printk("err_bus: %d\n\r",stats->err_bus);
+	printk("Int cnt: %d\n\r",stats->ints);
+	printk("tx_buf_err: %d\n\r",stats->tx_buf_error);
+	printk("-------------\n\r");
+}
+#endif
+
+/* Put the core into reset mode by writing the mode register. In reset
+ * mode the core is off the bus and configuration registers (timing,
+ * acceptance filters) become writable.
+ */
+static void pelican_init(occan_priv *priv){
+	/* Reset core */
+	WRITE_REG(priv, &priv->regs->mode, PELICAN_MOD_RESET);
+
+	/* wait for core to reset complete */
+	/*usleep(1);*/
+}
+
+/* Set up driver defaults when the device is opened: default baud rate,
+ * accept-all filters, extended clock divider mode, and compute timing
+ * registers for the default speed. Interrupts stay disabled until
+ * pelican_start().
+ */
+static void pelican_open(occan_priv *priv){
+	int ret;
+
+	/* Set defaults */
+	priv->speed = OCCAN_SPEED_250K;
+
+	/* set acceptance filters to accept all messages */
+	priv->acode[0] = 0;
+	priv->acode[1] = 0;
+	priv->acode[2] = 0;
+	priv->acode[3] = 0;
+	priv->amask[0] = 0xff;
+	priv->amask[1] = 0xff;
+	priv->amask[2] = 0xff;
+	priv->amask[3] = 0xff;
+
+	/* Set clock divider to extended mode, clkdiv not connected
+	 */
+	WRITE_REG(priv, &priv->regs->clkdiv, (1<<PELICAN_CDR_MODE_BITS) | (DEFAULT_CLKDIV & PELICAN_CDR_DIV));
+
+	ret = occan_calc_speedregs(priv->sys_freq_hz,priv->speed,&priv->timing);
+	if ( ret ){
+		/* failed to set speed for this system freq, try with 50K instead */
+		priv->speed = OCCAN_SPEED_50K;
+		occan_calc_speedregs(priv->sys_freq_hz, priv->speed,
+			&priv->timing);
+	}
+
+	/* disable all interrupts */
+	WRITE_REG(priv, &priv->regs->inten, 0);
+
+	/* clear pending interrupts by reading */
+	READ_REG(priv, &priv->regs->intflags);
+}
+
+/* Start CAN communication: program timing and acceptance filter
+ * registers (core must be in reset mode), enable interrupts and take
+ * the core out of reset. The register write order below is significant.
+ *
+ * \return 0 on success, -1 if the software fifos are not allocated.
+ */
+static int pelican_start(occan_priv *priv){
+	/* Start HW communication */
+
+	if ( !priv->rxfifo || !priv->txfifo )
+		return -1;
+
+	/* In case we were started before and stopped we
+	 * should empty the TX fifo or try to resend those
+	 * messages. We make it simple...
+	 */
+	occan_fifo_clr(priv->txfifo);
+
+	/* Clear status bits */
+	priv->status = 0;
+	priv->sending = 0;
+
+	/* clear pending interrupts */
+	READ_REG(priv, &priv->regs->intflags);
+
+	/* clear error counters */
+	WRITE_REG(priv, &priv->regs->rx_err_cnt, 0);
+	WRITE_REG(priv, &priv->regs->tx_err_cnt, 0);
+
+#ifdef REDUNDANT_CHANNELS
+	if ( (priv->channel == 0) || (priv->channel >= REDUNDANT_CHANNELS) ){
+		/* Select the first (default) channel */
+		OCCAN_SET_CHANNEL(priv,0);
+	}else{
+		/* set gpio bit, or something */
+		OCCAN_SET_CHANNEL(priv,priv->channel);
+	}
+#endif
+	/* set the speed regs of the CAN core */
+	occan_set_speedregs(priv,&priv->timing);
+
+	DBG("OCCAN: start: set timing regs btr0: 0x%x, btr1: 0x%x\n\r",
+		READ_REG(priv, &priv->regs->bustim0),
+		READ_REG(priv, &priv->regs->bustim1));
+
+	/* Set default acceptance filter */
+	pelican_set_accept(priv,priv->acode,priv->amask);
+
+	/* Nothing can fail from here, this must be set before interrupts are
+	 * enabled */
+	priv->started = 1;
+
+	/* turn on interrupts (arbitration-lost and data-overrun interrupts
+	 * are intentionally left disabled) */
+	WRITE_REG(priv, &priv->regs->inten,
+		PELICAN_IE_RX | PELICAN_IE_TX | PELICAN_IE_ERRW |
+		PELICAN_IE_ERRP | PELICAN_IE_BUS);
+#ifdef DEBUG
+	/* print setup before starting */
+	pelican_regs_print(priv->regs);
+	occan_stat_print(&priv->stats);
+#endif
+
+	/* core already in reset mode,
+	 *  - Exit reset mode
+	 *  - Enter Single/Dual mode filtering.
+	 */
+	WRITE_REG(priv, &priv->regs->mode, (priv->single_mode << 3));
+
+	/* Register interrupt routine and unmask IRQ at IRQ controller */
+	drvmgr_interrupt_register(priv->dev, 0, "occan", occan_interrupt, priv);
+
+	return 0;
+}
+
+/* Stop CAN communication: unregister the interrupt handler, put the
+ * core back into reset mode and disable its interrupts. Blocked readers
+ * and writers observe the OCCAN_STATUS_RESET flag.
+ */
+static void pelican_stop(occan_priv *priv)
+{
+	/* stop HW */
+
+	drvmgr_interrupt_unregister(priv->dev, 0, occan_interrupt, priv);
+
+#ifdef DEBUG
+	/* print setup before stopping */
+	pelican_regs_print(priv->regs);
+	occan_stat_print(&priv->stats);
+#endif
+
+	/* put core in reset mode */
+	WRITE_REG(priv, &priv->regs->mode, PELICAN_MOD_RESET);
+
+	/* turn off interrupts */
+	WRITE_REG(priv, &priv->regs->inten, 0);
+
+	priv->status |= OCCAN_STATUS_RESET;
+}
+
+/* Tell whether the hardware transmit buffer can take a new message.
+ *
+ * \return 1 when the TX buffer is free, 0 when it is occupied.
+ */
+static inline int pelican_tx_ready(occan_priv *can)
+{
+	/* The TX-buffer-free bit of the status register reflects whether
+	 * a transmission is still pending in hardware.
+	 */
+	return (READ_REG(can, &can->regs->status) & PELICAN_STAT_TXBUF) ? 1 : 0;
+}
+
+/* Try to send message "msg" by writing it directly into the hardware
+ * TX buffer. If the hardware buffer is occupied, -1 is returned and
+ * nothing is written.
+ *
+ * Be sure to have disabled CAN interrupts when
+ * entering this function.
+ */
+static int pelican_send(occan_priv *can, CANMsg *msg){
+	unsigned char tmp;
+	pelican_regs *regs = can->regs;
+
+	/* is there room in send buffer? */
+	if ( !pelican_tx_ready(can) ) {
+		/* tx fifo taken, we have to wait */
+		return -1;
+	}
+
+	/* Frame info byte: DLC in low nibble, RTR flag in bit 6 */
+	tmp = msg->len & 0xf;
+	if ( msg->rtr )
+		tmp |= 0x40;
+
+	if ( msg->extended ){
+		/* Extended Frame: bit7 of the frame info marks EFF, the
+		 * 29-bit identifier is spread over four ID registers
+		 * (left aligned, low 3 bits of id[3] unused).
+		 */
+		WRITE_REG(can, &regs->rx_fi_xff, 0x80 | tmp);
+		WRITE_REG(can, &regs->msg.tx_eff.id[0],(msg->id >> (5+8+8)) & 0xff);
+		WRITE_REG(can, &regs->msg.tx_eff.id[1],(msg->id >> (5+8)) & 0xff);
+		WRITE_REG(can, &regs->msg.tx_eff.id[2],(msg->id >> (5)) & 0xff);
+		WRITE_REG(can, &regs->msg.tx_eff.id[3],(msg->id << 3) & 0xf8);
+		tmp = msg->len;
+		while(tmp--){
+			WRITE_REG(can, &regs->msg.tx_eff.data[tmp], msg->data[tmp]);
+		}
+	}else{
+		/* Standard Frame: 11-bit identifier in two ID registers */
+		WRITE_REG(can, &regs->rx_fi_xff, tmp);
+		WRITE_REG(can, &regs->msg.tx_sff.id[0],(msg->id >> 3) & 0xff);
+		WRITE_REG(can, &regs->msg.tx_sff.id[1],(msg->id << 5) & 0xe0);
+		tmp = msg->len;
+		while(tmp--){
+			WRITE_REG(can, &regs->msg.tx_sff.data[tmp],msg->data[tmp]);
+		}
+	}
+
+	/* let HW know of new message */
+	if ( msg->sshot ){
+		/* single-shot: request transmission but abort on error/arb-loss */
+		WRITE_REG(can, &regs->cmd, PELICAN_CMD_TXREQ | PELICAN_CMD_ABORT);
+	}else{
+		/* normal case -- try resend until sent */
+		WRITE_REG(can, &regs->cmd, PELICAN_CMD_TXREQ);
+	}
+
+	return 0;
+}
+
+
+/* Program the four acceptance code and four acceptance mask registers.
+ * The core must be in reset mode for these registers to be writable
+ * (ACR0 shares its address with the RX frame info register).
+ */
+static void pelican_set_accept(occan_priv *priv, unsigned char *acode, unsigned char *amask)
+{
+	pelican_regs *regs = priv->regs;
+	unsigned char *code_reg[4];
+	unsigned char *mask_reg[4];
+	int i;
+
+	code_reg[0] = &regs->rx_fi_xff;
+	code_reg[1] = (unsigned char *)&regs->msg.rst_accept.code[0];
+	code_reg[2] = (unsigned char *)&regs->msg.rst_accept.code[1];
+	code_reg[3] = (unsigned char *)&regs->msg.rst_accept.code[2];
+
+	mask_reg[0] = (unsigned char *)&regs->msg.rst_accept.mask[0];
+	mask_reg[1] = (unsigned char *)&regs->msg.rst_accept.mask[1];
+	mask_reg[2] = (unsigned char *)&regs->msg.rst_accept.mask[2];
+	mask_reg[3] = (unsigned char *)&regs->msg.rst_accept.mask[3];
+
+	/* Set new code then mask, in register order */
+	for (i = 0; i < 4; i++)
+		WRITE_REG(priv, code_reg[i], acode[i]);
+
+	for (i = 0; i < 4; i++)
+		WRITE_REG(priv, mask_reg[i], amask[i]);
+}
+
+
+/* This function calculates BTR0 and BTR1 values for a given bitrate.
+ *
+ * Set communication parameters.
+ * \param clock_hz OC_CAN Core frequency in Hz.
+ * \param rate Requested baud rate in bits/second.
+ * \param result Pointer to where resulting BTRs will be stored, may be
+ *               NULL to only test whether the rate is achievable.
+ * \return zero if successful to calculate a baud rate,
+ *         -1 if the rate is out of the supported 5k..1M range,
+ *         -2 if no register combination gets close enough to the rate.
+ */
+static int occan_calc_speedregs(unsigned int clock_hz, unsigned int rate, occan_speed_regs *result)
+{
+	int best_error = 1000000000;
+	int error;
+	int best_tseg=0, best_brp=0, brp=0;
+	int tseg=0, tseg1=0, tseg2=0;
+	int sjw = 0;
+	/* CAN bus clock is the system clock divided by 2 */
+	int clock = clock_hz / 2;
+	/* desired sample point position in percent of the bit time */
+	int sampl_pt = 90;
+
+	if ( (rate<5000) || (rate>1000000) ){
+		/* invalid speed mode */
+		return -1;
+	}
+
+	/* find best match, return -2 if no good reg
+	 * combination is available for this frequency */
+
+	/* some heuristic specials */
+	if (rate > ((1000000 + 500000) / 2))
+		sampl_pt = 75;
+
+	if (rate < ((12500 + 10000) / 2))
+		sampl_pt = 75;
+
+	if (rate < ((100000 + 125000) / 2))
+		sjw = 1;
+
+	/* tseg even = round down, odd = round up.
+	 * tseg counts half time-quanta so that rounding can be encoded in
+	 * its parity; the total bit time is (1 + tseg/2) quanta.
+	 */
+	for (tseg = (0 + 0 + 2) * 2;
+	     tseg <= (MAX_TSEG2 + MAX_TSEG1 + 2) * 2 + 1;
+	     tseg++)
+	{
+		/* candidate baud rate prescaler for this quanta count */
+		brp = clock / ((1 + tseg / 2) * rate) + tseg % 2;
+		if ((brp == 0) || (brp > 64))
+			continue;
+
+		error = rate - clock / (brp * (1 + tseg / 2));
+		if (error < 0)
+		{
+			error = -error;
+		}
+
+		/* "<=" prefers larger tseg (more quanta per bit) on ties */
+		if (error <= best_error)
+		{
+			best_error = error;
+			best_tseg = tseg/2;
+			best_brp = brp-1;
+		}
+	}
+
+	/* reject if the achieved rate is off by 10% or more */
+	if (best_error && (rate / best_error < 10))
+	{
+		printk("OCCAN: bitrate %d is not possible with %d Hz clock\n\r",rate, clock);
+		return -2;
+	}else if ( !result )
+		return 0; /* nothing to store result in, but a valid bitrate can be calculated */
+
+	/* split the quanta between TSEG1/TSEG2 around the sample point,
+	 * clamped to the hardware field limits */
+	tseg2 = best_tseg - (sampl_pt * (best_tseg + 1)) / 100;
+
+	if (tseg2 < 0)
+	{
+		tseg2 = 0;
+	}
+
+	if (tseg2 > MAX_TSEG2)
+	{
+		tseg2 = MAX_TSEG2;
+	}
+
+	tseg1 = best_tseg - tseg2 - 2;
+
+	if (tseg1 > MAX_TSEG1)
+	{
+		tseg1 = MAX_TSEG1;
+		tseg2 = best_tseg - tseg1 - 2;
+	}
+
+	result->btr0 = (sjw<<OCCAN_BUSTIM_SJW_BIT) | (best_brp&OCCAN_BUSTIM_BRP);
+	result->btr1 = (0<<7) | (tseg2<<OCCAN_BUSTIM_TSEG2_BIT) | tseg1;
+
+	return 0;
+}
+
+/* Write precomputed bus timing values into BTR0/BTR1. The core must be
+ * in reset mode for the timing registers to be writable.
+ *
+ * \return 0 on success, -1 on invalid arguments.
+ */
+static int occan_set_speedregs(occan_priv *priv, occan_speed_regs *timing)
+{
+	/* Refuse to touch hardware without valid device and timing data */
+	if ( (priv == NULL) || (priv->regs == NULL) || (timing == NULL) )
+		return -1;
+
+	WRITE_REG(priv, &priv->regs->bustim0, timing->btr0);
+	WRITE_REG(priv, &priv->regs->bustim1, timing->btr1);
+
+	return 0;
+}
+
+/* I/O driver initialize entry point. All real initialization is done by
+ * the driver manager init levels (occan_init2/occan_init3), so this is
+ * a no-op.
+ */
+static rtems_device_driver occan_initialize(rtems_device_major_number major, rtems_device_minor_number unused, void *arg)
+{
+	return RTEMS_SUCCESSFUL;
+}
+
+/* I/O driver open entry point: enforce exclusive access, allocate the
+ * software RX/TX fifos, reset the software state to defaults and
+ * configure the (still stopped) hardware via pelican_open().
+ */
+static rtems_device_driver occan_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	occan_priv *can;
+	struct drvmgr_dev *dev;
+
+	DBG("OCCAN: Opening %d\n\r",minor);
+
+	/* get can device */
+	if ( drvmgr_get_dev(&occan_drv_info.general, minor, &dev) ) {
+		DBG("Wrong minor %d\n", minor);
+		return RTEMS_UNSATISFIED; /* NODEV */
+	}
+	can = (occan_priv *)dev->priv;
+
+	/* already opened? (devsem serializes the open flag test-and-set) */
+	rtems_semaphore_obtain(can->devsem, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+	if ( can->open ){
+		rtems_semaphore_release(can->devsem);
+		return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+	}
+	can->open = 1;
+	rtems_semaphore_release(can->devsem);
+
+	SPIN_INIT(&can->devlock, can->devName);
+
+	/* allocate fifos */
+	can->rxfifo = occan_fifo_create(DEFAULT_RX_FIFO_LEN);
+	if ( !can->rxfifo ){
+		can->open = 0;
+		return RTEMS_NO_MEMORY; /* ENOMEM */
+	}
+
+	can->txfifo = occan_fifo_create(DEFAULT_TX_FIFO_LEN);
+	if ( !can->txfifo ){
+		/* undo the rx fifo allocation before failing */
+		occan_fifo_free(can->rxfifo);
+		can->rxfifo= NULL;
+		can->open = 0;
+		return RTEMS_NO_MEMORY; /* ENOMEM */
+	}
+
+	DBG("OCCAN: Opening %d success\n\r",minor);
+
+	can->started = 0;
+	can->channel = 0; /* Default to first can link */
+	can->txblk = 1; /* Default to Blocking mode */
+	can->rxblk = 1; /* Default to Blocking mode */
+	can->single_mode = 1; /* single mode acceptance filter */
+
+	/* reset stat counters */
+	memset(&can->stats,0,sizeof(occan_stats));
+
+	/* HW must be in reset mode here (close and initializes resets core...)
+	 *
+	 * 1. set default modes/speeds
+	 */
+	pelican_open(can);
+
+	return RTEMS_SUCCESSFUL;
+}
+
+/* I/O driver close entry point: stop the hardware if running, put the
+ * core back into reset mode, free the software fifos and release the
+ * exclusive-open flag.
+ */
+static rtems_device_driver occan_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	occan_priv *can;
+	struct drvmgr_dev *dev;
+
+	DBG("OCCAN: Closing %d\n\r",minor);
+
+	if ( drvmgr_get_dev(&occan_drv_info.general, minor, &dev) ) {
+		return RTEMS_INVALID_NAME;
+	}
+	can = (occan_priv *)dev->priv;
+
+	/* stop if running */
+	if ( can->started )
+		pelican_stop(can);
+
+	/* Enter Reset Mode */
+	WRITE_REG(can, &can->regs->mode, PELICAN_MOD_RESET);
+
+	/* free fifo memory */
+	occan_fifo_free(can->rxfifo);
+	occan_fifo_free(can->txfifo);
+
+	can->rxfifo = NULL;
+	can->txfifo = NULL;
+
+	can->open = 0;
+
+	return RTEMS_SUCCESSFUL;
+}
+
+/* I/O driver read entry point: copy whole CANMsg structures from the
+ * software RX fifo (filled by the interrupt handler) into the caller's
+ * buffer. In blocking mode the caller sleeps on rxsem until at least one
+ * message arrives; a BUS OFF or RESET event aborts the read with an
+ * I/O error. Interrupts are kept locked out (devlock) while the fifo is
+ * inspected and a message copied.
+ */
+static rtems_device_driver occan_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	occan_priv *can;
+	struct drvmgr_dev *dev;
+	rtems_libio_rw_args_t *rw_args=(rtems_libio_rw_args_t *) arg;
+	CANMsg *dstmsg, *srcmsg;
+	SPIN_IRQFLAGS(oldLevel);
+	int left;
+
+	if ( drvmgr_get_dev(&occan_drv_info.general, minor, &dev) ) {
+		return RTEMS_INVALID_NAME;
+	}
+	can = (occan_priv *)dev->priv;
+
+	if ( !can->started ){
+		DBG("OCCAN: cannot read from minor %d when not started\n\r",minor);
+		return RTEMS_RESOURCE_IN_USE; /* -EBUSY*/
+	}
+
+	/* does at least one message fit */
+	left = rw_args->count;
+	if ( left < sizeof(CANMsg) ){
+		DBG("OCCAN: minor %d length of buffer must be at least %d, our is %d\n\r",minor,sizeof(CANMsg),left);
+		return RTEMS_INVALID_NAME; /* -EINVAL */
+	}
+
+	/* get pointer to start where to put CAN messages */
+	dstmsg = (CANMsg *)rw_args->buffer;
+	if ( !dstmsg ){
+		DBG("OCCAN: minor %d read: input buffer is NULL\n\r",minor);
+		return RTEMS_INVALID_NAME; /* -EINVAL */
+	}
+
+	while (left >= sizeof(CANMsg) ){
+
+		/* turn off interrupts */
+		SPIN_LOCK_IRQ(&can->devlock, oldLevel);
+
+		/* A bus off interrupt may have occured after checking can->started */
+		if ( can->status & (OCCAN_STATUS_ERR_BUSOFF|OCCAN_STATUS_RESET) ){
+			SPIN_UNLOCK_IRQ(&can->devlock, oldLevel);
+			DBG("OCCAN: read is cancelled due to a BUS OFF error\n\r");
+			rw_args->bytes_moved = rw_args->count-left;
+			return RTEMS_IO_ERROR; /* EIO */
+		}
+
+		/* peek at the oldest message without removing it yet */
+		srcmsg = occan_fifo_claim_get(can->rxfifo);
+		if ( !srcmsg ){
+			/* no more messages in reception fifo.
+			 * Wait for incoming packets only if in
+			 * blocking mode AND no messages been
+			 * read before.
+			 */
+			if ( !can->rxblk || (left != rw_args->count) ){
+				/* turn on interrupts again */
+				SPIN_UNLOCK_IRQ(&can->devlock, oldLevel);
+				break;
+			}
+
+			/* turn on interrupts again */
+			SPIN_UNLOCK_IRQ(&can->devlock, oldLevel);
+
+			DBG("OCCAN: Waiting for RX int\n\r");
+
+			/* wait for incoming messages */
+			rtems_semaphore_obtain(can->rxsem, RTEMS_WAIT,
+				RTEMS_NO_TIMEOUT);
+
+			/* did we get woken up by a BUS OFF error? */
+			if ( can->status & (OCCAN_STATUS_ERR_BUSOFF|OCCAN_STATUS_RESET) ){
+				DBG("OCCAN: Blocking read got woken up by BUS OFF error\n\r");
+				/* At this point it should not matter how many messages we handled */
+				rw_args->bytes_moved = rw_args->count-left;
+				return RTEMS_IO_ERROR; /* EIO */
+			}
+
+			/* no errors detected, it must be a message */
+			continue;
+		}
+
+		/* got message, copy it to userspace buffer */
+		*dstmsg = *srcmsg;
+
+		/* Return borrowed message, RX interrupt can use it again */
+		occan_fifo_get(can->rxfifo);
+
+		/* turn on interrupts again */
+		SPIN_UNLOCK_IRQ(&can->devlock, oldLevel);
+
+		/* increase pointers */
+		left -= sizeof(CANMsg);
+		dstmsg++;
+	}
+
+	/* save number of read bytes. */
+	rw_args->bytes_moved = rw_args->count-left;
+	if ( rw_args->bytes_moved == 0 ){
+		DBG("OCCAN: minor %d read would block, returning\n\r",minor);
+		return RTEMS_TIMEOUT; /* ETIMEDOUT should be EAGAIN/EWOULDBLOCK */
+	}
+	return RTEMS_SUCCESSFUL;
+}
+
+/* I/O driver write entry point: queue whole CANMsg structures for
+ * transmission. When the software TX fifo is empty the first message is
+ * pushed directly into the hardware TX buffer (which turns the TX
+ * interrupt on); remaining messages go into the software fifo drained
+ * by the interrupt handler. In blocking mode the caller sleeps on txsem
+ * when the fifo is full; BUS OFF/RESET aborts with an I/O error.
+ * devlock keeps the interrupt handler out while the fifo is touched.
+ */
+static rtems_device_driver occan_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	occan_priv *can;
+	struct drvmgr_dev *dev;
+	rtems_libio_rw_args_t *rw_args=(rtems_libio_rw_args_t *) arg;
+	CANMsg *msg,*fifo_msg;
+	SPIN_IRQFLAGS(oldLevel);
+	int left;
+
+	DBG("OCCAN: Writing %d bytes from 0x%lx (%d)\n\r",rw_args->count,rw_args->buffer,sizeof(CANMsg));
+
+	if ( drvmgr_get_dev(&occan_drv_info.general, minor, &dev) ) {
+		return RTEMS_INVALID_NAME;
+	}
+	can = (occan_priv *)dev->priv;
+
+	if ( !can->started )
+		return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+
+	left = rw_args->count;
+	if ( (left < sizeof(CANMsg)) || (!rw_args->buffer) ){
+		return RTEMS_INVALID_NAME; /* EINVAL */
+	}
+
+	msg = (CANMsg *)rw_args->buffer;
+
+	/* limit CAN message length to 8 */
+	msg->len = (msg->len > 8) ? 8 : msg->len;
+
+#ifdef DEBUG_VERBOSE
+	pelican_regs_print(can->regs);
+	occan_stat_print(&can->stats);
+#endif
+
+	/* turn off interrupts */
+	SPIN_LOCK_IRQ(&can->devlock, oldLevel);
+
+	/* A bus off interrupt may have occured after checking can->started */
+	if ( can->status & (OCCAN_STATUS_ERR_BUSOFF|OCCAN_STATUS_RESET) ){
+		SPIN_UNLOCK_IRQ(&can->devlock, oldLevel);
+		rw_args->bytes_moved = 0;
+		return RTEMS_IO_ERROR; /* EIO */
+	}
+
+	/* If no messages in software tx fifo, we will
+	 * try to send first message by putting it directly
+	 * into the HW TX fifo.
+	 */
+	if ( occan_fifo_empty(can->txfifo) ){
+		/*pelican_regs_print(cans[minor+1].regs);*/
+		if ( !pelican_send(can,msg) ) {
+			/* First message put directly into HW TX fifo
+			 * This will turn TX interrupt on.
+			 */
+			left -= sizeof(CANMsg);
+			msg++;
+
+#ifdef OCCAN_TX_IRQ_FLAG_FIXUP
+			/* Mark that we have put at least one msg in TX FIFO */
+			can->sending = 1;
+#endif
+
+			/* bump stat counters */
+			can->stats.tx_msgs++;
+
+			DBG("OCCAN: Sending direct via HW\n\r");
+		}
+	}
+
+	/* Put messages into software fifo */
+	while ( left >= sizeof(CANMsg) ){
+
+		/* limit CAN message length to 8 */
+		msg->len = (msg->len > 8) ? 8 : msg->len;
+
+		/* reserve a slot in the fifo; message copied in below */
+		fifo_msg = occan_fifo_put_claim(can->txfifo,0);
+		if ( !fifo_msg ){
+
+			DBG("OCCAN: FIFO is full\n\r");
+			/* Fifo full: give up unless in blocking mode and
+			 * nothing has been queued yet.
+			 */
+			if ( !can->txblk || (left != rw_args->count) )
+				break;
+
+			/* Blocking path: turn interrupts on again, wait for
+			 * the interrupt handler to free a slot, then retry.
+			 * If the fifo drained completely while waiting, send
+			 * directly via hardware to restart the TX interrupt.
+			 */
+			SPIN_UNLOCK_IRQ(&can->devlock, oldLevel);
+
+			DBG("OCCAN: Waiting for tx int\n\r");
+
+			rtems_semaphore_obtain(can->txsem, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+
+			/* did we get woken up by a BUS OFF error? */
+			if ( can->status & (OCCAN_STATUS_ERR_BUSOFF|OCCAN_STATUS_RESET) ){
+				DBG("OCCAN: Blocking write got woken up by BUS OFF error or RESET event\n\r");
+				/* At this point it should not matter how many messages we handled */
+				rw_args->bytes_moved = rw_args->count-left;
+				return RTEMS_IO_ERROR; /* EIO */
+			}
+
+			SPIN_LOCK_IRQ(&can->devlock, oldLevel);
+
+			if ( occan_fifo_empty(can->txfifo) ){
+				if ( !pelican_send(can,msg) ) {
+					/* First message put directly into HW TX fifo
+					 * This will turn TX interrupt on.
+					 */
+					left -= sizeof(CANMsg);
+					msg++;
+
+#ifdef OCCAN_TX_IRQ_FLAG_FIXUP
+					/* Mark that we have put at least one msg in TX FIFO */
+					can->sending = 1;
+#endif
+
+					/* bump stat counters */
+					can->stats.tx_msgs++;
+
+					DBG("OCCAN: Sending direct2 via HW\n\r");
+				}
+			}
+			continue;
+		}
+
+		/* copy message into fifo area */
+		*fifo_msg = *msg;
+
+		/* tell interrupt handler about the message */
+		occan_fifo_put(can->txfifo);
+
+		DBG("OCCAN: Put info fifo SW\n\r");
+
+		/* Prepare insert of next message */
+		msg++;
+		left-=sizeof(CANMsg);
+	}
+
+	SPIN_UNLOCK_IRQ(&can->devlock, oldLevel);
+
+	rw_args->bytes_moved = rw_args->count-left;
+	DBG("OCCAN: Sent %d\n\r",rw_args->bytes_moved);
+
+	if ( left == rw_args->count )
+		return RTEMS_TIMEOUT; /* ETIMEDOUT should be EAGAIN/EWOULDBLOCK */
+	return RTEMS_SUCCESSFUL;
+}
+
+/* OCCAN IOCTL handler.
+ *
+ * Configures bus speed/timing (SET_SPEED/SET_BTRS), software FIFO
+ * lengths (SET_BUFLEN), acceptance filter (SET_FILTER), blocking
+ * behaviour (SET_BLK_MODE) and starts/stops the controller
+ * (START/STOP). GET_STATS/GET_STATUS copy driver state to the caller.
+ *
+ * All configuration commands require the controller to be stopped and
+ * return RTEMS_RESOURCE_IN_USE (EBUSY) while it is running. Invalid
+ * arguments yield RTEMS_INVALID_NAME (EINVAL), unknown commands
+ * RTEMS_NOT_DEFINED.
+ */
+static rtems_device_driver occan_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	int ret;
+	occan_speed_regs timing;
+	occan_priv *can;
+	struct drvmgr_dev *dev;
+	unsigned int speed;
+	rtems_libio_ioctl_args_t *ioarg = (rtems_libio_ioctl_args_t *) arg;
+	struct occan_afilter *afilter;
+	occan_stats *dststats;
+	unsigned int rxcnt,txcnt;
+
+	DBG("OCCAN: IOCTL %d\n\r",ioarg->command);
+
+	if ( drvmgr_get_dev(&occan_drv_info.general, minor, &dev) ) {
+		return RTEMS_INVALID_NAME;
+	}
+	can = (occan_priv *)dev->priv;
+
+	ioarg->ioctl_return = 0;
+	switch(ioarg->command){
+		case OCCAN_IOC_SET_SPEED:
+
+			/* cannot change speed during run mode */
+			if ( can->started )
+				return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+
+			/* get speed rate from argument */
+			speed = (unsigned int)ioarg->buffer;
+			ret = occan_calc_speedregs(can->sys_freq_hz,speed,&timing);
+			if ( ret )
+				return RTEMS_INVALID_NAME; /* EINVAL */
+
+			/* The speed registers of the CAN core are written
+			 * to hardware when starting.
+			 */
+
+			/* save timing/speed */
+			can->speed = speed;
+			can->timing = timing;
+			break;
+
+		case OCCAN_IOC_SET_BTRS:
+			/* Set BTR registers manually
+			 * Read OCCAN Manual.
+			 */
+			if ( can->started )
+				return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+
+			can->speed = 0; /* custom */
+			can->timing.btr1 = (unsigned int)ioarg->buffer & 0xff;
+			can->timing.btr0 = ((unsigned int)ioarg->buffer>>8) & 0xff;
+			break;
+
+		case OCCAN_IOC_SPEED_AUTO:
+			return RTEMS_NOT_IMPLEMENTED;
+
+		case OCCAN_IOC_SET_BUFLEN:
+			/* set rx & tx fifo buffer length; rx count in the low
+			 * 16 bits, tx count in the high 16 bits of the argument
+			 */
+			if ( can->started )
+				return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+
+			rxcnt = (unsigned int)ioarg->buffer & 0x0000ffff;
+			txcnt = (unsigned int)ioarg->buffer >> 16;
+
+			occan_fifo_free(can->rxfifo);
+			occan_fifo_free(can->txfifo);
+
+			/* allocate new buffers */
+			can->rxfifo = occan_fifo_create(rxcnt);
+			can->txfifo = occan_fifo_create(txcnt);
+
+			/* A failed allocation is also caught later by
+			 * pelican_start(), which refuses to start.
+			 */
+			if ( !can->rxfifo || !can->txfifo )
+				return RTEMS_NO_MEMORY; /* ENOMEM */
+			break;
+
+		case OCCAN_IOC_GET_CONF:
+			return RTEMS_NOT_IMPLEMENTED;
+
+		case OCCAN_IOC_GET_STATS:
+			dststats = (occan_stats *)ioarg->buffer;
+			if ( !dststats )
+				return RTEMS_INVALID_NAME; /* EINVAL */
+
+			/* copy data stats into userspace buffer */
+			if ( can->rxfifo )
+				can->stats.rx_sw_dovr = can->rxfifo->ovcnt;
+			*dststats = can->stats;
+			break;
+
+		case OCCAN_IOC_GET_STATUS:
+			/* return the current status word of the driver */
+			if ( !ioarg->buffer )
+				return RTEMS_INVALID_NAME;
+
+			*(unsigned int *)ioarg->buffer = can->status;
+			break;
+
+		/* Set physical link */
+		case OCCAN_IOC_SET_LINK:
+#ifdef REDUNDANT_CHANNELS
+			if ( can->started )
+				return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+
+			/* switch HW channel
+			 * (bugfix: referenced non-existing "ioargs" before)
+			 */
+			can->channel = (unsigned int)ioarg->buffer;
+#else
+			return RTEMS_NOT_IMPLEMENTED;
+#endif
+			break;
+
+		case OCCAN_IOC_SET_FILTER:
+			if ( can->started )
+				return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+
+			afilter = (struct occan_afilter *)ioarg->buffer;
+
+			if ( !afilter )
+				return RTEMS_INVALID_NAME; /* EINVAL */
+
+			/* copy acceptance filter */
+			can->acode[0] = afilter->code[0];
+			can->acode[1] = afilter->code[1];
+			can->acode[2] = afilter->code[2];
+			can->acode[3] = afilter->code[3];
+
+			can->amask[0] = afilter->mask[0];
+			can->amask[1] = afilter->mask[1];
+			can->amask[2] = afilter->mask[2];
+			can->amask[3] = afilter->mask[3];
+
+			can->single_mode = ( afilter->single_mode ) ? 1 : 0;
+
+			/* Acceptance filter is written to hardware
+			 * when starting.
+			 */
+			break;
+
+		case OCCAN_IOC_SET_BLK_MODE:
+			can->rxblk = (unsigned int)ioarg->buffer & OCCAN_BLK_MODE_RX;
+			can->txblk = ((unsigned int)ioarg->buffer & OCCAN_BLK_MODE_TX) >> 1;
+			break;
+
+		case OCCAN_IOC_START:
+			if ( can->started )
+				return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+			if ( pelican_start(can) )
+				return RTEMS_NO_MEMORY; /* failed because of no memory, can happen if SET_BUFLEN failed */
+			/* can->started = 1; -- Is set in pelican_start due to interrupt may occur before we
+			 * get here.
+			 */
+			break;
+
+		case OCCAN_IOC_STOP:
+			if ( !can->started )
+				return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+			pelican_stop(can);
+			can->started = 0;
+			break;
+
+		default:
+			return RTEMS_NOT_DEFINED;
+	}
+	return RTEMS_SUCCESSFUL;
+}
+
+/* OCCAN interrupt handler.
+ *
+ * Processes every pending PeliCAN interrupt cause in a loop until the
+ * interrupt flag register reads zero:
+ *  - RX: copies one received message from the hardware buffer into the
+ *    RX software FIFO (overwriting the oldest entry when full).
+ *  - TX: feeds the next message from the TX software FIFO into the
+ *    hardware transmit buffer.
+ *  - Error warning/passive, data overrun, arbitration loss and bus
+ *    errors: update can->status and the statistics counters. When the
+ *    core has entered reset mode (bus off) the driver is stopped
+ *    (can->started = 0) until the user issues ioctl(START) again.
+ * The RX/TX semaphores are released after the spin lock is dropped so
+ * waiting reader/writer tasks are woken at most once per invocation.
+ */
+void occan_interrupt(void *arg)
+{
+	occan_priv *can = arg;
+	unsigned char iflags;
+	pelican_regs *regs = can->regs;
+	CANMsg *msg;
+	int signal_rx=0, signal_tx=0;
+	unsigned char tmp, errcode, arbcode;
+	int tx_error_cnt,rx_error_cnt;
+	SPIN_ISR_IRQFLAGS(irqflags);
+
+	if ( !can->started )
+		return; /* Spurious Interrupt, do nothing */
+
+	SPIN_LOCK(&can->devlock, irqflags);
+	while (1) {
+
+		iflags = READ_REG(can, &can->regs->intflags);
+
+#ifdef OCCAN_TX_IRQ_FLAG_FIXUP
+		/* TX IRQ may be cleared when reading regs->intflags due
+		 * to a bug in some chips. Instead of looking at the TX_IRQ_FLAG
+		 * the TX-fifo empty register is looked at when something has
+		 * been scheduled for transmission.
+		 */
+		if ((iflags & PELICAN_IF_TX) == 0) {
+			if (can->sending && pelican_tx_ready(can)) {
+				can->sending = 0;
+				iflags |= PELICAN_IF_TX;
+			}
+		}
+#endif
+
+		if (iflags == 0)
+			break;
+		/* still interrupts to handle */
+
+		can->stats.ints++;
+
+		if ( iflags & PELICAN_IF_RX ){
+			/* the rx fifo is not empty
+			 * put 1 message into rxfifo for later use
+			 */
+
+			/* get empty (or make room) message; force=1 means the
+			 * oldest queued message is overwritten when full
+			 */
+			msg = occan_fifo_put_claim(can->rxfifo,1);
+			tmp = READ_REG(can, &regs->rx_fi_xff);
+			msg->extended = tmp >> 7;
+			msg->rtr = (tmp >> 6) & 1;
+			msg->len = tmp = tmp & 0x0f;
+
+			if ( msg->extended ){
+				/* extended message: 29-bit id spread over four
+				 * id registers
+				 */
+				msg->id = READ_REG(can, &regs->msg.rx_eff.id[0])<<(5+8+8) |
+				          READ_REG(can, &regs->msg.rx_eff.id[1])<<(5+8) |
+				          READ_REG(can, &regs->msg.rx_eff.id[2])<<5 |
+				          READ_REG(can, &regs->msg.rx_eff.id[3])>>3;
+
+				while(tmp--){
+					msg->data[tmp] = READ_REG(can, &regs->msg.rx_eff.data[tmp]);
+				}
+			}else{
+				/* standard message: 11-bit id in two registers */
+				msg->id = READ_REG(can, &regs->msg.rx_sff.id[0])<<3 |
+				          READ_REG(can, &regs->msg.rx_sff.id[1])>>5;
+
+				while(tmp--){
+					msg->data[tmp] = READ_REG(can, &regs->msg.rx_sff.data[tmp]);
+				}
+			}
+
+			/* Re-Enable RX buffer for a new message */
+			WRITE_REG(can, &regs->cmd, PELICAN_CMD_RELRXBUF);
+
+			/* make message available to the user */
+			occan_fifo_put(can->rxfifo);
+
+			/* bump stat counters */
+			can->stats.rx_msgs++;
+
+			/* signal the semaphore only once */
+			signal_rx = 1;
+		}
+
+		if ( iflags & PELICAN_IF_TX ) {
+
+			/* there is room in tx fifo of HW */
+
+			if ( !occan_fifo_empty(can->txfifo) ){
+				/* send 1 more messages */
+				msg = occan_fifo_claim_get(can->txfifo);
+
+				if ( pelican_send(can,msg) ){
+					/* ERROR! We got an TX interrupt telling us
+					 * tx fifo is empty, yet it is not.
+					 *
+					 * Complain about this max 10 times
+					 */
+					if ( can->stats.tx_buf_error < 10 ){
+						printk("OCCAN: got TX interrupt but TX fifo in not empty (%d)\n\r",can->stats.tx_buf_error);
+					}
+					can->status |= OCCAN_STATUS_QUEUE_ERROR;
+					can->stats.tx_buf_error++;
+				}
+#ifdef OCCAN_TX_IRQ_FLAG_FIXUP
+				can->sending = 1;
+#endif
+
+				/* free software-fifo space taken by sent message */
+				occan_fifo_get(can->txfifo);
+
+				/* bump stat counters */
+				can->stats.tx_msgs++;
+
+				/* wake any sleeping thread waiting for "fifo not full" */
+				signal_tx = 1;
+			}
+		}
+
+		if ( iflags & PELICAN_IF_ERRW ){
+			tx_error_cnt = READ_REG(can, &regs->tx_err_cnt);
+			rx_error_cnt = READ_REG(can, &regs->rx_err_cnt);
+
+			/* 1. if bus off tx error counter = 127 */
+			if ( (tx_error_cnt > 96) || (rx_error_cnt > 96) ){
+				/* in Error Active Warning area or BUS OFF */
+				can->status |= OCCAN_STATUS_WARN;
+
+				/* check reset bit for reset mode */
+				if ( READ_REG(can, &regs->mode) & PELICAN_MOD_RESET ){
+					/* in reset mode ==> bus off */
+					can->status |= OCCAN_STATUS_ERR_BUSOFF | OCCAN_STATUS_RESET;
+
+					/***** pelican_stop(can) ******
+					 * turn off interrupts
+					 * enter reset mode (HW already done that for us)
+					 */
+					WRITE_REG(can, &regs->inten,0);
+
+					/* Indicate that we are not started any more.
+					 * This will make write/read return with EBUSY
+					 * on read/write attempts.
+					 *
+					 * User must issue a ioctl(START) to get going again.
+					 */
+					can->started = 0;
+
+					/* signal any waiting read/write threads, so that they
+					 * can handle the bus error.
+					 */
+					signal_rx = 1;
+					signal_tx = 1;
+
+					/* ignore any old pending interrupt */
+					break;
+				}
+
+			}else{
+				/* not in Upper Error Active area any more */
+				can->status &= ~(OCCAN_STATUS_WARN);
+			}
+			can->stats.err_warn++;
+		}
+
+		if ( iflags & PELICAN_IF_DOVR){
+			/* RX data overrun: hardware dropped a message */
+			can->status |= OCCAN_STATUS_OVERRUN;
+			can->stats.err_dovr++;
+			DBG("OCCAN_INT: DOVR\n\r");
+		}
+
+		if ( iflags & PELICAN_IF_ERRP){
+			/* Let the error counters decide what kind of
+			 * interrupt it was. In/Out of EPassive area.
+			 */
+			tx_error_cnt = READ_REG(can, &regs->tx_err_cnt);
+			rx_error_cnt = READ_REG(can, &regs->rx_err_cnt);
+
+			if ( (tx_error_cnt > 127) || (rx_error_cnt > 127) ){
+				can->status |= OCCAN_STATUS_ERR_PASSIVE;
+			}else{
+				can->status &= ~(OCCAN_STATUS_ERR_PASSIVE);
+			}
+
+			/* increase Error Passive In/out interrupt counter */
+			can->stats.err_errp++;
+		}
+
+		if ( iflags & PELICAN_IF_ARB){
+			/* arbitration lost; record the bit number it was lost at */
+			arbcode = READ_REG(can, &regs->arbcode);
+			can->stats.err_arb_bitnum[arbcode & PELICAN_ARB_BITS]++;
+			can->stats.err_arb++;
+			DBG("OCCAN_INT: ARB (0x%x)\n\r",arbcode & PELICAN_ARB_BITS);
+		}
+
+		if ( iflags & PELICAN_IF_BUS){
+			/* Some kind of BUS error, only used for
+			 * statistics. Error Register is decoded
+			 * and put into can->stats.
+			 */
+			errcode = READ_REG(can, &regs->errcode);
+			switch( errcode & PELICAN_ECC_CODE ){
+				case PELICAN_ECC_CODE_BIT:
+					can->stats.err_bus_bit++;
+					break;
+				case PELICAN_ECC_CODE_FORM:
+					can->stats.err_bus_form++;
+					break;
+				case PELICAN_ECC_CODE_STUFF:
+					can->stats.err_bus_stuff++;
+					break;
+				case PELICAN_ECC_CODE_OTHER:
+					can->stats.err_bus_other++;
+					break;
+			}
+
+			/* Get Direction (TX/RX) */
+			if ( errcode & PELICAN_ECC_DIR ){
+				can->stats.err_bus_rx++;
+			}else{
+				can->stats.err_bus_tx++;
+			}
+
+			/* Get Segment in frame that went wrong */
+			can->stats.err_bus_segs[errcode & PELICAN_ECC_SEG]++;
+
+			/* total number of bus errors */
+			can->stats.err_bus++;
+		}
+	}
+	SPIN_UNLOCK(&can->devlock, irqflags);
+
+	/* signal Binary semaphore, messages available! */
+	if ( signal_rx ){
+		rtems_semaphore_release(can->rxsem);
+	}
+
+	if ( signal_tx ){
+		rtems_semaphore_release(can->txsem);
+	}
+}
+
+/*******************************************************************************
+ * FIFO IMPLEMENTATION
+ */
+
+/* Allocate a software FIFO able to hold cnt CAN messages.
+ * The message storage is placed directly after the FIFO header in the
+ * same (zeroed) allocation. Returns NULL when out of memory.
+ */
+static occan_fifo *occan_fifo_create(int cnt)
+{
+	occan_fifo *q = grlib_calloc(1, sizeof(*q) + cnt*sizeof(CANMsg));
+
+	if ( q == NULL )
+		return NULL;
+
+	q->cnt = cnt;
+	q->base = &q->fifoarea[0];
+	q->head = q->base;
+	q->tail = q->base;
+	return q;
+}
+
+/* Release a FIFO created by occan_fifo_create(). NULL is accepted. */
+static void occan_fifo_free(occan_fifo *fifo)
+{
+	/* free(NULL) is a no-op per the C standard, no guard needed */
+	free(fifo);
+}
+
+/* Non-zero when the FIFO cannot take another message without
+ * overwriting the oldest entry.
+ */
+static int occan_fifo_full(occan_fifo *fifo)
+{
+	return fifo->full != 0;
+}
+
+/* Non-zero when the FIFO holds no messages. head == tail is ambiguous
+ * (completely empty or completely full), so the full flag decides.
+ */
+static int occan_fifo_empty(occan_fifo *fifo)
+{
+	if ( fifo->full )
+		return 0;
+	return fifo->head == fifo->tail;
+}
+
+/* Stage 1 of a put: return the slot to fill (complete the put with
+ * occan_fifo_put()). With force!=0 this never fails: when the FIFO is
+ * full the oldest message is dropped (ovcnt is bumped) to make room.
+ * Returns NULL if fifo is NULL, or if full and force==0.
+ */
+static CANMsg *occan_fifo_put_claim(occan_fifo *fifo, int force)
+{
+	if ( fifo == NULL )
+		return NULL;
+
+	if ( !occan_fifo_full(fifo) )
+		return fifo->head;
+
+	if ( !force )
+		return NULL;
+
+	/* all buffers already used ==> overwrite the oldest */
+	fifo->ovcnt++;
+	occan_fifo_get(fifo);
+
+	return fifo->head;
+}
+
+/* Stage 2 of a put: commit the slot claimed with
+ * occan_fifo_put_claim() by advancing the head index (with wrap).
+ * The FIFO becomes full when head catches up with tail.
+ */
+static void occan_fifo_put(occan_fifo *fifo)
+{
+	if ( occan_fifo_full(fifo) )
+		return;
+
+	if ( fifo->head >= &fifo->base[fifo->cnt-1] )
+		fifo->head = fifo->base; /* wrap around */
+	else
+		fifo->head++;
+
+	fifo->full = (fifo->head == fifo->tail);
+}
+
+/* Peek at the oldest queued message without removing it (remove with
+ * occan_fifo_get()). Returns NULL when the FIFO is empty.
+ */
+static CANMsg *occan_fifo_claim_get(occan_fifo *fifo)
+{
+	return occan_fifo_empty(fifo) ? NULL : fifo->tail;
+}
+
+
+/* Remove the oldest message by advancing the tail index (with wrap).
+ * No-op on a NULL or empty FIFO. Clears the full flag since one slot
+ * is now free.
+ */
+static void occan_fifo_get(occan_fifo *fifo)
+{
+	if ( !fifo || occan_fifo_empty(fifo) )
+		return;
+
+	if ( fifo->tail >= &fifo->base[fifo->cnt-1] )
+		fifo->tail = fifo->base; /* wrap around */
+	else
+		fifo->tail++;
+
+	fifo->full = 0;
+}
+
+/* Reset the FIFO to the empty state and clear the overrun counter.
+ * Queued messages are discarded; the storage itself is kept.
+ */
+static void occan_fifo_clr(occan_fifo *fifo)
+{
+	fifo->ovcnt = 0;
+	fifo->full = 0;
+	fifo->tail = fifo->base;
+	fifo->head = fifo->base;
+}
+
+/******************************************************************************/
diff --git a/bsps/shared/grlib/can/satcan.c b/bsps/shared/grlib/can/satcan.c
new file mode 100644
index 0000000000..c6d58aaed4
--- /dev/null
+++ b/bsps/shared/grlib/can/satcan.c
@@ -0,0 +1,716 @@
+/*
+ * SatCAN FPGA driver
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <bsp.h>
+#include <rtems/bspIo.h> /* printk */
+
+#include <grlib/satcan.h>
+#include <grlib/ambapp.h>
+
+#include <grlib/grlib_impl.h>
+
+#ifndef GAISLER_SATCAN
+#define GAISLER_SATCAN 0x080
+#endif
+
+#if !defined(SATCAN_DEVNAME)
+ #undef SATCAN_DEVNAME
+ #define SATCAN_DEVNAME "/dev/satcan"
+#endif
+
+/* Enable debug output? */
+/* #define DEBUG */
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+
+/* Defines related to DMA */
+#define ALIGN_2KMEM 32*1024
+#define ALIGN_8KMEM 128*1024
+
+#define OFFSET_2K_LOW_POS 15
+#define OFFSET_8K_LOW_POS 17
+
+#define DMA_2K_DATA_SELECT (1 << 14)
+#define DMA_8K_DATA_SELECT (1 << 16)
+
+#define DMA_2K_DATA_OFFSET 16*1024
+#define DMA_8K_DATA_OFFSET 64*1024
+
+/* Core register structures and defines */
+
+/* Indexes to SatCAN registers in satcan array are declared in satcan.h*/
+/* Fields for some of the SatCAN FPGA registers */
+
+/* CmdReg0 */
+#define CAN_TODn_Int_sel (1 << 5)
+
+/* CmdReg1 */
+#define Sel_2k_8kN (1 << 0)
+
+/* Read FIFO */
+#define FIFO_Full (1 << 8)
+#define FIFO_Empty (1 << 9)
+
+/* DMA Ch_Enable */
+#define DMA_AutoInitDmaTx (1 << 3)
+#define DMA_EnTx2 (1 << 2)
+#define DMA_EnTx1 (1 << 1)
+#define DMA_EnRx (1 << 0)
+
+/* SatCAN wrapper register fields */
+#define CTRL_BT_P 9
+#define CTRL_NODENO_P 5
+#define CTRL_DIS (1 << 2)
+#define CTRL_DPS_P 1
+#define CTRL_RST (1 << 0)
+
+#define IRQ_AHB (1 << 8)
+#define IRQ_PPS (1 << 7)
+#define IRQ_M5 (1 << 6)
+#define IRQ_M4 (1 << 5)
+#define IRQ_M3 (1 << 4)
+#define IRQ_M2 (1 << 3)
+#define IRQ_M1 (1 << 2)
+#define IRQ_SYNC (1 << 1)
+#define IRQ_CAN (1 << 0)
+
+#define MSK_AHB (1 << 8)
+#define MSK_PPS (1 << 7)
+#define MSK_M5 (1 << 6)
+#define MSK_M4 (1 << 5)
+#define MSK_M3 (1 << 4)
+#define MSK_M2 (1 << 3)
+#define MSK_M1 (1 << 2)
+#define MSK_SYNC (1 << 1)
+#define MSK_CAN (1 << 0)
+
+
+
+/* Register layout of the SatCAN FPGA: the SatCAN core register file
+ * (indexed with the SATCAN_* constants from satcan.h) followed by the
+ * AMBA wrapper registers.
+ */
+struct satcan_regs {
+	volatile unsigned int satcan[32]; /* SatCAN core registers */
+	volatile unsigned int ctrl;       /* wrapper control */
+	volatile unsigned int irqpend;    /* wrapper interrupt pending */
+	volatile unsigned int irqmask;    /* wrapper interrupt mask */
+	volatile unsigned int membase;    /* DMA area AHB base address */
+};
+
+
+/* Driver-private state; one global instance (single device driver). */
+struct satcan_priv {
+	/* config */
+	void *dmaptr;           /* unaligned DMA allocation (what free() takes) */
+	unsigned char *alptr;   /* aligned DMA area programmed into hardware */
+	satcan_config *cfg;     /* local copy of the user configuration */
+
+	/* driver state */
+	rtems_id devsem;        /* protects the open flag */
+	rtems_id txsem;         /* released by the ISR on end-of-DMA events */
+	int open;               /* non-zero while the device is open */
+	int txactive;           /* TX in progress; ISR may release txsem */
+	int dmaen;              /* selected TX DMA channel(s), SATCAN_DMA_ENABLE_TXx */
+	int doff;               /* user-selected DMA data offset */
+	rtems_interval timeout; /* TX wait timeout for semaphore obtain */
+	int dmamode;            /* SATCAN_DMA_MODE_USER or _SYSTEM */
+};
+
+static struct satcan_regs *regs;
+static struct satcan_priv *priv;
+
+static rtems_device_driver satcan_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver satcan_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver satcan_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver satcan_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver satcan_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver satcan_initialize(rtems_device_major_number major, rtems_device_minor_number unused, void *arg);
+
+
+/*
+ * almalloc: allocate memory area of size sz aligned on sz boundary
+ * alptr: Utilized to return aligned pointer
+ * ptr: Unaligned pointer
+ * sz: Size of memory area
+ */
+/*
+ * almalloc: allocate memory area of size sz aligned on sz boundary
+ * alptr: Utilized to return aligned pointer; NULL on allocation failure
+ * ptr: Unaligned pointer (the one to pass to free()); NULL on failure
+ * sz: Size of memory area; assumed to be a power of two
+ *
+ * 2*sz bytes are allocated so an sz-aligned region of sz bytes always
+ * fits inside the allocation.
+ */
+static void almalloc(unsigned char **alptr, void **ptr, int sz)
+{
+	*ptr = rtems_calloc(1,2*sz);
+	if (*ptr == NULL) {
+		/* Propagate failure instead of computing an "aligned"
+		 * address from a NULL pointer.
+		 */
+		*alptr = NULL;
+		return;
+	}
+	*alptr = (unsigned char *) (((int)*ptr+sz) & ~(sz-1));
+}
+
+/* SatCAN interrupt handler (wrapper + core interrupts).
+ *
+ * Dispatches each pending wrapper IRQ bit (AHB, PPS, M1-M5, SYNC) to
+ * the corresponding optional user callback from the config structure.
+ * On a CAN core interrupt the read FIFO register is sampled once; if
+ * a TX is active and the entry is an end-of-DMA event (EOD1/EOD2) the
+ * writer blocked in satcan_write() is woken via txsem, and the raw
+ * FIFO value is forwarded to can_irq_callback when set.
+ * NOTE(review): irqpend is read only once here -- presumably the read
+ * acknowledges/clears the wrapper interrupts; confirm against the
+ * SatCAN wrapper documentation.
+ */
+static rtems_isr satcan_interrupt_handler(rtems_vector_number v)
+{
+	unsigned int irq;
+	unsigned int fifo;
+
+	irq = regs->irqpend;
+
+	if (irq & IRQ_AHB && priv->cfg->ahb_irq_callback) {
+		priv->cfg->ahb_irq_callback();
+	}
+	if (irq & IRQ_PPS && priv->cfg->pps_irq_callback) {
+		priv->cfg->pps_irq_callback();
+	}
+	if (irq & IRQ_M5 && priv->cfg->m5_irq_callback) {
+		priv->cfg->m5_irq_callback();
+	}
+	if (irq & IRQ_M4 && priv->cfg->m4_irq_callback) {
+		priv->cfg->m4_irq_callback();
+	}
+	if (irq & IRQ_M3 && priv->cfg->m3_irq_callback) {
+		priv->cfg->m3_irq_callback();
+	}
+	if (irq & IRQ_M2 && priv->cfg->m2_irq_callback) {
+		priv->cfg->m2_irq_callback();
+	}
+	if (irq & IRQ_M1 && priv->cfg->m1_irq_callback) {
+		priv->cfg->m1_irq_callback();
+	}
+	if (irq & IRQ_SYNC && priv->cfg->sync_irq_callback) {
+		priv->cfg->sync_irq_callback();
+	}
+	if (irq & IRQ_CAN) {
+		fifo = regs->satcan[SATCAN_FIFO];
+		/* Wake a blocked writer on an end-of-DMA FIFO entry */
+		if (!(fifo & FIFO_Empty) && priv->txactive &&
+		    (((fifo & 0xff) == SATCAN_IRQ_EOD1) || ((fifo & 0xff) == SATCAN_IRQ_EOD2))) {
+			rtems_semaphore_release(priv->txsem);
+		}
+		if (priv->cfg->can_irq_callback)
+			priv->cfg->can_irq_callback(fifo);
+	}
+}
+
+
+
+/* SatCAN IOCTL handler.
+ *
+ * Supports: switching between the 2K and 8K message DMA areas
+ * (reallocates the DMA buffer and reprograms membase/RAM_BASE), raw
+ * register get/set/or/and access via satcan_regmod, TX DMA channel
+ * selection, user/system DMA mode, manual DMA (de)activation in user
+ * mode, the DMA data offset and the TX timeout.
+ * Returns RTEMS_INVALID_NAME (EINVAL) for bad arguments/registers,
+ * RTEMS_NO_MEMORY when a DMA area cannot be allocated and
+ * RTEMS_NOT_DEFINED for unknown commands.
+ */
+static rtems_device_driver satcan_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	rtems_libio_ioctl_args_t *ioarg = (rtems_libio_ioctl_args_t*)arg;
+	int *value;
+	rtems_interval *timeout;
+	satcan_regmod *regmod;
+
+	DBG("SatCAN: IOCTL %d\n\r", ioarg->command);
+
+	ioarg->ioctl_return = 0;
+	switch(ioarg->command) {
+	case SATCAN_IOC_DMA_2K:
+		DBG("SatCAN: ioctl: setting 2K DMA mode\n\r");
+		/* Replace the old DMA area with a 2K-message sized one */
+		free(priv->dmaptr);
+		almalloc(&priv->alptr, &priv->dmaptr, ALIGN_2KMEM);
+		if (priv->dmaptr == NULL) {
+			printk("SatCAN: Failed to allocate DMA memory\n\r");
+			return RTEMS_NO_MEMORY;
+		}
+
+		regs->membase = (unsigned int)priv->alptr;
+		regs->satcan[SATCAN_RAM_BASE] = (unsigned int)priv->alptr >> OFFSET_2K_LOW_POS;
+		regs->satcan[SATCAN_CMD1] = regs->satcan[SATCAN_CMD1] | Sel_2k_8kN;
+		break;
+
+	case SATCAN_IOC_DMA_8K:
+		DBG("SatCAN: ioctl: setting 8K DMA mode\n\r");
+		/* Replace the old DMA area with an 8K-message sized one */
+		free(priv->dmaptr);
+		almalloc(&priv->alptr, &priv->dmaptr, ALIGN_8KMEM);
+		if (priv->dmaptr == NULL) {
+			printk("SatCAN: Failed to allocate DMA memory\n\r");
+			return RTEMS_NO_MEMORY;
+		}
+
+		regs->membase = (unsigned int)priv->alptr;
+		regs->satcan[SATCAN_RAM_BASE] = (unsigned int)priv->alptr >> OFFSET_8K_LOW_POS;
+		regs->satcan[SATCAN_CMD1] = regs->satcan[SATCAN_CMD1] & ~Sel_2k_8kN;
+		break;
+
+	case SATCAN_IOC_GET_REG:
+		/* Get regmod structure from argument */
+		regmod = (satcan_regmod*)ioarg->buffer;
+		DBG("SatCAN: ioctl: getting register %d\n\r", regmod->reg);
+		if (regmod->reg < 0)
+			return RTEMS_INVALID_NAME;
+		else if (regmod->reg <= SATCAN_FILTER_STOP)
+			regmod->val = regs->satcan[regmod->reg];
+		else if (regmod->reg == SATCAN_WCTRL)
+			regmod->val = regs->ctrl;
+		else if (regmod->reg == SATCAN_WIPEND)
+			regmod->val = regs->irqpend;
+		else if (regmod->reg == SATCAN_WIMASK)
+			regmod->val = regs->irqmask;
+		else if (regmod->reg == SATCAN_WAHBADDR)
+			regmod->val = regs->membase;
+		else
+			return RTEMS_INVALID_NAME;
+		break;
+
+	case SATCAN_IOC_SET_REG:
+		/* Get regmod structure from argument */
+		regmod = (satcan_regmod*)ioarg->buffer;
+		DBG("SatCAN: ioctl: setting register %d, value %x\n\r",
+		    regmod->reg, regmod->val);
+		if (regmod->reg < 0)
+			return RTEMS_INVALID_NAME;
+		else if (regmod->reg <= SATCAN_FILTER_STOP)
+			regs->satcan[regmod->reg] = regmod->val;
+		else if (regmod->reg == SATCAN_WCTRL)
+			regs->ctrl = regmod->val;
+		else if (regmod->reg == SATCAN_WIPEND)
+			regs->irqpend = regmod->val;
+		else if (regmod->reg == SATCAN_WIMASK)
+			regs->irqmask = regmod->val;
+		else if (regmod->reg == SATCAN_WAHBADDR)
+			regs->membase = regmod->val;
+		else
+			return RTEMS_INVALID_NAME;
+		break;
+
+	case SATCAN_IOC_OR_REG:
+		/* Get regmod structure from argument */
+		regmod = (satcan_regmod*)ioarg->buffer;
+		DBG("SatCAN: ioctl: or:ing register %d, with value %x\n\r",
+		    regmod->reg, regmod->val);
+		if (regmod->reg < 0)
+			return RTEMS_INVALID_NAME;
+		else if (regmod->reg <= SATCAN_FILTER_STOP)
+			regs->satcan[regmod->reg] |= regmod->val;
+		else if (regmod->reg == SATCAN_WCTRL)
+			regs->ctrl |= regmod->val;
+		else if (regmod->reg == SATCAN_WIPEND)
+			regs->irqpend |= regmod->val;
+		else if (regmod->reg == SATCAN_WIMASK)
+			regs->irqmask |= regmod->val;
+		else if (regmod->reg == SATCAN_WAHBADDR)
+			regs->membase |= regmod->val;
+		else
+			return RTEMS_INVALID_NAME;
+		break;
+
+	case SATCAN_IOC_AND_REG:
+		/* Get regmod structure from argument */
+		regmod = (satcan_regmod*)ioarg->buffer;
+		DBG("SatCAN: ioctl: masking register %d, with value %x\n\r",
+		    regmod->reg, regmod->val);
+		if (regmod->reg < 0)
+			return RTEMS_INVALID_NAME;
+		else if (regmod->reg <= SATCAN_FILTER_STOP)
+			regs->satcan[regmod->reg] &= regmod->val;
+		else if (regmod->reg == SATCAN_WCTRL)
+			regs->ctrl &= regmod->val;
+		else if (regmod->reg == SATCAN_WIPEND)
+			regs->irqpend &= regmod->val;
+		else if (regmod->reg == SATCAN_WIMASK)
+			regs->irqmask &= regmod->val;
+		else if (regmod->reg == SATCAN_WAHBADDR)
+			regs->membase &= regmod->val;
+		else
+			return RTEMS_INVALID_NAME;
+		break;
+
+	case SATCAN_IOC_EN_TX1_DIS_TX2:
+		priv->dmaen = SATCAN_DMA_ENABLE_TX1;
+		break;
+
+	case SATCAN_IOC_EN_TX2_DIS_TX1:
+		priv->dmaen = SATCAN_DMA_ENABLE_TX2;
+		break;
+
+	case SATCAN_IOC_GET_DMA_MODE:
+		value = (int*)ioarg->buffer;
+		*value = priv->dmamode;
+		break;
+
+	case SATCAN_IOC_SET_DMA_MODE:
+		value = (int*)ioarg->buffer;
+		if (*value != SATCAN_DMA_MODE_USER && *value != SATCAN_DMA_MODE_SYSTEM) {
+			DBG("SatCAN: ioctl: invalid DMA mode\n\r");
+			return RTEMS_INVALID_NAME;
+		}
+		priv->dmamode = *value;
+		break;
+
+	case SATCAN_IOC_ACTIVATE_DMA:
+		/* Manual DMA control is only allowed in user mode */
+		if (priv->dmamode != SATCAN_DMA_MODE_USER) {
+			DBG("SatCAN: ioctl: ACTIVATE_DMA: not in user mode\n\r");
+			return RTEMS_INVALID_NAME;
+		}
+		value = (int*)ioarg->buffer;
+		if (*value != SATCAN_DMA_ENABLE_TX1 && *value != SATCAN_DMA_ENABLE_TX2) {
+			DBG("SatCAN: ioctl: ACTIVATE_DMA: Illegal channel\n\r");
+			return RTEMS_INVALID_NAME;
+		}
+		regs->satcan[SATCAN_DMA] |= *value << 1;
+		break;
+
+	case SATCAN_IOC_DEACTIVATE_DMA:
+		/* Manual DMA control is only allowed in user mode */
+		if (priv->dmamode != SATCAN_DMA_MODE_USER) {
+			DBG("SatCAN: ioctl: DEACTIVATE_DMA: not in user mode\n\r");
+			return RTEMS_INVALID_NAME;
+		}
+		value = (int*)ioarg->buffer;
+		if (*value != SATCAN_DMA_ENABLE_TX1 && *value != SATCAN_DMA_ENABLE_TX2) {
+			DBG("SatCAN: ioctl: DEACTIVATE_DMA: Illegal channel\n\r");
+			return RTEMS_INVALID_NAME;
+		}
+		regs->satcan[SATCAN_DMA] &= ~(*value << 1);
+		break;
+
+	case SATCAN_IOC_GET_DOFFSET:
+		value = (int*)ioarg->buffer;
+		*value = priv->doff;
+		break;
+
+	case SATCAN_IOC_SET_DOFFSET:
+		value = (int*)ioarg->buffer;
+		priv->doff = *value;
+		break;
+
+	case SATCAN_IOC_GET_TIMEOUT:
+		timeout = (rtems_interval*)ioarg->buffer;
+		*timeout = priv->timeout;
+		break;
+
+	case SATCAN_IOC_SET_TIMEOUT:
+		timeout = (rtems_interval*)ioarg->buffer;
+		priv->timeout = *timeout;
+		break;
+
+	default:
+		return RTEMS_NOT_DEFINED;
+	}
+
+	return RTEMS_SUCCESSFUL;
+}
+
+/* Queue one or more satcan_msg structures for transmission.
+ *
+ * rw_args->buffer must hold a whole number of satcan_msg structures;
+ * headers are copied into the aligned DMA area and payloads into the
+ * 2K/8K data region of the same area (selected by CMD1's Sel_2k_8kN).
+ * In system DMA mode a TX channel must have been selected and the
+ * call blocks on txsem until the end-of-DMA interrupt or the timeout
+ * expires; in user DMA mode the call returns right after the buffers
+ * are set up. Fails with EIO if a TX DMA is already running.
+ * NOTE(review): messages are written with a fixed 8-byte stride
+ * (8*msgindex) -- this assumes SATCAN_HEADER_SIZE and
+ * SATCAN_PAYLOAD_SIZE fit in 8 bytes; confirm against satcan.h.
+ */
+static rtems_device_driver satcan_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	int i;
+	int doff;
+	int msgindex;
+	int messages;
+	rtems_libio_rw_args_t *rw_args=(rtems_libio_rw_args_t *) arg;
+	satcan_msg *msgs;
+	rtems_status_code status;
+
+	DBG("SatCAN: Writing %d bytes from %p\n\r",rw_args->count,rw_args->buffer);
+
+	if ((rw_args->count < sizeof(satcan_msg)) || (!rw_args->buffer)) {
+		DBG("SatCAN: write: returning EINVAL\n\r");
+		return RTEMS_INVALID_NAME; /* EINVAL */
+	}
+
+	messages = rw_args->count / sizeof(satcan_msg);
+	msgs = (satcan_msg*)rw_args->buffer;
+
+	/* Check that size matches any number of satcan_msg */
+	if (rw_args->count % sizeof(satcan_msg)) {
+		DBG("SatCAN: write: count can not be evenly divided with satcan_msg size\n\r");
+		return RTEMS_INVALID_NAME; /* EINVAL */
+	}
+
+
+	/* DMA channel must be set if we are in system DMA mode */
+	DBG("SatCAN: write: dma channel select is %x\n\r", priv->dmaen);
+	if (!priv->dmaen && priv->dmamode == SATCAN_DMA_MODE_SYSTEM)
+		return RTEMS_INVALID_NAME; /* EINVAL */
+
+	/* DMA must not be active */
+	if (regs->satcan[SATCAN_DMA] & (DMA_EnTx1 | DMA_EnTx2 | DMA_AutoInitDmaTx)) {
+		DBG("SatCAN: write: DMA was active\n\r");
+		rw_args->bytes_moved = 0;
+		return RTEMS_IO_ERROR; /* EIO */
+	}
+
+	/* Data region offset depends on the configured 2K/8K DMA mode */
+	doff = regs->satcan[SATCAN_CMD1] & Sel_2k_8kN ? DMA_2K_DATA_OFFSET : DMA_8K_DATA_OFFSET;
+
+	for (msgindex = 0; msgindex < messages; msgindex++) {
+		/* Place header in DMA area */
+		for (i = 0; i < SATCAN_HEADER_SIZE; i++) {
+			priv->alptr[priv->doff+8*msgindex+i] = msgs[msgindex].header[i];
+		}
+
+		/* Place data in DMA area */
+		for (i = 0; i < SATCAN_PAYLOAD_SIZE; i++)
+			priv->alptr[priv->doff+doff+8*msgindex+i] = msgs[msgindex].payload[i];
+	}
+
+	/* Program current/end pointers of the selected TX channel(s);
+	 * in user DMA mode both channels are prepared.
+	 */
+	if ((priv->dmaen & SATCAN_DMA_ENABLE_TX1) || priv->dmamode == SATCAN_DMA_MODE_USER) {
+		regs->satcan[SATCAN_DMA_TX_1_CUR] = 0;
+		regs->satcan[SATCAN_DMA_TX_1_END] = messages<<3;
+	}
+
+	if ((priv->dmaen & SATCAN_DMA_ENABLE_TX2) || priv->dmamode == SATCAN_DMA_MODE_USER) {
+		regs->satcan[SATCAN_DMA_TX_2_CUR] = 0;
+		regs->satcan[SATCAN_DMA_TX_2_END] = messages<<3;
+	}
+
+	/* If we are in DMA user mode we are done here, otherwise we block */
+	if (priv->dmamode == SATCAN_DMA_MODE_SYSTEM) {
+		priv->txactive = 1;
+
+		/* Enable DMA */
+		regs->satcan[SATCAN_DMA] |= priv->dmaen << 1;
+
+		/* Wait for TX interrupt */
+		status = rtems_semaphore_obtain(priv->txsem, RTEMS_WAIT, priv->timeout);
+
+		priv->txactive = 0;
+
+		/* Disable activated Tx DMA */
+		regs->satcan[SATCAN_DMA] &= ~(priv->dmaen << 1);
+
+		if (status != RTEMS_SUCCESSFUL) {
+			rw_args->bytes_moved = 0;
+			return status;
+		}
+	}
+
+	rw_args->bytes_moved = rw_args->count;
+
+	return RTEMS_SUCCESSFUL;
+}
+
+/* Read satcan messages from the receive DMA area.
+ *
+ * The buffer is both input and output: the caller pre-fills header
+ * bytes 0-1 of each entry with the CAN id to fetch, and that id
+ * selects the slot in the DMA header area (id << 3). The header is
+ * copied back, the New Message Marker byte in the DMA area is
+ * cleared, and the payload is copied from the 2K/8K data region.
+ * count must be a whole number of satcan_msg structures.
+ */
+static rtems_device_driver satcan_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	char *buf;
+	int i;
+	int canid;
+	int messages;
+	rtems_libio_rw_args_t *rw_args = (rtems_libio_rw_args_t*)arg;
+	satcan_msg *ret;
+
+	/* Check that there is room for the return */
+	if (rw_args->count < sizeof(satcan_msg)) {
+		DBG("SatCAN: read: length of buffer must be at least %d, current is %d\n\r",
+		    sizeof(satcan_msg) + sizeof(int), rw_args->count);
+		return RTEMS_INVALID_NAME; /* -EINVAL */
+	}
+
+	/* Check that size matches any number of satcan_msg */
+	if (rw_args->count % sizeof(satcan_msg)) {
+		DBG("SatCAN: read: count can not be evenly divided with satcan_msg size\n\r");
+		return RTEMS_INVALID_NAME; /* EINVAL */
+	}
+
+	messages = rw_args->count / sizeof(satcan_msg);
+	ret = (satcan_msg*)rw_args->buffer;
+
+	DBG("SatCAN: read: reading %d messages to %p\n\r", messages, ret);
+
+	for (i = 0; i < messages; i++) {
+		/* CAN id requested by the caller selects the DMA slot */
+		canid = (ret[i].header[1] << 8) | ret[i].header[0];
+
+		/* Copy message header from DMA header area to buffer */
+		buf = (char*)((int)priv->alptr | (canid << 3));
+		memcpy(ret[i].header, buf, SATCAN_HEADER_SIZE);
+
+		DBG("SatCAN: read: copied header from %p to %p\n\r", buf, ret[i].header);
+
+		/* Clear New Message Marker */
+		buf[SATCAN_HEADER_NMM_POS] = 0;
+
+		/* Copy message payload from DMA data area to buffer */
+		buf = (char*)((int)buf |
+		      (regs->satcan[SATCAN_CMD1] & Sel_2k_8kN ? DMA_2K_DATA_SELECT : DMA_8K_DATA_SELECT));
+		memcpy(ret[i].payload, buf, SATCAN_PAYLOAD_SIZE);
+
+		DBG("SatCAN: read: copied payload from %p to %p\n\r", buf, ret[i].payload);
+	}
+	rw_args->bytes_moved = rw_args->count;
+
+	return RTEMS_SUCCESSFUL;
+}
+
+
+/* Close the device: mask all interrupts, disable reception and DMA
+ * and restore the driver state to its power-up defaults. Always
+ * returns RTEMS_SUCCESSFUL; a close on an already-closed device is a
+ * no-op.
+ */
+static rtems_device_driver satcan_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	DBG("SatCAN: Closing %d\n\r",minor);
+
+	if (!priv->open)
+		return RTEMS_SUCCESSFUL;
+
+	/* Mask wrapper and core interrupts, stop RX and DMA */
+	regs->irqmask = 0;
+	regs->satcan[SATCAN_INT_EN] = 0;
+	regs->satcan[SATCAN_RX] = 0;
+	regs->satcan[SATCAN_DMA] = 0;
+
+	/* Back to default driver state */
+	priv->open = 0;
+	priv->dmaen = 0;
+	priv->doff = 0;
+	priv->timeout = RTEMS_NO_TIMEOUT;
+	priv->dmamode = SATCAN_DMA_MODE_SYSTEM;
+
+	return RTEMS_SUCCESSFUL;
+}
+
+
+/* Open the SatCAN device (single user at a time).
+ *
+ * The open flag is guarded by devsem; a second open returns
+ * RTEMS_RESOURCE_IN_USE (EBUSY). On success AHB/CAN interrupts are
+ * enabled in the wrapper, EOD1/EOD2/critical interrupts in the core,
+ * can_int is selected as IRQ source and RX DMA plus reception are
+ * enabled.
+ */
+static rtems_device_driver satcan_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	DBG("SatCAN: Opening %d\n\r",minor);
+
+	rtems_semaphore_obtain(priv->devsem,RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+	if (priv->open) {
+		rtems_semaphore_release(priv->devsem);
+		return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+	}
+	priv->open = 1;
+	rtems_semaphore_release(priv->devsem);
+
+	/* Enable AHB and CAN IRQs in wrapper and EOD1, EOD2 and CAN critical IRQs in SatCAN core */
+	regs->irqmask = MSK_AHB | MSK_CAN;
+	regs->satcan[SATCAN_INT_EN] = ((1 << SATCAN_IRQ_EOD1) | (1 << SATCAN_IRQ_EOD2) |
+				(1 << SATCAN_IRQ_CRITICAL));
+
+	/* Select can_int as IRQ source */
+	regs->satcan[SATCAN_CMD0] = CAN_TODn_Int_sel;
+	/* CAN RX DMA Enable */
+	regs->satcan[SATCAN_DMA] = 1;
+	/* CAN RX Enable */
+	regs->satcan[SATCAN_RX] = 1;
+
+	DBG("SatCAN: Opening %d success\n\r",minor);
+
+	return RTEMS_SUCCESSFUL;
+}
+
+/* RTEMS driver initialize entry point for SatCAN.
+ *
+ * Locates the core in the AMBA plug&play tables, registers the device
+ * node, resets the core, allocates the (default 2K message) DMA area
+ * and creates the device/TX semaphores. On failure every resource
+ * acquired so far is released (fixes leaks of the DMA area and of
+ * devsem in the original error paths).
+ */
+static rtems_device_driver satcan_initialize(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	struct ambapp_ahb_info d;
+	char fs_name[20];
+	rtems_status_code status;
+
+	DBG("SatCAN: Initialize..\n\r");
+
+	strcpy(fs_name, SATCAN_DEVNAME);
+
+	/* Find core and initialize register pointer */
+	if (!ambapp_find_ahbslv(&ambapp_plb, VENDOR_GAISLER, GAISLER_SATCAN, &d)) {
+		printk("SatCAN: Failed to find SatCAN core\n\r");
+		return -1;
+	}
+
+	status = rtems_io_register_name(fs_name, major, minor);
+	if (RTEMS_SUCCESSFUL != status)
+		rtems_fatal_error_occurred(status);
+
+	regs = (struct satcan_regs*)d.start[0];
+
+	/* Set node number and DPS */
+	regs->ctrl |= ((priv->cfg->nodeno & 0xf) << 5) | (priv->cfg->dps << 1);
+
+	/* Reset core */
+	regs->ctrl |= CTRL_RST;
+
+	/* Allocate DMA area */
+	almalloc(&priv->alptr, &priv->dmaptr, ALIGN_2KMEM);
+	if (priv->dmaptr == NULL) {
+		printk("SatCAN: Failed to allocate DMA memory\n\r");
+		free(priv->cfg);
+		free(priv);
+		return -1;
+	}
+
+	/* Wait until core reset has completed */
+	while (regs->ctrl & CTRL_RST)
+		;
+
+	/* Initialize core registers, default is 2K messages */
+	regs->membase = (unsigned int)priv->alptr;
+	regs->satcan[SATCAN_RAM_BASE] = (unsigned int)priv->alptr >> 15;
+
+	DBG("regs->membase = %x\n\r", (unsigned int)priv->alptr);
+	DBG("regs->satcan[SATCAN_RAM_BASE] = %x\n\r", (unsigned int)priv->alptr >> 15);
+
+	status = rtems_semaphore_create(
+		rtems_build_name('S', 'd', 'v', '0'),
+		1,
+		RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | RTEMS_NO_INHERIT_PRIORITY | \
+		RTEMS_NO_PRIORITY_CEILING,
+		0,
+		&priv->devsem);
+	if (status != RTEMS_SUCCESSFUL) {
+		printk("SatCAN: Failed to create dev semaphore (%d)\n\r", status);
+		free(priv->dmaptr); /* do not leak the DMA area */
+		free(priv->cfg);
+		free(priv);
+		return RTEMS_UNSATISFIED;
+	}
+	status = rtems_semaphore_create(
+		rtems_build_name('S', 't', 'x', '0'),
+		0,
+		RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | RTEMS_NO_INHERIT_PRIORITY | \
+		RTEMS_NO_PRIORITY_CEILING,
+		0,
+		&priv->txsem);
+	if (status != RTEMS_SUCCESSFUL) {
+		printk("SatCAN: Failed to create tx semaphore (%d)\n\r", status);
+		rtems_semaphore_delete(priv->devsem); /* undo devsem creation */
+		free(priv->dmaptr); /* do not leak the DMA area */
+		free(priv->cfg);
+		free(priv);
+		return RTEMS_UNSATISFIED;
+	}
+
+	priv->txactive = 0;
+	priv->open = 0;
+	priv->dmaen = 0;
+	priv->doff = 0;
+	priv->timeout = RTEMS_NO_TIMEOUT;
+	priv->dmamode = SATCAN_DMA_MODE_SYSTEM;
+
+	/* Register interrupt handler */
+	set_vector(satcan_interrupt_handler, d.irq+0x10, 2);
+
+	return RTEMS_SUCCESSFUL;
+}
+
+
+
+/* RTEMS IO manager entry points of the SatCAN driver */
+#define SATCAN_DRIVER_TABLE_ENTRY { satcan_initialize, satcan_open, satcan_close, satcan_read, satcan_write, satcan_ioctl }
+
+static rtems_driver_address_table satcan_driver = SATCAN_DRIVER_TABLE_ENTRY;
+
+/* Register the SatCAN driver with the RTEMS IO manager.
+ *
+ * Allocates the driver-private structure and a local copy of the user
+ * configuration, then registers the driver table. Returns 0 on
+ * success, -1 when the private structure cannot be allocated and 1 on
+ * any later failure. All allocations are released on failure (fixes
+ * leaks of priv/priv->cfg in the original error paths).
+ */
+int satcan_register(satcan_config *conf)
+{
+	rtems_status_code r;
+	rtems_device_major_number m;
+
+	DBG("SatCAN: satcan_register called\n\r");
+
+	/* Create private structure */
+	if ((priv = grlib_malloc(sizeof(*priv))) == NULL) {
+		printk("SatCAN driver could not allocate memory for priv structure\n\r");
+		return -1;
+	}
+
+	DBG("SatCAN: Creating local copy of config structure\n\r");
+	if ((priv->cfg = grlib_malloc(sizeof(*priv->cfg))) == NULL) {
+		printk("SatCAN driver could not allocate memory for cfg structure\n\r");
+		free(priv); /* do not leak priv on failure */
+		priv = NULL;
+		return 1;
+	}
+	memcpy(priv->cfg, conf, sizeof(satcan_config));
+
+	if ((r = rtems_io_register_driver(0, &satcan_driver, &m)) == RTEMS_SUCCESSFUL) {
+		DBG("SatCAN driver successfully registered, major: %d\n\r", m);
+	} else {
+		switch(r) {
+			case RTEMS_TOO_MANY:
+				printk("SatCAN rtems_io_register_driver failed: RTEMS_TOO_MANY\n\r"); break;
+			case RTEMS_INVALID_NUMBER:
+				printk("SatCAN rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n\r"); break;
+			case RTEMS_RESOURCE_IN_USE:
+				printk("SatCAN rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n\r"); break;
+			default:
+				printk("SatCAN rtems_io_register_driver failed\n\r");
+		}
+		/* release everything allocated above */
+		free(priv->cfg);
+		free(priv);
+		priv = NULL;
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/bsps/shared/grlib/drvmgr/ambapp_bus.c b/bsps/shared/grlib/drvmgr/ambapp_bus.c
new file mode 100644
index 0000000000..931d5d2a61
--- /dev/null
+++ b/bsps/shared/grlib/drvmgr/ambapp_bus.c
@@ -0,0 +1,840 @@
+/* General part of a AMBA Plug & Play bus driver.
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ * This is the general part of the different AMBA Plug & Play
+ * drivers. The drivers are wrappers around this driver, making
+ * the code size smaller for systems with multiple AMBA Plug &
+ * Play buses.
+ *
+ * The BSP defines AMBAPPBUS_INFO_AVAIL in order to add the info routine
+ * used for debugging.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+
+#include <bsp.h>
+#include <grlib/ambapp.h>
+#include <rtems/bspIo.h>
+
+/*#define DEBUG 1*/
+#define DBG(args...)
+/*#define DBG(args...) printk(args)*/
+
+/* Minimal GPTIMER register layout (common/scaler registers only).
+ * NOTE(review): this struct appears unused in this file - possibly a
+ * leftover; confirm before removing.
+ */
+struct grlib_gptimer_regs {
+	volatile unsigned int scaler_value;   /* common timer registers */
+	volatile unsigned int scaler_reload;
+	volatile unsigned int status;
+	volatile unsigned int notused;
+};
+
+/* AMBA IMPLEMENTATION */
+
+static int ambapp_bus_init1(struct drvmgr_bus *bus);
+static int ambapp_bus_remove(struct drvmgr_bus *bus);
+static int ambapp_unite(struct drvmgr_drv *drv, struct drvmgr_dev *dev);
+static int ambapp_int_register(
+ struct drvmgr_dev *dev,
+ int index,
+ const char *info,
+ drvmgr_isr isr,
+ void *arg);
+static int ambapp_int_unregister(
+ struct drvmgr_dev *dev,
+ int index,
+ drvmgr_isr isr,
+ void *arg);
+static int ambapp_int_clear(struct drvmgr_dev *dev, int index);
+static int ambapp_int_mask(struct drvmgr_dev *dev, int index);
+static int ambapp_int_unmask(struct drvmgr_dev *dev, int index);
+static int ambapp_get_params(
+ struct drvmgr_dev *dev,
+ struct drvmgr_bus_params *params);
+static int ambapp_bus_freq_get(
+ struct drvmgr_dev *dev,
+ int options,
+ unsigned int *freq_hz);
+#ifdef AMBAPPBUS_INFO_AVAIL
+static void ambapp_dev_info(
+ struct drvmgr_dev *,
+ void (*print)(void *p, char *str),
+ void *p);
+#endif
+
+#ifdef RTEMS_SMP
+static int ambapp_int_set_affinity(
+ struct drvmgr_dev *dev,
+ int index,
+ const Processor_mask *cpus);
+#endif
+
+/* Bus operations vector handed to the driver manager for every
+ * registered AMBA PnP bus instance.
+ */
+static struct drvmgr_bus_ops ambapp_bus_ops =
+{
+	.init		=
+	{
+		/* init1 */ ambapp_bus_init1,
+		/* init2 */ NULL,
+		/* init3 */ NULL,
+		/* init4 */ NULL
+	},
+	.remove		= ambapp_bus_remove,
+	.unite		= ambapp_unite,
+	.int_register	= ambapp_int_register,
+	.int_unregister	= ambapp_int_unregister,
+	.int_clear	= ambapp_int_clear,
+	.int_mask	= ambapp_int_mask,
+#ifdef RTEMS_SMP
+	.int_set_affinity = ambapp_int_set_affinity,
+#endif
+	.int_unmask	= ambapp_int_unmask,
+	.get_params	= ambapp_get_params,
+	.get_freq	= ambapp_bus_freq_get,
+#ifdef AMBAPPBUS_INFO_AVAIL
+	.get_info_dev	= ambapp_dev_info,
+#endif
+};
+
+/* Per-bus private data: the configuration given at bus registration */
+struct ambapp_priv {
+	struct ambapp_config *config;
+};
+
+/* Match a driver against a device.
+ *
+ * Returns 1 (unite) when the bus types are compatible and the driver's
+ * {vendor, device} ID table contains the device's AMBA PnP ID, else 0.
+ */
+static int ambapp_unite(struct drvmgr_drv *drv, struct drvmgr_dev *dev)
+{
+	struct amba_drv_info *adrv;
+	struct amba_dev_id *id;
+	struct amba_dev_info *amba;
+
+	if ( !drv || !dev || !dev->parent )
+		return 0;
+
+	/* Driver and parent bus must be of the same AMBAPP flavour
+	 * (plain, RMAP or DIST) */
+	if ( ! (((drv->bus_type == DRVMGR_BUS_TYPE_AMBAPP) && (dev->parent->bus_type == DRVMGR_BUS_TYPE_AMBAPP)) ||
+	       ((drv->bus_type == DRVMGR_BUS_TYPE_AMBAPP_RMAP) && (dev->parent->bus_type == DRVMGR_BUS_TYPE_AMBAPP_RMAP)) ||
+	       ((drv->bus_type == DRVMGR_BUS_TYPE_AMBAPP_DIST) && (dev->parent->bus_type == DRVMGR_BUS_TYPE_AMBAPP_DIST)))
+	   ) {
+		return 0;
+	}
+
+	amba = (struct amba_dev_info *)dev->businfo;
+	if ( !amba )
+		return 0;
+
+	adrv = (struct amba_drv_info *)drv;
+	id = adrv->ids;
+	if ( !id )
+		return 0;
+	/* ID table is terminated by a zero vendor entry */
+	while( id->vendor != 0 ) {
+		if ( (id->vendor == amba->id.vendor) &&
+		      (id->device == amba->id.device) ) {
+			/* Unite device and driver */
+			DBG("DRV 0x%x and DEV 0x%x united\n", (unsigned int)drv, (unsigned int)dev);
+			return 1;
+		}
+		id++;
+	}
+
+	return 0;
+}
+
+/* Translate a driver-supplied IRQ index into an absolute IRQ number.
+ *
+ * index >= 0 is relative to the core's base IRQ from AMBA PnP info;
+ * index < 0 encodes the absolute IRQ number as -index.
+ * Returns the IRQ number, or -1 if the core has no IRQ assigned.
+ */
+static int ambapp_int_get(struct drvmgr_dev *dev, int index)
+{
+	int irq;
+
+	/* Relative (positive) or absolute (negative) IRQ number */
+	if ( index >= 0 ) {
+		/* IRQ Index relative to Cores base IRQ */
+
+		/* Get Base IRQ */
+		irq = ((struct amba_dev_info *)dev->businfo)->info.irq;
+		if ( irq < 0 )
+			return -1;
+		irq += index;
+	} else {
+		/* Absolute IRQ number */
+		irq = -index;
+	}
+	return irq;
+}
+
+/* Register an interrupt handler for a device on this AMBAPP bus.
+ *
+ * Translates the IRQ index and delegates to the bus provider's
+ * int_register operation, if installed. Returns a DRVMGR_* code.
+ */
+static int ambapp_int_register(
+	struct drvmgr_dev *dev,
+	int index,
+	const char *info,
+	drvmgr_isr isr,
+	void *arg)
+{
+	struct ambapp_priv *priv;
+	int irq;
+
+	priv = dev->parent->priv;
+
+	/* Get IRQ number from index and device information */
+	irq = ambapp_int_get(dev, index);
+	if ( irq < 0 )
+		return DRVMGR_EINVAL;
+
+	DBG("Register interrupt on 0x%x for dev 0x%x (IRQ: %d)\n",
+		(unsigned int)dev->parent->dev, (unsigned int)dev, irq);
+
+	if ( priv->config->ops->int_register ) {
+		/* Let device override driver default */
+		return priv->config->ops->int_register(dev, irq, info, isr, arg);
+	} else {
+		return DRVMGR_ENOSYS;
+	}
+}
+
+/* Unregister a previously registered interrupt handler.
+ * Delegates to the bus provider's int_unregister operation.
+ */
+static int ambapp_int_unregister(
+	struct drvmgr_dev *dev,
+	int index,
+	drvmgr_isr isr,
+	void *arg)
+{
+	struct ambapp_priv *priv;
+	int irq;
+
+	priv = dev->parent->priv;
+
+	/* Get IRQ number from index and device information */
+	irq = ambapp_int_get(dev, index);
+	if ( irq < 0 )
+		return DRVMGR_EINVAL;
+
+	DBG("Unregister interrupt on 0x%x for dev 0x%x (IRQ: %d)\n",
+		(unsigned int)dev->parent->dev, (unsigned int)dev, irq);
+
+	if ( priv->config->ops->int_unregister ) {
+		/* Let device override driver default */
+		return priv->config->ops->int_unregister(dev, irq, isr, arg);
+	} else {
+		return DRVMGR_ENOSYS;
+	}
+}
+
+/* Clear (acknowledge) a pending interrupt for a device.
+ * Delegates to the bus provider's int_clear operation.
+ */
+static int ambapp_int_clear(
+	struct drvmgr_dev *dev,
+	int index)
+{
+	struct ambapp_priv *priv;
+	int irq;
+
+	priv = dev->parent->priv;
+
+	/* Get IRQ number from index and device information */
+	irq = ambapp_int_get(dev, index);
+	if ( irq < 0 )
+		return -1; /* NOTE(review): siblings return DRVMGR_EINVAL here - confirm intent */
+
+	DBG("Clear interrupt on 0x%x for dev 0x%x (IRQ: %d)\n",
+		(unsigned int)dev->parent->dev, (unsigned int)dev, irq);
+
+	if ( priv->config->ops->int_clear ) {
+		/* Let device override driver default */
+		return priv->config->ops->int_clear(dev, irq);
+	} else {
+		return DRVMGR_ENOSYS;
+	}
+}
+
+/* Mask (disable) a device interrupt.
+ * Delegates to the bus provider's int_mask operation.
+ */
+static int ambapp_int_mask(
+	struct drvmgr_dev *dev,
+	int index)
+{
+	struct ambapp_priv *priv;
+	int irq;
+
+	priv = dev->parent->priv;
+
+	/* Get IRQ number from index and device information */
+	irq = ambapp_int_get(dev, index);
+	if ( irq < 0 )
+		return -1; /* NOTE(review): siblings return DRVMGR_EINVAL here - confirm intent */
+
+	DBG("MASK interrupt on 0x%x for dev 0x%x (IRQ: %d)\n",
+		(unsigned int)dev->parent->dev, (unsigned int)dev, irq);
+
+	if ( priv->config->ops->int_mask ) {
+		/* Let device override driver default */
+		return priv->config->ops->int_mask(dev, irq);
+	} else {
+		return DRVMGR_ENOSYS;
+	}
+}
+
+/* Unmask (enable) a device interrupt.
+ * Delegates to the bus provider's int_unmask operation.
+ */
+static int ambapp_int_unmask(
+	struct drvmgr_dev *dev,
+	int index)
+{
+	struct ambapp_priv *priv;
+	int irq;
+
+	priv = dev->parent->priv;
+
+	/* Get IRQ number from index and device information */
+	irq = ambapp_int_get(dev, index);
+	if ( irq < 0 )
+		return DRVMGR_EINVAL;
+
+	DBG("UNMASK interrupt on 0x%x for dev 0x%x (IRQ: %d)\n",
+		(unsigned int)dev->parent->dev, (unsigned int)dev, irq);
+
+	if ( priv->config->ops->int_unmask ) {
+		/* Let device override driver default */
+		return priv->config->ops->int_unmask(dev, irq);
+	} else {
+		return DRVMGR_ENOSYS;
+	}
+}
+
+/* Assign frequency to an AMBA Bus.
+ *
+ * \param amba_interface which PnP interface of the core reported the
+ *                       frequency (DEV_AHB_MST/DEV_AHB_SLV/DEV_APB_SLV)
+ * \param freq_hz        measured local bus frequency in Hz (0 is ignored)
+ */
+void ambapp_bus_freq_register(
+	struct drvmgr_dev *dev,
+	int amba_interface,
+	unsigned int freq_hz
+	)
+{
+	struct ambapp_priv *priv = (struct ambapp_priv *)dev->parent->priv;
+	struct ambapp_dev *adev;
+	struct amba_dev_info *pnp = dev->businfo;
+
+	if ( freq_hz == 0 )
+		return;
+
+	/* Recover the enclosing struct ambapp_dev from the devinfo pointer:
+	 * the interface info is stored immediately after its ambapp_dev
+	 * header, so step back sizeof(struct ambapp_dev) bytes.
+	 * NOTE(review): the (unsigned int) pointer cast assumes a 32-bit
+	 * target (LEON/SPARC) - not portable to 64-bit.
+	 */
+	if ( amba_interface == DEV_AHB_MST ) {
+		adev = (struct ambapp_dev *)
+			((unsigned int)pnp->info.ahb_mst -
+				sizeof(struct ambapp_dev));
+	} else if ( amba_interface == DEV_AHB_SLV ) {
+		adev = (struct ambapp_dev *)
+			((unsigned int)pnp->info.ahb_slv -
+				sizeof(struct ambapp_dev));
+	} else if ( amba_interface == DEV_APB_SLV ) {
+		adev = (struct ambapp_dev *)
+			((unsigned int)pnp->info.apb_slv -
+				sizeof(struct ambapp_dev));
+	} else {
+		return;
+	}
+
+	/* Calculate Top bus frequency from lower part. The frequency comes
+	 * from some kind of hardware able to report local bus frequency.
+	 */
+	ambapp_freq_init(priv->config->abus, adev, freq_hz);
+}
+
+/* Get the frequency (Hz) of one interface of a core.
+ *
+ * \param options  which interface (DEV_AHB_MST/DEV_AHB_SLV/DEV_APB_SLV)
+ * \return 0 on success with *freq_hz set, -1 on error (*freq_hz = 0)
+ */
+static int ambapp_bus_freq_get(
+	struct drvmgr_dev *dev,
+	int options,
+	unsigned int *freq_hz)
+{
+	struct ambapp_priv *priv = (struct ambapp_priv *)dev->parent->priv;
+	struct ambapp_dev *adev;
+	struct amba_dev_info *pnp = dev->businfo;
+
+	/* Recover the enclosing struct ambapp_dev, see
+	 * ambapp_bus_freq_register() for details on the pointer math. */
+	if ( options == DEV_AHB_MST ) {
+		adev = (struct ambapp_dev *)
+			((unsigned int)pnp->info.ahb_mst -
+				sizeof(struct ambapp_dev));
+	} else if ( options == DEV_AHB_SLV ) {
+		adev = (struct ambapp_dev *)
+			((unsigned int)pnp->info.ahb_slv -
+				sizeof(struct ambapp_dev));
+	} else if ( options == DEV_APB_SLV ) {
+		adev = (struct ambapp_dev *)
+			((unsigned int)pnp->info.apb_slv -
+				sizeof(struct ambapp_dev));
+	} else {
+		*freq_hz = 0;
+		return -1;
+	}
+
+	/* Calculate core/bus frequency from top most bus frequency. */
+	*freq_hz = ambapp_freq_get(priv->config->abus, adev);
+	if ( *freq_hz == 0 )
+		return -1;
+	return 0;
+}
+
+/* Fetch bus parameters for a device.
+ *
+ * The actual implementation is supplied by the bus provider via the
+ * get_params operation; when no operation is installed -1 is returned.
+ */
+static int ambapp_get_params(
+	struct drvmgr_dev *dev,
+	struct drvmgr_bus_params *params)
+{
+	struct ambapp_priv *businfo = dev->parent->priv;
+	int (*get_params_op)(struct drvmgr_dev *, struct drvmgr_bus_params *);
+
+	get_params_op = businfo->config->ops->get_params;
+	if ( get_params_op == NULL )
+		return -1;
+
+	/* Let device override driver default */
+	return get_params_op(dev, params);
+}
+
+#ifdef AMBAPPBUS_INFO_AVAIL
+/* Debug helper: print a human readable description of one AMBA PnP
+ * device (IDs, version, interfaces, frequencies, address ranges) one
+ * line at a time through the caller supplied print_line callback.
+ */
+static void ambapp_dev_info(
+	struct drvmgr_dev *dev,
+	void (*print_line)(void *p, char *str),
+	void *p)
+{
+	struct amba_dev_info *devinfo;
+	struct ambapp_core *core;
+	char buf[64];
+	int ver, i;
+	char *str1, *str2, *str3;
+	unsigned int ahbmst_freq, ahbslv_freq, apbslv_freq;
+
+	if (!dev)
+		return;
+
+	devinfo = (struct amba_dev_info *)dev->businfo;
+	if (!devinfo)
+		return;
+	core = &devinfo->info;
+
+	print_line(p, "AMBA PnP DEVICE");
+
+	/* Vendor/device IDs resolved to short names from the AMBA data base */
+	str1 = ambapp_vendor_id2str(devinfo->id.vendor);
+	if (str1 == NULL)
+		str1 = "unknown";
+	sprintf(buf, "VENDOR ID:   0x%04x  (%s)", devinfo->id.vendor, str1);
+	print_line(p, buf);
+
+	str1 = ambapp_device_id2str(devinfo->id.vendor, devinfo->id.device);
+	if (str1 == NULL)
+		str1 = "unknown";
+	sprintf(buf, "DEVICE ID:   0x%04x  (%s)", devinfo->id.device, str1);
+	print_line(p, buf);
+
+	/* Collect version and frequency per present interface; the version
+	 * shown is that of the last interface checked. */
+	ahbmst_freq = ahbslv_freq = apbslv_freq = 0;
+	ver = 0;
+	str1 = str2 = str3 = "";
+	if (core->ahb_mst) {
+		str1 = "AHBMST ";
+		ver = core->ahb_mst->ver;
+		ambapp_bus_freq_get(dev, DEV_AHB_MST, &ahbmst_freq);
+	}
+	if (core->ahb_slv) {
+		str2 = "AHBSLV ";
+		ver = core->ahb_slv->ver;
+		ambapp_bus_freq_get(dev, DEV_AHB_SLV, &ahbslv_freq);
+	}
+	if (core->apb_slv) {
+		str3 = "APBSLV";
+		ver = core->apb_slv->ver;
+		ambapp_bus_freq_get(dev, DEV_APB_SLV, &apbslv_freq);
+	}
+
+	sprintf(buf, "IRQ:         %d", ambapp_int_get(dev, 0));
+	print_line(p, buf);
+
+	sprintf(buf, "VERSION:     0x%x", ver);
+	print_line(p, buf);
+
+	sprintf(buf, "ambapp_core: %p", core);
+	print_line(p, buf);
+
+	sprintf(buf, "interfaces:  %s%s%s", str1, str2, str3);
+	print_line(p, buf);
+
+	if (ahbmst_freq != 0) {
+		sprintf(buf, "AHBMST FREQ: %dkHz", ahbmst_freq/1000);
+		print_line(p, buf);
+	}
+
+	if (ahbslv_freq != 0) {
+		sprintf(buf, "AHBSLV FREQ: %dkHz", ahbslv_freq/1000);
+		print_line(p, buf);
+	}
+
+	if (apbslv_freq != 0) {
+		sprintf(buf, "APBSLV FREQ: %dkHz", apbslv_freq/1000);
+		print_line(p, buf);
+	}
+
+	/* AHB slave address ranges (up to 4 BARs) */
+	if (core->ahb_slv) {
+		for(i=0; i<4; i++) {
+			if (core->ahb_slv->type[i] == AMBA_TYPE_AHBIO)
+				str1 = " ahbio";
+			else if (core->ahb_slv->type[i] == AMBA_TYPE_MEM)
+				str1 = "ahbmem";
+			else
+				continue;
+			sprintf(buf, " %s[%d]:  0x%08x-0x%08x", str1, i,
+				core->ahb_slv->start[i],
+				core->ahb_slv->start[i]+core->ahb_slv->mask[i]-1);
+			print_line(p, buf);
+		}
+	}
+	/* APB slave address range */
+	if (core->apb_slv) {
+		sprintf(buf, " apb:        0x%08x-0x%08x",
+			core->apb_slv->start,
+			core->apb_slv->start + core->apb_slv->mask - 1);
+		print_line(p, buf);
+	}
+}
+#endif
+
+/* Fix device in last stage and/or register additional devices.
+ * Function returns:
+ *  0  Register device as normal
+ *  1  Fixup function handles registration
+ */
+static int ambapp_dev_fixup(struct drvmgr_dev *dev, struct amba_dev_info *pnp)
+{
+	/* OCCAN speciality:
+	 *  Multiple cores are supported through the same AMBA AHB interface.
+	 *  The number of "sub cores" can be detected by decoding the AMBA
+	 *  Plug&Play version information. version = ncores. A maximum of 8
+	 *  sub cores are supported, each separated with 0x100 in between.
+	 *
+	 *  Now, lets detect sub cores.
+	 */
+	if ( (pnp->info.device == GAISLER_CANAHB) &&
+	     (pnp->info.vendor == VENDOR_GAISLER) ) {
+		struct drvmgr_dev *newdev, *devs_to_register[8];
+		struct amba_dev_info *pnpinfo;
+		int subcores;
+		int core;
+
+		devs_to_register[0] = dev;
+		/* Low 3 version bits encode (ncores - 1) */
+		subcores = (pnp->info.ahb_slv->ver & 0x7) + 1;
+		for(core = 1; core < subcores; core++) {
+			/* Clone the device and its PnP info for each sub core;
+			 * the PnP info is stored directly after the device. */
+			drvmgr_alloc_dev(&newdev, sizeof(*pnpinfo));
+			memcpy(newdev, dev, sizeof(*newdev));
+			pnpinfo = (struct amba_dev_info *)(newdev+1);
+			memcpy(pnpinfo, pnp, sizeof(*pnp));
+			pnpinfo->info.index = core;
+			pnpinfo->info.irq += core;
+			newdev->businfo = (void *)pnpinfo;
+
+			devs_to_register[core] = newdev;
+		}
+		/* Register all CAN devices */
+		for(core = 0; core < subcores; core++)
+			drvmgr_dev_register(devs_to_register[core]);
+		return 1;
+	} else if ( (pnp->info.device == GAISLER_GPIO) &&
+		    (pnp->info.vendor == VENDOR_GAISLER) ) {
+		/* PIO[N] is connected to IRQ[N]. */
+		pnp->info.irq = 0;
+	}
+	return 0;
+}
+
+/* Scan state used while grouping the AHB MST, AHB SLV and APB SLV
+ * interfaces of one core during device registration.
+ */
+struct ambapp_dev_reg_struct {
+	struct ambapp_bus *abus;	/* AMBA PnP bus being scanned */
+	struct drvmgr_bus *bus;		/* driver manager bus to register on */
+	struct ambapp_dev *ahb_mst;	/* interfaces collected so far ... */
+	struct ambapp_dev *ahb_slv;
+	struct ambapp_dev *apb_slv;
+};
+
+/* Create and register one driver manager device from the collected
+ * AMBA PnP interfaces (any subset of AHB MST / AHB SLV / APB SLV) that
+ * belong to the same core. The name and PnP info are allocated together
+ * with the device object.
+ */
+static void ambapp_core_register(
+	struct ambapp_dev *ahb_mst,
+	struct ambapp_dev *ahb_slv,
+	struct ambapp_dev *apb_slv,
+	struct ambapp_dev_reg_struct *arg
+	)
+{
+	struct drvmgr_dev *newdev;
+	struct amba_dev_info *pnpinfo;
+	unsigned short device;
+	unsigned char vendor;
+	int namelen;
+	char buf[64];
+
+	/* Take the IDs from the first present interface */
+	if ( ahb_mst ) {
+		device = ahb_mst->device;
+		vendor = ahb_mst->vendor;
+	}else if ( ahb_slv ) {
+		device = ahb_slv->device;
+		vendor = ahb_slv->vendor;
+	}else if( apb_slv ) {
+		device = apb_slv->device;
+		vendor = apb_slv->vendor;
+	} else {
+		DBG("NO DEV!\n");
+		return;
+	}
+
+	DBG("CORE REGISTER DEV [%x:%x] MST: 0x%x, SLV: 0x%x, APB: 0x%x\n", vendor, device, (unsigned int)ahb_mst, (unsigned int)ahb_slv, (unsigned int)apb_slv);
+
+	/* Get unique device name from AMBA data base by combining VENDOR and
+	 * DEVICE short names
+	 */
+	namelen = ambapp_vendev_id2str(vendor, device, buf);
+
+	/* Allocate a device: drvmgr_dev + amba_dev_info + name string are
+	 * laid out contiguously in one allocation. */
+	drvmgr_alloc_dev(&newdev, sizeof(struct amba_dev_info) + namelen);
+	pnpinfo = (struct amba_dev_info *)(newdev + 1);
+	newdev->parent = arg->bus; /* Ourselfs */
+	newdev->minor_drv = 0;
+	newdev->minor_bus = 0;
+	newdev->priv = NULL;
+	newdev->drv = NULL;
+	if (namelen > 0) {
+		newdev->name = (char *)(pnpinfo + 1);
+		strcpy(newdev->name, buf);
+	} else {
+		newdev->name = NULL;
+	}
+	newdev->next_in_drv = NULL;
+	newdev->bus = NULL;
+
+	/* Init PnP information, Assign Core interfaces with this device.
+	 * The device IRQ is taken from the last present interface that
+	 * reports a non-zero IRQ. */
+	pnpinfo->id.vendor = vendor;
+	pnpinfo->id.device = device;
+	pnpinfo->info.vendor = vendor;
+	pnpinfo->info.device = device;
+	pnpinfo->info.index = 0;
+	if ( ahb_mst ) {
+		pnpinfo->info.ahb_mst = (struct ambapp_ahb_info *)
+					ahb_mst->devinfo;
+		ambapp_alloc_dev(ahb_mst, (void *)newdev);
+		if ( pnpinfo->info.ahb_mst->irq )
+			pnpinfo->info.irq = pnpinfo->info.ahb_mst->irq;
+	}
+	if ( ahb_slv ) {
+		pnpinfo->info.ahb_slv = (struct ambapp_ahb_info *)
+					ahb_slv->devinfo;
+		ambapp_alloc_dev(ahb_slv, (void *)newdev);
+		if ( pnpinfo->info.ahb_slv->irq )
+			pnpinfo->info.irq = pnpinfo->info.ahb_slv->irq;
+	}
+	if ( apb_slv ) {
+		pnpinfo->info.apb_slv = (struct ambapp_apb_info *)
+					apb_slv->devinfo;
+		ambapp_alloc_dev(apb_slv, (void *)newdev);
+		if ( pnpinfo->info.apb_slv->irq )
+			pnpinfo->info.irq = pnpinfo->info.apb_slv->irq;
+	}
+	if ( pnpinfo->info.irq == 0 )
+		pnpinfo->info.irq = -1; /* indicate no IRQ */
+
+	/* Connect device with PnP information */
+	newdev->businfo = (void *)pnpinfo;
+
+	if ( ambapp_dev_fixup(newdev, pnpinfo) == 0 )
+		drvmgr_dev_register(newdev); /* Register New Device */
+}
+
+/* Fix device registration.
+ * Function returns:
+ *  0  Register device as normal
+ *  1  Fixup function handles registration
+ */
+static int ambapp_dev_register_fixup(struct ambapp_dev *dev, struct ambapp_dev_reg_struct *p)
+{
+	/* GR740 GRPCI2 speciality:
+	 * - In the GR740 the APB_SLV is detected before the AHB_SLV
+	 *   which makes the registration incorrect. We deal with it in
+	 *   this function. */
+	if ( (dev->dev_type == DEV_APB_SLV) &&
+	     (dev->device == GAISLER_GRPCI2) &&
+	     (dev->vendor == VENDOR_GAISLER) &&
+	     (p->ahb_slv == NULL) ) {
+		DBG("GRPCI2 APB_SLV detected before AHB_SLV. Skipping APB_SLV registration.\n");
+		return 1;
+	}
+	return 0;
+}
+
+/* Register one AMBA device.
+ *
+ * Callback for ambapp_for_each(): collects the AHB MST/AHB SLV/APB SLV
+ * interfaces that belong to the same core into *p and registers the
+ * combined core once all of its interfaces have been seen. Re-enters
+ * ambapp_for_each() recursively to look up sibling interfaces.
+ *
+ * Returns 0 to continue scanning, 1 to stop the current scan.
+ */
+static int ambapp_dev_register(struct ambapp_dev *dev, int index, void *arg)
+{
+	struct ambapp_dev_reg_struct *p = arg;
+
+#ifdef DEBUG
+	/* Initialize to a safe default: reading an uninitialized pointer
+	 * in the DBG() call below would be undefined behavior if an
+	 * unexpected dev_type ever showed up. */
+	char *type = "UNKNOWN";
+
+	if ( dev->dev_type == DEV_AHB_MST )
+		type = "AHB MST";
+	else if ( dev->dev_type == DEV_AHB_SLV )
+		type = "AHB SLV";
+	else if ( dev->dev_type == DEV_APB_SLV )
+		type = "APB SLV";
+
+	DBG("Found [%d:%x:%x], %s\n", index, dev->vendor, dev->device, type);
+#endif
+
+	/* Fixup for device registration */
+	if (ambapp_dev_register_fixup(dev, p)){
+		return 0;
+	}
+
+	if ( dev->dev_type == DEV_AHB_MST ) {
+		if ( p->ahb_mst ) {
+			/* This should not happen */
+			printk("ambapp_dev_register: ahb_mst not NULL!\n");
+			exit(1);
+		}
+
+		/* Remember AHB Master */
+		p->ahb_mst = dev;
+
+		/* Find AHB Slave and APB slave for this Core */
+		ambapp_for_each(p->abus, (OPTIONS_AHB_SLVS|OPTIONS_APB_SLVS|OPTIONS_FREE), dev->vendor, dev->device, ambapp_dev_register, p);
+
+		ambapp_core_register(p->ahb_mst, p->ahb_slv, p->apb_slv, p);
+		p->ahb_mst = p->ahb_slv = p->apb_slv = NULL;
+		return 0;
+
+	} else if ( dev->dev_type == DEV_AHB_SLV ) {
+		if ( p->ahb_slv ) {
+			/* Already got our AHB Slave interface */
+			return 0;
+		}
+
+		/* Remember AHB Slave */
+		p->ahb_slv = dev;
+
+		if ( p->ahb_mst ) {
+			/* Continue searching for APB Slave */
+			return 0;
+		} else {
+			/* Find APB Slave interface for this Core */
+			ambapp_for_each(p->abus, (OPTIONS_APB_SLVS|OPTIONS_FREE), dev->vendor, dev->device, ambapp_dev_register, p);
+
+			ambapp_core_register(p->ahb_mst, p->ahb_slv, p->apb_slv, p);
+			p->ahb_mst = p->ahb_slv = p->apb_slv = NULL;
+			return 0;
+		}
+	} else if ( dev->dev_type == DEV_APB_SLV ) {
+		if ( p->apb_slv ) {
+			/* This should not happen */
+			printk("ambapp_dev_register: apb_slv not NULL!\n");
+			exit(1);
+		}
+		/* Remember APB Slave */
+		p->apb_slv = dev;
+
+		if ( p->ahb_mst || p->ahb_slv ) {
+			/* Stop scanning */
+			return 1;
+		} else {
+			ambapp_core_register(p->ahb_mst, p->ahb_slv, p->apb_slv, p);
+			p->ahb_mst = p->ahb_slv = p->apb_slv = NULL;
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/* Register all AMBA devices available on the AMBAPP bus */
+static int ambapp_ids_register(struct drvmgr_bus *bus)
+{
+	struct ambapp_priv *priv = bus->priv;
+	struct ambapp_bus *abus;
+	struct ambapp_dev_reg_struct arg;
+
+	DBG("ambapp_ids_register:\n");
+
+	memset(&arg, 0, sizeof(arg));
+
+	abus = priv->config->abus;
+	arg.abus = abus;
+	arg.bus = bus;
+
+	/* Combine the AHB MST, AHB SLV and APB SLV interfaces of a core. A core has often more than
+	 * one interface. A core can not have more than one interface of the same type.
+	 */
+	ambapp_for_each(abus, (OPTIONS_ALL_DEVS|OPTIONS_FREE), -1, -1, ambapp_dev_register, &arg);
+
+#ifdef DEBUG
+	ambapp_print(abus->root, 1);
+#endif
+
+	return DRVMGR_OK;
+}
+
+/*** DEVICE FUNCTIONS ***/
+
+/* Register an AMBA PnP bus below the given device.
+ *
+ * Allocates the bus object together with its private data, selects the
+ * bus type from the configuration (plain/RMAP/DIST), attaches any
+ * resource configuration and registers the bus with the driver manager.
+ *
+ * NOTE(review): an invalid configuration (!config || !config->ops) is
+ * silently accepted and DRVMGR_OK returned - confirm this is intended.
+ */
+int ambapp_bus_register(struct drvmgr_dev *dev, struct ambapp_config *config)
+{
+	struct ambapp_priv *priv;
+
+	if ( !config || !config->ops )
+		return DRVMGR_OK;
+
+	DBG("AMBAPP BUS: initializing\n");
+
+	/* Register BUS */
+	drvmgr_alloc_bus(&dev->bus, sizeof(struct ambapp_priv));
+	priv = (struct ambapp_priv *)(dev->bus + 1);
+	priv->config = config;
+	if ( priv->config->bus_type == DRVMGR_BUS_TYPE_AMBAPP_DIST )
+		dev->bus->bus_type = DRVMGR_BUS_TYPE_AMBAPP_DIST;
+	else if ( priv->config->bus_type == DRVMGR_BUS_TYPE_AMBAPP_RMAP )
+		dev->bus->bus_type = DRVMGR_BUS_TYPE_AMBAPP_RMAP;
+	else
+		dev->bus->bus_type = DRVMGR_BUS_TYPE_AMBAPP;
+	dev->bus->next = NULL;
+	dev->bus->dev = dev;
+	dev->bus->priv = priv;
+	dev->bus->children = NULL;
+	dev->bus->ops = &ambapp_bus_ops;
+	dev->bus->funcs = config->funcs;
+	dev->bus->dev_cnt = 0;
+	dev->bus->reslist = NULL;
+	dev->bus->maps_up = config->maps_up;
+	dev->bus->maps_down = config->maps_down;
+
+	/* Add resource configuration */
+	if ( priv->config->resources )
+		drvmgr_bus_res_add(dev->bus, priv->config->resources);
+
+	drvmgr_bus_register(dev->bus);
+
+	return DRVMGR_OK;
+}
+
+/*** BUS INITIALIZE FUNCTIONS ***/
+
+/* Driver manager bus init1 hook: enumerate the AMBA PnP bus and
+ * register all devices found on it. */
+static int ambapp_bus_init1(struct drvmgr_bus *bus)
+{
+	/* Initialize the bus, register devices on this bus */
+	return ambapp_ids_register(bus);
+}
+
+/* Bus removal hook: nothing to tear down, devices are freed elsewhere */
+static int ambapp_bus_remove(struct drvmgr_bus *bus)
+{
+	return DRVMGR_OK;
+}
+
+#ifdef RTEMS_SMP
+/* Route a device interrupt to a set of processors (SMP only).
+ * Delegates to the bus provider's int_set_affinity operation.
+ */
+static int ambapp_int_set_affinity(
+	struct drvmgr_dev *dev,
+	int index,
+	const Processor_mask *cpus)
+{
+	struct ambapp_priv *priv;
+	int irq;
+
+	priv = dev->parent->priv;
+
+	/* Get IRQ number from index and device information */
+	irq = ambapp_int_get(dev, index);
+	if (irq < 0)
+		return DRVMGR_EINVAL;
+
+	DBG("Set interrupt affinity on 0x%x for dev 0x%x (IRQ: %d)\n",
+		(unsigned int)dev->parent->dev, (unsigned int)dev, irq);
+
+	if (priv->config->ops->int_set_affinity) {
+		/* Let device override driver default */
+		return priv->config->ops->int_set_affinity(dev, irq, cpus);
+	} else {
+		return DRVMGR_ENOSYS;
+	}
+}
+#endif
diff --git a/bsps/shared/grlib/drvmgr/ambapp_bus_grlib.c b/bsps/shared/grlib/drvmgr/ambapp_bus_grlib.c
new file mode 100644
index 0000000000..9e8c37daf2
--- /dev/null
+++ b/bsps/shared/grlib/drvmgr/ambapp_bus_grlib.c
@@ -0,0 +1,252 @@
+/* LEON3 GRLIB AMBA Plug & Play bus driver.
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * This driver is a wrapper for the general AMBA Plug & Play bus
+ * driver. This is the root bus driver for GRLIB systems.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <libcpu/access.h>
+
+#include <grlib/ambapp_bus.h>
+#include <grlib/ambapp_bus_grlib.h>
+#include <grlib/genirq.h>
+
+#include <bsp.h>
+#include <bsp/irq.h>
+
+#include <grlib/grlib_impl.h>
+
+#define DBG(args...)
+/*#define DBG(args...) printk(args)*/
+
+static int ambapp_grlib_int_register(
+ struct drvmgr_dev *dev,
+ int irq,
+ const char *info,
+ drvmgr_isr isr,
+ void *arg);
+static int ambapp_grlib_int_unregister(
+ struct drvmgr_dev *dev,
+ int irq,
+ drvmgr_isr isr,
+ void *arg);
+static int ambapp_grlib_int_clear(
+ struct drvmgr_dev *dev,
+ int irq);
+static int ambapp_grlib_int_mask(
+ struct drvmgr_dev *dev,
+ int irq);
+static int ambapp_grlib_int_unmask(
+ struct drvmgr_dev *dev,
+ int irq);
+#ifdef RTEMS_SMP
+static int ambapp_grlib_int_set_affinity(
+ struct drvmgr_dev *dev,
+ int irq,
+ const Processor_mask *cpus);
+#endif
+static int ambapp_grlib_get_params(
+ struct drvmgr_dev *dev,
+ struct drvmgr_bus_params *params);
+
+static int ambapp_grlib_init1(struct drvmgr_dev *dev);
+static int ambapp_grlib_init2(struct drvmgr_dev *dev);
+static int ambapp_grlib_remove(struct drvmgr_dev *dev);
+
+/* READ/WRITE access to SpaceWire target over RMAP */
+static void *ambapp_grlib_rw_arg(struct drvmgr_dev *dev);
+
+static struct ambapp_ops ambapp_grlib_ops = {
+ .int_register = ambapp_grlib_int_register,
+ .int_unregister = ambapp_grlib_int_unregister,
+ .int_clear = ambapp_grlib_int_clear,
+ .int_mask = ambapp_grlib_int_mask,
+ .int_unmask = ambapp_grlib_int_unmask,
+#ifdef RTEMS_SMP
+ .int_set_affinity = ambapp_grlib_int_set_affinity,
+#endif
+ .get_params = ambapp_grlib_get_params
+};
+
+/* AMBAPP_RW_ARG function: returns the argument passed to the R/W
+ * access functions. On the local GRLIB bus no argument is needed, the
+ * device pointer is returned for debugging purposes only.
+ */
+static void *ambapp_grlib_rw_arg(struct drvmgr_dev *dev)
+{
+	return dev; /* No argument really needed, but for debug? */
+}
+
+/* Bus function table: on the local GRLIB bus register and memory access
+ * maps directly to CPU loads/stores (_ldN/_stN) and memcpy.
+ */
+static struct drvmgr_func ambapp_grlib_funcs[] =
+{
+	DRVMGR_FUNC(AMBAPP_RW_ARG, ambapp_grlib_rw_arg),
+
+	DRVMGR_FUNC(AMBAPP_R8,  _ld8),
+	DRVMGR_FUNC(AMBAPP_R16, _ld16),
+	DRVMGR_FUNC(AMBAPP_R32, _ld32),
+	DRVMGR_FUNC(AMBAPP_R64, _ld64),
+
+	DRVMGR_FUNC(AMBAPP_W8,  _st8),
+	DRVMGR_FUNC(AMBAPP_W16, _st16),
+	DRVMGR_FUNC(AMBAPP_W32, _st32),
+	DRVMGR_FUNC(AMBAPP_W64, _st64),
+
+	DRVMGR_FUNC(AMBAPP_RMEM, memcpy),
+	DRVMGR_FUNC(AMBAPP_WMEM, memcpy),
+
+	DRVMGR_FUNC_END,
+};
+
+static struct drvmgr_drv_ops ambapp_grlib_drv_ops =
+{
+ .init = {ambapp_grlib_init1, ambapp_grlib_init2, NULL, NULL},
+ .remove = ambapp_grlib_remove,
+ .info = NULL,
+};
+
+static struct drvmgr_drv ambapp_bus_drv_grlib =
+{
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_GRLIB_AMBAPP_ID, /* Driver ID */
+ "AMBAPP_GRLIB_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_ROOT, /* Bus Type */
+ &ambapp_grlib_drv_ops,
+ NULL, /* Funcs */
+ 0,
+ 0,
+};
+
+static struct grlib_config *drv_mgr_grlib_config = NULL;
+
+/* Register the GRLIB AMBA PnP bus driver with the driver manager
+ * (non-root variant; see ambapp_grlib_root_register() for root use). */
+void ambapp_grlib_register(void)
+{
+	drvmgr_drv_register(&ambapp_bus_drv_grlib);
+}
+
+/* Register the GRLIB AMBA PnP bus as the driver manager root bus.
+ *
+ * \param config  root bus configuration (AMBA PnP topology + resources);
+ *                must remain valid, only the pointer is stored.
+ * \return always 0
+ */
+int ambapp_grlib_root_register(struct grlib_config *config)
+{
+
+	/* Save the configuration for later */
+	drv_mgr_grlib_config = config;
+
+	/* Register root device driver */
+	drvmgr_root_drv_register(&ambapp_bus_drv_grlib);
+
+	return 0;
+}
+
+/* Function called from Driver Manager Initialization Stage 1.
+ *
+ * Builds the generic AMBA bus configuration from the GRLIB root bus
+ * configuration stored by ambapp_grlib_root_register() and registers
+ * the bus. Returns a DRVMGR_* status code.
+ */
+static int ambapp_grlib_init1(struct drvmgr_dev *dev)
+{
+	struct ambapp_config *config;
+
+	dev->priv = NULL;
+	dev->name = "GRLIB AMBA PnP";
+
+	DBG("AMBAPP GRLIB: intializing\n");
+
+	/* Fail gracefully if the root bus configuration was never
+	 * registered; previously this dereferenced a NULL pointer below. */
+	if ( drv_mgr_grlib_config == NULL )
+		return DRVMGR_FAIL;
+
+	config = grlib_malloc(sizeof(*config));
+	if ( !config )
+		return DRVMGR_NOMEM; /* drvmgr code, not an RTEMS status */
+
+	config->ops = &ambapp_grlib_ops;
+	config->maps_up = DRVMGR_TRANSLATE_ONE2ONE;
+	config->maps_down = DRVMGR_TRANSLATE_ONE2ONE;
+	config->abus = drv_mgr_grlib_config->abus;
+	config->resources = drv_mgr_grlib_config->resources;
+	config->funcs = ambapp_grlib_funcs;
+	config->bus_type = DRVMGR_BUS_TYPE_AMBAPP;
+
+	/* Initialize the generic part of the AMBA Bus */
+	return ambapp_bus_register(dev, config);
+}
+
+/* Driver Manager Initialization Stage 2: nothing to do */
+static int ambapp_grlib_init2(struct drvmgr_dev *dev)
+{
+	return 0;
+}
+
+/* Removal hook: nothing to tear down */
+static int ambapp_grlib_remove(struct drvmgr_dev *dev)
+{
+	return 0;
+}
+
+/* Install an ISR on the shared BSP interrupt layer */
+static int ambapp_grlib_int_register
+	(
+	struct drvmgr_dev *dev,
+	int irq,
+	const char *info,
+	drvmgr_isr isr,
+	void *arg
+	)
+{
+	return BSP_shared_interrupt_register(irq, info, isr, arg);
+}
+
+/* Remove an ISR from the shared BSP interrupt layer */
+static int ambapp_grlib_int_unregister
+	(
+	struct drvmgr_dev *dev,
+	int irq,
+	drvmgr_isr isr,
+	void *arg
+	)
+{
+	return BSP_shared_interrupt_unregister(irq, isr, arg);
+}
+
+/* Acknowledge a pending interrupt at the interrupt controller */
+static int ambapp_grlib_int_clear
+	(
+	struct drvmgr_dev *dev,
+	int irq)
+{
+	BSP_shared_interrupt_clear(irq);
+	return DRVMGR_OK;
+}
+
+/* Mask (disable) an interrupt at the interrupt controller */
+static int ambapp_grlib_int_mask
+	(
+	struct drvmgr_dev *dev,
+	int irq
+	)
+{
+	BSP_shared_interrupt_mask(irq);
+	return DRVMGR_OK;
+}
+
+/* Unmask (enable) an interrupt at the interrupt controller */
+static int ambapp_grlib_int_unmask
+	(
+	struct drvmgr_dev *dev,
+	int irq
+	)
+{
+	BSP_shared_interrupt_unmask(irq);
+	return DRVMGR_OK;
+}
+
+#ifdef RTEMS_SMP
+/* Route an interrupt to a set of processors (SMP only) */
+static int ambapp_grlib_int_set_affinity
+	(
+	struct drvmgr_dev *dev,
+	int irq,
+	const Processor_mask *cpus
+	)
+{
+	bsp_interrupt_set_affinity(irq, cpus);
+	return DRVMGR_OK;
+}
+#endif
+
+/* Supply default bus parameters: no device-name prefix, inherit
+ * frequency from the parent. */
+static int ambapp_grlib_get_params(struct drvmgr_dev *dev, struct drvmgr_bus_params *params)
+{
+	/* Leave params->freq_hz untouched for default */
+	params->dev_prefix = "";
+	return 0;
+}
diff --git a/bsps/shared/grlib/drvmgr/get_resarray_count.c b/bsps/shared/grlib/drvmgr/get_resarray_count.c
new file mode 100644
index 0000000000..7b5850d982
--- /dev/null
+++ b/bsps/shared/grlib/drvmgr/get_resarray_count.c
@@ -0,0 +1,20 @@
+/* Common driver configuration routines.
+ *
+ * COPYRIGHT (c) 2015.
+ * Cobham Gaisler.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdlib.h>
+#include <grlib/bspcommon.h>
+
+/* Count the entries of a NULL-terminated bus-resource pointer array */
+int get_resarray_count(struct drvmgr_bus_res **array)
+{
+	int count;
+
+	for (count = 0; array[count] != NULL; count++)
+		;
+	return count;
+}
diff --git a/bsps/shared/grlib/gpio/gpiolib.c b/bsps/shared/grlib/gpio/gpiolib.c
new file mode 100644
index 0000000000..cf0038c5bb
--- /dev/null
+++ b/bsps/shared/grlib/gpio/gpiolib.c
@@ -0,0 +1,272 @@
+/* GPIOLIB interface implementation
+ *
+ * COPYRIGHT (c) 2009.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <grlib/gpiolib.h>
+
+#include <grlib/grlib_impl.h>
+
+struct gpiolib_port;
+
+/* One registered GPIO port: links the driver that implements it with
+ * the handle the driver expects, plus library bookkeeping. */
+struct gpiolib_port {
+	struct gpiolib_port	*next;	/* single-linked list of ports */
+	int			minor;	/* port number assigned at registration */
+	struct gpiolib_drv	*drv;	/* operations provided by the driver */
+	void			*handle;	/* driver private handle */
+
+	int			open;	/* non-zero while port is opened */
+};
+
+/* Root of GPIO Ports */
+struct gpiolib_port *gpiolib_ports;
+
+/* Number of GPIO ports registered */
+static int port_nr;
+
+/* 1 if library is initialized */
+static int gpiolib_initied = 0;
+
+/* Insert a port first in ports list */
+static void gpiolib_list_add(struct gpiolib_port *port)
+{
+	port->next = gpiolib_ports;
+	gpiolib_ports = port;
+}
+
+/* Look up a port by its minor number.
+ * Returns the port, or NULL when no port has that number.
+ */
+static struct gpiolib_port *gpiolib_find(int minor)
+{
+	struct gpiolib_port *curr;
+
+	/* Linear search of the single-linked port list */
+	for (curr = gpiolib_ports; curr != NULL; curr = curr->next) {
+		if ( curr->minor == minor )
+			break;
+	}
+	return curr;
+}
+
+/* Look up a port by device name.
+ *
+ * Asks each registered driver for its info and compares the reported
+ * device name (first 64 bytes) against the requested name. Ports whose
+ * driver lacks a get_info operation can never match.
+ * Returns the port, or NULL when not found.
+ */
+static struct gpiolib_port *gpiolib_find_by_name(char *name)
+{
+	struct gpiolib_port *p;
+	struct gpiolib_info info;
+	int (*get_info)(void *, struct gpiolib_info *);
+
+	p = gpiolib_ports;
+	while ( p ) {
+		get_info = p->drv->ops->get_info;
+		if ( get_info && (get_info(p->handle, &info) == 0) ) {
+			if ( strncmp(name, (char *)&info.devName[0], 64) == 0 ) {
+				break;
+			}
+		}
+		p = p->next;
+	}
+	return p;
+}
+
+/* Register a GPIO port implementation with the library.
+ *
+ * Allocates a port entry, assigns it the next free minor number and
+ * inserts it into the port list.
+ * Returns 0 on success, -1 on bad arguments or allocation failure.
+ */
+int gpiolib_drv_register(struct gpiolib_drv *drv, void *handle)
+{
+	struct gpiolib_port *port;
+
+	if ( !drv || !drv->ops )
+		return -1;
+
+	port = grlib_calloc(1, sizeof(*port));
+	if ( port == NULL )
+		return -1;
+
+	port->handle = handle;
+	port->minor = port_nr++;
+	port->drv = drv;
+
+	gpiolib_list_add(port);
+
+	return 0;
+}
+
+/* Print debug information about one port or all ports.
+ *
+ * \param port    port number, or -1 for all registered ports
+ * \param handle  optional open handle; when given it selects the port
+ *                directly instead of looking up by number
+ */
+void gpiolib_show(int port, void *handle)
+{
+	struct gpiolib_port *p;
+
+	if ( port == -1 ) {
+		/* Iterate over all registered ports */
+		p = gpiolib_ports;
+		while (p != NULL) {
+			if ( p->drv->ops->show )
+				p->drv->ops->show(p->handle);
+			p = p->next;
+		}
+	} else {
+		if ( handle ) {
+			p = handle;
+		} else {
+			p = gpiolib_find(port);
+		}
+		if ( p == NULL ) {
+			printf("PORT %d NOT FOUND\n", port);
+			return;
+		}
+		if ( p->drv->ops->show )
+			p->drv->ops->show(p->handle);
+	}
+}
+
+/* Open a port selected either by number (port >= 0) or by name.
+ *
+ * Fails (returns NULL) when the library is uninitialized, the port does
+ * not exist or is already open. On success the port is marked open and
+ * the port pointer is returned as an opaque handle.
+ * NOTE(review): the open flag test-and-set is not atomic - confirm
+ * callers serialize access if opened from multiple tasks.
+ */
+static void *gpiolib_open_internal(int port, char *devName)
+{
+	struct gpiolib_port *p;
+
+	if ( gpiolib_initied == 0 )
+		return NULL;
+
+	/* Find */
+	if ( port >= 0 ) {
+		p = gpiolib_find(port);
+	} else {
+		p = gpiolib_find_by_name(devName);
+	}
+	if ( p == NULL )
+		return NULL;
+
+	if ( p->open )
+		return NULL;
+
+	p->open = 1;
+	return p;
+}
+
+/* Open a GPIO port by port number; returns an opaque handle or NULL */
+void *gpiolib_open(int port)
+{
+	return gpiolib_open_internal(port, NULL);
+}
+
+/* Open a GPIO port by device name; returns an opaque handle or NULL */
+void *gpiolib_open_by_name(char *devName)
+{
+	return gpiolib_open_internal(-1, devName);
+}
+
+/* Close a previously opened GPIO port; NULL handles are ignored */
+void gpiolib_close(void *handle)
+{
+	struct gpiolib_port *p = handle;
+
+	if ( p && p->open ) {
+		p->open = 0;
+	}
+}
+
+/* Apply a configuration to an open port via the driver's config
+ * operation. Returns the driver's result, or -1 on bad arguments or
+ * when the driver does not implement config.
+ */
+int gpiolib_set_config(void *handle, struct gpiolib_config *cfg)
+{
+	struct gpiolib_port *port = handle;
+
+	if ( !port || !cfg )
+		return -1;
+
+	if ( !port->drv->ops->config )
+		return -1;
+
+	return port->drv->ops->config(port->handle, cfg);
+}
+
+/* Set direction and output value of a port pin via the driver's set
+ * operation. Returns the driver's result, or -1 on error.
+ */
+int gpiolib_set(void *handle, int dir, int outval)
+{
+	struct gpiolib_port *port = handle;
+
+	if ( !port )
+		return -1;
+
+	if ( !port->drv->ops->set )
+		return -1;
+
+	return port->drv->ops->set(port->handle, dir, outval);
+}
+
+/* Read the current input value of a port pin into *inval via the
+ * driver's get operation. Returns the driver's result, or -1 on error.
+ */
+int gpiolib_get(void *handle, int *inval)
+{
+	struct gpiolib_port *port = handle;
+
+	if ( !port || !inval)
+		return -1;
+
+	if ( !port->drv->ops->get )
+		return -1;
+
+	return port->drv->ops->get(port->handle, inval);
+}
+
+/*** IRQ Functions ***/
+
+/* Install an interrupt handler for a port via the driver's
+ * irq_register operation. Returns the driver's result, or -1 on error.
+ */
+int gpiolib_irq_register(void *handle, void *func, void *arg)
+{
+	struct gpiolib_port *port = handle;
+
+	if ( !port )
+		return -1;
+
+	if ( !port->drv->ops->irq_register )
+		return -1;
+
+	return port->drv->ops->irq_register(port->handle, func, arg);
+}
+
+/* Common backend for the IRQ control helpers below: forwards a
+ * GPIOLIB_IRQ_* option mask to the driver's irq_opts operation.
+ */
+static int gpiolib_irq_opts(void *handle, unsigned int options)
+{
+	struct gpiolib_port *port = handle;
+
+	if ( !port )
+		return -1;
+
+	if ( !port->drv->ops->irq_opts )
+		return -1;
+
+	return port->drv->ops->irq_opts(port->handle, options);
+}
+
+/* Thin wrappers: each forwards one GPIOLIB_IRQ_* option to the port
+ * driver through gpiolib_irq_opts(). */
+int gpiolib_irq_clear(void *handle)
+{
+	return gpiolib_irq_opts(handle, GPIOLIB_IRQ_CLEAR);
+}
+
+int gpiolib_irq_force(void *handle)
+{
+	return gpiolib_irq_opts(handle, GPIOLIB_IRQ_FORCE);
+}
+
+int gpiolib_irq_enable(void *handle)
+{
+	return gpiolib_irq_opts(handle, GPIOLIB_IRQ_ENABLE);
+}
+
+int gpiolib_irq_disable(void *handle)
+{
+	return gpiolib_irq_opts(handle, GPIOLIB_IRQ_DISABLE);
+}
+
+int gpiolib_irq_mask(void *handle)
+{
+	return gpiolib_irq_opts(handle, GPIOLIB_IRQ_MASK);
+}
+
+int gpiolib_irq_unmask(void *handle)
+{
+	return gpiolib_irq_opts(handle, GPIOLIB_IRQ_UNMASK);
+}
+
+
+/*** Initialization ***/
+
+/* Initialize the GPIO library. Safe to call multiple times; only the
+ * first call resets the port list. Returns 0.
+ */
+int gpiolib_initialize(void)
+{
+	if ( gpiolib_initied != 0 )
+		return 0;
+
+	/* Initialize Library */
+	port_nr = 0;
+	gpiolib_ports = 0;
+	gpiolib_initied = 1;
+	return 0;
+}
diff --git a/bsps/shared/grlib/gpio/grgpio.c b/bsps/shared/grlib/gpio/grgpio.c
new file mode 100644
index 0000000000..05504ef020
--- /dev/null
+++ b/bsps/shared/grlib/gpio/grgpio.c
@@ -0,0 +1,449 @@
+/* GRGPIO GPIO Driver interface.
+ *
+ * COPYRIGHT (c) 2009.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
#include <bsp.h>
#include <rtems/libio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>
#include <assert.h>
#include <rtems/bspIo.h>
#include <string.h>
#include <stdio.h>

#include <drvmgr/drvmgr.h>
#include <grlib/ambapp_bus.h>
#include <grlib/grgpio.h>
#include <grlib/gpiolib.h>
#include <grlib/ambapp.h>
#include <grlib/grlib.h>
#include <grlib/grlib_impl.h>
+
+/*#define DEBUG 1*/
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#define STATIC
+#else
+#define DBG(x...)
+#define STATIC static
+#endif
+
/* Per-port interrupt bookkeeping: the handler and argument registered
 * via the gpiolib irq_register operation, replayed when the IRQ is
 * enabled/disabled at the interrupt controller.
 */
struct grgpio_isr {
	drvmgr_isr isr;	/* user interrupt service routine */
	void *arg;	/* argument passed to the ISR */
};
+
/* Private per-core state of the GRGPIO driver. */
struct grgpio_priv {
	struct drvmgr_dev *dev;		/* driver manager device handle */
	struct grgpio_regs *regs;	/* memory-mapped GRGPIO registers */
	int irq;			/* first IRQ number of the core */
	int minor;

	/* Driver implementation */
	int port_cnt;			/* number of implemented GPIO lines (max 32) */
	unsigned char port_handles[32];	/* handle = &port_handles[i]; entry stores i
					 * so grgpio_find_port() can recover priv */
	struct grgpio_isr isrs[32];	/* registered ISR per line */
	struct gpiolib_drv gpiolib_desc;/* descriptor given to the GPIO library */
	unsigned int bypass;		/* value written to the bypass register */
	unsigned int imask;		/* bitmask of lines with IRQ support */
};
+
+/******************* Driver Manager Part ***********************/
+
+int grgpio_device_init(struct grgpio_priv *priv);
+
+int grgpio_init1(struct drvmgr_dev *dev);
+int grgpio_init2(struct drvmgr_dev *dev);
+
/* Driver manager operations: only init level 1 is used. */
struct drvmgr_drv_ops grgpio_ops =
{
	.init = {grgpio_init1, NULL, NULL, NULL},
	.remove = NULL,
	.info = NULL
};

/* AMBA Plug&Play IDs this driver binds to. */
struct amba_dev_id grgpio_ids[] =
{
	{VENDOR_GAISLER, GAISLER_GPIO},
	{0, 0}		/* Mark end of table */
};

/* Driver description registered with the driver manager. */
struct amba_drv_info grgpio_drv_info =
{
	{
		DRVMGR_OBJ_DRV,			/* Driver */
		NULL,				/* Next driver */
		NULL,				/* Device list */
		DRIVER_AMBAPP_GAISLER_GRGPIO_ID,/* Driver ID */
		"GRGPIO_DRV",			/* Driver Name */
		DRVMGR_BUS_TYPE_AMBAPP,		/* Bus Type */
		&grgpio_ops,
		NULL,				/* Funcs */
		0,				/* No devices yet */
		0,
	},
	&grgpio_ids[0]
};
+
/* Register the GRGPIO driver with the driver manager framework. */
void grgpio_register_drv (void)
{
	DBG("Registering GRGPIO driver\n");
	drvmgr_drv_register(&grgpio_drv_info.general);
}
+
+/* Register GRGPIO pins as quick as possible to the GPIO library,
+ * other drivers may depend upon them in INIT LEVEL 2.
+ * Note that since IRQ may not be available in init1, it is assumed
+ * that the GPIOLibrary does not request IRQ routines until LEVEL 2.
+ */
+int grgpio_init1(struct drvmgr_dev *dev)
+{
+ struct grgpio_priv *priv;
+ int status, port;
+
+ DBG("GRGPIO[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+
+ /* This core will not find other cores, but other driver may depend upon
+ * the GPIO library to function. So, we set up GPIO right away.
+ */
+
+ /* Initialize library if not already done */
+ status = gpiolib_initialize();
+ if ( status < 0 )
+ return DRVMGR_FAIL;
+
+ priv = dev->priv = grlib_calloc(1, sizeof(*priv));
+ if ( !priv )
+ return DRVMGR_NOMEM;
+ priv->dev = dev;
+
+ if ( grgpio_device_init(priv) ) {
+ free(dev->priv);
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+
+ /* Register all ports available on this core as GPIO port to
+ * upper layer
+ */
+ for(port=0; port<priv->port_cnt; port++) {
+ priv->port_handles[port] = port;
+ gpiolib_drv_register(&priv->gpiolib_desc,
+ &priv->port_handles[port]);
+ }
+
+ return DRVMGR_OK;
+}
+
+/******************* Driver Implementation ***********************/
+
+/* Find port from handle, returns -1 if not found */
+static int grgpio_find_port(void *handle, struct grgpio_priv **priv)
+{
+ unsigned char portnr;
+
+ portnr = *(unsigned char *)handle;
+ if ( portnr > 31 )
+ return -1;
+ *priv = (struct grgpio_priv *)
+ (((unsigned int)handle - portnr*sizeof(unsigned char)) -
+ offsetof(struct grgpio_priv, port_handles));
+ return portnr;
+}
+
+static int grgpio_gpiolib_open(void *handle)
+{
+ struct grgpio_priv *priv;
+ int portnr;
+
+ portnr = grgpio_find_port(handle, &priv);
+ if ( portnr < 0 ) {
+ DBG("GRGPIO: FAILED OPENING HANDLE 0x%08x\n", handle);
+ return -1;
+ }
+ DBG("GRGPIO[0x%08x][%d]: OPENING\n", priv->regs, portnr);
+
+ /* Open the device, nothing to be done... */
+
+ return 0;
+}
+
+static int grgpio_grpiolib_config(void *handle, struct gpiolib_config *cfg)
+{
+ struct grgpio_priv *priv;
+ int portnr;
+ unsigned int mask;
+
+ portnr = grgpio_find_port(handle, &priv);
+ if ( portnr < 0 ) {
+ return -1;
+ }
+ DBG("GRGPIO[0x%08x][%d]: CONFIG\n", priv->regs, portnr);
+
+ /* Configure the device. And check that operation is supported,
+ * not all I/O Pins have IRQ support.
+ */
+ mask = (1<<portnr);
+
+ /* Return error when IRQ not supported by this I/O Line and it
+ * is beeing enabled by user.
+ */
+ if ( ((mask & priv->imask) == 0) && cfg->mask )
+ return -1;
+
+ priv->regs->imask &= ~mask; /* Disable interrupt temporarily */
+
+ /* Configure settings before enabling interrupt */
+ priv->regs->ipol = (priv->regs->ipol & ~mask) | (cfg->irq_polarity ? mask : 0);
+ priv->regs->iedge = (priv->regs->iedge & ~mask) | (cfg->irq_level ? 0 : mask);
+ priv->regs->imask |= cfg->mask ? mask : 0;
+
+ return 0;
+}
+
+static int grgpio_grpiolib_get(void *handle, int *inval)
+{
+ struct grgpio_priv *priv;
+ int portnr;
+
+ portnr = grgpio_find_port(handle, &priv);
+ if ( portnr < 0 ) {
+ return -1;
+ }
+ DBG("GRGPIO[0x%08x][%d]: GET\n", priv->regs, portnr);
+
+ /* Get current status of the port */
+ if ( inval )
+ *inval = (priv->regs->data >> portnr) & 0x1;
+
+ return 0;
+}
+
/* gpiolib irq_opts operation: apply one or more GPIOLIB_IRQ_* options
 * to this pin's interrupt at the interrupt controller.
 *
 * Kept in the original order on purpose: DISABLE, CLEAR, ENABLE, MASK,
 * UNMASK are applied in sequence, so e.g. CLEAR|ENABLE clears stale
 * interrupts before enabling. GPIOLIB_IRQ_FORCE is not supported by
 * this hardware and is rejected.
 *
 * NOTE(review): portnr is passed as the per-device IRQ index, i.e. a
 * 1:1 pin-to-IRQ mapping is assumed — confirm against the GRGPIO core
 * configuration.
 *
 * Returns 0 on success, -1 on failure.
 */
static int grgpio_grpiolib_irq_opts(void *handle, unsigned int options)
{
	struct grgpio_priv *priv;
	int portnr;
	drvmgr_isr isr;
	void *arg;

	portnr = grgpio_find_port(handle, &priv);
	if ( portnr < 0 ) {
		return -1;
	}
	DBG("GRGPIO[0x%08x][%d]: IRQ OPTS 0x%x\n", priv->regs, portnr, options);

	if ( options & GPIOLIB_IRQ_FORCE )
		return -1;

	/* ISR/argument previously stored by grgpio_grpiolib_irq_register() */
	isr = priv->isrs[portnr].isr;
	arg = priv->isrs[portnr].arg;

	if ( options & GPIOLIB_IRQ_DISABLE ) {
		/* Disable interrupt at interrupt controller */
		if ( drvmgr_interrupt_unregister(priv->dev, portnr, isr, arg) ) {
			return -1;
		}
	}
	if ( options & GPIOLIB_IRQ_CLEAR ) {
		/* Clear interrupt at interrupt controller */
		if ( drvmgr_interrupt_clear(priv->dev, portnr) ) {
			return -1;
		}
	}
	if ( options & GPIOLIB_IRQ_ENABLE ) {
		/* Enable interrupt at interrupt controller */
		if ( drvmgr_interrupt_register(priv->dev, portnr, "grgpio", isr, arg) ) {
			return -1;
		}
	}
	if ( options & GPIOLIB_IRQ_MASK ) {
		/* Mask (disable) interrupt at interrupt controller */
		if ( drvmgr_interrupt_mask(priv->dev, portnr) ) {
			return -1;
		}
	}
	if ( options & GPIOLIB_IRQ_UNMASK ) {
		/* Unmask (enable) interrupt at interrupt controller */
		if ( drvmgr_interrupt_unmask(priv->dev, portnr) ) {
			return -1;
		}
	}

	return 0;
}
+
+static int grgpio_grpiolib_irq_register(void *handle, void *func, void *arg)
+{
+ struct grgpio_priv *priv;
+ int portnr;
+
+ portnr = grgpio_find_port(handle, &priv);
+ if ( portnr < 0 ) {
+ DBG("GRGPIO: FAILED OPENING HANDLE 0x%08x\n", handle);
+ return -1;
+ }
+ DBG("GRGPIO: OPENING %d at [0x%08x]\n", portnr, priv->regs);
+
+ /* Since the user doesn't provide the ISR and argument, we must... */
+ priv->isrs[portnr].isr = func;
+ priv->isrs[portnr].arg = arg;
+
+ return 0;
+}
+
+static int grgpio_grpiolib_set(void *handle, int dir, int outval)
+{
+ struct grgpio_priv *priv;
+ int portnr;
+ unsigned int mask;
+
+ portnr = grgpio_find_port(handle, &priv);
+ if ( portnr < 0 ) {
+ DBG("GRGPIO: FAILED OPENING HANDLE 0x%08x\n", handle);
+ return -1;
+ }
+ DBG("GRGPIO: OPENING %d at [0x%08x]\n", portnr, priv->regs);
+
+ /* Set Direction and Output */
+ mask = 1<<portnr;
+ priv->regs->dir = (priv->regs->dir & ~mask) | (dir ? mask : 0);
+ priv->regs->output = (priv->regs->output & ~mask) | (outval ? mask : 0);
+
+ return 0;
+}
+
/* gpiolib show operation: print this pin's bit from seven consecutive
 * registers starting at 'data'.
 *
 * NOTE(review): relies on data, output, dir, imask, ipol, iedge and
 * bypass being laid out consecutively in struct grgpio_regs — confirm
 * against the register map if that struct ever changes.
 */
static int grgpio_gpiolib_show(void *handle)
{
	struct grgpio_priv *priv;
	int portnr, i, regs[7];
	volatile unsigned int *reg;

	portnr = grgpio_find_port(handle, &priv);
	if ( portnr < 0 ) {
		DBG("GRGPIO: FAILED SHOWING HANDLE 0x%08x\n", handle);
		return -1;
	}
	/* Extract this pin's bit from each of the 7 registers */
	for (i=0, reg=&priv->regs->data; i<7; i++, reg++) {
		regs[i] = ( *reg >> portnr) & 1;
	}
	printf("GRGPIO[%p] PORT[%d]: IN/OUT/DIR: [%d,%d,%d], MASK/POL/EDGE: [%d,%d,%d], BYPASS: %d\n",
		priv->regs, portnr, regs[0], regs[1], regs[2], regs[3], regs[4], regs[5], regs[6]);
	return 0;
}
+
/* gpiolib get_info operation: build the filesystem device name for
 * this pin into pinfo->devName.
 * Returns 0 on success, -1 on bad arguments.
 */
static int grgpio_gpiolib_get_info(void *handle, struct gpiolib_info *pinfo)
{
	struct grgpio_priv *priv;
	int portnr;
	/* NOTE(review): prefix[48] is assumed large enough for any bus
	 * prefix written by drvmgr_get_dev_prefix() — confirm.
	 */
	char prefix[48];
	struct drvmgr_dev *dev;

	if ( !pinfo )
		return -1;

	portnr = grgpio_find_port(handle, &priv);
	if ( portnr < 0 ) {
		DBG("GRGPIO: FAILED GET_INFO HANDLE 0x%08x\n", handle);
		return -1;
	}

	/* Get Filesystem name prefix */
	dev = priv->dev;
	prefix[0] = '\0';
	if ( drvmgr_get_dev_prefix(dev, prefix) ) {
		/* Failed to get prefix, make sure of a unique FS name
		 * by using the driver minor.
		 */
		/* NOTE(review): the literal 64 is assumed to equal
		 * sizeof(pinfo->devName) — confirm against gpiolib.h.
		 */
		snprintf(pinfo->devName, 64, "/dev/grgpio%d/%d", dev->minor_drv, portnr);
	} else {
		/* Got special prefix, this means we have a bus prefix
		 * And we should use our "bus minor"
		 */
		snprintf(pinfo->devName, 64, "/dev/%sgrgpio%d/%d", prefix, dev->minor_bus, portnr);
	}

	return 0;
}
+
/* Operations table handed to the GPIO library for every GRGPIO port
 * (see grgpio_device_init / gpiolib_drv_register).
 */
static struct gpiolib_drv_ops grgpio_gpiolib_ops =
{
	.config		= grgpio_grpiolib_config,
	.get		= grgpio_grpiolib_get,
	.irq_opts	= grgpio_grpiolib_irq_opts,
	.irq_register	= grgpio_grpiolib_irq_register,
	.open		= grgpio_gpiolib_open,
	.set		= grgpio_grpiolib_set,
	.show		= grgpio_gpiolib_show,
	.get_info	= grgpio_gpiolib_get_info,
};
+
/* Probe and initialize one GRGPIO core: locate registers via AMBA
 * Plug&Play, mask all IRQs, detect interrupt-capable and implemented
 * lines, apply user overrides ("nBits", "bypass") and prepare the
 * gpiolib descriptor. Returns 0 on success, -1 on probe failure.
 */
int grgpio_device_init(struct grgpio_priv *priv)
{
	struct amba_dev_info *ambadev;
	struct ambapp_core *pnpinfo;
	union drvmgr_key_value *value;
	unsigned int mask;
	int port_cnt;

	/* Get device information from AMBA PnP information */
	ambadev = (struct amba_dev_info *)priv->dev->businfo;
	if ( ambadev == NULL ) {
		return -1;
	}
	pnpinfo = &ambadev->info;
	priv->irq = pnpinfo->irq;
	priv->regs = (struct grgpio_regs *)pnpinfo->apb_slv->start;

	DBG("GRGPIO: 0x%08x irq %d\n", (unsigned int)priv->regs, priv->irq);

	/* Mask all Interrupts */
	priv->regs->imask = 0;

	/* Make IRQ Rising edge triggered default */
	priv->regs->ipol = 0xfffffffe;
	priv->regs->iedge = 0xfffffffe;

	/* Read what I/O lines have IRQ support.
	 * NOTE(review): this reads back the ipol register just written;
	 * presumably bits without interrupt support read back as zero —
	 * confirm against the GRGPIO hardware manual.
	 */
	priv->imask = priv->regs->ipol;

	/* Let the user configure the port count, this might be needed
	 * when the GPIO lines must not be changed (assigned during bootup)
	 */
	value = drvmgr_dev_key_get(priv->dev, "nBits", DRVMGR_KT_INT);
	if ( value ) {
		priv->port_cnt = value->i;
	} else {
		/* Auto detect number of GPIO ports: unimplemented output
		 * bits do not latch the 1s written below.
		 */
		priv->regs->dir = 0;
		priv->regs->output = 0xffffffff;
		mask = priv->regs->output;
		priv->regs->output = 0;

		/* First zero bit marks the end of implemented lines */
		for(port_cnt=0; port_cnt<32; port_cnt++)
			if ( (mask & (1<<port_cnt)) == 0 )
				break;
		priv->port_cnt = port_cnt;
	}

	/* Let the user configure the BYPASS register, this might be needed
	 * to select which cores can do I/O on a pin.
	 */
	value = drvmgr_dev_key_get(priv->dev, "bypass", DRVMGR_KT_INT);
	if ( value ) {
		priv->bypass = value->i;
	} else {
		priv->bypass = 0;
	}
	priv->regs->bypass = priv->bypass;

	/* Prepare GPIOLIB layer */
	priv->gpiolib_desc.ops = &grgpio_gpiolib_ops;

	return 0;
}
diff --git a/bsps/shared/grlib/i2c/i2cmst.c b/bsps/shared/grlib/i2c/i2cmst.c
new file mode 100644
index 0000000000..fad0937df2
--- /dev/null
+++ b/bsps/shared/grlib/i2c/i2cmst.c
@@ -0,0 +1,416 @@
+/*
+ * Driver for GRLIB port of OpenCores I2C-master
+ *
+ * COPYRIGHT (c) 2007 Cobham Gaisler AB
+ * based on the RTEMS MPC83xx I2C driver (c) 2007 Embedded Brains GmbH.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ * This file contains the driver and initialization code
+ */
+
+#include <bsp.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <grlib/ambapp.h>
+#include <rtems/libi2c.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+
+#include <grlib/i2cmst.h>
+
+#include <grlib/grlib_impl.h>
+
+/* Enable debug printks? */
+/*#define DEBUG*/
+
+#ifdef DEBUG
+ #define DBG(args...) printk(args)
+#else
+ #define DBG(args...)
+#endif
+
/* The OC I2C core will perform a write after a start unless the RD bit
   in the command register has been set. Since the rtems framework has
   a send_start function we buffer that command and use it when the first
   data is written. The START is buffered in the sendstart member below */
typedef struct gr_i2cmst_prv {
	rtems_libi2c_bus_t i2clib_desc;	/* libi2c bus descriptor (must be first) */
	struct drvmgr_dev *dev;		/* driver manager device handle */
	gr_i2cmst_regs_t *reg_ptr;	/* memory-mapped core registers */
	unsigned int sysfreq;		/* System clock frequency in kHz */
	int minor;			/* bus number returned by libi2c */
	unsigned char sendstart;	/* START events are buffered here */
	/* rtems_irq_number irq_number; */
	/* rtems_id irq_sema_id; */
} gr_i2cmst_prv_t;
+
/* Compute the prescale register value for 100 kHz SCL operation.
 * The OC I2C core uses prescale = sysfreq / (5 * f_SCL) - 1; with
 * sysfreq given in kHz and f_SCL = 100 kHz this is sysfreq/500 - 1.
 */
static int gr_i2cmst_calc_scaler(int sysfreq)
{
	return (sysfreq / 500) - 1;
}
+
+/* Wait for the current transfer to end */
+static int gr_i2cmst_wait(gr_i2cmst_prv_t *prv_ptr, uint8_t expected_sts)
+{
+ uint32_t tout = 0;
+ int current_sts;
+
+ DBG("(gr_i2cmst_wait called...");
+
+ do {
+ if (tout++ > 1000000) {
+ DBG("gr_i2cmst_wait: TIMEOUT\n");
+ return RTEMS_TIMEOUT;
+ }
+ } while (prv_ptr->reg_ptr->cmdsts & GRI2C_STS_TIP);
+
+ current_sts = prv_ptr->reg_ptr->cmdsts & ~GRI2C_STS_IF & ~GRI2C_STS_BUSY;
+
+ if (current_sts != expected_sts) {
+#if defined(DEBUG)
+ if (prv_ptr->reg_ptr->cmdsts & GRI2C_STS_RXACK) {
+ DBG("Transfer NAKed..");
+ }
+ if (prv_ptr->reg_ptr->cmdsts & GRI2C_STS_AL) {
+ DBG("arbitration lost..");
+ }
+ if (prv_ptr->reg_ptr->cmdsts & GRI2C_STS_TIP) {
+ DBG("transfer still in progress, huh?..");
+ }
+ DBG("exited with IO error..)");
+#endif
+ DBG("gr_i2cmst_wait: IO-ERROR\n");
+ return RTEMS_IO_ERROR;
+ }
+
+ DBG("exited...)");
+
+ return RTEMS_SUCCESSFUL;
+}
+
+/* Initialize hardware core */
+static rtems_status_code gr_i2cmst_init(rtems_libi2c_bus_t *bushdl)
+{
+ gr_i2cmst_prv_t *prv_ptr = (gr_i2cmst_prv_t *)bushdl;
+
+ DBG("gr_i2cmst_init called...");
+
+ /* Disable core before changing prescale register */
+ prv_ptr->reg_ptr->ctrl = 0;
+
+ /* Calculate and set prescale value */
+ prv_ptr->reg_ptr->prescl = gr_i2cmst_calc_scaler(prv_ptr->sysfreq);
+
+ /* Enable core, interrupts are not enabled */
+ prv_ptr->reg_ptr->ctrl = GRI2C_CTRL_EN;
+
+ /* Clear possible START condition */
+ prv_ptr->sendstart = 0;
+
+ DBG("exited\n");
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_status_code gr_i2cmst_send_start(rtems_libi2c_bus_t *bushdl)
+{
+ gr_i2cmst_prv_t *prv_ptr = (gr_i2cmst_prv_t *)bushdl;
+
+ DBG("gr_i2cmst_send_start called...");
+
+ /* The OC I2C core does not work with stand alone START events,
+ instead the event is buffered */
+ prv_ptr->sendstart = GRI2C_CMD_STA;
+
+ DBG("exited\n");
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_status_code gr_i2cmst_send_stop(rtems_libi2c_bus_t *bushdl)
+{
+ gr_i2cmst_prv_t *prv_ptr = (gr_i2cmst_prv_t *)bushdl;
+
+ DBG("gr_i2cmst_send_stop called...");
+
+ prv_ptr->reg_ptr->cmdsts = GRI2C_CMD_STO;
+
+ DBG("exited\n");
+
+ return RTEMS_SUCCESSFUL;
+}
+
/* libi2c send_addr operation: transmit the slave address (7- or
 * 10-bit) together with any buffered START, with rw selecting a read
 * (1) or write (0) transfer.
 *
 * NOTE(review): errors are returned as -rc (negated rtems_status_code)
 * although the declared return type is rtems_status_code — presumably
 * to match the libi2c "negative means error" convention; verify
 * against the rtems_libi2c framework.
 */
static rtems_status_code gr_i2cmst_send_addr(rtems_libi2c_bus_t *bushdl,
					     uint32_t addr, int rw)
{
	gr_i2cmst_prv_t *prv_ptr = (gr_i2cmst_prv_t *)bushdl;
	uint8_t addr_byte;
	rtems_status_code rc;

	DBG("gr_i2cmst_send_addr called, addr = 0x%x, rw = %d...",
	    addr, rw);

	/* Check if long (10-bit) address is needed; the first byte then
	 * carries the 11110xx marker plus the two high address bits.
	 */
	if (addr > 0x7f) {
		addr_byte = ((addr >> 7) & 0x06) | (rw ? 1 : 0);

		prv_ptr->reg_ptr->tdrd = addr_byte;
		prv_ptr->reg_ptr->cmdsts = GRI2C_CMD_WR | prv_ptr->sendstart;
		prv_ptr->sendstart = 0;

		/* Wait for transfer to complete */
		rc = gr_i2cmst_wait(prv_ptr, GRI2C_STATUS_IDLE);
		if (rc != RTEMS_SUCCESSFUL) {

			DBG("exited with error\n");

			return -rc;
		}
	}

	/* For 10-bit addresses the last byte should only be written for a
	   write operation */
	rc = RTEMS_SUCCESSFUL;
	if (addr <= 0x7f || rw == 0) {
		addr_byte = (addr << 1) | (rw ? 1 : 0);

		prv_ptr->reg_ptr->tdrd = addr_byte;
		prv_ptr->reg_ptr->cmdsts = GRI2C_CMD_WR | prv_ptr->sendstart;
		prv_ptr->sendstart = 0;

		/* Wait for transfer to complete */
		rc = gr_i2cmst_wait(prv_ptr, GRI2C_STATUS_IDLE);
		if (rc != RTEMS_SUCCESSFUL) {
			DBG("exited with error\n");
			return -rc;
		}
	}

	DBG("exited\n");
	return rc;
}
+
+
/* libi2c read_bytes operation: read nbytes from the bus into bytes[].
 * The final byte is answered with NAK (GRI2C_CMD_ACK) to end a
 * sequential read, which also changes the status value we expect from
 * gr_i2cmst_wait() to include the RXACK bit.
 * Returns the number of bytes read, or a negated status code on error.
 */
static int gr_i2cmst_read_bytes(rtems_libi2c_bus_t *bushdl,
				unsigned char *bytes, int nbytes)
{
	gr_i2cmst_prv_t *prv_ptr = (gr_i2cmst_prv_t *)bushdl;
	unsigned char *buf = bytes;
	rtems_status_code rc;
	unsigned char expected_sts = GRI2C_STATUS_IDLE;

	DBG("gr_i2cmst_read_bytes called, nbytes = %d...", nbytes);

	while (nbytes-- > 0) {
		if (nbytes == 0) {
			/* Respond with NAK to end sequential read */
			prv_ptr->reg_ptr->cmdsts = (GRI2C_CMD_RD | GRI2C_CMD_ACK |
						    prv_ptr->sendstart);
			expected_sts = GRI2C_STS_RXACK;
		} else {
			prv_ptr->reg_ptr->cmdsts = GRI2C_CMD_RD | prv_ptr->sendstart;
		}
		/* A buffered START (if any) was consumed by the first command */
		prv_ptr->sendstart = 0;
		/* Wait until end of transfer */
		rc = gr_i2cmst_wait(prv_ptr, expected_sts);
		if (rc != RTEMS_SUCCESSFUL) {
			DBG("exited with error\n");
			return -rc;
		}
		*buf++ = prv_ptr->reg_ptr->tdrd;
	}

	DBG("exited\n");

	return buf - bytes;
}
+
+static int gr_i2cmst_write_bytes(rtems_libi2c_bus_t *bushdl,
+ unsigned char *bytes, int nbytes)
+{
+ gr_i2cmst_prv_t *prv_ptr = (gr_i2cmst_prv_t *)bushdl;
+ unsigned char *buf = bytes;
+ rtems_status_code rc;
+
+ DBG("gr_i2cmst_write_bytes called, nbytes = %d...", nbytes);
+
+ while (nbytes-- > 0) {
+
+ DBG("writing byte 0x%02X...", *buf);
+
+ prv_ptr->reg_ptr->tdrd = *buf++;
+ prv_ptr->reg_ptr->cmdsts = GRI2C_CMD_WR | prv_ptr->sendstart;
+ prv_ptr->sendstart = 0;
+
+ /* Wait for transfer to complete */
+ rc = gr_i2cmst_wait(prv_ptr, GRI2C_STATUS_IDLE);
+
+ if (rc != RTEMS_SUCCESSFUL) {
+ DBG("exited with error\n");
+ return -rc;
+ }
+ }
+
+ DBG("exited\n");
+
+ return buf - bytes;
+}
+
+static rtems_libi2c_bus_ops_t gr_i2cmst_ops = {
+ init: gr_i2cmst_init,
+ send_start: gr_i2cmst_send_start,
+ send_stop: gr_i2cmst_send_stop,
+ send_addr: gr_i2cmst_send_addr,
+ read_bytes: gr_i2cmst_read_bytes,
+ write_bytes: gr_i2cmst_write_bytes,
+};
+
+/* Get Hardware and disable it */
+static int i2cmst_device_init(gr_i2cmst_prv_t *priv)
+{
+ struct amba_dev_info *ambadev;
+ struct ambapp_core *pnpinfo;
+
+ /* Get device information from AMBA PnP information */
+ ambadev = (struct amba_dev_info *)priv->dev->businfo;
+ if ( ambadev == NULL ) {
+ return -1;
+ }
+ pnpinfo = &ambadev->info;
+ priv->reg_ptr = (gr_i2cmst_regs_t *)pnpinfo->apb_slv->start;
+
+ /* Disable core */
+ priv->reg_ptr->ctrl = 0;
+
+ priv->i2clib_desc.ops = &gr_i2cmst_ops;
+ priv->i2clib_desc.size = sizeof(gr_i2cmst_ops);
+ return 0;
+}
+
+
+/******************* Driver Manager Part ***********************/
+
+int i2cmst_init2(struct drvmgr_dev *dev);
+int i2cmst_init3(struct drvmgr_dev *dev);
+
/* Driver manager operations: init levels 2 and 3 are used. */
struct drvmgr_drv_ops i2cmst_ops =
{
	.init = {NULL, i2cmst_init2, i2cmst_init3, NULL},
	.remove = NULL,
	.info = NULL
};

/* AMBA Plug&Play IDs this driver binds to. */
struct amba_dev_id i2cmst_ids[] =
{
	{VENDOR_GAISLER, GAISLER_I2CMST},
	{0, 0}		/* Mark end of table */
};

/* Driver description registered with the driver manager. */
struct amba_drv_info i2cmst_drv_info =
{
	{
		DRVMGR_OBJ_DRV,			/* Driver */
		NULL,				/* Next driver */
		NULL,				/* Device list */
		DRIVER_AMBAPP_GAISLER_I2CMST_ID,/* Driver ID */
		"I2CMST_DRV",			/* Driver Name */
		DRVMGR_BUS_TYPE_AMBAPP,		/* Bus Type */
		&i2cmst_ops,
		NULL,				/* Funcs */
		0,				/* No devices yet */
		0,
	},
	&i2cmst_ids[0]
};
+
/* Register the I2CMST driver with the driver manager framework. */
void i2cmst_register_drv (void)
{
	DBG("Registering I2CMST driver\n");
	drvmgr_drv_register(&i2cmst_drv_info.general);
}
+
/* Init stage 2: the I2CMST driver is informed about a new hardware
 * device; allocate the private structure only. Hardware and libi2c
 * setup is deferred to i2cmst_init3().
 */
int i2cmst_init2(struct drvmgr_dev *dev)
{
	gr_i2cmst_prv_t *priv;

	DBG("I2CMST[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);

	priv = dev->priv = grlib_calloc(1, sizeof(*priv));
	if ( !priv )
		return DRVMGR_NOMEM;
	priv->dev = dev;

	/* This core will not find other cores, so we wait for init3() */

	return DRVMGR_OK;
}
+
+/* Init stage 2 */
+int i2cmst_init3(struct drvmgr_dev *dev)
+{
+ gr_i2cmst_prv_t *priv;
+ char prefix[32];
+ char devName[32];
+ int rc;
+
+ priv = (gr_i2cmst_prv_t *)dev->priv;
+
+ /* Do initialization */
+
+ /* Initialize i2c library */
+ rc = rtems_libi2c_initialize();
+ if (rc != 0) {
+ DBG("I2CMST: rtems_libi2c_initialize failed, exiting...\n");
+ free(dev->priv);
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+
+ /* I/O system registered and initialized
+ * Now we take care of device initialization.
+ */
+
+ /* Get frequency */
+ if ( drvmgr_freq_get(dev, DEV_APB_SLV, &priv->sysfreq) ) {
+ return DRVMGR_FAIL;
+ }
+ priv->sysfreq = priv->sysfreq / 1000; /* Convert to kHz */
+
+ if ( i2cmst_device_init(priv) ) {
+ free(dev->priv);
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+
+ /* Get Filesystem name prefix */
+ prefix[0] = '\0';
+ if ( drvmgr_get_dev_prefix(dev, prefix) ) {
+ /* Failed to get prefix, make sure of a unique FS name
+ * by using the driver minor.
+ */
+ sprintf(devName, "/dev/i2c%d", dev->minor_drv+1);
+ } else {
+ /* Got special prefix, this means we have a bus prefix
+ * And we should use our "bus minor"
+ */
+ sprintf(devName, "/dev/%si2c%d", prefix, dev->minor_bus+1);
+ }
+
+ /* Register Bus for this Device */
+ rc = rtems_libi2c_register_bus(devName, &priv->i2clib_desc);
+ if (rc < 0) {
+ DBG("I2CMST: rtems_libi2c_register_bus(%s) failed, exiting..\n", devName);
+ free(dev->priv);
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+ priv->minor = rc;
+
+ return DRVMGR_OK;
+}
diff --git a/bsps/shared/grlib/iommu/griommu.c b/bsps/shared/grlib/iommu/griommu.c
new file mode 100644
index 0000000000..d0d22723ca
--- /dev/null
+++ b/bsps/shared/grlib/iommu/griommu.c
@@ -0,0 +1,1458 @@
+/*
+ * GRIOMMU Driver Interface
+ *
+ * COPYRIGHT (c) 2017
+ * Cobham Gaisler AB
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/ambapp.h>
+#include <rtems.h>
+#include <rtems/bspIo.h>
+#include <bsp.h>
+#include <grlib/griommu.h>
+
+#include <grlib/grlib_impl.h>
+
+/*#define STATIC*/
+#define STATIC static
+
+/*#define INLINE*/
+#define INLINE inline
+
+#define UNUSED __attribute__((unused))
+
+/*#define DEBUG 1*/
+
+#ifdef DEBUG
+#define DBG(x...) printf(x)
+#else
+#define DBG(x...)
+#endif
+
+/*
+ * GRIOMMU CAP0 register fields
+ */
+#define CAP0_A (0x1 << CAP0_A_BIT)
+#define CAP0_AC (0x1 << CAP0_AC_BIT)
+#define CAP0_CA (0x1 << CAP0_CA_BIT)
+#define CAP0_CP (0x1 << CAP0_CP_BIT)
+#define CAP0_NARB (0xf << CAP0_NARB_BIT)
+#define CAP0_CS (0x1 << CAP0_CS_BIT)
+#define CAP0_FT (0x3 << CAP0_FT_BIT)
+#define CAP0_ST (0x1 << CAP0_ST_BIT)
+#define CAP0_I (0x1 << CAP0_I_BIT)
+#define CAP0_IT (0x1 << CAP0_IT_BIT)
+#define CAP0_IA (0x1 << CAP0_IA_BIT)
+#define CAP0_IP (0x1 << CAP0_IP_BIT)
+#define CAP0_MB (0x1 << CAP0_MB_BIT)
+#define CAP0_GRPS (0xf << CAP0_GRPS_BIT)
+#define CAP0_MSTS (0xf << CAP0_MSTS_BIT)
+
+#define CAP0_A_BIT 31
+#define CAP0_AC_BIT 30
+#define CAP0_CA_BIT 29
+#define CAP0_CP_BIT 28
+#define CAP0_NARB_BIT 20
+#define CAP0_CS_BIT 19
+#define CAP0_FT_BIT 17
+#define CAP0_ST_BIT 16
+#define CAP0_I_BIT 15
+#define CAP0_IT_BIT 14
+#define CAP0_IA_BIT 13
+#define CAP0_IP_BIT 12
+#define CAP0_MB_BIT 8
+#define CAP0_GRPS_BIT 4
+#define CAP0_MSTS_BIT 0
+
+/*
+ * GRIOMMU CAP1 register fields
+ */
+#define CAP1_CADDR (0xfff << CAP1_CADDR_BIT)
+#define CAP1_CMASK (0xf << CAP1_CMASK_BIT)
+#define CAP1_CTAGBITS (0xff << CAP1_CTAGBITS_BIT)
+#define CAP1_CISIZE (0x7 << CAP1_CISIZE_BIT)
+#define CAP1_CLINES (0x1f << CAP1_CLINES_BIT)
+
+#define CAP1_CADDR_BIT 20
+#define CAP1_CMASK_BIT 16
+#define CAP1_CTAGBITS_BIT 8
+#define CAP1_CISIZE_BIT 5
+#define CAP1_CLINES_BIT 0
+
+/*
+ * GRIOMMU CTRL register fields
+ * DEFINED IN HEADER FILE
+ */
+
+/*
+ * GRIOMMU FLUSH register fields
+ */
+#define FLUSH_FGRP (0xf << FLUSH_FGRP_BIT)
+#define FLUSH_GF (0x1 << FLUSH_GF_BIT)
+#define FLUSH_F (0x1 << FLUSH_F_BIT)
+
+#define FLUSH_FGRP_BIT 4
+#define FLUSH_GF_BIT 1
+#define FLUSH_F_BIT 0
+
+/*
+ * GRIOMMU STATUS register fields
+ */
+#define STS_PE (0x1 << STS_PE_BIT)
+#define STS_DE (0x1 << STS_DE_BIT)
+#define STS_FC (0x1 << STS_FC_BIT)
+#define STS_FL (0x1 << STS_FL_BIT)
+#define STS_AD (0x1 << STS_AD_BIT)
+#define STS_TE (0x1 << STS_TE_BIT)
+#define STS_ALL (STS_PE | STS_DE | STS_FC | STS_FL | STS_AD | STS_TE)
+
+#define STS_PE_BIT 5
+#define STS_DE_BIT 4
+#define STS_FC_BIT 3
+#define STS_FL_BIT 2
+#define STS_AD_BIT 1
+#define STS_TE_BIT 0
+
+/*
+ * GRIOMMU IMASK register fields
+ */
+#define IMASK_PEI (0x1 << IMASK_PEI_BIT)
+#define IMASK_FCI (0x1 << IMASK_FCI_BIT)
+#define IMASK_FLI (0x1 << IMASK_FLI_BIT)
+#define IMASK_ADI (0x1 << IMASK_ADI_BIT)
+#define IMASK_TEI (0x1 << IMASK_TEI_BIT)
+#define IMASK_ALL (IMASK_PEI | IMASK_FCI | IMASK_FLI | IMASK_ADI | IMASK_TEI)
+
+#define IMASK_PEI_BIT 5
+#define IMASK_FCI_BIT 3
+#define IMASK_FLI_BIT 2
+#define IMASK_ADI_BIT 1
+#define IMASK_TEI_BIT 0
+
+/*
+ * GRIOMMU MASTER register fields
+ */
+/* DEFINED IN HEADER FILE
+#define MASTER_VENDOR (0xff << MASTER_VENDOR_BIT)
+#define MASTER_DEVICE (0xfff << MASTER_DEVICE_BIT)
+#define MASTER_BS (0x1 << MASTER_BS_BIT)
+#define MASTER_GROUP (0xf << MASTER_GROUP_BIT)
+
+#define MASTER_VENDOR_BIT 24
+#define MASTER_DEVICE_BIT 12
+#define MASTER_BS_BIT 4
+#define MASTER_GROUP_BIT 0
+*/
+
+#define MASTER_BS_BUS0 0
+#define MASTER_BS_BUS1 MASTER_BS
+
+/*
+ * GRIOMMU GROUP register fields
+ */
+#define GRP_BASE (0xfffffff << GRP_BASE_BIT)
+#define GRP_P (0x1 << GRP_P_BIT)
+#define GRP_AG (0x1 << GRP_AG_BIT)
+
+#define GRP_BASE_BIT 4
+#define GRP_P_BIT 1
+#define GRP_AG_BIT 0
+
+
+#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
+#define REG_READ(addr) (*(volatile unsigned int *)(addr))
+
/*
 * GRIOMMU APB Register MAP (offsets relative to the core's AHB base
 * address; all registers are 32-bit).
 */
struct griommu_regs {
	volatile unsigned int cap0;		/* 0x00 - Capability 0 */
	volatile unsigned int cap1;		/* 0x04 - Capability 1 */
	volatile unsigned int cap2;		/* 0x08 - Capability 2 */
	volatile unsigned int resv1;		/* 0x0c - Reserved */
	volatile unsigned int ctrl;		/* 0x10 - Control */
	volatile unsigned int flush;		/* 0x14 - TLB/cache flush */
	volatile unsigned int status;		/* 0x18 - Status */
	volatile unsigned int imask;		/* 0x1c - Interrupt mask */
	volatile unsigned int ahbstat;		/* 0x20 - AHB Failing Access */
	volatile unsigned int resv2[7];		/* 0x24-0x3c - Reserved. No access */
	volatile unsigned int master[16];	/* 0x40-0x7c - Master configuration */
	volatile unsigned int grp_ctrl[16];	/* 0x80-0xbc - Group control */
	volatile unsigned int diag_ca;		/* 0xc0 - Diagnostic cache access */
	volatile unsigned int diag_cad[8];	/* 0xc4-0xe0 - Diagnostic cache data */
	volatile unsigned int diag_cat;		/* 0xe4 - Diagnostic cache tag */
	volatile unsigned int ei_data;		/* 0xe8 - Data RAM error injection */
	volatile unsigned int ei_tag;		/* 0xec - Tag RAM error injection */
	volatile unsigned int resv3[4];		/* 0xf0-0xfc - Reserved. No access */
	volatile unsigned int asmpctrl[16];	/* 0x100-0x13c - ASMP access control */
};
+
#define DEVNAME_LEN 9	/* "griommu0" + NUL terminator */
/*
 * GRIOMMU Driver private data structure (one instance supported,
 * see griommupriv).
 */
struct griommu_priv {
	struct drvmgr_dev *dev;		/* driver manager device handle */
	char devname[DEVNAME_LEN];	/* device name, e.g. "griommu0" */
	/* GRIOMMU control registers */
	struct griommu_regs *regs;

	/* GRIOMMU capabilities (decoded from CAP0 in griommu_init) */
	int apv;		/* Access Protection Vector available */
	int apv_cache;		/* APV cache implemented */
	int apv_cache_addr;	/* APV cache is addressed by cache address */
	int conf_pagesize;	/* page size is configurable via CTRL */

	int groups;		/* number of groups implemented */
	int masters;		/* number of masters implemented */

	/* GRIOMMU page size (bytes) */
	int pagesize;

	/* GRIOMMU APV cache state (mirrored from CTRL) */
	int cache_enabled;
	int group_addressing;

	/* User defined ISR */
	griommu_isr_t isr;
	void *isr_arg;
};
+
+/*
+ * GRIOMMU internal prototypes
+ */
+/* -Register access functions */
+STATIC INLINE unsigned int griommu_reg_cap0(void);
+STATIC INLINE unsigned int griommu_reg_cap1(void);
+STATIC INLINE unsigned int griommu_reg_ctrl(void);
+STATIC INLINE int griommu_reg_ctrl_set(unsigned int val);
+STATIC INLINE int griommu_reg_flush_set(unsigned int val);
+STATIC INLINE unsigned int griommu_reg_status(void);
+STATIC INLINE int griommu_reg_status_clear(unsigned int val);
+STATIC INLINE unsigned int griommu_reg_imask(void);
+STATIC INLINE int griommu_reg_imask_set(int mask);
+STATIC INLINE unsigned int griommu_reg_ahbfas(void);
+STATIC INLINE unsigned int griommu_reg_master(int master);
+STATIC INLINE int griommu_reg_master_set(int master, unsigned int val);
+STATIC INLINE unsigned int griommu_reg_group(int group);
+STATIC INLINE int griommu_reg_group_set(int group, unsigned int val);
+
+/* APV helper functions */
+STATIC void griommu_apv_set_word(unsigned int * wordptr, int startbitidx,
+ int nbits, unsigned int val);
+STATIC int griommu_apv_set(void * apv, int index, int size, unsigned int val);
+
+/* -Init function called by drvmgr */
+int griommu_init1(struct drvmgr_dev *dev);
+STATIC int griommu_init(struct griommu_priv *priv);
+
+
+/* -IRQ handler */
+void griommu_isr(void *arg);
+
/*
 * GRIOMMU static members: the driver supports exactly one core
 * instance, tracked by this pointer (NULL until init1 runs).
 */
static struct griommu_priv *griommupriv = NULL;
+
+/* GRIOMMU DRIVER */
+
/* Driver manager operations: only init level 1 is used. */
struct drvmgr_drv_ops griommu_ops =
{
	.init = {griommu_init1, NULL, NULL, NULL},
	.remove = NULL,
	.info = NULL
};

/* AMBA Plug&Play IDs this driver binds to. */
struct amba_dev_id griommu_ids[] =
{
	{VENDOR_GAISLER, GAISLER_GRIOMMU},
	{0, 0}		/* Mark end of table */
};

/* Driver description registered with the driver manager; drvmgr
 * allocates the private structure for us (non-zero size field).
 */
struct amba_drv_info griommu_info =
{
	{
		DRVMGR_OBJ_DRV,			/* Driver */
		NULL,				/* Next driver */
		NULL,				/* Device list */
		DRIVER_AMBAPP_GAISLER_GRIOMMU_ID,/* Driver ID */
		"GRIOMMU_DRV",			/* Driver Name */
		DRVMGR_BUS_TYPE_AMBAPP,		/* Bus Type */
		&griommu_ops,
		NULL,				/* Funcs */
		0,				/* No devices yet */
		sizeof(struct griommu_priv),	/* Make drvmgr alloc private */
	},
	&griommu_ids[0]
};
+
/* Register the GRIOMMU driver with the driver manager framework. */
void griommu_register_drv(void)
{
	DBG("Registering GRIOMMU driver\n");
	drvmgr_drv_register(&griommu_info.general);
}
+
+/* Initializes the GRIOMMU core and driver
+ *
+ * Return values
+ * 0 Successful initalization
+ */
+STATIC int griommu_init(struct griommu_priv *priv)
+{
+ struct ambapp_ahb_info *ahb;
+ struct amba_dev_info *ainfo = priv->dev->businfo;
+
+ /* Find GRIOMMU core from Plug&Play information */
+ ahb = ainfo->info.ahb_slv;
+
+ /* Found GRIOMMU core, init private structure */
+ priv->regs = (struct griommu_regs *)ahb->start[0];
+
+ /* Mask all interrupts */
+ griommu_reg_imask_set(0);
+
+ /* Initialize GRIOMMU capabilities */
+ uint32_t cap0 = griommu_reg_cap0();
+ priv->apv = (cap0 & CAP0_A) >> CAP0_A_BIT;
+ priv->apv_cache = (cap0 & CAP0_AC) >> CAP0_AC_BIT;
+ priv->apv_cache_addr = (cap0 & CAP0_CA) >> CAP0_CA_BIT;
+ priv->conf_pagesize = (cap0 & CAP0_CS) >> CAP0_CS_BIT;
+ priv->groups = ((cap0 & CAP0_GRPS) >> CAP0_GRPS_BIT) + 1;
+ priv->masters = ((cap0 & CAP0_MSTS) >> CAP0_MSTS_BIT) + 1;
+
+ /* Get GRIOMMU pagesize */
+ uint32_t ctrl = griommu_reg_ctrl();
+ if (priv->conf_pagesize){
+ priv->pagesize = (4*1024 << ((ctrl & CTRL_PGSZ) >> CTRL_PGSZ_BIT));
+ }else{
+ priv->pagesize = 4*1024;
+ }
+ priv->cache_enabled = (ctrl & CTRL_CE);
+ priv->group_addressing = (ctrl & CTRL_GS);
+
+ DBG("GRIOMMU Capabilities: APV=%d, APVC=%d, APVCA=%d, CS=%d, "
+ "GRPS=%d, MSTS=%d\n",
+ priv->apv, priv->apv_cache, priv->apv_cache_addr,
+ priv->conf_pagesize, priv->groups, priv->masters);
+ DBG("GRIOMMU driver initialized\n");
+
+ return 0;
+}
+
+/* Called when a core is found with the AMBA device and vendor ID
+ * given in griommu_ids[]. IRQ, Console does not work here
+ */
+/* Called when a core is found with the AMBA device and vendor ID
+ * given in griommu_ids[]. IRQ, Console does not work here.
+ *
+ * Only a single GRIOMMU core is supported: a second probe fails.
+ * Returns a DRVMGR_* status code.
+ */
+int griommu_init1(struct drvmgr_dev *dev)
+{
+ int status;
+ struct griommu_priv *priv;
+
+ DBG("GRIOMMU[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+
+ if (griommupriv) {
+ DBG("Driver only supports one GRIOMMU core\n");
+ return DRVMGR_FAIL;
+ }
+
+ /* Private structure is pre-allocated by the driver manager */
+ priv = dev->priv;
+ if (!priv)
+ return DRVMGR_NOMEM;
+
+ priv->dev = dev;
+ strncpy(&priv->devname[0], "griommu0", DEVNAME_LEN);
+ griommupriv = priv;
+
+ /* Initialize GRIOMMU Hardware */
+ status = griommu_init(priv);
+ if (status) {
+ printk("Failed to initialize griommu driver %d\n", status);
+ /* Report failure through a driver-manager code instead of a raw
+  * -1, consistent with the other return paths of this function */
+ return DRVMGR_FAIL;
+ }
+
+ return DRVMGR_OK;
+}
+
+/* Read the CAP0 (capability 0) register of the one supported core */
+STATIC INLINE unsigned int griommu_reg_cap0(void)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ return REG_READ(&priv->regs->cap0);
+}
+
+/* Read the CAP1 (capability 1) register */
+STATIC INLINE unsigned int griommu_reg_cap1(void)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ return REG_READ(&priv->regs->cap1);
+}
+
+/* Read the control register */
+STATIC INLINE unsigned int griommu_reg_ctrl(void)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ return REG_READ(&priv->regs->ctrl);
+}
+
+/* Write the control register. Always returns 0. */
+STATIC INLINE int griommu_reg_ctrl_set(unsigned int val)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ REG_WRITE(&priv->regs->ctrl, val);
+ return 0;
+}
+
+/* Write the APV cache flush register. Always returns 0. */
+STATIC INLINE int griommu_reg_flush_set(unsigned int val)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ REG_WRITE(&priv->regs->flush, val);
+ return 0;
+}
+
+/* Read the status register (pending error/event bits) */
+STATIC INLINE unsigned int griommu_reg_status(void)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ return REG_READ(&priv->regs->status);
+}
+
+/* Clear the given status bits (restricted to STS_ALL). Returns 0. */
+STATIC INLINE int griommu_reg_status_clear(unsigned int val)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ /* Clear errors */
+ REG_WRITE(&priv->regs->status, (val & STS_ALL));
+ return 0;
+}
+
+/* Read the interrupt mask register */
+STATIC INLINE unsigned int griommu_reg_imask(void)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ return REG_READ(&priv->regs->imask);
+}
+
+/* Write the interrupt mask register (restricted to IMASK_ALL).
+ * Returns 0. */
+STATIC INLINE int griommu_reg_imask_set(int mask)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ /* Set the new interrupt mask */
+ REG_WRITE(&priv->regs->imask, (mask & IMASK_ALL));
+ return 0;
+}
+
+/* Read the AHB failing access register (address of the last
+ * denied/faulting access) */
+STATIC INLINE unsigned int griommu_reg_ahbfas(void)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ return REG_READ(&priv->regs->ahbstat);
+}
+
+/* Write the configuration register of the given master index.
+ * Caller validates the index. Returns 0. */
+STATIC INLINE int griommu_reg_master_set(int master, unsigned int val)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ /* Change master conf */
+ REG_WRITE(&priv->regs->master[master], val);
+ return 0;
+}
+
+/* Read the configuration register of the given master index */
+STATIC INLINE unsigned int griommu_reg_master(int master)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ return REG_READ(&priv->regs->master[master]);
+}
+
+/* Read the control register of the given protection group */
+STATIC INLINE unsigned int griommu_reg_group(int group)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ return REG_READ(&priv->regs->grp_ctrl[group]);
+}
+
+/* Write the control register of the given protection group.
+ * Returns 0. */
+STATIC INLINE int griommu_reg_group_set(int group, unsigned int val)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ REG_WRITE(&priv->regs->grp_ctrl[group], val);
+ return 0;
+}
+
+/* Replace nbits bits of *wordptr, starting at startbitidx (0 = MSB),
+ * with the corresponding bits of val. Bits outside the range are
+ * preserved. Caller guarantees startbitidx + nbits <= 32. */
+STATIC void griommu_apv_set_word(unsigned int * wordptr, int startbitidx,
+ int nbits, unsigned int val)
+{
+ unsigned int mask;
+ unsigned int word = *wordptr;
+ int endbitidx = startbitidx + nbits - 1;
+
+ /* Set initial mask */
+ mask = 0xffffffff;
+
+ /* Adjust mask for the starting bit */
+ mask >>= startbitidx;
+
+ /* Adjust mask for the end bit */
+ mask >>= (31 - endbitidx);
+ mask <<= (31 - endbitidx);
+
+ DBG("Setting word: startbitdx=%d, endbitidx=%d, mask=0x%02x",
+ startbitidx, endbitidx, (unsigned int) mask);
+
+ /* Clear written bits with mask */
+ word &= ~(mask);
+
+ /* Set bits in val with mask */
+ mask &= val;
+ word |= mask;
+
+ DBG(", old word=0x%08x, new word=0x%08x\n",*wordptr, word);
+
+ /* Write word */
+ *wordptr=word;
+}
+
+/* Set certain bits of the APV to val
+ *
+ * Sets bit positions [index, index+size) in the Access Protection
+ * Vector at apv. val is expected to be all-zeros (allow) or all-ones
+ * (deny): the complete middle words are written with memset(), which
+ * replicates only the low byte of val across each word.
+ */
+STATIC int griommu_apv_set(void * apv, int index, int size, unsigned int val)
+{
+ unsigned int * words = (unsigned int *) apv;
+ int len = size;
+ int wordidx = (index/32);
+ int startbit = (index % 32);
+ int nbits;
+ int nwords;
+
+ /* First incomplete word is a special case */
+ if (startbit != 0){
+ /* Get how many bits are we changing in this word */
+ if (startbit + len < 32){
+ nbits = len;
+ }else{
+ nbits = 32 - startbit;
+ }
+ griommu_apv_set_word(&words[wordidx], startbit, nbits, val);
+ DBG("First word: wordidx=%d, startbit=%d, bits=%d, val=0x%08x\n",
+ wordidx, startbit, nbits, words[wordidx]);
+
+ /* Update wordidx and len */
+ len = len - nbits;
+ wordidx++;
+ }
+
+ /* Write all complete full words */
+ if (len != 0){
+ nwords = (len/32);
+ memset((void *) &words[wordidx], val, nwords*4);
+ DBG("Middle words: wordidx=%d, nwords=%d\n", wordidx, nwords);
+ /* Update wordidx and len*/
+ wordidx = wordidx + nwords;
+ len = len - nwords*32;
+ }
+
+ /* Last word is a special case (debug label fixed: this was a
+  * copy-paste of the "First word" message) */
+ if (len != 0){
+ nbits = len;
+ griommu_apv_set_word(&words[wordidx], 0, nbits, val);
+ DBG("Last word: wordidx=%d, startbit=%d, bits=%d, val=0x%08x\n",
+ wordidx, 0, nbits, words[wordidx]);
+ /* Update len */
+ len = len - (nbits);
+ }
+
+ return GRIOMMU_ERR_OK;
+}
+
+/* GRIOMMU Interrupt handler, called when there may be a GRIOMMU interrupt.
+ * arg is the driver private structure registered at install time.
+ */
+void griommu_isr(void *arg)
+{
+ struct griommu_priv *priv = arg;
+ unsigned int sts = griommu_reg_status();
+ unsigned int mask = griommu_reg_imask();
+ unsigned int access = griommu_reg_ahbfas();
+
+ /* Make sure that the interrupt is pending and unmasked,
+  * otherwise it might have been another core
+  * sharing the same interrupt line */
+ if ((sts & STS_ALL) & (mask & IMASK_ALL)){
+ /* Reset error status */
+ griommu_reg_status_clear(sts);
+ /* Execute user IRQ (there will always be one ISR installed,
+  * interrupts are only unmasked by griommu_isr_register()) */
+ (priv->isr)(priv->isr_arg, access, sts);
+ }
+}
+
+/* Setup IOMMU master:
+ * Route the master's traffic to the primary or secondary bus
+ * (GRIOMMU_OPTIONS_BUS1 selects the secondary) and assign it to a
+ * protection group. Returns GRIOMMU_ERR_OK or an error code.
+ */
+int griommu_master_setup(int master, int group, int options)
+{
+ struct griommu_priv *core = griommupriv;
+ unsigned int cfg;
+
+ if (core == NULL) {
+  DBG("GRIOMMU not initialized.\n");
+  return GRIOMMU_ERR_NOINIT;
+ }
+ if (master < 0 || master >= core->masters) {
+  DBG("Wrong master id.\n");
+  return GRIOMMU_ERR_EINVAL;
+ }
+ if (group < 0 || group >= core->groups) {
+  DBG("Wrong group id.\n");
+  return GRIOMMU_ERR_EINVAL;
+ }
+
+ /* Compose bus-select and group fields, then update the master reg */
+ cfg = (group << MASTER_GROUP_BIT) & MASTER_GROUP;
+ if (options & GRIOMMU_OPTIONS_BUS1)
+  cfg |= MASTER_BS_BUS1;
+ else
+  cfg |= MASTER_BS_BUS0;
+ griommu_reg_master_set(master, cfg);
+
+ DBG("IOMMU master setup: master %d, traffic routed %s, group %d\n",
+  master,
+  (options & GRIOMMU_OPTIONS_BUS1) ?
+   "to Secondary bus":"to Primary bus",
+  group);
+
+ return GRIOMMU_ERR_OK;
+}
+
+
+/* Get IOMMU master info:
+ * Read back the configuration register of the given master into
+ * *info. Returns GRIOMMU_ERR_OK or an error code on bad input.
+ */
+int griommu_master_info(int master, uint32_t * info)
+{
+ struct griommu_priv *core = griommupriv;
+
+ if (core == NULL) {
+  DBG("GRIOMMU not initialized.\n");
+  return GRIOMMU_ERR_NOINIT;
+ }
+ if (master < 0 || master >= core->masters) {
+  DBG("Wrong master id.\n");
+  return GRIOMMU_ERR_EINVAL;
+ }
+ if (info == NULL) {
+  DBG("Wrong pointer.\n");
+  return GRIOMMU_ERR_EINVAL;
+ }
+
+ /* Read the master configuration register */
+ *info = griommu_reg_master(master);
+ return GRIOMMU_ERR_OK;
+}
+
+/* Find IOMMU master:
+ * Scan the master registers for the instance:th master matching the
+ * given AMBA vendor/device pair. Returns the master index, or
+ * GRIOMMU_ERR_NOTFOUND / GRIOMMU_ERR_NOINIT.
+ */
+int griommu_master_find(int vendor, int device, int instance)
+{
+ struct griommu_priv *core = griommupriv;
+ int idx;
+ int hits = 0;
+
+ if (core == NULL) {
+  DBG("GRIOMMU not initialized.\n");
+  return GRIOMMU_ERR_NOINIT;
+ }
+
+ for (idx = 0; idx < core->masters; idx++) {
+  unsigned int mreg = griommu_reg_master(idx);
+  int mvendor = (mreg & MASTER_VENDOR) >> MASTER_VENDOR_BIT;
+  int mdevice = (mreg & MASTER_DEVICE) >> MASTER_DEVICE_BIT;
+
+  if (mvendor != vendor || mdevice != device)
+   continue;
+  /* Matching vendor/device: is it the requested instance? */
+  if (hits++ == instance) {
+   DBG("Found master %d: VENDOR=%s(0x%02x), DEVICE=%s(0x%03x), "
+    "Instance=%d\n",
+    idx,
+    ambapp_vendor_id2str(vendor), vendor,
+    ambapp_device_id2str(vendor,device), device, instance
+    );
+   return idx;
+  }
+ }
+
+ DBG("Master not found: VENDOR=%s(0x%02x), DEVICE=%s(0x%03x), "
+  "Instance=%d\n",
+  ambapp_vendor_id2str(vendor), vendor,
+  ambapp_device_id2str(vendor,device), device, instance
+  );
+ return GRIOMMU_ERR_NOTFOUND;
+}
+
+/* Setup IOMMU:
+ * Apply the GRIOMMU_OPTIONS_* flags to the control register after
+ * checking that the hardware implements the requested features
+ * (APV cache, cache group addressing, configurable page size).
+ * Returns GRIOMMU_ERR_OK or an error code.
+ */
+int griommu_setup(int options)
+{
+ struct griommu_priv * priv = griommupriv;
+ unsigned int ctrl;
+
+ if (priv == NULL){
+ DBG("GRIOMMU not initialized.\n");
+ return GRIOMMU_ERR_NOINIT;
+ }
+
+ /* Check Cache: flush it before enabling so no stale entries remain */
+ if (options & GRIOMMU_OPTIONS_CACHE_ENABLE) {
+ if (priv->apv_cache){
+ /* Flush cache */
+ griommu_reg_flush_set(FLUSH_F);
+ priv->cache_enabled = 1;
+ }else{
+ DBG("GRIOMMU APV cache not supported.\n");
+ return GRIOMMU_ERR_IMPLEMENTED;
+ }
+ }else{
+ priv->cache_enabled = 0;
+ }
+
+ /* Check group addressing */
+ if (options & GRIOMMU_OPTIONS_GROUPADDRESSING_ENABLE){
+ if (priv->apv_cache_addr){
+ priv->group_addressing = 1;
+ }else{
+ DBG("GRIOMMU APV cache group addressing not supported.\n");
+ return GRIOMMU_ERR_IMPLEMENTED;
+ }
+ }else{
+ priv->group_addressing = 0;
+ }
+
+ /* Check pagesize: only the default 4 KiB is allowed unless the
+  * core has a configurable page size */
+ if ((options & CTRL_PGSZ) != GRIOMMU_OPTIONS_PAGESIZE_4KIB){
+ if (priv->conf_pagesize == 0){
+ DBG("GRIOMMU Configurable pagesize not supported.\n");
+ return GRIOMMU_ERR_IMPLEMENTED;
+ }
+ }
+
+ /* Get CTRL IOMMU */
+ ctrl = griommu_reg_ctrl();
+
+ /* Clear used fields */
+ ctrl &= ~(CTRL_CE | CTRL_GS | CTRL_PGSZ | CTRL_LB |
+ CTRL_DP | CTRL_AU | CTRL_WP);
+
+ /* Clear not used fields */
+ options &= (CTRL_CE | CTRL_GS | CTRL_PGSZ | CTRL_LB |
+ CTRL_DP | CTRL_AU | CTRL_WP);
+
+ /* Set new values */
+ ctrl |= options;
+
+ /* Set CTRL IOMMU */
+ griommu_reg_ctrl_set(ctrl);
+
+ DBG("IOMMU setup: prefetching %s, cache %s, groupaddr %s, "
+ "lookup bus %s, ahb update %s,\nwprot only %s, pagesize %d KiB\n",
+ ((options & GRIOMMU_OPTIONS_PREFETCH_DISABLE)?
+ "disabled":"enabled"),
+ ((options & GRIOMMU_OPTIONS_CACHE_ENABLE)? "enabled":"disabled"),
+ ((options & GRIOMMU_OPTIONS_GROUPADDRESSING_ENABLE)?
+ "enabled":"disabled"),
+ ((options & GRIOMMU_OPTIONS_LOOKUPBUS_BUS1)? "bus1":"bus0"),
+ ((options & GRIOMMU_OPTIONS_AHBUPDATE_ENABLE)?
+ "enabled":"disabled"),
+ ((options & GRIOMMU_OPTIONS_WPROTONLY_ENABLE)?
+ "enabled":"disabled"),
+ (4 << ((options & GRIOMMU_OPTIONS_PAGESIZE_512KIB) >> 18))
+ );
+
+ return GRIOMMU_ERR_OK;
+}
+
+/* Status IOMMU:
+ * Return the raw control register value (or GRIOMMU_ERR_NOINIT) and
+ * print a decoded summary when DEBUG is enabled.
+ */
+int griommu_status(void)
+{
+ struct griommu_priv * priv = griommupriv;
+ unsigned int ctrl;
+
+ if (priv == NULL){
+ DBG("GRIOMMU not initialized.\n");
+ return GRIOMMU_ERR_NOINIT;
+ }
+
+ /* Get CTRL IOMMU */
+ ctrl = griommu_reg_ctrl();
+
+ DBG("IOMMU status: prefetching %s, cache %s, groupaddr %s, "
+ "lookup bus %s, ahb update %s,\nwprot only %s, pagesize %d KiB\n",
+ ((ctrl & CTRL_DP)? "disabled":"enabled"),
+ ((ctrl & CTRL_CE)? "enabled":"disabled"),
+ ((ctrl & CTRL_GS)? "enabled":"disabled"),
+ ((ctrl & CTRL_LB)? "bus1":"bus0"),
+ ((ctrl & CTRL_AU)? "enabled":"disabled"),
+ ((ctrl & CTRL_WP)? "enabled":"disabled"),
+ (4 << ((ctrl & CTRL_PGSZ) >> CTRL_PGSZ_BIT))
+ );
+
+ return ctrl;
+}
+
+/* Register a user interrupt service routine.
+ * options selects which GRIOMMU_INTERRUPT_* sources to unmask.
+ * Interrupts are masked while the handler is being swapped, and only
+ * unmasked again after the new ISR is installed.
+ */
+int griommu_isr_register(griommu_isr_t isr, void * arg, int options)
+{
+ struct griommu_priv *priv = griommupriv;
+ unsigned int mask;
+
+ if (priv == NULL){
+ DBG("GRIOMMU not initialized.\n");
+ return GRIOMMU_ERR_NOINIT;
+ }
+
+ if (isr == NULL){
+ DBG("GRIOMMU wrong isr.\n");
+ return GRIOMMU_ERR_EINVAL;
+ }
+
+ /* Translate option flags into IMASK bits */
+ mask = 0 |
+ ((options & GRIOMMU_INTERRUPT_PARITY_ERROR)? IMASK_PEI:0) |
+ ((options & GRIOMMU_INTERRUPT_FLUSH_COMPLETED)? IMASK_FCI:0) |
+ ((options & GRIOMMU_INTERRUPT_FLUSH_START)? IMASK_FLI:0) |
+ ((options & GRIOMMU_INTERRUPT_ACCESS_DENIED)? IMASK_ADI:0) |
+ ((options & GRIOMMU_INTERRUPT_TRANSLATION_ERROR)? IMASK_TEI:0);
+
+ /* Clear previous interrupts and mask them*/
+ griommu_reg_status_clear(STS_ALL);
+ griommu_reg_imask_set(0);
+
+ /* First time registering an ISR */
+ if (priv->isr == NULL){
+ /* Install and Enable GRIOMMU interrupt handler */
+ drvmgr_interrupt_register(priv->dev, 0, priv->devname, griommu_isr,
+ priv);
+ }
+
+ /* Install user ISR */
+ priv->isr=isr;
+ priv->isr_arg=arg;
+
+ /* Now it is safe to unmask interrupts */
+ griommu_reg_imask_set(mask);
+
+ return GRIOMMU_ERR_OK;
+}
+
+/* Unregister the user ISR: mask and clear all interrupts, then remove
+ * the handler from the driver manager. Fails if no ISR is installed.
+ */
+int griommu_isr_unregister(void)
+{
+ struct griommu_priv *priv = griommupriv;
+
+ if (priv == NULL){
+ DBG("GRIOMMU not initialized.\n");
+ return GRIOMMU_ERR_NOINIT;
+ }
+
+ if (priv->isr == NULL){
+ DBG("GRIOMMU wrong isr.\n");
+ return GRIOMMU_ERR_EINVAL;
+ }
+
+ /* Clear previous interrupts and mask them*/
+ griommu_reg_status_clear(STS_ALL);
+ griommu_reg_imask_set(0);
+
+ /* Uninstall and disable GRIOMMU interrupt handler */
+ drvmgr_interrupt_unregister(priv->dev, 0, griommu_isr, priv);
+
+ /* Uninstall user ISR */
+ priv->isr=NULL;
+ priv->isr_arg=NULL;
+
+ return GRIOMMU_ERR_OK;
+}
+
+/* Unmask (enable) the GRIOMMU_INTERRUPT_* sources given in options,
+ * keeping all other currently unmasked sources enabled. Requires a
+ * registered ISR. */
+int griommu_interrupt_unmask(int options)
+{
+ struct griommu_priv *core = griommupriv;
+ unsigned int add = 0;
+ unsigned int cur;
+
+ if (core == NULL) {
+  DBG("GRIOMMU not initialized.\n");
+  return GRIOMMU_ERR_NOINIT;
+ }
+ if (core->isr == NULL) {
+  DBG("GRIOMMU wrong isr.\n");
+  return GRIOMMU_ERR_EINVAL;
+ }
+
+ /* Translate option flags into IMASK bits */
+ if (options & GRIOMMU_INTERRUPT_PARITY_ERROR)
+  add |= IMASK_PEI;
+ if (options & GRIOMMU_INTERRUPT_FLUSH_COMPLETED)
+  add |= IMASK_FCI;
+ if (options & GRIOMMU_INTERRUPT_FLUSH_START)
+  add |= IMASK_FLI;
+ if (options & GRIOMMU_INTERRUPT_ACCESS_DENIED)
+  add |= IMASK_ADI;
+ if (options & GRIOMMU_INTERRUPT_TRANSLATION_ERROR)
+  add |= IMASK_TEI;
+
+ /* Clear pending interrupts before changing the mask */
+ griommu_reg_status_clear(STS_ALL);
+
+ /* Merge with the current mask and write back */
+ cur = griommu_reg_imask() & IMASK_ALL;
+ griommu_reg_imask_set(cur | add);
+
+ return GRIOMMU_ERR_OK;
+}
+
+/* Mask (disable) the GRIOMMU_INTERRUPT_* sources given in options,
+ * leaving other sources unchanged. Requires a registered ISR. */
+int griommu_interrupt_mask(int options)
+{
+ struct griommu_priv *core = griommupriv;
+ unsigned int remove = 0;
+ unsigned int cur;
+
+ if (core == NULL) {
+  DBG("GRIOMMU not initialized.\n");
+  return GRIOMMU_ERR_NOINIT;
+ }
+ if (core->isr == NULL) {
+  DBG("GRIOMMU wrong isr.\n");
+  return GRIOMMU_ERR_EINVAL;
+ }
+
+ /* Translate option flags into IMASK bits */
+ if (options & GRIOMMU_INTERRUPT_PARITY_ERROR)
+  remove |= IMASK_PEI;
+ if (options & GRIOMMU_INTERRUPT_FLUSH_COMPLETED)
+  remove |= IMASK_FCI;
+ if (options & GRIOMMU_INTERRUPT_FLUSH_START)
+  remove |= IMASK_FLI;
+ if (options & GRIOMMU_INTERRUPT_ACCESS_DENIED)
+  remove |= IMASK_ADI;
+ if (options & GRIOMMU_INTERRUPT_TRANSLATION_ERROR)
+  remove |= IMASK_TEI;
+
+ /* Clear pending interrupts before changing the mask */
+ griommu_reg_status_clear(STS_ALL);
+
+ /* Strip the selected bits from the current mask and write back */
+ cur = griommu_reg_imask() & IMASK_ALL;
+ griommu_reg_imask_set(cur & ~(remove));
+
+ return GRIOMMU_ERR_OK;
+}
+
+/* Return the pending status bits (0 when none). When errors are
+ * pending they are cleared, and the failing access address is stored
+ * in *access when the pointer is non-NULL. */
+int griommu_error_status(uint32_t * access)
+{
+ struct griommu_priv *core = griommupriv;
+ int sts;
+
+ if (core == NULL) {
+  DBG("GRIOMMU not initialized.\n");
+  return GRIOMMU_ERR_NOINIT;
+ }
+
+ sts = griommu_reg_status();
+ if (sts == 0)
+  return 0;
+
+ /* Report the failing access address, then acknowledge the errors */
+ if (access != NULL)
+  *access = griommu_reg_ahbfas();
+ griommu_reg_status_clear(sts);
+
+ return sts;
+}
+
+/* Print IOMMU masters
+ * DEBUG function: prints the decoded control register and the
+ * vendor/device/bus-routing of every master. Compiled to a no-op
+ * unless DEBUG is defined.
+ */
+int griommu_print(void)
+{
+ #ifdef DEBUG
+ struct griommu_priv * priv = griommupriv;
+ unsigned int ctrl;
+
+ if (priv == NULL){
+ DBG("GRIOMMU not initialized.\n");
+ return GRIOMMU_ERR_NOINIT;
+ }
+
+ /* Print IOMMU status */
+ ctrl = griommu_reg_ctrl();
+
+ printf("IOMMU status: prefetching %s, lookup bus %s, ahb update %s,\n"
+ "wprot only %s, pagesize %d KiB\n",
+ ((ctrl & CTRL_DP)? "disabled":"enabled"),
+ ((ctrl & CTRL_LB)? "bus1":"bus0"),
+ ((ctrl & CTRL_AU)? "enabled":"disabled"),
+ ((ctrl & CTRL_WP)? "enabled":"disabled"),
+ (4 << ((ctrl & CTRL_PGSZ) >> CTRL_PGSZ_BIT))
+ );
+
+ /* Print each master configuration */
+ int i, vendor, device, routing;
+ unsigned int master;
+ for (i=0; i < priv->masters; i++){
+ master = griommu_reg_master(i);
+ vendor = (master & MASTER_VENDOR) >> MASTER_VENDOR_BIT;
+ device = (master & MASTER_DEVICE) >> MASTER_DEVICE_BIT;
+ routing = (master & MASTER_BS);
+ printf("IOMMU master %d: VENDOR=%s(0x%02x), DEVICE=%s(0x%03x), "
+ "BS=%s\n",
+ i,
+ ambapp_vendor_id2str(vendor), vendor,
+ ambapp_device_id2str(vendor,device), device,
+ (routing == MASTER_BS_BUS0? "Primary bus" : "Secondary bus")
+ );
+ }
+ #endif
+ return GRIOMMU_ERR_OK;
+}
+
+/* Allocate a new Access Protection Vector aligned to
+ * GRIOMMU_APV_ALIGN. The original malloc() pointer is stashed in the
+ * word just before the returned aligned pointer so that
+ * griommu_apv_delete() can free it.
+ * NOTE(review): pointer<->unsigned int casts assume a 32-bit target
+ * (LEON/SPARC); the stash slot relies on malloc alignment leaving at
+ * least sizeof(void*) bytes before the aligned address — confirm for
+ * any new port.
+ */
+void * griommu_apv_new(void)
+{
+ struct griommu_priv * priv = griommupriv;
+
+ if (priv == NULL){
+ DBG("GRIOMMU not initialized.\n");
+ return NULL;
+ }
+
+ /* Allocate APV (one bit per page) plus room for alignment */
+ unsigned int * orig_ptr = grlib_malloc(
+ (GRIOMMU_APV_SIZE/priv->pagesize) + GRIOMMU_APV_ALIGN);
+ if (orig_ptr == NULL) return NULL;
+
+ /* Get the aligned pointer */
+ unsigned int aligned_ptr = (
+ ((unsigned int) orig_ptr + GRIOMMU_APV_ALIGN) &
+ ~(GRIOMMU_APV_ALIGN - 1));
+
+ /* Save the original pointer before the aligned pointer */
+ unsigned int ** tmp_ptr =
+ (unsigned int **) (aligned_ptr - sizeof(orig_ptr));
+ *tmp_ptr= orig_ptr;
+
+ /* Return aligned pointer */
+ return (void *) aligned_ptr;
+}
+
+/* Free an APV allocated with griommu_apv_new().
+ * NOTE(review): arithmetic on a void* is a GCC extension (treated as
+ * char*); fine for this BSP's toolchain. */
+void griommu_apv_delete(void * apv)
+{
+ /* Recover original pointer placed just before the aligned pointer */
+ unsigned int * orig_ptr;
+ unsigned int ** tmp_ptr = (unsigned int **) (apv - sizeof(orig_ptr));
+ orig_ptr = *tmp_ptr;
+
+ /* Deallocate memory */
+ free(orig_ptr);
+}
+
+/* Enable the IOMMU in the given mode. Only GRIOMMU_MODE_GROUPAPV is
+ * implemented by this driver; it requires APV support in hardware.
+ * Spins until the enable takes effect.
+ */
+int griommu_enable(int mode)
+{
+ struct griommu_priv * priv = griommupriv;
+ unsigned int ctrl;
+
+ if (priv == NULL){
+ DBG("GRIOMMU not initialized.\n");
+ return GRIOMMU_ERR_NOINIT;
+ }
+
+ switch (mode){
+ case GRIOMMU_MODE_IOMMU:
+ default:
+ DBG("IOMMU mode not implemented in driver.\n");
+ return GRIOMMU_ERR_EINVAL;
+ break;
+ case GRIOMMU_MODE_GROUPAPV:
+ if (priv->apv == 0){
+ DBG("IOMMU APV not supported.\n");
+ return GRIOMMU_ERR_IMPLEMENTED;
+ }
+ /* Enable IOMMU: select APV protection mode and set enable bit */
+ ctrl = (griommu_reg_ctrl() & ~(CTRL_PM));
+ griommu_reg_ctrl_set(ctrl | CTRL_PM_APV | CTRL_EN);
+
+ /* Wait until change has effect */
+ while((griommu_reg_ctrl() & CTRL_EN)==0){};
+
+ DBG("IOMMU enabled.\n");
+ return GRIOMMU_ERR_OK;
+ break;
+ }
+ return GRIOMMU_ERR_OK;
+}
+
+/* Disable the IOMMU and spin until the enable bit reads back zero. */
+int griommu_disable(void)
+{
+ struct griommu_priv * priv = griommupriv;
+ unsigned int ctrl;
+
+ if (priv == NULL){
+ DBG("GRIOMMU not initialized.\n");
+ return GRIOMMU_ERR_NOINIT;
+ }
+
+ /* Disable IOMMU */
+ ctrl = (griommu_reg_ctrl() & ~(CTRL_EN));
+ griommu_reg_ctrl_set(ctrl);
+
+ /* Wait until change has effect */
+ while(griommu_reg_ctrl() & CTRL_EN){};
+
+ return GRIOMMU_ERR_OK;
+}
+
+/* Configure a protection group:
+ * GRIOMMU_OPTIONS_GROUP_DISABLE - group disabled,
+ * GRIOMMU_OPTIONS_GROUP_PASSTHROUGH - group active without APV,
+ * otherwise group active with the given (aligned) APV base.
+ * apv must be GRIOMMU_APV_ALIGN aligned; it may be NULL except in the
+ * APV case.
+ */
+int griommu_group_setup(int group, void * apv, int options)
+{
+ struct griommu_priv * priv = griommupriv;
+
+ if (priv == NULL){
+ DBG("GRIOMMU not initialized.\n");
+ return GRIOMMU_ERR_NOINIT;
+ }
+
+ if ((group < 0) || (group >= priv->groups)){
+ DBG("Wrong group id.\n");
+ return GRIOMMU_ERR_EINVAL;
+ }
+
+ if ((options < 0) || (options > GRIOMMU_OPTIONS_GROUP_PASSTHROUGH)){
+ DBG("Wrong options.\n");
+ return GRIOMMU_ERR_EINVAL;
+ }
+
+ if (options == GRIOMMU_OPTIONS_GROUP_DISABLE){
+ if ((unsigned int) apv & (GRIOMMU_APV_ALIGN -1)){
+ DBG("Wrong pointer.\n");
+ return GRIOMMU_ERR_EINVAL;
+ }
+
+ /* Disable GROUP: base kept, active/passthrough bits cleared */
+ griommu_reg_group_set(group, (((unsigned int) apv) & GRP_BASE) | 0);
+ DBG("GROUP[%d] DISABLED.\n", group);
+ return GRIOMMU_ERR_OK;
+ }else if (options == GRIOMMU_OPTIONS_GROUP_PASSTHROUGH){
+ if ((unsigned int) apv & (GRIOMMU_APV_ALIGN -1)){
+ DBG("Wrong pointer.\n");
+ return GRIOMMU_ERR_EINVAL;
+ }
+
+ /* Group in passthrough */
+ griommu_reg_group_set(group,
+ (((unsigned int) apv) & GRP_BASE) | GRP_P | GRP_AG);
+ DBG("GROUP[%d] set to PASSTHROUGH.\n", group);
+ return GRIOMMU_ERR_OK;
+ }else{
+ if (priv->apv == 0){
+ DBG("IOMMU APV not supported.\n");
+ return GRIOMMU_ERR_IMPLEMENTED;
+ }
+
+ if ((apv == NULL) || ((unsigned int) apv & (GRIOMMU_APV_ALIGN -1))){
+ DBG("Wrong pointer.\n");
+ return GRIOMMU_ERR_EINVAL;
+ }
+
+ /* Set up base and enable */
+ griommu_reg_group_set(group,
+ (((unsigned int) apv) & GRP_BASE) | GRP_AG);
+ DBG("GROUP[%d] set to APV (0x%08x).\n", group, (unsigned int) apv);
+ return GRIOMMU_ERR_OK;
+ }
+}
+
+/* Initialize the whole APV of a group to allow-all or deny-all.
+ * The APV base is read back from the group register; the cache is
+ * flushed first (which also validates driver state and group id). */
+int griommu_group_apv_init(int group, int options)
+{
+ struct griommu_priv *core = griommupriv;
+ void *base;
+ int ret;
+
+ /* Flush APV cache if needed.
+  * This function checks for priv and group being valid.*/
+ ret = griommu_group_apv_flush(group);
+ if (ret < 0)
+  return ret;
+
+ /* Recover the group's APV base address */
+ base = (void *) (griommu_reg_group(group) & GRP_BASE);
+ if (base == NULL) {
+  DBG("Wrong pointer.\n");
+  return GRIOMMU_ERR_NOTFOUND;
+ }
+
+ /* Fill with 0x00 (allow) or 0xff (deny); one bit per page */
+ memset(base,
+  (options == GRIOMMU_OPTIONS_APV_ALLOW) ? 0x00 : 0xff,
+  GRIOMMU_APV_SIZE / core->pagesize);
+
+ return GRIOMMU_ERR_OK;
+}
+
+/* Allow or deny a range of pages [index, index+size) in a group's
+ * APV. The cache is flushed first (also validates state/group). */
+int griommu_group_apv_page_set(int group, int index, int size, int options)
+{
+ void *base;
+ int ret;
+
+ /* Flush APV cache if needed.
+  * This function checks for priv and group being valid.*/
+ ret = griommu_group_apv_flush(group);
+ if (ret < 0)
+  return ret;
+
+ /* Recover the group's APV base address */
+ base = (void *) (griommu_reg_group(group) & GRP_BASE);
+ if (base == NULL) {
+  DBG("Wrong pointer.\n");
+  return GRIOMMU_ERR_NOTFOUND;
+ }
+
+ /* All-zeros allows, all-ones denies */
+ return griommu_apv_set(base, index, size,
+  (options == GRIOMMU_OPTIONS_APV_ALLOW) ? 0x0 : 0xffffffff);
+}
+
+/* Allow or deny the address range [addr, addr+size] in a group's APV,
+ * rounded outward to whole pages. The cache is flushed first (also
+ * validates driver state and group id). */
+int griommu_group_apv_address_set(int group, uint32_t addr, int size,
+ int options)
+{
+ struct griommu_priv *core = griommupriv;
+ void *base;
+ int ret;
+ int first, last;
+
+ /* Flush APV cache if needed.
+  * This function checks for priv and group being valid.*/
+ ret = griommu_group_apv_flush(group);
+ if (ret < 0)
+  return ret;
+
+ /* Recover the group's APV base address */
+ base = (void *) (griommu_reg_group(group) & GRP_BASE);
+ if (base == NULL) {
+  DBG("Wrong pointer.\n");
+  return GRIOMMU_ERR_NOTFOUND;
+ }
+
+ /* Convert the byte range into an inclusive page range */
+ first = addr / core->pagesize;
+ last = (addr + size) / core->pagesize;
+
+ return griommu_apv_set(base, first, last - first + 1,
+  (options == GRIOMMU_OPTIONS_APV_ALLOW) ? 0x0 : 0xffffffff);
+}
+
+/* Initialize a user-supplied, aligned APV to allow-all or deny-all. */
+int griommu_apv_init(void * apv, int options)
+{
+ struct griommu_priv *core = griommupriv;
+
+ if (core == NULL) {
+  DBG("GRIOMMU not initialized.\n");
+  return GRIOMMU_ERR_NOINIT;
+ }
+ /* APV must be non-NULL and GRIOMMU_APV_ALIGN aligned */
+ if (apv == NULL || ((unsigned int) apv & (GRIOMMU_APV_ALIGN - 1))) {
+  DBG("Wrong pointer.\n");
+  return GRIOMMU_ERR_EINVAL;
+ }
+
+ /* Fill with 0x00 (allow) or 0xff (deny); one bit per page */
+ memset(apv,
+  (options == GRIOMMU_OPTIONS_APV_ALLOW) ? 0x00 : 0xff,
+  GRIOMMU_APV_SIZE / core->pagesize);
+
+ return GRIOMMU_ERR_OK;
+}
+
+/* Allow or deny a range of pages [index, index+size) in a
+ * user-supplied, aligned APV. */
+int griommu_apv_page_set(void * apv, int index, int size, int options)
+{
+ struct griommu_priv *core = griommupriv;
+
+ if (core == NULL) {
+  DBG("GRIOMMU not initialized.\n");
+  return GRIOMMU_ERR_NOINIT;
+ }
+ /* APV must be non-NULL and GRIOMMU_APV_ALIGN aligned */
+ if (apv == NULL || ((unsigned int) apv & (GRIOMMU_APV_ALIGN - 1))) {
+  DBG("Wrong pointer.\n");
+  return GRIOMMU_ERR_EINVAL;
+ }
+
+ /* All-zeros allows, all-ones denies */
+ return griommu_apv_set(apv, index, size,
+  (options == GRIOMMU_OPTIONS_APV_ALLOW) ? 0x0 : 0xffffffff);
+}
+
+/* Allow or deny the address range [addr, addr+size] in a
+ * user-supplied, aligned APV, rounded outward to whole pages. */
+int griommu_apv_address_set(void * apv, uint32_t addr, int size, int options)
+{
+ struct griommu_priv *core = griommupriv;
+ int first, last;
+
+ if (core == NULL) {
+  DBG("GRIOMMU not initialized.\n");
+  return GRIOMMU_ERR_NOINIT;
+ }
+ /* APV must be non-NULL and GRIOMMU_APV_ALIGN aligned */
+ if (apv == NULL || ((unsigned int) apv & (GRIOMMU_APV_ALIGN - 1))) {
+  DBG("Wrong pointer.\n");
+  return GRIOMMU_ERR_EINVAL;
+ }
+
+ /* Convert the byte range into an inclusive page range */
+ first = addr / core->pagesize;
+ last = (addr + size) / core->pagesize;
+
+ return griommu_apv_set(apv, first, last - first + 1,
+  (options == GRIOMMU_OPTIONS_APV_ALLOW) ? 0x0 : 0xffffffff);
+}
+
+/* Read back the raw control register of the given group into *info.
+ * Returns GRIOMMU_ERR_OK or an error code on bad input. */
+int griommu_group_info(int group, uint32_t * info)
+{
+ struct griommu_priv *core = griommupriv;
+
+ if (core == NULL) {
+  DBG("GRIOMMU not initialized.\n");
+  return GRIOMMU_ERR_NOINIT;
+ }
+ if (group < 0 || group >= core->groups) {
+  DBG("Wrong group id.\n");
+  return GRIOMMU_ERR_EINVAL;
+ }
+ if (info == NULL) {
+  DBG("Wrong pointer.\n");
+  return GRIOMMU_ERR_EINVAL;
+ }
+
+ *info = griommu_reg_group(group);
+ return GRIOMMU_ERR_OK;
+}
+
+/* Flush APV cache group:
+ * Flush the APV cache entries of the given group (a full flush is
+ * issued when the cache does not support group addressing). No-op
+ * when the cache is disabled.
+ */
+int griommu_group_apv_flush(int group)
+{
+ struct griommu_priv * priv = griommupriv;
+
+ if (priv == NULL){
+ DBG("GRIOMMU not initialized.\n");
+ return GRIOMMU_ERR_NOINIT;
+ }
+
+ if ((group < 0) || (group >= priv->groups)){
+ DBG("Wrong group id.\n");
+ return GRIOMMU_ERR_EINVAL;
+ }
+
+ /* Flush cache */
+ if (priv->cache_enabled){
+ if (priv->group_addressing){
+ griommu_reg_flush_set(((group << FLUSH_FGRP_BIT) & FLUSH_FGRP) |
+ FLUSH_GF | FLUSH_F);
+ }else{
+ griommu_reg_flush_set(FLUSH_F);
+ }
+ DBG("GRIOMMU APV cache flushed.\n");
+ }
+
+ return GRIOMMU_ERR_OK;
+}
+
+/* Flush APV cache:
+ * Issue a full APV cache flush. No-op when the cache is disabled.
+ */
+int griommu_apv_flush(void)
+{
+ struct griommu_priv *core = griommupriv;
+
+ if (core == NULL) {
+  DBG("GRIOMMU not initialized.\n");
+  return GRIOMMU_ERR_NOINIT;
+ }
+
+ if (core->cache_enabled) {
+  /* Trigger a full flush of the APV cache */
+  griommu_reg_flush_set(FLUSH_F);
+  DBG("GRIOMMU APV cache flushed.\n");
+ }
+
+ return GRIOMMU_ERR_OK;
+}
+
diff --git a/bsps/shared/grlib/irq/genirq.c b/bsps/shared/grlib/irq/genirq.c
new file mode 100644
index 0000000000..285416b0d3
--- /dev/null
+++ b/bsps/shared/grlib/irq/genirq.c
@@ -0,0 +1,244 @@
+/*
+ * Generic interrupt helpers mainly for GRLIB PCI peripherals
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <rtems/bspIo.h>
+#include <stdlib.h>
+#include <string.h>
+#include <grlib/genirq.h>
+
+#include <grlib/grlib_impl.h>
+
+/* One registered ISR on a (possibly shared) IRQ line; linked-list node */
+struct genirq_handler_entry {
+ struct genirq_handler_entry *next; /* Next ISR entry for this IRQ number */
+ genirq_handler isr; /* ISR function called upon IRQ */
+ void *arg; /* custom argument to ISR */
+ int enabled; /* Indicates if IRQ is enabled */
+};
+
+/* Per-IRQ-number state: head of the handler list and IRQ statistics */
+struct genirq_irq_entry {
+ struct genirq_handler_entry *head;
+ struct genirq_stats stats;
+};
+
+struct genirq_priv {
+ /* Maximum valid interrupt number (number_of_irqs - 1) */
+ int genirq_max;
+ /* IRQ Table index N reflect IRQ number N. Zero-length trailing
+  * array (GCC extension); actual length is allocated in
+  * genirq_init(). */
+ struct genirq_irq_entry genirq_table[0]; /* Length depends on */
+};
+
+/* Allocate and zero a genirq layer able to manage IRQ numbers
+ * 1 .. number_of_irqs-1. Returns NULL on allocation failure. */
+genirq_t genirq_init(int number_of_irqs)
+{
+ struct genirq_priv *priv;
+ size_t bytes;
+
+ /* Header plus one table entry per IRQ number */
+ bytes = sizeof(*priv) +
+         number_of_irqs * sizeof(priv->genirq_table[0]);
+
+ priv = grlib_calloc(1, bytes);
+ if ( priv != NULL )
+  priv->genirq_max = number_of_irqs - 1;
+
+ return priv;
+}
+
+/* Destroy the genirq layer: free every registered handler entry and
+ * then the private structure itself. */
+void genirq_destroy(genirq_t d)
+{
+ struct genirq_priv *priv = d;
+ struct genirq_irq_entry *irqentry;
+ struct genirq_handler_entry *isrentry, *tmp;
+ int i;
+
+ /* Free all registered interrupts. Valid IRQ numbers reach up to and
+  * including genirq_max (see genirq_check()), so iterate with <=;
+  * the previous < bound leaked handlers on the highest IRQ number. */
+ for ( i=0; i<=priv->genirq_max; i++) {
+  irqentry = &priv->genirq_table[i];
+  isrentry = irqentry->head;
+  while ( isrentry ) {
+   tmp = isrentry;
+   isrentry = isrentry->next;
+   genirq_free_handler(tmp);
+  }
+ }
+
+ free(priv);
+}
+
+/* Return 0 when irq is a valid IRQ number for this layer, -1
+ * otherwise. IRQ number 0 is reserved and always rejected. */
+int genirq_check(genirq_t d, int irq)
+{
+ struct genirq_priv *priv = d;
+
+ return ((irq <= 0) || (irq > priv->genirq_max)) ? -1 : 0;
+}
+
+/* Allocate a handler entry for later genirq_register(). The entry
+ * starts out disabled. Returns NULL on allocation failure. */
+void *genirq_alloc_handler(genirq_handler isr, void *arg)
+{
+ struct genirq_handler_entry *ent;
+
+ ent = grlib_malloc(sizeof(*ent));
+ if ( ent == NULL )
+  return NULL;
+
+ ent->isr = isr;
+ ent->arg = arg;
+ ent->enabled = 0;
+ return ent;
+}
+
+/* Attach a pre-allocated handler entry to an IRQ number.
+ * Returns -1 on invalid IRQ, 1 when other handlers already share the
+ * IRQ line, 0 when this is the only handler. */
+int genirq_register(genirq_t d, int irq, void *handler)
+{
+ struct genirq_priv *priv = d;
+ struct genirq_handler_entry *newentry = handler;
+ struct genirq_handler_entry *oldhead;
+
+ if ( genirq_check(d, irq) )
+  return -1;
+
+ /* Push the new ISR entry onto the head of the IRQ's list */
+ oldhead = priv->genirq_table[irq].head;
+ newentry->next = oldhead;
+ priv->genirq_table[irq].head = newentry;
+
+ return oldhead ? 1 : 0;
+}
+
+/* Detach the handler matching isr/arg from the IRQ's list and return
+ * it (caller frees it). Returns NULL when the IRQ is invalid, the
+ * handler is not found, or the handler is still enabled (it must be
+ * disabled before removal). */
+void *genirq_unregister(genirq_t d, int irq, genirq_handler isr, void *arg)
+{
+ struct genirq_priv *priv = d;
+ struct genirq_handler_entry *cur, **link;
+
+ if ( genirq_check(d, irq) )
+  return NULL;
+
+ /* Walk the list keeping a pointer to the previous link so the
+  * match can be unlinked in place */
+ for ( link = &priv->genirq_table[irq].head; (cur = *link) != NULL;
+       link = &cur->next ) {
+  if ( (cur->isr != isr) || (cur->arg != arg) )
+   continue;
+
+  if ( cur->enabled ) {
+   /* Can not remove enabled ISRs, disable first */
+   return NULL;
+  }
+
+  *link = cur->next; /* unlink */
+  return cur;
+ }
+
+ return NULL;
+}
+
+/* Enables or Disables ISR handler. Internal function to reduce footprint
+ * of enable/disable functions.
+ *
+ * \param action 1=enable, 0=disable ISR
+ *
+ * Returns:
+ *  -1  invalid IRQ number or isr/arg pair not registered
+ *   1  handler already in the requested state; nothing to do
+ *  >=0 otherwise: the number of OTHER handlers on this IRQ that are
+ *      currently enabled (callers use 0 to know when to touch the
+ *      hardware mask)
+ */
+static int genirq_set_active(
+ struct genirq_priv *priv,
+ int irq,
+ genirq_handler isr,
+ void *arg,
+ int action)
+{
+ struct genirq_irq_entry *irqentry;
+ struct genirq_handler_entry *isrentry, *e = NULL;
+ int enabled;
+
+ if ( genirq_check(priv, irq) )
+  return -1;
+
+ /* Find isr[arg] in ISR list */
+ irqentry = &priv->genirq_table[irq];
+ enabled = 0;
+
+ isrentry = irqentry->head;
+ while ( isrentry ) {
+  if ( (isrentry->arg == arg) && (isrentry->isr == isr) ) {
+   /* Found ISR */
+   if ( isrentry->enabled == action ) {
+    /* The ISR is already enabled or disabled
+     * depending on request, neccessary actions
+     * were taken last time the same action was
+     * requested.
+     */
+    return 1;
+   }
+   e = isrentry;
+  } else {
+   /* Count enabled handlers other than the target */
+   enabled += isrentry->enabled;
+  }
+  isrentry = isrentry->next;
+ }
+
+ if ( !e )
+  return -1;
+
+ e->enabled = action;
+
+ return enabled;
+}
+
+/* Enable the registered isr/arg handler on irq; see genirq_set_active()
+ * for the return value convention. */
+int genirq_enable(genirq_t d, int irq, genirq_handler isr, void *arg)
+{
+ return genirq_set_active((struct genirq_priv *)d, irq, isr, arg, 1);
+}
+
+/* Disable the registered isr/arg handler on irq; see genirq_set_active()
+ * for the return value convention. */
+int genirq_disable(genirq_t d, int irq, genirq_handler isr, void *arg)
+{
+ return genirq_set_active((struct genirq_priv *)d, irq, isr, arg, 0);
+}
+
+/* Dispatch an IRQ: bump statistics and call every enabled handler
+ * registered on the line. Logs a spurious-IRQ message when no enabled
+ * handler exists. */
+void genirq_doirq(genirq_t d, int irq)
+{
+ struct genirq_priv *priv = d;
+ struct genirq_irq_entry *entry = &priv->genirq_table[irq];
+ struct genirq_handler_entry *h;
+ int served = 0;
+
+ entry->stats.irq_cnt++;
+
+ for ( h = entry->head; h != NULL; h = h->next ) {
+  if ( !h->enabled )
+   continue;
+  served = 1;
+  /* Call the ISR */
+  h->isr(h->arg);
+ }
+
+ /* Was the IRQ an IRQ without source? */
+ if ( served == 0 ) {
+  /* This should not happen */
+  printk("Spurious IRQ happened on IRQ %d\n", irq);
+ }
+}
diff --git a/bsps/shared/grlib/l2c/l2c.c b/bsps/shared/grlib/l2c/l2c.c
new file mode 100644
index 0000000000..ddef0ada5c
--- /dev/null
+++ b/bsps/shared/grlib/l2c/l2c.c
@@ -0,0 +1,2118 @@
+/*
+ * GRLIB L2CACHE Driver
+ *
+ * COPYRIGHT (c) 2017
+ * Cobham Gaisler AB
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/ambapp.h>
+#include <rtems.h>
+#include <rtems/bspIo.h>
+#include <grlib/grlib.h>
+#include <bsp.h>
+#include <grlib/l2c.h>
+
+/*#define STATIC*/
+#define STATIC static
+
+/*#define INLINE*/
+#define INLINE inline
+
+#define UNUSED __attribute__((unused))
+
+/*#define DEBUG 1*/
+
+#ifdef DEBUG
+#define DBG(x...) printf(x)
+#else
+#define DBG(x...)
+#endif
+
+/*
+ * L2CACHE CTRL register fields
+ */
+#define L2C_CTRL_EN (0x1 << L2C_CTRL_EN_BIT)
+#define L2C_CTRL_EDAC (0x1 << L2C_CTRL_EDAC_BIT)
+#define L2C_CTRL_REPL (0x3 << L2C_CTRL_REPL_BIT)
+#define L2C_CTRL_IWAY (0xf << L2C_CTRL_IWAY_BIT)
+#define L2C_CTRL_LOCK (0xf << L2C_CTRL_LOCK_BIT)
+#define L2C_CTRL_HPRHB (0x1 << L2C_CTRL_HPRHB_BIT)
+#define L2C_CTRL_HPB (0x1 << L2C_CTRL_HPB_BIT)
+#define L2C_CTRL_UC (0x1 << L2C_CTRL_UC_BIT)
+#define L2C_CTRL_HC (0x1 << L2C_CTRL_HC_BIT)
+#define L2C_CTRL_WP (0x1 << L2C_CTRL_WP_BIT)
+#define L2C_CTRL_HP (0x1 << L2C_CTRL_HP_BIT)
+
+#define L2C_CTRL_EN_BIT 31
+#define L2C_CTRL_EDAC_BIT 30
+#define L2C_CTRL_REPL_BIT 28
+#define L2C_CTRL_IWAY_BIT 12
+#define L2C_CTRL_LOCK_BIT 8
+#define L2C_CTRL_HPRHB_BIT 5
+#define L2C_CTRL_HPB_BIT 4
+#define L2C_CTRL_UC_BIT 3
+#define L2C_CTRL_HC_BIT 2
+#define L2C_CTRL_WP_BIT 1
+#define L2C_CTRL_HP_BIT 0
+
+/*
+ * L2CACHE STATUS register fields
+ */
+#define L2C_STAT_LS (0x1 << L2C_STAT_LS_BIT)
+#define L2C_STAT_AT (0x1 << L2C_STAT_AT_BIT)
+#define L2C_STAT_MP (0x1 << L2C_STAT_MP_BIT)
+#define L2C_STAT_MTRR (0x3f << L2C_STAT_MTRR_BIT)
+#define L2C_STAT_BBUSW (0x7 << L2C_STAT_BBUSW_BIT)
+#define L2C_STAT_WAYSIZE (0x7ff << L2C_STAT_WAYSIZE_BIT)
+#define L2C_STAT_WAY (0x3 << L2C_STAT_WAY_BIT)
+
+#define L2C_STAT_LS_BIT 24
+#define L2C_STAT_AT_BIT 23
+#define L2C_STAT_MP_BIT 22
+#define L2C_STAT_MTRR_BIT 16
+#define L2C_STAT_BBUSW_BIT 13
+#define L2C_STAT_WAYSIZE_BIT 2
+#define L2C_STAT_WAY_BIT 0
+
+/*
+ * L2CACHE MTRR register fields
+ */
+#define L2C_MTRR_ADDR (0x3fff << L2C_MTRR_ADDR_BIT)
+#define L2C_MTRR_ACC (0x3 << L2C_MTRR_ACC_BIT)
+#define L2C_MTRR_MASK (0x3fff << L2C_MTRR_MASK_BIT)
+#define L2C_MTRR_WP (0x1 << L2C_MTRR_WP_BIT)
+#define L2C_MTRR_AC (0x1 << L2C_MTRR_AC_BIT)
+
+#define L2C_MTRR_ADDR_BIT 18
+#define L2C_MTRR_ACC_BIT 16
+#define L2C_MTRR_MASK_BIT 2
+#define L2C_MTRR_WP_BIT 1
+#define L2C_MTRR_AC_BIT 0
+
+#define L2C_MTRR_UNCACHED 0
+#define L2C_MTRR_WRITETHROUGH (0x1 << L2C_MTRR_ACC_BIT)
+#define L2C_MTRR_WRITEPROT_ENABLE L2C_MTRR_WP
+#define L2C_MTRR_WRITEPROT_DISABLE 0
+#define L2C_MTRR_ACCESSCONTROL_ENABLE L2C_MTRR_AC
+#define L2C_MTRR_ACCESSCONTROL_DISABLE 0
+
+#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
+#define REG_READ(addr) (*(volatile unsigned int *)(addr))
+
+/*
+ * L2CACHE FLUSHMEM register fields
+ */
+#define L2C_FLUSH_ADDR (0x7ffffff << L2C_FLUSH_ADDR_BIT)
+#define L2C_FLUSH_DI (0x1 << L2C_FLUSH_DI_BIT)
+#define L2C_FLUSH_FMODE (0x7 << L2C_FLUSH_FMODE_BIT)
+
+#define L2C_FLUSH_ADDR_BIT 5
+#define L2C_FLUSH_DI_BIT 3
+#define L2C_FLUSH_FMODE_BIT 0
+
+#define L2C_FLUSH_FMODE_INV_ONE (0x1 << L2C_FLUSH_FMODE_BIT)
+#define L2C_FLUSH_FMODE_WB_ONE (0x2 << L2C_FLUSH_FMODE_BIT)
+#define L2C_FLUSH_FMODE_INV_WB_ONE (0x3 << L2C_FLUSH_FMODE_BIT)
+#define L2C_FLUSH_FMODE_INV_ALL (0x5 << L2C_FLUSH_FMODE_BIT)
+#define L2C_FLUSH_FMODE_WB_ALL (0x6 << L2C_FLUSH_FMODE_BIT)
+#define L2C_FLUSH_FMODE_INV_WB_ALL (0x7 << L2C_FLUSH_FMODE_BIT)
+
+/*
+ * L2CACHE FLUSSETINDEX register fields
+ */
+#define L2C_FLUSHSI_INDEX (0xffff << L2C_FLUSHSI_INDEX_BIT)
+#define L2C_FLUSHSI_TAG (0x3fffff << L2C_FLUSHSI_TAG_BIT)
+#define L2C_FLUSHSI_FL (0x1 << L2C_FLUSHSI_FL_BIT)
+#define L2C_FLUSHSI_VB (0x1 << L2C_FLUSHSI_VB_BIT)
+#define L2C_FLUSHSI_DB (0x1 << L2C_FLUSHSI_DB_BIT)
+#define L2C_FLUSHSI_WAY (0x3 << L2C_FLUSHSI_WAY_BIT)
+#define L2C_FLUSHSI_DI (0x1 << L2C_FLUSHSI_DI_BIT)
+#define L2C_FLUSHSI_WF (0x1 << L2C_FLUSHSI_WF_BIT)
+#define L2C_FLUSHSI_FMODE (0x3 << L2C_FLUSHSI_FMODE_BIT)
+
+#define L2C_FLUSHSI_INDEX_BIT 16
+#define L2C_FLUSHSI_TAG_BIT 10
+#define L2C_FLUSHSI_FL_BIT 9
+#define L2C_FLUSHSI_VB_BIT 8
+#define L2C_FLUSHSI_DB_BIT 7
+#define L2C_FLUSHSI_WAY_BIT 4
+#define L2C_FLUSHSI_DI_BIT 3
+#define L2C_FLUSHSI_WF_BIT 2
+#define L2C_FLUSHSI_FMODE_BIT 0
+
+#define L2C_FLUSHSI_FMODE_SET_INV_ONE (0x1 << L2C_FLUSHSI_FMODE_BIT)
+#define L2C_FLUSHSI_FMODE_SET_WB_ONE (0x2 << L2C_FLUSHSI_FMODE_BIT)
+#define L2C_FLUSHSI_FMODE_SET_INV_WB_ONE (0x3 << L2C_FLUSHSI_FMODE_BIT)
+#define L2C_FLUSHSI_FMODE_WAY_UPDATE (0x1 << L2C_FLUSHSI_FMODE_BIT)
+#define L2C_FLUSHSI_FMODE_WAY_WB (0x2 << L2C_FLUSHSI_FMODE_BIT)
+#define L2C_FLUSHSI_FMODE_WAY_UPDATE_WB_ALL (0x3 << L2C_FLUSHSI_FMODE_BIT)
+
+/*
+ * L2CACHE ERROR register fields
+ */
+#define L2C_ERROR_AHBM (0xf << L2C_ERROR_AHBM_BIT)
+#define L2C_ERROR_SCRUB (0x1 << L2C_ERROR_SCRUB_BIT)
+#define L2C_ERROR_TYPE (0x7 << L2C_ERROR_TYPE_BIT)
+#define L2C_ERROR_TAG (0x1 << L2C_ERROR_TAG_BIT)
+#define L2C_ERROR_COR (0x1 << L2C_ERROR_COR_BIT)
+#define L2C_ERROR_MULTI (0x1 << L2C_ERROR_MULTI_BIT)
+#define L2C_ERROR_VALID (0x1 << L2C_ERROR_VALID_BIT)
+#define L2C_ERROR_DISERESP (0x1 << L2C_ERROR_DISERESP_BIT)
+#define L2C_ERROR_CEC (0x7 << L2C_ERROR_CEC_BIT)
+#define L2C_ERROR_IRQP (0xf << L2C_ERROR_IRQP_BIT)
+#define L2C_ERROR_IRQM (0xf << L2C_ERROR_IRQM_BIT)
+#define L2C_ERROR_IRQM_BCKEND (0x1 << L2C_ERROR_IRQM_BCKEND_BIT)
+#define L2C_ERROR_IRQM_WPROT (0x1 << L2C_ERROR_IRQM_WPROT_BIT)
+#define L2C_ERROR_IRQM_UNCORR (0x1 << L2C_ERROR_IRQM_UNCORR_BIT)
+#define L2C_ERROR_IRQM_CORR (0x1 << L2C_ERROR_IRQM_CORR_BIT)
+#define L2C_ERROR_SCB (0x3 << L2C_ERROR_SCB_BIT)
+#define L2C_ERROR_STCB (0x3 << L2C_ERROR_STCB_BIT)
+#define L2C_ERROR_XCB (0x1 << L2C_ERROR_XCB_BIT)
+#define L2C_ERROR_RCB (0x1 << L2C_ERROR_RCB_BIT)
+#define L2C_ERROR_COMP (0x1 << L2C_ERROR_COMP_BIT)
+#define L2C_ERROR_RST (0x1 << L2C_ERROR_RST_BIT)
+
+#define L2C_ERROR_AHBM_BIT 28
+#define L2C_ERROR_SCRUB_BIT 27
+#define L2C_ERROR_TYPE_BIT 24
+#define L2C_ERROR_TAG_BIT 23
+#define L2C_ERROR_COR_BIT 22
+#define L2C_ERROR_MULTI_BIT 21
+#define L2C_ERROR_VALID_BIT 20
+#define L2C_ERROR_DISERESP_BIT 19
+#define L2C_ERROR_CEC_BIT 16
+#define L2C_ERROR_IRQP_BIT 12
+#define L2C_ERROR_IRQM_BCKEND_BIT 11
+#define L2C_ERROR_IRQM_WPROT_BIT 10
+#define L2C_ERROR_IRQM_UNCORR_BIT 9
+#define L2C_ERROR_IRQM_CORR_BIT 8
+#define L2C_ERROR_IRQM_BIT 8
+#define L2C_ERROR_SCB_BIT 6
+#define L2C_ERROR_STCB_BIT 4
+#define L2C_ERROR_XCB_BIT 3
+#define L2C_ERROR_RCB_BIT 2
+#define L2C_ERROR_COMP_BIT 1
+#define L2C_ERROR_RST_BIT 0
+
+/*
+ * L2CACHE DATA CHECK BITS register fields
+ */
+#define L2C_DCB_CB (0xfffffff << L2C_DCB_CB_BIT)
+
+#define L2C_DCB_CB_BIT 0
+
+/*
+ * L2CACHE SCRUB register fields
+ */
+#define L2C_SCRUB_INDEX (0xffff << L2C_SCRUB_INDEX_BIT)
+#define L2C_SCRUB_WAY (0x3 << L2C_SCRUB_WAY_BIT)
+#define L2C_SCRUB_PEN (0x1 << L2C_SCRUB_PEN_BIT)
+#define L2C_SCRUB_EN (0x1 << L2C_SCRUB_EN_BIT)
+
+#define L2C_SCRUB_INDEX_BIT 16
+#define L2C_SCRUB_WAY_BIT 2
+#define L2C_SCRUB_PEN_BIT 1
+#define L2C_SCRUB_EN_BIT 0
+
+/*
+ * L2CACHE SCRUBDELAY register fields
+ */
+#define L2C_SCRUB_DEL (0xffff << L2C_SCRUB_DEL_BIT)
+
+#define L2C_SCRUB_DEL_BIT 0
+
+/*
+ * L2CACHE ERROR INJECT register fields
+ */
+#define L2C_ERRINJ_ADDR (0x3fffffff << L2C_ERRINJ_ADDR_BIT)
+#define L2C_ERRINJ_EN (0x1 << L2C_ERRINJ_EN_BIT)
+
+#define L2C_ERRINJ_ADDR_BIT 2
+#define L2C_ERRINJ_EN_BIT 0
+
+/*
+ * L2CACHE ACCESS CONTROL register fields
+ */
+#define L2C_ACCCTRL_DSC (0x1 << L2C_ACCCTRL_DSC_BIT)
+#define L2C_ACCCTRL_SH (0x1 << L2C_ACCCTRL_SH_BIT)
+#define L2C_ACCCTRL_SPLITQ (0x1 << L2C_ACCCTRL_SPLITQ_BIT)
+#define L2C_ACCCTRL_NHM (0x1 << L2C_ACCCTRL_NHM_BIT)
+#define L2C_ACCCTRL_BERR (0x1 << L2C_ACCCTRL_BERR_BIT)
+#define L2C_ACCCTRL_OAPM (0x1 << L2C_ACCCTRL_OAPM_BIT)
+#define L2C_ACCCTRL_FLINE (0x1 << L2C_ACCCTRL_FLINE_BIT)
+#define L2C_ACCCTRL_DBPF (0x1 << L2C_ACCCTRL_DBPF_BIT)
+#define L2C_ACCCTRL_128WF (0x1 << L2C_ACCCTRL_128WF_BIT)
+#define L2C_ACCCTRL_DBPWS (0x1 << L2C_ACCCTRL_DBPWS_BIT)
+#define L2C_ACCCTRL_SPLIT (0x1 << L2C_ACCCTRL_SPLIT_BIT)
+
+#define L2C_ACCCTRL_DSC_BIT 14
+#define L2C_ACCCTRL_SH_BIT 13
+#define L2C_ACCCTRL_SPLITQ_BIT 10
+#define L2C_ACCCTRL_NHM_BIT 9
+#define L2C_ACCCTRL_BERR_BIT 8
+#define L2C_ACCCTRL_OAPM_BIT 7
+#define L2C_ACCCTRL_FLINE_BIT 6
+#define L2C_ACCCTRL_DBPF_BIT 5
+#define L2C_ACCCTRL_128WF_BIT 4
+#define L2C_ACCCTRL_DBPWS_BIT 2
+#define L2C_ACCCTRL_SPLIT_BIT 1
+
+#ifdef TEST_L2CACHE
+/*
+ * L2CACHE TAG fields
+ */
+#define L2C_TAG_TAG (0xfffffc << L2C_TAG_TAG_BIT)
+#define L2C_TAG_VALID (0x3 << L2C_TAG_VALID_BIT)
+#define L2C_TAG_DIRTY (0x3 << L2C_TAG_DIRTY_BIT)
+#define L2C_TAG_LRU (0x3 << L2C_TAG_LRU_BIT)
+
+#define L2C_TAG_TAG_BIT 10
+#define L2C_TAG_VALID_BIT 8
+#define L2C_TAG_DIRTY_BIT 6
+#define L2C_TAG_LRU_BIT 0
+
+#endif /* TEST_L2CACHE */
+
+#define DEVNAME_LEN 9
+/*
+ * L2CACHE Driver private data struture
+ */
+struct l2cache_priv {
+	struct drvmgr_dev *dev;		/* Driver manager device handle */
+	char devname[DEVNAME_LEN];	/* Device name ("l2cache0") */
+
+	/* L2CACHE control registers */
+	struct l2c_regs *regs;
+
+	/* L2CACHE status, probed from the STATUS register at init */
+	int ways;		/* Number of cache ways */
+	int waysize;		/* Way size in bytes */
+	int linesize;		/* Cache line size in bytes (32 or 64) */
+	int index;		/* Lines per way (waysize/linesize) */
+	int mtrr;		/* Number of MTRR registers */
+	int ft_support;		/* Non-zero if EDAC/FT is implemented */
+	int split_support;	/* Non-zero if AHB SPLIT is implemented */
+
+	/* User defined ISR */
+	l2cache_isr_t isr;
+	void *isr_arg;
+};
+
+/*
+ * L2CACHE internal prototypes
+ */
+/* -Register access functions */
+STATIC INLINE int l2cache_reg_ctrl_enable(void);
+STATIC INLINE int l2cache_reg_ctrl_disable(void);
+STATIC INLINE int l2cache_reg_ctrl_locked_set(int locked);
+STATIC INLINE int l2cache_reg_ctrl_edac_set(int edac);
+STATIC INLINE int l2cache_reg_ctrl_repl(int policy);
+STATIC INLINE int l2cache_reg_ctrl_iway(int way);
+STATIC INLINE int l2cache_reg_ctrl_writep(int policy);
+STATIC INLINE unsigned int l2cache_reg_ctrl(void);
+STATIC INLINE unsigned int l2cache_reg_status(void);
+STATIC INLINE int l2cache_reg_mtrr_set(int index, unsigned int addr,
+ unsigned int mask, int options);
+UNUSED STATIC INLINE unsigned int l2cache_reg_mtrr_get(int index);
+STATIC INLINE int l2cache_reg_flushmem(unsigned int addr, int options);
+STATIC INLINE int l2cache_reg_flushline(int way, int index, int options);
+STATIC INLINE int l2cache_reg_flushway(unsigned int tag, int way, int options);
+STATIC INLINE unsigned int l2cache_reg_error(void);
+STATIC INLINE int l2cache_reg_error_reset(void);
+STATIC INLINE int l2cache_reg_error_irqmask(int mask);
+STATIC INLINE unsigned int l2cache_reg_error_addr(void);
+STATIC INLINE unsigned int l2cache_reg_scrub(void);
+STATIC INLINE int l2cache_reg_scrub_enable(int delay);
+STATIC INLINE int l2cache_reg_scrub_disable(void);
+STATIC INLINE unsigned int l2cache_reg_scrub_delay(void);
+STATIC INLINE int l2cache_reg_scrub_line(int way, int index);
+STATIC INLINE unsigned int l2cache_reg_accctrl(void);
+STATIC INLINE int l2cache_reg_accctrl_split_disable(void);
+STATIC INLINE int l2cache_reg_accctrl_split_enable(void);
+#ifdef TEST_L2CACHE
+STATIC INLINE int l2cache_reg_error_dcb(unsigned int cb);
+STATIC INLINE int l2cache_reg_error_inject(unsigned int addr);
+STATIC INLINE unsigned int l2cache_reg_diagtag(int way, int index);
+STATIC INLINE unsigned int l2cache_reg_diagdata(int way, int index, int word);
+STATIC unsigned int log2int(unsigned int v);
+#endif /* TEST_L2CACHE */
+
+/* -Control functions */
+STATIC int l2cache_ctrl_status(void);
+STATIC void l2cache_flushwait(void);
+
+/* -Init function */
+STATIC int l2cache_init(struct l2cache_priv *priv);
+
+/* -Init function called by drvmgr */
+int l2cache_init1(struct drvmgr_dev *dev);
+
+/* -IRQ handler */
+void l2cache_isr(void *arg);
+
+/*
+ * L2CACHE static members
+ */
+static struct l2cache_priv *l2cachepriv = NULL;
+#ifdef DEBUG
+static char * repl_names[4] = {"LRU","Random","Master-Idx-1","Master-IDx-2"};
+#endif
+
+/* L2CACHE DRIVER */
+
+/* Driver manager operations: only first-stage init is needed */
+struct drvmgr_drv_ops l2cache_ops =
+{
+	.init = {l2cache_init1, NULL, NULL, NULL},
+	.remove = NULL,
+	.info = NULL
+};
+
+/* AMBA Plug&Play IDs this driver binds to */
+struct amba_dev_id l2cache_ids[] =
+{
+	{VENDOR_GAISLER, GAISLER_L2CACHE},
+	{0, 0}		/* Mark end of table */
+};
+
+/* Driver description registered with the driver manager */
+struct amba_drv_info l2cache_info =
+{
+	{
+		DRVMGR_OBJ_DRV,			/* Driver */
+		NULL,				/* Next driver */
+		NULL,				/* Device list */
+		DRIVER_AMBAPP_GAISLER_L2CACHE_ID,/* Driver ID */
+		"L2CACHE_DRV",			/* Driver Name */
+		DRVMGR_BUS_TYPE_AMBAPP,		/* Bus Type */
+		&l2cache_ops,
+		NULL,				/* Funcs */
+		0,				/* No devices yet */
+		sizeof(struct l2cache_priv),	/* Make drvmgr alloc private */
+	},
+	&l2cache_ids[0]
+};
+
+/* Register the L2CACHE driver with the driver manager so l2cache_init1()
+ * is called when a matching AMBA core is found.
+ */
+void l2cache_register_drv(void)
+{
+	DBG("Registering L2CACHE driver\n");
+	drvmgr_drv_register(&l2cache_info.general);
+}
+
+/* Initializes the L2CACHE core and driver
+ *
+ * Probes the cache geometry (ways, way size, line size, MTRRs, FT and
+ * SPLIT support) from the hardware STATUS/ACCESS-CONTROL registers and
+ * caches it in the private structure. Assumes l2cachepriv has already
+ * been set, since the register helpers read it.
+ *
+ * Return values
+ *  0 Successful initalization
+ */
+STATIC int l2cache_init(struct l2cache_priv *priv)
+{
+	struct ambapp_ahb_info *ahb;
+	struct amba_dev_info *ainfo = priv->dev->businfo;
+
+	/* Find L2CACHE core from Plug&Play information */
+	ahb = ainfo->info.ahb_slv;
+
+	/* Found L2CACHE core, init private structure.
+	 * NOTE(review): registers are taken from the second AHB bar
+	 * (start[1]) — confirm against the L2C Plug&Play layout.
+	 */
+	priv->regs = (struct l2c_regs *)ahb->start[1];
+
+	/* Initialize L2CACHE status */
+	unsigned int status = l2cache_reg_status();
+	priv->ways = (status & L2C_STAT_WAY) + 1;
+	priv->waysize =
+		((status & L2C_STAT_WAYSIZE) >> L2C_STAT_WAYSIZE_BIT) * 1024;
+	priv->linesize = ((status & L2C_STAT_LS)? 64 : 32);
+	priv->index = ((priv->waysize)/(priv->linesize));
+	priv->mtrr = (status & L2C_STAT_MTRR) >> L2C_STAT_MTRR_BIT;
+	priv->ft_support = (status & L2C_STAT_MP) >> L2C_STAT_MP_BIT;
+
+	/* Probe split support: toggle the SPLIT bit and see if it sticks,
+	 * then restore the original setting.
+	 */
+	int split_old = 0;
+	int split_new = 0;
+	split_old = (l2cache_reg_accctrl() & L2C_ACCCTRL_SPLIT);
+	if (split_old){
+		l2cache_reg_accctrl_split_disable();
+	}else{
+		l2cache_reg_accctrl_split_enable();
+	}
+	split_new = (l2cache_reg_accctrl() & L2C_ACCCTRL_SPLIT);
+	if (split_old){
+		l2cache_reg_accctrl_split_enable();
+	}else{
+		l2cache_reg_accctrl_split_disable();
+	}
+	/* SPLIT is supported iff the bit actually changed */
+	priv->split_support =
+		((split_new ^ split_old) >> L2C_ACCCTRL_SPLIT_BIT) & 1;
+
+	DBG("L2CACHE driver initialized\n");
+
+	return 0;
+}
+
+/* Called when a core is found with the AMBA device and vendor ID
+ * given in l2cache_ids[]. IRQ, Console does not work here.
+ *
+ * Sets up the driver private data and probes the hardware. Only one
+ * L2CACHE core is supported; a second instance is rejected.
+ *
+ * Returns a DRVMGR_* status code.
+ */
+int l2cache_init1(struct drvmgr_dev *dev)
+{
+	int status;
+	struct l2cache_priv *priv;
+
+	DBG("L2CACHE[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+
+	if (l2cachepriv) {
+		DBG("Driver only supports one L2CACHE core\n");
+		return DRVMGR_FAIL;
+	}
+
+	priv = dev->priv;
+	if (!priv)
+		return DRVMGR_NOMEM;
+
+	priv->dev = dev;
+	strncpy(&priv->devname[0], "l2cache0", DEVNAME_LEN);
+	/* The register access helpers read this global; it must be set
+	 * before l2cache_init() touches the hardware.
+	 */
+	l2cachepriv = priv;
+
+	/* Initialize L2CACHE Hardware */
+	status = l2cache_init(priv);
+	if (status) {
+		printk("Failed to initialize l2cache driver %d\n", status);
+		/* Use a driver manager error code, consistent with the
+		 * other failure paths of this function.
+		 */
+		return DRVMGR_FAIL;
+	}
+
+	return DRVMGR_OK;
+}
+
+/* Set the cache enable bit in the control register */
+STATIC INLINE int l2cache_reg_ctrl_enable(void)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	unsigned int ctrl = REG_READ(&priv->regs->control);
+	REG_WRITE(&priv->regs->control, (ctrl | L2C_CTRL_EN));
+	return 0;
+}
+
+/* Clear the cache enable bit in the control register */
+STATIC INLINE int l2cache_reg_ctrl_disable(void)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	unsigned int ctrl = REG_READ(&priv->regs->control);
+	REG_WRITE(&priv->regs->control, (ctrl & ~(L2C_CTRL_EN)));
+	return 0;
+}
+
+/* Program the replacement policy field (REPL) of the control register */
+STATIC INLINE int l2cache_reg_ctrl_repl(int policy)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	unsigned int ctrl = REG_READ(&priv->regs->control);
+	REG_WRITE(&priv->regs->control,
+		((ctrl & ~(L2C_CTRL_REPL)) |
+		((policy << L2C_CTRL_REPL_BIT) & L2C_CTRL_REPL))
+	);
+	return 0;
+}
+
+/* Program the index-replace way field (IWAY) of the control register */
+STATIC INLINE int l2cache_reg_ctrl_iway(int way)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	unsigned int ctrl = REG_READ(&priv->regs->control);
+	REG_WRITE(&priv->regs->control,
+		((ctrl & ~(L2C_CTRL_IWAY)) |
+		((way << L2C_CTRL_IWAY_BIT) & L2C_CTRL_IWAY))
+	);
+	return 0;
+}
+
+/* Program the write policy bit (WP) of the control register */
+STATIC INLINE int l2cache_reg_ctrl_writep(int policy)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	unsigned int ctrl = REG_READ(&priv->regs->control);
+	REG_WRITE(&priv->regs->control,
+		((ctrl & ~(L2C_CTRL_WP)) | ((policy << L2C_CTRL_WP_BIT) & L2C_CTRL_WP))
+	);
+	return 0;
+}
+
+/* Program the locked-ways field (LOCK) of the control register */
+STATIC INLINE int l2cache_reg_ctrl_locked_set(int locked)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	unsigned int ctrl = REG_READ(&priv->regs->control);
+	ctrl = (ctrl & ~(L2C_CTRL_LOCK));
+	REG_WRITE(&priv->regs->control,
+		ctrl |
+		((locked << L2C_CTRL_LOCK_BIT) & L2C_CTRL_LOCK));
+	return 0;
+}
+
+/* Enable (edac!=0) or disable (edac==0) EDAC in the control register */
+STATIC INLINE int l2cache_reg_ctrl_edac_set(int edac)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	unsigned int ctrl = REG_READ(&priv->regs->control);
+	REG_WRITE(&priv->regs->control,
+		(ctrl & ~(L2C_CTRL_EDAC)) |
+		(edac? L2C_CTRL_EDAC:0));
+	return 0;
+}
+
+/* Read the control register */
+STATIC INLINE unsigned int l2cache_reg_ctrl(void)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	return REG_READ(&priv->regs->control);
+}
+
+/* Read the status register */
+STATIC INLINE unsigned int l2cache_reg_status(void)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	return REG_READ(&priv->regs->status);
+}
+
+/* Program MTRR register 'index' with address, mask and option bits */
+STATIC INLINE int l2cache_reg_mtrr_set(int index, unsigned int addr,
+		unsigned int mask, int options)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	/* Set mtrr */
+	addr = addr & L2C_MTRR_ADDR;
+	mask = (mask >> 16) & L2C_MTRR_MASK;
+	options = ((options & ~(L2C_MTRR_ADDR)) & ~(L2C_MTRR_MASK));
+	unsigned int mtrr = 0 | addr | mask | options;
+	REG_WRITE(&priv->regs->mtrr[index], mtrr);
+	return 0;
+}
+
+/* Read MTRR register 'index' */
+UNUSED STATIC INLINE unsigned int l2cache_reg_mtrr_get(int index)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	return REG_READ(&priv->regs->mtrr[index]);
+}
+
+/* Issue a flush command through the flush-memory-address register */
+STATIC INLINE int l2cache_reg_flushmem(unsigned int addr, int options)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	options = (options & ~(L2C_FLUSH_ADDR));
+	REG_WRITE(&priv->regs->flush_mem_addr, (addr & L2C_FLUSH_ADDR) | options);
+	return 0;
+}
+
+/* Flush a single line, addressed by way and set index */
+STATIC INLINE int l2cache_reg_flushline(int way, int index, int options)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	options = 0 | (options & (L2C_FLUSHSI_FMODE));
+	REG_WRITE(&priv->regs->flush_set_index,
+		((index << L2C_FLUSHSI_INDEX_BIT) & L2C_FLUSHSI_INDEX) |
+		((way << L2C_FLUSHSI_WAY_BIT) & L2C_FLUSHSI_WAY) |
+		options
+	);
+	return 0;
+}
+
+/* Flush a whole way (WF set), optionally updating the tag */
+STATIC INLINE int l2cache_reg_flushway(unsigned int tag, int way, int options)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	options = (options & ~(L2C_FLUSHSI_TAG | L2C_FLUSHSI_WAY))
+		| L2C_FLUSHSI_WF;
+	REG_WRITE(&priv->regs->flush_set_index,
+		(tag & L2C_FLUSHSI_TAG) |
+		( (way << L2C_FLUSHSI_WAY_BIT) & L2C_FLUSHSI_WAY) |
+		options);
+	return 0;
+}
+
+/* Read the error status/control register */
+STATIC INLINE unsigned int l2cache_reg_error(void)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	return REG_READ(&priv->regs->error_status_control);
+}
+
+/* Reset the pending error status (write RST bit) */
+STATIC INLINE int l2cache_reg_error_reset(void)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	unsigned int ctrl = REG_READ(&priv->regs->error_status_control);
+	REG_WRITE(&priv->regs->error_status_control, ctrl | L2C_ERROR_RST);
+	return 0;
+}
+
+/* Program the error interrupt mask field (IRQM) */
+STATIC INLINE int l2cache_reg_error_irqmask(int mask)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	unsigned int ctrl = REG_READ(&priv->regs->error_status_control);
+	REG_WRITE(&priv->regs->error_status_control,
+		(ctrl & ~(L2C_ERROR_IRQM)) | (mask & L2C_ERROR_IRQM));
+	return 0;
+}
+
+/* Read the address of the latest error */
+STATIC INLINE unsigned int l2cache_reg_error_addr(void)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	return REG_READ(&priv->regs->error_addr);
+}
+
+/* Read the scrubber control/status register */
+STATIC INLINE unsigned int l2cache_reg_scrub(void)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	return REG_READ(&priv->regs->scrub_control_status);
+}
+
+/* Enable the scrubber with the given delay. Also sets the DSC and SH
+ * bits in the access control register.
+ */
+STATIC INLINE int l2cache_reg_scrub_enable(int delay)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	unsigned int accc = REG_READ(&priv->regs->access_control);
+	REG_WRITE(&priv->regs->access_control,
+		accc | L2C_ACCCTRL_DSC | L2C_ACCCTRL_SH);
+
+	unsigned int ctrl = REG_READ(&priv->regs->scrub_control_status);
+	REG_WRITE(&priv->regs->scrub_delay,
+		(delay << L2C_SCRUB_DEL_BIT) & L2C_SCRUB_DEL);
+	REG_WRITE(&priv->regs->scrub_control_status, ctrl | L2C_SCRUB_EN);
+	return 0;
+}
+
+/* Disable the scrubber */
+STATIC INLINE int l2cache_reg_scrub_disable(void)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	unsigned int ctrl = REG_READ(&priv->regs->scrub_control_status);
+	REG_WRITE(&priv->regs->scrub_control_status, ctrl & ~(L2C_SCRUB_EN));
+	return 0;
+}
+
+/* Scrub a single line, addressed by way and index (PEN bit) */
+STATIC INLINE int l2cache_reg_scrub_line(int way, int index)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	REG_WRITE(&priv->regs->scrub_control_status,
+		((index << L2C_SCRUB_INDEX_BIT) & L2C_SCRUB_INDEX) |
+		((way << L2C_SCRUB_WAY_BIT) & L2C_SCRUB_WAY) |
+		L2C_SCRUB_PEN);
+	return 0;
+}
+
+/* Read the scrubber delay register */
+STATIC INLINE unsigned int l2cache_reg_scrub_delay(void)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	return REG_READ(&priv->regs->scrub_delay);
+}
+
+/* Read the access control register */
+STATIC INLINE unsigned int l2cache_reg_accctrl(void){
+	struct l2cache_priv *priv = l2cachepriv;
+
+	return REG_READ(&priv->regs->access_control);
+}
+
+STATIC INLINE int l2cache_reg_accctrl_split_disable(void)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	/* Disable split */
+	unsigned int ctrl = REG_READ(&priv->regs->access_control);
+	REG_WRITE(&priv->regs->access_control, (ctrl & ~(L2C_ACCCTRL_SPLIT)));
+	return 0;
+}
+
+STATIC INLINE int l2cache_reg_accctrl_split_enable(void)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	/* Enable split */
+	unsigned int ctrl = REG_READ(&priv->regs->access_control);
+	REG_WRITE(&priv->regs->access_control, (ctrl | (L2C_ACCCTRL_SPLIT)));
+	return 0;
+}
+
+/* Return 1 if the cache is enabled, 0 otherwise */
+STATIC INLINE int l2cache_ctrl_status(void)
+{
+	return ((l2cache_reg_ctrl() >> L2C_CTRL_EN_BIT) & 0x1);
+}
+
+STATIC void l2cache_flushwait(void)
+{
+	/* Read any L2cache register to wait until flush is done */
+	/* The L2 will block any access until the flush is done */
+	/* Force read operation */
+	//asm volatile ("" : : "r" (l2cache_reg_status()));
+	(void) l2cache_reg_status();
+	return;
+}
+
+#ifdef TEST_L2CACHE
+/* Program the data check bits used for error injection */
+STATIC INLINE int l2cache_reg_error_dcb(unsigned int cb)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	REG_WRITE(&priv->regs->data_check_bit, (cb & L2C_DCB_CB));
+	return 0;
+}
+
+/* Arm error injection at the given address (EN bit set) */
+STATIC INLINE int l2cache_reg_error_inject(unsigned int addr)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	REG_WRITE(&priv->regs->error_injection,
+		(addr & L2C_ERRINJ_ADDR) | L2C_ERRINJ_EN);
+	return 0;
+}
+
+/* Read the raw tag for (way, index) via the diagnostic interface */
+STATIC INLINE unsigned int l2cache_reg_diagtag(int way, int index)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	int offset = (index*8 + way);
+	return REG_READ(&priv->regs->diag_iface_tag[offset]);
+}
+
+/* Read one data word of line (way, index) via the diagnostic interface */
+STATIC INLINE unsigned int l2cache_reg_diagdata(int way, int index, int word)
+{
+	struct l2cache_priv *priv = l2cachepriv;
+
+	int offset = (index*(priv->linesize/4) + way*0x20000 + word);
+	return REG_READ(&priv->regs->diag_iface_data[offset]);
+}
+
+/* Integer floor(log2(v)); returns 0 for v <= 1 */
+STATIC unsigned int log2int(unsigned int v)
+{
+	unsigned r = 0;
+	while (v >>= 1) {
+		r++;
+	}
+	return r;
+}
+
+/* Return the index for a given addr (line number within a way),
+ * or L2CACHE_ERR_NOINIT if the driver is not initialized.
+ */
+int l2cache_get_index( uint32_t addr)
+{
+	struct l2cache_priv * priv = l2cachepriv;
+
+	if (priv == NULL){
+		DBG("L2CACHE not initialized.\n");
+		return L2CACHE_ERR_NOINIT;
+	}
+
+	return (addr % priv->waysize)/(priv->linesize);
+}
+
+/* Return the tag for a given addr (addr with the way-offset bits cleared).
+ * NOTE(review): on a NULL priv this returns L2CACHE_ERR_NOINIT through an
+ * unsigned return type — callers cannot reliably distinguish it from a tag.
+ */
+uint32_t l2cache_get_tag( uint32_t addr)
+{
+	struct l2cache_priv * priv = l2cachepriv;
+
+	if (priv == NULL){
+		DBG("L2CACHE not initialized.\n");
+		return L2CACHE_ERR_NOINIT;
+	}
+
+	uint32_t tmp;
+	int i = log2int(priv->waysize);
+	/* Clear the low log2(waysize) bits of addr */
+	tmp = (addr >> i);
+	tmp = (tmp << i);
+	return tmp;
+}
+
+/* Look up addr in the cache by scanning all ways of its set.
+ * On a hit, *way (if non-NULL) receives the hitting way.
+ * Returns L2CACHE_HIT, L2CACHE_MISS or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_lookup(uint32_t addr, int * way)
+{
+	struct l2cache_priv * priv = l2cachepriv;
+	int i;
+	struct l2cache_tag gottag;
+	int ret;
+
+	if (priv == NULL){
+		DBG("L2CACHE not initialized.\n");
+		return L2CACHE_ERR_NOINIT;
+	}
+
+	uint32_t exptag = l2cache_get_tag(addr);
+	int index = l2cache_get_index(addr);
+
+	/* Check all tags in the set */
+	for(i=0; i< priv->ways; i++){
+		ret = l2cache_diag_tag(i, index, &gottag);
+		if (ret != L2CACHE_ERR_OK){
+			return ret;
+		}
+		/*DBG("L2CACHE gottag: way=%d, valid=%d, tag=0x%08x.\n",
+		 * i, gottag.valid, gottag.tag);*/
+		/* Check if valid */
+		if (gottag.valid){
+			/* Check if they are the same */
+			if (gottag.tag == exptag){
+				/* HIT! */
+				if (way){
+					*way = i;
+				}
+				DBG("L2CACHE lookup: index=%d, tag=0x%08x HIT way=%d.\n",
+					index, (unsigned int) exptag, i);
+				return L2CACHE_HIT;
+			}
+		}
+	}
+	DBG("L2CACHE lookup: index=%d, tag=0x%08x MISS.\n",
+		index, (unsigned int) exptag);
+	/* MISS! */
+	return L2CACHE_MISS;
+}
+
+/* Diagnostic Accesses */
+#define l2cache_tag_valid(val) ((val & L2C_TAG_VALID) >> L2C_TAG_VALID_BIT)
+#define l2cache_tag_dirty(val) ((val & L2C_TAG_DIRTY) >> L2C_TAG_DIRTY_BIT)
+#define l2cache_tag_lru(val) ((val & L2C_TAG_LRU) >> L2C_TAG_LRU_BIT)
+
+/* Read and decode the tag of line (way, index) into *tag.
+ * Returns an L2CACHE_ERR_* code; EINVAL if way/index are out of range
+ * or tag is NULL.
+ */
+int l2cache_diag_tag( int way, int index, struct l2cache_tag * tag)
+{
+	struct l2cache_priv * priv = l2cachepriv;
+
+	if (priv == NULL){
+		DBG("L2CACHE not initialized.\n");
+		return L2CACHE_ERR_NOINIT;
+	}
+
+	if (way >= priv->ways){
+		DBG("L2CACHE has only %d ways.\n", priv->ways);
+		return L2CACHE_ERR_EINVAL;
+	}
+
+	if (index >= priv->index){
+		DBG("L2CACHE has only %d lines.\n", priv->index);
+		return L2CACHE_ERR_EINVAL;
+	}
+
+	unsigned int val = l2cache_reg_diagtag(way,index);
+
+	if (tag){
+		tag->tag = l2cache_get_tag(val);
+		tag->valid = l2cache_tag_valid(val);
+		tag->dirty = l2cache_tag_dirty(val);
+		tag->lru = l2cache_tag_lru(val);
+	}else{
+		return L2CACHE_ERR_EINVAL;
+	}
+	return L2CACHE_ERR_OK;
+}
+
+/* Read the data words of line (way, index) into *dataline.
+ * dataline->words is set to the number of 32-bit words per line.
+ * Returns an L2CACHE_ERR_* code; EINVAL if way/index are out of range
+ * or dataline is NULL.
+ */
+int l2cache_diag_line( int way, int index, struct l2cache_dataline * dataline)
+{
+	struct l2cache_priv * priv = l2cachepriv;
+	int i;
+
+	if (priv == NULL){
+		DBG("L2CACHE not initialized.\n");
+		return L2CACHE_ERR_NOINIT;
+	}
+
+	if (way >= priv->ways){
+		DBG("L2CACHE has only %d ways.\n", priv->ways);
+		return L2CACHE_ERR_EINVAL;
+	}
+
+	if (index >= priv->index){
+		DBG("L2CACHE has only %d lines.\n", priv->index);
+		return L2CACHE_ERR_EINVAL;
+	}
+
+	if (dataline){
+		dataline->words = (priv->linesize/4);
+		for (i=0; i< (priv->linesize/4); i++){
+			dataline->data[i] = l2cache_reg_diagdata(way,index,i);
+		}
+	}else{
+		return L2CACHE_ERR_EINVAL;
+	}
+	return L2CACHE_ERR_OK;
+}
+
+/* Inject an error on a given addr. The addr must be 32-bit aligned and
+ * the cache must implement EDAC (ft_support). mask selects which check
+ * bits are XOR-flipped on the next access to addr.
+ */
+int l2cache_error_inject_address( uint32_t addr, uint32_t mask)
+{
+	struct l2cache_priv * priv = l2cachepriv;
+	int word;
+
+	if (priv == NULL){
+		DBG("L2CACHE not initialized.\n");
+		return L2CACHE_ERR_NOINIT;
+	}
+
+	if (!priv->ft_support){
+		DBG("L2CACHE does not have EDAC support.\n");
+		return L2CACHE_ERR_ERROR;
+	}
+
+	if (addr & 0x3){
+		DBG("Address not aligned to 32-bit.\n");
+		return L2CACHE_ERR_EINVAL;
+	}
+
+	/* Get word index */
+	word = (addr % priv->linesize)/4;
+
+	/* Shift mask to proper word (7 check bits per 32-bit word).
+	 * NOTE(review): the shift uses priv->ways where the number of
+	 * words covered by the DCB register looks intended — correct only
+	 * when both happen to be 4. Confirm against the L2C manual.
+	 */
+	mask = (mask << (7*(priv->ways - word - 1)));
+
+	/* Write DCB mask to XOR */
+	l2cache_reg_error_dcb(mask);
+
+	/* Inject error */
+	l2cache_reg_error_inject(addr);
+
+	DBG("L2CACHE error injected in 0x%08x (0x%08x).\n",
+		(unsigned int) addr, (unsigned int) mask);
+
+	return L2CACHE_ERR_OK;
+}
+
+#endif /* TEST_L2CACHE */
+
+/* L2CACHE Interrupt handler, called when there may be a L2CACHE interrupt.
+ * arg is the driver private structure.
+ */
+void l2cache_isr(void *arg)
+{
+	struct l2cache_priv *priv = arg;
+	unsigned int sts = l2cache_reg_error();
+	unsigned int addr = l2cache_reg_error_addr();
+
+	/* Make sure that the interrupt is pending and unmasked,
+	 * otherwise it might have been another core
+	 * sharing the same interrupt line */
+	if ( ((sts & L2C_ERROR_IRQP) >> L2C_ERROR_IRQP_BIT) &
+		((sts & L2C_ERROR_IRQM) >> L2C_ERROR_IRQM_BIT)){
+		/* Reset error status */
+		l2cache_reg_error_reset();
+		/* Execute user IRQ (there will always be one ISR) */
+		/* Give cacheline address (low 5 bits masked off) */
+		(priv->isr)(priv->isr_arg, (addr & ~(0x1f)), sts);
+	}
+}
+
+/* Enable L2CACHE:
+ * Optionally flush first according to 'flush', then set the enable bit.
+ */
+int l2cache_enable(int flush)
+{
+	/* l2cache_flush() validates the flush parameter and the driver
+	 * INIT state; any error is propagated before enabling.
+	 */
+	int ret = l2cache_flush(flush);
+
+	if (ret < 0)
+		return ret;
+
+	l2cache_reg_ctrl_enable();
+
+	DBG("L2CACHE enabled\n");
+	return L2CACHE_ERR_OK;
+}
+
+/* Disable L2CACHE:
+ * flush selects how the contents are flushed while disabling; the
+ * L2C_FLUSH_DI flag in the flush command requests the cache-disable
+ * as part of the flush operation.
+ */
+int l2cache_disable(int flush)
+{
+	struct l2cache_priv * priv = l2cachepriv;
+
+	if (priv == NULL){
+		DBG("L2CACHE not initialized.\n");
+		return L2CACHE_ERR_NOINIT;
+	}
+
+	if ((flush < 0) ||
+		(flush >
+		(L2CACHE_OPTIONS_FLUSH_INVALIDATE | L2CACHE_OPTIONS_FLUSH_WAIT))
+	   ){
+		DBG("L2CACHE wrong flush option.\n");
+		return L2CACHE_ERR_EINVAL;
+	}
+
+	/* Flush & invalidate all cache. Also disable L2C */
+	switch(flush & 0x3){
+	case L2CACHE_OPTIONS_FLUSH_NONE:
+		/* No flush requested: just clear the enable bit */
+		l2cache_reg_ctrl_disable();
+		break;
+	case L2CACHE_OPTIONS_FLUSH_INV_WBACK:
+		l2cache_reg_flushmem(0, L2C_FLUSH_FMODE_INV_WB_ALL | L2C_FLUSH_DI);
+		break;
+	case L2CACHE_OPTIONS_FLUSH_WRITEBACK:
+		l2cache_reg_flushmem(0, L2C_FLUSH_FMODE_WB_ALL | L2C_FLUSH_DI);
+		break;
+	case L2CACHE_OPTIONS_FLUSH_INVALIDATE:
+	default:
+		l2cache_reg_flushmem(0, L2C_FLUSH_FMODE_INV_ALL | L2C_FLUSH_DI);
+		break;
+	}
+
+	/* Optionally block until the flush has completed */
+	if (flush & L2CACHE_OPTIONS_FLUSH_WAIT){
+		l2cache_flushwait();
+	}
+
+	DBG("L2CACHE disabled\n");
+	return L2CACHE_ERR_OK;
+}
+
+/* Status L2CACHE:
+ * Collects the current control, access-control, error and scrubber
+ * settings and packs them into the L2CACHE_STATUS_* bit layout.
+ * Returns the packed status word or L2CACHE_ERR_NOINIT.
+ */
+int l2cache_status(void)
+{
+	struct l2cache_priv * priv = l2cachepriv;
+	int status;
+
+	if (priv == NULL){
+		DBG("L2CACHE not initialized.\n");
+		return L2CACHE_ERR_NOINIT;
+	}
+
+	unsigned int ctrl = l2cache_reg_ctrl();
+	int locked = (ctrl & L2C_CTRL_LOCK) >> L2C_CTRL_LOCK_BIT;
+	int enabled = ((ctrl & L2C_CTRL_EN) >> L2C_CTRL_EN_BIT) & 0x1;
+	int edac = (ctrl & L2C_CTRL_EDAC) >> L2C_CTRL_EDAC_BIT;
+	int repl = (ctrl & L2C_CTRL_REPL) >> L2C_CTRL_REPL_BIT;
+	int writep = (ctrl & L2C_CTRL_WP) >> L2C_CTRL_WP_BIT;
+
+	unsigned int acc = l2cache_reg_accctrl();
+	int split = (acc & L2C_ACCCTRL_SPLIT) >> L2C_ACCCTRL_SPLIT_BIT;
+
+	unsigned int err = l2cache_reg_error();
+	int interrupts = (err & L2C_ERROR_IRQM) >> L2C_ERROR_IRQM_BIT;
+
+	unsigned int scr = l2cache_reg_scrub();
+	int scrub = (scr & L2C_SCRUB_EN) >> L2C_SCRUB_EN_BIT;
+
+	unsigned int dly = l2cache_reg_scrub_delay();
+	int delay = (dly & L2C_SCRUB_DEL) >> L2C_SCRUB_DEL_BIT;
+
+	/* Pack all fields into the public status word */
+	status = 0|
+		(enabled? L2CACHE_STATUS_ENABLED: 0) |
+		(split? L2CACHE_STATUS_SPLIT_ENABLED: 0) |
+		(edac? L2CACHE_STATUS_EDAC_ENABLED: 0) |
+		((repl & 0x3) << L2CACHE_STATUS_REPL_BIT) |
+		(writep? L2CACHE_STATUS_WRITETHROUGH: 0) |
+		((locked & 0xf) << L2CACHE_STATUS_LOCK_BIT) |
+		((interrupts & 0xf) << L2CACHE_STATUS_INT_BIT) |
+		(scrub? L2CACHE_STATUS_SCRUB_ENABLED: 0) |
+		((delay & 0xffff) << L2CACHE_STATUS_SCRUB_DELAY_BIT);
+
+	return status;
+}
+
+/* Flush L2CACHE:
+ * Flushes the whole cache according to the L2CACHE_OPTIONS_FLUSH_*
+ * bits in 'flush'; optionally waits for completion.
+ * Returns an L2CACHE_ERR_* code.
+ */
+int l2cache_flush(int flush)
+{
+	struct l2cache_priv * priv = l2cachepriv;
+
+	if (priv == NULL){
+		DBG("L2CACHE not initialized.\n");
+		return L2CACHE_ERR_NOINIT;
+	}
+
+	if ((flush < 0) ||
+		(flush >
+		(L2CACHE_OPTIONS_FLUSH_INVALIDATE | L2CACHE_OPTIONS_FLUSH_WAIT))
+	   ){
+		DBG("L2CACHE wrong flush option.\n");
+		return L2CACHE_ERR_EINVAL;
+	}
+
+	/* Low two bits select the flush mode */
+	switch(flush & 0x3){
+	case L2CACHE_OPTIONS_FLUSH_NONE:
+		break;
+	case L2CACHE_OPTIONS_FLUSH_INV_WBACK:
+		l2cache_reg_flushmem(0, L2C_FLUSH_FMODE_INV_WB_ALL);
+		break;
+	case L2CACHE_OPTIONS_FLUSH_WRITEBACK:
+		l2cache_reg_flushmem(0, L2C_FLUSH_FMODE_WB_ALL);
+		break;
+	case L2CACHE_OPTIONS_FLUSH_INVALIDATE:
+	default:
+		l2cache_reg_flushmem(0, L2C_FLUSH_FMODE_INV_ALL);
+		break;
+	}
+
+	/* Optionally block until the flush has completed */
+	if (flush & L2CACHE_OPTIONS_FLUSH_WAIT){
+		l2cache_flushwait();
+	}
+
+	DBG("L2CACHE flushed\n");
+	return L2CACHE_ERR_OK;
+}
+
+/* Flush L2CACHE address:
+ * Flushes every cache line covering [addr, addr+size) with the mode
+ * selected by 'flush'; optionally waits for completion.
+ * Returns an L2CACHE_ERR_* code.
+ */
+int l2cache_flush_address(uint32_t addr, int size, int flush)
+{
+	struct l2cache_priv * priv = l2cachepriv;
+	uint32_t endaddr;
+	int options;
+
+	if (priv == NULL){
+		DBG("L2CACHE not initialized.\n");
+		return L2CACHE_ERR_NOINIT;
+	}
+
+	if ((flush < 0) ||
+		(flush >
+		(L2CACHE_OPTIONS_FLUSH_INVALIDATE | L2CACHE_OPTIONS_FLUSH_WAIT))
+	   ){
+		DBG("L2CACHE wrong flush option.\n");
+		return L2CACHE_ERR_EINVAL;
+	}
+
+	if (size <= 0){
+		DBG("L2CACHE wrong size.\n");
+		return L2CACHE_ERR_EINVAL;
+	}
+
+	/* Translate the flush mode into a per-line flush command */
+	switch(flush & 0x3){
+	case L2CACHE_OPTIONS_FLUSH_NONE:
+		break;
+	case L2CACHE_OPTIONS_FLUSH_INV_WBACK:
+		options=L2C_FLUSH_FMODE_INV_WB_ONE;
+		break;
+	case L2CACHE_OPTIONS_FLUSH_WRITEBACK:
+		options=L2C_FLUSH_FMODE_WB_ONE;
+		break;
+	case L2CACHE_OPTIONS_FLUSH_INVALIDATE:
+	default:
+		options=L2C_FLUSH_FMODE_INV_ONE;
+		break;
+	}
+
+	/* Nothing to do when no flush mode was requested */
+	if ( (flush & 0x3) == L2CACHE_OPTIONS_FLUSH_NONE){
+		return L2CACHE_ERR_OK;
+	}
+
+	/* Get the end address.
+	 * NOTE(review): addr+size may wrap around 32 bits for ranges
+	 * ending at the top of the address space — confirm callers.
+	 */
+	endaddr = (addr + size);
+
+	/* Start on first cacheline address */
+	addr = addr - (addr % priv->linesize);
+	while( addr < endaddr){
+		/* Flush address */
+		l2cache_reg_flushmem(addr, options);
+		/* Update next line */
+		addr += priv->linesize;
+	}
+
+	/* Optionally block until the flush has completed */
+	if (flush & L2CACHE_OPTIONS_FLUSH_WAIT){
+		l2cache_flushwait();
+	}
+
+	DBG("L2CACHE address range flushed\n");
+	return L2CACHE_ERR_OK;
+}
+
+/* Flush a single L2CACHE line, addressed by way and line index.
+ * way: 0 .. priv->ways-1.
+ * index: 0 .. priv->index-1.
+ * flush: flush mode as for l2cache_flush().
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_flush_line(int way, int index, int flush)
+{
+ struct l2cache_priv * priv = l2cachepriv;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ if ((flush < 0) ||
+ (flush >
+ (L2CACHE_OPTIONS_FLUSH_INVALIDATE | L2CACHE_OPTIONS_FLUSH_WAIT))
+ ){
+ DBG("L2CACHE wrong flush option.\n");
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ if ((index < 0) || (index >= priv->index)){
+ DBG("L2CACHE only has %d lines.\n", priv->index);
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ if ((way < 0 ) || (way >= priv->ways)){
+ DBG("L2CACHE only has %d ways.\n", priv->ways);
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ /* Program the flush set/index register with the selected mode */
+ switch(flush & 0x3){
+ case L2CACHE_OPTIONS_FLUSH_NONE:
+ break;
+ case L2CACHE_OPTIONS_FLUSH_INV_WBACK:
+ l2cache_reg_flushline(way, index,
+ L2C_FLUSHSI_FMODE_SET_INV_WB_ONE);
+ break;
+ case L2CACHE_OPTIONS_FLUSH_WRITEBACK:
+ l2cache_reg_flushline(way, index, L2C_FLUSHSI_FMODE_SET_WB_ONE);
+ break;
+ case L2CACHE_OPTIONS_FLUSH_INVALIDATE:
+ default:
+ l2cache_reg_flushline(way, index, L2C_FLUSHSI_FMODE_SET_INV_ONE);
+ break;
+ }
+
+ if (flush & L2CACHE_OPTIONS_FLUSH_WAIT){
+ l2cache_flushwait();
+ }
+
+ DBG("L2CACHE line [%d,%d] flushed\n", way, index);
+ return L2CACHE_ERR_OK;
+}
+
+/* Flush a whole L2CACHE way.
+ * way: 0 .. priv->ways-1.
+ * flush: flush mode as for l2cache_flush().
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_flush_way(int way, int flush)
+{
+ struct l2cache_priv * priv = l2cachepriv;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ if ((flush < 0) ||
+ (flush >
+ (L2CACHE_OPTIONS_FLUSH_INVALIDATE | L2CACHE_OPTIONS_FLUSH_WAIT))
+ ){
+ DBG("L2CACHE wrong flush option.\n");
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ if ((way < 0 ) || (way >= priv->ways)){
+ DBG("L2CACHE only has %d ways.\n", priv->ways);
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ /* Way-flush with tag 0: update (invalidate), write-back, or both */
+ switch(flush & 0x3){
+ case L2CACHE_OPTIONS_FLUSH_NONE:
+ break;
+ case L2CACHE_OPTIONS_FLUSH_INVALIDATE:
+ l2cache_reg_flushway(0, way, L2C_FLUSHSI_FMODE_WAY_UPDATE);
+ break;
+ case L2CACHE_OPTIONS_FLUSH_WRITEBACK:
+ l2cache_reg_flushway(0, way, L2C_FLUSHSI_FMODE_WAY_WB);
+ break;
+ case L2CACHE_OPTIONS_FLUSH_INV_WBACK:
+ default:
+ l2cache_reg_flushway(0, way, L2C_FLUSHSI_FMODE_WAY_UPDATE_WB_ALL);
+ break;
+ }
+
+ if (flush & L2CACHE_OPTIONS_FLUSH_WAIT){
+ l2cache_flushwait();
+ }
+
+ DBG("L2CACHE way [%d] flushed\n",way);
+ return L2CACHE_ERR_OK;
+}
+
+/* Fill an L2CACHE way with a given tag.
+ * way: 0 .. priv->ways-1.
+ * tag: tag value; only bits 31:10 may be used (low bits must be zero).
+ * options: OR of L2CACHE_OPTIONS_{FETCH,VALID,DIRTY} applied to the
+ * way-flush operation.
+ * flush: flush mode as for l2cache_flush().
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_fill_way(int way, uint32_t tag, int options, int flush)
+{
+ struct l2cache_priv * priv = l2cachepriv;
+ int flags;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ if ((way < 0 ) || (way >= priv->ways)){
+ DBG("L2CACHE only has %d ways.\n", priv->ways);
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ /* Check input parameters */
+ if (tag & 0x000003ff){
+ DBG("Only using bits 31:10 of Addr/Mask\n");
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ /* Perform the Way-flush */
+ flags = ((options & L2CACHE_OPTIONS_FETCH)? L2C_FLUSHSI_FL:0) |
+ ((options & L2CACHE_OPTIONS_VALID)? L2C_FLUSHSI_VB:0) |
+ ((options & L2CACHE_OPTIONS_DIRTY)? L2C_FLUSHSI_DB:0);
+
+ /*DBG("L2CACHE fill way: way=%d, tag=0x%08x, option=0x%04x\n",
+ * way, tag, flags);*/
+
+ switch(flush & 0x3){
+ case L2CACHE_OPTIONS_FLUSH_NONE:
+ break;
+ case L2CACHE_OPTIONS_FLUSH_INVALIDATE:
+ l2cache_reg_flushway(tag, way,
+ flags | L2C_FLUSHSI_FMODE_WAY_UPDATE);
+ break;
+ case L2CACHE_OPTIONS_FLUSH_WRITEBACK:
+ l2cache_reg_flushway(tag, way, flags | L2C_FLUSHSI_FMODE_WAY_WB);
+ break;
+ case L2CACHE_OPTIONS_FLUSH_INV_WBACK:
+ default:
+ l2cache_reg_flushway(tag, way,
+ flags | L2C_FLUSHSI_FMODE_WAY_UPDATE_WB_ALL);
+ break;
+ }
+
+ if (flush & L2CACHE_OPTIONS_FLUSH_WAIT){
+ l2cache_flushwait();
+ }
+
+ DBG("Way[%d] filled with Tag 0x%08x\n", way, (unsigned int) tag);
+
+ return L2CACHE_ERR_OK;
+}
+
+/* Lock one more L2CACHE way and fill it with a given tag.
+ * tag: tag value; only bits 31:10 may be used (low bits must be zero).
+ * options: OR of L2CACHE_OPTIONS_{FETCH,VALID,DIRTY} for the way-flush.
+ * flush: flush mode used while the cache is temporarily disabled.
+ * enable: L2CACHE_OPTIONS_ENABLE to force the cache on afterwards,
+ * L2CACHE_OPTIONS_DISABLE to leave it off, or L2CACHE_OPTIONS_NONE
+ * to restore the state the cache had on entry.
+ * The cache is disabled during the operation; the highest-numbered
+ * unlocked way is the one that gets locked.
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_lock_way(uint32_t tag, int options, int flush, int enable)
+{
+ struct l2cache_priv * priv = l2cachepriv;
+ int enabled;
+ int way;
+ int locked;
+ int flags;
+ int ret;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ /* Refuse to lock when every way is already locked */
+ locked = L2CACHE_LOCKED_WAYS(l2cache_status());
+ if (locked >= priv->ways){
+ DBG("L2CACHE only has %d ways.\n", priv->ways);
+ return L2CACHE_ERR_TOOMANY;
+ }
+
+ /* Check input parameters */
+ if (tag & 0x000003ff){
+ DBG("Only using bits 31:10 of Addr/Mask\n");
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ /* Remember whether the cache was enabled so it can be restored */
+ enabled = l2cache_ctrl_status();
+
+ /* Disable L2C */
+ ret = l2cache_disable(flush);
+ if (ret < 0){
+ return ret;
+ }
+
+ /* Increase number of locked ways; ways are locked from the top down */
+ locked++;
+ way = priv->ways - locked;
+ l2cache_reg_ctrl_locked_set(locked);
+
+ /* Perform the Way-flush */
+ flags = ((options & L2CACHE_OPTIONS_FETCH)? L2C_FLUSHSI_FL:0) |
+ ((options & L2CACHE_OPTIONS_VALID)? L2C_FLUSHSI_VB:0) |
+ ((options & L2CACHE_OPTIONS_DIRTY)? L2C_FLUSHSI_DB:0);
+
+ /*DBG("L2CACHE lock way: Locked=%d, way=%d, option=0x%04x\n",
+ * locked, way, flags);*/
+
+ switch(flush & 0x3){
+ case L2CACHE_OPTIONS_FLUSH_NONE:
+ break;
+ case L2CACHE_OPTIONS_FLUSH_INVALIDATE:
+ l2cache_reg_flushway(tag, way,
+ flags | L2C_FLUSHSI_FMODE_WAY_UPDATE);
+ break;
+ case L2CACHE_OPTIONS_FLUSH_WRITEBACK:
+ l2cache_reg_flushway(tag, way, flags | L2C_FLUSHSI_FMODE_WAY_WB);
+ break;
+ case L2CACHE_OPTIONS_FLUSH_INV_WBACK:
+ default:
+ l2cache_reg_flushway(tag, way,
+ flags | L2C_FLUSHSI_FMODE_WAY_UPDATE_WB_ALL);
+ break;
+ }
+
+ /* Reenable L2C if required */
+ switch(enable){
+ case L2CACHE_OPTIONS_ENABLE:
+ l2cache_reg_ctrl_enable();
+ break;
+ case L2CACHE_OPTIONS_DISABLE:
+ break;
+ case L2CACHE_OPTIONS_NONE:
+ default:
+ if (enabled) {
+ l2cache_reg_ctrl_enable();
+ }
+ break;
+ }
+
+ if (flush & L2CACHE_OPTIONS_FLUSH_WAIT){
+ l2cache_flushwait();
+ }
+
+ DBG("Way[%d] locked with Tag 0x%08x\n", way, (unsigned int) tag);
+
+ return L2CACHE_ERR_OK;
+}
+
+/* Unlock all L2CACHE ways:
+ * Clears the locked-ways field in the control register so that every
+ * way takes part in normal replacement again.
+ * Returns L2CACHE_ERR_OK or L2CACHE_ERR_NOINIT.
+ */
+int l2cache_unlock(void)
+{
+ struct l2cache_priv * priv = l2cachepriv;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ /* Set number of locked ways to 0 */
+ l2cache_reg_ctrl_locked_set(0);
+
+ DBG("L2CACHE ways unlocked\n");
+
+ return L2CACHE_ERR_OK;
+}
+
+/* Configure and enable an MTRR (memory type range register) entry.
+ * index: MTRR register to program, 0 .. priv->mtrr-1.
+ * addr, mask: range base and mask; only bits 31:18 may be set.
+ * options: L2CACHE_OPTIONS_MTRR_ACCESS_WRITETHROUGH for write-through
+ * (otherwise uncached) and/or L2CACHE_OPTIONS_MTRR_WRITEPROT_ENABLE.
+ * flush: flush mode used while the cache is temporarily disabled.
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_mtrr_enable(int index, uint32_t addr, uint32_t mask, int options,
+ int flush)
+{
+ struct l2cache_priv * priv = l2cachepriv;
+ int enabled;
+ int flags;
+ int ret;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ if (index < 0){
+ DBG("Wrong index\n");
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ if (index >= priv->mtrr){
+ DBG("Not enough MTRR registers\n");
+ return L2CACHE_ERR_TOOMANY;
+ }
+
+ /* Check input parameters */
+ if ((addr & 0x0003ffff) || (mask & 0x0003ffff)){
+ DBG("Only using bits 31:18 of Addr/Mask\n");
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ /* Remember whether the cache was enabled so it can be restored */
+ enabled = l2cache_ctrl_status();
+
+ /* Disable L2C */
+ ret = l2cache_disable(flush);
+ if (ret < 0){
+ return ret;
+ }
+
+ /* Decode options */
+ flags = 0 |
+ (options & L2CACHE_OPTIONS_MTRR_ACCESS_WRITETHROUGH?
+ L2C_MTRR_WRITETHROUGH :
+ L2C_MTRR_UNCACHED) |
+ (options & L2CACHE_OPTIONS_MTRR_WRITEPROT_ENABLE?
+ L2C_MTRR_WRITEPROT_ENABLE :
+ L2C_MTRR_WRITEPROT_DISABLE) |
+ L2C_MTRR_ACCESSCONTROL_ENABLE;
+
+ /* Configure mtrr */
+ l2cache_reg_mtrr_set(index, addr, mask, flags);
+
+ /* Enable cache again (if needed) */
+ if (enabled){
+ l2cache_reg_ctrl_enable();
+ }
+
+ DBG("MTRR[%d] succesfully configured for 0x%08x (mask 0x%08x), "
+ "access=%s, wprot=%s\n",
+ index, (unsigned int) addr, (unsigned int) mask,
+ (options & L2CACHE_OPTIONS_MTRR_ACCESS_WRITETHROUGH?
+ "WRITETHROUGH":"UNCACHED"),
+ (options & L2CACHE_OPTIONS_MTRR_WRITEPROT_ENABLE? "ENABLE":"DISABLE")
+ );
+
+ return L2CACHE_ERR_OK;
+}
+
+/* Disable an MTRR (memory type range register) entry.
+ * index: MTRR register to clear, 0 .. priv->mtrr-1.
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_mtrr_disable(int index)
+{
+ struct l2cache_priv * priv = l2cachepriv;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ if (index < 0){
+ DBG("Wrong index\n");
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ if (index >= priv->mtrr){
+ DBG("Not enough MTRR registers\n");
+ return L2CACHE_ERR_TOOMANY;
+ }
+
+ /* Clear the entry and turn its access control off */
+ l2cache_reg_mtrr_set(index, 0, 0, L2C_MTRR_ACCESSCONTROL_DISABLE);
+
+ DBG("MTRR[%d] disabled\n", index);
+
+ return L2CACHE_ERR_OK;
+}
+
+/* Print L2CACHE status
+ * DEBUG function: only prints when compiled with DEBUG defined,
+ * otherwise it just checks that the driver is initialized.
+ */
+int l2cache_print(void)
+{
+ struct l2cache_priv * priv = l2cachepriv;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ #ifdef DEBUG
+ int status = l2cache_status();
+ if (status < 0){
+ return status;
+ }
+ printf("L2cache: Ways:%d. Waysize:%d, Linesize:%d, Lines:%d\n"
+ " MTRR:%d, FT:%s, Locked:%d, Split:%s\n"
+ " REPL:%s, WP:%s, EDAC:%s, Enabled:%s\n"
+ " Scrub:%s, S-Delay:%d\n",
+ priv->ways,
+ priv->waysize,
+ priv->linesize,
+ (priv->index * priv->ways),
+ priv->mtrr,
+ (priv->ft_support? "Available":"N/A"),
+ L2CACHE_LOCKED_WAYS(status),
+ (priv->split_support? (L2CACHE_SPLIT_ENABLED(status)?
+ "Enabled":"Disabled"):"N/A"),
+ repl_names[L2CACHE_REPL(status)],
+ (L2CACHE_WRITETHROUGH(status)? "Write-through":"Write-back"),
+ (L2CACHE_EDAC_ENABLED(status)? "Enabled":"Disabled"),
+ (L2CACHE_ENABLED(status)? "Yes":"No"),
+ (L2CACHE_SCRUB_ENABLED(status)? "Enabled":"Disabled"),
+ L2CACHE_SCRUB_DELAY(status)
+ );
+ if (l2cache_ctrl_status()){
+ printf("L2cache enabled.\n");
+ }else{
+ printf("L2cache disabled.\n");
+ }
+ #endif
+ return L2CACHE_ERR_OK;
+}
+
+/* Turn on the L2CACHE split-response feature.
+ * Fails when the driver is not initialized or the hardware lacks
+ * split support. Returns L2CACHE_ERR_OK on success.
+ */
+int l2cache_split_enable(void)
+{
+ struct l2cache_priv *l2c = l2cachepriv;
+
+ if (l2c == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+ if (!l2c->split_support){
+ DBG("L2CACHE does not have split support.\n");
+ return L2CACHE_ERR_ERROR;
+ }
+
+ l2cache_reg_accctrl_split_enable();
+ DBG("L2CACHE split is now enabled\n");
+ return L2CACHE_ERR_OK;
+}
+
+/* Turn off the L2CACHE split-response feature.
+ * Fails when the driver is not initialized or the hardware lacks
+ * split support. Returns L2CACHE_ERR_OK on success.
+ */
+int l2cache_split_disable(void)
+{
+ struct l2cache_priv *l2c = l2cachepriv;
+
+ if (l2c == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+ if (!l2c->split_support){
+ DBG("L2CACHE does not have split support.\n");
+ return L2CACHE_ERR_ERROR;
+ }
+
+ l2cache_reg_accctrl_split_disable();
+ DBG("L2CACHE split is now disabled\n");
+ return L2CACHE_ERR_OK;
+}
+
+/* Enable EDAC protection of the L2CACHE.
+ * flush: flush mode used while the cache is temporarily disabled.
+ * Requires fault-tolerance support in the hardware.
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_edac_enable(int flush)
+{
+ struct l2cache_priv * priv = l2cachepriv;
+ int enabled;
+ int ret;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ if (!priv->ft_support){
+ DBG("L2CACHE does not have EDAC support.\n");
+ return L2CACHE_ERR_ERROR;
+ }
+
+ /* Remember whether the cache was enabled so it can be restored */
+ enabled = l2cache_ctrl_status();
+
+ /* Disable&Flush L2C */
+ ret = l2cache_disable(flush);
+ if (ret < 0){
+ return ret;
+ }
+
+ /* Clear error register */
+ l2cache_reg_error_reset();
+
+ /* Enable EDAC */
+ l2cache_reg_ctrl_edac_set(1);
+
+ /* Enable cache again */
+ if (enabled){
+ l2cache_reg_ctrl_enable();
+ }
+
+ DBG("L2CACHE EDAC is now enabled\n");
+
+ return L2CACHE_ERR_OK;
+}
+
+/* Disable EDAC protection of the L2CACHE.
+ * flush: flush mode used while the cache is temporarily disabled.
+ * Requires fault-tolerance support in the hardware.
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_edac_disable(int flush)
+{
+ struct l2cache_priv * priv = l2cachepriv;
+ int enabled;
+ int ret;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ if (!priv->ft_support){
+ DBG("L2CACHE does not have EDAC support.\n");
+ return L2CACHE_ERR_ERROR;
+ }
+
+ /* Remember whether the cache was enabled so it can be restored */
+ enabled = l2cache_ctrl_status();
+
+ /* Disable&Flush L2C */
+ ret = l2cache_disable(flush);
+ if (ret < 0){
+ return ret;
+ }
+
+ /* Disable EDAC */
+ l2cache_reg_ctrl_edac_set(0);
+
+ /* Clear error register */
+ l2cache_reg_error_reset();
+
+ /* Enable cache again */
+ if (enabled){
+ l2cache_reg_ctrl_enable();
+ }
+
+ DBG("L2CACHE EDAC is now disabled\n");
+
+ return L2CACHE_ERR_OK;
+}
+
+/* Start periodic L2CACHE scrubbing with the given delay between
+ * lines. Requires fault-tolerance (EDAC) support in the hardware.
+ * Returns L2CACHE_ERR_OK on success.
+ */
+int l2cache_scrub_enable(int delay)
+{
+ struct l2cache_priv *l2c = l2cachepriv;
+
+ if (l2c == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+ if (!l2c->ft_support){
+ DBG("L2CACHE does not have EDAC support.\n");
+ return L2CACHE_ERR_ERROR;
+ }
+
+ l2cache_reg_scrub_enable(delay);
+ DBG("L2CACHE Scrub is now enabled\n");
+ return L2CACHE_ERR_OK;
+}
+
+/* Stop periodic L2CACHE scrubbing.
+ * Requires fault-tolerance (EDAC) support in the hardware.
+ * Returns L2CACHE_ERR_OK on success.
+ */
+int l2cache_scrub_disable(void)
+{
+ struct l2cache_priv *l2c = l2cachepriv;
+
+ if (l2c == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+ if (!l2c->ft_support){
+ DBG("L2CACHE does not have EDAC support.\n");
+ return L2CACHE_ERR_ERROR;
+ }
+
+ l2cache_reg_scrub_disable();
+ DBG("L2CACHE Scrub is now disabled\n");
+ return L2CACHE_ERR_OK;
+}
+
+/* Scrub a single L2CACHE line, addressed by way and line index.
+ * Fails when a scrub operation is pending or periodic scrubbing is
+ * currently enabled. Requires fault-tolerance (EDAC) support.
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_scrub_line(int way, int index)
+{
+ struct l2cache_priv * priv = l2cachepriv;
+ unsigned int scrub;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ if (!priv->ft_support){
+ DBG("L2CACHE does not have EDAC support.\n");
+ return L2CACHE_ERR_ERROR;
+ }
+
+ if ((index < 0) || (index >= priv->index)){
+ DBG("L2CACHE only has %d lines.\n", priv->index);
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ if ((way < 0) || (way >= priv->ways)){
+ DBG("L2CACHE only has %d ways.\n", priv->ways);
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ /* Check pending bit */
+ scrub = l2cache_reg_scrub();
+ if ( (scrub & L2C_SCRUB_PEN) || (scrub & L2C_SCRUB_EN) ){
+ DBG("L2CACHE already scrubbing.\n");
+ return L2CACHE_ERR_ERROR;
+ }
+
+ /* Scrub line */
+ l2cache_reg_scrub_line(way, index);
+
+ DBG("L2CACHE Scrub line [%d,%d]\n",way,index);
+
+ return L2CACHE_ERR_OK;
+}
+
+/* Set the L2CACHE write policy to write-through.
+ * flush: flush mode used while the cache is temporarily disabled.
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_writethrough(int flush)
+{
+ struct l2cache_priv * priv = l2cachepriv;
+ int enabled;
+ int ret;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ /* Remember whether the cache was enabled so it can be restored */
+ enabled = l2cache_ctrl_status();
+
+ /* Disable&Flush L2C */
+ ret = l2cache_disable(flush);
+ if (ret < 0){
+ return ret;
+ }
+
+ /* Configure writethrough */
+ l2cache_reg_ctrl_writep(1);
+
+ /* Enable cache again */
+ if (enabled){
+ l2cache_reg_ctrl_enable();
+ }
+
+ DBG("L2CACHE now is writethrough\n");
+
+ return L2CACHE_ERR_OK;
+}
+
+/* Set the L2CACHE write policy to write-back.
+ * flush: flush mode used while the cache is temporarily disabled.
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_writeback(int flush)
+{
+ struct l2cache_priv * priv = l2cachepriv;
+ int enabled;
+ int ret;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ /* Remember whether the cache was enabled so it can be restored */
+ enabled = l2cache_ctrl_status();
+
+ /* Disable&Flush L2C */
+ ret = l2cache_disable(flush);
+ if (ret < 0){
+ return ret;
+ }
+
+ /* Configure writeback */
+ l2cache_reg_ctrl_writep(0);
+
+ /* Enable cache again */
+ if (enabled){
+ l2cache_reg_ctrl_enable();
+ }
+
+ DBG("L2CACHE now is writeback\n");
+
+ return L2CACHE_ERR_OK;
+}
+
+/* Configure the L2CACHE replacement policy.
+ * options: policy in bits 1:0; when the policy is
+ * L2CACHE_OPTIONS_REPL_MASTERIDX_IDX, bits 3:2 select the index way.
+ * flush: flush mode used while the cache is temporarily disabled.
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_replacement(int options, int flush)
+{
+ struct l2cache_priv * priv = l2cachepriv;
+ int enabled;
+ int ret;
+ int way;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ /* Remember whether the cache was enabled so it can be restored */
+ enabled = l2cache_ctrl_status();
+
+ /* Disable&Flush L2C */
+ ret = l2cache_disable(flush);
+ if (ret < 0){
+ return ret;
+ }
+
+ if ( (options & 0x3) == L2CACHE_OPTIONS_REPL_MASTERIDX_IDX){
+ /* Set iway */
+ way = (options >> 2) & 0x3;
+ l2cache_reg_ctrl_iway(way);
+ }
+
+ /* Configure replacement policy */
+ l2cache_reg_ctrl_repl(options & 0x3);
+
+ /* Enable cache again */
+ if (enabled){
+ l2cache_reg_ctrl_enable();
+ }
+
+ DBG("L2CACHE replacement set to %d\n", (options & 0x3));
+
+ return L2CACHE_ERR_OK;
+
+}
+
+/* Register a user ISR for L2CACHE error interrupts.
+ * isr: user callback, must not be NULL.
+ * arg: opaque argument passed to the callback.
+ * options: OR of L2CACHE_INTERRUPT_* sources to unmask.
+ * Interrupts are masked while the handler is installed and only
+ * unmasked once the user ISR is in place.
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_isr_register(l2cache_isr_t isr, void * arg, int options)
+{
+ struct l2cache_priv *priv = l2cachepriv;
+ unsigned int mask;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ if (isr == NULL){
+ DBG("L2CACHE wrong isr.\n");
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ /* Translate option bits into the hardware IRQ mask */
+ mask = 0 |
+ ((options & L2CACHE_INTERRUPT_BACKENDERROR)? L2C_ERROR_IRQM_BCKEND:0) |
+ ((options & L2CACHE_INTERRUPT_WPROTHIT)? L2C_ERROR_IRQM_WPROT:0) |
+ ((options & L2CACHE_INTERRUPT_CORRERROR)? L2C_ERROR_IRQM_CORR:0) |
+ ((options & L2CACHE_INTERRUPT_UNCORRERROR)? L2C_ERROR_IRQM_UNCORR:0);
+
+ /* Clear previous interrupts and mask them*/
+ l2cache_reg_error_reset();
+ l2cache_reg_error_irqmask(0);
+
+ /* First time registering an ISR */
+ if (priv->isr == NULL){
+ /* Install and Enable L2CACHE interrupt handler */
+ drvmgr_interrupt_register(priv->dev, 0, priv->devname, l2cache_isr,
+ priv);
+ }
+
+ /* Install user ISR */
+ priv->isr=isr;
+ priv->isr_arg=arg;
+
+ /* Now it is safe to unmask interrupts */
+ l2cache_reg_error_irqmask(mask);
+
+ return L2CACHE_ERR_OK;
+}
+
+/* Unregister the user ISR for L2CACHE error interrupts.
+ * Masks all sources, uninstalls the low-level handler and clears the
+ * stored callback. Fails when no ISR is currently registered.
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_isr_unregister(void)
+{
+ struct l2cache_priv *priv = l2cachepriv;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ if (priv->isr == NULL){
+ DBG("L2CACHE wrong isr.\n");
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ /* Clear previous interrupts and mask them*/
+ l2cache_reg_error_reset();
+ l2cache_reg_error_irqmask(0);
+
+ /* Uninstall and disable L2CACHE interrupt handler */
+ drvmgr_interrupt_unregister(priv->dev, 0, l2cache_isr, priv);
+
+ /* Uninstall user ISR */
+ priv->isr=NULL;
+ priv->isr_arg=NULL;
+
+ return L2CACHE_ERR_OK;
+}
+
+/* Unmask (enable) additional L2CACHE error interrupt sources.
+ * options: OR of L2CACHE_INTERRUPT_* sources to add to the current
+ * hardware IRQ mask. Requires a registered ISR.
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_interrupt_unmask(int options)
+{
+ struct l2cache_priv *priv = l2cachepriv;
+ unsigned int mask, irq;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ if (priv->isr == NULL){
+ DBG("L2CACHE wrong isr.\n");
+ return L2CACHE_ERR_EINVAL;
+ }
+
+ /* Unmask interrupts in L2CACHE */
+ mask = 0 |
+ ((options & L2CACHE_INTERRUPT_BACKENDERROR)? L2C_ERROR_IRQM_BCKEND:0) |
+ ((options & L2CACHE_INTERRUPT_WPROTHIT)? L2C_ERROR_IRQM_WPROT:0) |
+ ((options & L2CACHE_INTERRUPT_CORRERROR)? L2C_ERROR_IRQM_CORR:0) |
+ ((options & L2CACHE_INTERRUPT_UNCORRERROR)? L2C_ERROR_IRQM_UNCORR:0);
+
+ /* Clear previous interrupts*/
+ l2cache_reg_error_reset();
+
+ /* Get previous mask */
+ irq = ((l2cache_reg_error() & L2C_ERROR_IRQM) >> L2C_ERROR_IRQM_BIT);
+
+ /* Set new mask: previous sources plus the requested ones */
+ l2cache_reg_error_irqmask(irq | mask);
+
+ return L2CACHE_ERR_OK;
+}
+
+/* Mask (disable) L2CACHE error interrupt sources.
+ * options: OR of L2CACHE_INTERRUPT_* sources to remove from the
+ * current hardware IRQ mask.
+ * Returns L2CACHE_ERR_OK or a negative L2CACHE_ERR_* code.
+ */
+int l2cache_interrupt_mask(int options)
+{
+ struct l2cache_priv *priv = l2cachepriv;
+ unsigned int mask, irq;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ /* Mask interrupts in L2CACHE */
+ mask = 0 |
+ ((options & L2CACHE_INTERRUPT_BACKENDERROR)? L2C_ERROR_IRQM_BCKEND:0) |
+ ((options & L2CACHE_INTERRUPT_WPROTHIT)? L2C_ERROR_IRQM_WPROT:0) |
+ ((options & L2CACHE_INTERRUPT_CORRERROR)? L2C_ERROR_IRQM_CORR:0) |
+ ((options & L2CACHE_INTERRUPT_UNCORRERROR)? L2C_ERROR_IRQM_UNCORR:0);
+
+ /* Clear previous interrupts */
+ l2cache_reg_error_reset();
+
+ /* Get previous mask */
+ irq = ((l2cache_reg_error() & L2C_ERROR_IRQM) >> L2C_ERROR_IRQM_BIT);
+
+ /* Set new mask: previous sources minus the requested ones */
+ l2cache_reg_error_irqmask(irq & ~(mask));
+
+ return L2CACHE_ERR_OK;
+}
+
+/* Read and clear the L2CACHE error status.
+ * addr: if non-NULL, receives the failing address (low 5 bits cleared).
+ * status: if non-NULL, receives the raw error status register.
+ * Returns L2CACHE_STATUS_NOERROR, L2CACHE_STATUS_NEWERROR or
+ * L2CACHE_STATUS_MULTIPLEERRORS; the error register is reset when an
+ * error was latched.
+ */
+int l2cache_error_status(uint32_t * addr, uint32_t * status)
+{
+ struct l2cache_priv *priv = l2cachepriv;
+ unsigned int sts;
+ unsigned int erraddr;
+
+ if (priv == NULL){
+ DBG("L2CACHE not initialized.\n");
+ return L2CACHE_ERR_NOINIT;
+ }
+
+ /* Get error register */
+ sts = priv->regs->error_status_control;
+ erraddr = priv->regs->error_addr;
+
+ /* Check if an error occurred */
+ if (sts & L2C_ERROR_VALID){
+ /* Reset error register */
+ l2cache_reg_error_reset();
+
+ /* Update user variables if needed */
+ if (addr != NULL){
+ *addr = (erraddr & ~(0x1f));
+ }
+
+ if(status != NULL){
+ *status = sts;
+ }
+
+ /* Return status */
+ if (sts & L2C_ERROR_MULTI){
+ return L2CACHE_STATUS_MULTIPLEERRORS;
+ }else{
+ return L2CACHE_STATUS_NEWERROR;
+ }
+ }else{
+ /* Return status */
+ return L2CACHE_STATUS_NOERROR;
+ }
+}
diff --git a/bsps/shared/grlib/mem/mctrl.c b/bsps/shared/grlib/mem/mctrl.c
new file mode 100644
index 0000000000..a384547de8
--- /dev/null
+++ b/bsps/shared/grlib/mem/mctrl.c
@@ -0,0 +1,213 @@
+/* Memory Controller driver (FTMTRL, MCTRL)
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * This file contains the driver for the MCTRL memory controller.
+ * The driver sets the memory configuration registers (MCFG1, MCFG2, MCFG3)
+ * during driver initialization
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+/******************* Driver manager interface ***********************/
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+
+#include <grlib/mctrl.h>
+
+#include <grlib/grlib_impl.h>
+
+#define MEMSET(priv, start, c, length) memset((void *)start, c, length)
+
+#define DBG(args...)
+/*#define DBG(args...) printk(args)*/
+
+/* Memory-mapped MCFG register bank (MCFG1..MCFG8) */
+struct mctrl_regs {
+ unsigned int mcfg[8];
+};
+
+struct mctrl_priv;
+
+/* Hardware-specific register-write operations, selected at init time */
+struct mctrl_ops {
+ void (*mcfg_set)(struct mctrl_priv *priv, int index, void *regs, unsigned int regval);
+};
+
+/* Per-device driver state */
+struct mctrl_priv {
+ struct drvmgr_dev *dev;
+ void *regs; /* Base address of the MCFG register bank */
+ unsigned int mcfg[8]; /* The wanted memory configuration */
+ unsigned int configured; /* Determines what mcfgs was configured by user */
+ struct mctrl_ops *ops; /* Operation may depend on hardware */
+};
+
+static int mctrl_init1(struct drvmgr_dev *dev);
+static int mctrl_remove(struct drvmgr_dev *dev);
+
+/* Standard MCFG registers */
+static void mctrl_set_std(struct mctrl_priv *priv, int index, void *regs, unsigned int regval);
+
+/* Default operations: plain register writes */
+struct mctrl_ops std_mctrl_ops =
+{
+ mctrl_set_std
+};
+
+/* Driver manager entry points */
+struct drvmgr_drv_ops mctrl_ops =
+{
+ .init = {mctrl_init1, NULL, NULL, NULL},
+ .remove = mctrl_remove,
+ .info = NULL
+};
+
+/* AMBA PnP device IDs this driver attaches to */
+struct amba_dev_id mctrl_ids[] =
+{
+ {VENDOR_ESA, ESA_MCTRL},
+ {VENDOR_GAISLER, GAISLER_FTMCTRL},
+ {VENDOR_GAISLER, GAISLER_FTSRCTRL},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info mctrl_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_MCTRL_ID, /* Driver ID */
+ "MCTRL_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &mctrl_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+ },
+ &mctrl_ids[0]
+};
+
+/* Register the MCTRL driver with the driver manager. Called once
+ * during startup before bus and device probing. */
+void mctrl_register_drv (void)
+{
+ DBG("Registering MCTRL driver\n");
+ drvmgr_drv_register(&mctrl_drv_info.general);
+}
+
+/* First-stage driver init: locate the register bank, pick hardware
+ * operations, read the user's mcfg1..mcfg8 bus resources, program the
+ * configured registers, and optionally zero-fill ("wash") memory
+ * partitions described by washXStart/washXLength resources.
+ * Returns DRVMGR_OK, DRVMGR_NOMEM or DRVMGR_FAIL.
+ */
+static int mctrl_init1(struct drvmgr_dev *dev)
+{
+ struct mctrl_priv *priv;
+ struct amba_dev_info *ambadev;
+ struct ambapp_core *pnpinfo;
+ int i;
+ char res_name[16];
+ union drvmgr_key_value *value;
+ unsigned int start, length;
+
+ DBG("MCTRL[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+ priv = dev->priv = grlib_calloc(1, sizeof(*priv));
+ if ( !priv )
+ return DRVMGR_NOMEM;
+ priv->dev = dev;
+
+ /* Get device information from AMBA PnP information */
+ ambadev = (struct amba_dev_info *)priv->dev->businfo;
+ if ( ambadev == NULL ) {
+ return DRVMGR_FAIL;
+ }
+ pnpinfo = &ambadev->info;
+ if ( pnpinfo->apb_slv == NULL ) {
+ /* LEON2 PnP systems are missing the APB interface */
+ priv->regs = (void *)0x80000000;
+ } else {
+ priv->regs = (void *)pnpinfo->apb_slv->start;
+ }
+
+ /* Depending on Hardware selection write/read routines.
+ * All currently supported devices use the standard operations. */
+ switch ( pnpinfo->vendor ) {
+ case VENDOR_ESA:
+ switch ( pnpinfo->device ) {
+ case ESA_MCTRL:
+ default:
+ priv->ops = &std_mctrl_ops;
+ }
+ break;
+
+ case VENDOR_GAISLER:
+ switch ( pnpinfo->device ) {
+ case GAISLER_FTMCTRL:
+ case GAISLER_FTSRCTRL:
+ default:
+ priv->ops = &std_mctrl_ops;
+ }
+ break;
+
+ default:
+ priv->ops = &std_mctrl_ops;
+ break;
+ }
+
+ /* Find user configuration from bus resources: keys "mcfg1".."mcfg8" */
+ priv->configured = 0;
+ strcpy(res_name, "mcfgX");
+ for(i=0; i<8; i++) {
+ res_name[4] = '1' + i;
+ value = drvmgr_dev_key_get(priv->dev, res_name, DRVMGR_KT_INT);
+ if ( value ) {
+ priv->mcfg[i] = value->i;
+ priv->configured |= (1<<i);
+ }
+ }
+
+ /* Init hardware registers right away, other devices may depend on it in init2(), also
+ * the washing depend on it.
+ */
+ for ( i=0; i<8; i++) {
+ if ( priv->configured & (1<<i) ) {
+ DBG("Setting MCFG%d to 0x%08x\n", i+1, priv->mcfg[i]);
+ priv->ops->mcfg_set(priv, i, priv->regs, priv->mcfg[i]);
+ }
+ }
+
+ /* Wash memory partitions if user wants: keys "wash0Start".."wash8Start"
+ * paired with "wash0Length".."wash8Length" */
+ for (i=0; i<9; i++) {
+ strcpy(res_name, "washXStart");
+ res_name[4] = '0' + i;
+ value = drvmgr_dev_key_get(priv->dev, res_name, DRVMGR_KT_INT);
+ if ( value ) {
+ start = value->i;
+ strcpy(res_name, "washXLength");
+ res_name[4] = '0' + i;
+ value = drvmgr_dev_key_get(priv->dev, res_name, DRVMGR_KT_INT);
+ if ( value ) {
+ length = value->i;
+
+ if ( length > 0 ) {
+ DBG("MCTRL: Washing 0x%08x-0x%08x\n", start, start+length-1);
+
+ MEMSET(priv, (void *)start, 0, length);
+ }
+ }
+ }
+ }
+
+ return DRVMGR_OK;
+}
+
+/* Driver-manager remove hook; no hardware state needs tearing down. */
+static int mctrl_remove(struct drvmgr_dev *dev)
+{
+ DBG("Removing %s\n", dev->name);
+ return DRVMGR_OK;
+}
+
+/* Standard Operations */
+/* Write one MCFG register: a plain store into the memory-mapped
+ * configuration register bank. */
+static void mctrl_set_std(struct mctrl_priv *priv, int index, void *regs, unsigned int regval)
+{
+ struct mctrl_regs *bank = regs;
+
+ bank->mcfg[index] = regval;
+}
diff --git a/bsps/shared/grlib/net/README b/bsps/shared/grlib/net/README
new file mode 100644
index 0000000000..3ef086f223
--- /dev/null
+++ b/bsps/shared/grlib/net/README
@@ -0,0 +1,7 @@
+A GRETH driver that does not use the Driver Manager is located in
+libchip/network/greth.c. The driver in this directory requires the
+driver manager.
+
+network_interface_add is used to assign IP/NETMASK and MAC address to
+GRETH interfaces dynamically according to in which order devices are
+registered. The function takes the settings from the user defined
+interface_configs[] array, defined in the project configuration.
diff --git a/bsps/shared/grlib/net/greth.c b/bsps/shared/grlib/net/greth.c
new file mode 100644
index 0000000000..30eb4cc8a9
--- /dev/null
+++ b/bsps/shared/grlib/net/greth.c
@@ -0,0 +1,1655 @@
+/*
+ * Gaisler Research ethernet MAC driver
+ * adapted from Opencores driver by Marko Isomaki
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ *
+ * 2008-12-10, Converted to driver manager and added support for
+ * multiple GRETH cores. <daniel@gaisler.com>
+ * 2007-09-07, Ported GBIT support from 4.6.5
+ */
+
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems.h>
+#define CPU_U32_FIX
+#include <bsp.h>
+
+#ifdef GRETH_SUPPORTED
+
+#include <inttypes.h>
+#include <errno.h>
+#include <rtems/bspIo.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <rtems/error.h>
+#include <rtems/rtems_bsdnet.h>
+
+#include <grlib/greth.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/ambapp.h>
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#ifdef malloc
+#undef malloc
+#endif
+#ifdef free
+#undef free
+#endif
+
+#include <grlib/grlib_impl.h>
+
+#if defined(__m68k__)
+extern m68k_isr_entry set_vector( rtems_isr_entry, rtems_vector_number, int );
+#else
+extern rtems_isr_entry set_vector( rtems_isr_entry, rtems_vector_number, int );
+#endif
+
+
+/* #define GRETH_DEBUG */
+
+#ifdef GRETH_DEBUG
+#define DBG(args...) printk(args)
+#else
+#define DBG(args...)
+#endif
+
+/* #define GRETH_DEBUG_MII */
+
+#ifdef GRETH_DEBUG_MII
+#define MIIDBG(args...) printk(args)
+#else
+#define MIIDBG(args...)
+#endif
+
+#ifdef CPU_U32_FIX
+extern void ipalign(struct mbuf *m);
+#endif
+
+/* Used when reading from memory written by GRETH DMA unit */
+#ifndef GRETH_MEM_LOAD
+#define GRETH_MEM_LOAD(addr) (*(volatile unsigned int *)(addr))
+#endif
+
+/*
+ * Number of OCs supported by this driver
+ */
+#define NOCDRIVER 1
+
+/*
+ * Receive buffer size -- Allow for a full ethernet packet including CRC
+ */
+#define RBUF_SIZE 1518
+
+#define ET_MINLEN 64 /* minimum message length */
+
+/*
+ * RTEMS event used by interrupt handler to signal driver tasks.
+ * This must not be any of the events used by the network task synchronization.
+ */
+#define INTERRUPT_EVENT RTEMS_EVENT_1
+
+/*
+ * RTEMS event used to start transmit daemon.
+ * This must not be the same as INTERRUPT_EVENT.
+ */
+#define START_TRANSMIT_EVENT RTEMS_EVENT_2
+
+ /* event to send when tx buffers become available */
+#define GRETH_TX_WAIT_EVENT RTEMS_EVENT_3
+
+#if (MCLBYTES < RBUF_SIZE)
+# error "Driver must have MCLBYTES > RBUF_SIZE"
+#endif
+
+/* 4s Autonegotiation Timeout */
+#ifndef GRETH_AUTONEGO_TIMEOUT_MS
+#define GRETH_AUTONEGO_TIMEOUT_MS 4000
+#endif
+const struct timespec greth_tan = {
+ GRETH_AUTONEGO_TIMEOUT_MS/1000,
+ (GRETH_AUTONEGO_TIMEOUT_MS % 1000) * 1000000
+};
+
+/* For optimizing the autonegotiation time */
+#define GRETH_AUTONEGO_PRINT_TIME
+
+/* Ethernet buffer descriptor */
+
+typedef struct _greth_rxtxdesc {
+ volatile uint32_t ctrl; /* Length and status */
+ uint32_t *addr; /* Buffer pointer */
+} greth_rxtxdesc;
+
+
+/*
+ * Per-device data
+ */
+struct greth_softc
+{
+
+    struct arpcom arpcom;        /* BSD interface data; holds the MAC address (ac_enaddr) */
+    struct drvmgr_dev *dev;      /* Driver manager device */
+    char devName[32];            /* Device name string */
+
+    greth_regs *regs;            /* Memory-mapped GRETH core registers */
+    int minor;                   /* Device instance number */
+    int phyaddr;                 /* PHY Address configured by user (or -1 to autodetect) */
+    unsigned int edcl_dis;       /* Control register bits used to disable EDCL on start */
+    int greth_rst;               /* Non-zero: reset core during init/stop */
+
+    int acceptBroadcast;         /* NOTE(review): not referenced in this chunk - verify use */
+    rtems_id daemonTid;          /* Daemon task id; 0 until greth_init spawns it */
+
+    /* Descriptor ring state */
+    unsigned int tx_ptr;         /* Next TX descriptor to fill */
+    unsigned int tx_dptr;        /* Next TX descriptor to reap (GBIT MAC) */
+    unsigned int tx_cnt;         /* TX descriptors currently in use (GBIT MAC) */
+    unsigned int rx_ptr;         /* Next RX descriptor to check */
+    unsigned int txbufs;         /* Number of TX descriptors */
+    unsigned int rxbufs;         /* Number of RX descriptors */
+    greth_rxtxdesc *txdesc;      /* TX descriptor table (CPU address) */
+    greth_rxtxdesc *rxdesc;      /* RX descriptor table (CPU address) */
+    unsigned int txdesc_remote;  /* TX descriptor table address as seen by GRETH DMA */
+    unsigned int rxdesc_remote;  /* RX descriptor table address as seen by GRETH DMA */
+    struct mbuf **rxmbuf;        /* mbuf attached to each RX descriptor */
+    struct mbuf **txmbuf;        /* mbuf attached to each TX descriptor (GBIT MAC) */
+    rtems_vector_number vector;
+
+    /* TX descriptor interrupt generation */
+    int tx_int_gen;              /* Request a TX IRQ every this many descriptors */
+    int tx_int_gen_cur;          /* Countdown until the next TX IRQ request */
+    struct mbuf *next_tx_mbuf;   /* Chain dequeued from stack but not yet queued to HW */
+    int max_fragsize;            /* Longest mbuf chain seen (statistics) */
+
+    /*Status*/
+    struct phy_device_info phydev;  /* PHY identity and negotiated abilities */
+    int phy_read_access;         /* MDIO read counter (diagnostics) */
+    int phy_write_access;        /* MDIO write counter (diagnostics) */
+    int fd;                      /* Full duplex */
+    int sp;                      /* 100 Mbit speed selected */
+    int gb;                      /* 1000 Mbit speed selected */
+    int gbit_mac;                /* MAC is GBIT capable */
+    int auto_neg;                /* 1 = auto-negotiation used, -1 = it timed out */
+    unsigned int advmodes; /* advertise ethernet speed modes. 0 = all modes. */
+    struct timespec auto_neg_time;  /* Time spent in auto-negotiation */
+    int mc_available;            /* Hardware supports multicast filtering */
+
+    /*
+     * Statistics
+     */
+    unsigned long rxInterrupts;
+
+    unsigned long rxPackets;
+    unsigned long rxLengthError;
+    unsigned long rxNonOctet;
+    unsigned long rxBadCRC;
+    unsigned long rxOverrun;
+
+    unsigned long txInterrupts;
+
+    unsigned long txDeferred;
+    unsigned long txHeartbeat;
+    unsigned long txLateCollision;
+    unsigned long txRetryLimit;
+    unsigned long txUnderrun;
+
+    /* Spin-lock ISR protection */
+    SPIN_DECLARE(devlock);
+};
+
+int greth_process_tx_gbit(struct greth_softc *sc);
+int greth_process_tx(struct greth_softc *sc);
+
+/* Allocate a zeroed buffer of sz bytes aligned to "alignment" bytes
+ * (alignment must be a power of two). The returned pointer is offset
+ * into the underlying allocation and must never be passed to free();
+ * the memory lives for the lifetime of the driver.
+ * Returns NULL when the underlying allocation fails.
+ */
+static char *almalloc(int sz, int alignment)
+{
+    char *tmp;
+    tmp = grlib_calloc(1, sz + (alignment-1));
+    if (tmp == NULL)
+        return NULL;
+    /* Round up to the next alignment boundary. Use uintptr_t so the
+     * pointer is not truncated on targets where pointers are wider
+     * than int, and add (alignment-1) - not alignment - so an already
+     * aligned allocation is not shifted past its reserved slack.
+     */
+    tmp = (char *) (((uintptr_t)tmp + (alignment-1)) & ~((uintptr_t)alignment - 1));
+    return(tmp);
+}
+
+/* GRETH interrupt handler. Runs in interrupt context: acknowledges the
+ * interrupt causes, masks the source that fired, and wakes the daemon
+ * task which performs the actual RX/TX processing (and re-enables the
+ * respective interrupt when done).
+ */
+static void greth_interrupt (void *arg)
+{
+    uint32_t status;
+    uint32_t ctrl;
+    rtems_event_set events = 0;
+    struct greth_softc *greth = arg;
+    SPIN_ISR_IRQFLAGS(flags);
+
+    /* read and clear interrupt cause */
+    status = greth->regs->status;
+    greth->regs->status = status;
+
+    SPIN_LOCK(&greth->devlock, flags);
+    ctrl = greth->regs->ctrl;
+
+    /* Frame received? Mask RX interrupts until the daemon has drained
+     * the RX ring (it sets GRETH_CTRL_RXIRQ again itself).
+     */
+    if ((ctrl & GRETH_CTRL_RXIRQ) && (status & (GRETH_STATUS_RXERR | GRETH_STATUS_RXIRQ)))
+    {
+        greth->rxInterrupts++;
+        /* Stop RX-Error and RX-Packet interrupts */
+        ctrl &= ~GRETH_CTRL_RXIRQ;
+        events |= INTERRUPT_EVENT;
+    }
+
+    /* TX descriptors freed? Mask TX interrupts; the daemon re-enables
+     * them when it runs out of descriptors again.
+     */
+    if ( (ctrl & GRETH_CTRL_TXIRQ) && (status & (GRETH_STATUS_TXERR | GRETH_STATUS_TXIRQ)) )
+    {
+        greth->txInterrupts++;
+        ctrl &= ~GRETH_CTRL_TXIRQ;
+        events |= GRETH_TX_WAIT_EVENT;
+    }
+
+    /* Clear interrupt sources */
+    greth->regs->ctrl = ctrl;
+    SPIN_UNLOCK(&greth->devlock, flags);
+
+    /* Send the event(s) */
+    if ( events )
+        rtems_bsdnet_event_send(greth->daemonTid, events);
+}
+
+/* Read a PHY register over the MDIO interface. Busy-waits for the MDIO
+ * state machine before and after starting the access. Returns the
+ * 16-bit register value, or 0xffff when the access failed (LINKFAIL).
+ */
+static uint32_t read_mii(struct greth_softc *sc, uint32_t phy_addr, uint32_t reg_addr)
+{
+    sc->phy_read_access++;
+    /* Wait for any previous MDIO operation to complete */
+    while (sc->regs->mdio_ctrl & GRETH_MDIO_BUSY) {}
+    /* Start the read: PHY address in bits 15:11, register in bits 10:6 */
+    sc->regs->mdio_ctrl = (phy_addr << 11) | (reg_addr << 6) | GRETH_MDIO_READ;
+    while (sc->regs->mdio_ctrl & GRETH_MDIO_BUSY) {}
+    if (!(sc->regs->mdio_ctrl & GRETH_MDIO_LINKFAIL)) {
+        MIIDBG("greth%d: mii read[%d] OK to %" PRIx32 ".%" PRIx32
+               " (0x%08" PRIx32 ",0x%08" PRIx32 ")\n",
+               sc->minor, sc->phy_read_access, phy_addr, reg_addr,
+               sc->regs->ctrl, sc->regs->mdio_ctrl);
+        /* Read data is returned in the upper half of mdio_ctrl */
+        return((sc->regs->mdio_ctrl >> 16) & 0xFFFF);
+    } else {
+        printf("greth%d: mii read[%d] failed to %" PRIx32 ".%" PRIx32
+               " (0x%08" PRIx32 ",0x%08" PRIx32 ")\n",
+               sc->minor, sc->phy_read_access, phy_addr, reg_addr,
+               sc->regs->ctrl, sc->regs->mdio_ctrl);
+        return (0xffff);
+    }
+}
+
+/* Write a PHY register over the MDIO interface. Busy-waits for the MDIO
+ * state machine before and after starting the access; reports failed
+ * accesses (LINKFAIL) via printf.
+ * Fix: the diagnostic messages previously contained a duplicated word
+ * ("to to").
+ */
+static void write_mii(struct greth_softc *sc, uint32_t phy_addr, uint32_t reg_addr, uint32_t data)
+{
+    sc->phy_write_access++;
+    /* Wait for any previous MDIO operation to complete */
+    while (sc->regs->mdio_ctrl & GRETH_MDIO_BUSY) {}
+    /* Data in bits 31:16, PHY address in 15:11, register in 10:6 */
+    sc->regs->mdio_ctrl =
+        ((data & 0xFFFF) << 16) | (phy_addr << 11) | (reg_addr << 6) | GRETH_MDIO_WRITE;
+    while (sc->regs->mdio_ctrl & GRETH_MDIO_BUSY) {}
+    if (!(sc->regs->mdio_ctrl & GRETH_MDIO_LINKFAIL)) {
+        MIIDBG("greth%d: mii write[%d] OK to %" PRIx32 ".%" PRIx32
+               "(0x%08" PRIx32 ",0x%08" PRIx32 ")\n",
+               sc->minor, sc->phy_write_access, phy_addr, reg_addr,
+               sc->regs->ctrl, sc->regs->mdio_ctrl);
+    } else {
+        printf("greth%d: mii write[%d] failed to %" PRIx32 ".%" PRIx32
+               " (0x%08" PRIx32 ",0x%08" PRIx32 ")\n",
+               sc->minor, sc->phy_write_access, phy_addr, reg_addr,
+               sc->regs->ctrl, sc->regs->mdio_ctrl);
+    }
+}
+
+/* Print a one-time summary of the attached driver: PHY identity, the
+ * operating mode that was selected and (optionally) how long the
+ * auto-negotiation took.
+ */
+static void print_init_info(struct greth_softc *sc)
+{
+    const char *rate;
+
+    printf("greth: driver attached\n");
+    if ( sc->auto_neg == -1 ){
+        printf("Auto negotiation timed out. Selecting default config\n");
+    }
+    printf("**** PHY ****\n");
+    printf("Vendor: %x   Device: %x   Revision: %d\n",sc->phydev.vendor, sc->phydev.device, sc->phydev.rev);
+    printf("Current Operating Mode: ");
+    if (sc->gb)
+        rate = "1000 Mbit ";
+    else if (sc->sp)
+        rate = "100 Mbit ";
+    else
+        rate = "10 Mbit ";
+    printf("%s", rate);
+    printf("%s", sc->fd ? "Full Duplex\n" : "Half Duplex\n");
+#ifdef GRETH_AUTONEGO_PRINT_TIME
+    if ( sc->auto_neg ) {
+        printf("Autonegotiation Time: %" PRIdMAX "ms\n",
+               (intmax_t)sc->auto_neg_time.tv_sec * 1000 +
+               sc->auto_neg_time.tv_nsec / 1000000);
+    }
+#endif
+}
+
+/*
+ * Generates the hash words based on CRCs of the enabled MAC addresses that are
+ * allowed to be received. The allowed MAC addresses are maintained in a linked
+ * "multi-cast" list available in the arpcom structure.
+ *
+ * Returns the number of MAC addresses that were processed (in the list)
+ */
+static int
+greth_mac_filter_calc(struct arpcom *ac, uint32_t *msb, uint32_t *lsb)
+{
+    struct ether_multistep iter;
+    struct ether_multi *mc;
+    int naddr = 0;
+    uint32_t crcval, bit, hash[2] = {0, 0};
+
+    /* Fold the CRC of every multicast address on the interface's list
+     * into the two 32-bit hash filter words.
+     */
+    for (ETHER_FIRST_MULTI(iter, ac, mc); mc != NULL; ETHER_NEXT_MULTI(iter, mc)) {
+        crcval = ether_crc32_be((uint8_t *)mc->enm_addrlo, 6);
+        bit = crcval & 0x3f;
+        hash[bit >> 5] |= (1 << (bit & 0x1F));
+        naddr++;
+    }
+
+    /* Only touch the output words when at least one address was found */
+    if (naddr > 0) {
+        *msb = hash[1];
+        *lsb = hash[0];
+    }
+
+    return naddr;
+}
+
+/*
+ * Program the hardware MAC filter (promiscuous / multicast hash)
+ * according to the interface flags. Returns EINVAL when multicast
+ * filtering is requested but not supported by the hardware.
+ */
+static int greth_mac_filter_set(struct greth_softc *sc)
+{
+    struct ifnet *ifp = &sc->arpcom.ac_if;
+    uint32_t msb, lsb, ctrl_bits;
+    SPIN_IRQFLAGS(flags);
+
+    msb = 0;
+    lsb = 0;
+    ctrl_bits = 0;
+    if (ifp->if_flags & IFF_PROMISC) {
+        /* Promiscuous mode accepts everything - no multicast filter needed */
+        ctrl_bits |= GRETH_CTRL_PRO;
+    } else if (!sc->mc_available) {
+        return EINVAL; /* no hardware support for multicast filtering. */
+    } else if (ifp->if_flags & IFF_ALLMULTI) {
+        /* Accept every multicast address */
+        ctrl_bits |= GRETH_CTRL_MCE;
+        msb = 0xFFFFFFFF;
+        lsb = 0xFFFFFFFF;
+    } else if (greth_mac_filter_calc(&sc->arpcom, &msb, &lsb) > 0) {
+        /* Hash-filter the configured multicast addresses */
+        ctrl_bits |= GRETH_CTRL_MCE;
+    }
+    /* else: multicast list empty - leave multicast reception disabled */
+
+    SPIN_LOCK_IRQ(&sc->devlock, flags);
+    sc->regs->ht_msb = msb;
+    sc->regs->ht_lsb = lsb;
+    sc->regs->ctrl = (sc->regs->ctrl & ~(GRETH_CTRL_PRO | GRETH_CTRL_MCE)) |
+                     ctrl_bits;
+    SPIN_UNLOCK_IRQ(&sc->devlock, flags);
+
+    return 0;
+}
+
+/*
+ * Initialize the ethernet hardware: reset the MAC, reset and configure
+ * the PHY (running auto-negotiation with a timeout when the PHY
+ * supports it), allocate descriptor rings and RX buffers, program the
+ * MAC address, install the interrupt handler and enable the receiver.
+ */
+static void
+greth_initialize_hardware (struct greth_softc *sc)
+{
+    struct mbuf *m;
+    int i;
+    int phyaddr;
+    int phyctrl;
+    int phystatus;
+    int tmp1;
+    int tmp2;
+    struct timespec tstart, tnow;
+    greth_regs *regs;
+    unsigned int advmodes, speed;
+
+    regs = sc->regs;
+
+    /* Reset the controller. */
+    sc->rxInterrupts = 0;
+    sc->rxPackets = 0;
+
+    if (sc->greth_rst) {
+        /* Reset ON */
+        regs->ctrl = GRETH_CTRL_RST | GRETH_CTRL_DD | GRETH_CTRL_ED;
+        /* Bounded busy-wait for the reset bit to self-clear */
+        for (i = 0; i<100 && (regs->ctrl & GRETH_CTRL_RST); i++)
+            ;
+        speed = 0; /* probe mode below */
+    } else {
+        /* inherit EDCL mode for now */
+        speed = sc->regs->ctrl & (GRETH_CTRL_GB|GRETH_CTRL_SP|GRETH_CTRL_FULLD);
+    }
+    /* Reset OFF and RX/TX DMA OFF. SW do PHY Init */
+    regs->ctrl = GRETH_CTRL_DD | GRETH_CTRL_ED | speed;
+
+    /* Check if mac is gbit capable */
+    sc->gbit_mac = (regs->ctrl >> 27) & 1;
+
+    /* Get the phy address which assumed to have been set
+       correctly with the reset value in hardware */
+    if ( sc->phyaddr == -1 ) {
+        phyaddr = (regs->mdio_ctrl >> 11) & 0x1F;
+    } else {
+        phyaddr = sc->phyaddr;
+    }
+    sc->phy_read_access = 0;
+    sc->phy_write_access = 0;
+
+    /* As I understand the PHY comes back to a good default state after
+     * Power-down or Reset, so we do both just in case. Power-down bit should
+     * be cleared.
+     * Wait for old reset (if asserted by boot loader) to complete, otherwise
+     * power-down instruction might not have any effect.
+     */
+    while (read_mii(sc, phyaddr, 0) & 0x8000) {}
+    write_mii(sc, phyaddr, 0, 0x0800); /* Power-down */
+    write_mii(sc, phyaddr, 0, 0x0000); /* Power-Up */
+    write_mii(sc, phyaddr, 0, 0x8000); /* Reset */
+
+    /* We wait about 30ms */
+    rtems_task_wake_after(rtems_clock_get_ticks_per_second()/32);
+
+    /* Wait for reset to complete and get default values */
+    while ((phyctrl = read_mii(sc, phyaddr, 0)) & 0x8000) {}
+
+    /* Set up PHY advertising modes for auto-negotiation */
+    advmodes = sc->advmodes;
+    if (advmodes == 0)
+        advmodes = GRETH_ADV_ALL;
+    if (!sc->gbit_mac)
+        advmodes &= ~(GRETH_ADV_1000_FD | GRETH_ADV_1000_HD);
+
+    /* Enable/Disable GBit auto-neg advertisement so that the link partner
+     * knows whether we have GBit capability. The MAC may not support
+     * Gbit even though PHY does...
+     */
+    phystatus = read_mii(sc, phyaddr, 1);
+    if (phystatus & 0x0100) {
+        tmp1 = read_mii(sc, phyaddr, 9);
+        tmp1 &= ~0x300;
+        if (advmodes & GRETH_ADV_1000_FD)
+            tmp1 |= 0x200;
+        if (advmodes & GRETH_ADV_1000_HD)
+            tmp1 |= 0x100;
+        write_mii(sc, phyaddr, 9, tmp1);
+    }
+
+    /* Optionally limit the 10/100 modes as configured by user */
+    tmp1 = read_mii(sc, phyaddr, 4);
+    tmp1 &= ~0x1e0;
+    if (advmodes & GRETH_ADV_100_FD)
+        tmp1 |= 0x100;
+    if (advmodes & GRETH_ADV_100_HD)
+        tmp1 |= 0x080;
+    if (advmodes & GRETH_ADV_10_FD)
+        tmp1 |= 0x040;
+    if (advmodes & GRETH_ADV_10_HD)
+        tmp1 |= 0x020;
+    write_mii(sc, phyaddr, 4, tmp1);
+
+    /* If autonegotiation implemented we start it */
+    if (phystatus & 0x0008) {
+        write_mii(sc, phyaddr, 0, phyctrl | 0x1200);
+        phyctrl = read_mii(sc, phyaddr, 0);
+    }
+
+    /* Check if PHY is autoneg capable and then determine operating mode,
+       otherwise force it to 10 Mbit halfduplex */
+    sc->gb = 0;
+    sc->fd = 0;
+    sc->sp = 0;
+    sc->auto_neg = 0;
+    _Timespec_Set_to_zero(&sc->auto_neg_time);
+    if ((phyctrl >> 12) & 1) {
+        /* wait for auto negotiation to complete */
+        sc->auto_neg = 1;
+        if (rtems_clock_get_uptime(&tstart) != RTEMS_SUCCESSFUL)
+            printk("rtems_clock_get_uptime failed\n");
+        while (!(((phystatus = read_mii(sc, phyaddr, 1)) >> 5) & 1)) {
+            if (rtems_clock_get_uptime(&tnow) != RTEMS_SUCCESSFUL)
+                printk("rtems_clock_get_uptime failed\n");
+            _Timespec_Subtract(&tstart, &tnow, &sc->auto_neg_time);
+            if (_Timespec_Greater_than(&sc->auto_neg_time, &greth_tan)) {
+                /* Timed out: fall back to the PHY's manual settings */
+                sc->auto_neg = -1; /* Failed */
+                tmp1 = read_mii(sc, phyaddr, 0);
+                sc->gb = ((phyctrl >> 6) & 1) && !((phyctrl >> 13) & 1);
+                sc->sp = !((phyctrl >> 6) & 1) && ((phyctrl >> 13) & 1);
+                sc->fd = (phyctrl >> 8) & 1;
+                goto auto_neg_done;
+            }
+            /* Wait about 30ms, time is PHY dependent */
+            rtems_task_wake_after(rtems_clock_get_ticks_per_second()/32);
+        }
+        /* Negotiation done: pick the best mode common to our advertised
+         * modes and the link partner's abilities.
+         */
+        sc->phydev.adv = read_mii(sc, phyaddr, 4);
+        sc->phydev.part = read_mii(sc, phyaddr, 5);
+        if ((phystatus >> 8) & 1) {
+            sc->phydev.extadv = read_mii(sc, phyaddr, 9);
+            sc->phydev.extpart = read_mii(sc, phyaddr, 10);
+            if ( (sc->phydev.extadv & GRETH_MII_EXTADV_1000HD) &&
+                 (sc->phydev.extpart & GRETH_MII_EXTPRT_1000HD)) {
+                sc->gb = 1;
+                sc->fd = 0;
+            }
+            if ( (sc->phydev.extadv & GRETH_MII_EXTADV_1000FD) &&
+                 (sc->phydev.extpart & GRETH_MII_EXTPRT_1000FD)) {
+                sc->gb = 1;
+                sc->fd = 1;
+            }
+        }
+        if ((sc->gb == 0) || ((sc->gb == 1) && (sc->gbit_mac == 0))) {
+            if ( (sc->phydev.adv & GRETH_MII_100TXFD) &&
+                 (sc->phydev.part & GRETH_MII_100TXFD)) {
+                sc->sp = 1;
+                sc->fd = 1;
+            } else if ( (sc->phydev.adv & GRETH_MII_100TXHD) &&
+                        (sc->phydev.part & GRETH_MII_100TXHD)) {
+                sc->sp = 1;
+                sc->fd = 0;
+            } else if ( (sc->phydev.adv & GRETH_MII_10FD) &&
+                        (sc->phydev.part & GRETH_MII_10FD)) {
+                sc->fd = 1;
+            }
+        }
+    }
+auto_neg_done:
+    sc->phydev.vendor = 0;
+    sc->phydev.device = 0;
+    sc->phydev.rev = 0;
+    phystatus = read_mii(sc, phyaddr, 1);
+
+    /* Read out PHY info if extended registers are available */
+    if (phystatus & 1) {
+        tmp1 = read_mii(sc, phyaddr, 2);
+        tmp2 = read_mii(sc, phyaddr, 3);
+
+        sc->phydev.vendor = (tmp1 << 6) | ((tmp2 >> 10) & 0x3F);
+        sc->phydev.rev = tmp2 & 0xF;
+        sc->phydev.device = (tmp2 >> 4) & 0x3F;
+    }
+
+    /* Force to 10 mbit half duplex if the 10/100 MAC is used with a 1000 PHY */
+    if (((sc->gb) && !(sc->gbit_mac)) || !((phyctrl >> 12) & 1)) {
+        write_mii(sc, phyaddr, 0, sc->sp << 13);
+
+        /* check if marvell 88EE1111 PHY. Needs special reset handling */
+        if ((phystatus & 1) && (sc->phydev.vendor == 0x005043) &&
+            (sc->phydev.device == 0x0C))
+            write_mii(sc, phyaddr, 0, 0x8000);
+
+        sc->gb = 0;
+        sc->sp = 0;
+        sc->fd = 0;
+    }
+    /* Wait for any PHY reset issued above to complete */
+    while ((read_mii(sc, phyaddr, 0)) & 0x8000) {}
+
+    if (sc->greth_rst) {
+        /* Reset ON */
+        regs->ctrl = GRETH_CTRL_RST | GRETH_CTRL_DD | GRETH_CTRL_ED;
+        for (i = 0; i < 100 && (regs->ctrl & GRETH_CTRL_RST); i++)
+            ;
+    }
+    /* Reset OFF. Set mode matching PHY settings. */
+    speed = (sc->gb << 8) | (sc->sp << 7) | (sc->fd << 4);
+    regs->ctrl = GRETH_CTRL_DD | sc->edcl_dis | speed;
+
+    /* Initialize rx/tx descriptor table pointers. Due to alignment we
+     * always allocate maximum table size.
+     */
+    sc->txdesc = (greth_rxtxdesc *) almalloc(0x800, 0x400);
+    sc->rxdesc = (greth_rxtxdesc *) &sc->txdesc[128];
+    sc->tx_ptr = 0;
+    sc->tx_dptr = 0;
+    sc->tx_cnt = 0;
+    sc->rx_ptr = 0;
+
+    /* Translate the Descriptor DMA table base address into an address that
+     * the GRETH core can understand
+     */
+    drvmgr_translate_check(
+        sc->dev,
+        CPUMEM_TO_DMA,
+        (void *)sc->txdesc,
+        (void **)&sc->txdesc_remote,
+        0x800);
+    sc->rxdesc_remote = sc->txdesc_remote + 0x400;
+    regs->txdesc = (int) sc->txdesc_remote;
+    regs->rxdesc = (int) sc->rxdesc_remote;
+
+    sc->rxmbuf = grlib_calloc(sc->rxbufs, sizeof(*sc->rxmbuf));
+    sc->txmbuf = grlib_calloc(sc->txbufs, sizeof(*sc->txmbuf));
+
+    /* The 10/100 MAC copies frames into preallocated DMA buffers; the
+     * GBIT MAC transmits directly from the mbufs (see sendpacket_gbit),
+     * so no TX buffers are allocated in that case.
+     */
+    for (i = 0; i < sc->txbufs; i++)
+    {
+        sc->txdesc[i].ctrl = 0;
+        if (!(sc->gbit_mac)) {
+            drvmgr_translate_check(
+                sc->dev,
+                CPUMEM_TO_DMA,
+                (void *)grlib_malloc(GRETH_MAXBUF_LEN),
+                (void **)&sc->txdesc[i].addr,
+                GRETH_MAXBUF_LEN);
+        }
+#ifdef GRETH_DEBUG
+        /* printf("TXBUF: %08x\n", (int) sc->txdesc[i].addr); */
+#endif
+    }
+    /* Attach a fresh mbuf cluster to every RX descriptor and arm it */
+    for (i = 0; i < sc->rxbufs; i++)
+    {
+        MGETHDR (m, M_WAIT, MT_DATA);
+        MCLGET (m, M_WAIT);
+        if (sc->gbit_mac)
+            m->m_data += 2;
+        m->m_pkthdr.rcvif = &sc->arpcom.ac_if;
+        sc->rxmbuf[i] = m;
+        drvmgr_translate_check(
+            sc->dev,
+            CPUMEM_TO_DMA,
+            (void *)mtod(m, uint32_t *),
+            (void **)&sc->rxdesc[i].addr,
+            GRETH_MAXBUF_LEN);
+        sc->rxdesc[i].ctrl = GRETH_RXD_ENABLE | GRETH_RXD_IRQ;
+#ifdef GRETH_DEBUG
+/* printf("RXBUF: %08x\n", (int) sc->rxdesc[i].addr); */
+#endif
+    }
+    sc->rxdesc[sc->rxbufs - 1].ctrl |= GRETH_RXD_WRAP;
+
+    /* set ethernet address. */
+    regs->mac_addr_msb =
+        sc->arpcom.ac_enaddr[0] << 8 | sc->arpcom.ac_enaddr[1];
+    regs->mac_addr_lsb =
+        sc->arpcom.ac_enaddr[2] << 24 | sc->arpcom.ac_enaddr[3] << 16 |
+        sc->arpcom.ac_enaddr[4] << 8 | sc->arpcom.ac_enaddr[5];
+
+    /* Request a TX IRQ on every descriptor for small rings, otherwise
+     * only every txbufs/2 descriptors to limit the interrupt rate.
+     * NOTE(review): the condition tests rxbufs but configures the TX
+     * interrupt rate - possibly intended to be txbufs; verify.
+     */
+    if ( sc->rxbufs < 10 ) {
+        sc->tx_int_gen = sc->tx_int_gen_cur = 1;
+    }else{
+        sc->tx_int_gen = sc->tx_int_gen_cur = sc->txbufs/2;
+    }
+    sc->next_tx_mbuf = NULL;
+
+    if ( !sc->gbit_mac )
+        sc->max_fragsize = 1;
+
+    /* clear all pending interrupts */
+    regs->status = 0xffffffff;
+
+    /* install interrupt handler */
+    drvmgr_interrupt_register(sc->dev, 0, "greth", greth_interrupt, sc);
+
+    /* Enable receiver and RX interrupts; TX is enabled on demand */
+    regs->ctrl |= GRETH_CTRL_RXEN | GRETH_CTRL_RXIRQ;
+
+    print_init_info(sc);
+}
+
+#ifdef CPU_U32_FIX
+
+/*
+ * Routine to align the received packet so that the ip header
+ * is on a 32-bit boundary. Necessary for cpu's that do not
+ * allow unaligned loads and stores and when the 32-bit DMA
+ * mode is used.
+ *
+ * Transfers are done on word basis to avoid possibly slow byte
+ * and half-word writes.
+ */
+
+void ipalign(struct mbuf *m)
+{
+    unsigned int *first, *last, data;
+    unsigned int tmp = 0;
+
+    /* Only act when the data starts on a half-word (not word) boundary
+     * and the mbuf is non-empty.
+     */
+    if ((((int) m->m_data) & 2) && (m->m_len)) {
+        /* Word-aligned bounds covering the whole packet */
+        last = (unsigned int *) ((((int) m->m_data) + m->m_len + 8) & ~3);
+        first = (unsigned int *) (((int) m->m_data) & ~3);
+        /* tmp = *first << 16; */
+        asm volatile (" lda [%1] 1, %0\n" : "=r"(tmp) : "r"(first) );
+        tmp = tmp << 16;
+        first++;
+        /* Shift the packet two bytes forward, one word at a time,
+         * carrying the lower half-word of each word into the next.
+         */
+        do {
+            /* When snooping is not available the LDA instruction must be used
+             * to avoid the cache to return an illegal value.
+             ** Load with forced cache miss
+             * data = *first;
+             */
+            asm volatile (" lda [%1] 1, %0\n" : "=r"(data) : "r"(first) );
+            *first = tmp | (data >> 16);
+            tmp = data << 16;
+            first++;
+        } while (first <= last);
+
+        /* Data moved two bytes up; advance m_data to match */
+        m->m_data = (caddr_t)(((int) m->m_data) + 2);
+    }
+}
+#endif
+
+/*
+ * Driver daemon task. Woken by the interrupt handler; reaps transmitted
+ * packets (TX event) and passes received frames to the network stack
+ * (RX event), re-arming descriptors and interrupts as it goes.
+ */
+static void
+greth_Daemon (void *arg)
+{
+    struct ether_header *eh;
+    struct greth_softc *dp = (struct greth_softc *) arg;
+    struct ifnet *ifp = &dp->arpcom.ac_if;
+    struct mbuf *m;
+    unsigned int len, len_status, bad;
+    rtems_event_set events;
+    SPIN_IRQFLAGS(flags);
+    int first;
+    int tmp;
+    unsigned int addr;
+
+    for (;;)
+    {
+        rtems_bsdnet_event_receive (INTERRUPT_EVENT | GRETH_TX_WAIT_EVENT,
+                                    RTEMS_WAIT | RTEMS_EVENT_ANY,
+                                    RTEMS_NO_TIMEOUT, &events);
+
+        if ( events & GRETH_TX_WAIT_EVENT ){
+            /* TX interrupt: all TX descriptors were in use; process the
+             * send queue again now that some have been freed.
+             */
+            if ( dp->gbit_mac )
+                greth_process_tx_gbit(dp);
+            else
+                greth_process_tx(dp);
+
+            /* If we didn't get a RX interrupt we don't process it */
+            if ( (events & INTERRUPT_EVENT) == 0 )
+                continue;
+        }
+
+
+#ifdef GRETH_ETH_DEBUG
+    printf ("r\n");
+#endif
+        first=1;
+        /* Scan for Received packets */
+again:
+        while (!((len_status =
+            GRETH_MEM_LOAD(&dp->rxdesc[dp->rx_ptr].ctrl)) & GRETH_RXD_ENABLE))
+        {
+            bad = 0;
+            /* Check error bits and account statistics */
+            if (len_status & GRETH_RXD_TOOLONG)
+            {
+                dp->rxLengthError++;
+                bad = 1;
+            }
+            if (len_status & GRETH_RXD_DRIBBLE)
+            {
+                dp->rxNonOctet++;
+                bad = 1;
+            }
+            if (len_status & GRETH_RXD_CRCERR)
+            {
+                dp->rxBadCRC++;
+                bad = 1;
+            }
+            if (len_status & GRETH_RXD_OVERRUN)
+            {
+                dp->rxOverrun++;
+                bad = 1;
+            }
+            if (len_status & GRETH_RXD_LENERR)
+            {
+                dp->rxLengthError++;
+                bad = 1;
+            }
+            if (!bad)
+            {
+                /* pass on the packet in the receive buffer */
+                len = len_status & 0x7FF;
+                m = dp->rxmbuf[dp->rx_ptr];
+#ifdef GRETH_DEBUG
+                int i;
+                printf("RX: 0x%08x, Len: %d : ", (int) m->m_data, len);
+                for (i=0; i<len; i++)
+                    printf("%x%x", (m->m_data[i] >> 4) & 0x0ff, m->m_data[i] & 0x0ff);
+                printf("\n");
+#endif
+                m->m_len = m->m_pkthdr.len =
+                    len - sizeof (struct ether_header);
+
+                eh = mtod (m, struct ether_header *);
+
+                m->m_data += sizeof (struct ether_header);
+#ifdef CPU_U32_FIX
+                if(!dp->gbit_mac) {
+                    /* OVERRIDE CACHED ETHERNET HEADER FOR NON-SNOOPING SYSTEMS */
+                    addr = (unsigned int)eh;
+                    asm volatile (" lda [%1] 1, %0\n" : "=r"(tmp) : "r"(addr) );
+                    addr+=4;
+                    asm volatile (" lda [%1] 1, %0\n" : "=r"(tmp) : "r"(addr) );
+                    addr+=4;
+                    asm volatile (" lda [%1] 1, %0\n" : "=r"(tmp) : "r"(addr) );
+                    addr+=4;
+                    asm volatile (" lda [%1] 1, %0\n" : "=r"(tmp) : "r"(addr) );
+
+                    ipalign(m); /* Align packet on 32-bit boundary */
+                }
+#endif
+/*
+                if(!(dp->gbit_mac) && !CPU_SPARC_HAS_SNOOPING) {
+                    rtems_cache_invalidate_entire_data();
+                }
+*/
+                ether_input (ifp, eh, m);
+                /* The consumed mbuf now belongs to the stack; attach a
+                 * fresh cluster to the descriptor.
+                 */
+                MGETHDR (m, M_WAIT, MT_DATA);
+                MCLGET (m, M_WAIT);
+                if (dp->gbit_mac)
+                    m->m_data += 2;
+                dp->rxmbuf[dp->rx_ptr] = m;
+                m->m_pkthdr.rcvif = ifp;
+                drvmgr_translate_check(
+                    dp->dev,
+                    CPUMEM_TO_DMA,
+                    (void *)mtod (m, uint32_t *),
+                    (void **)&dp->rxdesc[dp->rx_ptr].addr,
+                    GRETH_MAXBUF_LEN);
+                dp->rxPackets++;
+            }
+            /* Re-arm the descriptor (WRAP on the last ring entry) and
+             * re-enable the receiver, which stops on error conditions.
+             */
+            if (dp->rx_ptr == dp->rxbufs - 1) {
+                dp->rxdesc[dp->rx_ptr].ctrl = GRETH_RXD_ENABLE | GRETH_RXD_IRQ | GRETH_RXD_WRAP;
+            } else {
+                dp->rxdesc[dp->rx_ptr].ctrl = GRETH_RXD_ENABLE | GRETH_RXD_IRQ;
+            }
+            SPIN_LOCK_IRQ(&dp->devlock, flags);
+            dp->regs->ctrl |= GRETH_CTRL_RXEN;
+            SPIN_UNLOCK_IRQ(&dp->devlock, flags);
+            dp->rx_ptr = (dp->rx_ptr + 1) % dp->rxbufs;
+        }
+
+        /* Always scan twice to avoid deadlock: first re-enable RX
+         * interrupts, then check the ring once more in case a frame
+         * arrived just before they were re-enabled.
+         */
+        if ( first ){
+            first=0;
+            SPIN_LOCK_IRQ(&dp->devlock, flags);
+            dp->regs->ctrl |= GRETH_CTRL_RXIRQ;
+            SPIN_UNLOCK_IRQ(&dp->devlock, flags);
+            goto again;
+        }
+
+    }
+}
+
+/*
+ * Transmit one mbuf chain on the 10/100 MAC by copying it into the
+ * preallocated DMA buffer of the current TX descriptor.
+ * Returns 0 on success (the chain is consumed and freed), 1 when no TX
+ * descriptor is free.
+ */
+static int
+sendpacket (struct ifnet *ifp, struct mbuf *m)
+{
+    struct greth_softc *dp = ifp->if_softc;
+    unsigned char *temp;
+    struct mbuf *n;
+    unsigned int len;
+    SPIN_IRQFLAGS(flags);
+
+    /*
+     * Is there a free descriptor available?
+     */
+    if (GRETH_MEM_LOAD(&dp->txdesc[dp->tx_ptr].ctrl) & GRETH_TXD_ENABLE){
+        /* No. */
+        return 1;
+    }
+
+    /* Remember head of chain */
+    n = m;
+
+    len = 0;
+    /* Translate the descriptor's DMA buffer address back to a CPU
+     * address so the payload can be copied into it.
+     */
+    temp = (unsigned char *) GRETH_MEM_LOAD(&dp->txdesc[dp->tx_ptr].addr);
+    drvmgr_translate(dp->dev, CPUMEM_FROM_DMA, (void *)temp, (void **)&temp);
+#ifdef GRETH_DEBUG
+    printf("TXD: 0x%08x : BUF: 0x%08x\n", (int) m->m_data, (int) temp);
+#endif
+    /* Copy every mbuf of the chain into the DMA buffer */
+    for (;;)
+    {
+#ifdef GRETH_DEBUG
+        int i;
+        printf("MBUF: 0x%08x : ", (int) m->m_data);
+        for (i=0;i<m->m_len;i++)
+            printf("%x%x", (m->m_data[i] >> 4) & 0x0ff, m->m_data[i] & 0x0ff);
+        printf("\n");
+#endif
+        len += m->m_len;
+        /* Guard against overrunning the DMA buffer */
+        if (len <= RBUF_SIZE)
+            memcpy ((void *) temp, (char *) m->m_data, m->m_len);
+        temp += m->m_len;
+        if ((m = m->m_next) == NULL)
+            break;
+    }
+
+    /* Whole chain copied (or skipped); free the mbufs */
+    m_freem (n);
+
+    /* don't send long packets */
+
+    if (len <= GRETH_MAXBUF_LEN) {
+        /* WRAP goes on the last descriptor in the ring */
+        if (dp->tx_ptr < dp->txbufs-1) {
+            dp->txdesc[dp->tx_ptr].ctrl = GRETH_TXD_IRQ |
+                                          GRETH_TXD_ENABLE | len;
+        } else {
+            dp->txdesc[dp->tx_ptr].ctrl = GRETH_TXD_IRQ |
+                                          GRETH_TXD_WRAP | GRETH_TXD_ENABLE | len;
+        }
+        dp->tx_ptr = (dp->tx_ptr + 1) % dp->txbufs;
+        SPIN_LOCK_IRQ(&dp->devlock, flags);
+        dp->regs->ctrl = dp->regs->ctrl | GRETH_CTRL_TXEN;
+        SPIN_UNLOCK_IRQ(&dp->devlock, flags);
+    }
+
+    return 0;
+}
+
+
+/*
+ * Zero-copy transmit for the GBIT MAC: map every mbuf of the chain to
+ * its own TX descriptor, linking fragments with GRETH_TXD_MORE.
+ * Returns 0 on success, the positive fragment count when not enough
+ * descriptors are currently free (caller retries later), or -1 when
+ * the chain has more fragments than the ring can ever hold.
+ */
+static int
+sendpacket_gbit (struct ifnet *ifp, struct mbuf *m)
+{
+    struct greth_softc *dp = ifp->if_softc;
+    unsigned int len;
+
+    unsigned int ctrl;
+    int frags;
+    struct mbuf *mtmp;
+    int int_en;
+    SPIN_IRQFLAGS(flags);
+
+    len = 0;
+#ifdef GRETH_DEBUG
+    printf("TXD: 0x%08x\n", (int) m->m_data);
+#endif
+    /* Get number of fragments to see if we have enough
+     * resources.
+     */
+    frags=1;
+    mtmp=m;
+    while(mtmp->m_next){
+        frags++;
+        mtmp = mtmp->m_next;
+    }
+
+    /* Track the longest chain seen (shown in statistics) */
+    if ( frags > dp->max_fragsize )
+        dp->max_fragsize = frags;
+
+    if ( frags > dp->txbufs ){
+        printf("GRETH: MBUF-chain cannot be sent. Increase descriptor count.\n");
+        return -1;
+    }
+
+    if ( frags > (dp->txbufs-dp->tx_cnt) ){
+        /* Return number of fragments */
+        return frags;
+    }
+
+
+    /* Enable interrupt from descriptor every tx_int_gen
+     * descriptor. Typically every 16 descriptor. This
+     * is only to reduce the number of interrupts during
+     * heavy load.
+     */
+    dp->tx_int_gen_cur-=frags;
+    if ( dp->tx_int_gen_cur <= 0 ){
+        dp->tx_int_gen_cur = dp->tx_int_gen;
+        int_en = GRETH_TXD_IRQ;
+    }else{
+        int_en = 0;
+    }
+
+    /* At this stage we know that enough descriptors are available */
+    for (;;)
+    {
+
+#ifdef GRETH_DEBUG
+        int i;
+        printf("MBUF: 0x%08x, Len: %d : ", (int) m->m_data, m->m_len);
+        for (i=0; i<m->m_len; i++)
+            printf("%x%x", (m->m_data[i] >> 4) & 0x0ff, m->m_data[i] & 0x0ff);
+        printf("\n");
+#endif
+        len += m->m_len;
+        /* Map the mbuf payload directly for DMA (no copy) */
+        drvmgr_translate_check(
+            dp->dev,
+            CPUMEM_TO_DMA,
+            (void *)(uint32_t *)m->m_data,
+            (void **)&dp->txdesc[dp->tx_ptr].addr,
+            m->m_len);
+
+        /* Wrap around? */
+        if (dp->tx_ptr < dp->txbufs-1) {
+            ctrl = GRETH_TXD_ENABLE;
+        }else{
+            ctrl = GRETH_TXD_ENABLE | GRETH_TXD_WRAP;
+        }
+
+        /* Enable Descriptor; the last fragment carries no MORE bit */
+        if ((m->m_next) == NULL) {
+            dp->txdesc[dp->tx_ptr].ctrl = ctrl | int_en | m->m_len;
+            break;
+        }else{
+            dp->txdesc[dp->tx_ptr].ctrl = GRETH_TXD_MORE | ctrl | int_en | m->m_len;
+        }
+
+        /* Next: remember the mbuf so it can be freed once transmitted */
+        dp->txmbuf[dp->tx_ptr] = m;
+        dp->tx_ptr = (dp->tx_ptr + 1) % dp->txbufs;
+        dp->tx_cnt++;
+        m = m->m_next;
+    }
+    /* Account the final fragment as well */
+    dp->txmbuf[dp->tx_ptr] = m;
+    dp->tx_ptr = (dp->tx_ptr + 1) % dp->txbufs;
+    dp->tx_cnt++;
+
+    /* Tell Hardware about newly enabled descriptor */
+    SPIN_LOCK_IRQ(&dp->devlock, flags);
+    dp->regs->ctrl = dp->regs->ctrl | GRETH_CTRL_TXEN;
+    SPIN_UNLOCK_IRQ(&dp->devlock, flags);
+
+    return 0;
+}
+
+/*
+ * GBIT MAC TX processing: reap completed descriptors (freeing their
+ * mbufs) and queue new packets from the stack's send queue.
+ * Returns 0 when the queue was drained, -1 when TX resources ran out
+ * and TX interrupts were enabled to signal future availability.
+ */
+int greth_process_tx_gbit(struct greth_softc *sc)
+{
+    struct ifnet *ifp = &sc->arpcom.ac_if;
+    struct mbuf *m;
+    SPIN_IRQFLAGS(flags);
+    int first=1;
+
+    /*
+     * Send packets till queue is empty
+     */
+    for (;;){
+        /* Reap Sent packets: free the mbuf of every descriptor the
+         * hardware is done with.
+         */
+        while((sc->tx_cnt > 0) && !(GRETH_MEM_LOAD(&sc->txdesc[sc->tx_dptr].ctrl) & GRETH_TXD_ENABLE)) {
+            m_free(sc->txmbuf[sc->tx_dptr]);
+            sc->tx_dptr = (sc->tx_dptr + 1) % sc->txbufs;
+            sc->tx_cnt--;
+        }
+
+        if ( sc->next_tx_mbuf ){
+            /* Get packet we tried but failed to transmit last time */
+            m = sc->next_tx_mbuf;
+            sc->next_tx_mbuf = NULL; /* Mark packet taken */
+        }else{
+            /*
+             * Get the next mbuf chain to transmit from Stack.
+             */
+            IF_DEQUEUE (&ifp->if_snd, m);
+            if (!m){
+                /* Hardware has sent all scheduled packets, this
+                 * makes the stack enter at greth_start next time
+                 * a packet is to be sent.
+                 */
+                ifp->if_flags &= ~IFF_OACTIVE;
+                break;
+            }
+        }
+
+        /* Are there free descriptors available? */
+        /* Try to send the packet; non-zero means it was not queued. */
+        if ( (sc->tx_cnt >= sc->txbufs) || sendpacket_gbit(ifp, m) ){
+            /* Not enough resources */
+
+            /* Since we have taken the mbuf out of the "send chain"
+             * we must remember to use that next time we come back.
+             * or else we have dropped a packet.
+             */
+            sc->next_tx_mbuf = m;
+
+            /* Not enough resources, enable interrupt for transmissions
+             * this way we will be informed when more TX-descriptors are
+             * available.
+             */
+            if ( first ){
+                first = 0;
+                SPIN_LOCK_IRQ(&sc->devlock, flags);
+                ifp->if_flags |= IFF_OACTIVE;
+                sc->regs->ctrl |= GRETH_CTRL_TXIRQ;
+                SPIN_UNLOCK_IRQ(&sc->devlock, flags);
+
+                /* We must check again to be sure that we didn't
+                 * miss an interrupt (if a packet was sent just before
+                 * enabling interrupts)
+                 */
+                continue;
+            }
+
+            return -1;
+        }else{
+            /* Sent Ok, proceed to process more packets if available */
+        }
+    }
+    return 0;
+}
+
+/*
+ * 10/100 MAC TX processing: queue packets from the stack's send queue
+ * via sendpacket() (copy-based transmit).
+ * Returns 0 when the queue was drained, -1 when descriptors ran out
+ * and TX interrupts were enabled to signal future availability.
+ */
+int greth_process_tx(struct greth_softc *sc)
+{
+    struct ifnet *ifp = &sc->arpcom.ac_if;
+    struct mbuf *m;
+    SPIN_IRQFLAGS(flags);
+    int first=1;
+
+    /*
+     * Send packets till queue is empty
+     */
+    for (;;){
+        if ( sc->next_tx_mbuf ){
+            /* Get packet we tried but failed to transmit last time */
+            m = sc->next_tx_mbuf;
+            sc->next_tx_mbuf = NULL; /* Mark packet taken */
+        }else{
+            /*
+             * Get the next mbuf chain to transmit from Stack.
+             */
+            IF_DEQUEUE (&ifp->if_snd, m);
+            if (!m){
+                /* Hardware has sent all scheduled packets, this
+                 * makes the stack enter at greth_start next time
+                 * a packet is to be sent.
+                 */
+                ifp->if_flags &= ~IFF_OACTIVE;
+                break;
+            }
+        }
+
+        /* Try to send the packet; non-zero means no descriptor was free. */
+        if ( sendpacket(ifp, m) ){
+            /* Not enough resources */
+
+            /* Since we have taken the mbuf out of the "send chain"
+             * we must remember to use that next time we come back.
+             * or else we have dropped a packet.
+             */
+            sc->next_tx_mbuf = m;
+
+            /* Not enough resources, enable interrupt for transmissions
+             * this way we will be informed when more TX-descriptors are
+             * available.
+             */
+            if ( first ){
+                first = 0;
+                SPIN_LOCK_IRQ(&sc->devlock, flags);
+                ifp->if_flags |= IFF_OACTIVE;
+                sc->regs->ctrl |= GRETH_CTRL_TXIRQ;
+                SPIN_UNLOCK_IRQ(&sc->devlock, flags);
+
+                /* We must check again to be sure that we didn't
+                 * miss an interrupt (if a packet was sent just before
+                 * enabling interrupts)
+                 */
+                continue;
+            }
+
+            return -1;
+        }else{
+            /* Sent Ok, proceed to process more packets if available */
+        }
+    }
+    return 0;
+}
+
+/* Stack entry point for starting transmission on the interface. */
+static void
+greth_start (struct ifnet *ifp)
+{
+    struct greth_softc *sc = ifp->if_softc;
+
+    /* Transmission already pending; the daemon will drain the queue
+     * once TX descriptors become available.
+     */
+    if ( ifp->if_flags & IFF_OACTIVE )
+        return;
+
+    if ( sc->gbit_mac )
+        greth_process_tx_gbit(sc);
+    else
+        greth_process_tx(sc);
+}
+
+/*
+ * Initialize and start the device
+ */
+/*
+ * Initialize and start the device: first call spawns the daemon task
+ * and brings up the hardware, subsequent calls only refresh the MAC
+ * filter and the RUNNING flag.
+ */
+static void
+greth_init (void *arg)
+{
+    struct greth_softc *sc = arg;
+    struct ifnet *ifp = &sc->arpcom.ac_if;
+    char name[4] = {'E', 'T', 'H', '0'};
+
+    if (sc->daemonTid == 0) {
+        /* Start driver task, named per instance (ETH0, ETH1, ...) */
+        name[3] += sc->minor;
+        sc->daemonTid = rtems_bsdnet_newproc (name, 4096,
+                                              greth_Daemon, sc);
+
+        /* Set up GRETH hardware */
+        greth_initialize_hardware (sc);
+    }
+
+    /* Apply promiscuous/multicast MAC filter settings from the flags */
+    greth_mac_filter_set(sc);
+
+    /* Tell the world that we're running. */
+    ifp->if_flags |= IFF_RUNNING;
+}
+
+/*
+ * Stop the device
+ */
+static void
+greth_stop (struct greth_softc *sc)
+{
+ struct ifnet *ifp = &sc->arpcom.ac_if;
+ SPIN_IRQFLAGS(flags);
+ unsigned int speed;
+
+ SPIN_LOCK_IRQ(&sc->devlock, flags);
+ ifp->if_flags &= ~IFF_RUNNING;
+
+ speed = sc->regs->ctrl & (GRETH_CTRL_GB | GRETH_CTRL_SP | GRETH_CTRL_FULLD);
+
+ /* RX/TX OFF */
+ sc->regs->ctrl = GRETH_CTRL_DD | GRETH_CTRL_ED | speed;
+ /* Reset ON */
+ if (sc->greth_rst)
+ sc->regs->ctrl = GRETH_CTRL_RST | GRETH_CTRL_DD | GRETH_CTRL_ED | speed;
+ /* Reset OFF and restore link settings previously detected if any */
+ sc->regs->ctrl = GRETH_CTRL_DD | sc->edcl_dis | speed;
+ SPIN_UNLOCK_IRQ(&sc->devlock, flags);
+
+ sc->next_tx_mbuf = NULL;
+}
+
+
+/*
+ * Show interface statistics
+ */
+static void
+greth_stats (struct greth_softc *sc)
+{
+ printf (" Rx Interrupts:%-8lu", sc->rxInterrupts);
+ printf (" Rx Packets:%-8lu", sc->rxPackets);
+ printf (" Length:%-8lu", sc->rxLengthError);
+ printf (" Non-octet:%-8lu\n", sc->rxNonOctet);
+ printf (" Bad CRC:%-8lu", sc->rxBadCRC);
+ printf (" Overrun:%-8lu", sc->rxOverrun);
+ printf (" Tx Interrupts:%-8lu", sc->txInterrupts);
+ printf (" Maximal Frags:%-8d", sc->max_fragsize);
+ printf (" GBIT MAC:%-8d", sc->gbit_mac);
+}
+
+/*
+ * Driver ioctl handler
+ */
+static int
+greth_ioctl (struct ifnet *ifp, ioctl_command_t command, caddr_t data)
+{
+ struct greth_softc *sc = ifp->if_softc;
+ int error = 0;
+ struct ifreq *ifr;
+
+ switch (command)
+ {
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ ether_ioctl (ifp, command, data);
+ break;
+
+ case SIOCSIFFLAGS:
+ switch (ifp->if_flags & (IFF_UP | IFF_RUNNING))
+ {
+ case IFF_RUNNING:
+ greth_stop (sc);
+ break;
+
+ case IFF_UP:
+ greth_init (sc);
+ break;
+
+ case IFF_UP | IFF_RUNNING:
+ greth_stop (sc);
+ greth_init (sc);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case SIO_RTEMS_SHOW_STATS:
+ greth_stats (sc);
+ break;
+
+ /*
+ * Multicast commands: Enabling/disabling filtering of MAC addresses
+ */
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ ifr = (struct ifreq *)data;
+ if (command == SIOCADDMULTI) {
+ error = ether_addmulti(ifr, &sc->arpcom);
+ } else {
+ error = ether_delmulti(ifr, &sc->arpcom);
+ }
+ if (error == ENETRESET) {
+ error = greth_mac_filter_set(sc);
+ }
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return error;
+}
+
+/*
+ * Attach an GRETH driver to the system
+ */
+static int
+greth_interface_driver_attach (
+ struct rtems_bsdnet_ifconfig *config,
+ int attach
+ )
+{
+ struct greth_softc *sc;
+ struct ifnet *ifp;
+ int mtu;
+ int unitNumber;
+ char *unitName;
+
+ /* parse driver name */
+ if ((unitNumber = rtems_bsdnet_parse_driver_name (config, &unitName)) < 0)
+ return 0;
+
+ sc = config->drv_ctrl;
+ ifp = &sc->arpcom.ac_if;
+#ifdef GRETH_DEBUG
+ printf("GRETH[%d]: %s, sc %p, dev %p on %s\n", unitNumber, config->ip_address, sc, sc->dev, sc->dev->parent->dev->name);
+#endif
+ if (config->hardware_address)
+ {
+ memcpy (sc->arpcom.ac_enaddr, config->hardware_address,
+ ETHER_ADDR_LEN);
+ }
+ else
+ {
+ memset (sc->arpcom.ac_enaddr, 0x08, ETHER_ADDR_LEN);
+ }
+
+ if (config->mtu)
+ mtu = config->mtu;
+ else
+ mtu = ETHERMTU;
+
+ sc->acceptBroadcast = !config->ignore_broadcast;
+
+ /*
+ * Set up network interface values
+ */
+ ifp->if_softc = sc;
+ ifp->if_unit = unitNumber;
+ ifp->if_name = unitName;
+ ifp->if_mtu = mtu;
+ ifp->if_init = greth_init;
+ ifp->if_ioctl = greth_ioctl;
+ ifp->if_start = greth_start;
+ ifp->if_output = ether_output;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX;
+ if (sc->mc_available)
+ ifp->if_flags |= IFF_MULTICAST;
+ if (ifp->if_snd.ifq_maxlen == 0)
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+
+ /*
+ * Attach the interface
+ */
+ if_attach (ifp);
+ ether_ifattach (ifp);
+
+#ifdef GRETH_DEBUG
+ printf ("GRETH : driver has been attached\n");
+#endif
+ return 1;
+}
+
+/******************* Driver manager interface ***********************/
+
+/* Driver prototypes */
+int greth_register_io(rtems_device_major_number *m);
+int greth_device_init(struct greth_softc *sc);
+int network_interface_add(struct rtems_bsdnet_ifconfig *interface);
+
+#ifdef GRETH_INFO_AVAIL
+static int greth_info(
+ struct drvmgr_dev *dev,
+ void (*print_line)(void *p, char *str),
+ void *p, int argc, char *argv[]);
+#define GRETH_INFO_FUNC greth_info
+#else
+#define GRETH_INFO_FUNC NULL
+#endif
+
+int greth_init2(struct drvmgr_dev *dev);
+int greth_init3(struct drvmgr_dev *dev);
+
+struct drvmgr_drv_ops greth_ops =
+{
+ .init =
+ {
+ NULL,
+ greth_init2,
+ greth_init3,
+ NULL
+ },
+ .remove = NULL,
+ .info = GRETH_INFO_FUNC,
+};
+
+struct amba_dev_id greth_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_ETHMAC},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info greth_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_GRETH_ID, /* Driver ID */
+ "GRETH_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &greth_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+ },
+ &greth_ids[0]
+};
+
+void greth_register_drv (void)
+{
+ DBG("Registering GRETH driver\n");
+ drvmgr_drv_register(&greth_drv_info.general);
+}
+
+int greth_init2(struct drvmgr_dev *dev)
+{
+ struct greth_softc *priv;
+
+ DBG("GRETH[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+ priv = dev->priv = grlib_calloc(1, sizeof(*priv));
+ if ( !priv )
+ return DRVMGR_NOMEM;
+ priv->dev = dev;
+
+ /* This core will not find other cores, so we wait for init3() */
+
+ return DRVMGR_OK;
+}
+
+int greth_init3(struct drvmgr_dev *dev)
+{
+ struct greth_softc *sc;
+ struct rtems_bsdnet_ifconfig *ifp;
+ rtems_status_code status;
+
+ sc = dev->priv;
+ sprintf(sc->devName, "gr_eth%d", (dev->minor_drv+1));
+
+ /* Init GRETH device */
+ if ( greth_device_init(sc) ) {
+ printk("GRETH: Failed to init device\n");
+ return DRVMGR_FAIL;
+ }
+
+ /* Initialize Spin-lock for GRSPW Device. This is to protect
+ * CTRL and DMACTRL registers from ISR.
+ */
+ SPIN_INIT(&sc->devlock, sc->devName);
+
+ /* Register GRETH device as an Network interface */
+ ifp = grlib_calloc(1, sizeof(*ifp));
+
+ ifp->name = sc->devName;
+ ifp->drv_ctrl = sc;
+ ifp->attach = greth_interface_driver_attach;
+
+ status = network_interface_add(ifp);
+ if (status != 0) {
+ return DRVMGR_FAIL;
+ }
+
+ return DRVMGR_OK;
+}
+
+int greth_device_init(struct greth_softc *sc)
+{
+ struct amba_dev_info *ambadev;
+ struct ambapp_core *pnpinfo;
+ union drvmgr_key_value *value;
+ unsigned int speed;
+
+ /* Get device information from AMBA PnP information */
+ ambadev = (struct amba_dev_info *)sc->dev->businfo;
+ if ( ambadev == NULL ) {
+ return -1;
+ }
+ pnpinfo = &ambadev->info;
+ sc->regs = (greth_regs *)pnpinfo->apb_slv->start;
+ sc->minor = sc->dev->minor_drv;
+ sc->greth_rst = 1;
+
+ /* Remember EDCL enabled/disable state before reset */
+ sc->edcl_dis = sc->regs->ctrl & GRETH_CTRL_ED;
+
+ /* Default is to inherit EDCL Disable bit from HW. User can force En/Dis */
+ value = drvmgr_dev_key_get(sc->dev, "edclDis", DRVMGR_KT_INT);
+ if ( value ) {
+ /* Force EDCL mode. Has an effect later when GRETH+PHY is initialized */
+ if (value->i > 0) {
+ sc->edcl_dis = GRETH_CTRL_ED;
+ } else {
+ /* Default to avoid soft-reset the GRETH when EDCL is forced */
+ sc->edcl_dis = 0;
+ sc->greth_rst = 0;
+ }
+ }
+
+ /* let user control soft-reset of GRETH (for debug) */
+ value = drvmgr_dev_key_get(sc->dev, "soft-reset", DRVMGR_KT_INT);
+ if ( value) {
+ sc->greth_rst = value->i ? 1 : 0;
+ }
+
+ /* clear control register and reset NIC and keep current speed modes.
+ * This should be done as quick as possible during startup, this is to
+ * stop DMA transfers after a reboot.
+ *
+ * When EDCL is forced enabled reset is skipped, disabling RX/TX DMA is
+ * is enough during debug.
+ */
+ speed = sc->regs->ctrl & (GRETH_CTRL_GB | GRETH_CTRL_SP | GRETH_CTRL_FULLD);
+ sc->regs->ctrl = GRETH_CTRL_DD | GRETH_CTRL_ED | speed;
+ if (sc->greth_rst)
+ sc->regs->ctrl = GRETH_CTRL_RST | GRETH_CTRL_DD | GRETH_CTRL_ED | speed;
+ sc->regs->ctrl = GRETH_CTRL_DD | sc->edcl_dis | speed;
+
+ /* Configure driver by overriding default config with the bus resources
+ * configured by the user
+ */
+ sc->txbufs = 32;
+ sc->rxbufs = 32;
+ sc->phyaddr = -1;
+
+ value = drvmgr_dev_key_get(sc->dev, "txDescs", DRVMGR_KT_INT);
+ if ( value && (value->i <= 128) )
+ sc->txbufs = value->i;
+
+ value = drvmgr_dev_key_get(sc->dev, "rxDescs", DRVMGR_KT_INT);
+ if ( value && (value->i <= 128) )
+ sc->rxbufs = value->i;
+
+ value = drvmgr_dev_key_get(sc->dev, "phyAdr", DRVMGR_KT_INT);
+ if ( value && (value->i < 32) )
+ sc->phyaddr = value->i;
+
+ value = drvmgr_dev_key_get(sc->dev, "advModes", DRVMGR_KT_INT);
+ if ( value )
+ sc->advmodes = value->i;
+
+ /* Check if multicast support is available */
+ sc->mc_available = sc->regs->ctrl & GRETH_CTRL_MC;
+
+ return 0;
+}
+
+#ifdef GRETH_INFO_AVAIL
+static int greth_info(
+ struct drvmgr_dev *dev,
+ void (*print_line)(void *p, char *str),
+ void *p, int argc, char *argv[])
+{
+ struct greth_softc *sc;
+ char buf[64];
+
+ if (dev->priv == NULL)
+ return -DRVMGR_EINVAL;
+ sc = dev->priv;
+
+ sprintf(buf, "IFACE NAME: %s", sc->devName);
+ print_line(p, buf);
+ sprintf(buf, "GBIT MAC: %s", sc->gbit_mac ? "YES" : "NO");
+ print_line(p, buf);
+
+ return DRVMGR_OK;
+}
+#endif
+
+#endif
diff --git a/bsps/shared/grlib/net/network_interface_add.c b/bsps/shared/grlib/net/network_interface_add.c
new file mode 100644
index 0000000000..011137404f
--- /dev/null
+++ b/bsps/shared/grlib/net/network_interface_add.c
@@ -0,0 +1,62 @@
+/* Network interface register help function
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * This function adds a network interface to the
+ * rtems_bsdnet_config.ifconfig linked list of interfaces.
+ * The interface configuration is taken from the user defined
+ * array interface_configs. This function is useful for PnP
+ * systems when an unknown number of interfaces are available.
+ *
+ * The license and distribution terms for this file may be
+ * found in found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems/rtems_bsdnet.h>
+#include <stdio.h>
+
+#include <grlib/network_interface_add.h>
+
+extern struct rtems_bsdnet_config rtems_bsdnet_config;
+
+/* Number of interfaces taken */
+int network_interface_cnt = 0;
+
+int network_interface_add(struct rtems_bsdnet_ifconfig *interface)
+{
+ struct ethernet_config *cfg = NULL;
+ int i, last_entry = 1;
+
+ /* Init interface description */
+ interface->next = NULL;
+
+ cfg = &interface_configs[network_interface_cnt];
+ for(i=0; i<6; i++) {
+ if ( cfg->eth_adr[i] != 0 ) {
+ last_entry = 0;
+ break;
+ }
+ }
+ /* Do we have a valid configuration? */
+ if ( last_entry == 0 ) {
+ cfg = &interface_configs[network_interface_cnt];
+
+ interface->ip_address = cfg->ip_addr;
+ interface->ip_netmask = cfg->ip_netmask;
+ interface->hardware_address = cfg->eth_adr;
+
+ network_interface_cnt++;
+ } else {
+ interface->ip_address = NULL;
+ interface->ip_netmask = NULL;
+ interface->hardware_address = NULL;
+ }
+
+ /* Insert interface first into list */
+ interface->next = rtems_bsdnet_config.ifconfig;
+ rtems_bsdnet_config.ifconfig = interface;
+
+ return 0;
+}
diff --git a/bsps/shared/grlib/pci/gr_701.c b/bsps/shared/grlib/pci/gr_701.c
new file mode 100644
index 0000000000..c9ac0db0f2
--- /dev/null
+++ b/bsps/shared/grlib/pci/gr_701.c
@@ -0,0 +1,618 @@
+/* GR-701 PCI Target driver.
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * Configures the GR-701 interface PCI board.
+ * This driver provides a AMBA PnP bus by using the general part
+ * of the AMBA PnP bus driver (ambapp_bus.c).
+ *
+ * Driver resources for the AMBA PnP bus provided can be set using
+ * gr701_set_resources().
+ *
+ * The license and distribution terms for this file may be
+ * found in found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <bsp.h>
+#include <rtems/bspIo.h>
+#include <pci.h>
+#include <pci/access.h>
+
+#include <grlib/ambapp.h>
+
+#include <grlib/ambapp.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <drvmgr/pci_bus.h>
+#include <grlib/bspcommon.h>
+#include <grlib/genirq.h>
+
+#include <grlib/gr_701.h>
+
+#include <grlib/grlib_impl.h>
+
+/* Offset from 0x80000000 (dual bus version) */
+#define AHB1_BASE_ADDR 0x80000000
+#define AHB1_IOAREA_BASE_ADDR 0x80100000
+
+/* #define DEBUG 1 */
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+int gr701_init1(struct drvmgr_dev *dev);
+int gr701_init2(struct drvmgr_dev *dev);
+void gr701_interrupt(void *arg);
+
+#define READ_REG(address) (*(volatile unsigned int *)address)
+
+/* PCI bride reg layout on AMBA side */
+struct amba_bridge_regs {
+ volatile unsigned int bar0;
+ volatile unsigned int bar1;
+ volatile unsigned int bar2;
+ volatile unsigned int bar3;
+ volatile unsigned int bar4;/* 0x10 */
+
+ volatile unsigned int unused[4*3-1];
+
+ volatile unsigned int ambabars[1]; /* 0x40 */
+};
+
+/* PCI bride reg layout on PCI side */
+struct pci_bridge_regs {
+ volatile unsigned int bar0;
+ volatile unsigned int bar1;
+ volatile unsigned int bar2;
+ volatile unsigned int bar3;
+ volatile unsigned int bar4; /* 0x10 */
+
+ volatile unsigned int ilevel;
+ volatile unsigned int ipend;
+ volatile unsigned int iforce;
+ volatile unsigned int istatus;
+ volatile unsigned int iclear;
+ volatile unsigned int imask;
+};
+
+/* Private data structure for driver */
+struct gr701_priv {
+ /* Driver management */
+ struct drvmgr_dev *dev;
+ char prefix[16];
+ SPIN_DECLARE(devlock);
+
+ struct pci_bridge_regs *pcib;
+ struct amba_bridge_regs *ambab;
+
+ /* PCI */
+ pci_dev_t pcidev;
+ struct pci_dev_info *devinfo;
+
+ /* IRQ */
+ genirq_t genirq;
+ int interrupt_cnt;
+
+ /* GR-701 Address translation */
+ struct drvmgr_map_entry bus_maps_up[2];
+ struct drvmgr_map_entry bus_maps_down[2];
+
+ /* AMBA Plug&Play information on GR-701 */
+ struct ambapp_bus abus;
+ struct ambapp_mmap amba_maps[3];
+ struct ambapp_config config;
+};
+
+int ambapp_gr701_int_register(
+ struct drvmgr_dev *dev,
+ int irq,
+ const char *info,
+ drvmgr_isr handler,
+ void *arg);
+int ambapp_gr701_int_unregister(
+ struct drvmgr_dev *dev,
+ int irq,
+ drvmgr_isr isr,
+ void *arg);
+int ambapp_gr701_int_unmask(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_gr701_int_mask(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_gr701_int_clear(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_gr701_get_params(
+ struct drvmgr_dev *dev,
+ struct drvmgr_bus_params *params);
+
+struct ambapp_ops ambapp_gr701_ops = {
+ .int_register = ambapp_gr701_int_register,
+ .int_unregister = ambapp_gr701_int_unregister,
+ .int_unmask = ambapp_gr701_int_unmask,
+ .int_mask = ambapp_gr701_int_mask,
+ .int_clear = ambapp_gr701_int_clear,
+ .get_params = ambapp_gr701_get_params
+};
+
+struct drvmgr_drv_ops gr701_ops =
+{
+ .init = {gr701_init1, gr701_init2, NULL, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+struct pci_dev_id_match gr701_ids[] =
+{
+ PCIID_DEVVEND(PCIID_VENDOR_GAISLER, PCIID_DEVICE_GR_701),
+ PCIID_END_TABLE /* Mark end of table */
+};
+
+struct pci_drv_info gr701_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_PCI_GAISLER_GR701_ID, /* Driver ID */
+ "GR-701_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_PCI, /* Bus Type */
+ &gr701_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+ },
+ &gr701_ids[0]
+};
+
+/* Driver resources configuration for the AMBA bus on the GR-701 board.
+ * It is declared weak so that the user may override it from the project file,
+ * if the default settings are not enough.
+ *
+ * The configuration consists of an array of configuration pointers, each
+ * pointer determine the configuration of one GR-701 board. Pointer
+ * zero is for board0, pointer 1 for board1 and so on.
+ *
+ * The array must end with a NULL pointer.
+ */
+struct drvmgr_bus_res *gr701_resources[] __attribute__((weak)) =
+{
+ NULL
+};
+
+void gr701_register_drv(void)
+{
+ DBG("Registering GR-701 PCI driver\n");
+ drvmgr_drv_register(&gr701_info.general);
+}
+
+void gr701_interrupt(void *arg)
+{
+ struct gr701_priv *priv = arg;
+ unsigned int status;
+ int irq = 0;
+ SPIN_ISR_IRQFLAGS(irqflags);
+
+ SPIN_LOCK(&priv->devlock, irqflags);
+ while ( (status=priv->pcib->istatus) != 0 ) {
+ priv->interrupt_cnt++; /* An interrupt was generated */
+ irq = status;
+ genirq_doirq(priv->genirq, irq);
+ /* ACK interrupt */
+ priv->pcib->istatus = 0;
+ }
+ SPIN_UNLOCK(&priv->devlock, irqflags);
+
+ /* ACK interrupt, this is because PCI is Level, so the IRQ Controller still drives the IRQ. */
+ if ( irq )
+ drvmgr_interrupt_clear(priv->dev, 0);
+}
+
+static int gr701_hw_init(struct gr701_priv *priv)
+{
+ uint32_t com1;
+ struct pci_bridge_regs *pcib;
+ struct amba_bridge_regs *ambab;
+ int mst;
+ unsigned int pci_freq_hz;
+ pci_dev_t pcidev = priv->pcidev;
+ struct pci_dev_info *devinfo = priv->devinfo;
+
+ /* Set up PCI ==> AMBA */
+ priv->pcib = pcib = (void *)devinfo->resources[0].address;
+ pcib->bar0 = 0xfc000000;
+
+ /* Set up GR701 AMBA Masters connection to PCI */
+ priv->ambab = ambab = (struct amba_bridge_regs *)(
+ devinfo->resources[1].address + 0x400);
+
+ /* Init all msters, max 16 */
+ for (mst=0; mst<16; mst++) {
+ ambab->ambabars[mst] = 0x40000000;
+ if (READ_REG(&ambab->ambabars[mst]) != 0x40000000)
+ break;
+ }
+
+ /* Setup Address translation for AMBA bus, assume that PCI BAR
+ * are mapped 1:1 to CPU.
+ */
+
+ priv->amba_maps[0].size = 0x04000000;
+ priv->amba_maps[0].local_adr = devinfo->resources[1].address;
+ priv->amba_maps[0].remote_adr = 0xfc000000;
+
+ /* Mark end of table */
+ priv->amba_maps[1].size=0;
+ priv->amba_maps[1].local_adr = 0;
+ priv->amba_maps[1].remote_adr = 0;
+
+ /* Setup DOWN-streams address translation */
+ priv->bus_maps_down[0].name = "PCI BAR1 -> AMBA";
+ priv->bus_maps_down[0].size = priv->amba_maps[0].size;
+ priv->bus_maps_down[0].from_adr = (void *)devinfo->resources[1].address;
+ priv->bus_maps_down[0].to_adr = (void *)0xfc000000;
+
+ /* Setup UP-streams address translation */
+ priv->bus_maps_up[0].name = "AMBA PCIF Window";
+ priv->bus_maps_up[0].size = 0x10000000;
+ priv->bus_maps_up[0].from_adr = (void *)0xe0000000;
+ priv->bus_maps_up[0].to_adr = (void *)0x40000000;
+
+ /* Mark end of translation tables */
+ priv->bus_maps_down[1].size = 0;
+ priv->bus_maps_up[1].size = 0;
+
+ /* Enable I/O and Mem accesses */
+ pci_cfg_r32(pcidev, PCIR_COMMAND, &com1);
+ com1 |= PCIM_CMD_PORTEN | PCIM_CMD_MEMEN;
+ pci_cfg_w32(pcidev, PCIR_COMMAND, com1);
+
+ /* Start AMBA PnP scan at first AHB bus */
+ ambapp_scan(&priv->abus, devinfo->resources[1].address + 0x3f00000,
+ NULL, &priv->amba_maps[0]);
+
+ /* Frequency is the same as the PCI bus frequency */
+ drvmgr_freq_get(priv->dev, 0, &pci_freq_hz);
+
+ /* Initialize Frequency of AMBA bus */
+ ambapp_freq_init(&priv->abus, NULL, pci_freq_hz);
+
+ /* Init IRQ controller (avoid IRQ generation) */
+ pcib->imask = 0x0000;
+ pcib->ipend = 0;
+ pcib->iclear = 0xffff;
+ pcib->iforce = 0;
+ pcib->ilevel = 0x0;
+
+ /* Successfully registered the GR-701 board */
+ return 0;
+}
+
+static void gr701_hw_init2(struct gr701_priv *priv)
+{
+ /* Enable PCI Master (for DMA) */
+ pci_master_enable(priv->pcidev);
+}
+
+/* Called when a PCI target is found with the PCI device and vendor ID
+ * given in gr701_ids[].
+ */
+int gr701_init1(struct drvmgr_dev *dev)
+{
+ struct gr701_priv *priv;
+ struct pci_dev_info *devinfo;
+ uint32_t bar0, bar1, bar0_size, bar1_size;
+ int resources_cnt;
+
+ priv = grlib_calloc(1, sizeof(*priv));
+ if ( !priv )
+ return DRVMGR_NOMEM;
+
+ dev->priv = priv;
+ priv->dev = dev;
+
+ /* Determine number of configurations */
+ resources_cnt = get_resarray_count(gr701_resources);
+
+ /* Generate Device prefix */
+ strcpy(priv->prefix, "/dev/gr701_0");
+ priv->prefix[11] += dev->minor_drv;
+ mkdir(priv->prefix, S_IRWXU | S_IRWXG | S_IRWXO);
+ priv->prefix[12] = '/';
+ priv->prefix[13] = '\0';
+
+ priv->devinfo = devinfo = (struct pci_dev_info *)dev->businfo;
+ priv->pcidev = devinfo->pcidev;
+ bar0 = devinfo->resources[0].address;
+ bar0_size = devinfo->resources[0].size;
+ bar1 = devinfo->resources[1].address;
+ bar1_size = devinfo->resources[1].size;
+ printk("\n\n--- GR-701[%d] ---\n", dev->minor_drv);
+ printk(" PCI BUS: 0x%x, SLOT: 0x%x, FUNCTION: 0x%x\n",
+ PCI_DEV_EXPAND(priv->pcidev));
+ printk(" PCI VENDOR: 0x%04x, DEVICE: 0x%04x\n\n\n",
+ devinfo->id.vendor, devinfo->id.device);
+ printk(" PCI BAR[0]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+ bar0, bar0 + bar0_size - 1);
+ printk(" PCI BAR[1]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+ bar1, bar1 + bar1_size - 1);
+ printk(" IRQ: %d\n\n\n", devinfo->irq);
+
+ /* all neccessary space assigned to GR-701 target? */
+ if ((bar0_size == 0) || (bar1_size == 0))
+ return DRVMGR_ENORES;
+
+ /* Initialize spin-lock for this PCI perihperal device. This is to
+ * protect the Interrupt Controller Registers. The genirq layer is
+ * protecting its own internals and ISR dispatching.
+ */
+ SPIN_INIT(&priv->devlock, priv->prefix);
+
+ priv->genirq = genirq_init(16);
+ if ( priv->genirq == NULL ) {
+ free(priv);
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+
+ if ( gr701_hw_init(priv) ) {
+ genirq_destroy(priv->genirq);
+ free(priv);
+ dev->priv = NULL;
+ printk(" Failed to initialize GR-701 HW\n");
+ return DRVMGR_FAIL;
+ }
+
+ /* Init amba bus */
+ priv->config.abus = &priv->abus;
+ priv->config.ops = &ambapp_gr701_ops;
+ priv->config.maps_up = &priv->bus_maps_up[0];
+ priv->config.maps_down = &priv->bus_maps_down[0];
+ if ( priv->dev->minor_drv < resources_cnt ) {
+ priv->config.resources = gr701_resources[priv->dev->minor_drv];
+ } else {
+ priv->config.resources = NULL;
+ }
+
+ /* Create and register AMBA PnP bus. */
+ return ambapp_bus_register(dev, &priv->config);
+}
+
+/* Called when a PCI target is found with the PCI device and vendor ID
+ * given in gr701_ids[].
+ */
+int gr701_init2(struct drvmgr_dev *dev)
+{
+ struct gr701_priv *priv = dev->priv;
+
+ /* Clear any old interrupt requests */
+ drvmgr_interrupt_clear(dev, 0);
+
+ /* Enable System IRQ so that GR-701 PCI target interrupt goes through.
+ *
+ * It is important to enable it in stage init2. If interrupts were
+ * enabled in init1 this might hang the system when more than one PCI
+ * board is connected, this is because PCI interrupts might be shared
+ * and PCI target 2 have not initialized and might therefore drive
+ * interrupt already when entering init1().
+ */
+ drvmgr_interrupt_register(dev, 0, "gr701", gr701_interrupt, priv);
+
+ gr701_hw_init2(priv);
+
+ return DRVMGR_OK;
+}
+
+int ambapp_gr701_int_register(
+ struct drvmgr_dev *dev,
+ int irq,
+ const char *info,
+ drvmgr_isr handler,
+ void *arg)
+{
+ struct gr701_priv *priv = dev->parent->dev->priv;
+ SPIN_IRQFLAGS(irqflags);
+ int status;
+ void *h;
+
+ h = genirq_alloc_handler(handler, arg);
+ if ( h == NULL )
+ return DRVMGR_FAIL;
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ status = genirq_register(priv->genirq, irq, h);
+ if ( status == 0 ) {
+ /* Clear IRQ for first registered handler */
+ priv->pcib->iclear = (1<<irq);
+ } else if ( status == 1 )
+ status = 0;
+
+ if (status != 0) {
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+ genirq_free_handler(h);
+ return DRVMGR_FAIL;
+ }
+
+ status = genirq_enable(priv->genirq, irq, handler, arg);
+ if ( status == 0 ) {
+ /* Enable IRQ for first enabled handler only */
+ priv->pcib->imask |= (1<<irq); /* unmask interrupt source */
+ } else if ( status == 1 )
+ status = DRVMGR_OK;
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return status;
+}
+
+int ambapp_gr701_int_unregister(
+ struct drvmgr_dev *dev,
+ int irq,
+ drvmgr_isr isr,
+ void *arg)
+{
+ struct gr701_priv *priv = dev->parent->dev->priv;
+ SPIN_IRQFLAGS(irqflags);
+ int status;
+ void *handler;
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ status = genirq_disable(priv->genirq, irq, isr, arg);
+ if ( status == 0 ) {
+ /* Disable IRQ only when no enabled handler exists */
+ priv->pcib->imask &= ~(1<<irq); /* mask interrupt source */
+ }
+
+ handler = genirq_unregister(priv->genirq, irq, isr, arg);
+ if ( handler == NULL )
+ status = DRVMGR_FAIL;
+ else
+ status = DRVMGR_OK;
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ if (handler)
+ genirq_free_handler(handler);
+
+ return status;
+}
+
+int ambapp_gr701_int_unmask(
+ struct drvmgr_dev *dev,
+ int irq)
+{
+ struct gr701_priv *priv = dev->parent->dev->priv;
+ SPIN_IRQFLAGS(irqflags);
+
+ DBG("GR-701 IRQ %d: enable\n", irq);
+
+ if ( genirq_check(priv->genirq, irq) )
+ return DRVMGR_FAIL;
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ /* Enable IRQ */
+ priv->pcib->imask |= (1<<irq); /* unmask interrupt source */
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return DRVMGR_OK;
+}
+
+int ambapp_gr701_int_mask(
+ struct drvmgr_dev *dev,
+ int irq)
+{
+ struct gr701_priv *priv = dev->parent->dev->priv;
+ SPIN_IRQFLAGS(irqflags);
+
+ DBG("GR-701 IRQ %d: disable\n", irq);
+
+ if ( genirq_check(priv->genirq, irq) )
+ return DRVMGR_FAIL;
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ /* Disable IRQ */
+ priv->pcib->imask &= ~(1<<irq); /* mask interrupt source */
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return DRVMGR_OK;
+}
+
+int ambapp_gr701_int_clear(
+ struct drvmgr_dev *dev,
+ int irq)
+{
+ struct gr701_priv *priv = dev->parent->dev->priv;
+
+ if ( genirq_check(priv->genirq, irq) )
+ return DRVMGR_FAIL;
+
+ priv->pcib->iclear = (1<<irq);
+
+ return DRVMGR_OK;
+}
+
+int ambapp_gr701_get_params(struct drvmgr_dev *dev, struct drvmgr_bus_params *params)
+{
+ struct gr701_priv *priv = dev->parent->dev->priv;
+
+ /* Device name prefix pointer, skip /dev */
+ params->dev_prefix = &priv->prefix[5];
+
+ return 0;
+}
+
+void gr701_print_dev(struct drvmgr_dev *dev, int options)
+{
+ struct gr701_priv *priv = dev->priv;
+ struct pci_dev_info *devinfo = priv->devinfo;
+ unsigned int freq_hz;
+ uint32_t bar0, bar1, bar0_size, bar1_size;
+
+ /* Print */
+ printf("--- GR-701 [bus 0x%x, dev 0x%x, fun 0x%x] ---\n",
+ PCI_DEV_EXPAND(priv->pcidev));
+ bar0 = devinfo->resources[0].address;
+ bar0_size = devinfo->resources[0].size;
+ bar1 = devinfo->resources[1].address;
+ bar1_size = devinfo->resources[1].size;
+
+ printf(" PCI BAR[0]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+ bar0, bar0 + bar0_size - 1);
+ printf(" PCI BAR[1]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+ bar1, bar1 + bar1_size - 1);
+ printf(" IRQ: %d\n", devinfo->irq);
+
+ /* Frequency is the same as the PCI bus frequency */
+ drvmgr_freq_get(dev, 0, &freq_hz);
+
+ printf(" FREQ: %u Hz\n", freq_hz);
+ printf(" IMASK: 0x%08x\n", priv->pcib->imask);
+ printf(" IPEND: 0x%08x\n", priv->pcib->ipend);
+
+ /* Print amba config */
+ if ( options & GR701_OPTIONS_AMBA ) {
+ ambapp_print(&priv->abus, 10);
+ }
+
+#if 0
+ /* Print IRQ handlers and their arguments */
+ if ( options & GR701_OPTIONS_IRQ ) {
+ int i;
+ for(i=0; i<16; i++) {
+ printf(" IRQ[%02d]: 0x%x, arg: 0x%x\n",
+ i, (unsigned int)priv->isrs[i].handler, (unsigned int)priv->isrs[i].arg);
+ }
+ }
+#endif
+}
+
+void gr701_print(int options)
+{
+ struct pci_drv_info *drv = &gr701_info;
+ struct drvmgr_dev *dev;
+
+ dev = drv->general.dev;
+ while(dev) {
+ gr701_print_dev(dev, options);
+ dev = dev->next_in_drv;
+ }
+}
diff --git a/bsps/shared/grlib/pci/gr_rasta_adcdac.c b/bsps/shared/grlib/pci/gr_rasta_adcdac.c
new file mode 100644
index 0000000000..12e35acbc5
--- /dev/null
+++ b/bsps/shared/grlib/pci/gr_rasta_adcdac.c
@@ -0,0 +1,694 @@
+/* GR-RASTA-ADCDAC PCI Target driver.
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * Configures the GR-RASTA-ADCDAC interface PCI board.
+ * This driver provides a AMBA PnP bus by using the general part
+ * of the AMBA PnP bus driver (ambapp_bus.c).
+ *
+ * Driver resources for the AMBA PnP bus provided can be set using
+ * gr_rasta_adcdac_set_resources().
+ *
+ * The license and distribution terms for this file may be
+ * found in found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <bsp.h>
+#include <rtems/bspIo.h>
+#include <pci.h>
+
+#include <grlib/ambapp.h>
+#include <grlib/grlib.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <drvmgr/pci_bus.h>
+#include <grlib/bspcommon.h>
+#include <grlib/genirq.h>
+
+#include <grlib/gr_rasta_adcdac.h>
+
+#include <grlib/grlib_impl.h>
+
+/*#define DEBUG 1*/
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/* Determines which PCI address the AHB masters will access, it should be
+ * set so that the masters can access the CPU RAM. Default is base of CPU RAM,
+ * CPU RAM is mapped 1:1 to PCI space.
+ */
+extern unsigned int _RAM_START;
+#define AHBMST2PCIADR (((unsigned int)&_RAM_START) & 0xf0000000)
+
+/* PCI ID */
+#define PCIID_VENDOR_GAISLER 0x1AC8
+#define PCIID_DEVICE_GR_RASTA_ADCDAC 0x0014
+
+int gr_rasta_adcdac_init1(struct drvmgr_dev *dev);
+int gr_rasta_adcdac_init2(struct drvmgr_dev *dev);
+void gr_rasta_adcdac_isr (void *arg);
+
+/* GRPCI PCI bridge APB register layout (as seen from the remote AMBA
+ * bus on the GR-RASTA-ADCDAC board).
+ */
+struct grpci_regs {
+ volatile unsigned int cfg_stat;
+ volatile unsigned int bar0;
+ volatile unsigned int page0;
+ volatile unsigned int bar1;
+ volatile unsigned int page1;
+ volatile unsigned int iomap;
+ volatile unsigned int stat_cmd;
+};
+
+/* Per-hardware-revision constants, selected by PCI revision ID */
+struct gr_rasta_adcdac_ver {
+ const unsigned int amba_freq_hz; /* The frequency */
+ const unsigned int amba_ioarea; /* The address where the PnP IOAREA starts at */
+};
+
+/* Private data structure for driver. One instance is allocated per
+ * GR-RASTA-ADCDAC PCI board found.
+ */
+struct gr_rasta_adcdac_priv {
+ /* Driver management */
+ struct drvmgr_dev *dev;
+ char prefix[20]; /* device filesystem prefix, e.g. "/dev/rastaadcdac0/" */
+ SPIN_DECLARE(devlock); /* protects the on-board IRQ controller regs */
+
+ /* PCI */
+ pci_dev_t pcidev;
+ struct pci_dev_info *devinfo;
+ uint32_t ahbmst2pci_map; /* PCI base the board's AHB masters access */
+
+ /* IRQ */
+ genirq_t genirq;
+
+ /* GR-RASTA-ADCDAC */
+ struct gr_rasta_adcdac_ver *version;
+ struct irqmp_regs *irq;
+ struct grpci_regs *grpci;
+ struct drvmgr_map_entry bus_maps_down[3];
+ struct drvmgr_map_entry bus_maps_up[2];
+
+ /* AMBA Plug&Play information on GR-RASTA-ADCDAC */
+ struct ambapp_bus abus;
+ struct ambapp_mmap amba_maps[4];
+ struct ambapp_config config;
+};
+
+/* Constants for PCI revision 0 boards (the only supported revision) */
+struct gr_rasta_adcdac_ver gr_rasta_adcdac_ver0 = {
+ .amba_freq_hz = 50000000,
+ .amba_ioarea = 0x80100000,
+};
+
+/* Forward declarations of the interrupt/parameter operations exported
+ * to the AMBA PnP bus layer through ambapp_rasta_adcdac_ops below.
+ */
+int ambapp_rasta_adcdac_int_register(
+ struct drvmgr_dev *dev,
+ int irq,
+ const char *info,
+ drvmgr_isr handler,
+ void *arg);
+int ambapp_rasta_adcdac_int_unregister(
+ struct drvmgr_dev *dev,
+ int irq,
+ drvmgr_isr isr,
+ void *arg);
+int ambapp_rasta_adcdac_int_unmask(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_rasta_adcdac_int_mask(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_rasta_adcdac_int_clear(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_rasta_adcdac_get_params(
+ struct drvmgr_dev *dev,
+ struct drvmgr_bus_params *params);
+
+/* Operations table handed to the generic AMBA PnP bus driver */
+struct ambapp_ops ambapp_rasta_adcdac_ops = {
+ .int_register = ambapp_rasta_adcdac_int_register,
+ .int_unregister = ambapp_rasta_adcdac_int_unregister,
+ .int_unmask = ambapp_rasta_adcdac_int_unmask,
+ .int_mask = ambapp_rasta_adcdac_int_mask,
+ .int_clear = ambapp_rasta_adcdac_int_clear,
+ .get_params = ambapp_rasta_adcdac_get_params
+};
+
+/* Driver manager operations: two-stage initialization, no remove/info */
+struct drvmgr_drv_ops gr_rasta_adcdac_ops =
+{ .init = {gr_rasta_adcdac_init1, gr_rasta_adcdac_init2, NULL, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+/* PCI vendor/device IDs this driver binds to */
+struct pci_dev_id_match gr_rasta_adcdac_ids[] =
+{
+ PCIID_DEVVEND(PCIID_VENDOR_GAISLER, PCIID_DEVICE_GR_RASTA_ADCDAC),
+ PCIID_END_TABLE /* Mark end of table */
+};
+
+/* Driver description registered with the driver manager PCI bus layer */
+struct pci_drv_info gr_rasta_adcdac_info =
+{
+ {
+  DRVMGR_OBJ_DRV, /* Driver */
+  NULL, /* Next driver */
+  NULL, /* Device list */
+  DRIVER_PCI_GAISLER_RASTAADCDAC_ID,/* Driver ID */
+  "GR-RASTA-ADCDAC_DRV", /* Driver Name */
+  DRVMGR_BUS_TYPE_PCI, /* Bus Type */
+  &gr_rasta_adcdac_ops,
+  NULL, /* Funcs */
+  0, /* No devices yet */
+  0,
+ },
+ &gr_rasta_adcdac_ids[0]
+};
+
+/* Driver resources configuration for the AMBA bus on the GR-RASTA-ADCDAC board.
+ * It is declared weak so that the user may override it from the project file,
+ * if the default settings are not enough.
+ *
+ * The configuration consists of an array of configuration pointers, each
+ * pointer determine the configuration of one GR-RASTA-ADCDAC board. Pointer
+ * zero is for board0, pointer 1 for board1 and so on.
+ *
+ * The array must end with a NULL pointer.
+ */
+struct drvmgr_bus_res *gr_rasta_adcdac_resources[] __attribute__((weak)) =
+{
+ NULL /* default: no per-board resource configuration */
+};
+
+/* Register the GR-RASTA-ADCDAC driver with the driver manager. Must be
+ * called before driver manager initialization for devices to be probed.
+ */
+void gr_rasta_adcdac_register_drv(void)
+{
+ DBG("Registering GR-RASTA-ADCDAC PCI driver\n");
+ drvmgr_drv_register(&gr_rasta_adcdac_info.general);
+}
+
+/* System-level ISR for the board's shared PCI interrupt. Reads the
+ * on-board IRQMP pending register and dispatches each pending IRQ
+ * (0..15) to handlers registered via genirq, clearing each source.
+ */
+void gr_rasta_adcdac_isr (void *arg)
+{
+ struct gr_rasta_adcdac_priv *priv = arg;
+ unsigned int status, tmp;
+ int irq;
+ SPIN_ISR_IRQFLAGS(irqflags);
+
+ tmp = status = priv->irq->ipend;
+
+ /* DBG("GR-RASTA-ADCDAC: IRQ 0x%x\n",status); */
+
+ SPIN_LOCK(&priv->devlock, irqflags);
+ for(irq=0; irq<16; irq++) {
+  if ( status & (1<<irq) ) {
+   genirq_doirq(priv->genirq, irq);
+   priv->irq->iclear = (1<<irq);
+   status &= ~(1<<irq);
+   if ( status == 0 ) /* stop early once all pending bits handled */
+    break;
+  }
+ }
+ SPIN_UNLOCK(&priv->devlock, irqflags);
+
+ /* ACK interrupt, this is because PCI is Level, so the IRQ Controller still drives the IRQ. */
+ if ( tmp )
+  drvmgr_interrupt_clear(priv->dev, 0);
+
+ DBG("RASTA-ADCDAC-IRQ: 0x%x\n", tmp);
+}
+
+/* First-stage hardware init: select board revision, scan the remote
+ * AMBA PnP bus through BAR0, locate the GRPCI bridge and IRQMP
+ * controller, and build the up/down address translation tables.
+ * Returns 0 on success or a negative code identifying the failing step.
+ */
+static int gr_rasta_adcdac_hw_init1(struct gr_rasta_adcdac_priv *priv)
+{
+ uint32_t data;
+ unsigned int *page0 = NULL;
+ struct ambapp_dev *tmp;
+ struct ambapp_ahb_info *ahb;
+ struct pci_dev_info *devinfo = priv->devinfo;
+ uint32_t bar0, bar0_size;
+
+ /* Select version of GR-RASTA-ADCDAC board */
+ switch (devinfo->rev) {
+ case 0:
+  priv->version = &gr_rasta_adcdac_ver0;
+  break;
+ default:
+  return -2;
+ }
+
+ /* PAGE0 register sits in the upper half of the BAR0 window */
+ bar0 = devinfo->resources[0].address;
+ bar0_size = devinfo->resources[0].size;
+ page0 = (unsigned int *)(bar0 + bar0_size/2);
+
+ /* Point PAGE0 to start of Plug and Play information */
+ *page0 = priv->version->amba_ioarea & 0xf0000000;
+
+ /* set parity error response */
+ pci_cfg_r32(priv->pcidev, PCIR_COMMAND, &data);
+ pci_cfg_w32(priv->pcidev, PCIR_COMMAND, (data|PCIM_CMD_PERRESPEN));
+
+ /* Setup cache line size. Default cache line size will result in
+  * poor performance (256 word fetches), 0xff will set it according
+  * to the max size of the PCI FIFO.
+  */
+ pci_cfg_w8(priv->pcidev, PCIR_CACHELNSZ, 0xff);
+
+ /* Scan AMBA Plug&Play */
+
+ /* AMBA MAP bar0 (in CPU) ==> 0x80000000(remote amba address) */
+ priv->amba_maps[0].size = bar0_size/2;
+ priv->amba_maps[0].local_adr = bar0;
+ priv->amba_maps[0].remote_adr = 0x80000000;
+
+ /* AMBA MAP bar1 (in CPU) ==> 0x40000000(remote amba address) */
+ priv->amba_maps[1].size = devinfo->resources[1].size;
+ priv->amba_maps[1].local_adr = devinfo->resources[1].address;
+ priv->amba_maps[1].remote_adr = 0x40000000;
+
+ /* Addresses not matching with map be untouched */
+ priv->amba_maps[2].size = 0xfffffff0;
+ priv->amba_maps[2].local_adr = 0;
+ priv->amba_maps[2].remote_adr = 0;
+
+ /* Mark end of table */
+ priv->amba_maps[3].size=0;
+ priv->amba_maps[3].local_adr = 0;
+ priv->amba_maps[3].remote_adr = 0;
+
+ /* Start AMBA PnP scan at first AHB bus */
+ /*ambapp_scan(priv->bar0 + (priv->version->amba_ioarea & ~0xf0000000),
+  NULL, &priv->amba_maps[0], NULL, &priv->abus.root, NULL);*/
+ ambapp_scan(&priv->abus,
+  bar0 + (priv->version->amba_ioarea & ~0xf0000000),
+  NULL, &priv->amba_maps[0]);
+
+ /* Initialize Frequency of AMBA bus */
+ ambapp_freq_init(&priv->abus, NULL, priv->version->amba_freq_hz);
+
+ /* Point PAGE0 to start of APB area */
+ *page0 = 0x80000000;
+
+ /* Find GRPCI controller */
+ tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
+     (OPTIONS_ALL|OPTIONS_APB_SLVS),
+     VENDOR_GAISLER, GAISLER_PCIFBRG,
+     ambapp_find_by_idx, NULL);
+ if ( !tmp ) {
+  return -3;
+ }
+ priv->grpci = (struct grpci_regs *)((struct ambapp_apb_info *)tmp->devinfo)->start;
+
+ /* Set GRPCI mmap so that AMBA masters can access CPU-RAM over
+  * the PCI window.
+  */
+ priv->grpci->cfg_stat = (priv->grpci->cfg_stat & 0x0fffffff) |
+    (priv->ahbmst2pci_map & 0xf0000000);
+ priv->grpci->page1 = 0x40000000;
+
+ /* Find IRQ controller */
+ tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
+     (OPTIONS_ALL|OPTIONS_APB_SLVS),
+     VENDOR_GAISLER, GAISLER_IRQMP,
+     ambapp_find_by_idx, NULL);
+ if ( !tmp ) {
+  return -4;
+ }
+ priv->irq = (struct irqmp_regs *)DEV_TO_APB(tmp)->start;
+ /* Set up GR-RASTA-ADCDAC irq controller: all sources cleared/masked */
+ priv->irq->iclear = 0xffff;
+ priv->irq->ilevel = 0;
+ priv->irq->mask[0] = 0;
+
+ /* DOWN streams translation table */
+ priv->bus_maps_down[0].name = "PCI BAR0 -> AMBA";
+ priv->bus_maps_down[0].size = priv->amba_maps[0].size;
+ priv->bus_maps_down[0].from_adr = (void *)priv->amba_maps[0].local_adr;
+ priv->bus_maps_down[0].to_adr = (void *)priv->amba_maps[0].remote_adr;
+
+ priv->bus_maps_down[1].name = "PCI BAR1 -> AMBA";
+ priv->bus_maps_down[1].size = priv->amba_maps[1].size;
+ priv->bus_maps_down[1].from_adr = (void *)priv->amba_maps[1].local_adr;
+ priv->bus_maps_down[1].to_adr = (void *)priv->amba_maps[1].remote_adr;
+
+ /* Mark end of translation table */
+ priv->bus_maps_down[2].size = 0;
+
+ /* Find GRPCI controller AHB Slave interface */
+ tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
+     (OPTIONS_ALL|OPTIONS_AHB_SLVS),
+     VENDOR_GAISLER, GAISLER_PCIFBRG,
+     ambapp_find_by_idx, NULL);
+ if ( !tmp ) {
+  return -5;
+ }
+ ahb = (struct ambapp_ahb_info *)tmp->devinfo;
+
+ /* UP streams translation table */
+ priv->bus_maps_up[0].name = "AMBA GRPCI Window";
+ priv->bus_maps_up[0].size = ahb->mask[0]; /* AMBA->PCI Window on GR-RASTA-ADCDAC board */
+ priv->bus_maps_up[0].from_adr = (void *)ahb->start[0];
+ priv->bus_maps_up[0].to_adr = (void *)
+    (priv->ahbmst2pci_map & 0xf0000000);
+
+ /* Mark end of translation table */
+ priv->bus_maps_up[1].size = 0;
+
+ /* Successfully registered the RASTA board */
+ return 0;
+}
+
+/* Second-stage hardware init, run after all boards completed init1 */
+static int gr_rasta_adcdac_hw_init2(struct gr_rasta_adcdac_priv *priv)
+{
+ /* Enable DMA by enabling PCI target as master */
+ pci_master_enable(priv->pcidev);
+
+ return DRVMGR_OK;
+}
+
+/* Called when a PCI target is found with the PCI device and vendor ID
+ * given in gr_rasta_adcdac_ids[].
+ */
+/* Called when a PCI target is found with the PCI device and vendor ID
+ * given in gr_rasta_adcdac_ids[]. Allocates the board private data,
+ * creates the /dev prefix directory, initializes the hardware and
+ * registers the on-board AMBA PnP bus with the driver manager.
+ */
+int gr_rasta_adcdac_init1(struct drvmgr_dev *dev)
+{
+ struct gr_rasta_adcdac_priv *priv;
+ struct pci_dev_info *devinfo;
+ int status;
+ uint32_t bar0, bar1, bar0_size, bar1_size;
+ union drvmgr_key_value *value;
+ int resources_cnt;
+
+ priv = grlib_calloc(1, sizeof(*priv));
+ if ( !priv )
+  return DRVMGR_NOMEM;
+
+ dev->priv = priv;
+ priv->dev = dev;
+
+ /* Determine number of configurations */
+ resources_cnt = get_resarray_count(gr_rasta_adcdac_resources);
+
+ /* Generate Device prefix */
+
+ strcpy(priv->prefix, "/dev/rastaadcdac0");
+ priv->prefix[16] += dev->minor_drv;
+ mkdir(priv->prefix, S_IRWXU | S_IRWXG | S_IRWXO);
+ priv->prefix[17] = '/';
+ priv->prefix[18] = '\0';
+
+ priv->devinfo = devinfo = (struct pci_dev_info *)dev->businfo;
+ priv->pcidev = devinfo->pcidev;
+ bar0 = devinfo->resources[0].address;
+ bar0_size = devinfo->resources[0].size;
+ bar1 = devinfo->resources[1].address;
+ bar1_size = devinfo->resources[1].size;
+ printk("\n\n--- GR-RASTA-ADCDAC[%d] ---\n", dev->minor_drv);
+ printk(" PCI BUS: 0x%x, SLOT: 0x%x, FUNCTION: 0x%x\n",
+  PCI_DEV_EXPAND(priv->pcidev));
+ printk(" PCI VENDOR: 0x%04x, DEVICE: 0x%04x\n",
+  devinfo->id.vendor, devinfo->id.device);
+ printk(" PCI BAR[0]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+  bar0, bar0 + bar0_size - 1);
+ printk(" PCI BAR[1]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+  bar1, bar1 + bar1_size - 1);
+ printk(" IRQ: %d\n\n\n", devinfo->irq);
+
+ /* all necessary space assigned to GR-RASTA-ADCDAC target?
+  * Release the private data on failure like all other error paths do.
+  */
+ if ((bar0_size == 0) || (bar1_size == 0)) {
+  free(priv);
+  dev->priv = NULL;
+  return DRVMGR_ENORES;
+ }
+
+ /* Initialize spin-lock for this PCI perihperal device. This is to
+  * protect the Interrupt Controller Registers. The genirq layer is
+  * protecting its own internals and ISR dispatching.
+  */
+ SPIN_INIT(&priv->devlock, priv->prefix);
+
+ /* Let user override which PCI address the AHB masters of the
+  * RASTA-ADCDAC board access when doing DMA to CPU RAM. The AHB masters
+  * access the PCI Window of the AMBA bus, the MSB 4-bits of that address
+  * is translated according this config option before the address
+  * goes out on the PCI bus.
+  * Only the 4 MSB bits have an effect;
+  */
+ value = drvmgr_dev_key_get(priv->dev, "ahbmst2pci", DRVMGR_KT_INT);
+ if (value)
+  priv->ahbmst2pci_map = value->i;
+ else
+  priv->ahbmst2pci_map = AHBMST2PCIADR; /* default */
+
+ priv->genirq = genirq_init(16);
+ if ( priv->genirq == NULL ) {
+  free(priv);
+  dev->priv = NULL;
+  return DRVMGR_FAIL;
+ }
+
+ if ( (status = gr_rasta_adcdac_hw_init1(priv)) != 0 ) {
+  genirq_destroy(priv->genirq);
+  free(priv);
+  dev->priv = NULL;
+  printk(" Failed to initialize GR-RASTA-ADCDAC HW: %d\n", status);
+  return DRVMGR_FAIL;
+ }
+
+ /* Init amba bus */
+ priv->config.abus = &priv->abus;
+ priv->config.ops = &ambapp_rasta_adcdac_ops;
+ priv->config.maps_up = &priv->bus_maps_up[0];
+ priv->config.maps_down = &priv->bus_maps_down[0];
+ if ( priv->dev->minor_drv < resources_cnt ) {
+  priv->config.resources = gr_rasta_adcdac_resources[priv->dev->minor_drv];
+ } else {
+  priv->config.resources = NULL;
+ }
+
+ /* Create and register AMBA PnP bus. */
+ return ambapp_bus_register(dev, &priv->config);
+}
+
+/* Second-stage driver init: hook up the shared PCI interrupt and
+ * enable the board as PCI master (DMA).
+ */
+int gr_rasta_adcdac_init2(struct drvmgr_dev *dev)
+{
+ struct gr_rasta_adcdac_priv *priv = dev->priv;
+
+ /* Clear any old interrupt requests */
+ drvmgr_interrupt_clear(dev, 0);
+
+ /* Enable System IRQ so that GR-RASTA-ADCDAC PCI target interrupt
+  * goes through.
+  *
+  * It is important to enable it in stage init2. If interrupts were
+  * enabled in init1 this might hang the system when more than one
+  * PCI board is connected, this is because PCI interrupts might
+  * be shared and PCI board 2 have not initialized and might
+  * therefore drive interrupt already when entering init1().
+  */
+ drvmgr_interrupt_register(
+  dev,
+  0,
+  "gr_rasta_adcdac",
+  gr_rasta_adcdac_isr,
+  (void *)priv);
+
+ return gr_rasta_adcdac_hw_init2(priv);
+}
+
+/* AMBA PnP bus callback: register and enable an ISR for an on-board
+ * IRQ (0..15). The first handler registered for an IRQ clears the
+ * source; the first handler enabled unmasks it in the IRQMP controller.
+ */
+int ambapp_rasta_adcdac_int_register(
+ struct drvmgr_dev *dev,
+ int irq,
+ const char *info,
+ drvmgr_isr handler,
+ void *arg)
+{
+ struct gr_rasta_adcdac_priv *priv = dev->parent->dev->priv;
+ SPIN_IRQFLAGS(irqflags);
+ int status;
+ void *h;
+
+ h = genirq_alloc_handler(handler, arg);
+ if ( h == NULL )
+  return DRVMGR_FAIL;
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ status = genirq_register(priv->genirq, irq, h);
+ if ( status == 0 ) {
+  /* Clear IRQ for first registered handler */
+  priv->irq->iclear = (1<<irq);
+ } else if ( status == 1 ) /* 1: another handler already registered */
+  status = 0;
+
+ if (status != 0) {
+  SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+  genirq_free_handler(h);
+  return DRVMGR_FAIL;
+ }
+
+ status = genirq_enable(priv->genirq, irq, handler, arg);
+ if ( status == 0 ) {
+  /* Enable IRQ for first enabled handler only */
+  priv->irq->mask[0] |= (1<<irq); /* unmask interrupt source */
+ } else if ( status == 1 ) /* 1: already enabled, not an error */
+  status = 0;
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return status;
+}
+
+/* AMBA PnP bus callback: disable and unregister a previously
+ * registered ISR; masks the IRQ when no enabled handler remains.
+ */
+int ambapp_rasta_adcdac_int_unregister(
+ struct drvmgr_dev *dev,
+ int irq,
+ drvmgr_isr isr,
+ void *arg)
+{
+ struct gr_rasta_adcdac_priv *priv = dev->parent->dev->priv;
+ SPIN_IRQFLAGS(irqflags);
+ int status;
+ void *handler;
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ status = genirq_disable(priv->genirq, irq, isr, arg);
+ if ( status == 0 ) {
+  /* Disable IRQ only when no enabled handler exists */
+  priv->irq->mask[0] &= ~(1<<irq); /* mask interrupt source */
+ }
+
+ handler = genirq_unregister(priv->genirq, irq, isr, arg);
+ if ( handler == NULL )
+  status = DRVMGR_FAIL;
+ else
+  status = DRVMGR_OK;
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ /* Free outside the lock */
+ if (handler)
+  genirq_free_handler(handler);
+
+ return status;
+}
+
+/* AMBA PnP bus callback: unmask an on-board IRQ in the IRQMP controller */
+int ambapp_rasta_adcdac_int_unmask(
+ struct drvmgr_dev *dev,
+ int irq)
+{
+ struct gr_rasta_adcdac_priv *priv = dev->parent->dev->priv;
+ SPIN_IRQFLAGS(irqflags);
+
+ DBG("RASTA-ADCDAC IRQ %d: unmask\n", irq);
+
+ /* Reject IRQ numbers outside the controller's range */
+ if ( genirq_check(priv->genirq, irq) )
+  return DRVMGR_EINVAL;
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ /* Enable IRQ for first enabled handler only */
+ priv->irq->mask[0] |= (1<<irq); /* unmask interrupt source */
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return DRVMGR_OK;
+}
+
+/* AMBA PnP bus callback: mask an on-board IRQ in the IRQMP controller */
+int ambapp_rasta_adcdac_int_mask(
+ struct drvmgr_dev *dev,
+ int irq)
+{
+ struct gr_rasta_adcdac_priv *priv = dev->parent->dev->priv;
+ SPIN_IRQFLAGS(irqflags);
+
+ DBG("RASTA-ADCDAC IRQ %d: mask\n", irq);
+
+ /* Reject IRQ numbers outside the controller's range */
+ if ( genirq_check(priv->genirq, irq) )
+  return DRVMGR_EINVAL;
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ /* Disable/mask IRQ */
+ priv->irq->mask[0] &= ~(1<<irq); /* mask interrupt source */
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return DRVMGR_OK;
+}
+
+/* AMBA PnP bus callback: clear a pending on-board IRQ */
+int ambapp_rasta_adcdac_int_clear(
+ struct drvmgr_dev *dev,
+ int irq)
+{
+ struct gr_rasta_adcdac_priv *priv = dev->parent->dev->priv;
+
+ /* Reject IRQ numbers outside the controller's range */
+ if ( genirq_check(priv->genirq, irq) )
+  return DRVMGR_FAIL;
+
+ priv->irq->iclear = (1<<irq);
+
+ return DRVMGR_OK;
+}
+
+/* AMBA PnP bus callback: supply bus parameters (device name prefix) */
+int ambapp_rasta_adcdac_get_params(struct drvmgr_dev *dev, struct drvmgr_bus_params *params)
+{
+ struct gr_rasta_adcdac_priv *priv = dev->parent->dev->priv;
+
+ /* Device name prefix pointer, skip /dev */
+ params->dev_prefix = &priv->prefix[5];
+
+ return 0;
+}
+
+/* Print information about one GR-RASTA-ADCDAC board to stdout.
+ * options: RASTA_ADCDAC_OPTIONS_AMBA additionally dumps the AMBA PnP bus.
+ */
+void gr_rasta_adcdac_print_dev(struct drvmgr_dev *dev, int options)
+{
+ struct gr_rasta_adcdac_priv *priv = dev->priv;
+ struct pci_dev_info *devinfo = priv->devinfo;
+ uint32_t bar0, bar1, bar0_size, bar1_size;
+
+ /* Print */
+ printf("--- GR-RASTA-ADCDAC [bus 0x%x, dev 0x%x, fun 0x%x] ---\n",
+  PCI_DEV_EXPAND(priv->pcidev));
+
+ bar0 = devinfo->resources[0].address;
+ bar0_size = devinfo->resources[0].size;
+ bar1 = devinfo->resources[1].address;
+ bar1_size = devinfo->resources[1].size;
+
+ printf(" PCI BAR[0]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+  bar0, bar0 + bar0_size - 1);
+ printf(" PCI BAR[1]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+  bar1, bar1 + bar1_size - 1);
+ printf(" IRQ REGS: 0x%" PRIxPTR "\n", (uintptr_t)priv->irq);
+ printf(" IRQ: %d\n", devinfo->irq);
+ printf(" PCI REVISION: %d\n", devinfo->rev);
+ /* amba_freq_hz is unsigned int: use %u as in the sibling GR-701 driver */
+ printf(" FREQ: %u Hz\n", priv->version->amba_freq_hz);
+ printf(" IMASK: 0x%08x\n", priv->irq->mask[0]);
+ printf(" IPEND: 0x%08x\n", priv->irq->ipend);
+
+ /* Print amba config */
+ if ( options & RASTA_ADCDAC_OPTIONS_AMBA ) {
+  ambapp_print(&priv->abus, 10);
+ }
+#if 0
+ /* Print IRQ handlers and their arguments */
+ if ( options & RASTA_ADCDAC_OPTIONS_IRQ ) {
+  int i;
+  for(i=0; i<16; i++) {
+   printf(" IRQ[%02d]: 0x%x, arg: 0x%x\n",
+    i, (unsigned int)priv->isrs[i].handler, (unsigned int)priv->isrs[i].arg);
+  }
+ }
+#endif
+}
+
+/* Print information about all GR-RASTA-ADCDAC boards handled by this driver */
+void gr_rasta_adcdac_print(int options)
+{
+ struct drvmgr_dev *dev;
+
+ for (dev = gr_rasta_adcdac_info.general.dev; dev != NULL; dev = dev->next_in_drv)
+  gr_rasta_adcdac_print_dev(dev, options);
+}
diff --git a/bsps/shared/grlib/pci/gr_rasta_io.c b/bsps/shared/grlib/pci/gr_rasta_io.c
new file mode 100644
index 0000000000..f4c9d50a7b
--- /dev/null
+++ b/bsps/shared/grlib/pci/gr_rasta_io.c
@@ -0,0 +1,892 @@
+/* GR-RASTA-IO PCI Target driver.
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * Configures the GR-RASTA-IO interface PCI board.
+ * This driver provides a AMBA PnP bus by using the general part
+ * of the AMBA PnP bus driver (ambapp_bus.c).
+ *
+ * Driver resources for the AMBA PnP bus provided can be set using
+ * gr_rasta_io_set_resources().
+ *
+ * The license and distribution terms for this file may be
+ * found in found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <bsp.h>
+#include <rtems/bspIo.h>
+#include <pci.h>
+
+#include <grlib/ambapp.h>
+#include <grlib/grlib.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <drvmgr/pci_bus.h>
+#include <grlib/bspcommon.h>
+#include <grlib/genirq.h>
+
+#include <grlib/gr_rasta_io.h>
+
+#include <grlib/grlib_impl.h>
+
+/* Determines which PCI address the AHB masters will access, it should be
+ * set so that the masters can access the CPU RAM. Default is base of CPU RAM,
+ * CPU RAM is mapped 1:1 to PCI space.
+ */
+extern unsigned int _RAM_START;
+#define AHBMST2PCIADR (((unsigned int)&_RAM_START) & 0xf0000000)
+
+/* Offset from 0x80000000 (dual bus version) */
+#define AHB1_BASE_ADDR 0x80000000
+#define AHB1_IOAREA_BASE_ADDR 0x80100000
+#define AHB1_IOAREA_OFS (AHB1_IOAREA_BASE_ADDR - AHB1_BASE_ADDR)
+
+/* Second revision constants (GRPCI2) */
+#define GRPCI2_BAR0_TO_AHB_MAP 0x04 /* Fixme */
+#define GRPCI2_BAR1_TO_AHB_MAP 0x08 /* Fixme */
+#define GRPCI2_PCI_CONFIG 0x20 /* Fixme */
+
+
+/* #define DEBUG 1 */
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/* PCI ID */
+#define PCIID_VENDOR_GAISLER 0x1AC8
+
+int gr_rasta_io_init1(struct drvmgr_dev *dev);
+int gr_rasta_io_init2(struct drvmgr_dev *dev);
+void gr_rasta_io_isr (void *arg);
+
+/* GRPCI (revision 0/1 boards) PCI bridge APB register layout */
+struct grpci_regs {
+ volatile unsigned int cfg_stat;
+ volatile unsigned int bar0;
+ volatile unsigned int page0;
+ volatile unsigned int bar1;
+ volatile unsigned int page1;
+ volatile unsigned int iomap;
+ volatile unsigned int stat_cmd;
+};
+
+/* GRPCI2 (second revision boards) PCI bridge APB register layout */
+struct grpci2_regs {
+ volatile unsigned int ctrl;
+ volatile unsigned int statcap;
+ volatile unsigned int pcimstprefetch;
+ volatile unsigned int ahbtopciiomap;
+ volatile unsigned int dmactrl;
+ volatile unsigned int dmadesc;
+ volatile unsigned int dmachanact;
+ volatile unsigned int reserved;
+ volatile unsigned int pcibartoahb[6];
+ volatile unsigned int reserved2[2];
+ volatile unsigned int ahbtopcimemmap[16];
+ volatile unsigned int trcctrl;
+ volatile unsigned int trccntmode;
+ volatile unsigned int trcadpat;
+ volatile unsigned int trcadmask;
+ volatile unsigned int trcctrlsigpat;
+ volatile unsigned int trcctrlsigmask;
+ volatile unsigned int trcadstate;
+ volatile unsigned int trcctrlsigstate;
+};
+
+/* Per-hardware-revision constants, selected by PCI revision ID */
+struct gr_rasta_io_ver {
+ const unsigned int amba_freq_hz; /* The frequency */
+ const unsigned int amba_ioarea; /* The address where the PnP IOAREA starts at */
+};
+
+/* Private data structure for driver. One instance is allocated per
+ * GR-RASTA-IO PCI board found.
+ */
+struct gr_rasta_io_priv {
+ /* Driver management */
+ struct drvmgr_dev *dev;
+ char prefix[16]; /* device filesystem prefix, e.g. "/dev/rastaio0/" */
+ SPIN_DECLARE(devlock); /* protects the on-board IRQ controller regs */
+
+ /* PCI */
+ pci_dev_t pcidev;
+ struct pci_dev_info *devinfo;
+ uint32_t ahbmst2pci_map; /* PCI base the board's AHB masters access */
+
+ /* IRQ */
+ genirq_t genirq;
+
+ /* GR-RASTA-IO */
+ struct gr_rasta_io_ver *version;
+ struct irqmp_regs *irq;
+ struct grpci_regs *grpci;
+ struct grpci2_regs *grpci2;
+ struct drvmgr_map_entry bus_maps_down[3];
+ struct drvmgr_map_entry bus_maps_up[2];
+
+ /* AMBA Plug&Play information on GR-RASTA-IO */
+ struct ambapp_bus abus;
+ struct ambapp_mmap amba_maps[4];
+ struct ambapp_config config;
+};
+
+/* Constants for PCI revision 0 boards */
+struct gr_rasta_io_ver gr_rasta_io_ver0 = {
+ .amba_freq_hz = 30000000,
+ .amba_ioarea = 0x80100000,
+};
+
+/* Constants for PCI revision 1 boards */
+struct gr_rasta_io_ver gr_rasta_io_ver1 = {
+ .amba_freq_hz = 50000000,
+ .amba_ioarea = 0x80100000,
+};
+
+/* Forward declarations of the interrupt/parameter operations exported
+ * to the AMBA PnP bus layer through ambapp_rasta_io_ops below.
+ */
+int ambapp_rasta_io_int_register(
+ struct drvmgr_dev *dev,
+ int irq,
+ const char *info,
+ drvmgr_isr handler,
+ void *arg);
+int ambapp_rasta_io_int_unregister(
+ struct drvmgr_dev *dev,
+ int irq,
+ drvmgr_isr handler,
+ void *arg);
+int ambapp_rasta_io_int_unmask(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_rasta_io_int_mask(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_rasta_io_int_clear(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_rasta_io_get_params(
+ struct drvmgr_dev *dev,
+ struct drvmgr_bus_params *params);
+
+/* Operations table handed to the generic AMBA PnP bus driver */
+struct ambapp_ops ambapp_rasta_io_ops = {
+ .int_register = ambapp_rasta_io_int_register,
+ .int_unregister = ambapp_rasta_io_int_unregister,
+ .int_unmask = ambapp_rasta_io_int_unmask,
+ .int_mask = ambapp_rasta_io_int_mask,
+ .int_clear = ambapp_rasta_io_int_clear,
+ .get_params = ambapp_rasta_io_get_params
+};
+
+/* Driver manager operations: two-stage initialization, no remove/info */
+struct drvmgr_drv_ops gr_rasta_io_ops =
+{
+ .init = {gr_rasta_io_init1, gr_rasta_io_init2, NULL, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+/* PCI vendor/device IDs this driver binds to (new and old ID pairs) */
+struct pci_dev_id_match gr_rasta_io_ids[] =
+{
+ PCIID_DEVVEND(PCIID_VENDOR_GAISLER, PCIID_DEVICE_GR_RASTA_IO),
+ PCIID_DEVVEND(PCIID_VENDOR_GAISLER_OLD, PCIID_DEVICE_GR_RASTA_IO_OLD),
+ PCIID_END_TABLE /* Mark end of table */
+};
+
+/* Driver description registered with the driver manager PCI bus layer */
+struct pci_drv_info gr_rasta_io_info =
+{
+ {
+  DRVMGR_OBJ_DRV, /* Driver */
+  NULL, /* Next driver */
+  NULL, /* Device list */
+  DRIVER_PCI_GAISLER_RASTAIO_ID, /* Driver ID */
+  "GR-RASTA-IO_DRV", /* Driver Name */
+  DRVMGR_BUS_TYPE_PCI, /* Bus Type */
+  &gr_rasta_io_ops,
+  NULL, /* Funcs */
+  0, /* No devices yet */
+  0,
+ },
+ &gr_rasta_io_ids[0]
+};
+
+/* Driver resources configuration for the AMBA bus on the GR-RASTA-IO board.
+ * It is declared weak so that the user may override it from the project file,
+ * if the default settings are not enough.
+ *
+ * The configuration consists of an array of configuration pointers, each
+ * pointer determine the configuration of one GR-RASTA-IO board. Pointer
+ * zero is for board0, pointer 1 for board1 and so on.
+ *
+ * The array must end with a NULL pointer.
+ */
+struct drvmgr_bus_res *gr_rasta_io_resources[] __attribute__((weak)) =
+{
+ NULL /* default: no per-board resource configuration */
+};
+
+/* Register the GR-RASTA-IO driver with the driver manager. Must be
+ * called before driver manager initialization for devices to be probed.
+ */
+void gr_rasta_io_register_drv(void)
+{
+ DBG("Registering GR-RASTA-IO PCI driver\n");
+ drvmgr_drv_register(&gr_rasta_io_info.general);
+}
+
+/* System-level ISR for the board's shared PCI interrupt. Reads the
+ * on-board IRQMP pending register and dispatches each pending IRQ
+ * (0..15) to handlers registered via genirq, clearing each source.
+ */
+void gr_rasta_io_isr (void *arg)
+{
+ struct gr_rasta_io_priv *priv = arg;
+ unsigned int status, tmp;
+ int irq;
+ SPIN_ISR_IRQFLAGS(irqflags);
+
+ tmp = status = priv->irq->ipend;
+
+ /* DBG("GR-RASTA-IO: IRQ 0x%x\n",status); */
+
+ SPIN_LOCK(&priv->devlock, irqflags);
+ for(irq=0; irq<16; irq++) {
+  if ( status & (1<<irq) ) {
+   genirq_doirq(priv->genirq, irq);
+   priv->irq->iclear = (1<<irq);
+   status &= ~(1<<irq);
+   if ( status == 0 ) /* stop early once all pending bits handled */
+    break;
+  }
+ }
+ SPIN_UNLOCK(&priv->devlock, irqflags);
+
+ /* ACK interrupt, this is because PCI is Level, so the IRQ Controller still drives the IRQ. */
+ if ( tmp )
+  drvmgr_interrupt_clear(priv->dev, 0);
+
+ DBG("RASTA-IO-IRQ: 0x%x\n", tmp);
+}
+
+/* PCI Hardware (Revision 0 and 1) initialization */
+/* PCI Hardware (Revision 0 and 1) initialization: scan the remote AMBA
+ * PnP bus through BAR0, locate the GRPCI bridge and IRQMP controller,
+ * and build the up/down address translation tables.
+ * Returns 0 on success or a negative code identifying the failing step.
+ */
+static int gr_rasta_io_hw_init(struct gr_rasta_io_priv *priv)
+{
+ unsigned int *page0 = NULL;
+ struct ambapp_dev *tmp;
+ struct ambapp_ahb_info *ahb;
+ struct pci_dev_info *devinfo = priv->devinfo;
+ uint32_t bar0, bar0_size;
+
+ /* PAGE0 register sits in the upper half of the BAR0 window */
+ bar0 = devinfo->resources[0].address;
+ bar0_size = devinfo->resources[0].size;
+ page0 = (unsigned int *)(bar0 + bar0_size/2);
+
+ /* Point PAGE0 to start of Plug and Play information */
+ *page0 = priv->version->amba_ioarea & 0xff000000;
+
+#if 0
+ {
+  uint32_t data;
+  /* set parity error response */
+  pci_cfg_r32(priv->pcidev, PCIR_COMMAND, &data);
+  pci_cfg_w32(priv->pcidev, PCIR_COMMAND, (data|PCIM_CMD_PERRESPEN));
+ }
+#endif
+
+ /* Setup cache line size. Default cache line size will result in
+  * poor performance (256 word fetches), 0xff will set it according
+  * to the max size of the PCI FIFO.
+  */
+ pci_cfg_w8(priv->pcidev, PCIR_CACHELNSZ, 0xff);
+
+ /* Scan AMBA Plug&Play */
+
+ /* AMBA MAP bar0 (in CPU) ==> 0x80000000(remote amba address) */
+ priv->amba_maps[0].size = bar0_size/2;
+ priv->amba_maps[0].local_adr = bar0;
+ priv->amba_maps[0].remote_adr = AHB1_BASE_ADDR;
+
+ /* AMBA MAP bar1 (in CPU) ==> 0x40000000(remote amba address) */
+ priv->amba_maps[1].size = devinfo->resources[1].size;
+ priv->amba_maps[1].local_adr = devinfo->resources[1].address;
+ priv->amba_maps[1].remote_adr = 0x40000000;
+
+ /* Addresses not matching with map be untouched */
+ priv->amba_maps[2].size = 0xfffffff0;
+ priv->amba_maps[2].local_adr = 0;
+ priv->amba_maps[2].remote_adr = 0;
+
+ /* Mark end of table */
+ priv->amba_maps[3].size=0;
+ priv->amba_maps[3].local_adr = 0;
+ priv->amba_maps[3].remote_adr = 0;
+
+ /* Start AMBA PnP scan at first AHB bus */
+ ambapp_scan(&priv->abus,
+  bar0 + (priv->version->amba_ioarea & ~0xff000000),
+  NULL, &priv->amba_maps[0]);
+
+ /* Initialize Frequency of AMBA bus */
+ ambapp_freq_init(&priv->abus, NULL, priv->version->amba_freq_hz);
+
+ /* Point PAGE0 to start of APB area */
+ *page0 = AHB1_BASE_ADDR;
+
+ /* Find GRPCI controller */
+ tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
+     (OPTIONS_ALL|OPTIONS_APB_SLVS),
+     VENDOR_GAISLER, GAISLER_PCIFBRG,
+     ambapp_find_by_idx, NULL);
+ if ( !tmp ) {
+  return -3;
+ }
+ priv->grpci = (struct grpci_regs *)((struct ambapp_apb_info *)tmp->devinfo)->start;
+
+ /* Set GRPCI mmap so that AMBA masters can access CPU-RAM over
+  * the PCI window.
+  */
+ priv->grpci->cfg_stat = (priv->grpci->cfg_stat & 0x0fffffff) |
+    (priv->ahbmst2pci_map & 0xf0000000);
+ priv->grpci->page1 = 0x40000000;
+
+ /* Find IRQ controller, Clear all current IRQs */
+ tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
+     (OPTIONS_ALL|OPTIONS_APB_SLVS),
+     VENDOR_GAISLER, GAISLER_IRQMP,
+     ambapp_find_by_idx, NULL);
+ if ( !tmp ) {
+  return -4;
+ }
+ priv->irq = (struct irqmp_regs *)DEV_TO_APB(tmp)->start;
+ /* Set up GR-RASTA-IO irq controller: all sources cleared/masked */
+ priv->irq->mask[0] = 0;
+ priv->irq->iclear = 0xffff;
+ priv->irq->ilevel = 0;
+
+ /* DOWN streams translation table */
+ priv->bus_maps_down[0].name = "PCI BAR0 -> AMBA";
+ priv->bus_maps_down[0].size = priv->amba_maps[0].size;
+ priv->bus_maps_down[0].from_adr = (void *)priv->amba_maps[0].local_adr;
+ priv->bus_maps_down[0].to_adr = (void *)priv->amba_maps[0].remote_adr;
+
+ priv->bus_maps_down[1].name = "PCI BAR1 -> AMBA";
+ priv->bus_maps_down[1].size = priv->amba_maps[1].size;
+ priv->bus_maps_down[1].from_adr = (void *)priv->amba_maps[1].local_adr;
+ priv->bus_maps_down[1].to_adr = (void *)priv->amba_maps[1].remote_adr;
+
+ /* Mark end of translation table */
+ priv->bus_maps_down[2].size = 0;
+
+ /* Find GRPCI controller AHB Slave interface */
+ tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
+     (OPTIONS_ALL|OPTIONS_AHB_SLVS),
+     VENDOR_GAISLER, GAISLER_PCIFBRG,
+     ambapp_find_by_idx, NULL);
+ if ( !tmp ) {
+  return -5;
+ }
+ ahb = (struct ambapp_ahb_info *)tmp->devinfo;
+
+ /* UP streams translation table */
+ priv->bus_maps_up[0].name = "AMBA GRPCI Window";
+ priv->bus_maps_up[0].size = ahb->mask[0]; /* AMBA->PCI Window on GR-RASTA-IO board */
+ priv->bus_maps_up[0].from_adr = (void *)ahb->start[0];
+ priv->bus_maps_up[0].to_adr = (void *)
+    (priv->ahbmst2pci_map & 0xf0000000);
+
+ /* Mark end of translation table */
+ priv->bus_maps_up[1].size = 0;
+
+ /* Successfully registered the RASTA board */
+ return 0;
+}
+
+/* PCI Hardware (Revision 2 boards, GRPCI2 controller) initialization.
+ * Called from gr_rasta_io_init1() for devinfo->rev == 2.
+ *
+ * Maps the target PCI BARs onto the remote AMBA address space, scans the
+ * board's AMBA Plug&Play information, clears/configures the on-board IRQ
+ * controller and builds the up/down-stream address translation tables
+ * used by the AMBA bus driver. Returns 0 on success, negative on error.
+ */
+static int gr_rasta_io2_hw_init(struct gr_rasta_io_priv *priv)
+{
+	int i;
+	uint32_t data;
+	unsigned int ctrl;
+	uint8_t tmp2;
+	struct ambapp_dev *tmp;
+	struct ambapp_ahb_info *ahb;
+	uint8_t cap_ptr;
+	pci_dev_t pcidev = priv->pcidev;
+	struct pci_dev_info *devinfo = priv->devinfo;
+
+	/* Check capabilities list bit */
+	pci_cfg_r8(pcidev, PCIR_STATUS, &tmp2);
+
+	if (!((tmp2 >> 4) & 1)) {
+		/* Capabilities list not available which it should be in the
+		 * GRPCI2
+		 */
+		return -3;
+	}
+
+	/* Read capabilities pointer */
+	pci_cfg_r8(pcidev, PCIR_CAP_PTR, &cap_ptr);
+
+	/* Set AHB address mappings for target PCI bars
+	 * BAR0: 16MB : Mapped to I/O at 0x80000000
+	 * BAR1: 256MB : Mapped to MEM at 0x40000000
+	 */
+	pci_cfg_w32(pcidev, cap_ptr+GRPCI2_BAR0_TO_AHB_MAP, AHB1_BASE_ADDR);
+	pci_cfg_w32(pcidev, cap_ptr+GRPCI2_BAR1_TO_AHB_MAP, 0x40000000);
+
+	/* Set PCI bus to be same endianess as PCI system */
+	pci_cfg_r32(pcidev, cap_ptr+GRPCI2_PCI_CONFIG, &data);
+	if (pci_endian == PCI_BIG_ENDIAN)
+		data = data & 0xFFFFFFFE;
+	else
+		data = data | 0x00000001;
+	pci_cfg_w32(pcidev, cap_ptr+GRPCI2_PCI_CONFIG, data);
+
+#if 0
+	/* set parity error response */
+	pci_cfg_r32(pcidev, PCIR_COMMAND, &data);
+	pci_cfg_w32(pcidev, PCIR_COMMAND, (data|PCIM_CMD_PERRESPEN));
+#endif
+
+	/* Scan AMBA Plug&Play */
+
+	/* AMBA MAP bar0 (in PCI) ==> AHB1_BASE_ADDR (remote amba address) */
+	priv->amba_maps[0].size = devinfo->resources[0].size;
+	priv->amba_maps[0].local_adr = devinfo->resources[0].address;
+	priv->amba_maps[0].remote_adr = AHB1_BASE_ADDR;
+
+	/* AMBA MAP bar1 (in PCI) ==> 0x40000000 (remote amba address) */
+	priv->amba_maps[1].size = devinfo->resources[1].size;
+	priv->amba_maps[1].local_adr = devinfo->resources[1].address;
+	priv->amba_maps[1].remote_adr = 0x40000000;
+
+	/* Addresses not matching with map be untouched */
+	priv->amba_maps[2].size = 0xfffffff0;
+	priv->amba_maps[2].local_adr = 0;
+	priv->amba_maps[2].remote_adr = 0;
+
+	/* Mark end of table */
+	priv->amba_maps[3].size=0;
+
+	/* Start AMBA PnP scan at first AHB bus */
+	ambapp_scan(
+		&priv->abus,
+		devinfo->resources[0].address + AHB1_IOAREA_OFS,
+		NULL,
+		&priv->amba_maps[0]);
+
+	/* Initialize Frequency of AMBA bus. The AMBA bus runs at same
+	 * frequency as PCI bus
+	 */
+	ambapp_freq_init(&priv->abus, NULL, priv->version->amba_freq_hz);
+
+	/* Find IRQ controller, Clear all current IRQs */
+	tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
+				(OPTIONS_ALL|OPTIONS_APB_SLVS),
+				VENDOR_GAISLER, GAISLER_IRQMP,
+				ambapp_find_by_idx, NULL);
+	if ( !tmp ) {
+		return -4;
+	}
+	priv->irq = (struct irqmp_regs *)DEV_TO_APB(tmp)->start;
+	/* Set up GR-RASTA-IO irq controller: mask all, clear pending */
+	priv->irq->mask[0] = 0;
+	priv->irq->iclear = 0xffff;
+	priv->irq->ilevel = 0;
+
+	/* Down-stream (PCI -> AMBA) translation table */
+	priv->bus_maps_down[0].name = "PCI BAR0 -> AMBA";
+	priv->bus_maps_down[0].size = priv->amba_maps[0].size;
+	priv->bus_maps_down[0].from_adr = (void *)priv->amba_maps[0].local_adr;
+	priv->bus_maps_down[0].to_adr = (void *)priv->amba_maps[0].remote_adr;
+	priv->bus_maps_down[1].name = "PCI BAR1 -> AMBA";
+	priv->bus_maps_down[1].size = priv->amba_maps[1].size;
+	priv->bus_maps_down[1].from_adr = (void *)priv->amba_maps[1].local_adr;
+	priv->bus_maps_down[1].to_adr = (void *)priv->amba_maps[1].remote_adr;
+	priv->bus_maps_down[2].size = 0;
+
+	/* Find GRPCI2 controller AHB Slave interface */
+	tmp = (void *)ambapp_for_each(&priv->abus,
+					(OPTIONS_ALL|OPTIONS_AHB_SLVS),
+					VENDOR_GAISLER, GAISLER_GRPCI2,
+					ambapp_find_by_idx, NULL);
+	if ( !tmp ) {
+		return -5;
+	}
+	ahb = (struct ambapp_ahb_info *)tmp->devinfo;
+	priv->bus_maps_up[0].name = "AMBA GRPCI2 Window";
+	priv->bus_maps_up[0].size = ahb->mask[0]; /* AMBA->PCI Window on GR-RASTA-IO board */
+	priv->bus_maps_up[0].from_adr = (void *)ahb->start[0];
+	priv->bus_maps_up[0].to_adr = (void *)
+				(priv->ahbmst2pci_map & ~(ahb->mask[0]-1));
+	priv->bus_maps_up[1].size = 0;
+
+	/* Find GRPCI2 controller APB Slave interface */
+	tmp = (void *)ambapp_for_each(&priv->abus,
+					(OPTIONS_ALL|OPTIONS_APB_SLVS),
+					VENDOR_GAISLER, GAISLER_GRPCI2,
+					ambapp_find_by_idx, NULL);
+	if ( !tmp ) {
+		return -6;
+	}
+	priv->grpci2 = (struct grpci2_regs *)
+		((struct ambapp_apb_info *)tmp->devinfo)->start;
+
+	/* Set AHB to PCI mapping for all AMBA AHB masters */
+	for(i = 0; i < 16; i++) {
+		priv->grpci2->ahbtopcimemmap[i] = priv->ahbmst2pci_map &
+							~(ahb->mask[0]-1);
+	}
+
+	/* Make sure dirq(0) sampling is enabled */
+	ctrl = priv->grpci2->ctrl;
+	ctrl = (ctrl & 0xFFFFFF0F) | (1 << 4);
+	priv->grpci2->ctrl = ctrl;
+
+	/* Successfully initialized the RASTA-IO board */
+	return 0;
+}
+
+/* Second-stage HW init: enable the board as PCI bus master so its
+ * on-board AHB masters can perform DMA to host RAM.
+ */
+static int gr_rasta_io_hw_init2(struct gr_rasta_io_priv *priv)
+{
+	/* Enable DMA by enabling PCI target as master */
+	pci_master_enable(priv->pcidev);
+
+	return DRVMGR_OK;
+}
+
+/* Called when a PCI target is found with the PCI device and vendor ID
+ * given in gr_rasta_io_ids[].
+ *
+ * Allocates the per-board private structure, initializes the board
+ * hardware according to its PCI revision and registers the on-board
+ * AMBA Plug&Play bus with the driver manager. All error paths release
+ * resources acquired so far and clear dev->priv.
+ */
+int gr_rasta_io_init1(struct drvmgr_dev *dev)
+{
+	struct gr_rasta_io_priv *priv;
+	struct pci_dev_info *devinfo;
+	int status;
+	uint32_t bar0, bar1, bar0_size, bar1_size;
+	union drvmgr_key_value *value;
+	int resources_cnt;
+
+	priv = grlib_calloc(1, sizeof(*priv));
+	if ( !priv )
+		return DRVMGR_NOMEM;
+
+	dev->priv = priv;
+	priv->dev = dev;
+
+	/* Determine number of configurations */
+	resources_cnt = get_resarray_count(gr_rasta_io_resources);
+
+	/* Generate Device prefix */
+
+	strcpy(priv->prefix, "/dev/rastaio0");
+	priv->prefix[12] += dev->minor_drv;
+	mkdir(priv->prefix, S_IRWXU | S_IRWXG | S_IRWXO);
+	priv->prefix[13] = '/';
+	priv->prefix[14] = '\0';
+
+	priv->devinfo = devinfo = (struct pci_dev_info *)dev->businfo;
+	priv->pcidev = devinfo->pcidev;
+	bar0 = devinfo->resources[0].address;
+	bar0_size = devinfo->resources[0].size;
+	bar1 = devinfo->resources[1].address;
+	bar1_size = devinfo->resources[1].size;
+	printk("\n\n--- GR-RASTA-IO[%d] ---\n", dev->minor_drv);
+	printk(" PCI BUS: 0x%x, SLOT: 0x%x, FUNCTION: 0x%x\n",
+		PCI_DEV_EXPAND(priv->pcidev));
+	printk(" PCI VENDOR: 0x%04x, DEVICE: 0x%04x\n",
+		devinfo->id.vendor, devinfo->id.device);
+	printk(" PCI BAR[0]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+		bar0, bar0 + bar0_size - 1);
+	printk(" PCI BAR[1]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+		bar1, bar1 + bar1_size - 1);
+	printk(" IRQ: %d\n\n\n", devinfo->irq);
+
+	/* all neccessary space assigned to GR-RASTA-IO target? */
+	if ((bar0_size == 0) || (bar1_size == 0)) {
+		/* Do not leak the private structure on early exit */
+		free(priv);
+		dev->priv = NULL;
+		return DRVMGR_ENORES;
+	}
+
+	/* Initialize spin-lock for this PCI peripheral device. This is to
+	 * protect the Interrupt Controller Registers. The genirq layer is
+	 * protecting its own internals and ISR dispatching.
+	 */
+	SPIN_INIT(&priv->devlock, priv->prefix);
+
+	/* Let user override which PCI address the AHB masters of the
+	 * GR-RASTA-IO board access when doing DMA to CPU RAM. The AHB masters
+	 * access the PCI Window of the AMBA bus, the MSB 4-bits of that address
+	 * is translated according this config option before the address
+	 * goes out on the PCI bus.
+	 * Only the 4 MSB bits have an effect;
+	 */
+	value = drvmgr_dev_key_get(priv->dev, "ahbmst2pci", DRVMGR_KT_INT);
+	if (value)
+		priv->ahbmst2pci_map = value->i;
+	else
+		priv->ahbmst2pci_map = AHBMST2PCIADR; /* default */
+
+	priv->genirq = genirq_init(16);
+	if ( priv->genirq == NULL ) {
+		free(priv);
+		dev->priv = NULL;
+		return DRVMGR_FAIL;
+	}
+
+	/* Select version of GR-RASTA-IO board */
+	switch (devinfo->rev) {
+	case 0:
+		priv->version = &gr_rasta_io_ver0;
+		status = gr_rasta_io_hw_init(priv);
+		break;
+	case 1:
+		priv->version = &gr_rasta_io_ver1;
+		status = gr_rasta_io_hw_init(priv);
+		break;
+	case 2:
+		priv->version = &gr_rasta_io_ver1; /* same cfg as 1 */
+		status = gr_rasta_io2_hw_init(priv);
+		break;
+	default:
+		/* Unsupported board revision. Fall through to the common
+		 * error path below so that the genirq layer and priv are
+		 * released (returning here directly leaked both and left
+		 * dev->priv dangling).
+		 */
+		status = -2;
+		break;
+	}
+
+	if ( status != 0 ) {
+		genirq_destroy(priv->genirq);
+		free(priv);
+		dev->priv = NULL;
+		printk(" Failed to initialize GR-RASTA-IO HW: %d\n", status);
+		return DRVMGR_FAIL;
+	}
+
+	/* Init amba bus */
+	priv->config.abus = &priv->abus;
+	priv->config.ops = &ambapp_rasta_io_ops;
+	priv->config.maps_up = &priv->bus_maps_up[0];
+	priv->config.maps_down = &priv->bus_maps_down[0];
+	if ( priv->dev->minor_drv < resources_cnt ) {
+		priv->config.resources = gr_rasta_io_resources[priv->dev->minor_drv];
+	} else {
+		priv->config.resources = NULL;
+	}
+
+	/* Create and register AMBA PnP bus. */
+	return ambapp_bus_register(dev, &priv->config);
+}
+
+/* Driver manager init stage 2: hook up the board's shared PCI interrupt
+ * and enable PCI mastering (DMA).
+ */
+int gr_rasta_io_init2(struct drvmgr_dev *dev)
+{
+	struct gr_rasta_io_priv *priv = dev->priv;
+
+	/* Clear any old interrupt requests */
+	drvmgr_interrupt_clear(dev, 0);
+
+	/* Enable System IRQ so that GR-RASTA-IO PCI target interrupt goes
+	 * through.
+	 *
+	 * It is important to enable it in stage init2. If interrupts were
+	 * enabled in init1 this might hang the system when more than one
+	 * PCI board is connected, this is because PCI interrupts might
+	 * be shared and PCI board 2 have not initialized and
+	 * might therefore drive interrupt already when entering init1().
+	 */
+	drvmgr_interrupt_register(
+		dev,
+		0,
+		"gr_rasta_io",
+		gr_rasta_io_isr,
+		(void *)priv);
+
+	return gr_rasta_io_hw_init2(priv);
+}
+
+/* AMBA bus operation: register and enable 'handler' for IRQ source 'irq'
+ * of the on-board IRQ controller. The pending bit is cleared for the
+ * first registered handler and the source is unmasked for the first
+ * enabled handler only (genirq returns 0 for "first", 1 otherwise).
+ */
+int ambapp_rasta_io_int_register(
+	struct drvmgr_dev *dev,
+	int irq,
+	const char *info,
+	drvmgr_isr handler,
+	void *arg)
+{
+	struct gr_rasta_io_priv *priv = dev->parent->dev->priv;
+	SPIN_IRQFLAGS(irqflags);
+	int status;
+	void *h;
+
+	h = genirq_alloc_handler(handler, arg);
+	if ( h == NULL )
+		return DRVMGR_FAIL;
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+	status = genirq_register(priv->genirq, irq, h);
+	if ( status == 0 ) {
+		/* Clear IRQ for first registered handler */
+		priv->irq->iclear = (1<<irq);
+	} else if ( status == 1 )
+		status = 0;
+
+	if (status != 0) {
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+		genirq_free_handler(h);
+		return DRVMGR_FAIL;
+	}
+
+	status = genirq_enable(priv->genirq, irq, handler, arg);
+	if ( status == 0 ) {
+		/* Enable IRQ for first enabled handler only */
+		priv->irq->mask[0] |= (1<<irq); /* unmask interrupt source */
+	} else if ( status == 1 )
+		status = 0;
+
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return status;
+}
+
+/* AMBA bus operation: disable and unregister a previously registered
+ * ISR. The IRQ source is masked only when no enabled handler remains.
+ */
+int ambapp_rasta_io_int_unregister(
+	struct drvmgr_dev *dev,
+	int irq,
+	drvmgr_isr isr,
+	void *arg)
+{
+	struct gr_rasta_io_priv *priv = dev->parent->dev->priv;
+	SPIN_IRQFLAGS(irqflags);
+	int status;
+	void *handler;
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+	status = genirq_disable(priv->genirq, irq, isr, arg);
+	if ( status == 0 ) {
+		/* Disable IRQ only when no enabled handler exists */
+		priv->irq->mask[0] &= ~(1<<irq); /* mask interrupt source */
+	}
+
+	handler = genirq_unregister(priv->genirq, irq, isr, arg);
+	if ( handler == NULL )
+		status = DRVMGR_FAIL;
+	else
+		status = DRVMGR_OK;
+
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	/* Free handler memory outside the IRQ-disabled region */
+	if (handler)
+		genirq_free_handler(handler);
+
+	return status;
+}
+
+/* AMBA bus operation: unmask (enable) IRQ source 'irq' in the on-board
+ * IRQ controller. Fails if 'irq' is outside the valid genirq range.
+ */
+int ambapp_rasta_io_int_unmask(
+	struct drvmgr_dev *dev,
+	int irq)
+{
+	struct gr_rasta_io_priv *priv = dev->parent->dev->priv;
+	SPIN_IRQFLAGS(irqflags);
+
+	DBG("RASTA-IO IRQ %d: unmask\n", irq);
+
+	if ( genirq_check(priv->genirq, irq) )
+		return DRVMGR_EINVAL;
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+	/* Enable IRQ for first enabled handler only */
+	priv->irq->mask[0] |= (1<<irq); /* unmask interrupt source */
+
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return DRVMGR_OK;
+}
+
+/* AMBA bus operation: mask (disable) IRQ source 'irq' in the on-board
+ * IRQ controller. Fails if 'irq' is outside the valid genirq range.
+ */
+int ambapp_rasta_io_int_mask(
+	struct drvmgr_dev *dev,
+	int irq)
+{
+	struct gr_rasta_io_priv *priv = dev->parent->dev->priv;
+	SPIN_IRQFLAGS(irqflags);
+
+	DBG("RASTA-IO IRQ %d: mask\n", irq);
+
+	if ( genirq_check(priv->genirq, irq) )
+		return DRVMGR_EINVAL;
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+	/* Disable/mask IRQ */
+	priv->irq->mask[0] &= ~(1<<irq); /* mask interrupt source */
+
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return DRVMGR_OK;
+}
+
+/* AMBA bus operation: acknowledge (clear) a pending IRQ in the on-board
+ * IRQ controller.
+ */
+int ambapp_rasta_io_int_clear(
+	struct drvmgr_dev *dev,
+	int irq)
+{
+	struct gr_rasta_io_priv *priv = dev->parent->dev->priv;
+
+	if ( genirq_check(priv->genirq, irq) )
+		return DRVMGR_EINVAL;
+
+	priv->irq->iclear = (1<<irq);
+
+	return DRVMGR_OK;
+}
+
+/* AMBA bus operation: provide bus parameters to child drivers. Only the
+ * device name prefix (without the leading "/dev") is supplied here.
+ */
+int ambapp_rasta_io_get_params(struct drvmgr_dev *dev, struct drvmgr_bus_params *params)
+{
+	struct gr_rasta_io_priv *priv = dev->parent->dev->priv;
+
+	/* Device name prefix pointer, skip /dev */
+	params->dev_prefix = &priv->prefix[5];
+
+	return 0;
+}
+
+/* Print PCI/AMBA/IRQ information about one GR-RASTA-IO board to stdout.
+ * 'options' selects optional sections (RASTA_IO_OPTIONS_*).
+ */
+void gr_rasta_io_print_dev(struct drvmgr_dev *dev, int options)
+{
+	struct gr_rasta_io_priv *priv = dev->priv;
+	struct pci_dev_info *devinfo = priv->devinfo;
+	uint32_t bar0, bar1, bar0_size, bar1_size;
+
+	/* Print */
+	printf("--- GR-RASTA-IO [bus 0x%x, dev 0x%x, fun 0x%x] ---\n",
+		PCI_DEV_EXPAND(priv->pcidev));
+
+	bar0 = devinfo->resources[0].address;
+	bar0_size = devinfo->resources[0].size;
+	bar1 = devinfo->resources[1].address;
+	bar1_size = devinfo->resources[1].size;
+
+	printf(" PCI BAR[0]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+		bar0, bar0 + bar0_size - 1);
+	printf(" PCI BAR[1]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+		bar1, bar1 + bar1_size - 1);
+	printf(" IRQ REGS: 0x%" PRIxPTR "\n", (uintptr_t)priv->irq);
+	printf(" IRQ: %d\n", devinfo->irq);
+	printf(" PCI REVISION: %d\n", devinfo->rev);
+	printf(" FREQ: %d Hz\n", priv->version->amba_freq_hz);
+	printf(" IMASK: 0x%08x\n", priv->irq->mask[0]);
+	printf(" IPEND: 0x%08x\n", priv->irq->ipend);
+
+	/* Print amba config */
+	if ( options & RASTA_IO_OPTIONS_AMBA ) {
+		ambapp_print(&priv->abus, 10);
+	}
+
+#if 0
+	/* Print IRQ handlers and their arguments */
+	if ( options & RASTA_IO_OPTIONS_IRQ ) {
+		int i;
+		for(i=0; i<16; i++) {
+			printf(" IRQ[%02d]: 0x%x, arg: 0x%x\n",
+				i, (unsigned int)priv->isrs[i].handler, (unsigned int)priv->isrs[i].arg);
+		}
+	}
+#endif
+}
+
+/* Print information about every GR-RASTA-IO board handled by this
+ * driver by walking the driver's device list.
+ */
+void gr_rasta_io_print(int options)
+{
+	struct pci_drv_info *drv = &gr_rasta_io_info;
+	struct drvmgr_dev *dev;
+
+	dev = drv->general.dev;
+	while(dev) {
+		gr_rasta_io_print_dev(dev, options);
+		dev = dev->next_in_drv;
+	}
+}
diff --git a/bsps/shared/grlib/pci/gr_rasta_spw_router.c b/bsps/shared/grlib/pci/gr_rasta_spw_router.c
new file mode 100644
index 0000000000..6efbaa463d
--- /dev/null
+++ b/bsps/shared/grlib/pci/gr_rasta_spw_router.c
@@ -0,0 +1,696 @@
+/* GR-RASTA-SPW-ROUTER PCI Target driver.
+ *
+ * COPYRIGHT (c) 2011.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ * Configures the GR-RASTA-SPW-ROUTER interface PCI board.
+ * This driver provides a AMBA PnP bus by using the general part
+ * of the AMBA PnP bus driver (ambapp_bus.c). Based on the
+ * GR-RASTA-IO driver.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <bsp.h>
+#include <rtems/bspIo.h>
+#include <pci.h>
+
+#include <grlib/ambapp.h>
+#include <grlib/grlib.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <drvmgr/pci_bus.h>
+#include <grlib/bspcommon.h>
+#include <grlib/genirq.h>
+#include <grlib/gr_rasta_spw_router.h>
+
+#include <grlib/grlib_impl.h>
+
+/* Determines which PCI address the AHB masters will access, it should be
+ * set so that the masters can access the CPU RAM. Default is base of CPU RAM,
+ * CPU RAM is mapped 1:1 to PCI space.
+ */
+extern unsigned int _RAM_START;
+#define AHBMST2PCIADR (((unsigned int)&_RAM_START) & 0xf0000000)
+
+/* Offset from 0x80000000 (dual bus version) */
+#define AHB1_BASE_ADDR 0x80000000
+#define AHB1_IOAREA_BASE_ADDR 0x80100000
+
+#define GRPCI2_BAR0_TO_AHB_MAP 0x04 /* Fixme */
+#define GRPCI2_PCI_CONFIG 0x20 /* Fixme */
+
+/* #define DEBUG 1 */
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/* PCI ID */
+#define PCIID_VENDOR_GAISLER 0x1AC8
+
+int gr_rasta_spw_router_init1(struct drvmgr_dev *dev);
+int gr_rasta_spw_router_init2(struct drvmgr_dev *dev);
+void gr_rasta_spw_router_isr(void *arg);
+
+/* GRPCI2 PCI controller register file, accessed over the remote AMBA
+ * bus. Only ctrl and ahbtopcimemmap[] are written by this driver; the
+ * layout presumably follows the GRLIB IP core manual — verify there.
+ */
+struct grpci2_regs {
+	volatile unsigned int ctrl;
+	volatile unsigned int statcap;
+	volatile unsigned int pcimstprefetch;
+	volatile unsigned int ahbtopciiomap;
+	volatile unsigned int dmactrl;
+	volatile unsigned int dmadesc;
+	volatile unsigned int dmachanact;
+	volatile unsigned int reserved;
+	volatile unsigned int pcibartoahb[6];
+	volatile unsigned int reserved2[2];
+	volatile unsigned int ahbtopcimemmap[16];
+	volatile unsigned int trcctrl;
+	volatile unsigned int trccntmode;
+	volatile unsigned int trcadpat;
+	volatile unsigned int trcadmask;
+	volatile unsigned int trcctrlsigpat;
+	volatile unsigned int trcctrlsigmask;
+	volatile unsigned int trcadstate;
+	volatile unsigned int trcctrlsigstate;
+};
+
+struct gr_rasta_spw_router_ver {
+ const unsigned int amba_freq_hz; /* The frequency */
+ const unsigned int amba_ioarea; /* The address where the PnP IOAREA starts at */
+};
+
+/* Private data structure for driver */
+struct gr_rasta_spw_router_priv {
+ /* Driver management */
+ struct drvmgr_dev *dev;
+ char prefix[20];
+ SPIN_DECLARE(devlock);
+
+ /* PCI */
+ pci_dev_t pcidev;
+ struct pci_dev_info *devinfo;
+ uint32_t ahbmst2pci_map;
+
+ /* IRQ */
+ genirq_t genirq;
+
+ /* GR-RASTA-SPW-ROUTER */
+ struct gr_rasta_spw_router_ver *version;
+ struct irqmp_regs *irq;
+ struct grpci2_regs *grpci2;
+ struct drvmgr_map_entry bus_maps_up[2];
+ struct drvmgr_map_entry bus_maps_down[2];
+
+ /* AMBA Plug&Play information on GR-RASTA-SPW-ROUTER */
+ struct ambapp_bus abus;
+ struct ambapp_mmap amba_maps[3];
+ struct ambapp_config config;
+};
+
+struct gr_rasta_spw_router_ver gr_rasta_spw_router_ver0 = {
+ .amba_freq_hz = 50000000,
+ .amba_ioarea = 0xfff00000,
+};
+
+int ambapp_rasta_spw_router_int_register(
+ struct drvmgr_dev *dev,
+ int irq,
+ const char *info,
+ drvmgr_isr handler,
+ void *arg);
+int ambapp_rasta_spw_router_int_unregister(
+ struct drvmgr_dev *dev,
+ int irq,
+ drvmgr_isr handler,
+ void *arg);
+int ambapp_rasta_spw_router_int_unmask(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_rasta_spw_router_int_mask(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_rasta_spw_router_int_clear(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_rasta_spw_router_get_params(
+ struct drvmgr_dev *dev,
+ struct drvmgr_bus_params *params);
+
+struct ambapp_ops ambapp_rasta_spw_router_ops = {
+ .int_register = ambapp_rasta_spw_router_int_register,
+ .int_unregister = ambapp_rasta_spw_router_int_unregister,
+ .int_unmask = ambapp_rasta_spw_router_int_unmask,
+ .int_mask = ambapp_rasta_spw_router_int_mask,
+ .int_clear = ambapp_rasta_spw_router_int_clear,
+ .get_params = ambapp_rasta_spw_router_get_params
+};
+
+struct drvmgr_drv_ops gr_rasta_spw_router_ops =
+{
+ .init = {gr_rasta_spw_router_init1, gr_rasta_spw_router_init2, NULL, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+struct pci_dev_id_match gr_rasta_spw_router_ids[] =
+{
+ PCIID_DEVVEND(PCIID_VENDOR_GAISLER, PCIID_DEVICE_GR_RASTA_SPW_RTR),
+ PCIID_END_TABLE /* Mark end of table */
+};
+
+struct pci_drv_info gr_rasta_spw_router_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_PCI_GAISLER_RASTA_SPW_ROUTER_ID, /* Driver ID */
+ "GR-RASTA-SPW_ROUTER_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_PCI, /* Bus Type */
+ &gr_rasta_spw_router_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ sizeof(struct gr_rasta_spw_router_priv),
+ },
+ &gr_rasta_spw_router_ids[0]
+};
+
+/* Driver resources configuration for the AMBA bus on the GR-RASTA-SPW-ROUTER board.
+ * It is declared weak so that the user may override it from the project file,
+ * if the default settings are not enough.
+ *
+ * The configuration consists of an array of configuration pointers, each
+ * pointer determine the configuration of one GR-RASTA-SPW-ROUTER board. Pointer
+ * zero is for board0, pointer 1 for board1 and so on.
+ *
+ * The array must end with a NULL pointer.
+ */
+struct drvmgr_bus_res *gr_rasta_spw_router_resources[] __attribute__((weak)) =
+{
+ NULL
+};
+
+/* Register the GR-RASTA-SPW-ROUTER PCI driver with the driver manager. */
+void gr_rasta_spw_router_register_drv(void)
+{
+	DBG("Registering GR-RASTA-SPW-ROUTER PCI driver\n");
+	drvmgr_drv_register(&gr_rasta_spw_router_info.general);
+}
+
+/* Shared PCI interrupt dispatcher: demultiplexes the 16 sources of the
+ * on-board IRQ controller, runs the registered genirq handlers and
+ * acknowledges each source. Finally ACKs the level-triggered PCI IRQ.
+ */
+void gr_rasta_spw_router_isr(void *arg)
+{
+	struct gr_rasta_spw_router_priv *priv = arg;
+	unsigned int status, tmp;
+	int irq;
+	SPIN_ISR_IRQFLAGS(irqflags);
+
+	tmp = status = priv->irq->ipend;
+
+	/* DBG("GR-RASTA-SPW-ROUTER: IRQ 0x%x\n",status); */
+
+	SPIN_LOCK(&priv->devlock, irqflags);
+	for(irq=0; irq<16; irq++) {
+		if ( status & (1<<irq) ) {
+			genirq_doirq(priv->genirq, irq);
+			priv->irq->iclear = (1<<irq);
+			status &= ~(1<<irq);
+			if ( status == 0 )
+				break;
+		}
+	}
+	SPIN_UNLOCK(&priv->devlock, irqflags);
+
+	/* ACK interrupt, this is because PCI is Level, so the IRQ Controller
+	 * still drives the IRQ
+	 */
+	if ( tmp )
+		drvmgr_interrupt_clear(priv->dev, 0);
+
+	DBG("RASTA-SPW_ROUTER-IRQ: 0x%x\n", tmp);
+}
+
+/* First-stage HW init: map PCI BAR0 onto the remote AMBA address space,
+ * scan the board's AMBA Plug&Play information, set up the on-board IRQ
+ * controller and the AHB<->PCI address translation tables.
+ * Returns 0 on success or a negative error code.
+ */
+static int gr_rasta_spw_router_hw_init(struct gr_rasta_spw_router_priv *priv)
+{
+	int i;
+	uint32_t data;
+	unsigned int ctrl;
+	uint8_t tmp2;
+	struct ambapp_dev *tmp;
+	struct ambapp_ahb_info *ahb;
+	uint8_t cap_ptr;
+	pci_dev_t pcidev = priv->pcidev;
+	struct pci_dev_info *devinfo = priv->devinfo;
+
+	/* Select version of GR-RASTA-SPW-ROUTER board. Currently only one
+	 * version
+	 */
+	switch (devinfo->rev) {
+	case 0:
+		priv->version = &gr_rasta_spw_router_ver0;
+		break;
+	default:
+		return -2;
+	}
+
+	/* Check capabilities list bit */
+	pci_cfg_r8(pcidev, PCIR_STATUS, &tmp2);
+
+	if (!((tmp2 >> 4) & 1)) {
+		/* Capabilities list not available which it should be in the GRPCI2 */
+		return -3;
+	}
+
+	/* Read capabilities pointer */
+	pci_cfg_r8(pcidev, PCIR_CAP_PTR, &cap_ptr);
+
+	/* Set AHB address mappings for target PCI bars */
+	pci_cfg_w32(pcidev, cap_ptr+GRPCI2_BAR0_TO_AHB_MAP, 0xffe00000); /* APB bus, AHB I/O bus 2 MB */
+
+	/* Set PCI bus to be big endian */
+	pci_cfg_r32(pcidev, cap_ptr+GRPCI2_PCI_CONFIG, &data);
+	data = data & 0xFFFFFFFE;
+	pci_cfg_w32(pcidev, cap_ptr+GRPCI2_PCI_CONFIG, data);
+
+#if 0
+	/* set parity error response */
+	pci_cfg_r32(pcidev, PCIR_COMMAND, &data);
+	pci_cfg_w32(pcidev, PCIR_COMMAND, (data|PCIM_CMD_PERRESPEN));
+#endif
+
+	/* Scan AMBA Plug&Play */
+
+	/* AMBA MAP bar0 (in router) ==> 0xffe00000(remote amba address) */
+	priv->amba_maps[0].size = devinfo->resources[0].size;
+	priv->amba_maps[0].local_adr = devinfo->resources[0].address;
+	priv->amba_maps[0].remote_adr = 0xffe00000;
+
+	/* Addresses not matching with map be untouched */
+	priv->amba_maps[1].size = 0xfffffff0;
+	priv->amba_maps[1].local_adr = 0;
+	priv->amba_maps[1].remote_adr = 0;
+
+	/* Mark end of table */
+	priv->amba_maps[2].size=0;
+
+	/* Start AMBA PnP scan at first AHB bus */
+	ambapp_scan(
+		&priv->abus,
+		devinfo->resources[0].address + 0x100000,
+		NULL,
+		&priv->amba_maps[0]);
+
+	/* Initialize Frequency of AMBA bus */
+	ambapp_freq_init(&priv->abus, NULL, priv->version->amba_freq_hz);
+
+	/* Find IRQ controller, Clear all current IRQs */
+	tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
+				(OPTIONS_ALL|OPTIONS_APB_SLVS),
+				VENDOR_GAISLER, GAISLER_IRQMP,
+				ambapp_find_by_idx, NULL);
+	if ( !tmp ) {
+		return -4;
+	}
+	priv->irq = (struct irqmp_regs *)DEV_TO_APB(tmp)->start;
+	/* Set up GR-RASTA-SPW-ROUTER irq controller */
+	priv->irq->mask[0] = 0;
+	priv->irq->iclear = 0xffff;
+	priv->irq->ilevel = 0;
+
+	/* Down stream (PCI -> AMBA) translation table */
+	priv->bus_maps_down[0].name = "PCI BAR0 -> AMBA";
+	priv->bus_maps_down[0].size = priv->amba_maps[0].size;
+	priv->bus_maps_down[0].from_adr = (void *)priv->amba_maps[0].local_adr;
+	priv->bus_maps_down[0].to_adr = (void *)priv->amba_maps[0].remote_adr;
+	priv->bus_maps_down[1].size = 0;
+
+	/* Find GRPCI2 controller AHB Slave interface */
+	tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
+					(OPTIONS_ALL|OPTIONS_AHB_SLVS),
+					VENDOR_GAISLER, GAISLER_GRPCI2,
+					ambapp_find_by_idx, NULL);
+	if ( !tmp ) {
+		return -5;
+	}
+	ahb = (struct ambapp_ahb_info *)tmp->devinfo;
+	priv->bus_maps_up[0].name = "AMBA GRPCI2 Window";
+	priv->bus_maps_up[0].size = ahb->mask[0]; /* AMBA->PCI Window on GR-RASTA-SPW-ROUTER board */
+	priv->bus_maps_up[0].from_adr = (void *)ahb->start[0];
+	priv->bus_maps_up[0].to_adr = (void *)
+				(priv->ahbmst2pci_map & ~(ahb->mask[0]-1));
+	priv->bus_maps_up[1].size = 0;
+
+	/* Find GRPCI2 controller APB Slave interface */
+	tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
+					(OPTIONS_ALL|OPTIONS_APB_SLVS),
+					VENDOR_GAISLER, GAISLER_GRPCI2,
+					ambapp_find_by_idx, NULL);
+	if ( !tmp ) {
+		return -6;
+	}
+	priv->grpci2 = (struct grpci2_regs *)
+		((struct ambapp_apb_info *)tmp->devinfo)->start;
+
+	/* Set AHB to PCI mapping for all AMBA AHB masters */
+	for(i = 0; i < 16; i++) {
+		priv->grpci2->ahbtopcimemmap[i] = priv->ahbmst2pci_map &
+							~(ahb->mask[0]-1);
+	}
+
+	/* Make sure dirq(0) sampling is enabled */
+	ctrl = priv->grpci2->ctrl;
+	ctrl = (ctrl & 0xFFFFFF0F) | (1 << 4);
+	priv->grpci2->ctrl = ctrl;
+
+	/* Successfully registered the RASTA-SPW-ROUTER board */
+	return 0;
+}
+
+/* Second-stage HW init: enable the board as PCI bus master so its
+ * on-board AHB masters can perform DMA to host RAM.
+ */
+static int gr_rasta_spw_router_hw_init2(struct gr_rasta_spw_router_priv *priv)
+{
+	/* Enable DMA by enabling PCI target as master */
+	pci_master_enable(priv->pcidev);
+
+	return DRVMGR_OK;
+}
+
+/* Called when a PCI target is found with the PCI device and vendor ID
+ * given in gr_rasta_spw_router_ids[].
+ *
+ * Note: unlike the GR-RASTA-IO driver, priv storage is pre-allocated by
+ * the driver manager (sizeof(struct gr_rasta_spw_router_priv) is given
+ * in gr_rasta_spw_router_info), so error paths do not free() it.
+ */
+int gr_rasta_spw_router_init1(struct drvmgr_dev *dev)
+{
+	struct gr_rasta_spw_router_priv *priv;
+	struct pci_dev_info *devinfo;
+	int status;
+	uint32_t bar0, bar0_size;
+	union drvmgr_key_value *value;
+	int resources_cnt;
+
+	priv = dev->priv;
+	if (!priv)
+		return DRVMGR_NOMEM;
+
+	memset(priv, 0, sizeof(*priv));
+	dev->priv = priv;
+	priv->dev = dev;
+
+	/* Determine number of configurations */
+	resources_cnt = get_resarray_count(gr_rasta_spw_router_resources);
+
+	/* Generate Device prefix */
+
+	strcpy(priv->prefix, "/dev/spwrouter0");
+	priv->prefix[14] += dev->minor_drv;
+	mkdir(priv->prefix, S_IRWXU | S_IRWXG | S_IRWXO);
+	priv->prefix[15] = '/';
+	priv->prefix[16] = '\0';
+
+	priv->devinfo = devinfo = (struct pci_dev_info *)dev->businfo;
+	priv->pcidev = devinfo->pcidev;
+	bar0 = devinfo->resources[0].address;
+	bar0_size = devinfo->resources[0].size;
+	printk("\n\n--- GR-RASTA-SPW-ROUTER[%d] ---\n", dev->minor_drv);
+	printk(" PCI BUS: 0x%x, SLOT: 0x%x, FUNCTION: 0x%x\n",
+		PCI_DEV_EXPAND(priv->pcidev));
+	printk(" PCI VENDOR: 0x%04x, DEVICE: 0x%04x\n",
+		devinfo->id.vendor, devinfo->id.device);
+	printk(" PCI BAR[0]: 0x%08" PRIx32 " - 0x%08" PRIx32 "\n",
+		bar0, bar0 + bar0_size - 1);
+	printk(" IRQ: %d\n\n\n", devinfo->irq);
+
+	/* all neccessary space assigned to GR-RASTA-SPW-ROUTER target? */
+	if (bar0_size == 0)
+		return DRVMGR_ENORES;
+
+	/* Initialize spin-lock for this PCI peripheral device. This is to
+	 * protect the Interrupt Controller Registers. The genirq layer is
+	 * protecting its own internals and ISR dispatching.
+	 */
+	SPIN_INIT(&priv->devlock, priv->prefix);
+
+	/* Let user override which PCI address the AHB masters of the
+	 * GR-RASTA-SPW board access when doing DMA to CPU RAM. The AHB masters
+	 * access the PCI Window of the AMBA bus, the MSB 4-bits of that address
+	 * is translated according this config option before the address
+	 * goes out on the PCI bus.
+	 * Only the 4 MSB bits have an effect;
+	 */
+	value = drvmgr_dev_key_get(priv->dev, "ahbmst2pci", DRVMGR_KT_INT);
+	if (value)
+		priv->ahbmst2pci_map = value->i;
+	else
+		priv->ahbmst2pci_map = AHBMST2PCIADR; /* default */
+
+	priv->genirq = genirq_init(16);
+	if ( priv->genirq == NULL )
+		return DRVMGR_FAIL;
+
+	if ((status = gr_rasta_spw_router_hw_init(priv)) != 0) {
+		genirq_destroy(priv->genirq);
+		printk(" Failed to initialize GR-RASTA-SPW-ROUTER HW: %d\n", status);
+		return DRVMGR_FAIL;
+	}
+
+	/* Init amba bus */
+	priv->config.abus = &priv->abus;
+	priv->config.ops = &ambapp_rasta_spw_router_ops;
+	priv->config.maps_up = &priv->bus_maps_up[0];
+	priv->config.maps_down = &priv->bus_maps_down[0];
+	if ( priv->dev->minor_drv < resources_cnt ) {
+		priv->config.resources = gr_rasta_spw_router_resources[priv->dev->minor_drv];
+	} else {
+		priv->config.resources = NULL;
+	}
+
+	/* Create and register AMBA PnP bus. */
+	return ambapp_bus_register(dev, &priv->config);
+}
+
+/* Driver manager init stage 2: hook up the board's shared PCI interrupt
+ * and enable PCI mastering (DMA).
+ */
+int gr_rasta_spw_router_init2(struct drvmgr_dev *dev)
+{
+	struct gr_rasta_spw_router_priv *priv = dev->priv;
+
+	/* Clear any old interrupt requests */
+	drvmgr_interrupt_clear(dev, 0);
+
+	/* Enable System IRQ so that GR-RASTA-SPW-ROUTER PCI target interrupt
+	 * goes through.
+	 *
+	 * It is important to enable it in stage init2. If interrupts were
+	 * enabled in init1 this might hang the system when more than one
+	 * PCI board is connected, this is because PCI interrupts might
+	 * be shared and PCI board 2 have not initialized and
+	 * might therefore drive interrupt already when entering init1().
+	 */
+	drvmgr_interrupt_register(
+		dev,
+		0,
+		"gr_rasta_spw_router",
+		gr_rasta_spw_router_isr,
+		(void *)priv);
+
+	return gr_rasta_spw_router_hw_init2(priv);
+}
+
+/* AMBA bus operation: register and enable 'handler' for IRQ source 'irq'
+ * of the on-board IRQ controller. The pending bit is cleared for the
+ * first registered handler and the source is unmasked for the first
+ * enabled handler only (genirq returns 0 for "first", 1 otherwise).
+ */
+int ambapp_rasta_spw_router_int_register(
+	struct drvmgr_dev *dev,
+	int irq,
+	const char *info,
+	drvmgr_isr handler,
+	void *arg)
+{
+	struct gr_rasta_spw_router_priv *priv = dev->parent->dev->priv;
+	SPIN_IRQFLAGS(irqflags);
+	int status;
+	void *h;
+
+	h = genirq_alloc_handler(handler, arg);
+	if ( h == NULL )
+		return DRVMGR_FAIL;
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+	status = genirq_register(priv->genirq, irq, h);
+	if (status == 0) {
+		/* Clear IRQ for first registered handler */
+		priv->irq->iclear = (1<<irq);
+	} else if (status == 1)
+		status = 0;
+
+	if (status != 0) {
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+		genirq_free_handler(h);
+		return DRVMGR_FAIL;
+	}
+
+	status = genirq_enable(priv->genirq, irq, handler, arg);
+	if ( status == 0 ) {
+		/* Enable IRQ for first enabled handler only */
+		priv->irq->mask[0] |= (1<<irq); /* unmask interrupt source */
+	} else if ( status == 1 )
+		status = 0;
+
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return status;
+}
+
+/* AMBA bus operation: disable and unregister a previously registered
+ * ISR. The IRQ source is masked only when no enabled handler remains.
+ */
+int ambapp_rasta_spw_router_int_unregister(
+	struct drvmgr_dev *dev,
+	int irq,
+	drvmgr_isr isr,
+	void *arg)
+{
+	struct gr_rasta_spw_router_priv *priv = dev->parent->dev->priv;
+	SPIN_IRQFLAGS(irqflags);
+	int status;
+	void *handler;
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+	status = genirq_disable(priv->genirq, irq, isr, arg);
+	if ( status == 0 ) {
+		/* Disable IRQ only when no enabled handler exists */
+		priv->irq->mask[0] &= ~(1<<irq); /* mask interrupt source */
+	}
+
+	handler = genirq_unregister(priv->genirq, irq, isr, arg);
+	if ( handler == NULL )
+		status = DRVMGR_FAIL;
+	else
+		status = DRVMGR_OK;
+
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	/* Free handler memory outside the IRQ-disabled region */
+	if (handler)
+		genirq_free_handler(handler);
+
+	return status;
+}
+
+/* AMBA bus operation: unmask (enable) IRQ source 'irq' in the on-board
+ * IRQ controller. Fails if 'irq' is outside the valid genirq range.
+ */
+int ambapp_rasta_spw_router_int_unmask(
+	struct drvmgr_dev *dev,
+	int irq)
+{
+	struct gr_rasta_spw_router_priv *priv = dev->parent->dev->priv;
+	SPIN_IRQFLAGS(irqflags);
+
+	DBG("RASTA-SPW-ROUTER IRQ %d: unmask\n", irq);
+
+	if ( genirq_check(priv->genirq, irq) )
+		return DRVMGR_EINVAL;
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+	/* Enable IRQ for first enabled handler only */
+	priv->irq->mask[0] |= (1<<irq); /* unmask interrupt source */
+
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return DRVMGR_OK;
+}
+
+/* AMBA bus operation: mask (disable) IRQ source 'irq' in the on-board
+ * IRQ controller. Fails if 'irq' is outside the valid genirq range.
+ */
+int ambapp_rasta_spw_router_int_mask(
+	struct drvmgr_dev *dev,
+	int irq)
+{
+	struct gr_rasta_spw_router_priv *priv = dev->parent->dev->priv;
+	SPIN_IRQFLAGS(irqflags);
+
+	DBG("RASTA-SPW-ROUTER IRQ %d: mask\n", irq);
+
+	if ( genirq_check(priv->genirq, irq) )
+		return DRVMGR_EINVAL;
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+	/* Disable/mask IRQ */
+	priv->irq->mask[0] &= ~(1<<irq); /* mask interrupt source */
+
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return DRVMGR_OK;
+}
+
+/* AMBA bus operation: acknowledge (clear) a pending IRQ in the on-board
+ * IRQ controller.
+ */
+int ambapp_rasta_spw_router_int_clear(
+	struct drvmgr_dev *dev,
+	int irq)
+{
+	struct gr_rasta_spw_router_priv *priv = dev->parent->dev->priv;
+
+	if ( genirq_check(priv->genirq, irq) )
+		return DRVMGR_EINVAL;
+
+	priv->irq->iclear = (1<<irq);
+
+	return DRVMGR_OK;
+}
+
+/* AMBA bus operation: provide bus parameters to child drivers. Only the
+ * device name prefix (without the leading "/dev") is supplied here.
+ */
+int ambapp_rasta_spw_router_get_params(struct drvmgr_dev *dev, struct drvmgr_bus_params *params)
+{
+	struct gr_rasta_spw_router_priv *priv = dev->parent->dev->priv;
+
+	/* Device name prefix pointer, skip /dev */
+	params->dev_prefix = &priv->prefix[5];
+
+	return 0;
+}
+
+/* Print PCI/AMBA/IRQ information about one GR-RASTA-SPW-ROUTER board to
+ * stdout. 'options' selects optional sections (RASTA_SPW_ROUTER_OPTIONS_*).
+ */
+void gr_rasta_spw_router_print_dev(struct drvmgr_dev *dev, int options)
+{
+	struct gr_rasta_spw_router_priv *priv = dev->priv;
+	struct pci_dev_info *devinfo = priv->devinfo;
+	uint32_t bar0, bar0_size;
+
+	/* Print */
+	printf("--- GR-RASTA-SPW-ROUTER [bus 0x%x, dev 0x%x, fun 0x%x] ---\n",
+		PCI_DEV_EXPAND(priv->pcidev));
+
+	bar0 = devinfo->resources[0].address;
+	bar0_size = devinfo->resources[0].size;
+	printf(" PCI BAR[0]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+		bar0, bar0 + bar0_size - 1);
+	printf(" IRQ REGS: 0x%" PRIxPTR "\n", (uintptr_t)priv->irq);
+	printf(" IRQ: %d\n", devinfo->irq);
+	printf(" PCI REVISION: %d\n", devinfo->rev);
+	printf(" FREQ: %d Hz\n", priv->version->amba_freq_hz);
+	printf(" IMASK: 0x%08x\n", priv->irq->mask[0]);
+	printf(" IPEND: 0x%08x\n", priv->irq->ipend);
+
+	/* Print amba config */
+	if (options & RASTA_SPW_ROUTER_OPTIONS_AMBA)
+		ambapp_print(&priv->abus, 10);
+
+#if 0
+	/* Print IRQ handlers and their arguments */
+	if (options & RASTA_SPW_ROUTER_OPTIONS_IRQ) {
+		int i;
+		for(i = 0; i < 16; i++) {
+			printf(" IRQ[%02d]: 0x%x, arg: 0x%x\n",
+				i, (unsigned int)priv->isrs[i].handler,
+				(unsigned int)priv->isrs[i].arg);
+		}
+	}
+#endif
+}
+
+/* Print information about every GR-RASTA-SPW-ROUTER board handled by
+ * this driver by walking the driver's device list.
+ */
+void gr_rasta_spw_router_print(int options)
+{
+	struct pci_drv_info *drv = &gr_rasta_spw_router_info;
+	struct drvmgr_dev *dev;
+
+	dev = drv->general.dev;
+	while(dev) {
+		gr_rasta_spw_router_print_dev(dev, options);
+		dev = dev->next_in_drv;
+	}
+}
diff --git a/bsps/shared/grlib/pci/gr_rasta_tmtc.c b/bsps/shared/grlib/pci/gr_rasta_tmtc.c
new file mode 100644
index 0000000000..bef2927926
--- /dev/null
+++ b/bsps/shared/grlib/pci/gr_rasta_tmtc.c
@@ -0,0 +1,897 @@
+/* GR-RASTA-TMTC PCI Target driver.
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * Configures the GR-RASTA-TMTC interface PCI board.
+ * This driver provides a AMBA PnP bus by using the general part
+ * of the AMBA PnP bus driver (ambapp_bus.c).
+ *
+ * Driver resources for the AMBA PnP bus provided can be set by overriding
+ * the defaults by declaring gr_rasta_tmtc_resources[].
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <bsp.h>
+#include <rtems/bspIo.h>
+#include <pci.h>
+
+#include <grlib/ambapp.h>
+#include <grlib/grlib.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <drvmgr/pci_bus.h>
+#include <grlib/bspcommon.h>
+#include <grlib/genirq.h>
+
+#include <grlib/gr_rasta_tmtc.h>
+
+#include <grlib/grlib_impl.h>
+
+/* Determines which PCI address the AHB masters will access, it should be
+ * set so that the masters can access the CPU RAM. Default is base of CPU RAM,
+ * CPU RAM is mapped 1:1 to PCI space.
+ */
+extern unsigned int _RAM_START;
+#define AHBMST2PCIADR (((unsigned int)&_RAM_START) & 0xf0000000)
+
+#define GAISLER_GPIO 0x01a
+#define AHB1_BASE_ADDR 0x80000000
+#define AHB1_IOAREA_BASE_ADDR 0x80200000
+#define AHB1_IOAREA_OFS (AHB1_IOAREA_BASE_ADDR - AHB1_BASE_ADDR)
+
+/* Second revision constants (GRPCI2) */
+#define GRPCI2_BAR0_TO_AHB_MAP 0x04 /* Fixme */
+#define GRPCI2_BAR1_TO_AHB_MAP 0x08 /* Fixme */
+#define GRPCI2_PCI_CONFIG 0x20 /* Fixme */
+
+/* #define DEBUG 1 */
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+int gr_rasta_tmtc_init1(struct drvmgr_dev *dev);
+int gr_rasta_tmtc_init2(struct drvmgr_dev *dev);
+void gr_rasta_tmtc_isr (void *arg);
+
/* Register layout of the GRPCI PCI controller on the remote AMBA bus.
 * Used by revision 0 boards (see gr_rasta_tmtc0_hw_init). Accessed via
 * the board's APB slave interface; all accesses are volatile MMIO.
 */
struct grpci_regs {
	volatile unsigned int cfg_stat;
	volatile unsigned int bar0;
	volatile unsigned int page0;
	volatile unsigned int bar1;
	volatile unsigned int page1;
	volatile unsigned int iomap;
	volatile unsigned int stat_cmd;
};
+
/* Register layout of the GRPCI2 PCI controller on the remote AMBA bus.
 * Used by revision 1 boards (see gr_rasta_tmtc1_hw_init), which program
 * ahbtopcimemmap[] and the dirq sampling bit in ctrl.
 */
struct grpci2_regs {
	volatile unsigned int ctrl;
	volatile unsigned int statcap;
	volatile unsigned int pcimstprefetch;
	volatile unsigned int ahbtopciiomap;
	volatile unsigned int dmactrl;
	volatile unsigned int dmadesc;
	volatile unsigned int dmachanact;
	volatile unsigned int reserved;
	volatile unsigned int pcibartoahb[6];
	volatile unsigned int reserved2[2];
	volatile unsigned int ahbtopcimemmap[16];
	volatile unsigned int trcctrl;
	volatile unsigned int trccntmode;
	volatile unsigned int trcadpat;
	volatile unsigned int trcadmask;
	volatile unsigned int trcctrlsigpat;
	volatile unsigned int trcctrlsigmask;
	volatile unsigned int trcadstate;
	volatile unsigned int trcctrlsigstate;
};
+
+struct gr_rasta_tmtc_ver {
+ const unsigned int amba_freq_hz; /* The frequency */
+ const unsigned int amba_ioarea; /* The address where the PnP IOAREA starts at */
+};
+
/* Private data structure for driver, one instance per GR-RASTA-TMTC
 * board found. Allocated and zeroed by the driver manager (see the
 * sizeof() entry in gr_rasta_tmtc_info).
 */
struct gr_rasta_tmtc_priv {
	/* Driver management */
	struct drvmgr_dev	*dev;
	char			prefix[20];	/* device name prefix, e.g. "/dev/rastatmtc0/" */
	SPIN_DECLARE(devlock);			/* protects the IRQ controller registers */

	/* PCI */
	pci_dev_t		pcidev;
	struct pci_dev_info	*devinfo;
	uint32_t		ahbmst2pci_map;	/* PCI base the board's AHB masters DMA to */

	/* IRQ */
	genirq_t		genirq;		/* shared-IRQ handler bookkeeping */

	/* GR-RASTA-TMTC */
	struct gr_rasta_tmtc_ver	*version;	/* set for rev 0 only, see gr_rasta_tmtc0_hw_init() */
	struct irqmp_regs		*irq;		/* on-board IRQMP controller */
	struct grpci_regs		*grpci;		/* rev 0 PCI controller */
	struct grpci2_regs		*grpci2;	/* rev 1 PCI controller */
	struct grgpio_regs		*gpio;		/* GPIO used to select GRTM/GRTC */
	struct drvmgr_map_entry		bus_maps_down[3];	/* CPU -> board translations */
	struct drvmgr_map_entry		bus_maps_up[2];		/* board -> CPU translations */

	/* AMBA Plug&Play information on GR-RASTA-TMTC */
	struct ambapp_bus		abus;
	struct ambapp_mmap		amba_maps[4];
	struct ambapp_config		config;
};
+
+struct gr_rasta_tmtc_ver gr_rasta_tmtc_ver0 = {
+ .amba_freq_hz = 30000000,
+ .amba_ioarea = AHB1_IOAREA_BASE_ADDR,
+};
+
+int ambapp_rasta_tmtc_int_register(
+ struct drvmgr_dev *dev,
+ int irq,
+ const char *info,
+ drvmgr_isr handler,
+ void *arg);
+int ambapp_rasta_tmtc_int_unregister(
+ struct drvmgr_dev *dev,
+ int irq,
+ drvmgr_isr handler,
+ void *arg);
+int ambapp_rasta_tmtc_int_unmask(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_rasta_tmtc_int_mask(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_rasta_tmtc_int_clear(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_rasta_tmtc_get_params(
+ struct drvmgr_dev *dev,
+ struct drvmgr_bus_params *params);
+
+struct ambapp_ops ambapp_rasta_tmtc_ops = {
+ .int_register = ambapp_rasta_tmtc_int_register,
+ .int_unregister = ambapp_rasta_tmtc_int_unregister,
+ .int_unmask = ambapp_rasta_tmtc_int_unmask,
+ .int_mask = ambapp_rasta_tmtc_int_mask,
+ .int_clear = ambapp_rasta_tmtc_int_clear,
+ .get_params = ambapp_rasta_tmtc_get_params
+};
+
+struct drvmgr_drv_ops gr_rasta_tmtc_ops =
+{
+ .init = {gr_rasta_tmtc_init1, gr_rasta_tmtc_init2, NULL, NULL},
+ .remove = NULL,
+ .info = NULL,
+};
+
+struct pci_dev_id_match gr_rasta_tmtc_ids[] =
+{
+ PCIID_DEVVEND(PCIID_VENDOR_GAISLER, PCIID_DEVICE_GR_RASTA_TMTC),
+ PCIID_END_TABLE /* Mark end of table */
+};
+
+struct pci_drv_info gr_rasta_tmtc_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_PCI_GAISLER_RASTATMTC_ID,/* Driver ID */
+ "GR-RASTA-TMTC_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_PCI, /* Bus Type */
+ &gr_rasta_tmtc_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ sizeof(struct gr_rasta_tmtc_priv) /* Let drvmgr alloc private */
+ },
+ &gr_rasta_tmtc_ids[0]
+};
+
+/* Driver resources configuration for the AMBA bus on the GR-RASTA-TMTC board.
+ * It is declared weak so that the user may override it from the project file,
+ * if the default settings are not enough.
+ *
+ * The configuration consists of an array of configuration pointers, each
+ * pointer determine the configuration of one GR-RASTA-TMTC board. Pointer
+ * zero is for board0, pointer 1 for board1 and so on.
+ *
+ * The array must end with a NULL pointer.
+ */
+struct drvmgr_bus_res *gr_rasta_tmtc_resources[] __attribute__((weak)) =
+{
+ NULL,
+};
+
/* Register the GR-RASTA-TMTC PCI target driver with the driver manager.
 * Devices matching gr_rasta_tmtc_ids[] are then bound to this driver and
 * initialized via gr_rasta_tmtc_init1()/init2().
 */
void gr_rasta_tmtc_register_drv(void)
{
	DBG("Registering GR-RASTA-TMTC PCI driver\n");
	drvmgr_drv_register(&gr_rasta_tmtc_info.general);
}
+
/* Interrupt service routine for the GR-RASTA-TMTC PCI interrupt.
 *
 * Samples the pending register of the on-board IRQMP controller, then
 * dispatches each pending IRQ source to the handlers registered through
 * the genirq layer and acknowledges the source in the controller.
 * Finally the PCI-level interrupt is cleared, since PCI interrupts are
 * level triggered and the board keeps driving the line until acked.
 *
 * arg is the board's private structure (struct gr_rasta_tmtc_priv *).
 */
void gr_rasta_tmtc_isr (void *arg)
{
	struct gr_rasta_tmtc_priv *priv = arg;
	unsigned int status, tmp;
	int irq;
	SPIN_ISR_IRQFLAGS(irqflags);

	/* tmp remembers whether anything at all was pending */
	tmp = status = priv->irq->ipend;

	/* printk("GR-RASTA-TMTC: IRQ 0x%x\n",status); */

	SPIN_LOCK(&priv->devlock, irqflags);
	for(irq=0; irq<32; irq++) {
		if ( status & (1<<irq) ) {
			/* Run registered handlers for this source */
			genirq_doirq(priv->genirq, irq);
			/* ACK source in the on-board controller */
			priv->irq->iclear = (1<<irq);
			status &= ~(1<<irq);
			if ( status == 0 )
				break; /* no sources left to service */
		}
	}
	SPIN_UNLOCK(&priv->devlock, irqflags);

	/* ACK interrupt, this is because PCI is Level, so the IRQ Controller still drives the IRQ. */
	if ( tmp )
		drvmgr_interrupt_clear(priv->dev, 0);

	DBG("RASTA-TMTC-IRQ: 0x%x\n", tmp);
}
+
/* Init AMBA bus frequency, IRQ controller, GPIO register, bus maps and other
 * common stuff between rev0 and rev1.
 *
 * Must be called after priv->abus has been populated by ambapp_scan()
 * and after priv->amba_maps[0..1] have been filled in, since the
 * down-stream translation table is built from those entries.
 *
 * Returns 0 on success, -4 if no IRQMP controller is found on the board
 * and -5 if no GPIO controller is found.
 */
static int gr_rasta_tmtc_hw_init_common(struct gr_rasta_tmtc_priv *priv)
{
	struct ambapp_dev *tmp;
	unsigned int pci_freq_hz;

	/* Initialize Frequency of AMBA bus. The AMBA bus runs at same
	 * frequency as PCI bus
	 */
	drvmgr_freq_get(priv->dev, 0, &pci_freq_hz);
	ambapp_freq_init(&priv->abus, NULL, pci_freq_hz);

	/* Find IRQ controller, Clear all current IRQs */
	tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
				(OPTIONS_ALL|OPTIONS_APB_SLVS),
				VENDOR_GAISLER, GAISLER_IRQMP,
				ambapp_find_by_idx, NULL);
	if ( !tmp ) {
		return -4;
	}
	priv->irq = (struct irqmp_regs *)DEV_TO_APB(tmp)->start;
	/* Set up GR-RASTA-TMTC irq controller: mask all sources, clear
	 * pending IRQs, all on interrupt level 0 */
	priv->irq->mask[0] = 0;
	priv->irq->iclear = 0xffffffff;
	priv->irq->ilevel = 0;

	/* Find First GPIO controller */
	tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
				(OPTIONS_ALL|OPTIONS_APB_SLVS),
				VENDOR_GAISLER, GAISLER_GPIO,
				ambapp_find_by_idx, NULL);
	if ( !tmp ) {
		return -5;
	}
	priv->gpio = (struct grgpio_regs *) (((struct ambapp_apb_info *)tmp->devinfo)->start);
	/* Clear GR-RASTA-TMTC GPIO controller: no interrupts, no bypass */
	priv->gpio->imask = 0;
	priv->gpio->ipol = 0;
	priv->gpio->iedge = 0;
	priv->gpio->bypass = 0;
	/* Set up GR-RASTA-TMTC GPIO controller to select GRTM and GRTC */
	priv->gpio->output = (GR_TMTC_GPIO_GRTM_SEL|GR_TMTC_GPIO_TRANSP_CLK) | (GR_TMTC_GPIO_TC_BIT_LOCK|GR_TMTC_GPIO_TC_RF_AVAIL|GR_TMTC_GPIO_TC_ACTIVE_HIGH|GR_TMTC_GPIO_TC_RISING_CLK);
	priv->gpio->dir = 0xffffffff;
	DBG("GR-TMTC GPIO: 0x%x\n", (unsigned int)priv->gpio);

	/* DOWN streams translation table: how CPU (PCI BAR) addresses
	 * translate into addresses on the board's AMBA bus */
	priv->bus_maps_down[0].name = "PCI BAR0 -> AMBA";
	priv->bus_maps_down[0].size = priv->amba_maps[0].size;
	priv->bus_maps_down[0].from_adr = (void *)priv->amba_maps[0].local_adr;
	priv->bus_maps_down[0].to_adr = (void *)priv->amba_maps[0].remote_adr;

	priv->bus_maps_down[1].name = "PCI BAR1 -> AMBA";
	priv->bus_maps_down[1].size = priv->amba_maps[1].size;
	priv->bus_maps_down[1].from_adr = (void *)priv->amba_maps[1].local_adr;
	priv->bus_maps_down[1].to_adr = (void *)priv->amba_maps[1].remote_adr;

	/* Mark end of translation table */
	priv->bus_maps_down[2].size = 0;

	return 0;
}
+
/* PCI Hardware (Revision 0) initialization.
 *
 * Rev 0 boards use the GRPCI controller. BAR0 is split in half: the
 * lower half is a fixed window and the upper half is a movable page
 * selected through the PAGE0 register located at BAR0 + BAR0_SIZE/2.
 * The page is first pointed at the PnP IOAREA for scanning, then back
 * at the AMBA AHB base for normal operation.
 *
 * Returns 0 on success, -2 for an unsupported board revision, -3 if no
 * GRPCI APB interface is found, -6 if no GRPCI AHB slave interface is
 * found, or an error code propagated from gr_rasta_tmtc_hw_init_common().
 */
static int gr_rasta_tmtc0_hw_init(struct gr_rasta_tmtc_priv *priv)
{
	unsigned int *page0 = NULL;
	struct ambapp_dev *tmp;
	struct ambapp_ahb_info *ahb;
	int status;
	pci_dev_t pcidev = priv->pcidev;
	struct pci_dev_info *devinfo = priv->devinfo;
	uint32_t bar0, bar0_size;

	/* Select version of GR-RASTA-TMTC board */
	switch (devinfo->rev) {
		case 0:
			priv->version = &gr_rasta_tmtc_ver0;
			break;
		default:
			return -2;
	}

	bar0 = devinfo->resources[0].address;
	bar0_size = devinfo->resources[0].size;
	/* PAGE0 register sits in the middle of BAR0 */
	page0 = (unsigned int *)(bar0 + bar0_size/2);

	/* Point PAGE0 to start of Plug and Play information */
	*page0 = priv->version->amba_ioarea & 0xf0000000;

#if 0
	{
		uint32_t data;
		/* set parity error response */
		pci_cfg_r32(pcidev, PCIR_COMMAND, &data);
		pci_cfg_w32(pcidev, PCIR_COMMAND, (data|PCIM_CMD_PERRESPEN));
	}
#endif

	/* Setup cache line size. Default cache line size will result in
	 * poor performance (256 word fetches), 0xff will set it according
	 * to the max size of the PCI FIFO.
	 */
	pci_cfg_w8(pcidev, PCIR_CACHELNSZ, 0xff);

	/* Scan AMBA Plug&Play */

	/* AMBA MAP bar0 (in CPU) ==> 0x80000000(remote amba address) */
	priv->amba_maps[0].size = 0x10000000;
	priv->amba_maps[0].local_adr = bar0;
	priv->amba_maps[0].remote_adr = AHB1_BASE_ADDR;

	/* AMBA MAP bar1 (in CPU) ==> 0x40000000(remote amba address) */
	priv->amba_maps[1].size = devinfo->resources[1].size;
	priv->amba_maps[1].local_adr = devinfo->resources[1].address;
	priv->amba_maps[1].remote_adr = 0x40000000;

	/* Addresses not matching with map be untouched */
	priv->amba_maps[2].size = 0xfffffff0;
	priv->amba_maps[2].local_adr = 0;
	priv->amba_maps[2].remote_adr = 0;

	/* Mark end of table */
	priv->amba_maps[3].size=0;
	priv->amba_maps[3].local_adr = 0;
	priv->amba_maps[3].remote_adr = 0;

	/* Start AMBA PnP scan at first AHB bus */
	ambapp_scan(&priv->abus,
		bar0 + (priv->version->amba_ioarea & ~0xf0000000),
		NULL, &priv->amba_maps[0]);

	/* Point PAGE0 to start of APB area (scan done, back to normal) */
	*page0 = AHB1_BASE_ADDR;

	/* Find GRPCI controller */
	tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
					(OPTIONS_ALL|OPTIONS_APB_SLVS),
					VENDOR_GAISLER, GAISLER_PCIFBRG,
					ambapp_find_by_idx, NULL);
	if ( !tmp ) {
		return -3;
	}
	priv->grpci = (struct grpci_regs *)((struct ambapp_apb_info *)tmp->devinfo)->start;

	/* Set GRPCI mmap so that AMBA masters can access CPU-RAM over
	 * the PCI window. Only the 4 MSB of the target PCI address are
	 * configurable.
	 */
	priv->grpci->cfg_stat = (priv->grpci->cfg_stat & 0x0fffffff) |
				(priv->ahbmst2pci_map & 0xf0000000);
	priv->grpci->page1 = 0x40000000;

	/* init AMBA bus, IRQCtrl, GPIO, bus down-maps */
	status = gr_rasta_tmtc_hw_init_common(priv);
	if (status)
		return status;

	/* Find GRPCI controller AHB Slave interface */
	tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
					(OPTIONS_ALL|OPTIONS_AHB_SLVS),
					VENDOR_GAISLER, GAISLER_PCIFBRG,
					ambapp_find_by_idx, NULL);
	if ( !tmp ) {
		return -6;
	}
	ahb = (struct ambapp_ahb_info *)tmp->devinfo;

	/* UP streams translation table: how addresses seen by the board's
	 * AHB masters translate back into PCI (CPU RAM) addresses */
	priv->bus_maps_up[0].name = "AMBA GRPCI Window";
	priv->bus_maps_up[0].size = ahb->mask[0]; /* AMBA->PCI Window on GR-RASTA-TMTC board */
	priv->bus_maps_up[0].from_adr = (void *)ahb->start[0];
	priv->bus_maps_up[0].to_adr = (void *)
				(priv->ahbmst2pci_map & 0xf0000000);

	/* Mark end of translation table */
	priv->bus_maps_up[1].size = 0;

	/* Successfully registered the RASTA board */
	return 0;
}
+
/* PCI Hardware (Revision 1) initialization.
 *
 * Rev 1 boards use the GRPCI2 controller, which is configured through a
 * PCI capability instead of a PAGE register: the BAR-to-AHB mappings and
 * endianness are written via the capability pointed to by PCIR_CAP_PTR.
 *
 * NOTE(review): unlike gr_rasta_tmtc0_hw_init(), this function never
 * assigns priv->version. priv is zero-allocated by the driver manager,
 * so priv->version stays NULL for rev 1 boards — any later dereference
 * (e.g. when printing the board frequency) must guard against that.
 *
 * Returns 0 on success, -3 if the PCI capabilities list is missing,
 * -6/-7 if the GRPCI2 AHB slave/APB interface cannot be found, or an
 * error propagated from gr_rasta_tmtc_hw_init_common().
 */
static int gr_rasta_tmtc1_hw_init(struct gr_rasta_tmtc_priv *priv)
{
	int i;
	uint32_t data;
	unsigned int ctrl;
	uint8_t tmp2;
	struct ambapp_dev *tmp;
	int status;
	struct ambapp_ahb_info *ahb;
	uint8_t cap_ptr;
	pci_dev_t pcidev = priv->pcidev;
	struct pci_dev_info *devinfo = priv->devinfo;

	/* Check capabilities list bit */
	pci_cfg_r8(pcidev, PCIR_STATUS, &tmp2);

	if (!((tmp2 >> 4) & 1)) {
		/* Capabilities list not available which it should be in the
		 * GRPCI2
		 */
		return -3;
	}

	/* Read capabilities pointer */
	pci_cfg_r8(pcidev, PCIR_CAP_PTR, &cap_ptr);

	/* Set AHB address mappings for target PCI bars
	 * BAR0: 16MB  : Mapped to I/O at 0x80000000
	 * BAR1: 256MB : Mapped to MEM at 0x40000000
	 */
	pci_cfg_w32(pcidev, cap_ptr+GRPCI2_BAR0_TO_AHB_MAP, AHB1_BASE_ADDR);
	pci_cfg_w32(pcidev, cap_ptr+GRPCI2_BAR1_TO_AHB_MAP, 0x40000000);

	/* Set PCI bus to be same endianess as PCI system */
	pci_cfg_r32(pcidev, cap_ptr+GRPCI2_PCI_CONFIG, &data);
	if (pci_endian == PCI_BIG_ENDIAN)
		data = data & 0xFFFFFFFE;
	else
		data = data | 0x00000001;
	pci_cfg_w32(pcidev, cap_ptr+GRPCI2_PCI_CONFIG, data);

#if 0
	/* set parity error response */
	pci_cfg_r32(pcidev, PCIR_COMMAND, &data);
	pci_cfg_w32(pcidev, PCIR_COMMAND, (data|PCIM_CMD_PERRESPEN));
#endif

	/* Scan AMBA Plug&Play */

	/* AMBA MAP bar0 (in PCI) ==> 0x80000000 (remote amba address) */
	priv->amba_maps[0].size = devinfo->resources[0].size;
	priv->amba_maps[0].local_adr = devinfo->resources[0].address;
	priv->amba_maps[0].remote_adr = AHB1_BASE_ADDR;

	/* AMBA MAP bar1 (in PCI) ==> 0x40000000 (remote amba address) */
	priv->amba_maps[1].size = devinfo->resources[1].size;
	priv->amba_maps[1].local_adr = devinfo->resources[1].address;
	priv->amba_maps[1].remote_adr = 0x40000000;

	/* Addresses not matching with map be untouched */
	priv->amba_maps[2].size = 0xfffffff0;
	priv->amba_maps[2].local_adr = 0;
	priv->amba_maps[2].remote_adr = 0;

	/* Mark end of table */
	priv->amba_maps[3].size=0;

	/* Start AMBA PnP scan at first AHB bus */
	ambapp_scan(
		&priv->abus,
		devinfo->resources[0].address + AHB1_IOAREA_OFS,
		NULL,
		&priv->amba_maps[0]);

	/* init AMBA bus, IRQCtrl, GPIO, bus down-maps */
	status = gr_rasta_tmtc_hw_init_common(priv);
	if (status)
		return status;

	/* Find GRPCI2 controller AHB Slave interface */
	tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
					(OPTIONS_ALL|OPTIONS_AHB_SLVS),
					VENDOR_GAISLER, GAISLER_GRPCI2,
					ambapp_find_by_idx, NULL);
	if ( !tmp ) {
		return -6;
	}
	ahb = (struct ambapp_ahb_info *)tmp->devinfo;
	priv->bus_maps_up[0].name = "AMBA GRPCI2 Window";
	priv->bus_maps_up[0].size = ahb->mask[0]; /* AMBA->PCI Window on GR-RASTA-TMTC board */
	priv->bus_maps_up[0].from_adr = (void *)ahb->start[0];
	priv->bus_maps_up[0].to_adr = (void *)
				(priv->ahbmst2pci_map & ~(ahb->mask[0]-1));
	priv->bus_maps_up[1].size = 0;

	/* Find GRPCI2 controller APB Slave interface */
	tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
					(OPTIONS_ALL|OPTIONS_APB_SLVS),
					VENDOR_GAISLER, GAISLER_GRPCI2,
					ambapp_find_by_idx, NULL);
	if ( !tmp ) {
		return -7;
	}
	priv->grpci2 = (struct grpci2_regs *)
		((struct ambapp_apb_info *)tmp->devinfo)->start;

	/* Set AHB to PCI mapping for all AMBA AHB masters */
	for(i = 0; i < 16; i++) {
		priv->grpci2->ahbtopcimemmap[i] = priv->ahbmst2pci_map &
							~(ahb->mask[0]-1);
	}

	/* Make sure dirq(0) sampling is enabled */
	ctrl = priv->grpci2->ctrl;
	ctrl = (ctrl & 0xFFFFFF0F) | (1 << 4);
	priv->grpci2->ctrl = ctrl;

	/* Successfully initialized the GR-RASTA-TMTC rev 1 board */
	return 0;
}
+
/* Second-stage hardware init: enable bus mastering for the PCI target
 * so the board's AHB masters can perform DMA into CPU RAM.
 */
static void gr_rasta_tmtc_hw_init2(struct gr_rasta_tmtc_priv *priv)
{
	/* Enable DMA by enabling PCI target as master */
	pci_master_enable(priv->pcidev);
}
+
/* Called when a PCI target is found with the PCI device and vendor ID
 * given in gr_rasta_tmtc_ids[].
 *
 * Stage-1 initialization: set up the device prefix, read out PCI BARs,
 * run the revision-specific hardware init and finally register the
 * board's AMBA PnP bus with the driver manager.
 */
int gr_rasta_tmtc_init1(struct drvmgr_dev *dev)
{
	struct gr_rasta_tmtc_priv *priv;
	struct pci_dev_info *devinfo;
	int status;
	uint32_t bar0, bar1, bar0_size, bar1_size;
	union drvmgr_key_value *value;
	int resources_cnt;

	priv = dev->priv;
	if (!priv)
		return DRVMGR_NOMEM;
	priv->dev = dev;

	/* Determine number of configurations */
	resources_cnt = get_resarray_count(gr_rasta_tmtc_resources);

	/* Generate Device prefix: "/dev/rastatmtcN/" where N is the board
	 * index. NOTE(review): prefix[14] += minor_drv produces a digit
	 * only for minor_drv 0..9.
	 */
	strcpy(priv->prefix, "/dev/rastatmtc0");
	priv->prefix[14] += dev->minor_drv;
	mkdir(priv->prefix, S_IRWXU | S_IRWXG | S_IRWXO);
	priv->prefix[15] = '/';
	priv->prefix[16] = '\0';

	priv->devinfo = devinfo = (struct pci_dev_info *)dev->businfo;
	priv->pcidev = devinfo->pcidev;
	bar0 = devinfo->resources[0].address;
	bar0_size = devinfo->resources[0].size;
	bar1 = devinfo->resources[1].address;
	bar1_size = devinfo->resources[1].size;
	printk("\n\n--- GR-RASTA-TMTC[%d] ---\n", dev->minor_drv);
	printk(" PCI BUS: 0x%x, SLOT: 0x%x, FUNCTION: 0x%x\n",
		PCI_DEV_EXPAND(priv->pcidev));
	printk(" PCI VENDOR: 0x%04x, DEVICE: 0x%04x\n",
		devinfo->id.vendor, devinfo->id.device);
	printk(" PCI BAR[0]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
		bar0, bar0 + bar0_size - 1);
	printk(" PCI BAR[1]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
		bar1, bar1 + bar1_size - 1);
	printk(" IRQ: %d\n\n\n", devinfo->irq);

	/* All necessary space assigned to GR-RASTA-TMTC target? */
	if ((bar0_size == 0) || (bar1_size == 0))
		return DRVMGR_ENORES;

	/* Initialize spin-lock for this PCI peripheral device. This is to
	 * protect the Interrupt Controller Registers. The genirq layer is
	 * protecting its own internals and ISR dispatching.
	 */
	SPIN_INIT(&priv->devlock, priv->prefix);

	/* Let user override which PCI address the AHB masters of the
	 * GR-RASTA-TMTC board access when doing DMA to CPU RAM. The AHB masters
	 * access the PCI Window of the AMBA bus, the MSB 4-bits of that address
	 * is translated according this config option before the address
	 * goes out on the PCI bus.
	 * Only the 4 MSB bits have an effect;
	 */
	value = drvmgr_dev_key_get(priv->dev, "ahbmst2pci", DRVMGR_KT_INT);
	if (value)
		priv->ahbmst2pci_map = value->i;
	else
		priv->ahbmst2pci_map = AHBMST2PCIADR; /* default */

	priv->genirq = genirq_init(32);
	if ( priv->genirq == NULL )
		return DRVMGR_FAIL;

	/* Select version of GR-RASTA-TMTC board */
	switch (devinfo->rev) {
		case 0:
			puts("GR-RASTA-TMTC: REVISION 0");
			status = gr_rasta_tmtc0_hw_init(priv);
			break;
		case 1:
			puts("GR-RASTA-TMTC: REVISION 1");
			status = gr_rasta_tmtc1_hw_init(priv);
			break;
		default:
			return DRVMGR_ENOSYS; /* HW not supported */
	}

	if ( status != 0 ) {
		genirq_destroy(priv->genirq);
		printk(" Failed to initialize GR-RASTA-TMTC HW: %d\n", status);
		return DRVMGR_FAIL;
	}

	/* Init amba bus */
	priv->config.abus = &priv->abus;
	priv->config.ops = &ambapp_rasta_tmtc_ops;
	priv->config.maps_up = &priv->bus_maps_up[0];
	priv->config.maps_down = &priv->bus_maps_down[0];
	if ( priv->dev->minor_drv < resources_cnt ) {
		priv->config.resources = gr_rasta_tmtc_resources[priv->dev->minor_drv];
	} else {
		priv->config.resources = NULL;
	}

	return ambapp_bus_register(dev, &priv->config);
}
+
/* Stage-2 initialization: clear stale interrupts, install the board ISR
 * and enable PCI bus mastering. Returns DRVMGR_OK.
 */
int gr_rasta_tmtc_init2(struct drvmgr_dev *dev)
{
	struct gr_rasta_tmtc_priv *priv = dev->priv;

	/* Clear any old interrupt requests */
	drvmgr_interrupt_clear(priv->dev, 0);

	/* Enable System IRQ so that GR-RASTA-TMTC PCI target interrupt goes
	 * through.
	 *
	 * It is important to enable it in stage init2. If interrupts were
	 * enabled in init1 this might hang the system when more than one
	 * PCI target is connected, this is because PCI interrupts might
	 * be shared and PCI board 2 have not initialized and
	 * might therefore drive interrupt already when entering init1().
	 */
	drvmgr_interrupt_register(
		priv->dev,
		0,
		"gr_rasta_tmtc",
		gr_rasta_tmtc_isr,
		(void *)priv);

	gr_rasta_tmtc_hw_init2(priv);

	return DRVMGR_OK;
}
+
/* Register an ISR for IRQ source `irq` on the board's AMBA bus.
 *
 * Handlers are multiplexed through the genirq layer: the controller-level
 * clear/unmask is only performed for the first handler registered/enabled
 * on a source (genirq_register()/genirq_enable() return 1 when a handler
 * already existed, which is mapped to success here).
 *
 * Returns DRVMGR_OK (0) on success, DRVMGR_FAIL otherwise.
 */
int ambapp_rasta_tmtc_int_register(
	struct drvmgr_dev *dev,
	int irq,
	const char *info,
	drvmgr_isr handler,
	void *arg)
{
	struct gr_rasta_tmtc_priv *priv = dev->parent->dev->priv;
	SPIN_IRQFLAGS(irqflags);
	int status;
	void *h;

	h = genirq_alloc_handler(handler, arg);
	if ( h == NULL )
		return DRVMGR_FAIL;

	SPIN_LOCK_IRQ(&priv->devlock, irqflags);

	status = genirq_register(priv->genirq, irq, h);
	if ( status == 0 ) {
		/* Clear any old pending IRQ for first registered handler */
		priv->irq->iclear = (1<<irq);
	} else if ( status == 1 )
		status = 0; /* handler already registered for this source */

	if (status != 0) {
		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
		genirq_free_handler(h);
		return DRVMGR_FAIL;
	}

	status = genirq_enable(priv->genirq, irq, handler, arg);
	if ( status == 0 ) {
		/* Enable IRQ for first enabled handler only */
		priv->irq->mask[0] |= (1<<irq); /* unmask interrupt source */
	} else if ( status == 1 )
		status = 0; /* source already enabled */

	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);

	return status;
}
+
/* Unregister an ISR previously registered with
 * ambapp_rasta_tmtc_int_register(). The IRQ source is masked in the
 * controller only when the last enabled handler for it goes away
 * (genirq_disable() returns 0 in that case). The final return value is
 * determined by whether genirq_unregister() found the handler.
 *
 * Returns DRVMGR_OK on success, DRVMGR_FAIL if the handler was unknown.
 */
int ambapp_rasta_tmtc_int_unregister(
	struct drvmgr_dev *dev,
	int irq,
	drvmgr_isr isr,
	void *arg)
{
	struct gr_rasta_tmtc_priv *priv = dev->parent->dev->priv;
	SPIN_IRQFLAGS(irqflags);
	int status;
	void *handler;

	SPIN_LOCK_IRQ(&priv->devlock, irqflags);

	status = genirq_disable(priv->genirq, irq, isr, arg);
	if ( status == 0 ) {
		/* Disable IRQ only when no enabled handler exists */
		priv->irq->mask[0] &= ~(1<<irq); /* mask interrupt source */
	} else if ( status == 1 )
		status = 0; /* other handlers still enabled; leave unmasked */

	handler = genirq_unregister(priv->genirq, irq, isr, arg);
	if ( handler == NULL )
		status = DRVMGR_FAIL;
	else
		status = DRVMGR_OK;

	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);

	/* Free outside the lock */
	if (handler)
		genirq_free_handler(handler);

	return status;
}
+
+int ambapp_rasta_tmtc_int_unmask(
+ struct drvmgr_dev *dev,
+ int irq)
+{
+ struct gr_rasta_tmtc_priv *priv = dev->parent->dev->priv;
+ SPIN_IRQFLAGS(irqflags);
+
+ DBG("RASTA-TMTC IRQ %d: unmask\n", irq);
+
+ if ( genirq_check(priv->genirq, irq) )
+ return DRVMGR_EINVAL;
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ /* Enable IRQ */
+ priv->irq->mask[0] |= (1<<irq); /* unmask interrupt source */
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return DRVMGR_OK;
+}
+
+int ambapp_rasta_tmtc_int_mask(
+ struct drvmgr_dev *dev,
+ int irq)
+{
+ struct gr_rasta_tmtc_priv *priv = dev->parent->dev->priv;
+ SPIN_IRQFLAGS(irqflags);
+
+ DBG("RASTA-TMTC IRQ %d: mask\n", irq);
+
+ if ( genirq_check(priv->genirq, irq) )
+ return DRVMGR_EINVAL;
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ /* Disable IRQ */
+ priv->irq->mask[0] &= ~(1<<irq); /* mask interrupt source */
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return DRVMGR_OK;
+}
+
+int ambapp_rasta_tmtc_int_clear(
+ struct drvmgr_dev *dev,
+ int irq)
+{
+ struct gr_rasta_tmtc_priv *priv = dev->parent->dev->priv;
+
+ if ( genirq_check(priv->genirq, irq) )
+ return DRVMGR_FAIL;
+
+ priv->irq->iclear = (1<<irq);
+
+ return DRVMGR_OK;
+}
+
+int ambapp_rasta_tmtc_get_params(struct drvmgr_dev *dev, struct drvmgr_bus_params *params)
+{
+ struct gr_rasta_tmtc_priv *priv = dev->parent->dev->priv;
+
+ /* Device name prefix pointer, skip /dev */
+ params->dev_prefix = &priv->prefix[5];
+
+ return 0;
+}
+
+void gr_rasta_tmtc_print_dev(struct drvmgr_dev *dev, int options)
+{
+ struct gr_rasta_tmtc_priv *priv = dev->priv;
+ struct pci_dev_info *devinfo = priv->devinfo;
+ uint32_t bar0, bar1, bar0_size, bar1_size;
+
+ /* Print */
+ printf("--- GR-RASTA-TMTC [bus 0x%x, dev 0x%x, fun 0x%x] ---\n",
+ PCI_DEV_EXPAND(priv->pcidev));
+
+ bar0 = devinfo->resources[0].address;
+ bar0_size = devinfo->resources[0].size;
+ bar1 = devinfo->resources[1].address;
+ bar1_size = devinfo->resources[1].size;
+
+ printf(" PCI BAR[0]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+ bar0, bar0 + bar0_size - 1);
+ printf(" PCI BAR[1]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+ bar1, bar1 + bar1_size - 1);
+ printf(" IRQ: %d\n", devinfo->irq);
+ printf(" PCI REVISION: %d\n", devinfo->rev);
+ printf(" FREQ: %d Hz\n", priv->version->amba_freq_hz);
+ printf(" IMASK: 0x%08x\n", priv->irq->mask[0]);
+ printf(" IPEND: 0x%08x\n", priv->irq->ipend);
+
+ /* Print amba config */
+ if ( options & RASTA_TMTC_OPTIONS_AMBA ) {
+ ambapp_print(&priv->abus, 10);
+ }
+
+#if 0
+ /* Print IRQ handlers and their arguments */
+ if ( options & RASTA_TMTC_OPTIONS_IRQ ) {
+ int i;
+ for(i=0; i<16; i++) {
+ printf(" IRQ[%02d]: 0x%x, arg: 0x%x\n",
+ i, (unsigned int)priv->isrs[i].handler, (unsigned int)priv->isrs[i].arg);
+ }
+ }
+#endif
+}
+
+void gr_rasta_tmtc_print(int options)
+{
+ struct pci_drv_info *drv = &gr_rasta_tmtc_info;
+ struct drvmgr_dev *dev;
+
+ dev = drv->general.dev;
+ while(dev) {
+ gr_rasta_tmtc_print_dev(dev, options);
+ dev = dev->next_in_drv;
+ }
+}
diff --git a/bsps/shared/grlib/pci/gr_tmtc_1553.c b/bsps/shared/grlib/pci/gr_tmtc_1553.c
new file mode 100644
index 0000000000..ce02b8a256
--- /dev/null
+++ b/bsps/shared/grlib/pci/gr_tmtc_1553.c
@@ -0,0 +1,595 @@
+/* GR-TMTC-1553 PCI Target driver.
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * Configures the GR-TMTC-1553 interface PCI board.
+ * This driver provides a AMBA PnP bus by using the general part
+ * of the AMBA PnP bus driver (ambapp_bus.c).
+ *
+ * Driver resources for the AMBA PnP bus provided can be set using
+ * gr_tmtc_1553_set_resources().
+ *
+ * The license and distribution terms for this file may be
+ * found in found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <bsp.h>
+#include <rtems/bspIo.h>
+#include <pci.h>
+#include <pci/access.h>
+
+#include <grlib/ambapp.h>
+#include <grlib/grlib.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <drvmgr/pci_bus.h>
+#include <grlib/bspcommon.h>
+#include <grlib/genirq.h>
+
+#include <grlib/gr_tmtc_1553.h>
+
+#include <grlib/grlib_impl.h>
+
+/*#define DEBUG 1 */
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/* PCI ID */
+#define PCIID_VENDOR_GAISLER 0x1AC8
+
+int gr_tmtc_1553_init1(struct drvmgr_dev *dev);
+int gr_tmtc_1553_init2(struct drvmgr_dev *dev);
+void gr_tmtc_1553_isr (void *arg);
+
+struct gr_tmtc_1553_ver {
+ const unsigned int amba_freq_hz; /* The frequency */
+ const unsigned int amba_ioarea; /* The address where the PnP IOAREA starts at */
+};
+
/* Private data structure for the GR-TMTC-1553 driver, one per board */
struct gr_tmtc_1553_priv {
	/* Driver management */
	struct drvmgr_dev *dev;		/* Device this private data belongs to */
	char prefix[32];		/* Device name prefix, e.g. "/dev/tmtc1553_0/" */
	SPIN_DECLARE(devlock);		/* Protects the interrupt controller registers */

	/* PCI */
	pci_dev_t pcidev;		/* PCI bus/slot/function of the board */
	struct pci_dev_info *devinfo;	/* PCI device information (BARs, IRQ, IDs) */

	/* IRQ */
	genirq_t genirq;		/* Shared-handler dispatch for the 16 AMBA IRQs */

	struct gr_tmtc_1553_ver *version;	/* Board-revision specific constants */
	struct irqmp_regs *irq;			/* On-board IRQMP interrupt controller */
	struct drvmgr_map_entry bus_maps_down[2];	/* CPU -> AMBA address translation */

	struct ambapp_bus abus;		/* Scanned on-board AMBA Plug&Play bus */
	struct ambapp_mmap amba_maps[4];	/* PCI BAR -> AMBA address maps */
	struct ambapp_config config;	/* AMBA bus driver configuration */
};
+
+struct gr_tmtc_1553_ver gr_tmtc_1553_ver0 = {
+ .amba_freq_hz = 33333333,
+ .amba_ioarea = 0xfff00000,
+};
+
+
+int ambapp_tmtc_1553_int_register(
+ struct drvmgr_dev *dev,
+ int irq,
+ const char *info,
+ drvmgr_isr handler,
+ void *arg);
+int ambapp_tmtc_1553_int_unregister(
+ struct drvmgr_dev *dev,
+ int irq,
+ drvmgr_isr handler,
+ void *arg);
+int ambapp_tmtc_1553_int_unmask(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_tmtc_1553_int_mask(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_tmtc_1553_int_clear(
+ struct drvmgr_dev *dev,
+ int irq);
+int ambapp_tmtc_1553_get_params(
+ struct drvmgr_dev *dev,
+ struct drvmgr_bus_params *params);
+
+struct ambapp_ops ambapp_tmtc_1553_ops = {
+ .int_register = ambapp_tmtc_1553_int_register,
+ .int_unregister = ambapp_tmtc_1553_int_unregister,
+ .int_unmask = ambapp_tmtc_1553_int_unmask,
+ .int_mask = ambapp_tmtc_1553_int_mask,
+ .int_clear = ambapp_tmtc_1553_int_clear,
+ .get_params = ambapp_tmtc_1553_get_params
+};
+
+struct drvmgr_drv_ops gr_tmtc_1553_ops =
+{
+ {gr_tmtc_1553_init1, gr_tmtc_1553_init2, NULL, NULL},
+ NULL,
+ NULL
+};
+
+struct pci_dev_id_match gr_tmtc_1553_ids[] =
+{
+ PCIID_DEVVEND(PCIID_VENDOR_GAISLER, PCIID_DEVICE_GR_TMTC_1553),
+ PCIID_END_TABLE /* Mark end of table */
+};
+
+struct pci_drv_info gr_tmtc_1553_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_PCI_GAISLER_TMTC_1553_ID, /* Driver ID */
+ "GR-TMTC-1553_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_PCI, /* Bus Type */
+ &gr_tmtc_1553_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+ },
+ &gr_tmtc_1553_ids[0]
+};
+
+/* Driver resources configuration for the AMBA bus on the GR-RASTA-IO board.
+ * It is declared weak so that the user may override it from the project file,
+ * if the default settings are not enough.
+ *
+ * The configuration consists of an array of configuration pointers, each
+ * pointer determine the configuration of one GR-RASTA-IO board. Pointer
+ * zero is for board0, pointer 1 for board1 and so on.
+ *
+ * The array must end with a NULL pointer.
+ */
+struct drvmgr_bus_res *gr_tmtc_1553_resources[] __attribute__((weak)) =
+{
+ NULL
+};
+
+void gr_tmtc_1553_register_drv(void)
+{
+ DBG("Registering GR-TMTC-1553 PCI driver\n");
+ drvmgr_drv_register(&gr_tmtc_1553_info.general);
+}
+
/* Interrupt service routine for the GR-TMTC-1553 board.
 *
 * Reads the on-board IRQMP pending register and dispatches every pending
 * AMBA IRQ (0..15) to the handlers registered through the genirq layer,
 * acknowledging each IRQ in the controller as it is handled.
 *
 * arg: the board's struct gr_tmtc_1553_priv.
 */
void gr_tmtc_1553_isr (void *arg)
{
	struct gr_tmtc_1553_priv *priv = arg;
	unsigned int status, tmp;
	int irq;
	SPIN_ISR_IRQFLAGS(irqflags);

	/* Snapshot pending IRQs; 'tmp' keeps the original for the final ACK */
	tmp = status = priv->irq->ipend;

	/* DBG("GR-TMTC-1553: IRQ 0x%x\n",status); */

	SPIN_LOCK(&priv->devlock, irqflags);
	for(irq=0; irq<16; irq++) {
		if ( status & (1<<irq) ) {
			genirq_doirq(priv->genirq, irq);
			/* ACK this source in the on-board IRQMP controller */
			priv->irq->iclear = (1<<irq);
			status &= ~(1<<irq);
			if ( status == 0 )
				break;	/* all pending IRQs handled */
		}
	}
	SPIN_UNLOCK(&priv->devlock, irqflags);

	/* ACK interrupt, this is because PCI is Level, so the IRQ Controller still drives the IRQ. */
	if ( tmp )
		drvmgr_interrupt_clear(priv->dev, 0);

	DBG("GR-TMTC-1553-IRQ: 0x%x\n", tmp);
}
+
/* Initialize the GR-TMTC-1553 hardware: select board revision, enable
 * little-endian mode on the peripheral, scan the on-board AMBA Plug&Play
 * bus, and set up the IRQ controller and address translation tables.
 *
 * Returns 0 on success, -2 on unsupported board revision, -4 when no
 * IRQMP interrupt controller is found on the AMBA bus.
 */
static int gr_tmtc_1553_hw_init(struct gr_tmtc_1553_priv *priv)
{
	unsigned int *page0 = NULL;
	struct ambapp_dev *tmp;
	unsigned int pci_freq_hz;
	struct pci_dev_info *devinfo = priv->devinfo;
	uint32_t bar0, bar0_size;

	/* Select version of GR-TMTC-1553 board */
	switch (devinfo->rev) {
	case 0:
		priv->version = &gr_tmtc_1553_ver0;
		break;
	default:
		return -2;
	}

	bar0 = devinfo->resources[0].address;
	bar0_size = devinfo->resources[0].size;
	/* PAGE0 register sits in the upper half of BAR0 */
	page0 = (unsigned int *)(bar0 + bar0_size/2);

	/* Point PAGE0 to start of board address map. RAM at 0xff000000, APB at 0xffc00000, IOAREA at 0xfff000000 */
	/* XXX We assume little endian host with byte twisting enabled here */
	*page0 = 0x010000ff; /* Set little endian mode on peripheral. */

	/* Scan AMBA Plug&Play */

	/* AMBA MAP bar0 (in CPU) ==> 0x80000000(remote amba address) */
	priv->amba_maps[0].size = 0x1000000;
	priv->amba_maps[0].local_adr = bar0;
	priv->amba_maps[0].remote_adr = 0xff000000;

	/* NOTE(review): amba_maps[1] is never assigned. priv is calloc'ed,
	 * so amba_maps[1].size == 0, which looks like an end-of-table marker
	 * that makes entries [2] and [3] below unreachable - confirm against
	 * the ambapp map-walking code.
	 */

	/* Addresses not matching with map be untouched */
	priv->amba_maps[2].size = 0xfffffff0;
	priv->amba_maps[2].local_adr = 0;
	priv->amba_maps[2].remote_adr = 0;

	/* Mark end of table */
	priv->amba_maps[3].size=0;
	priv->amba_maps[3].local_adr = 0;
	priv->amba_maps[3].remote_adr = 0;

	/* Start AMBA PnP scan at first AHB bus */
	ambapp_scan(&priv->abus,
		bar0 + (priv->version->amba_ioarea & ~0xff000000),
		NULL, &priv->amba_maps[0]);

	/* Frequency is the same as the PCI bus frequency */
	drvmgr_freq_get(priv->dev, 0, &pci_freq_hz);

	ambapp_freq_init(&priv->abus, NULL, pci_freq_hz);

	/* Find IRQ controller */
	tmp = (struct ambapp_dev *)ambapp_for_each(&priv->abus,
				(OPTIONS_ALL|OPTIONS_APB_SLVS),
				VENDOR_GAISLER, GAISLER_IRQMP,
				ambapp_find_by_idx, NULL);
	if ( !tmp ) {
		return -4;
	}
	priv->irq = (struct irqmp_regs *)DEV_TO_APB(tmp)->start;
	/* Set up irq controller: mask all sources, clear pending, level 0 */
	priv->irq->mask[0] = 0;
	priv->irq->iclear = 0xffff;
	priv->irq->ilevel = 0;

	/* DOWN streams translation table */
	priv->bus_maps_down[0].name = "PCI BAR0 -> AMBA";
	priv->bus_maps_down[0].size = priv->amba_maps[0].size;
	priv->bus_maps_down[0].from_adr = (void *)priv->amba_maps[0].local_adr;
	priv->bus_maps_down[0].to_adr = (void *)priv->amba_maps[0].remote_adr;
	/* Mark end of translation table */
	priv->bus_maps_down[1].size = 0;

	/* Successfully registered the board */
	return 0;
}
+
+
+/* Called when a PCI target is found with the PCI device and vendor ID
+ * given in gr_tmtc_1553_ids[].
+ */
+int gr_tmtc_1553_init1(struct drvmgr_dev *dev)
+{
+ struct gr_tmtc_1553_priv *priv;
+ struct pci_dev_info *devinfo;
+ int status;
+ uint32_t bar0, bar0_size;
+ int resources_cnt;
+
+ /* PCI device does not have the IRQ line register, when PCI autoconf configures it the configuration
+ * is forgotten. We take the IRQ number from the PCI Host device (AMBA device), this works as long
+ * as PCI-IRQs are ored together on the bus.
+ *
+ * Note that this only works on LEON.
+ */
+ ((struct pci_dev_info *)dev->businfo)->irq = ((struct amba_dev_info *)dev->parent->dev->businfo)->info.irq;
+
+ priv = grlib_calloc(1, sizeof(*priv));
+ if ( !priv )
+ return DRVMGR_NOMEM;
+
+ dev->priv = priv;
+ priv->dev = dev;
+
+ /* Determine number of configurations */
+ resources_cnt = get_resarray_count(gr_tmtc_1553_resources);
+
+ /* Generate Device prefix */
+
+ strcpy(priv->prefix, "/dev/tmtc1553_0");
+ priv->prefix[14] += dev->minor_drv;
+ mkdir(priv->prefix, S_IRWXU | S_IRWXG | S_IRWXO);
+ priv->prefix[15] = '/';
+ priv->prefix[16] = '\0';
+
+ priv->devinfo = devinfo = (struct pci_dev_info *)dev->businfo;
+ priv->pcidev = devinfo->pcidev;
+ bar0 = devinfo->resources[0].address;
+ bar0_size = devinfo->resources[0].size;
+ printk("\n\n--- GR-TMTC-1553[%d] ---\n", dev->minor_drv);
+ printk(" PCI BUS: 0x%x, SLOT: 0x%x, FUNCTION: 0x%x\n",
+ PCI_DEV_EXPAND(priv->pcidev));
+ printk(" PCI VENDOR: 0x%04x, DEVICE: 0x%04x\n",
+ devinfo->id.vendor, devinfo->id.device);
+ printk(" PCI BAR[0]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
+ bar0, bar0 + bar0_size - 1);
+ printk(" IRQ: %d\n\n\n", devinfo->irq);
+
+ /* all neccessary space assigned to GR-TMTC-1553 target? */
+ if (bar0_size == 0)
+ return DRVMGR_ENORES;
+
+ /* Initialize spin-lock for this PCI peripheral device. This is to
+ * protect the Interrupt Controller Registers. The genirq layer is
+ * protecting its own internals and ISR dispatching.
+ */
+ SPIN_INIT(&priv->devlock, priv->prefix);
+
+ priv->genirq = genirq_init(16);
+ if ( priv->genirq == NULL ) {
+ free(priv);
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+
+ status = gr_tmtc_1553_hw_init(priv);
+ if ( status != 0 ) {
+ genirq_destroy(priv->genirq);
+ free(priv);
+ dev->priv = NULL;
+ printk(" Failed to initialize GR-TMTC-1553 HW: %d\n", status);
+ return DRVMGR_FAIL;
+ }
+
+ /* Init amba bus */
+ priv->config.abus = &priv->abus;
+ priv->config.ops = &ambapp_tmtc_1553_ops;
+ priv->config.maps_down = &priv->bus_maps_down[0];
+ /* This PCI device has only target interface so DMA is not supported,
+ * which means that translation from AMBA->PCI should fail if attempted.
+ */
+ priv->config.maps_up = DRVMGR_TRANSLATE_NO_BRIDGE;
+ if ( priv->dev->minor_drv < resources_cnt ) {
+ priv->config.resources = gr_tmtc_1553_resources[priv->dev->minor_drv];
+ } else {
+ priv->config.resources = NULL;
+ }
+
+ /* Create And Register AMBA PnP Bus */
+ return ambapp_bus_register(dev, &priv->config);
+}
+
+int gr_tmtc_1553_init2(struct drvmgr_dev *dev)
+{
+ struct gr_tmtc_1553_priv *priv = dev->priv;
+
+ /* Clear any old interrupt requests */
+ drvmgr_interrupt_clear(dev, 0);
+
+ /* Enable System IRQ so that GR-TMTC-1553 PCI target interrupt goes through.
+ *
+ * It is important to enable it in stage init2. If interrupts were enabled in init1
+ * this might hang the system when more than one PCI target is connected, this is
+ * because PCI interrupts might be shared and PCI target 2 have not initialized and
+ * might therefore drive interrupt already when entering init1().
+ */
+ drvmgr_interrupt_register(
+ dev,
+ 0,
+ "gr_tmtc_1553",
+ gr_tmtc_1553_isr,
+ (void *)priv);
+
+ return DRVMGR_OK;
+}
+
/* Register a handler for AMBA IRQ 'irq' on the board's AMBA bus.
 *
 * The genirq layer allows several handlers to share one IRQ: the IRQMP
 * source is cleared and masked when the first handler is registered, and
 * unmasked once the first handler is enabled.
 *
 * Returns DRVMGR_OK on success, DRVMGR_FAIL otherwise.
 */
int ambapp_tmtc_1553_int_register(
	struct drvmgr_dev *dev,
	int irq,
	const char *info,
	drvmgr_isr handler,
	void *arg)
{
	/* The bus device's parent (the PCI board) owns the private data */
	struct gr_tmtc_1553_priv *priv = dev->parent->dev->priv;
	SPIN_IRQFLAGS(irqflags);
	int status;
	void *h;

	h = genirq_alloc_handler(handler, arg);
	if ( h == NULL )
		return DRVMGR_FAIL;

	SPIN_LOCK_IRQ(&priv->devlock, irqflags);

	status = genirq_register(priv->genirq, irq, h);
	if ( status == 0 ) {
		/* Disable and clear IRQ for first registered handler */
		priv->irq->iclear = (1<<irq);
		priv->irq->mask[0] &= ~(1<<irq); /* mask interrupt source */
	} else if ( status == 1 )
		status = 0;	/* handler added to an already registered IRQ */

	if (status != 0) {
		/* Registration failed: roll back the handler allocation */
		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
		genirq_free_handler(h);
		return DRVMGR_FAIL;
	}

	status = genirq_enable(priv->genirq, irq, handler, arg);
	if ( status == 0 ) {
		/* Enable IRQ for first enabled handler only */
		priv->irq->mask[0] |= (1<<irq); /* unmask interrupt source */
	} else if ( status == 1 )
		status = 0;

	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);

	return status;
}
+
/* Unregister a previously registered handler for AMBA IRQ 'irq'.
 *
 * Disables the handler first, masking the IRQMP source when no enabled
 * handler remains, then unregisters and frees the handler entry.
 * Returns DRVMGR_OK on success, DRVMGR_FAIL otherwise.
 */
int ambapp_tmtc_1553_int_unregister(
	struct drvmgr_dev *dev,
	int irq,
	drvmgr_isr isr,
	void *arg)
{
	/* The bus device's parent (the PCI board) owns the private data */
	struct gr_tmtc_1553_priv *priv = dev->parent->dev->priv;
	SPIN_IRQFLAGS(irqflags);
	int status;
	void *handler;

	SPIN_LOCK_IRQ(&priv->devlock, irqflags);

	status = genirq_disable(priv->genirq, irq, isr, arg);
	if ( status == 0 ) {
		/* Disable IRQ only when no enabled handler exists */
		priv->irq->mask[0] &= ~(1<<irq); /* mask interrupt source */
	} else if ( status == 1 )
		status = 0;

	/* NULL means the (isr, arg) pair was never registered for this IRQ */
	handler = genirq_unregister(priv->genirq, irq, isr, arg);
	if ( handler == NULL )
		status = DRVMGR_FAIL;
	else
		status = DRVMGR_OK;

	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);

	/* Free outside the lock */
	if (handler)
		genirq_free_handler(handler);

	return status;
}
+
+int ambapp_tmtc_1553_int_unmask(
+ struct drvmgr_dev *dev,
+ int irq)
+{
+ struct gr_tmtc_1553_priv *priv = dev->parent->dev->priv;
+ SPIN_IRQFLAGS(irqflags);
+
+ DBG("TMTC-1553 IRQ %d: enable\n", irq);
+
+ if ( genirq_check(priv->genirq, irq) )
+ return DRVMGR_FAIL;
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ /* Enable IRQ */
+ priv->irq->mask[0] |= (1<<irq); /* unmask interrupt source */
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return DRVMGR_OK;
+}
+
+int ambapp_tmtc_1553_int_mask(
+ struct drvmgr_dev *dev,
+ int irq)
+{
+ struct gr_tmtc_1553_priv *priv = dev->parent->dev->priv;
+ SPIN_IRQFLAGS(irqflags);
+
+ DBG("TMTC-1553 IRQ %d: disable\n", irq);
+
+ if ( genirq_check(priv->genirq, irq) )
+ return DRVMGR_FAIL;
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ /* Disable IRQ */
+ priv->irq->mask[0] &= ~(1<<irq); /* mask interrupt source */
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return DRVMGR_OK;
+}
+
+int ambapp_tmtc_1553_int_clear(
+ struct drvmgr_dev *dev,
+ int irq)
+{
+ struct gr_tmtc_1553_priv *priv = dev->parent->dev->priv;
+
+ if ( genirq_check(priv->genirq, irq) )
+ return DRVMGR_FAIL;
+
+ priv->irq->iclear = (1<<irq);
+
+ return DRVMGR_OK;
+}
+
+int ambapp_tmtc_1553_get_params(struct drvmgr_dev *dev, struct drvmgr_bus_params *params)
+{
+ struct gr_tmtc_1553_priv *priv = dev->parent->dev->priv;
+
+ /* Device name prefix pointer, skip /dev */
+ params->dev_prefix = &priv->prefix[5];
+
+ return 0;
+}
+
/* Print status and configuration of one GR-TMTC-1553 board.
 *
 * dev:     the GR-TMTC-1553 board device to dump.
 * options: TMTC_1553_OPTIONS_* bit-mask selecting optional output.
 */
void gr_tmtc_1553_print_dev(struct drvmgr_dev *dev, int options)
{
	struct gr_tmtc_1553_priv *priv = dev->priv;
	struct pci_dev_info *devinfo = priv->devinfo;
	uint32_t bar0, bar0_size;

	/* Print PCI location of the board */
	printf("--- GR-TMTC-1553 [bus 0x%x, dev 0x%x, fun 0x%x] ---\n",
		PCI_DEV_EXPAND(priv->pcidev));

	bar0 = devinfo->resources[0].address;
	bar0_size = devinfo->resources[0].size;

	printf(" PCI BAR[0]: 0x%" PRIx32 " - 0x%" PRIx32 "\n",
		bar0, bar0 + bar0_size - 1);
	printf(" IRQ REGS: 0x%" PRIxPTR "\n", (uintptr_t)priv->irq);
	printf(" IRQ: %d\n", devinfo->irq);
	printf(" FREQ: %d Hz\n", priv->version->amba_freq_hz);
	printf(" IMASK: 0x%08x\n", priv->irq->mask[0]);
	printf(" IPEND: 0x%08x\n", priv->irq->ipend);

	/* Print the scanned AMBA Plug&Play configuration on request */
	if ( options & TMTC_1553_OPTIONS_AMBA ) {
		ambapp_print(&priv->abus, 10);
	}
#if 0
	/* Print IRQ handlers and their arguments.
	 * NOTE(review): disabled code - struct gr_tmtc_1553_priv has no
	 * 'isrs' member, so this would not compile if enabled as-is.
	 */
	if ( options & TMTC_1553_OPTIONS_IRQ ) {
		int i;
		for(i=0; i<16; i++) {
			printf(" IRQ[%02d]: 0x%x, arg: 0x%x\n",
				i, (unsigned int)priv->isrs[i].handler, (unsigned int)priv->isrs[i].arg);
		}
	}
#endif
}
+
+void gr_tmtc_1553_print(int options)
+{
+ struct pci_drv_info *drv = &gr_tmtc_1553_info;
+ struct drvmgr_dev *dev;
+
+ dev = drv->general.dev;
+ while(dev) {
+ gr_tmtc_1553_print_dev(dev, options);
+ dev = dev->next_in_drv;
+ }
+}
diff --git a/bsps/shared/grlib/pci/grpci.c b/bsps/shared/grlib/pci/grpci.c
new file mode 100644
index 0000000000..fc2f06a063
--- /dev/null
+++ b/bsps/shared/grlib/pci/grpci.c
@@ -0,0 +1,722 @@
+/* GRLIB GRPCI PCI HOST driver.
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * Configures the GRPCI core and initialize,
+ * - the PCI Library (pci.c)
+ * - the general part of the PCI Bus driver (pci_bus.c)
+ *
+ * System interrupt assigned to PCI interrupt (INTA#..INTD#) is by
+ * default taken from Plug and Play, but may be overridden by the
+ * driver resources INTA#..INTD#.
+ *
+ * The license and distribution terms for this file may be
+ * found in found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <rtems/bspIo.h>
+#include <libcpu/byteorder.h>
+#include <libcpu/access.h>
+#include <pci.h>
+#include <pci/cfg.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/ambapp.h>
+#include <drvmgr/pci_bus.h>
+#include <grlib/grpci.h>
+
+#define DMAPCI_ADDR 0x80000500
+
+/* Configuration options */
+#define SYSTEM_MAINMEM_START 0x40000000
+
+/* If defined to 1 - byte twisting is enabled by default */
+#define DEFAULT_BT_ENABLED 0
+
+/* Interrupt assignment. Set to other value than 0xff in order to
+ * override defaults and plug&play information
+ */
+#ifndef GRPCI_INTA_SYSIRQ
+ #define GRPCI_INTA_SYSIRQ 0xff
+#endif
+#ifndef GRPCI_INTB_SYSIRQ
+ #define GRPCI_INTB_SYSIRQ 0xff
+#endif
+#ifndef GRPCI_INTC_SYSIRQ
+ #define GRPCI_INTC_SYSIRQ 0xff
+#endif
+#ifndef GRPCI_INTD_SYSIRQ
+ #define GRPCI_INTD_SYSIRQ 0xff
+#endif
+
+#define PAGE0_BTEN_BIT 0
+#define PAGE0_BTEN (1<<PAGE0_BTEN_BIT)
+
+#define CFGSTAT_HOST_BIT 13
+#define CFGSTAT_HOST (1<<CFGSTAT_HOST_BIT)
+
+/*#define DEBUG 1*/
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
/* GRPCI core APB register layout.
 * NOTE(review): the previous comment here ("Bit encode for
 * PCI_CONFIG_HEADER_TYPE register") looked like a copy-paste leftover.
 */
struct grpci_regs {
	volatile unsigned int cfg_stat;	/* config/status: mmap, bus select, HOST flag, abort */
	volatile unsigned int bar0;	/* target BAR0 */
	volatile unsigned int page0;	/* BAR0 page; bit 0 enables byte twisting */
	volatile unsigned int bar1;	/* target BAR1 */
	volatile unsigned int page1;	/* BAR1 page (PCI address of mapped CPU RAM) */
	volatile unsigned int iomap;	/* PCI I/O window translation */
	volatile unsigned int stat_cmd;	/* status/command */
	volatile unsigned int irq;	/* interrupt control/unmask */
};
+
+#define HOST_TGT PCI_DEV(0xff, 0, 0)
+
+struct grpci_priv *grpcipriv = NULL;
+static int grpci_minor = 0;
+static unsigned int *pcidma = (unsigned int *)DMAPCI_ADDR;
+
+/* PCI Interrupt assignment. Connects an PCI interrupt pin (INTA#..INTD#)
+ * to a system interrupt number.
+ */
+unsigned char grpci_pci_irq_table[4] =
+{
+ /* INTA# */ GRPCI_INTA_SYSIRQ,
+ /* INTB# */ GRPCI_INTB_SYSIRQ,
+ /* INTC# */ GRPCI_INTC_SYSIRQ,
+ /* INTD# */ GRPCI_INTD_SYSIRQ
+};
+
/* Driver private data structure. Only one GRPCI core is supported, see
 * the grpci_minor check in grpci_init1().
 */
struct grpci_priv {
	struct drvmgr_dev *dev;		/* Owning AMBA device */
	struct grpci_regs *regs;	/* GRPCI APB registers */
	int irq;			/* System IRQ routed through the core */
	int minor;			/* Driver instance number */

	uint32_t bar1_pci_adr;		/* PCI address of target BAR1 (maps CPU RAM) */
	uint32_t bar1_size;		/* Size of the target BAR1 window */

	int bt_enabled;			/* Non-zero when PCI byte twisting is enabled */
	unsigned int pci_area;		/* Start of AMBA->PCI memory window */
	unsigned int pci_area_end;	/* End of AMBA->PCI memory window */
	unsigned int pci_io;		/* Start of PCI I/O cycles window */
	unsigned int pci_conf;		/* Start of PCI configuration cycles window */
	unsigned int pci_conf_end;	/* End of PCI configuration cycles window */

	uint32_t devVend;		/* Host PCI Vendor/Device ID */

	struct drvmgr_map_entry maps_up[2];	/* PCI -> AMBA translations */
	struct drvmgr_map_entry maps_down[2];	/* AMBA -> PCI translations */
	struct pcibus_config config;		/* PCI bus driver configuration */
};
+
+int grpci_init1(struct drvmgr_dev *dev);
+
+/* GRPCI DRIVER */
+
+struct drvmgr_drv_ops grpci_ops =
+{
+ .init = {grpci_init1, NULL, NULL, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+struct amba_dev_id grpci_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_PCIFBRG},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info grpci_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_GRPCI_ID, /* Driver ID */
+ "GRPCI_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &grpci_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ sizeof(struct grpci_priv), /* Make drvmgr alloc private */
+ },
+ &grpci_ids[0]
+};
+
+void grpci_register_drv(void)
+{
+ DBG("Registering GRPCI driver\n");
+ drvmgr_drv_register(&grpci_info.general);
+}
+
/* Read a 32-bit word from PCI configuration space of device 'dev'.
 *
 * ofs must be 32-bit aligned. Devices in slots > 15 are not addressable
 * and read back as all-ones. On a master abort *val is 0xffffffff and
 * PCISTS_MSTABRT is returned; PCISTS_OK otherwise.
 */
static int grpci_cfg_r32(pci_dev_t dev, int ofs, uint32_t *val)
{
	struct grpci_priv *priv = grpcipriv;
	volatile uint32_t *pci_conf;
	uint32_t devfn;
	int retval;
	int bus = PCI_DEV_BUS(dev);

	if (ofs & 3)
		return PCISTS_EINVAL;

	if (PCI_DEV_SLOT(dev) > 15) {
		*val = 0xffffffff;
		return PCISTS_OK;
	}

	/* GRPCI can access "non-standard" devices on bus0 (on AD11.AD16),
	 * but we skip them.
	 */
	if (dev == HOST_TGT)
		bus = devfn = 0;
	else if (bus == 0)
		devfn = PCI_DEV_DEVFUNC(dev) + PCI_DEV(0, 6, 0);
	else
		devfn = PCI_DEV_DEVFUNC(dev);

	/* Select bus (bits 23..26 of cfg_stat) */
	priv->regs->cfg_stat = (priv->regs->cfg_stat & ~(0xf<<23)) | (bus<<23);

	/* Form the configuration-cycle address inside the CFG window */
	pci_conf = (volatile uint32_t *)(priv->pci_conf | (devfn << 8) | ofs);

	if (priv->bt_enabled) {
		/* Byte twisting enabled: swap to host byte order */
		*val = CPU_swap_u32(*pci_conf);
	} else {
		*val = *pci_conf;
	}

	/* cfg_stat bit 0x100 indicates a master abort (no target answered) */
	if (priv->regs->cfg_stat & 0x100) {
		*val = 0xffffffff;
		retval = PCISTS_MSTABRT;
	} else
		retval = PCISTS_OK;

	DBG("pci_read: [%x:%x:%x] reg: 0x%x => addr: 0x%x, val: 0x%x\n",
		PCI_DEV_EXPAND(dev), ofs, pci_conf, *val);

	return retval;
}
+
+
+static int grpci_cfg_r16(pci_dev_t dev, int ofs, uint16_t *val)
+{
+ uint32_t v;
+ int retval;
+
+ if (ofs & 1)
+ return PCISTS_EINVAL;
+
+ retval = grpci_cfg_r32(dev, ofs & ~0x3, &v);
+ *val = 0xffff & (v >> (8*(ofs & 0x3)));
+
+ return retval;
+}
+
+static int grpci_cfg_r8(pci_dev_t dev, int ofs, uint8_t *val)
+{
+ uint32_t v;
+ int retval;
+
+ retval = grpci_cfg_r32(dev, ofs & ~0x3, &v);
+
+ *val = 0xff & (v >> (8*(ofs & 3)));
+
+ return retval;
+}
+
/* Write a 32-bit word to PCI configuration space of device 'dev'.
 *
 * ofs must be 32-bit aligned; devices in slots > 15 are not addressable.
 * Returns a PCISTS_* status code.
 */
static int grpci_cfg_w32(pci_dev_t dev, int ofs, uint32_t val)
{
	struct grpci_priv *priv = grpcipriv;
	volatile uint32_t *pci_conf;
	uint32_t value, devfn = PCI_DEV_DEVFUNC(dev);
	int bus = PCI_DEV_BUS(dev);

	if (ofs & 0x3)
		return PCISTS_EINVAL;

	if (PCI_DEV_SLOT(dev) > 15)
		return PCISTS_MSTABRT;

	/* GRPCI can access "non-standard" devices on bus0 (on AD11.AD16),
	 * but we skip them.
	 */
	if (dev == HOST_TGT)
		bus = devfn = 0;
	else if (bus == 0)
		devfn = PCI_DEV_DEVFUNC(dev) + PCI_DEV(0, 6, 0);
	else
		devfn = PCI_DEV_DEVFUNC(dev);

	/* Select bus (bits 23..26 of cfg_stat) */
	priv->regs->cfg_stat = (priv->regs->cfg_stat & ~(0xf<<23)) | (bus<<23);

	/* Form the configuration-cycle address inside the CFG window */
	pci_conf = (volatile uint32_t *)(priv->pci_conf | (devfn << 8) | ofs);

	if ( priv->bt_enabled ) {
		/* Byte twisting enabled: swap to PCI byte order */
		value = CPU_swap_u32(val);
	} else {
		value = val;
	}

	*pci_conf = value;

	DBG("pci_write - [%x:%x:%x] reg: 0x%x => addr: 0x%x, val: 0x%x\n",
		PCI_DEV_EXPAND(dev), ofs, pci_conf, value);

	return PCISTS_OK;
}
+
+static int grpci_cfg_w16(pci_dev_t dev, int ofs, uint16_t val)
+{
+ uint32_t v;
+ int retval;
+
+ if (ofs & 1)
+ return PCISTS_EINVAL;
+
+ retval = grpci_cfg_r32(dev, ofs & ~0x3, &v);
+ if (retval != PCISTS_OK)
+ return retval;
+
+ v = (v & ~(0xffff << (8*(ofs&3)))) | ((0xffff&val) << (8*(ofs&3)));
+
+ return grpci_cfg_w32(dev, ofs & ~0x3, v);
+}
+
+static int grpci_cfg_w8(pci_dev_t dev, int ofs, uint8_t val)
+{
+ uint32_t v;
+ int retval;
+
+ retval = grpci_cfg_r32(dev, ofs & ~0x3, &v);
+ if (retval != PCISTS_OK)
+ return retval;
+
+ v = (v & ~(0xff << (8*(ofs&3)))) | ((0xff&val) << (8*(ofs&3)));
+
+ return grpci_cfg_w32(dev, ofs & ~0x3, v);
+}
+
+/* Return the assigned system IRQ number that corresponds to the PCI
+ * "Interrupt Pin" information from configuration space.
+ *
+ * The IRQ information is stored in the grpci_pci_irq_table configurable
+ * by the user.
+ *
+ * Returns the "system IRQ" for the PCI INTA#..INTD# pin in irq_pin. Returns
+ * 0xff if not assigned.
+ */
+static uint8_t grpci_bus0_irq_map(pci_dev_t dev, int irq_pin)
+{
+ uint8_t sysIrqNr = 0; /* not assigned */
+ int irq_group;
+
+ if ( (irq_pin >= 1) && (irq_pin <= 4) ) {
+ /* Use default IRQ decoding on PCI BUS0 according slot numbering */
+ irq_group = PCI_DEV_SLOT(dev) & 0x3;
+ irq_pin = ((irq_pin - 1) + irq_group) & 0x3;
+ /* Valid PCI "Interrupt Pin" number */
+ sysIrqNr = grpci_pci_irq_table[irq_pin];
+ }
+ return sysIrqNr;
+}
+
+static int grpci_translate(uint32_t *address, int type, int dir)
+{
+ uint32_t adr;
+ struct grpci_priv *priv = grpcipriv;
+
+ if (type == 1) {
+ /* I/O */
+ if (dir != 0) {
+ /* The PCI bus can not access the CPU bus from I/O
+ * because GRPCI core does not support I/O BARs
+ */
+ return -1;
+ }
+
+ /* We have got a PCI BAR address that the CPU want to access...
+ * Check that it is within the PCI I/O window, I/O adresses
+ * are mapped 1:1 with GRPCI driver... no translation needed.
+ */
+ adr = *(uint32_t *)address;
+ if (adr < priv->pci_io || adr >= priv->pci_conf)
+ return -1;
+ } else {
+ /* MEMIO and MEM.
+ * Memory space is mapped 1:1 so no translation is needed.
+ * Check that address is within accessible windows.
+ */
+ adr = *(uint32_t *)address;
+ if (dir == 0) {
+ /* PCI BAR to AMBA-CPU address.. check that it is
+ * located within GRPCI PCI Memory Window
+ * adr = PCI address.
+ */
+ if (adr < priv->pci_area || adr >= priv->pci_area_end)
+ return -1;
+ } else {
+ /* We have a CPU address and want to get access to it
+ * from PCI space, typically when doing DMA into CPU
+ * RAM. The GRPCI core has two target BARs that PCI
+ * masters can access, we check here that the address
+ * is accessible from PCI.
+ * adr = AMBA address.
+ */
+ if (adr < priv->bar1_pci_adr ||
+ adr >= (priv->bar1_pci_adr + priv->bar1_size))
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+extern struct pci_memreg_ops pci_memreg_sparc_le_ops;
+extern struct pci_memreg_ops pci_memreg_sparc_be_ops;
+
+/* GRPCI PCI access routines, default to Little-endian PCI Bus */
+struct pci_access_drv grpci_access_drv = {
+ .cfg =
+ {
+ grpci_cfg_r8,
+ grpci_cfg_r16,
+ grpci_cfg_r32,
+ grpci_cfg_w8,
+ grpci_cfg_w16,
+ grpci_cfg_w32,
+ },
+ .io =
+ {
+ _ld8,
+ _ld_le16,
+ _ld_le32,
+ _st8,
+ _st_le16,
+ _st_le32,
+ },
+ .memreg = &pci_memreg_sparc_le_ops,
+ .translate = grpci_translate,
+};
+
+struct pci_io_ops grpci_io_ops_be =
+{
+ _ld8,
+ _ld_be16,
+ _ld_be32,
+ _st8,
+ _st_be16,
+ _st_be32,
+};
+
/* Program the GRPCI core registers and the host's own PCI configuration
 * header: optionally disable byte twisting, set up the AHB->PCI memory
 * mapping, size and map target BAR1 over CPU RAM, program the I/O map,
 * latency timer and command register, and unmask PCI interrupts.
 *
 * Always returns 0.
 */
static int grpci_hw_init(struct grpci_priv *priv)
{
	volatile unsigned int *mbar0, *page0;
	uint32_t data, addr, mbar0size;
	pci_dev_t host = HOST_TGT;

	mbar0 = (volatile unsigned int *)priv->pci_area;

	if ( !priv->bt_enabled && ((priv->regs->page0 & PAGE0_BTEN) == PAGE0_BTEN) ) {
		/* Byte twisting is on, turn it off */
		grpci_cfg_w32(host, PCIR_BAR(0), 0xffffffff);
		grpci_cfg_r32(host, PCIR_BAR(0), &addr);
		/* Setup bar0 to nonzero value */
		grpci_cfg_w32(host, PCIR_BAR(0),
				CPU_swap_u32(0x80000000));
		/* page0 is accessed through upper half of bar0 */
		addr = (~CPU_swap_u32(addr)+1)>>1;
		mbar0size = addr*2;
		DBG("GRPCI: Size of MBAR0: 0x%x, MBAR0: 0x%x(lower) 0x%x(upper)\n",mbar0size,((unsigned int)mbar0),((unsigned int)mbar0)+mbar0size/2);
		page0 = &mbar0[mbar0size/8];
		DBG("GRPCI: PAGE0 reg address: 0x%x (0x%x)\n",((unsigned int)mbar0)+mbar0size/2,page0);
		priv->regs->cfg_stat = (priv->regs->cfg_stat & (~0xf0000000)) | 0x80000000; /* Setup mmap reg so we can reach bar0 */
		*page0 = 0<<PAGE0_BTEN_BIT; /* Disable bytetwisting ... */
	}

	/* Get the GRPCI Host PCI ID */
	grpci_cfg_r32(host, PCIR_VENDOR, &priv->devVend);

	/* set 1:1 mapping between AHB -> PCI memory */
	priv->regs->cfg_stat = (priv->regs->cfg_stat & 0x0fffffff) | priv->pci_area;

	/* determine size of target BAR1 (standard write-ones/read-back probe) */
	grpci_cfg_w32(host, PCIR_BAR(1), 0xffffffff);
	grpci_cfg_r32(host, PCIR_BAR(1), &addr);
	priv->bar1_size = (~(addr & ~0xf)) + 1;

	/* and map system RAM at pci address 0x40000000 */
	priv->bar1_pci_adr &= ~(priv->bar1_size - 1); /* Fix alignment of BAR1 */
	grpci_cfg_w32(host, PCIR_BAR(1), priv->bar1_pci_adr);
	priv->regs->page1 = priv->bar1_pci_adr;

	/* Translate I/O accesses 1:1 */
	priv->regs->iomap = priv->pci_io & 0xffff0000;

	/* Setup Latency Timer and cache line size. Default cache line
	 * size will result in poor performance (256 word fetches), 0xff
	 * will set it according to the max size of the PCI FIFO.
	 */
	grpci_cfg_w8(host, PCIR_CACHELNSZ, 0xff);
	grpci_cfg_w8(host, PCIR_LATTIMER, 0x40);

	/* set as bus master and enable pci memory responses */
	grpci_cfg_r32(host, PCIR_COMMAND, &data);
	data |= (PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
	grpci_cfg_w32(host, PCIR_COMMAND, data);

	/* unmask all PCI interrupts at PCI Core, not all GRPCI cores support
	 * this
	 */
	priv->regs->irq = 0xf0000;

	/* Successful */
	return 0;
}
+
/* Initializes the GRPCI core and driver, must be called before calling init_pci()
 *
 * Return values
 *  0             Successful initalization
 *  -2            Error PCI controller not HOST (targets not supported)
 *  -3            Error due to GRPCI hardware initialization
 *
 * (The original comment also listed -1 "PCI core not found" and -4
 * "Error registering driver to PCI layer", but no path in this function
 * returns those values.)
 */
static int grpci_init(struct grpci_priv *priv)
{
	struct ambapp_apb_info *apb;
	struct ambapp_ahb_info *ahb;
	int pin;
	union drvmgr_key_value *value;
	char keyname[6];	/* holds "INTA#".."INTD#" plus NUL */
	struct amba_dev_info *ainfo = priv->dev->businfo;

	/* Find PCI core from Plug&Play information */
	apb = ainfo->info.apb_slv;
	ahb = ainfo->info.ahb_slv;

	/* Found PCI core, init private structure */
	priv->irq = apb->irq;
	priv->regs = (struct grpci_regs *)apb->start;
	priv->bt_enabled = DEFAULT_BT_ENABLED;

	/* Calculate the PCI windows
	 *  AMBA->PCI Window:                       AHB SLAVE AREA0
	 *  AMBA->PCI I/O cycles Window:            AHB SLAVE AREA1 Lower half
	 *  AMBA->PCI Configuration cycles Window:  AHB SLAVE AREA1 Upper half
	 */
	priv->pci_area     = ahb->start[0];
	priv->pci_area_end = ahb->start[0] + ahb->mask[0];
	priv->pci_io       = ahb->start[1];
	priv->pci_conf     = ahb->start[1] + (ahb->mask[1] >> 1);
	priv->pci_conf_end = ahb->start[1] + ahb->mask[1];

	/* On systems where PCI I/O area and configuration area is apart of the "PCI Window"
	 * the PCI Window stops at the start of the PCI I/O area
	 */
	if ( (priv->pci_io > priv->pci_area) && (priv->pci_io < (priv->pci_area_end-1)) ) {
		priv->pci_area_end = priv->pci_io;
	}

	/* Init PCI interrupt assignment table to all use the interrupt routed through
	 * the GRPCI core.
	 */
	strcpy(keyname, "INTX#");
	for (pin=1; pin<5; pin++) {
		if ( grpci_pci_irq_table[pin-1] == 0xff ) {
			grpci_pci_irq_table[pin-1] = priv->irq;

			/* User may override Both hardcoded IRQ setup and Plug & Play IRQ */
			keyname[3] = 'A' + (pin-1);	/* "INTA#".."INTD#" */
			value = drvmgr_dev_key_get(priv->dev, keyname, DRVMGR_KT_INT);
			if ( value )
				grpci_pci_irq_table[pin-1] = value->i;
		}
	}

	/* User may override DEFAULT_BT_ENABLED to enable/disable byte twisting */
	value = drvmgr_dev_key_get(priv->dev, "byteTwisting", DRVMGR_KT_INT);
	if ( value )
		priv->bt_enabled = value->i;

	/* Use GRPCI target BAR1 to map CPU RAM to PCI, this is to make it
	 * possible for PCI peripherals to do DMA directly to CPU memory.
	 */
	value = drvmgr_dev_key_get(priv->dev, "tgtbar1", DRVMGR_KT_INT);
	if (value)
		priv->bar1_pci_adr = value->i;
	else
		priv->bar1_pci_adr = SYSTEM_MAINMEM_START; /* default */

	/* This driver only support HOST systems, we check for HOST */
	if ( !(priv->regs->cfg_stat & CFGSTAT_HOST) ) {
		/* Target not supported */
		return -2;
	}

	/* Init the PCI Core */
	if ( grpci_hw_init(priv) ) {
		return -3;
	}

	/* Down streams translation table (all windows mapped 1:1) */
	priv->maps_down[0].name = "AMBA -> PCI MEM Window";
	priv->maps_down[0].size = priv->pci_area_end - priv->pci_area;
	priv->maps_down[0].from_adr = (void *)priv->pci_area;
	priv->maps_down[0].to_adr = (void *)priv->pci_area;
	/* End table */
	priv->maps_down[1].size = 0;

	/* Up streams translation table */
	priv->maps_up[0].name = "Target BAR1 -> AMBA";
	priv->maps_up[0].size = priv->bar1_size;
	priv->maps_up[0].from_adr = (void *)priv->bar1_pci_adr;
	priv->maps_up[0].to_adr = (void *)priv->bar1_pci_adr;
	/* End table */
	priv->maps_up[1].size = 0;

	return 0;
}
+
/* Called when a core is found with the AMBA device and vendor ID
 * given in grpci_ids[]. IRQ, Console does not work here.
 *
 * First (and only) driver-manager init stage for the GRPCI host: checks
 * the parent bus, runs grpci_init(), registers the PCI access/config
 * layers and finally registers the PCI bus. Returns a DRVMGR_* code.
 */
int grpci_init1(struct drvmgr_dev *dev)
{
	int status;
	struct grpci_priv *priv;
	struct pci_auto_setup grpci_auto_cfg;

	DBG("GRPCI[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);

	/* Only a single GRPCI core is supported */
	if ( grpci_minor != 0 ) {
		DBG("Driver only supports one PCI core\n");
		return DRVMGR_FAIL;
	}

	if ( (strcmp(dev->parent->dev->drv->name, "AMBAPP_GRLIB_DRV") != 0) &&
	     (strcmp(dev->parent->dev->drv->name, "AMBAPP_LEON2_DRV") != 0) ) {
		/* We only support GRPCI driver on local bus */
		return DRVMGR_FAIL;
	}

	/* Private memory is allocated by drvmgr (see grpci_info dev_priv_size) */
	priv = dev->priv;
	if ( !priv )
		return DRVMGR_NOMEM;

	priv->dev = dev;
	priv->minor = grpci_minor++;

	grpcipriv = priv;
	status = grpci_init(priv);
	if (status) {
		printk("Failed to initialize grpci driver %d\n", status);
		return DRVMGR_FAIL;
	}


	/* Register the PCI core at the PCI layers */

	if (priv->bt_enabled == 0) {
		/* Byte twisting off: Host is Big-Endian */
		pci_endian = PCI_BIG_ENDIAN;

		memcpy(&grpci_access_drv.io, &grpci_io_ops_be,
						sizeof(grpci_io_ops_be));
		grpci_access_drv.memreg = &pci_memreg_sparc_be_ops;
	}

	if (pci_access_drv_register(&grpci_access_drv)) {
		/* Access routines registration failed */
		return DRVMGR_FAIL;
	}

	/* Prepare memory MAP for PCI auto-configuration */
	grpci_auto_cfg.options = 0;
	grpci_auto_cfg.mem_start = 0;
	grpci_auto_cfg.mem_size = 0;	/* no prefetchable memory space */
	grpci_auto_cfg.memio_start = priv->pci_area;
	grpci_auto_cfg.memio_size = priv->pci_area_end - priv->pci_area;
	grpci_auto_cfg.io_start = priv->pci_io;
	grpci_auto_cfg.io_size = priv->pci_conf - priv->pci_io;
	grpci_auto_cfg.irq_map = grpci_bus0_irq_map;
	grpci_auto_cfg.irq_route = NULL; /* use standard routing */
	pci_config_register(&grpci_auto_cfg);

	if (pci_config_init()) {
		/* PCI configuration failed */
		return DRVMGR_FAIL;
	}

	priv->config.maps_down = &priv->maps_down[0];
	priv->config.maps_up = &priv->maps_up[0];
	return pcibus_register(dev, &priv->config);
}
+
+/* DMA functions which uses GRPCIs optional DMA controller (len in words) */
+int grpci_dma_to_pci(
+ unsigned int ahb_addr,
+ unsigned int pci_addr,
+ unsigned int len)
+{
+ int ret = 0;
+
+ pcidma[0] = 0x82;
+ pcidma[1] = ahb_addr;
+ pcidma[2] = pci_addr;
+ pcidma[3] = len;
+ pcidma[0] = 0x83;
+
+ while ( (pcidma[0] & 0x4) == 0)
+ ;
+
+ if (pcidma[0] & 0x8) { /* error */
+ ret = -1;
+ }
+
+ pcidma[0] |= 0xC;
+ return ret;
+
+}
+
+int grpci_dma_from_pci(
+ unsigned int ahb_addr,
+ unsigned int pci_addr,
+ unsigned int len)
+{
+ int ret = 0;
+
+ pcidma[0] = 0x80;
+ pcidma[1] = ahb_addr;
+ pcidma[2] = pci_addr;
+ pcidma[3] = len;
+ pcidma[0] = 0x81;
+
+ while ( (pcidma[0] & 0x4) == 0)
+ ;
+
+ if (pcidma[0] & 0x8) { /* error */
+ ret = -1;
+ }
+
+ pcidma[0] |= 0xC;
+ return ret;
+
+}
diff --git a/bsps/shared/grlib/pci/grpci2.c b/bsps/shared/grlib/pci/grpci2.c
new file mode 100644
index 0000000000..21c09f47a5
--- /dev/null
+++ b/bsps/shared/grlib/pci/grpci2.c
@@ -0,0 +1,970 @@
+/* GRLIB GRPCI2 PCI HOST driver.
+ *
+ * COPYRIGHT (c) 2011
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+/* Configures the GRPCI2 core and initialize,
+ * - the PCI Library (pci.c)
+ * - the general part of the PCI Bus driver (pci_bus.c)
+ *
+ * System interrupt assigned to PCI interrupt (INTA#..INTD#) is by
+ * default taken from Plug and Play, but may be overridden by the
+ * driver resources INTA#..INTD#. GRPCI2 handles differently depending
+ * on the design (4 different ways).
+ *
+ * GRPCI2 IRQ implementation notes
+ * -------------------------------
+ * Since the Driver Manager pci_bus layer implements IRQ by calling
+ * pci_interrupt_* which translates into BSP_shared_interrupt_*, and the
+ * root-bus also relies on BSP_shared_interrupt_*, it is safe for the GRPCI2
+ * driver to use the drvmgr_interrupt_* routines since they will be
+ * accessing the same routines in the end. Otherwise the GRPCI2 driver must
+ * have used the pci_interrupt_* routines.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <rtems.h>
+#include <rtems/bspIo.h>
+#include <libcpu/byteorder.h>
+#include <libcpu/access.h>
+#include <pci.h>
+#include <pci/cfg.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/ambapp.h>
+#include <drvmgr/pci_bus.h>
+#include <grlib/grpci2.h>
+
+#include <grlib/grlib_impl.h>
+
+/* If defined to 1 - byte twisting is enabled by default */
+#define DEFAULT_BT_ENABLED 0
+
+/* If defined to 64 - Latency timer is 64 by default */
+#define DEFAULT_LATENCY_TIMER 64
+
+/* Interrupt assignment. Set to other value than 0xff in order to
+ * override defaults and plug&play information
+ */
+#ifndef GRPCI2_INTA_SYSIRQ
+ #define GRPCI2_INTA_SYSIRQ 0xff
+#endif
+#ifndef GRPCI2_INTB_SYSIRQ
+ #define GRPCI2_INTB_SYSIRQ 0xff
+#endif
+#ifndef GRPCI2_INTC_SYSIRQ
+ #define GRPCI2_INTC_SYSIRQ 0xff
+#endif
+#ifndef GRPCI2_INTD_SYSIRQ
+ #define GRPCI2_INTD_SYSIRQ 0xff
+#endif
+
+/*#define DEBUG 1*/
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/*
+ * GRPCI2 APB Register MAP
+ */
+struct grpci2_regs {
+ volatile unsigned int ctrl; /* 0x00 */
+ volatile unsigned int sts_cap; /* 0x04 */
+ volatile unsigned int ppref; /* 0x08 */
+ volatile unsigned int io_map; /* 0x0C */
+ volatile unsigned int dma_ctrl; /* 0x10 */
+ volatile unsigned int dma_bdbase; /* 0x14 */
+ volatile unsigned int dma_chact; /* 0x18 */
+ int res1; /* 0x1C */
+ volatile unsigned int bars[6]; /* 0x20 */
+ int res2[2]; /* 0x38 */
+ volatile unsigned int ahbmst_map[16]; /* 0x40 */
+};
+
+#define CTRL_BUS_BIT 16
+
+#define CTRL_SI (1<<27)
+#define CTRL_PE (1<<26)
+#define CTRL_ER (1<<25)
+#define CTRL_EI (1<<24)
+#define CTRL_BUS (0xff<<CTRL_BUS_BIT)
+#define CTRL_HOSTINT 0xf
+
+#define STS_HOST_BIT 31
+#define STS_MST_BIT 30
+#define STS_TAR_BIT 29
+#define STS_DMA_BIT 28
+#define STS_DI_BIT 27
+#define STS_HI_BIT 26
+#define STS_IRQMODE_BIT 24
+#define STS_TRACE_BIT 23
+#define STS_CFGERRVALID_BIT 20
+#define STS_CFGERR_BIT 19
+#define STS_INTTYPE_BIT 12
+#define STS_INTSTS_BIT 8
+#define STS_FDEPTH_BIT 2
+#define STS_FNUM_BIT 0
+
+#define STS_HOST (1<<STS_HOST_BIT)
+#define STS_MST (1<<STS_MST_BIT)
+#define STS_TAR (1<<STS_TAR_BIT)
+#define STS_DMA (1<<STS_DMA_BIT)
+#define STS_DI (1<<STS_DI_BIT)
+#define STS_HI (1<<STS_HI_BIT)
+#define STS_IRQMODE (0x3<<STS_IRQMODE_BIT)
+#define STS_TRACE (1<<STS_TRACE_BIT)
+#define STS_CFGERRVALID (1<<STS_CFGERRVALID_BIT)
+#define STS_CFGERR (1<<STS_CFGERR_BIT)
+#define STS_INTTYPE (0x7f<<STS_INTTYPE_BIT)
+#define STS_INTSTS (0xf<<STS_INTSTS_BIT)
+#define STS_FDEPTH (0x7<<STS_FDEPTH_BIT)
+#define STS_FNUM (0x3<<STS_FNUM_BIT)
+
+#define STS_ITIMEOUT (1<<18)
+#define STS_ISYSERR (1<<17)
+#define STS_IDMA (1<<16)
+#define STS_IDMAERR (1<<15)
+#define STS_IMSTABRT (1<<14)
+#define STS_ITGTABRT (1<<13)
+#define STS_IPARERR (1<<12)
+
+/* GRPCI2 Capability */
+struct grpci2_cap_first {
+ unsigned int ctrl;
+ unsigned int pci2ahb_map[6];
+ unsigned int ext2ahb_map;
+ unsigned int io_map;
+ unsigned int pcibar_size[6];
+ unsigned int ahb_pref;
+};
+#define CAP9_CTRL_OFS 0
+#define CAP9_BAR_OFS 0x4
+#define CAP9_IOMAP_OFS 0x20
+#define CAP9_BARSIZE_OFS 0x24
+#define CAP9_AHBPREF_OFS 0x3C
+
+/* Used internally for accessing the PCI bridge's configuration space itself */
+#define HOST_TGT PCI_DEV(0xff, 0, 0)
+
+struct grpci2_priv *grpci2priv = NULL;
+
+/* PCI Interrupt assignment. Connects a PCI interrupt pin (INTA#..INTD#)
+ * to a system interrupt number.
+ */
+unsigned char grpci2_pci_irq_table[4] =
+{
+ /* INTA# */ GRPCI2_INTA_SYSIRQ,
+ /* INTB# */ GRPCI2_INTB_SYSIRQ,
+ /* INTC# */ GRPCI2_INTC_SYSIRQ,
+ /* INTD# */ GRPCI2_INTD_SYSIRQ
+};
+
+/* Start of workspace/dynamical area */
+extern unsigned int _end;
+#define DMA_START ((unsigned int) &_end)
+
+/* Default BAR mapping, set BAR0 256MB 1:1 mapped base of CPU RAM */
+struct grpci2_pcibar_cfg grpci2_default_bar_mapping[6] = {
+ /* BAR0 */ {DMA_START, DMA_START, 0x10000000},
+ /* BAR1 */ {0, 0, 0},
+ /* BAR2 */ {0, 0, 0},
+ /* BAR3 */ {0, 0, 0},
+ /* BAR4 */ {0, 0, 0},
+ /* BAR5 */ {0, 0, 0},
+};
+
+/* Driver private data structure */
+struct grpci2_priv {
+ struct drvmgr_dev *dev;
+ struct grpci2_regs *regs;
+ unsigned char ver;
+ char irq;
+ char irq_mode; /* IRQ Mode from CAPSTS REG */
+ char irq_dma; /* IRQ Index for DMA */
+ char bt_enabled;
+ unsigned int irq_mask;
+ unsigned int latency_timer;
+
+ struct grpci2_pcibar_cfg *barcfg;
+
+ unsigned int pci_area;
+ unsigned int pci_area_end;
+ unsigned int pci_io;
+ unsigned int pci_conf;
+ unsigned int pci_conf_end;
+
+ uint32_t devVend; /* Host PCI Device/Vendor ID */
+
+ struct drvmgr_map_entry maps_up[7];
+ struct drvmgr_map_entry maps_down[2];
+ struct pcibus_config config;
+
+ /* DMA interrupts */
+ void (*dma_isr)(void *data);
+ void *dma_isr_arg;
+
+ SPIN_DECLARE(devlock)
+};
+
+int grpci2_init1(struct drvmgr_dev *dev);
+int grpci2_init3(struct drvmgr_dev *dev);
+void grpci2_err_isr(void *arg);
+void grpci2_dma_isr(void *arg);
+
+/* GRPCI2 DRIVER */
+
+struct drvmgr_drv_ops grpci2_ops =
+{
+ .init = {grpci2_init1, NULL, grpci2_init3, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+struct amba_dev_id grpci2_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_GRPCI2},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info grpci2_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_GRPCI2_ID,/* Driver ID */
+ "GRPCI2_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &grpci2_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ sizeof(struct grpci2_priv), /* Make drvmgr alloc private */
+ },
+ &grpci2_ids[0]
+};
+
+/* Defaults to do nothing - user can override this function
+ * by including the DMA DRIVER.
+ */
+int __attribute__((weak)) grpci2dma_init(void * regs, void isr_register( void (*isr)(void *), void * arg));
+
+int grpci2dma_init(void * regs, void isr_register( void (*isr)(void *), void * arg))
+{
+ return 0;
+}
+
+/* Prototype of grpci2_dma_isr_register function */
+static void grpci2_dma_isr_register( void (*isr)(void *), void * arg);
+
+void grpci2_register_drv(void)
+{
+ DBG("Registering GRPCI2 driver\n");
+ drvmgr_drv_register(&grpci2_info.general);
+}
+
+static int grpci2_cfg_r32(pci_dev_t dev, int ofs, uint32_t *val)
+{
+ struct grpci2_priv *priv = grpci2priv;
+ volatile uint32_t *pci_conf;
+ unsigned int tmp, devfn;
+ int retval, bus = PCI_DEV_BUS(dev);
+ SPIN_IRQFLAGS(irqflags);
+
+ if ((unsigned int)ofs & 0xffffff03) {
+ retval = PCISTS_EINVAL;
+ goto out2;
+ }
+
+ if (PCI_DEV_SLOT(dev) > 15) {
+ retval = PCISTS_MSTABRT;
+ goto out;
+ }
+
+ /* GRPCI2 can access "non-standard" devices on bus0 (on AD11.AD16),
+ * we skip them.
+ */
+ if (dev == HOST_TGT)
+ bus = devfn = 0;
+ else if (bus == 0)
+ devfn = PCI_DEV_DEVFUNC(dev) + PCI_DEV(0, 6, 0);
+ else
+ devfn = PCI_DEV_DEVFUNC(dev);
+
+ pci_conf = (volatile uint32_t *) (priv->pci_conf | (devfn << 8) | ofs);
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ /* Select bus */
+ priv->regs->ctrl = (priv->regs->ctrl & ~(0xff<<16)) | (bus<<16);
+ /* clear old status */
+ priv->regs->sts_cap = (STS_CFGERR | STS_CFGERRVALID);
+
+ tmp = *pci_conf;
+
+ /* Wait until GRPCI2 signals that CFG access is done, it should be
+ * done instantaneously unless a DMA operation is ongoing...
+ */
+ while ((priv->regs->sts_cap & STS_CFGERRVALID) == 0)
+ ;
+
+ if (priv->regs->sts_cap & STS_CFGERR) {
+ retval = PCISTS_MSTABRT;
+ } else {
+ /* Bus always little endian (unaffected by byte-swapping) */
+ *val = CPU_swap_u32(tmp);
+ retval = PCISTS_OK;
+ }
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+out:
+ if (retval != PCISTS_OK)
+ *val = 0xffffffff;
+
+ DBG("pci_read: [%x:%x:%x] reg: 0x%x => addr: 0x%x, val: 0x%x (%d)\n",
+ PCI_DEV_EXPAND(dev), ofs, pci_conf, *val, retval);
+
+out2:
+ return retval;
+}
+
+static int grpci2_cfg_r16(pci_dev_t dev, int ofs, uint16_t *val)
+{
+ uint32_t v;
+ int retval;
+
+ if (ofs & 1)
+ return PCISTS_EINVAL;
+
+ retval = grpci2_cfg_r32(dev, ofs & ~0x3, &v);
+ *val = 0xffff & (v >> (8*(ofs & 0x3)));
+
+ return retval;
+}
+
+static int grpci2_cfg_r8(pci_dev_t dev, int ofs, uint8_t *val)
+{
+ uint32_t v;
+ int retval;
+
+ retval = grpci2_cfg_r32(dev, ofs & ~0x3, &v);
+
+ *val = 0xff & (v >> (8*(ofs & 3)));
+
+ return retval;
+}
+
+static int grpci2_cfg_w32(pci_dev_t dev, int ofs, uint32_t val)
+{
+ struct grpci2_priv *priv = grpci2priv;
+ volatile uint32_t *pci_conf;
+ uint32_t value, devfn;
+ int retval, bus = PCI_DEV_BUS(dev);
+ SPIN_IRQFLAGS(irqflags);
+
+ if ((unsigned int)ofs & 0xffffff03)
+ return PCISTS_EINVAL;
+
+ if (PCI_DEV_SLOT(dev) > 15)
+ return PCISTS_MSTABRT;
+
+ value = CPU_swap_u32(val);
+
+ /* GRPCI2 can access "non-standard" devices on bus0 (on AD11.AD16),
+ * we skip them.
+ */
+ if (dev == HOST_TGT)
+ bus = devfn = 0;
+ else if (bus == 0)
+ devfn = PCI_DEV_DEVFUNC(dev) + PCI_DEV(0, 6, 0);
+ else
+ devfn = PCI_DEV_DEVFUNC(dev);
+
+ pci_conf = (volatile uint32_t *) (priv->pci_conf | (devfn << 8) | ofs);
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ /* Select bus */
+ priv->regs->ctrl = (priv->regs->ctrl & ~(0xff<<16)) | (bus<<16);
+ /* clear old status */
+ priv->regs->sts_cap = (STS_CFGERR | STS_CFGERRVALID);
+
+ *pci_conf = value;
+
+ /* Wait until GRPCI2 signals that CFG access is done, it should be
+ * done instantaneously unless a DMA operation is ongoing...
+ */
+ while ((priv->regs->sts_cap & STS_CFGERRVALID) == 0)
+ ;
+
+ if (priv->regs->sts_cap & STS_CFGERR)
+ retval = PCISTS_MSTABRT;
+ else
+ retval = PCISTS_OK;
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ DBG("pci_write - [%x:%x:%x] reg: 0x%x => addr: 0x%x, val: 0x%x (%d)\n",
+ PCI_DEV_EXPAND(dev), ofs, pci_conf, value, retval);
+
+ return retval;
+}
+
+static int grpci2_cfg_w16(pci_dev_t dev, int ofs, uint16_t val)
+{
+ uint32_t v;
+ int retval;
+
+ if (ofs & 1)
+ return PCISTS_EINVAL;
+
+ retval = grpci2_cfg_r32(dev, ofs & ~0x3, &v);
+ if (retval != PCISTS_OK)
+ return retval;
+
+ v = (v & ~(0xffff << (8*(ofs&3)))) | ((0xffff&val) << (8*(ofs&3)));
+
+ return grpci2_cfg_w32(dev, ofs & ~0x3, v);
+}
+
+static int grpci2_cfg_w8(pci_dev_t dev, int ofs, uint8_t val)
+{
+ uint32_t v;
+ int retval;
+
+ retval = grpci2_cfg_r32(dev, ofs & ~0x3, &v);
+ if (retval != PCISTS_OK)
+ return retval;
+
+ v = (v & ~(0xff << (8*(ofs&3)))) | ((0xff&val) << (8*(ofs&3)));
+
+ return grpci2_cfg_w32(dev, ofs & ~0x3, v);
+}
+
+/* Return the assigned system IRQ number that corresponds to the PCI
+ * "Interrupt Pin" information from configuration space.
+ *
+ * The IRQ information is stored in the grpci2_pci_irq_table configurable
+ * by the user.
+ *
+ * Returns the "system IRQ" for the PCI INTA#..INTD# pin in irq_pin. Returns
+ * 0xff if not assigned.
+ */
+static uint8_t grpci2_bus0_irq_map(pci_dev_t dev, int irq_pin)
+{
+ uint8_t sysIrqNr = 0; /* not assigned */
+ int irq_group;
+
+ if ( (irq_pin >= 1) && (irq_pin <= 4) ) {
+ /* Use default IRQ decoding on PCI BUS0 according to slot numbering */
+ irq_group = PCI_DEV_SLOT(dev) & 0x3;
+ irq_pin = ((irq_pin - 1) + irq_group) & 0x3;
+ /* Valid PCI "Interrupt Pin" number */
+ sysIrqNr = grpci2_pci_irq_table[irq_pin];
+ }
+ return sysIrqNr;
+}
+
+static int grpci2_translate(uint32_t *address, int type, int dir)
+{
+ uint32_t adr, start, end;
+ struct grpci2_priv *priv = grpci2priv;
+ int i;
+
+ if (type == 1) {
+ /* I/O */
+ if (dir != 0) {
+ /* The PCI bus can not access the CPU bus from I/O
+ * because GRPCI2 core does not support I/O BARs
+ */
+ return -1;
+ }
+
+ /* We have got a PCI IO BAR address that the CPU want to access.
+ * Check that it is within the PCI I/O window, I/O addresses
+ * are NOT mapped 1:1 with GRPCI2 driver... translation needed.
+ */
+ adr = *(uint32_t *)address;
+ if (adr < 0x100 || adr > 0x10000)
+ return -1;
+ *address = adr + priv->pci_io;
+ } else {
+ /* MEMIO and MEM.
+ * Memory space is mapped 1:1 so no translation is needed.
+ * Check that address is within accessible windows.
+ */
+ adr = *(uint32_t *)address;
+ if (dir == 0) {
+ /* PCI BAR to AMBA-CPU address.. check that it is
+ * located within GRPCI2 PCI Memory Window
+ * adr = PCI address.
+ */
+ if (adr < priv->pci_area || adr >= priv->pci_area_end)
+ return -1;
+ } else {
+ /* We have a CPU address and want to get access to it
+ * from PCI space, typically when doing DMA into CPU
+ * RAM. The GRPCI2 core may have multiple target BARs
+ * that PCI masters can access, the BARs are user
+ * configurable in the following ways:
+ * BAR_SIZE, PCI_BAR Address and MAPPING (AMBA ADR)
+ *
+ * The below code tries to find a BAR for which the
+ * AMBA bar may have been mapped onto, and translate
+ * the AMBA-CPU address into a PCI address using the
+ * given mapping.
+ *
+ * adr = AMBA address.
+ */
+ for(i=0; i<6; i++) {
+ start = priv->barcfg[i].ahbadr;
+ end = priv->barcfg[i].ahbadr +
+ priv->barcfg[i].barsize;
+ if (adr >= start && adr < end) {
+ /* BAR match: Translate address */
+ *address = (adr - start) +
+ priv->barcfg[i].pciadr;
+ return 0;
+ }
+ }
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+extern struct pci_memreg_ops pci_memreg_sparc_le_ops;
+extern struct pci_memreg_ops pci_memreg_sparc_be_ops;
+
+/* GRPCI2 PCI access routines, default to Little-endian PCI Bus */
+struct pci_access_drv grpci2_access_drv = {
+ .cfg =
+ {
+ grpci2_cfg_r8,
+ grpci2_cfg_r16,
+ grpci2_cfg_r32,
+ grpci2_cfg_w8,
+ grpci2_cfg_w16,
+ grpci2_cfg_w32,
+ },
+ .io =
+ {
+ _ld8,
+ _ld_le16,
+ _ld_le32,
+ _st8,
+ _st_le16,
+ _st_le32,
+ },
+ .memreg = &pci_memreg_sparc_le_ops,
+ .translate = grpci2_translate,
+};
+
+struct pci_io_ops grpci2_io_ops_be =
+{
+ _ld8,
+ _ld_be16,
+ _ld_be32,
+ _st8,
+ _st_be16,
+ _st_be32,
+};
+
+/* PCI Error Interrupt handler, called when there may be a PCI Target/Master
+ * Abort.
+ */
+void grpci2_err_isr(void *arg)
+{
+ struct grpci2_priv *priv = arg;
+ unsigned int sts = priv->regs->sts_cap;
+
+ if (sts & (STS_IMSTABRT | STS_ITGTABRT | STS_IPARERR | STS_ISYSERR | STS_ITIMEOUT)) {
+ /* A PCI error IRQ ... Error handler unimplemented
+ * add your code here...
+ */
+ if (sts & STS_IMSTABRT) {
+ printk("GRPCI2: unhandled Master Abort IRQ\n");
+ }
+ if (sts & STS_ITGTABRT) {
+ printk("GRPCI2: unhandled Target Abort IRQ\n");
+ }
+ if (sts & STS_IPARERR) {
+ printk("GRPCI2: unhandled Parity Error IRQ\n");
+ }
+ if (sts & STS_ISYSERR) {
+ printk("GRPCI2: unhandled System Error IRQ\n");
+ }
+ if (sts & STS_ITIMEOUT) {
+ printk("GRPCI2: unhandled PCI target access timeout IRQ\n");
+ }
+ }
+}
+
+/* PCI DMA Interrupt handler, called when there may be a PCI DMA interrupt.
+ */
+void grpci2_dma_isr(void *arg)
+{
+ struct grpci2_priv *priv = arg;
+ unsigned int sts = (priv->regs->sts_cap & (STS_IDMAERR | STS_IDMA));
+
+ /* Clear Interrupt if taken */
+ if (sts != 0){
+ /* Clear IDMAERR and IDMA bits */
+ priv->regs->sts_cap = (STS_IDMAERR | STS_IDMA);
+ /* Clear DRVMGR interrupt */
+ drvmgr_interrupt_clear(priv->dev, priv->irq_dma);
+ /* Call DMA driver ISR */
+ (priv->dma_isr)(priv->dma_isr_arg);
+ }
+}
+
+static int grpci2_hw_init(struct grpci2_priv *priv)
+{
+ struct grpci2_regs *regs = priv->regs;
+ int i;
+ uint8_t capptr;
+ uint32_t data, io_map, ahbadr, pciadr, size;
+ pci_dev_t host = HOST_TGT;
+ struct grpci2_pcibar_cfg *barcfg = priv->barcfg;
+
+ /* Reset any earlier setup */
+ regs->ctrl = 0;
+ regs->sts_cap = ~0; /* Clear Status */
+ regs->dma_ctrl = 0;
+ regs->dma_bdbase = 0;
+
+ /* Translate I/O accesses 1:1, (will not work for PCI 2.3) */
+ regs->io_map = priv->pci_io & 0xffff0000;
+
+ /* set 1:1 mapping between AHB -> PCI memory space, for all Masters
+ * Each AHB master has it's own mapping registers. Max 16 AHB masters.
+ */
+ for (i=0; i<16; i++)
+ regs->ahbmst_map[i] = priv->pci_area;
+
+ /* Get the GRPCI2 Host PCI ID */
+ grpci2_cfg_r32(host, PCIR_VENDOR, &priv->devVend);
+
+ /* Get address to first (always defined) capability structure */
+ grpci2_cfg_r8(host, PCIR_CAP_PTR, &capptr);
+ if (capptr == 0)
+ return -1;
+
+ /* Limit the prefetch for GRPCI2 version 0. */
+ if (priv->ver == 0)
+ grpci2_cfg_w32(host, capptr+CAP9_AHBPREF_OFS, 0);
+
+ /* Enable/Disable Byte twisting */
+ grpci2_cfg_r32(host, capptr+CAP9_IOMAP_OFS, &io_map);
+ io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0);
+ grpci2_cfg_w32(host, capptr+CAP9_IOMAP_OFS, io_map);
+
+ /* Setup the Host's PCI Target BARs for others to access (DMA) */
+ for (i=0; i<6; i++) {
+ /* Make sure address is properly aligned */
+ size = ~(barcfg[i].barsize-1);
+ barcfg[i].pciadr &= size;
+ barcfg[i].ahbadr &= size;
+
+ pciadr = barcfg[i].pciadr;
+ ahbadr = barcfg[i].ahbadr;
+ size |= PCIM_BAR_MEM_PREFETCH;
+
+ grpci2_cfg_w32(host, capptr+CAP9_BARSIZE_OFS+i*4, size);
+ grpci2_cfg_w32(host, capptr+CAP9_BAR_OFS+i*4, ahbadr);
+ grpci2_cfg_w32(host, PCIR_BAR(0)+i*4, pciadr);
+ }
+
+ /* set as bus master and enable pci memory responses */
+ grpci2_cfg_r32(host, PCIR_COMMAND, &data);
+ data |= (PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
+ grpci2_cfg_w32(host, PCIR_COMMAND, data);
+
+ /* set latency timer */
+ grpci2_cfg_r32(host, PCIR_CACHELNSZ, &data);
+ data &= ~0xff00;
+ data |= ((priv->latency_timer & 0xff) << 8);
+ grpci2_cfg_w32(host, PCIR_CACHELNSZ, data);
+
+ /* Enable Error response (CPU-TRAP) on illegal memory access */
+ regs->ctrl = CTRL_ER | CTRL_PE;
+
+ /* Successful */
+ return 0;
+}
+
+/* Initializes the GRPCI2 core and driver, must be called before calling
+ * init_pci()
+ *
+ * Return values
+ * 0 Successful initialization
+ * -1 Error during initialization, for example "PCI core not found".
+ * -2 Error PCI controller not HOST (targets not supported)
+ * -3 Error due to GRPCI2 hardware initialization
+ */
+static int grpci2_init(struct grpci2_priv *priv)
+{
+ struct ambapp_apb_info *apb;
+ struct ambapp_ahb_info *ahb;
+ int pin, i, j;
+ union drvmgr_key_value *value;
+ char keyname[6];
+ struct amba_dev_info *ainfo = priv->dev->businfo;
+ struct grpci2_pcibar_cfg *barcfg;
+ unsigned int size;
+
+ /* Find PCI core from Plug&Play information */
+ apb = ainfo->info.apb_slv;
+ ahb = ainfo->info.ahb_slv;
+
+ /* Found PCI core, init private structure */
+ priv->irq = apb->irq;
+ priv->ver = apb->ver;
+ priv->regs = (struct grpci2_regs *)apb->start;
+ priv->bt_enabled = DEFAULT_BT_ENABLED;
+ priv->irq_mode = (priv->regs->sts_cap & STS_IRQMODE) >> STS_IRQMODE_BIT;
+ priv->latency_timer = DEFAULT_LATENCY_TIMER;
+
+ /* Initialize Spin-lock for GRPCI2 Device. */
+ SPIN_INIT(&priv->devlock, "grpci2");
+
+ /* Calculate the PCI windows
+ * AMBA->PCI Window: AHB SLAVE AREA0
+ * AMBA->PCI I/O cycles Window: AHB SLAVE AREA1 Lower half
+ * AMBA->PCI Configuration cycles Window: AHB SLAVE AREA1 Upper half
+ */
+ priv->pci_area = ahb->start[0];
+ priv->pci_area_end = ahb->start[0] + ahb->mask[0];
+ priv->pci_io = ahb->start[1];
+ priv->pci_conf = ahb->start[1] + 0x10000;
+ priv->pci_conf_end = priv->pci_conf + 0x10000;
+
+ /* On systems where the PCI I/O area and configuration area are part of the
+ * "PCI Window" the PCI Window stops at the start of the PCI I/O area
+ */
+ if ((priv->pci_io > priv->pci_area) &&
+ (priv->pci_io < (priv->pci_area_end-1))) {
+ priv->pci_area_end = priv->pci_io;
+ }
+
+ /* Init PCI interrupt assignment table to all use the interrupt routed
+ * through the GRPCI2 core.
+ */
+ strcpy(keyname, "INTX#");
+ for (pin=1; pin<5; pin++) {
+ if (grpci2_pci_irq_table[pin-1] == 0xff) {
+ if (priv->irq_mode < 2) {
+ /* PCI Interrupts are shared */
+ grpci2_pci_irq_table[pin-1] = priv->irq;
+ } else {
+ /* Unique IRQ per PCI INT Pin */
+ grpci2_pci_irq_table[pin-1] = priv->irq + pin-1;
+ }
+
+ /* User may override both hardcoded IRQ setup and Plug & Play IRQ */
+ keyname[3] = 'A' + (pin-1);
+ value = drvmgr_dev_key_get(priv->dev, keyname, DRVMGR_KT_INT);
+ if (value)
+ grpci2_pci_irq_table[pin-1] = value->i;
+ }
+
+ /* Remember which IRQs are enabled */
+ if (grpci2_pci_irq_table[pin-1] != 0)
+ priv->irq_mask |= 1 << (pin-1);
+ }
+
+ /* User may override DEFAULT_BT_ENABLED to enable/disable byte twisting */
+ value = drvmgr_dev_key_get(priv->dev, "byteTwisting", DRVMGR_KT_INT);
+ if (value)
+ priv->bt_enabled = value->i;
+
+ /* Let user Configure the 6 target BARs */
+ value = drvmgr_dev_key_get(priv->dev, "tgtBarCfg", DRVMGR_KT_POINTER);
+ if (value)
+ priv->barcfg = value->ptr;
+ else
+ priv->barcfg = grpci2_default_bar_mapping;
+
+ /* User may override DEFAULT_LATENCY_TIMER */
+ value = drvmgr_dev_key_get(priv->dev, "latencyTimer", DRVMGR_KT_INT);
+ if (value)
+ priv->latency_timer = value->i;
+
+ /* This driver only support HOST systems, we check that it can act as a
+ * PCI Master and that it is in the Host slot. */
+ if ((priv->regs->sts_cap&STS_HOST) || !(priv->regs->sts_cap&STS_MST))
+ return -2; /* Target not supported */
+
+ /* Init the PCI Core */
+ if (grpci2_hw_init(priv))
+ return -3;
+
+ /* Down streams translation table */
+ priv->maps_down[0].name = "AMBA -> PCI MEM Window";
+ priv->maps_down[0].size = priv->pci_area_end - priv->pci_area;
+ priv->maps_down[0].from_adr = (void *)priv->pci_area;
+ priv->maps_down[0].to_adr = (void *)priv->pci_area;
+ /* End table */
+ priv->maps_down[1].size = 0;
+
+ /* Up streams translation table */
+ /* Setup the Host's PCI Target BARs for others to access (DMA) */
+ barcfg = priv->barcfg;
+ for (i=0,j=0; i<6; i++) {
+ size = barcfg[i].barsize;
+ if (size == 0)
+ continue;
+
+ /* Make sure address is properly aligned */
+ priv->maps_up[j].name = "Target BAR[I] -> AMBA";
+ priv->maps_up[j].size = size;
+ priv->maps_up[j].from_adr = (void *)
+ (barcfg[i].pciadr & ~(size - 1));
+ priv->maps_up[j].to_adr = (void *)
+ (barcfg[i].ahbadr & ~(size - 1));
+ j++;
+ }
+
+ /* End table */
+ priv->maps_up[j].size = 0;
+
+ return 0;
+}
+
+/* Called when a core is found with the AMBA device and vendor ID
+ * given in grpci2_ids[]. IRQ, Console does not work here
+ */
+int grpci2_init1(struct drvmgr_dev *dev)
+{
+ int status;
+ struct grpci2_priv *priv;
+ struct pci_auto_setup grpci2_auto_cfg;
+
+ DBG("GRPCI2[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+
+ if (grpci2priv) {
+ DBG("Driver only supports one PCI core\n");
+ return DRVMGR_FAIL;
+ }
+
+ if ((strcmp(dev->parent->dev->drv->name, "AMBAPP_GRLIB_DRV") != 0) &&
+ (strcmp(dev->parent->dev->drv->name, "AMBAPP_LEON2_DRV") != 0)) {
+ /* We only support GRPCI2 driver on local bus */
+ return DRVMGR_FAIL;
+ }
+
+ priv = dev->priv;
+ if (!priv)
+ return DRVMGR_NOMEM;
+
+ priv->dev = dev;
+ grpci2priv = priv;
+
+ /* Initialize GRPCI2 Hardware */
+ status = grpci2_init(priv);
+ if (status) {
+ printk("Failed to initialize grpci2 driver %d\n", status);
+ return -1;
+ }
+
+ /* Register the PCI core at the PCI layers */
+
+ if (priv->bt_enabled == 0) {
+ /* Host is Big-Endian */
+ pci_endian = PCI_BIG_ENDIAN;
+
+ memcpy(&grpci2_access_drv.io, &grpci2_io_ops_be,
+ sizeof(grpci2_io_ops_be));
+ grpci2_access_drv.memreg = &pci_memreg_sparc_be_ops;
+ }
+
+ if (pci_access_drv_register(&grpci2_access_drv)) {
+ /* Access routines registration failed */
+ return DRVMGR_FAIL;
+ }
+
+ /* Prepare memory MAP */
+ grpci2_auto_cfg.options = 0;
+ grpci2_auto_cfg.mem_start = 0;
+ grpci2_auto_cfg.mem_size = 0;
+ grpci2_auto_cfg.memio_start = priv->pci_area;
+ grpci2_auto_cfg.memio_size = priv->pci_area_end - priv->pci_area;
+ grpci2_auto_cfg.io_start = 0x100; /* avoid PCI address 0 */
+ grpci2_auto_cfg.io_size = 0x10000 - 0x100; /* lower 64kB I/O 16 */
+ grpci2_auto_cfg.irq_map = grpci2_bus0_irq_map;
+ grpci2_auto_cfg.irq_route = NULL; /* use standard routing */
+ pci_config_register(&grpci2_auto_cfg);
+
+ if (pci_config_init()) {
+ /* PCI configuration failed */
+ return DRVMGR_FAIL;
+ }
+
+ /* Initialize/Register Driver Manager PCI Bus */
+ priv->config.maps_down = &priv->maps_down[0];
+ priv->config.maps_up = &priv->maps_up[0];
+ return pcibus_register(dev, &priv->config);
+}
+
+int grpci2_init3(struct drvmgr_dev *dev)
+{
+ struct grpci2_priv *priv = dev->priv;
+
+ /* Install and Enable PCI Error interrupt handler */
+ drvmgr_interrupt_register(dev, 0, "grpci2", grpci2_err_isr, priv);
+
+ /* Initialize DMA driver (if supported) */
+ if (priv->regs->sts_cap & STS_DMA){
+ grpci2dma_init((void *) &(priv->regs->dma_ctrl), grpci2_dma_isr_register);
+ }
+
+ /* Unmask Error IRQ and all PCI interrupts at PCI Core. For this to be
+ * safe every PCI board has to be reset (no IRQ generation) before
+ * Global IRQs are enabled (Init is reached or similar)
+ */
+ priv->regs->ctrl |= (CTRL_EI | priv->irq_mask);
+
+ return DRVMGR_OK;
+}
+
+static void grpci2_dma_isr_register( void (*isr)(void *), void * arg)
+{
+ struct grpci2_priv *priv = grpci2priv;
+
+ /* Handle unregistration */
+ if (priv->dma_isr != NULL) {
+ drvmgr_interrupt_unregister(priv->dev, priv->irq_dma, grpci2_dma_isr, priv);
+ /* Uninstall user ISR */
+ priv->dma_isr = NULL;
+ priv->dma_isr_arg = NULL;
+ }
+
+ if (isr == NULL)
+ return;
+
+ /* Install user ISR */
+ priv->dma_isr_arg = arg;
+ priv->dma_isr = isr;
+
+ /* Install and Enable PCI DMA interrupt handler */
+ if (priv->irq_mode == 1) {
+ priv->irq_dma = 1;
+ } else if (priv->irq_mode == 3) {
+ priv->irq_dma = 4;
+ } else {
+ priv->irq_dma = 0;
+ }
+ drvmgr_interrupt_register(priv->dev, priv->irq_dma, "grpci2dma", grpci2_dma_isr, priv);
+}
diff --git a/bsps/shared/grlib/pci/grpci2dma.c b/bsps/shared/grlib/pci/grpci2dma.c
new file mode 100644
index 0000000000..7e39ca691d
--- /dev/null
+++ b/bsps/shared/grlib/pci/grpci2dma.c
@@ -0,0 +1,2026 @@
+/*
+ * GRPCI2 DMA Driver
+ *
+ * COPYRIGHT (c) 2017
+ * Cobham Gaisler AB
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <stddef.h>
+#include <drvmgr/drvmgr.h>
+#include <rtems.h>
+#include <rtems/bspIo.h> /* for printk */
+#include <bsp.h>
+#include <grlib/grpci2dma.h>
+
+#include <grlib/grlib_impl.h>
+
+/* This driver has been prepared for SMP operation
+ */
+
+/*#define STATIC*/
+#define STATIC static
+
+/*#define INLINE*/
+#define INLINE inline
+
+/*#define UNUSED*/
+#define UNUSED __attribute__((unused))
+
+/*#define DEBUG 1*/
+
+#ifdef DEBUG
+#define DBG(x...) printf(x)
+#else
+#define DBG(x...)
+#endif
+
+#define BD_CHAN_EN (1<<BD_CHAN_EN_BIT)
+#define BD_CHAN_ID (0x3<<BD_CHAN_ID_BIT)
+#define BD_CHAN_TYPE (0x3<<BD_CHAN_TYPE_BIT)
+#define BD_CHAN_TYPE_DMA (0x1<<BD_CHAN_TYPE_BIT)
+#define BD_CHAN_BDCNT (0xffff<<BD_CHAN_BDCNT_BIT)
+#define BD_CHAN_EN_BIT 31
+#define BD_CHAN_ID_BIT 22
+#define BD_CHAN_TYPE_BIT 20
+#define BD_CHAN_BDCNT_BIT 0
+
+#define BD_DATA_EN (0x1<<BD_DATA_EN_BIT)
+#define BD_DATA_IE (0x1<<BD_DATA_IE_BIT)
+#define BD_DATA_DR (0x1<<BD_DATA_DR_BIT)
+#define BD_DATA_BE (0x1<<BD_DATA_BE_BIT)
+#define BD_DATA_TYPE (0x3<<BD_DATA_TYPE_BIT)
+#define BD_DATA_TYPE_DATA (0x0<<BD_DATA_TYPE_BIT)
+#define BD_DATA_ER (0x1<<BD_DATA_ER_BIT)
+#define BD_DATA_LEN (0xffff<<BD_DATA_LEN_BIT)
+#define BD_DATA_EN_BIT 31
+#define BD_DATA_IE_BIT 30
+#define BD_DATA_DR_BIT 29
+#define BD_DATA_BE_BIT 28
+#define BD_DATA_TYPE_BIT 20
+#define BD_DATA_ER_BIT 19
+#define BD_DATA_LEN_BIT 0
+
+/* DMA control register (dma_ctrl) fields. CHIRQ and ERR are status
+ * fields cleared by writing ones (covered together by DMACTRL_WCLEAR).
+ * Note: the original block defined DMACTRL_ERR twice; the duplicate
+ * line has been removed.
+ */
+#define DMACTRL_SAFE (0x1<<DMACTRL_SAFE_BIT)
+#define DMACTRL_WCLEAR (0x1fff<<DMACTRL_ERR_BIT)
+#define DMACTRL_ERR (0x1f<<DMACTRL_ERR_BIT)
+#define DMACTRL_CHIRQ (0xff<<DMACTRL_CHIRQ_BIT)
+#define DMACTRL_NUMCH (0x7<<DMACTRL_NUMCH_BIT)
+#define DMACTRL_DIS (0x1<<DMACTRL_DIS_BIT)
+#define DMACTRL_IE (0x1<<DMACTRL_IE_BIT)
+#define DMACTRL_ACT (0x1<<DMACTRL_ACT_BIT)
+#define DMACTRL_EN (0x1<<DMACTRL_EN_BIT)
+
+#define DMACTRL_SAFE_BIT 31
+#define DMACTRL_CHIRQ_BIT 12
+#define DMACTRL_ERR_BIT 7
+#define DMACTRL_NUMCH_BIT 4
+#define DMACTRL_DIS_BIT 2
+#define DMACTRL_IE_BIT 1
+#define DMACTRL_ACT_BIT 3
+#define DMACTRL_EN_BIT 0
+
+/* GRPCI2 DMA does not allow more than 8 DMA chans */
+#define MAX_DMA_CHANS 8
+
+/* GRPCI2 DMA does not allow transfer of more than 0x10000 words */
+#define MAX_DMA_TRANSFER_SIZE (0x10000*4)
+
+/* We use the following limits as default */
+#define MAX_DMA_DATA 128
+
+/* Memory and HW Registers Access routines. All 32-bit access routines */
+#define BD_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
+/*#define BD_READ(addr) (*(volatile unsigned int *)(addr))*/
+#define BD_READ(addr) leon_r32_no_cache((unsigned long)(addr))
+#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
+#define REG_READ(addr) (*(volatile unsigned int *)(addr))
+
+/*
+ * GRPCI2 DMA Channel descriptor
+ */
+struct grpci2_bd_chan {
+ volatile unsigned int ctrl; /* 0x00 DMA Control */
+ volatile unsigned int nchan; /* 0x04 Next DMA Channel Address */
+ volatile unsigned int nbd; /* 0x08 Next Data Descriptor in channel */
+ volatile unsigned int res; /* 0x0C Reserved */
+};
+
+/*
+ * GRPCI2 DMA Data descriptor
+ */
+struct grpci2_bd_data {
+ volatile unsigned int ctrl; /* 0x00 DMA Data Control */
+ volatile unsigned int pci_adr; /* 0x04 PCI Start Address */
+ volatile unsigned int ahb_adr; /* 0x08 AHB Start address */
+ volatile unsigned int next; /* 0x0C Next Data Descriptor in channel */
+};
+
+
+/*
+ * GRPCI2 DMA APB Register MAP
+ */
+struct grpci2dma_regs {
+ volatile unsigned int dma_ctrl; /* 0x00 */
+ volatile unsigned int dma_bdbase; /* 0x04 */
+ volatile unsigned int dma_chact; /* 0x08 */
+};
+
+#define DEVNAME_LEN 11
+/*
+ * GRPCI2 DMA Driver private data struture
+ */
+struct grpci2dma_priv {
+ /* DMA control registers */
+ struct grpci2dma_regs *regs;
+ char devname[DEVNAME_LEN];
+
+ /* Channel info */
+ struct {
+ /* Channel pointer. Indicates the assigned channel
+ * for a given cid (used as index). NULL if not assigned.
+ */
+ struct grpci2_bd_chan * ptr;
+ /* Is this channel allocated by the driver */
+ int allocated;
+ /* Last added data descriptor for each channel.
+ * This simplifies/speeds up adding data descriptors
+ * to the channel*/
+ struct grpci2_bd_data * lastdata;
+ /* Is this channel active */
+ int active;
+ /* Interrupt-code Handling
+ * - isr: Holds the ISR for each channel
+ * - isr_arg: Holds the ISR arg for each channel
+ */
+ grpci2dma_isr_t isr;
+ void * isr_arg;
+
+ /* DMA Channel Semaphore */
+ rtems_id sem;
+ } channel[MAX_DMA_CHANS];
+
+ /* Indicates the number of channels. */
+ int nchans;
+
+ /* Indicates the number of active channels. */
+ int nactive;
+
+ /* Indicates if the number of DMA ISR that have been registered
+ * into the GRPCI2 DRIVER */
+ int isr_registered;
+
+ /* Callback to register the DMA ISR into the GRPCI2 DRIVER */
+ void (*isr_register)( void (*isr)(void*), void * arg);
+
+ /* Spin-lock ISR protection */
+ SPIN_DECLARE(devlock);
+};
+
+/* The GRPCI2 DMA semaphore */
+rtems_id grpci2dma_sem;
+
+/*
+ * GRPCI2 DMA internal prototypes
+ */
+/* -Descriptor linked-list functions*/
+STATIC int grpci2dma_channel_list_add(struct grpci2_bd_chan * list,
+ struct grpci2_bd_chan * chan);
+STATIC int grpci2dma_channel_list_remove(struct grpci2_bd_chan * chan);
+STATIC int grpci2dma_data_list_add(struct grpci2_bd_chan * chan,
+ struct grpci2_bd_data * data, struct grpci2_bd_data * last_chan_data);
+STATIC int grpci2dma_data_list_remove(struct grpci2_bd_chan * chan,
+ struct grpci2_bd_data * data);
+STATIC int grpci2dma_channel_list_foreach(struct grpci2_bd_chan * chan,
+ int func( struct grpci2_bd_chan * chan), int maxindex);
+STATIC int grpci2dma_data_list_foreach(struct grpci2_bd_data * data,
+ int func( struct grpci2_bd_data * data), int maxindex);
+
+/* -DMA ctrl access functions */
+STATIC INLINE int grpci2dma_ctrl_init(void);
+STATIC INLINE int grpci2dma_ctrl_start(struct grpci2_bd_chan * chan);
+STATIC INLINE int grpci2dma_ctrl_stop(void);
+STATIC INLINE int grpci2dma_ctrl_resume(void);
+STATIC INLINE unsigned int grpci2dma_ctrl_status(void);
+STATIC INLINE unsigned int grpci2dma_ctrl_base(void);
+STATIC INLINE unsigned int grpci2dma_ctrl_active(void);
+STATIC INLINE int grpci2dma_ctrl_numch_set(int numch);
+STATIC INLINE int grpci2dma_ctrl_interrupt_status(void);
+STATIC INLINE int grpci2dma_ctrl_interrupt_enable(void);
+STATIC INLINE int grpci2dma_ctrl_interrupt_disable(void);
+STATIC INLINE int grpci2dma_ctrl_interrupt_clear(void);
+
+/* -Descriptor access functions */
+STATIC int grpci2dma_channel_bd_init(struct grpci2_bd_chan * chan);
+STATIC int grpci2dma_data_bd_init(struct grpci2_bd_data * data,
+ uint32_t pci_adr, uint32_t ahb_adr, int dir, int endianness,
+ int size, struct grpci2_bd_data * next);
+STATIC int grpci2dma_channel_bd_enable(struct grpci2_bd_chan * chan,
+ unsigned int options);
+STATIC int grpci2dma_channel_bd_disable(struct grpci2_bd_chan * chan);
+STATIC void grpci2dma_channel_bd_set_cid(struct grpci2_bd_chan * chan,
+ int cid);
+STATIC int grpci2dma_channel_bd_get_cid(struct grpci2_bd_chan * chan);
+STATIC int grpci2dma_data_bd_status(struct grpci2_bd_data *data);
+STATIC int grpci2dma_data_bd_disable(struct grpci2_bd_data *desc);
+STATIC int grpci2dma_data_bd_interrupt_enable(struct grpci2_bd_data * data);
+STATIC struct grpci2_bd_data * grpci2dma_channel_bd_get_data(
+ struct grpci2_bd_chan * chan);
+STATIC void grpci2dma_channel_bd_set_data(struct grpci2_bd_chan * chan,
+ struct grpci2_bd_data * data);
+STATIC struct grpci2_bd_chan * grpci2dma_channel_bd_get_next(
+ struct grpci2_bd_chan * chan);
+STATIC struct grpci2_bd_data * grpci2dma_data_bd_get_next(
+ struct grpci2_bd_data * data);
+STATIC void grpci2dma_channel_bd_set_next(struct grpci2_bd_chan * chan,
+ struct grpci2_bd_chan * next);
+STATIC void grpci2dma_data_bd_set_next(struct grpci2_bd_data * data,
+ struct grpci2_bd_data * next);
+
+/* -Channel functions */
+STATIC int grpci2dma_channel_open(struct grpci2_bd_chan * chan, int cid);
+STATIC int grpci2dma_channel_free_id(void);
+STATIC struct grpci2_bd_chan * grpci2dma_channel_get_active_list(void);
+STATIC int grpci2dma_channel_start(int chan_no, int options);
+STATIC int grpci2dma_channel_stop(int chan_no);
+STATIC int grpci2dma_channel_push(int chan_no, void *dataptr, int index,
+ int ndata);
+STATIC int grpci2dma_channel_close(int chan_no);
+STATIC int grpci2dma_channel_isr_unregister(int chan_no);
+
+/* -ISR functions*/
+STATIC void grpci2dma_isr(void *arg);
+
+/* -Init function called by GRPCI2*/
+int grpci2dma_init(void * regs,
+ void isr_register( void (*isr)(void*), void * arg));
+
+
+#ifdef DEBUG
+STATIC int grpci2dma_channel_print(struct grpci2_bd_chan * chan);
+STATIC int grpci2dma_data_print(struct grpci2_bd_data * data);
+#endif
+
+static struct grpci2dma_priv *grpci2dmapriv = NULL;
+
+/* All data linked list must point to a disabled descriptor at the end.
+ * We use this DISABLED_DESCRIPTOR as a list end for all channels.
+ */
+#define ALIGNED __attribute__((aligned(GRPCI2DMA_BD_DATA_ALIGN)))
+static ALIGNED struct grpci2_bd_data disabled_data = {
+ /*.ctrl=*/0,
+ /*.pci_adr=*/0,
+ /*.ahb_adr=*/0,
+ /*.next=*/0
+};
+#define DISABLED_DESCRIPTOR (&disabled_data)
+
+/*** START OF DESCRIPTOR LINKED-LIST HELPER FUNCTIONS ***/
+
+/* Insert CHAN into the circular channel list headed by LIST, directly
+ * after LIST. Input parameters are assumed to have been validated by
+ * the caller. Always returns GRPCI2DMA_ERR_OK.
+ */
+STATIC int grpci2dma_channel_list_add(struct grpci2_bd_chan * list,
+		struct grpci2_bd_chan * chan)
+{
+	struct grpci2_bd_chan * successor;
+
+	DBG("Adding channel (0x%08x) to GRPCI2 DMA driver\n", (unsigned int) chan);
+
+	/* A one-element list: CHAN already closes on itself, nothing to do */
+	if (list == chan)
+		return GRPCI2DMA_ERR_OK;
+
+	/* Splice CHAN in between LIST and its current successor so the
+	 * list stays circular */
+	successor = grpci2dma_channel_bd_get_next(list);
+	grpci2dma_channel_bd_set_next(chan, successor);
+	grpci2dma_channel_bd_set_next(list, chan);
+	return GRPCI2DMA_ERR_OK;
+}
+
+/* Unlink CHAN from the circular DMA channel linked list. Input
+ * parameters are assumed to have been validated by the caller.
+ * Returns GRPCI2DMA_ERR_OK on success, or GRPCI2DMA_ERR_ERROR when the
+ * list does not close back on CHAN within MAX_DMA_CHANS hops (i.e. the
+ * list is corrupted).
+ */
+STATIC int grpci2dma_channel_list_remove(struct grpci2_bd_chan * chan)
+{
+	DBG("Removing channel (0x%08x) from GRPCI2 DMA driver\n",
+		(unsigned int) chan);
+
+	/* Remove channel from the linked list */
+	struct grpci2_bd_chan * nchan = grpci2dma_channel_bd_get_next(chan);
+	if (nchan != chan){
+		/* There are more channels */
+		/* Since this is a circular linked list, we need to find the last
+		 * channel (the one whose next pointer is CHAN) and make it point
+		 * past the removed element */
+		/* Use index to avoid having an infinite loop in case of corrupted
+		 * channels */
+		struct grpci2_bd_chan * new_first_chan = nchan;
+		struct grpci2_bd_chan * curr_chan;
+		int i=1;
+		while((nchan != chan) && (i<MAX_DMA_CHANS)){
+			curr_chan = nchan;
+			nchan = grpci2dma_channel_bd_get_next(curr_chan);
+			i++;
+		}
+		if (nchan != chan) {
+			/* Walked MAX_DMA_CHANS links without coming back to CHAN */
+			DBG("Maximum DMA channels exceeded. Maybe corrupted?\n");
+			return GRPCI2DMA_ERR_ERROR;
+		} else {
+			/* curr_chan is CHAN's predecessor: bypass CHAN */
+			grpci2dma_channel_bd_set_next(curr_chan, new_first_chan);
+			return GRPCI2DMA_ERR_OK;
+		}
+	}else{
+		/* CHAN is the only element; nothing to unlink */
+		return GRPCI2DMA_ERR_OK;
+	}
+}
+
+/* Append a data descriptor list to the channel's data linked list.
+ * DATA must already be a list terminated by DISABLED_DESCRIPTOR (or be
+ * the DISABLED_DESCRIPTOR itself). LAST_CHAN_DATA is the last data
+ * descriptor previously added to this channel (tracked by the caller).
+ * Returns GRPCI2DMA_ERR_OK on success, or GRPCI2DMA_ERR_ERROR when the
+ * channel's nbd pointer is NULL (it should always point at least to the
+ * shared disabled descriptor).
+ */
+STATIC int grpci2dma_data_list_add(struct grpci2_bd_chan * chan,
+		struct grpci2_bd_data * data, struct grpci2_bd_data * last_chan_data)
+{
+	DBG("Adding data (0x%08x) to channel (0x%08x)\n",
+		(unsigned int) data, (unsigned int) chan);
+
+	/* Add data to the linked list */
+	/* 1st- Get current data */
+	struct grpci2_bd_data * first_data = grpci2dma_channel_bd_get_data(chan);
+	if (first_data == NULL) {
+		/* Channel should always be pointing to a disabled descriptor */
+		DBG("Channel not pointing to disabled descpriptor\n");
+		return GRPCI2DMA_ERR_ERROR;
+	} else if (first_data == DISABLED_DESCRIPTOR){
+		/* No previous data. Assign this one and finish. */
+		grpci2dma_channel_bd_set_data(chan, data);
+		return GRPCI2DMA_ERR_OK;
+	} else {
+		/* Let's add the data to the last data pointer added to this channel */
+		/* Attach the new data */
+		grpci2dma_data_bd_set_next(last_chan_data, data);
+		/* 2nd- Re-read the channel pointer: the DMA core may have consumed
+		 * all previous descriptors (and parked on the disabled descriptor)
+		 * while we were linking the new data in, in which case the new list
+		 * must be installed on the channel directly or it would be lost */
+		first_data = grpci2dma_channel_bd_get_data(chan);
+		if (first_data == DISABLED_DESCRIPTOR){
+			grpci2dma_channel_bd_set_data(chan, data);
+		}
+		return GRPCI2DMA_ERR_OK;
+	}
+}
+
+/* Detach a data descriptor from the channel's data linked list. Note
+ * that in normal operation the DMA core itself consumes data
+ * descriptors, so this function is rarely needed.
+ * Returns GRPCI2DMA_ERR_OK on success. Otherwise:
+ * - ERROR: the channel's nbd pointer is NULL (should always point at
+ *   least to the shared disabled descriptor).
+ * - NOTFOUND: the channel has no data, or DATA is not on the list.
+ */
+UNUSED STATIC int grpci2dma_data_list_remove(struct grpci2_bd_chan * chan,
+		struct grpci2_bd_data * data)
+{
+	DBG("Removing data (0x%08x) from channel (0x%08x)\n",
+		(unsigned int) data, (unsigned int) chan);
+
+	/* Remove data from the linked list */
+	/* 1st- Get current DMA data */
+	struct grpci2_bd_data * first_data = grpci2dma_channel_bd_get_data(chan);
+	if (first_data == NULL) {
+		/* Channel should always be pointing to a disabled descriptor */
+		DBG("Channel not pointing to disabled descpriptor\n");
+		return GRPCI2DMA_ERR_ERROR;
+	} else if (first_data == DISABLED_DESCRIPTOR){
+		/* No previous data. Cannot detach */
+		DBG("No data to detach.\n");
+		return GRPCI2DMA_ERR_NOTFOUND;
+	} else {
+		/* 2nd- Already available data, let's find the data */
+		if (first_data == data) {
+			/* 3rd- It is the first one. */
+			struct grpci2_bd_data *current = first_data;
+			struct grpci2_bd_data *next = grpci2dma_data_bd_get_next(current);
+			if (next != DISABLED_DESCRIPTOR){
+				/* There are more data */
+				/* Set channel next data descriptor to data*/
+				grpci2dma_channel_bd_set_data(chan, next);
+				/* Terminate the removed data so it is a valid
+				 * single-element list again */
+				grpci2dma_data_bd_set_next(data, DISABLED_DESCRIPTOR);
+				return GRPCI2DMA_ERR_OK;
+			}else{
+				/* No more data */
+				/* Park the channel on the shared disabled descriptor */
+				grpci2dma_channel_bd_set_data(chan, DISABLED_DESCRIPTOR);
+				/* Update the removed data */
+				grpci2dma_data_bd_set_next(data, DISABLED_DESCRIPTOR);
+				return GRPCI2DMA_ERR_OK;
+			}
+		} else {
+			/* It is not the first data. Let's find it */
+			struct grpci2_bd_data * current = first_data;
+			struct grpci2_bd_data * next = grpci2dma_data_bd_get_next(current);
+			while( (next != data) && (next != DISABLED_DESCRIPTOR) &&
+					(next != NULL)){
+				current = next;
+				next = grpci2dma_data_bd_get_next(current);
+			}
+			if (next != data) {
+				/* Hit the list terminator (or a NULL link) first */
+				DBG("Maximum DMA data exceeded. Maybe corrupted?\n");
+				return GRPCI2DMA_ERR_NOTFOUND;
+			} else {
+				/* Detach the data: predecessor skips over DATA */
+				next = grpci2dma_data_bd_get_next(data);
+				grpci2dma_data_bd_set_next(current, next);
+				/* Update the removed data */
+				grpci2dma_data_bd_set_next(data, DISABLED_DESCRIPTOR);
+				return GRPCI2DMA_ERR_OK;
+			}
+		}
+	}
+}
+
+/* Apply FUNC to every channel of the circular list starting at
+ * FIRST_CHAN, visiting at most MAXINDEX channels (protects against a
+ * corrupted, non-terminating list). Stops at and propagates the first
+ * negative value returned by FUNC. Returns GRPCI2DMA_ERR_WRONGPTR when
+ * a NULL link is encountered, otherwise 0.
+ */
+UNUSED STATIC int grpci2dma_channel_list_foreach(
+	struct grpci2_bd_chan * first_chan,
+	int func( struct grpci2_bd_chan * chan), int maxindex)
+{
+	struct grpci2_bd_chan * node = first_chan;
+	int visited = 0;
+	int rc;
+
+	if (maxindex <= 0)
+		return 0;
+	/* An empty list is not an error */
+	if (first_chan == NULL)
+		return 0;
+
+	do {
+		if (node == NULL)
+			return GRPCI2DMA_ERR_WRONGPTR;
+		rc = func(node);
+		if (rc < 0)
+			return rc;
+		node = grpci2dma_channel_bd_get_next(node);
+		visited++;
+	} while ((node != first_chan) && (visited < maxindex));
+
+	return 0;
+}
+
+/* Apply FUNC to every data descriptor of the list starting at
+ * FIRST_DATA, visiting at most MAXINDEX descriptors (protects against a
+ * corrupted list). Iteration ends at the shared DISABLED_DESCRIPTOR
+ * terminator. Stops at and propagates the first negative value returned
+ * by FUNC. Returns GRPCI2DMA_ERR_WRONGPTR for a NULL head or NULL link,
+ * otherwise 0.
+ */
+STATIC int grpci2dma_data_list_foreach(struct grpci2_bd_data * first_data,
+	int func( struct grpci2_bd_data * data), int maxindex)
+{
+	struct grpci2_bd_data * node = first_data;
+	int visited = 0;
+	int rc;
+
+	if (maxindex <= 0)
+		return 0;
+	if (first_data == NULL)
+		return GRPCI2DMA_ERR_WRONGPTR;
+
+	while ((node != DISABLED_DESCRIPTOR) && (visited < maxindex)) {
+		if (node == NULL)
+			return GRPCI2DMA_ERR_WRONGPTR;
+		rc = func(node);
+		if (rc < 0)
+			return rc;
+		node = grpci2dma_data_bd_get_next(node);
+		visited++;
+	}
+	return 0;
+}
+
+
+/*** END OF DESCRIPTOR LINKED-LIST HELPER FUNCTIONS ***/
+
+/*** START OF DMACTRL ACCESS FUNCTIONS ***/
+
+/* Reset the DMA register file: acknowledge any pending channel IRQ and
+ * error status (these fields clear when written with ones) and zero the
+ * descriptor base and active-channel registers. Returns 0.
+ */
+STATIC INLINE int grpci2dma_ctrl_init()
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+
+	/* Clear DMA Control: clear IRQ and ERR status */
+	REG_WRITE(&priv->regs->dma_ctrl, 0|DMACTRL_SAFE|DMACTRL_CHIRQ|DMACTRL_ERR);
+
+	/* Clear DMA BASE */
+	REG_WRITE(&priv->regs->dma_bdbase, 0);
+
+	/* Clear DMA Chan */
+	REG_WRITE(&priv->regs->dma_chact, 0);
+
+	return 0;
+}
+
+
+/* Halt the DMA core: clear EN and set DIS, while masking out the
+ * write-one-to-clear status fields so they are not acknowledged by
+ * accident. Returns 0.
+ */
+STATIC INLINE int grpci2dma_ctrl_stop( void )
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	unsigned int dmactrl = REG_READ(&priv->regs->dma_ctrl);
+
+	dmactrl &= ~(DMACTRL_WCLEAR | DMACTRL_EN);
+	REG_WRITE(&priv->regs->dma_ctrl, dmactrl | DMACTRL_DIS);
+
+	return 0;
+}
+
+/* Start the DMA core processing the circular channel list headed by
+ * CHAN. The descriptor base register is programmed before the enable
+ * bit is set. Returns 0.
+ */
+STATIC INLINE int grpci2dma_ctrl_start( struct grpci2_bd_chan * chan)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	unsigned int dmactrl;
+
+	/* Point the core at the channel list first */
+	REG_WRITE(&priv->regs->dma_bdbase, (unsigned int) chan);
+
+	/* Then set EN (and clear DIS) without touching the
+	 * write-one-to-clear status fields */
+	dmactrl = REG_READ(&priv->regs->dma_ctrl);
+	REG_WRITE(&priv->regs->dma_ctrl,
+		(dmactrl & ~(DMACTRL_WCLEAR | DMACTRL_DIS)) | DMACTRL_EN);
+
+	return 0;
+}
+
+/* Resume DMA operation without reprogramming the descriptor base
+ * register: only EN is set (and DIS cleared), so the core continues
+ * from where it stopped. Returns 0.
+ */
+STATIC INLINE int grpci2dma_ctrl_resume( void )
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	unsigned int dmactrl = REG_READ(&priv->regs->dma_ctrl);
+
+	dmactrl &= ~(DMACTRL_WCLEAR | DMACTRL_DIS);
+	REG_WRITE(&priv->regs->dma_ctrl, dmactrl | DMACTRL_EN);
+
+	return 0;
+}
+
+/* Return the DMACTRL_IE bit of the control register (non-zero when DMA
+ * interrupts are enabled at the core).
+ */
+STATIC INLINE int grpci2dma_ctrl_interrupt_status(void)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	unsigned int dmactrl = REG_READ(&priv->regs->dma_ctrl);
+
+	return dmactrl & DMACTRL_IE;
+}
+
+/* Enable interrupts */
+STATIC INLINE int grpci2dma_ctrl_interrupt_enable(void)
+{
+ struct grpci2dma_priv *priv = grpci2dmapriv;
+
+ unsigned int ctrl = REG_READ(&priv->regs->dma_ctrl);
+ if (ctrl & DMACTRL_IE){
+ /* Nothing to do. Already enabled */
+ return 0;
+ }
+
+ /* Clear pending CHIRQ and errors */
+ ctrl = ctrl | (DMACTRL_CHIRQ | DMACTRL_ERR);
+
+ /* Enable interrupts */
+ ctrl = ctrl | DMACTRL_IE;
+
+ REG_WRITE(&priv->regs->dma_ctrl, ctrl );
+ return 0;
+}
+
+/* Disable interrupts */
+STATIC INLINE int grpci2dma_ctrl_interrupt_disable(void)
+{
+ struct grpci2dma_priv *priv = grpci2dmapriv;
+
+ unsigned int ctrl = REG_READ(&priv->regs->dma_ctrl);
+ if ((ctrl & DMACTRL_IE) == 0){
+ /* Nothing to do. Already disabled */
+ return 0;
+ }
+
+ /* Clear pending CHIRQ and errors */
+ ctrl = ctrl | (DMACTRL_CHIRQ | DMACTRL_ERR);
+
+ /* Disable interrupts */
+ ctrl = ctrl & ~(DMACTRL_IE);
+
+ REG_WRITE(&priv->regs->dma_ctrl, ctrl );
+ return 0;
+}
+
+/* Clear interrupts */
+STATIC INLINE int grpci2dma_ctrl_interrupt_clear(void)
+{
+ struct grpci2dma_priv *priv = grpci2dmapriv;
+
+ unsigned int ctrl = REG_READ(&priv->regs->dma_ctrl);
+ REG_WRITE(&priv->regs->dma_ctrl, (ctrl | DMACTRL_ERR | DMACTRL_CHIRQ));
+ return 0;
+}
+
+/* Return the raw DMA control/status register value. */
+STATIC INLINE unsigned int grpci2dma_ctrl_status()
+{
+	return REG_READ(&grpci2dmapriv->regs->dma_ctrl);
+}
+
+/* Return the DMA descriptor base register value. */
+STATIC INLINE unsigned int grpci2dma_ctrl_base()
+{
+	return REG_READ(&grpci2dmapriv->regs->dma_bdbase);
+}
+
+/* Return the active-channel register value. */
+UNUSED STATIC INLINE unsigned int grpci2dma_ctrl_active()
+{
+	return REG_READ(&grpci2dmapriv->regs->dma_chact);
+}
+
+/* Program the NUMCH field of the DMA control register (callers pass the
+ * number of active channels minus one). The write masks out the
+ * write-one-to-clear status fields so no pending status is lost.
+ * Returns 0.
+ */
+STATIC INLINE int grpci2dma_ctrl_numch_set(int numch)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	unsigned int dmactrl = REG_READ(&priv->regs->dma_ctrl);
+
+	/* Replace the old NUMCH field with the new value */
+	dmactrl &= ~DMACTRL_NUMCH;
+	dmactrl |= (numch << DMACTRL_NUMCH_BIT) & DMACTRL_NUMCH;
+
+	REG_WRITE(&priv->regs->dma_ctrl, dmactrl & ~(DMACTRL_WCLEAR));
+	return 0;
+}
+
+/*** END OF DMACTRL ACCESS FUNCTIONS ***/
+
+/*** START OF DESCRIPTOR ACCESS FUNCTIONS ***/
+
+/* Fill in a data descriptor for one transfer:
+ * - pci_adr/ahb_adr: transfer start addresses on each bus
+ * - dir: GRPCI2DMA_AHBTOPCI sets the DR (direction) bit
+ * - endianness: GRPCI2DMA_LITTLEENDIAN sets the BE bit
+ * - size: transfer length, masked into the 16-bit LEN field
+ * - next: following data descriptor in the channel's list
+ * The control word is written with the enable bit set. Returns 0.
+ */
+STATIC int grpci2dma_data_bd_init(struct grpci2_bd_data * data,
+	uint32_t pci_adr, uint32_t ahb_adr, int dir, int endianness, int size,
+	struct grpci2_bd_data * next)
+{
+	BD_WRITE(&data->ctrl, 0 |
+		(BD_DATA_EN) |
+		(BD_DATA_TYPE_DATA) |
+		(dir == GRPCI2DMA_AHBTOPCI? BD_DATA_DR:0) |
+		(endianness == GRPCI2DMA_LITTLEENDIAN? BD_DATA_BE:0) |
+		( (size << BD_DATA_LEN_BIT) & BD_DATA_LEN )
+		);
+	BD_WRITE(&data->pci_adr, pci_adr);
+	BD_WRITE(&data->ahb_adr, ahb_adr);
+	BD_WRITE(&data->next, (unsigned int) next);
+	return 0;
+}
+
+/* Initialize a channel descriptor: enabled, type DMA, next-channel
+ * pointer looping back onto itself (one-element circular list) and the
+ * data pointer parked on the shared disabled descriptor. Returns 0.
+ */
+STATIC int grpci2dma_channel_bd_init(struct grpci2_bd_chan * chan)
+{
+	BD_WRITE(&chan->ctrl, 0 | BD_CHAN_TYPE_DMA | BD_CHAN_EN);
+	BD_WRITE(&chan->nchan, (unsigned int) chan);
+	BD_WRITE(&chan->nbd, (unsigned int) DISABLED_DESCRIPTOR);
+	return 0;
+}
+
+/* Enable a channel with options.
+ * The low 16 bits of OPTIONS become the BDCNT field: the maximum number
+ * of data descriptors the core processes before moving on to the next
+ * DMA channel. Returns 0.
+ */
+STATIC int grpci2dma_channel_bd_enable(struct grpci2_bd_chan * chan,
+	unsigned int options)
+{
+	unsigned int ctrl = BD_READ(&chan->ctrl);
+	ctrl = (ctrl & ~(BD_CHAN_BDCNT));
+	BD_WRITE(&chan->ctrl, (ctrl | BD_CHAN_EN |
+		( (options << BD_CHAN_BDCNT_BIT) & BD_CHAN_BDCNT)));
+	return 0;
+}
+
+/* Clear the enable bit of a channel descriptor. Returns 0. */
+STATIC int grpci2dma_channel_bd_disable(struct grpci2_bd_chan * chan)
+{
+	BD_WRITE(&chan->ctrl, BD_READ(&chan->ctrl) & ~BD_CHAN_EN);
+	return 0;
+}
+
+/* Extract the channel ID field from a channel descriptor. */
+UNUSED STATIC int grpci2dma_channel_bd_get_cid(struct grpci2_bd_chan * chan)
+{
+	unsigned word = BD_READ(&chan->ctrl);
+
+	return (word & BD_CHAN_ID) >> BD_CHAN_ID_BIT;
+}
+
+/* Store CID into the channel descriptor's ID field, preserving all
+ * other control bits. */
+STATIC void grpci2dma_channel_bd_set_cid(struct grpci2_bd_chan * chan, int cid)
+{
+	unsigned word = BD_READ(&chan->ctrl) & ~BD_CHAN_ID;
+
+	word |= (cid << BD_CHAN_ID_BIT) & BD_CHAN_ID;
+	BD_WRITE(&chan->ctrl, word);
+}
+
+/* Disable a data descriptor by zeroing its control word. Returns 0. */
+UNUSED STATIC int grpci2dma_data_bd_disable(struct grpci2_bd_data *desc)
+{
+	BD_WRITE(&desc->ctrl, 0);
+	return 0;
+}
+
+/* Classify a data descriptor from its control word.
+ * Returns GRPCI2DMA_BD_STATUS_ERR when the error bit is set,
+ * GRPCI2DMA_BD_STATUS_ENABLED when the enable bit is set, and
+ * GRPCI2DMA_BD_STATUS_DISABLED otherwise.
+ */
+STATIC int grpci2dma_data_bd_status(struct grpci2_bd_data *desc)
+{
+	/* Read into an unsigned: BD_DATA_EN is bit 31 and must not be
+	 * tested on a signed int (original stored this in an int). */
+	unsigned int status = BD_READ(&desc->ctrl);
+
+	if (status & BD_DATA_ER) {
+		return GRPCI2DMA_BD_STATUS_ERR;
+	}else if (status & BD_DATA_EN) {
+		return GRPCI2DMA_BD_STATUS_ENABLED;
+	}else {
+		/* Unreachable trailing return of the original removed */
+		return GRPCI2DMA_BD_STATUS_DISABLED;
+	}
+}
+
+/* Set the interrupt-enable bit of a data descriptor so the core raises
+ * an IRQ when the descriptor completes. Returns 0. */
+STATIC int grpci2dma_data_bd_interrupt_enable(struct grpci2_bd_data * data)
+{
+	BD_WRITE(&data->ctrl, BD_READ(&data->ctrl) | BD_DATA_IE);
+	return 0;
+}
+
+/* Read the channel descriptor's next-data (nbd) pointer. */
+STATIC struct grpci2_bd_data * grpci2dma_channel_bd_get_data(
+	struct grpci2_bd_chan * chan)
+{
+	unsigned int nbd = BD_READ(&chan->nbd);
+
+	return (struct grpci2_bd_data *) nbd;
+}
+
+/* Write the channel descriptor's next-data (nbd) pointer. */
+STATIC void grpci2dma_channel_bd_set_data(struct grpci2_bd_chan * chan,
+	struct grpci2_bd_data * data)
+{
+	BD_WRITE(&chan->nbd, (unsigned int) data);
+}
+
+/* Read the channel descriptor's next-channel pointer. */
+STATIC struct grpci2_bd_chan * grpci2dma_channel_bd_get_next(
+	struct grpci2_bd_chan * chan)
+{
+	unsigned int nchan = BD_READ(&chan->nchan);
+
+	return (struct grpci2_bd_chan *) nchan;
+}
+
+/* Read a data descriptor's next pointer. */
+STATIC struct grpci2_bd_data * grpci2dma_data_bd_get_next(
+	struct grpci2_bd_data * data)
+{
+	unsigned int next = BD_READ(&data->next);
+
+	return (struct grpci2_bd_data *) next;
+}
+
+/* Write the channel descriptor's next-channel pointer. */
+STATIC void grpci2dma_channel_bd_set_next(struct grpci2_bd_chan * chan,
+	struct grpci2_bd_chan * next)
+{
+	BD_WRITE(&chan->nchan, (unsigned int) next);
+}
+
+/* Write a data descriptor's next pointer. */
+STATIC void grpci2dma_data_bd_set_next(struct grpci2_bd_data * data,
+	struct grpci2_bd_data * next)
+{
+	BD_WRITE(&data->next, (unsigned int) next);
+}
+
+/*** END OF DESCRIPTOR ACCESS FUNCTIONS ***/
+
+/*** START OF CHANNEL FUNCTIONS ***/
+
+/* Open a DMA channel and bind it to channel id CID.
+ * When CHAN is NULL a descriptor is allocated internally and flagged so
+ * it is freed again on close; a user-provided descriptor must not
+ * already be registered. Returns CID on success, GRPCI2DMA_ERR_WRONGPTR
+ * when CHAN is already registered, or GRPCI2DMA_ERR_ERROR when internal
+ * allocation fails.
+ */
+STATIC int grpci2dma_channel_open(struct grpci2_bd_chan * chan, int cid)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	int allocated = 0;
+
+	/* Get pointer */
+	if (chan == NULL) {
+		/* User does not provide channel, let's create it */
+		chan = grpci2dma_channel_new(1);
+		if (chan == NULL) {
+			/* Allocation failed (original dereferenced NULL here) */
+			return GRPCI2DMA_ERR_ERROR;
+		}
+		allocated = 1;
+	}else{
+		/* Make sure the pointer is not already on the linked list */
+		int i;
+		for (i=0; i<MAX_DMA_CHANS; i++){
+			if (priv->channel[i].ptr == chan){
+				return GRPCI2DMA_ERR_WRONGPTR;
+			}
+		}
+	}
+
+	DBG("Opening channel %d (0x%08x)\n", cid, (unsigned int) chan);
+
+	/* Init channel descriptor */
+	grpci2dma_channel_bd_init(chan);
+
+	/* Assign cid to chan */
+	priv->channel[cid].ptr = chan;
+	grpci2dma_channel_bd_set_cid(chan, cid);
+
+	/* Increase number of channels */
+	priv->nchans++;
+
+	DBG("number of channels: %d\n", priv->nchans);
+
+	/* Initialize channel data */
+	priv->channel[cid].allocated = allocated;
+	priv->channel[cid].active = 0;
+
+	/* Initialize record of last added data */
+	priv->channel[cid].lastdata = DISABLED_DESCRIPTOR;
+
+	return cid;
+}
+
+/* Return the lowest unassigned channel id, or GRPCI2DMA_ERR_TOOMANY
+ * when all MAX_DMA_CHANS ids are in use.
+ */
+STATIC int grpci2dma_channel_free_id()
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	int cid;
+
+	for (cid = 0; cid < MAX_DMA_CHANS; cid++) {
+		if (priv->channel[cid].ptr == NULL)
+			return cid;
+	}
+	return GRPCI2DMA_ERR_TOOMANY;
+}
+
+/* Return an entry point into the circular list of active channels (the
+ * first registered channel marked active), or NULL when no channel is
+ * active.
+ */
+STATIC struct grpci2_bd_chan * grpci2dma_channel_get_active_list()
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	int cid;
+
+	for (cid = 0; cid < MAX_DMA_CHANS; cid++) {
+		if ((priv->channel[cid].ptr != NULL) && priv->channel[cid].active)
+			return priv->channel[cid].ptr;
+	}
+	return NULL;
+}
+
+/* Activate channel CHAN_NO: enable its descriptor, link it into the
+ * active circular list, update NUMCH and start the DMA core if it is
+ * idle. OPTIONS (low 16 bits) is the per-round descriptor count, 0
+ * selecting the default 0xffff. CHAN_NO is assumed validated by the
+ * caller. Returns GRPCI2DMA_ERR_OK or GRPCI2DMA_ERR_ERROR.
+ */
+STATIC int grpci2dma_channel_start(int chan_no, int options)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	struct grpci2_bd_chan *chan;
+	SPIN_IRQFLAGS(irqflags);
+
+	/* Get chan pointer */
+	chan = priv->channel[chan_no].ptr;
+
+	/* Check if channel is active */
+	if (priv->channel[chan_no].active){
+		/* nothing to do */
+		return GRPCI2DMA_ERR_OK;
+	}
+
+	/* Get the max descriptor count */
+	unsigned int desccnt;
+	if (options == 0){
+		/* Default */
+		desccnt = 0xffff;
+	}else{
+		desccnt = options & 0xffff;
+	}
+
+	/* Start the channel by enabling it.
+	 * HWNOTE: In GRPCI2 this bit does not work as it is supposed.
+	 * So we better add/remove the channel from the active linked
+	 * list. */
+	grpci2dma_channel_bd_enable(chan, desccnt);
+	priv->channel[chan_no].active = 1;
+	priv->nactive++;
+	/* Get active linked list */
+	struct grpci2_bd_chan * list = grpci2dma_channel_get_active_list();
+	if (list == NULL){
+		/* No previous channels. New list */
+		list = chan;
+	}
+	/* Add channel from the linked list */
+	if (grpci2dma_channel_list_add(list, chan) < 0){
+		return GRPCI2DMA_ERR_ERROR;
+	}
+
+	/* Increase NUMCH in DMA ctrl; the register encodes the number of
+	 * channels minus one. Register accesses below are ISR-protected. */
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+	grpci2dma_ctrl_numch_set( (priv->nactive? priv->nactive -1:0));
+
+	/* Check if DMA is active */
+	if (!grpci2dma_active()){
+		/* Start DMA */
+		grpci2dma_ctrl_start(chan);
+	}
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	DBG("Channel %d started (0x%08x)\n", chan_no, (unsigned int) chan);
+
+	return GRPCI2DMA_ERR_OK;
+}
+
+/* Deactivate channel CHAN_NO: unlink it from the active circular list,
+ * stopping and later resuming/restarting the DMA core when the removed
+ * channel happened to be the one being processed. CHAN_NO is assumed
+ * validated by the caller. Returns GRPCI2DMA_ERR_OK or
+ * GRPCI2DMA_ERR_ERROR (corrupted list).
+ */
+STATIC int grpci2dma_channel_stop(int chan_no)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	struct grpci2_bd_chan *chan;
+	SPIN_IRQFLAGS(irqflags);
+	int resume;
+
+	/* Get chan pointer */
+	chan = priv->channel[chan_no].ptr;
+
+	/* Check if channel is active */
+	if (!priv->channel[chan_no].active){
+		/* nothing to do */
+		return GRPCI2DMA_ERR_OK;
+	}
+
+	/* First remove channel from the linked list */
+	if (grpci2dma_channel_list_remove(chan) < 0){
+		return GRPCI2DMA_ERR_ERROR;
+	}
+
+	/* Update driver struct */
+	priv->channel[chan_no].active = 0;
+	priv->nactive--;
+
+	/* Check if DMA is active and if the removed
+	 * channel is the one the core is processing */
+	resume = 0;
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+	if (grpci2dma_active() && (grpci2dma_ctrl_active() == (unsigned int)chan)){
+		/* We need to stop the DMA */
+		grpci2dma_ctrl_stop();
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+		/* Busy-wait until the core acknowledges the stop */
+		while (grpci2dma_active()){}
+		/* We need to check later to resume the DMA */
+		resume = 1;
+	}else{
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+	}
+
+
+	/* Now either the DMA is stopped, or it is processing
+	 * a different channel and the removed channel is no
+	 * longer in the linked list */
+
+	/* Now is safe to update the removed channel: make it a
+	 * self-contained one-element circular list again */
+	grpci2dma_channel_bd_set_next(chan, chan);
+
+	/* Stop the channel by disabling it.
+	 * HWNOTE: In GRPCI2 this bit does not work as it is supposed.
+	 * So we better remove the channel from the active linked
+	 * list. */
+	grpci2dma_channel_bd_disable(chan);
+
+	/* Point channel to disabled descriptor */
+	grpci2dma_channel_bd_set_data(chan, DISABLED_DESCRIPTOR);
+
+	DBG("Channel %d stoped (0x%08x)\n", chan_no, (unsigned int) chan);
+
+	/* Decrease NUMCH in DMA ctrl (register holds count minus one) */
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+	grpci2dma_ctrl_numch_set( (priv->nactive? priv->nactive -1:0));
+
+	/* Reactivate DMA only if we stopped */
+	if (resume){
+		/* We have two options, either we stopped when the active
+		 * channel was still the active one, or we stopped when
+		 * the active channel was a different one */
+		if (grpci2dma_ctrl_active() == (unsigned int) chan){
+			/* In this case, we need to start the DMA with
+			 * any active channel on the list */
+			int i;
+			for (i=0; i<MAX_DMA_CHANS; i++){
+				if (priv->channel[i].active){
+					grpci2dma_ctrl_start(priv->channel[i].ptr);
+					break;
+				}
+			}
+		}else{
+			/* In this case, we need to resume the DMA operation */
+			/* HWNOTE: The GRPCI2 core does not update the channel next
+			 * data descriptor if we stopped a channel. This means that
+			 * we need to resume the DMA from the descriptor is was,
+			 * by only setting the enable bit, and not changing the
+			 * base register */
+			grpci2dma_ctrl_resume();
+		}
+	}
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return GRPCI2DMA_ERR_OK;
+}
+
+/* Append 'ndata' data descriptors, starting at data[index], to the tail of
+ * an open channel's data descriptor list and remember the new tail.
+ * NOTE(review): chan_no is assumed valid and open; the public wrapper
+ * grpci2dma_push() validates it and holds the channel semaphore.
+ */
+STATIC int grpci2dma_channel_push(int chan_no, void *dataptr, int index,
+		int ndata)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	struct grpci2_bd_chan * chan;
+	struct grpci2_bd_data * data = dataptr;
+
+	/* Get channel */
+	chan = priv->channel[chan_no].ptr;
+
+	DBG("Pushing %d data (starting at 0x%08x) to channel %d (0x%08x)\n",
+		ndata, (unsigned int) &data[index], chan_no, (unsigned int) chan);
+
+	/* Get last added data */
+	struct grpci2_bd_data * last_added = priv->channel[chan_no].lastdata;
+
+	/* Add data to channel (links the new range after last_added) */
+	grpci2dma_data_list_add(chan, &data[index], last_added);
+
+	/* Update last added so the next push chains after this range */
+	priv->channel[chan_no].lastdata = &data[index + ndata-1];
+
+	return GRPCI2DMA_ERR_OK;
+}
+
+/* Tear down an open channel: stop it on the core, remove its ISR, free the
+ * channel slot, and deallocate the descriptor if it was driver-allocated.
+ * Returns GRPCI2DMA_ERR_OK or GRPCI2DMA_ERR_STOPDMA when the channel could
+ * not be stopped. Caller holds both the driver and the channel semaphore.
+ */
+STATIC int grpci2dma_channel_close(int chan_no)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	struct grpci2_bd_chan * chan;
+
+	/* Get channel */
+	chan = priv->channel[chan_no].ptr;
+
+	DBG("Closing channel %d (0x%08x)\n", chan_no, (unsigned int) chan);
+
+	/* Stop channel - abort the close if this fails so the channel is not
+	 * freed while possibly still linked into the active list */
+	if (grpci2dma_channel_stop(chan_no) != GRPCI2DMA_ERR_OK ){
+		DBG("Cannot stop channel!.\n");
+		return GRPCI2DMA_ERR_STOPDMA;
+	}
+
+	/* Unregister channel ISR */
+	grpci2dma_channel_isr_unregister(chan_no);
+
+	/* Free the cid */
+	priv->channel[chan_no].ptr = NULL;
+
+	/* Remove the ISR */
+	priv->channel[chan_no].isr = NULL;
+	priv->channel[chan_no].isr_arg = NULL;
+
+	/* Deallocate channel if needed (only when the descriptor memory was
+	 * allocated by this driver rather than supplied by the user) */
+	if (priv->channel[chan_no].allocated){
+		grpci2dma_channel_delete((void *)chan);
+	}
+
+	/* Decrease number of channels */
+	priv->nchans--;
+
+	DBG("number of channels: %d\n", priv->nchans);
+
+	/* Everything OK */
+	return GRPCI2DMA_ERR_OK;
+}
+
+/* Unregister channel ISR. When the last channel ISR is removed, DMA
+ * interrupts are disabled and the driver ISR is detached from GRPCI2. */
+STATIC int grpci2dma_channel_isr_unregister(int chan_no)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	SPIN_IRQFLAGS(irqflags);
+
+	/* Unregister channel ISR */
+	priv->channel[chan_no].isr = NULL;
+	priv->channel[chan_no].isr_arg = NULL;
+
+	/* Unregister DMA ISR in GRPCI2 if needed */
+	priv->isr_registered--;
+	if(priv->isr_registered == 0){
+		/* Disable DMA Interrupts */
+		SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+		grpci2dma_ctrl_interrupt_disable();
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+		/* Detach the driver ISR from the GRPCI2 host driver */
+		(priv->isr_register)( NULL, NULL);
+	}
+
+	/* Everything OK */
+	return GRPCI2DMA_ERR_OK;
+}
+
+/*** END OF CHANNEL FUNCTIONS ***/
+
+/*** START OF ISR FUNCTIONS ***/
+
+/* PCI DMA Interrupt handler, called when there is a PCI DMA interrupt.
+ * Clears the interrupt condition, then dispatches first error interrupts
+ * (after locating the affected channel) and then per-channel completion
+ * interrupts, invoking the user ISR registered for each channel. */
+STATIC void grpci2dma_isr(void *arg)
+{
+	struct grpci2dma_priv *priv = arg;
+	SPIN_ISR_IRQFLAGS(irqflags);
+	unsigned int ctrl = grpci2dma_ctrl_status();
+	/* Clear Interrupts */
+	SPIN_LOCK(&priv->devlock, irqflags);
+	grpci2dma_ctrl_interrupt_clear();
+	SPIN_UNLOCK(&priv->devlock, irqflags);
+	/* Per-channel IRQ pending bits and error status from the snapshot
+	 * taken before clearing */
+	unsigned int sts = (ctrl & DMACTRL_CHIRQ) >> DMACTRL_CHIRQ_BIT;
+	unsigned int errsts = (ctrl & DMACTRL_ERR);
+
+	/* Error interrupt */
+	if(errsts){
+		/* Find which channels had the error.
+		 * The GRPCI2DMA core does not indicate which channel
+		 * had the error, so we need to get 1st the base descriptor register
+		 * and see if it a channel. If is not a channel, then the active
+		 * channel register tells us which channel is.
+		 * After having the channel we need to find out which channel was. */
+		struct grpci2_bd_chan * chan =
+			(struct grpci2_bd_chan *) grpci2dma_ctrl_base();
+		/* Check if the base is a channel descriptor */
+		if ((BD_READ(&chan->ctrl) & BD_CHAN_TYPE) != BD_CHAN_TYPE_DMA){
+			/* Is not a channel, so the channel is in the channel active
+			 * register */
+			chan = (struct grpci2_bd_chan *) grpci2dma_ctrl_active();
+		}
+		/* Map the channel descriptor pointer back to its channel id */
+		int i;
+		for (i=0; i<MAX_DMA_CHANS; i++){
+			if (chan == priv->channel[i].ptr){
+				/* Found */
+				if (priv->channel[i].isr != NULL){
+					(priv->channel[i].isr)(priv->channel[i].isr_arg,i,errsts);
+				}else{
+					printk("Unhandled GRPCI2 DMA error interrupt, sts:0x%02x\n", errsts);
+				}
+				break;
+			}
+		}
+		if (i == MAX_DMA_CHANS){
+			printk("Unhandled GRPCI2 DMA error interrupt , sts:0x%02x\n", errsts);
+		}
+	}
+
+	/* Normal packet interrupt */
+	int cid=0;
+	/* Find which channels have interrupts */
+	while(sts){
+		/* Find if current channel has an interrupt*/
+		if(sts & 0x1){
+			/* Find if current channel has an ISR.
+			 * NOTE(review): errsts is forwarded here as well, so a user
+			 * ISR may see a non-zero error status on the completion
+			 * path too - confirm this is the intended contract. */
+			if (priv->channel[cid].isr != NULL){
+				(priv->channel[cid].isr)(
+					priv->channel[cid].isr_arg, cid, errsts);
+			}else{
+				printk("Unhandled GRPCI2 DMA interrupt in channel %d, sts:0x%02x\n", cid, 0);
+			}
+		}
+		/* Next channel */
+		sts = sts >> 1;
+		cid++;
+	}
+}
+
+/*** END OF ISR FUNCTIONS ***/
+
+/*** START OF DEBUG HELPERS ***/
+#ifdef DEBUG
+/* Debug helper: dump the fields of one DMA channel descriptor to stdout.
+ * Only compiled when DEBUG is defined. Always returns 0 so it can be used
+ * as a grpci2dma_data_list_foreach() callback-style function. */
+STATIC int grpci2dma_channel_print(struct grpci2_bd_chan * chan)
+{
+	printf(" GRPCI2 DMA channel descriptor\n");
+	printf("  0x%08x  DMA channel control  0x%08x\n", (unsigned int) chan, chan->ctrl);
+	printf("   31     en     0x%01x  Channel descriptor enable.\n", (chan->ctrl >> 31) & (0x1));
+	printf("   24:22  cid    0x%01x  Channel ID.\n", (chan->ctrl >> 22) & (0x7));
+	printf("   21:20  type   0x%01x  Descriptor type. 01=DMA channel descriptor.\n", (chan->ctrl >> 20) & (0x3));
+	printf("   15:0   dlen   0x%04x  Data descriptor count.\n", (chan->ctrl >> 0) & (0xffff));
+	printf("\n");
+	printf("  0x%08x  Next DMA channel  0x%08x\n", (unsigned int) &(chan->nchan), chan->nchan);
+	printf("   31:0   nc     0x%08x  Next DMA channel.\n", chan->nchan);
+	printf("\n");
+	printf("  0x%08x  Next data descriptor  0x%08x\n" , (unsigned int) &(chan->nbd), chan->nbd);
+	printf("   31:0   nd     0x%08x  Next data descriptor.\n", chan->nbd);
+	printf("\n");
+	return 0;
+}
+
+/* Debug helper: dump the fields of one DMA data descriptor to stdout.
+ * Only compiled when DEBUG is defined; used as the callback for
+ * grpci2dma_data_list_foreach() in grpci2dma_print()/grpci2dma_print_bd(). */
+STATIC int grpci2dma_data_print(struct grpci2_bd_data * data)
+{
+	printf(" GRPCI2 DMA data descriptor\n");
+	printf("  0x%08x  DMA data control  0x%08x\n", (unsigned int) data, data->ctrl);
+	printf("   31     en     0x%01x  Data descriptor enable.\n" , (data->ctrl >> 31) & (0x1));
+	printf("   30     ie     0x%01x  Interrupt generation enable.\n" , (data->ctrl >> 30) & (0x1));
+	printf("   29     dr     0x%01x  Tranfer direction.\n" , (data->ctrl >> 29) & (0x1));
+	printf("   28     be     0x%01x  Bus endianess.\n" , (data->ctrl >> 28) & (0x1));
+	printf("   21:20  type   0x%01x  Descriptor type. 00=DMA data descriptor.\n" , (data->ctrl >> 20) & (0x3));
+	printf("   19     er     0x%01x  Error status.\n" , (data->ctrl >> 19) & (0x1));
+	printf("   15:0   len    0x%04x  Transfer lenght (in words) - 1.\n" , (data->ctrl >> 0) & (0xffff));
+	printf("\n");
+	printf("  0x%08x  32-bit PCI start address  0x%08x\n" , (unsigned int) &(data->pci_adr), data->pci_adr);
+	printf("   31:0   pa     0x%08x  PCI address.\n" , data->pci_adr);
+	printf("\n");
+	printf("  0x%08x  32-bit AHB start address  0x%08x\n" , (unsigned int) &(data->ahb_adr), data->ahb_adr);
+	printf("   31:0   aa     0x%08x  AHB address.\n" , data->ahb_adr);
+	printf("\n");
+	printf("  0x%08x  Next data descriptor  0x%08x\n" , (unsigned int) &(data->next), data->next);
+	printf("   31:0   nd     0x%08x  Next data descriptor.\n" , data->next);
+	printf("\n");
+	return 0;
+}
+#endif
+/*** END OF DEBUG HELPERS ***/
+
+/*** START OF MEMORY ALLOCATION FUNCTIONS ***/
+
+/* Allocate 'number' channel descriptors aligned to GRPCI2DMA_BD_CHAN_ALIGN.
+ * The original malloc() pointer is stashed in the word just before the
+ * aligned area so grpci2dma_channel_delete() can free it.
+ * Returns the aligned pointer or NULL on failure.
+ */
+void * grpci2dma_channel_new(int number)
+{
+	/* Reject nonsensical requests */
+	if (number <= 0) return NULL;
+
+	/* Allocate memory: room for the descriptors, for alignment padding
+	 * and for the bookkeeping pointer stored in front of the aligned
+	 * area */
+	unsigned int * orig_ptr = (unsigned int *) grlib_malloc(
+		(GRPCI2DMA_BD_CHAN_SIZE)*number + GRPCI2DMA_BD_CHAN_ALIGN +
+		sizeof(orig_ptr));
+	if (orig_ptr == NULL) return NULL;
+
+	/* Get the aligned pointer. Rounding up from
+	 * orig_ptr + sizeof(orig_ptr) guarantees at least a pointer's worth
+	 * of padding before the aligned address; the previous
+	 * "(orig + ALIGN) & ~(ALIGN-1)" round-up could leave as little as
+	 * one byte, making the bookkeeping store below write in front of
+	 * the allocation for unaligned malloc() results. */
+	unsigned int aligned_ptr = (
+		((unsigned int) orig_ptr + sizeof(orig_ptr) +
+		GRPCI2DMA_BD_CHAN_ALIGN - 1) &
+		~(GRPCI2DMA_BD_CHAN_ALIGN - 1));
+
+	/* Save the original pointer just before the aligned pointer */
+	unsigned int ** tmp_ptr =
+		(unsigned int **) (aligned_ptr - sizeof(orig_ptr));
+	*tmp_ptr= orig_ptr;
+
+	/* Return aligned pointer */
+	return (void *) aligned_ptr;
+}
+
+/* Free channel descriptor memory previously allocated with
+ * grpci2dma_channel_new(). Recovers the real malloc() pointer stored in
+ * the word just before the aligned area.
+ * NOTE(review): 'chan - sizeof(orig_ptr)' is arithmetic on a void pointer,
+ * a GCC extension (treated as byte arithmetic). */
+void grpci2dma_channel_delete(void * chan)
+{
+	/* Recover orignal pointer placed just before the aligned pointer */
+	unsigned int * orig_ptr;
+	unsigned int ** tmp_ptr = (unsigned int **) (chan - sizeof(orig_ptr));
+	orig_ptr = *tmp_ptr;
+
+	/* Deallocate memory */
+	free(orig_ptr);
+}
+
+/* Allocate 'number' data descriptors aligned to GRPCI2DMA_BD_DATA_ALIGN.
+ * The original malloc() pointer is stashed in the word just before the
+ * aligned area so grpci2dma_data_delete() can free it.
+ * Returns the aligned pointer or NULL on failure.
+ */
+void * grpci2dma_data_new(int number)
+{
+	/* Reject nonsensical requests */
+	if (number <= 0) return NULL;
+
+	/* Allocate memory: room for the descriptors, for alignment padding
+	 * and for the bookkeeping pointer stored in front of the aligned
+	 * area */
+	unsigned int * orig_ptr = (unsigned int *) grlib_malloc(
+		(GRPCI2DMA_BD_DATA_SIZE)*number + GRPCI2DMA_BD_DATA_ALIGN +
+		sizeof(orig_ptr));
+	if (orig_ptr == NULL) return NULL;
+
+	/* Get the aligned pointer. Rounding up from
+	 * orig_ptr + sizeof(orig_ptr) guarantees at least a pointer's worth
+	 * of padding before the aligned address; the previous
+	 * "(orig + ALIGN) & ~(ALIGN-1)" round-up could leave as little as
+	 * one byte, making the bookkeeping store below write in front of
+	 * the allocation for unaligned malloc() results. */
+	unsigned int aligned_ptr = (
+		((unsigned int) orig_ptr + sizeof(orig_ptr) +
+		GRPCI2DMA_BD_DATA_ALIGN - 1) &
+		~(GRPCI2DMA_BD_DATA_ALIGN - 1));
+
+	/* Save the original pointer before the aligned pointer */
+	unsigned int ** tmp_ptr =
+		(unsigned int **) (aligned_ptr - sizeof(orig_ptr));
+	*tmp_ptr= orig_ptr;
+
+	/* Return aligned pointer */
+	return (void *) aligned_ptr;
+}
+
+/* Free data descriptor memory previously allocated with
+ * grpci2dma_data_new(). Recovers the real malloc() pointer stored in the
+ * word just before the aligned area.
+ * NOTE(review): 'data - sizeof(orig_ptr)' is arithmetic on a void pointer,
+ * a GCC extension (treated as byte arithmetic). */
+void grpci2dma_data_delete(void * data)
+{
+	/* Recover orignal pointer placed just before the aligned pointer */
+	unsigned int * orig_ptr;
+	unsigned int ** tmp_ptr = (unsigned int **) (data - sizeof(orig_ptr));
+	orig_ptr = *tmp_ptr;
+
+	/* Deallocate memory */
+	free(orig_ptr);
+}
+
+/*** END OF MEMORY ALLOCATION FUNCTIONS ***/
+
+/*** START OF USER API ***/
+
+/* Initialize GRPCI2 DMA: GRPCI2 DRIVER calls this
+ * using a weak function definition.
+ * 'regs' is the base of the DMA register area; 'isr_register' is a
+ * callback used to (un)install the DMA ISR in the GRPCI2 host driver.
+ * Returns DRVMGR_OK on success, DRVMGR_FAIL/DRVMGR_NOMEM/-1 on error. */
+int grpci2dma_init(
+	void * regs, void isr_register( void (*isr)(void*), void * arg))
+{
+	struct grpci2dma_priv *priv;
+	int i;
+
+	DBG("Registering GRPCI2 DMA driver with arg: 0x%08x\n",
+		(unsigned int) regs);
+
+	/* We only allow one GRPCI2 DMA */
+	if (grpci2dmapriv) {
+		DBG("Driver only supports one PCI DMA core\n");
+		return DRVMGR_FAIL;
+	}
+
+	/* Device Semaphore created with count = 1 */
+	if (rtems_semaphore_create(rtems_build_name('G', 'P', '2', 'D'), 1,
+		RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
+		RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
+		RTEMS_NO_PRIORITY_CEILING, 0, &grpci2dma_sem) != RTEMS_SUCCESSFUL)
+		return -1;
+
+	/* Allocate and init Memory for DMA */
+	priv = grlib_calloc(1, sizeof(*priv));
+	if (priv == NULL) {
+		/* Fix: do not leak the driver semaphore on allocation failure */
+		rtems_semaphore_delete(grpci2dma_sem);
+		return DRVMGR_NOMEM;
+	}
+
+	priv->regs = regs;
+	strncpy(&priv->devname[0], "grpci2dma0", DEVNAME_LEN);
+
+	/* Initialize Spin-lock for GRPCI2dma Device. */
+	SPIN_INIT(&priv->devlock, priv->devname);
+
+	/* Channel Sempahores */
+	for (i=0; i<MAX_DMA_CHANS; i++){
+		/* set to NULL, they are created when openning channels */
+		priv->channel[i].sem = RTEMS_ID_NONE;
+	}
+
+	/* Register device */
+	grpci2dmapriv = priv;
+
+	/* Initialize Ctrl regs */
+	grpci2dma_ctrl_init();
+
+	/* Install DMA ISR */
+	priv->isr_register = isr_register;
+
+	/* Startup actions:
+	 *  - stop DMA
+	 */
+	grpci2dma_ctrl_stop();
+
+	return DRVMGR_OK;
+}
+
+/* Assign ISR Function to DMA IRQ for an open channel. The first
+ * registration also installs the driver ISR in GRPCI2 and enables DMA
+ * interrupts. Lock order: driver semaphore, then channel semaphore.
+ * Returns a GRPCI2DMA_ERR_* code. */
+int grpci2dma_isr_register(int chan_no, grpci2dma_isr_t dmaisr, void *data)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	SPIN_IRQFLAGS(irqflags);
+
+	if (!priv){
+		/* DMA not initialized */
+		return GRPCI2DMA_ERR_NOINIT;
+	}
+
+	/* Check isr */
+	if (dmaisr == NULL){
+		/* No ISR */
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Get chan pointer */
+	if ((chan_no < 0 ) || (chan_no >= MAX_DMA_CHANS)) {
+		/* Wrong channel id */
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Check chan is open */
+	if (priv->channel[chan_no].ptr == NULL){
+		/* No channel */
+		return GRPCI2DMA_ERR_NOTFOUND;
+	}
+
+	/* Take driver lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(grpci2dma_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+		!= RTEMS_SUCCESSFUL){
+		return GRPCI2DMA_ERR_ERROR;
+	}
+
+	/* Take channel lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->channel[chan_no].sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+		!= RTEMS_SUCCESSFUL){
+		rtems_semaphore_release(grpci2dma_sem);
+		return GRPCI2DMA_ERR_ERROR;
+	}
+
+	/* Register channel ISR */
+	priv->channel[chan_no].isr_arg = data;
+	priv->channel[chan_no].isr = dmaisr;
+
+	/* Register DMA ISR in GRPCI2 if not done yet (first user enables
+	 * the interrupt path for everyone; isr_registered counts users) */
+	if(priv->isr_registered == 0){
+		(priv->isr_register)( grpci2dma_isr, (void *) priv);
+		/* Enable DMA Interrupts */
+		SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+		grpci2dma_ctrl_interrupt_enable();
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+	}
+	priv->isr_registered++;
+
+	/* Release channel sempahore */
+	rtems_semaphore_release(priv->channel[chan_no].sem);
+
+	/* Release driver sempahore */
+	rtems_semaphore_release(grpci2dma_sem);
+
+	return GRPCI2DMA_ERR_OK;
+}
+
+/* Unregister the ISR Function from an open channel's DMA IRQ. When the
+ * last registered ISR is removed the DMA interrupt path is disabled.
+ * Returns a GRPCI2DMA_ERR_* code. */
+int grpci2dma_isr_unregister(int chan_no)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	int ret;
+
+	if (!priv){
+		/* DMA not initialized */
+		return GRPCI2DMA_ERR_NOINIT;
+	}
+
+	/* Get chan pointer */
+	if ((chan_no < 0 ) || (chan_no >= MAX_DMA_CHANS)) {
+		/* Wrong channel id */
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Check chan is open */
+	if (priv->channel[chan_no].ptr == NULL){
+		/* No channel */
+		return GRPCI2DMA_ERR_NOTFOUND;
+	}
+
+	/* Get chan ISR.
+	 * NOTE(review): this check happens before the locks are taken, so a
+	 * concurrent register/unregister could race past it - confirm
+	 * callers serialize per channel. */
+	if (priv->channel[chan_no].isr == NULL){
+		/* Nothing to do */
+		return GRPCI2DMA_ERR_OK;
+	}
+
+	/* Take driver lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(grpci2dma_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+		!= RTEMS_SUCCESSFUL){
+		return GRPCI2DMA_ERR_ERROR;
+	}
+
+	/* Take channel lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->channel[chan_no].sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+		!= RTEMS_SUCCESSFUL){
+		rtems_semaphore_release(grpci2dma_sem);
+		return GRPCI2DMA_ERR_ERROR;
+	}
+
+	/* Unregister channel ISR */
+	ret = grpci2dma_channel_isr_unregister(chan_no);
+
+	/* Release channel sempahore */
+	rtems_semaphore_release(priv->channel[chan_no].sem);
+
+	/* Release driver sempahore */
+	rtems_semaphore_release(grpci2dma_sem);
+
+	return ret;
+}
+
+/* Open a DMA channel. 'chanptr' is a user-supplied, properly aligned
+ * channel descriptor, or NULL to let the driver allocate one. On success a
+ * per-channel semaphore is created and the channel id (>= 0) is returned;
+ * on failure a negative GRPCI2DMA_ERR_* code is returned. */
+int grpci2dma_open(void * chanptr)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	int cid;
+	int ret;
+
+	if (!priv){
+		/* DMA not initialized */
+		return GRPCI2DMA_ERR_NOINIT;
+	}
+
+	/* Check alignment */
+	if (((unsigned int ) chanptr) & (GRPCI2DMA_BD_CHAN_ALIGN-1)) {
+		/* Channel is not properly aligned */
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Take driver lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(grpci2dma_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+		!= RTEMS_SUCCESSFUL){
+		return GRPCI2DMA_ERR_ERROR;
+	}
+
+	/* Get free channel id */
+	cid = grpci2dma_channel_free_id();
+	if (cid < 0 ){
+		rtems_semaphore_release(grpci2dma_sem);
+		return GRPCI2DMA_ERR_TOOMANY;
+	}
+
+	/* Open channel */
+	ret = grpci2dma_channel_open((struct grpci2_bd_chan *) chanptr, cid);
+
+	/* Create channel semaphore with count = 1.
+	 * NOTE(review): if semaphore creation fails the channel remains open
+	 * without a semaphore - confirm whether it should be closed here. */
+	if (ret >= 0){
+		if (rtems_semaphore_create(
+			rtems_build_name('P', 'D', '0', '0' + cid), 1,
+			RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
+			RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
+			RTEMS_NO_PRIORITY_CEILING, 0, &priv->channel[cid].sem
+			) != RTEMS_SUCCESSFUL) {
+			priv->channel[cid].sem = RTEMS_ID_NONE;
+			rtems_semaphore_release(grpci2dma_sem);
+			return GRPCI2DMA_ERR_ERROR;
+		}
+	}
+
+	/* Release driver semaphore */
+	rtems_semaphore_release(grpci2dma_sem);
+
+	/* Return channel id */
+	return ret;
+}
+
+/* Close an open DMA channel: stops it, releases its resources and deletes
+ * its semaphore. Returns a GRPCI2DMA_ERR_* code. */
+int grpci2dma_close(int chan_no)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	int ret;
+
+	if (!priv){
+		/* DMA not initialized */
+		return GRPCI2DMA_ERR_NOINIT;
+	}
+
+	/* Get chan pointer */
+	if ((chan_no < 0) || (chan_no >= MAX_DMA_CHANS)){
+		/* Wrong channel id */
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Check chan is open */
+	if (priv->channel[chan_no].ptr == NULL){
+		/* No channel */
+		return GRPCI2DMA_ERR_NOTFOUND;
+	}
+
+	/* Take driver lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(grpci2dma_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+		!= RTEMS_SUCCESSFUL){
+		return GRPCI2DMA_ERR_ERROR;
+	}
+
+	/* Take channel lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->channel[chan_no].sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+		!= RTEMS_SUCCESSFUL){
+		rtems_semaphore_release(grpci2dma_sem);
+		return GRPCI2DMA_ERR_ERROR;
+	}
+
+	/* Close channel */
+	ret = grpci2dma_channel_close(chan_no);
+
+	/* Release channel sempahore (must be released before it can be
+	 * deleted below) */
+	rtems_semaphore_release(priv->channel[chan_no].sem);
+
+	/* Delete channel semaphore.
+	 * NOTE(review): the stale semaphore id stays in priv->channel[].sem
+	 * until the slot is reopened - confirm no other path uses it. */
+	if (ret == GRPCI2DMA_ERR_OK){
+		if (rtems_semaphore_delete(priv->channel[chan_no].sem)
+			!= RTEMS_SUCCESSFUL){
+			/* Release driver semaphore */
+			rtems_semaphore_release(grpci2dma_sem);
+			return GRPCI2DMA_ERR_ERROR;
+		}
+	}
+
+	/* Release driver semaphore */
+	rtems_semaphore_release(grpci2dma_sem);
+
+	return ret;
+}
+
+/* Prepare a DMA transfer by initializing a chain of data descriptors.
+ * pci_start/ahb_start are byte addresses, size is the total transfer size
+ * in bytes (multiple of 4), transfer_size is the maximum bytes moved per
+ * descriptor (0 means maximum). Returns the number of descriptors used or
+ * a negative GRPCI2DMA_ERR_* code. */
+int grpci2dma_prepare(
+	uint32_t pci_start, uint32_t ahb_start, int dir, int endianness,
+	int size, void * dataptr, int index, int ndata, int transfer_size)
+{
+	struct grpci2_bd_data * data = dataptr;
+
+	/* Check data pointer */
+	if ((data == NULL) ||
+		(((unsigned int ) data) & (GRPCI2DMA_BD_DATA_ALIGN-1))){
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Check indexes */
+	int maxdata = ndata - index;
+	if ((maxdata < 1) || (index < 0)){
+		/* No data descriptors to use */
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Check PCI transfer size */
+	if ( (transfer_size < 0) ||
+		(transfer_size > MAX_DMA_TRANSFER_SIZE) ||
+		(transfer_size%4 != 0) ) {
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+	if (transfer_size == 0){
+		transfer_size = MAX_DMA_TRANSFER_SIZE;
+	}
+
+	/* Check total size */
+	if ( (size <=0) || (size % 4 != 0)){
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Calculate number of data descriptors needed */
+	int words = size/4;
+	int blocksize = transfer_size/4;
+	int datacnt = words/blocksize + (words%blocksize != 0? 1: 0);
+	/* Check that we can transfer the data */
+	if (datacnt > maxdata) {
+		return GRPCI2DMA_ERR_TOOMANY;
+	}
+
+	/* Prepare data descriptors */
+	int i;
+	uint32_t pci_adr;
+	uint32_t ahb_adr;
+	int remaining=words;
+	int datasize;
+	struct grpci2_bd_data * next;
+	for (i=0; i<datacnt; i++){
+		/* Get PCI and AHB start addresses.
+		 * Fix: each descriptor moves 'blocksize' words, i.e.
+		 * 'transfer_size' bytes, so the byte addresses must advance
+		 * by transfer_size per descriptor. The old code advanced by
+		 * 'blocksize' (= transfer_size/4) bytes, which made
+		 * consecutive descriptors overlap by three quarters. */
+		pci_adr = pci_start + i*transfer_size;
+		ahb_adr = ahb_start + i*transfer_size;
+		/* Get current data size (descriptor len field is words - 1) */
+		if (remaining >= blocksize){
+			datasize = blocksize - 1;
+			remaining -= blocksize;
+		} else {
+			datasize = remaining -1;
+			remaining = 0;
+		}
+		/* Get linked list pointers */
+		if (i == datacnt - 1){
+			/* Last transfer */
+			next = DISABLED_DESCRIPTOR;
+		}else{
+			next = &data[i+index+1];
+		}
+		/* Set Data descriptor */
+		grpci2dma_data_bd_init(&data[i+index], pci_adr, ahb_adr, dir, endianness, datasize, next);
+	}
+	/* Return number of transfers used */
+	return datacnt;
+}
+
+/* Return the aggregate status of a prepared transfer: ERR if any
+ * descriptor failed, ENABLED if any is still pending, otherwise DISABLED
+ * (all done). Returns GRPCI2DMA_ERR_WRONGPTR on bad arguments. */
+int grpci2dma_status(void *dataptr, int index, int ndata)
+{
+	struct grpci2_bd_data * data = dataptr;
+	int i;
+
+	/* Check data pointer */
+	if ((data == NULL) ||
+		(((unsigned int ) data) & (GRPCI2DMA_BD_DATA_ALIGN-1))){
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Check maxdata */
+	int maxdata = ndata - index;
+	if ((maxdata < 1) || (index < 0)){
+		/* No data descriptors to use */
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Check status of all packets in transfer.
+	 * 'status' is always assigned because maxdata >= 1 is guaranteed by
+	 * the check above. */
+	int status;
+	for (i=0; i< maxdata; i++){
+		status = grpci2dma_data_bd_status(&data[i+index]);
+		if (status == GRPCI2DMA_BD_STATUS_ERR){
+			/* Error in one packet, means error in transfer */
+			return status;
+		} else if (status == GRPCI2DMA_BD_STATUS_ENABLED){
+			/* If one packet is enabled, means transfer is not done */
+			return status;
+		}
+	}
+
+	/* If we reach here it means they are all disabled */
+	return status;
+}
+
+/* Debug aid: dump the state of an open channel and its data descriptor
+ * list to stdout. Compiles to a no-op body unless DEBUG is defined.
+ * Returns a GRPCI2DMA_ERR_* code. */
+int grpci2dma_print(int chan_no)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	struct grpci2_bd_chan * chan;
+
+	if (!priv){
+		/* DMA not initialized */
+		DBG("DMA not initialized.\n");
+		return GRPCI2DMA_ERR_NOINIT;
+	}
+
+	if ( (chan_no < 0) || (chan_no >= MAX_DMA_CHANS )){
+		/* Wrong chan no*/
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	chan = priv->channel[chan_no].ptr;
+	if (chan == NULL) {
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	#ifdef DEBUG
+	/* Print channel state */
+	grpci2dma_channel_print(chan);
+
+	/* Get current DATA desc */
+	struct grpci2_bd_data * first_data = (struct grpci2_bd_data *) BD_READ(&chan->nbd);
+
+	/* Print data state */
+	grpci2dma_data_list_foreach(first_data, grpci2dma_data_print, MAX_DMA_DATA);
+	#endif
+	return GRPCI2DMA_ERR_OK;
+}
+
+/* Debug aid: dump a data descriptor list starting at 'dataptr' to stdout.
+ * Compiles to a no-op body unless DEBUG is defined.
+ * Returns a GRPCI2DMA_ERR_* code. */
+int grpci2dma_print_bd(void * dataptr)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	struct grpci2_bd_data * data = (struct grpci2_bd_data *) dataptr;
+
+	if (!priv){
+		/* DMA not initialized */
+		DBG("DMA not initialized.\n");
+		return GRPCI2DMA_ERR_NOINIT;
+	}
+
+	if ( data == NULL ){
+		/* Wrong data pointer */
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	#ifdef DEBUG
+	/* Print data state */
+	grpci2dma_data_list_foreach(data, grpci2dma_data_print, MAX_DMA_DATA);
+	#endif
+	return GRPCI2DMA_ERR_OK;
+}
+
+/* Enable the interrupt-generation bit on one data descriptor
+ * (data[index]) or, with GRPCI2DMA_OPTIONS_ALL, on the whole list from
+ * data[index] up to data[maxindex-1]. Also enables the core's DMA
+ * interrupt if it is currently disabled. Returns a GRPCI2DMA_ERR_* code. */
+int grpci2dma_interrupt_enable(
+	void *dataptr, int index, int maxindex, int options)
+{
+	struct grpci2_bd_data * data = dataptr;
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	SPIN_IRQFLAGS(irqflags);
+
+	if (!priv){
+		/* DMA not initialized */
+		return GRPCI2DMA_ERR_NOINIT;
+	}
+
+	/* Check data pointer */
+	if ((data == NULL) ||
+		(((unsigned int ) data) & (GRPCI2DMA_BD_DATA_ALIGN-1))){
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Check index */
+	if ((index < 0) || (maxindex < 1) || (index >= maxindex)){
+		/* No data descriptors to use */
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	if (options & GRPCI2DMA_OPTIONS_ALL){
+		/* Enable all interrupts */
+		if (grpci2dma_data_list_foreach(
+				&data[index],
+				grpci2dma_data_bd_interrupt_enable, maxindex -index)){
+			return GRPCI2DMA_ERR_ERROR;
+		}
+	}else{
+		/* Enable one packet interrupts */
+		grpci2dma_data_bd_interrupt_enable(&data[index]);
+	}
+
+	/* Finally enable DMA interrupts if they are not already enabled */
+	if (grpci2dma_ctrl_interrupt_status()==0){
+		SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+		grpci2dma_ctrl_interrupt_enable();
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+	}
+
+	DBG("Interrupts enabled for data (0x%08x), index:%d, maxindex:%d, %s.\n",
+		(unsigned int) data, index, maxindex,
+		(options & GRPCI2DMA_OPTIONS_ALL)? "ALL":"ONE" );
+
+	return GRPCI2DMA_ERR_OK;
+}
+
+/* Queue 'ndata' prepared data descriptors (starting at data[index]) on an
+ * open channel and kick the DMA core if it is idle and the channel is
+ * active. Returns a GRPCI2DMA_ERR_* code. */
+int grpci2dma_push(int chan_no, void *dataptr, int index, int ndata)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	SPIN_IRQFLAGS(irqflags);
+	int ret;
+
+	if (!priv){
+		/* DMA not initialized */
+		return GRPCI2DMA_ERR_NOINIT;
+	}
+
+	/* Check data pointer */
+	if ((dataptr == NULL) ||
+		(((unsigned int ) dataptr) & (GRPCI2DMA_BD_DATA_ALIGN-1))){
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Check index */
+	if ((ndata < 1) || (index < 0)){
+		/* No data descriptors to use */
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Check chan_no */
+	if ( (chan_no < 0) || (chan_no >= MAX_DMA_CHANS )){
+		/* Wrong chan no*/
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Check chan is open */
+	if (priv->channel[chan_no].ptr == NULL){
+		/* No channel */
+		return GRPCI2DMA_ERR_NOTFOUND;
+	}
+
+	/* Take channel lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->channel[chan_no].sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+		!= RTEMS_SUCCESSFUL){
+		return GRPCI2DMA_ERR_ERROR;
+	}
+
+	/* push data to channel */
+	ret = grpci2dma_channel_push(chan_no, dataptr, index, ndata);
+
+	if (ret != GRPCI2DMA_ERR_OK){
+		/* Release channel lock */
+		rtems_semaphore_release(priv->channel[chan_no].sem);
+		return ret;
+	}
+
+	/* Start DMA if it is not active and channel is active*/
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+	if ((!grpci2dma_active()) && (priv->channel[chan_no].active)){
+		grpci2dma_ctrl_start(priv->channel[chan_no].ptr);
+	}
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	/* Release channel lock */
+	rtems_semaphore_release(priv->channel[chan_no].sem);
+
+	return ret;
+}
+
+/* Start an open channel (make it eligible for processing by the core).
+ * 'options' is passed through to the channel layer and must be >= 0.
+ * Lock order: driver semaphore, then channel semaphore.
+ * Returns a GRPCI2DMA_ERR_* code. */
+int grpci2dma_start(int chan_no, int options)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	int ret;
+
+	if (!priv){
+		/* DMA not initialized */
+		return GRPCI2DMA_ERR_NOINIT;
+	}
+
+	if ((chan_no < 0 ) || (chan_no >= MAX_DMA_CHANS )) {
+		/* Wrong channel id */
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	if ( options < 0 ) {
+		/* Wrong options */
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Check chan is open */
+	if (priv->channel[chan_no].ptr == NULL){
+		/* No channel */
+		return GRPCI2DMA_ERR_NOTFOUND;
+	}
+
+	/* Take driver lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(grpci2dma_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+		!= RTEMS_SUCCESSFUL){
+		return GRPCI2DMA_ERR_ERROR;
+	}
+
+	/* Take channel lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->channel[chan_no].sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+		!= RTEMS_SUCCESSFUL){
+		rtems_semaphore_release(grpci2dma_sem);
+		return GRPCI2DMA_ERR_ERROR;
+	}
+
+	/* Start the channel */
+	ret = grpci2dma_channel_start(chan_no, options);
+
+	/* Release channel lock */
+	rtems_semaphore_release(priv->channel[chan_no].sem);
+
+	/* Release driver lock */
+	rtems_semaphore_release(grpci2dma_sem);
+
+	return ret;
+}
+
+/* Stop the channel, but don't stop ongoing transfers!
+ * Removes the channel from the core's active list via the channel layer.
+ * Lock order: driver semaphore, then channel semaphore.
+ * Returns a GRPCI2DMA_ERR_* code. */
+int grpci2dma_stop(int chan_no)
+{
+	struct grpci2dma_priv *priv = grpci2dmapriv;
+	int ret;
+
+	if (!priv){
+		/* DMA not initialized */
+		return GRPCI2DMA_ERR_NOINIT;
+	}
+
+	if ((chan_no < 0 ) || (chan_no >= MAX_DMA_CHANS)) {
+		/* Wrong channel id */
+		return GRPCI2DMA_ERR_WRONGPTR;
+	}
+
+	/* Check chan is open */
+	if (priv->channel[chan_no].ptr == NULL){
+		/* No channel */
+		return GRPCI2DMA_ERR_NOTFOUND;
+	}
+
+	/* Take driver lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(grpci2dma_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+		!= RTEMS_SUCCESSFUL){
+		return GRPCI2DMA_ERR_ERROR;
+	}
+
+	/* Take channel lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->channel[chan_no].sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+		!= RTEMS_SUCCESSFUL){
+		rtems_semaphore_release(grpci2dma_sem);
+		return GRPCI2DMA_ERR_ERROR;
+	}
+
+	/* Stop the channel */
+	ret = grpci2dma_channel_stop(chan_no);
+
+	/* Release channel lock */
+	rtems_semaphore_release(priv->channel[chan_no].sem);
+
+	/* Release driver lock */
+	rtems_semaphore_release(grpci2dma_sem);
+
+	return ret;
+}
+
+/* Return non-zero while the DMA core is processing (DMACTRL ACT bit).
+ * Fix: declare with (void) - an empty parameter list in a C function
+ * definition declares an unspecified-parameters (pre-C23) function. */
+int grpci2dma_active(void)
+{
+	return ((grpci2dma_ctrl_status()) & DMACTRL_ACT) >> DMACTRL_ACT_BIT;
+}
+
diff --git a/bsps/shared/grlib/pci/pcif.c b/bsps/shared/grlib/pci/pcif.c
new file mode 100644
index 0000000000..17708a9c0b
--- /dev/null
+++ b/bsps/shared/grlib/pci/pcif.c
@@ -0,0 +1,586 @@
+/* GRLIB PCIF PCI HOST driver.
+ *
+ * COPYRIGHT (c) 2008.
+ * Cobham Gaisler AB.
+ *
+ * Configures the PCIF core and initialize,
+ * - the PCI Library (pci.c)
+ * - the general part of the PCI Bus driver (pci_bus.c)
+ *
+ * System interrupt assigned to PCI interrupt (INTA#..INTD#) is by
+ * default taken from Plug and Play, but may be overridden by the
+ * driver resources INTA#..INTD#.
+ *
+ * The license and distribution terms for this file may be
+ * found in found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <libcpu/byteorder.h>
+#include <libcpu/access.h>
+#include <rtems/bspIo.h>
+#include <pci.h>
+#include <pci/cfg.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/ambapp.h>
+#include <drvmgr/pci_bus.h>
+#include <grlib/pcif.h>
+
+
+/* Configuration options */
+#define SYSTEM_MAINMEM_START 0x40000000
+
+/* Interrupt assignment. Set to other value than 0xff in order to
+ * override defaults and plug&play information
+ */
+#ifndef PCIF_INTA_SYSIRQ
+ #define PCIF_INTA_SYSIRQ 0xff
+#endif
+#ifndef PCIF_INTB_SYSIRQ
+ #define PCIF_INTB_SYSIRQ 0xff
+#endif
+#ifndef PCIF_INTC_SYSIRQ
+ #define PCIF_INTC_SYSIRQ 0xff
+#endif
+#ifndef PCIF_INTD_SYSIRQ
+ #define PCIF_INTD_SYSIRQ 0xff
+#endif
+
+/*#define DEBUG 1 */
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/*
+ * PCIF APB register layout (offsets relative to the core's base address)
+ */
+struct pcif_regs {
+	volatile unsigned int bars[4];  /* 0x00-0x10 : PCI target BARs */
+	volatile unsigned int bus;      /* 0x10 : config space bus select */
+	volatile unsigned int map_io;   /* 0x14 */
+	volatile unsigned int status;   /* 0x18 : incl. abort status bits */
+	volatile unsigned int intr;     /* 0x1c */
+	int unused[(0x40-0x20)/4];      /* 0x20-0x40 */
+	volatile unsigned int maps[(0x80-0x40)/4]; /* 0x40-0x80*/
+};
+
+/* Used internally for accessing the PCI bridge's configuration space itself */
+#define HOST_TGT PCI_DEV(0xff, 0, 0)
+
+struct pcif_priv *pcifpriv = NULL;
+static int pcif_minor = 0;
+
+/* PCI Interrupt assignment. Connects an PCI interrupt pin (INTA#..INTD#)
+ * to a system interrupt number.
+ */
+unsigned char pcif_pci_irq_table[4] =
+{
+ /* INTA# */ PCIF_INTA_SYSIRQ,
+ /* INTB# */ PCIF_INTB_SYSIRQ,
+ /* INTC# */ PCIF_INTC_SYSIRQ,
+ /* INTD# */ PCIF_INTD_SYSIRQ
+};
+
+/* Driver private data structure */
+struct pcif_priv {
+	struct drvmgr_dev *dev;     /* device handle from driver manager */
+	struct pcif_regs *regs;     /* PCIF APB registers */
+	int irq;
+	int minor;
+	int irq_mask;
+
+	/* PCI address space windows as seen from the CPU */
+	unsigned int pci_area;
+	unsigned int pci_area_end;
+	unsigned int pci_io;
+	unsigned int pci_conf;      /* base of configuration space window */
+	unsigned int pci_conf_end;
+
+	uint32_t devVend; /* Host PCI Vendor/Device ID */
+	uint32_t bar1_size;
+
+	struct drvmgr_map_entry maps_up[2];
+	struct drvmgr_map_entry maps_down[2];
+	struct pcibus_config config;
+};
+
+int pcif_init1(struct drvmgr_dev *dev);
+int pcif_init3(struct drvmgr_dev *dev);
+
+/* PCIF DRIVER */
+
+struct drvmgr_drv_ops pcif_ops =
+{
+ .init = {pcif_init1, NULL, pcif_init3, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+struct amba_dev_id pcif_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_PCIF},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info pcif_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_PCIF_ID, /* Driver ID */
+ "PCIF_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &pcif_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ sizeof(struct pcif_priv), /* Let drvmgr alloc private */
+ },
+ &pcif_ids[0]
+};
+
+/* Register the PCIF driver with the driver manager; called once at boot. */
+void pcif_register_drv(void)
+{
+	DBG("Registering PCIF driver\n");
+	drvmgr_drv_register(&pcif_info.general);
+}
+
+/* Read a 32-bit word from PCI configuration space of device 'dev' at
+ * aligned offset 'ofs'. Selects the bus in the PCIF bus register, performs
+ * the access through the configuration space window and checks the status
+ * register for master/target abort. On abort *val is 0xffffffff and
+ * PCISTS_MSTABRT is returned. */
+static int pcif_cfg_r32(pci_dev_t dev, int ofs, uint32_t *val)
+{
+	struct pcif_priv *priv = pcifpriv;
+	volatile uint32_t *pci_conf;
+	uint32_t devfn;
+	int retval;
+	int bus = PCI_DEV_BUS(dev);
+
+	if (ofs & 3)
+		return PCISTS_EINVAL;
+
+	if (PCI_DEV_SLOT(dev) > 15) {
+		*val = 0xffffffff;
+		return PCISTS_OK;
+	}
+
+	/* PCIF can access "non-standard" devices on bus0 (on AD11.AD16),
+	 * but we skip them.
+	 */
+	if (dev == HOST_TGT)
+		bus = devfn = 0;
+	else if (bus == 0)
+		devfn = PCI_DEV_DEVFUNC(dev) + PCI_DEV(0, 6, 0);
+	else
+		devfn = PCI_DEV_DEVFUNC(dev);
+
+	/* Select bus */
+	priv->regs->bus = bus << 16;
+
+	pci_conf = (volatile uint32_t *)(priv->pci_conf | (devfn << 8) | ofs);
+
+	*val = *pci_conf;
+
+	/* 0x30000000 covers the abort status bits in the PCIF status
+	 * register; NOTE(review): confirm bit meaning against the GRLIB
+	 * PCIF register documentation. */
+	if (priv->regs->status & 0x30000000) {
+		*val = 0xffffffff;
+		retval = PCISTS_MSTABRT;
+	} else
+		retval = PCISTS_OK;
+
+	DBG("pci_read: [%x:%x:%x] reg: 0x%x => addr: 0x%x, val: 0x%x\n",
+		PCI_DEV_EXPAND(dev), ofs, pci_conf, *val);
+
+	return retval;
+}
+
+/* Read a 16-bit value from configuration space: reads the containing
+ * 32-bit word and extracts the half-word. 'ofs' must be 2-byte aligned. */
+static int pcif_cfg_r16(pci_dev_t dev, int ofs, uint16_t *val)
+{
+	uint32_t v;
+	int retval;
+
+	if (ofs & 1)
+		return PCISTS_EINVAL;
+
+	retval = pcif_cfg_r32(dev, ofs & ~0x3, &v);
+	*val = 0xffff & (v >> (8*(ofs & 0x3)));
+
+	return retval;
+}
+
+/* Read an 8-bit value from configuration space: reads the containing
+ * 32-bit word and extracts the byte (no alignment restriction). */
+static int pcif_cfg_r8(pci_dev_t dev, int ofs, uint8_t *val)
+{
+	uint32_t v;
+	int retval;
+
+	retval = pcif_cfg_r32(dev, ofs & ~0x3, &v);
+
+	*val = 0xff & (v >> (8*(ofs & 3)));
+
+	return retval;
+}
+
+/* Write a 32-bit word to PCI configuration space of device 'dev' at
+ * aligned offset 'ofs' (limited to the standard 0x00..0xfc header range).
+ * Selects the bus in the PCIF bus register and writes through the
+ * configuration space window. */
+static int pcif_cfg_w32(pci_dev_t dev, int ofs, uint32_t val)
+{
+	struct pcif_priv *priv = pcifpriv;
+	volatile uint32_t *pci_conf;
+	uint32_t devfn;
+	int bus = PCI_DEV_BUS(dev);
+
+	if (ofs & ~0xfc)
+		return PCISTS_EINVAL;
+
+	if (PCI_DEV_SLOT(dev) > 15)
+		return PCISTS_MSTABRT;
+
+	/* PCIF can access "non-standard" devices on bus0 (on AD11.AD16),
+	 * but we skip them.
+	 */
+	if (dev == HOST_TGT)
+		bus = devfn = 0;
+	else if (bus == 0)
+		devfn = PCI_DEV_DEVFUNC(dev) + PCI_DEV(0, 6, 0);
+	else
+		devfn = PCI_DEV_DEVFUNC(dev);
+
+	/* Select bus */
+	priv->regs->bus = bus << 16;
+
+	pci_conf = (volatile uint32_t *)(priv->pci_conf | (devfn << 8) | ofs);
+
+	*pci_conf = val;
+
+	/* Fix: the DBG statement referenced the undeclared identifier
+	 * 'value', which broke the build whenever DEBUG was defined;
+	 * the parameter is named 'val'. */
+	DBG("pci_write - [%x:%x:%x] reg: 0x%x => addr: 0x%x, val: 0x%x\n",
+		PCI_DEV_EXPAND(dev), ofs, pci_conf, val);
+
+	return PCISTS_OK;
+}
+
+/* Write a 16-bit value to configuration space using a read-modify-write of
+ * the containing 32-bit word. 'ofs' must be 2-byte aligned. */
+static int pcif_cfg_w16(pci_dev_t dev, int ofs, uint16_t val)
+{
+	uint32_t v;
+	int retval;
+
+	if (ofs & 1)
+		return PCISTS_EINVAL;
+
+	retval = pcif_cfg_r32(dev, ofs & ~0x3, &v);
+	if (retval != PCISTS_OK)
+		return retval;
+
+	/* Merge the new half-word into its byte lane */
+	v = (v & ~(0xffff << (8*(ofs&3)))) | ((0xffff&val) << (8*(ofs&3)));
+
+	return pcif_cfg_w32(dev, ofs & ~0x3, v);
+}
+
+static int pcif_cfg_w8(pci_dev_t dev, int ofs, uint8_t val)
+{
+	uint32_t word;
+	int ret;
+	int shift;
+
+	/* Read-modify-write the 32-bit word that contains the byte */
+	ret = pcif_cfg_r32(dev, ofs & ~0x3, &word);
+	if (ret != PCISTS_OK)
+		return ret;
+
+	shift = 8 * (ofs & 3);
+	word &= ~(0xffu << shift);
+	word |= (uint32_t)val << shift;
+
+	return pcif_cfg_w32(dev, ofs & ~0x3, word);
+}
+
+
+/* Return the assigned system IRQ number that corresponds to the PCI
+ * "Interrupt Pin" information from configuration space.
+ *
+ * The IRQ information is stored in the pcif_pci_irq_table configurable
+ * by the user.
+ *
+ * Returns the "system IRQ" for the PCI INTA#..INTD# pin in irq_pin.
+ * Returns 0 if the pin number is invalid ("not assigned"); the code
+ * below never returns 0xff.
+ */
+static uint8_t pcif_bus0_irq_map(pci_dev_t dev, int irq_pin)
+{
+	uint8_t sysIrqNr = 0; /* not assigned */
+	int irq_group;
+
+	if ( (irq_pin >= 1) && (irq_pin <= 4) ) {
+		/* Use default IRQ decoding on PCI BUS0 according slot numbering.
+		 * The pin is rotated by the slot number so that interrupt load
+		 * is spread over the four PCI interrupt lines.
+		 */
+		irq_group = PCI_DEV_SLOT(dev) & 0x3;
+		irq_pin = ((irq_pin - 1) + irq_group) & 0x3;
+		/* Valid PCI "Interrupt Pin" number */
+		sysIrqNr = pcif_pci_irq_table[irq_pin];
+	}
+	return sysIrqNr;
+}
+
+/* Address translation hook required by the PCI access driver interface.
+ * PCIF uses a 1:1 AHB/PCI address mapping, so no translation is needed.
+ */
+static int pcif_translate(uint32_t *address, int type, int dir)
+{
+	/* No address translation implemented at this point */
+	return 0;
+}
+
+extern struct pci_memreg_ops pci_memreg_sparc_be_ops;
+
+/* PCIF Big-Endian PCI access routines.
+ *
+ * Configuration space is accessed with the pcif_cfg_* functions above,
+ * I/O space with the SPARC big-endian load/store helpers, and memory
+ * space registers through the generic big-endian implementation.
+ */
+struct pci_access_drv pcif_access_drv = {
+	.cfg =
+	{
+		pcif_cfg_r8,
+		pcif_cfg_r16,
+		pcif_cfg_r32,
+		pcif_cfg_w8,
+		pcif_cfg_w16,
+		pcif_cfg_w32,
+	},
+	.io = /* PCIF only supports Big-endian */
+	{
+		_ld8,
+		_ld_be16,
+		_ld_be32,
+		_st8,
+		_st_be16,
+		_st_be32,
+	},
+	.memreg = &pci_memreg_sparc_be_ops,
+	.translate = pcif_translate,
+};
+
+/* Initializes the PCIF core hardware
+ *
+ * Masks interrupts, sets up the AHB->PCI address mapping, maps system
+ * RAM for PCI target DMA, and enables the host as PCI bus master.
+ * Always returns 0 (success).
+ */
+static int pcif_hw_init(struct pcif_priv *priv)
+{
+	struct pcif_regs *regs;
+	uint32_t data, size;
+	int mst;
+	pci_dev_t host = HOST_TGT;
+
+	regs = priv->regs;
+
+	/* Mask PCI interrupts */
+	regs->intr = 0;
+
+	/* Get the PCIF Host PCI ID */
+	pcif_cfg_r32(host, PCIR_VENDOR, &priv->devVend);
+
+	/* set 1:1 mapping between AHB -> PCI memory space, for all Master cores */
+	for ( mst=0; mst<16; mst++) {
+		regs->maps[mst] = priv->pci_area;
+
+		/* Check if this register is implemented: an unimplemented map
+		 * register does not retain the written value.
+		 */
+		if ( regs->maps[mst] != priv->pci_area )
+			break;
+	}
+
+	/* and map system RAM at pci address SYSTEM_MAINMEM_START. This way
+	 * PCI targets can do DMA directly into CPU main memory.
+	 */
+	regs->bars[0] = SYSTEM_MAINMEM_START;
+	regs->bars[1] = 0;
+	regs->bars[2] = 0;
+	regs->bars[3] = 0;
+
+	/* determine size of target BAR1 using standard PCI BAR sizing:
+	 * write all-ones, read back the implemented address bits
+	 */
+	pcif_cfg_w32(host, PCIR_BAR(1), 0xffffffff);
+	pcif_cfg_r32(host, PCIR_BAR(1), &size);
+	priv->bar1_size = (~(size & ~0xf)) + 1;
+
+	pcif_cfg_w32(host, PCIR_BAR(0), 0);
+	pcif_cfg_w32(host, PCIR_BAR(1), SYSTEM_MAINMEM_START);
+	pcif_cfg_w32(host, PCIR_BAR(2), 0);
+	pcif_cfg_w32(host, PCIR_BAR(3), 0);
+	pcif_cfg_w32(host, PCIR_BAR(4), 0);
+	pcif_cfg_w32(host, PCIR_BAR(5), 0);
+
+	/* set as bus master and enable pci memory responses */
+	pcif_cfg_r32(host, PCIR_COMMAND, &data);
+	data |= (PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
+	pcif_cfg_w32(host, PCIR_COMMAND, data);
+
+	/* Successful */
+	return 0;
+}
+
+/* Initializes the PCIF core and driver, must be called before calling init_pci()
+ *
+ * Return values
+ *  0        Successful initalization
+ * -1        Error during initialization, for example "PCI core not found".
+ * -2        Error PCI controller not HOST (targets not supported)
+ * -3        Error due to PCIF hardware initialization
+ *
+ * NOTE(review): a "-4 Error registering driver to PCI layer" code was
+ * documented here but is never returned by this function - PCI layer
+ * registration is performed by the caller (pcif_init1).
+ */
+static int pcif_init(struct pcif_priv *priv)
+{
+	struct ambapp_apb_info *apb;
+	struct ambapp_ahb_info *ahb;
+	int pin;
+	union drvmgr_key_value *value;
+	char keyname[6]; /* holds "INTA#".."INTD#" plus NUL */
+	struct amba_dev_info *ainfo = priv->dev->businfo;
+
+	/* Find PCI core from Plug&Play information */
+	apb = ainfo->info.apb_slv;
+	ahb = ainfo->info.ahb_slv;
+
+	/* Found PCI core, init private structure */
+	priv->irq = apb->irq;
+	priv->regs = (struct pcif_regs *)apb->start;
+
+	/* Calculate the PCI windows
+	 * AMBA->PCI Window:                       AHB SLAVE AREA0
+	 * AMBA->PCI I/O cycles Window:            AHB SLAVE AREA1 Lower half
+	 * AMBA->PCI Configuration cycles Window:  AHB SLAVE AREA1 Upper half
+	 */
+	priv->pci_area = ahb->start[0];
+	priv->pci_area_end = ahb->start[0] + ahb->mask[0];
+	priv->pci_io = ahb->start[1];
+	priv->pci_conf = ahb->start[1] + (ahb->mask[1] >> 1);
+	priv->pci_conf_end = ahb->start[1] + ahb->mask[1];
+
+	/* On systems where PCI I/O area and configuration area is apart of the "PCI Window"
+	 * the PCI Window stops at the start of the PCI I/O area
+	 */
+	if ( (priv->pci_io > priv->pci_area) && (priv->pci_io < (priv->pci_area_end-1)) ) {
+		priv->pci_area_end = priv->pci_io;
+	}
+
+	/* Init PCI interrupt assignment table to all use the interrupt routed through
+	 * the PCIF core. Entries still set to 0xff (unassigned) may be overridden
+	 * by the user through the INTA#..INTD# driver resources.
+	 */
+	strcpy(keyname, "INTX#");
+	for (pin=1; pin<5; pin++) {
+		if ( pcif_pci_irq_table[pin-1] == 0xff ) {
+			pcif_pci_irq_table[pin-1] = priv->irq;
+
+			/* User may override Plug & Play IRQ */
+			keyname[3] = 'A' + (pin-1);
+			value = drvmgr_dev_key_get(priv->dev, keyname, DRVMGR_KT_INT);
+			if ( value )
+				pcif_pci_irq_table[pin-1] = value->i;
+		}
+	}
+
+	/* Optional irqMask resource selects which PCI IRQ lines generate the
+	 * system IRQ; default is all four (0xf). Applied later in pcif_init3().
+	 */
+	priv->irq_mask = 0xf;
+	value = drvmgr_dev_key_get(priv->dev, "", DRVMGR_KT_INT);
+	if ( value )
+		priv->irq_mask = value->i & 0xf;
+
+	/* This driver only support HOST systems, we check for HOST */
+	if ( priv->regs->status & 0x00000001 ) {
+		/* Target not supported */
+		return -2;
+	}
+
+	/* Init the PCI Core */
+	if ( pcif_hw_init(priv) ) {
+		return -3;
+	}
+
+	/* Down streams translation table */
+	priv->maps_down[0].name = "AMBA -> PCI MEM Window";
+	priv->maps_down[0].size = priv->pci_area_end - priv->pci_area;
+	priv->maps_down[0].from_adr = (void *)priv->pci_area;
+	priv->maps_down[0].to_adr = (void *)priv->pci_area;
+	/* End table */
+	priv->maps_down[1].size = 0;
+
+	/* Up streams translation table */
+	priv->maps_up[0].name = "Target BAR1 -> AMBA";
+	priv->maps_up[0].size = priv->bar1_size;
+	priv->maps_up[0].from_adr = (void *)SYSTEM_MAINMEM_START;
+	priv->maps_up[0].to_adr = (void *)SYSTEM_MAINMEM_START;
+	/* End table */
+	priv->maps_up[1].size = 0;
+
+	return 0;
+}
+
+/* Called when a core is found with the AMBA device and vendor ID
+ * given in pcif_ids[].
+ */
+/* Called when a core is found with the AMBA device and vendor ID
+ * given in pcif_ids[]. Performs driver/PCI-layer registration and PCI
+ * bus enumeration. Only one PCIF core per system is supported.
+ */
+int pcif_init1(struct drvmgr_dev *dev)
+{
+	struct pcif_priv *priv;
+	struct pci_auto_setup pcif_auto_cfg;
+
+	DBG("PCIF[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+
+	if ( pcif_minor != 0 ) {
+		printk("Driver only supports one PCI core\n");
+		return DRVMGR_FAIL;
+	}
+
+	/* NOTE(review): dev->priv is expected to have been allocated by the
+	 * bus layer before init1 runs; it is freed with free() on failure
+	 * below - confirm the allocation really comes from the heap.
+	 */
+	priv = dev->priv;
+	if ( !priv )
+		return DRVMGR_NOMEM;
+
+	dev->priv = priv;
+	priv->dev = dev;
+	priv->minor = pcif_minor++;
+
+	pcifpriv = priv;
+	if ( pcif_init(priv) ) {
+		printk("Failed to initialize PCIF driver\n");
+		free(priv);
+		dev->priv = NULL;
+		return DRVMGR_FAIL;
+	}
+
+	/* Host is always Big-Endian */
+	pci_endian = PCI_BIG_ENDIAN;
+
+	/* Register the PCI core at the PCI layer */
+
+	if (pci_access_drv_register(&pcif_access_drv)) {
+		/* Access routines registration failed */
+		return DRVMGR_FAIL;
+	}
+
+	/* Prepare memory MAP for the PCI auto configuration (no prefetchable
+	 * memory window; I/O window ends where configuration space begins)
+	 */
+	pcif_auto_cfg.options = 0;
+	pcif_auto_cfg.mem_start = 0;
+	pcif_auto_cfg.mem_size = 0;
+	pcif_auto_cfg.memio_start = priv->pci_area;
+	pcif_auto_cfg.memio_size = priv->pci_area_end - priv->pci_area;
+	pcif_auto_cfg.io_start = priv->pci_io;
+	pcif_auto_cfg.io_size = priv->pci_conf - priv->pci_io;
+	pcif_auto_cfg.irq_map = pcif_bus0_irq_map;
+	pcif_auto_cfg.irq_route = NULL; /* use standard routing */
+	pci_config_register(&pcif_auto_cfg);
+
+	if (pci_config_init()) {
+		/* PCI configuration failed */
+		return DRVMGR_FAIL;
+	}
+
+	priv->config.maps_down = &priv->maps_down[0];
+	priv->config.maps_up = &priv->maps_up[0];
+	return pcibus_register(dev, &priv->config);
+}
+
+/* Final init stage: unmask the configured PCI interrupt lines. Runs after
+ * all PCI board drivers have initialized their hardware with IRQs off.
+ */
+int pcif_init3(struct drvmgr_dev *dev)
+{
+	struct pcif_priv *priv = dev->priv;
+
+	/* Unmask all interrupts, on some systems this
+	 * might be problematic because all PCI IRQs are
+	 * not connected on the PCB or used for something
+	 * else. The irqMask driver resource can be used to
+	 * control which PCI IRQs are used to generate the
+	 * PCI system IRQ, example:
+	 *
+	 * 0xf - enable all (DEFAULT)
+	 * 0x8 - enable one PCI irq
+	 *
+	 * Before unmasking PCI IRQ, all PCI boards must
+	 * have been initialized and IRQ turned off to avoid
+	 * system hang.
+	 */
+
+	priv->regs->intr = priv->irq_mask;
+
+	return DRVMGR_OK;
+}
diff --git a/bsps/shared/grlib/pwm/grpwm.c b/bsps/shared/grlib/pwm/grpwm.c
new file mode 100644
index 0000000000..a824201795
--- /dev/null
+++ b/bsps/shared/grlib/pwm/grpwm.c
@@ -0,0 +1,854 @@
+/*
+ * GRPWM PWM Driver interface.
+ *
+ * COPYRIGHT (c) 2009.
+ * Cobham Gaisler AB,
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bsp.h>
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <rtems/bspIo.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/grpwm.h>
+#include <grlib/ambapp.h>
+
+#include <grlib/grlib_impl.h>
+
+/* #define DEBUG 1 */
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#define STATIC
+#else
+#define DBG(x...)
+#define STATIC static
+#endif
+
+/*** REGISTER LAYOUT ***/
+
+/* PWM Channel specific registers (one instance per channel, offsets are
+ * relative to the channel's register block)
+ */
+struct grpwm_pwm_regs {
+	volatile unsigned int period;	/* 0x00 */
+	volatile unsigned int comp;	/* 0x04 */
+	volatile unsigned int dbcomp;	/* 0x08 */
+	volatile unsigned int ctrl;	/* 0x0C */
+};
+
+/* Core common registers (offsets are relative to the APB base address) */
+struct grpwm_regs {
+	volatile unsigned int ctrl;	/* 0x00 */
+	volatile unsigned int scaler;	/* 0x04 */
+	volatile unsigned int ipend;	/* 0x08 */
+	volatile unsigned int cap1;	/* 0x0C */
+	volatile unsigned int cap2;	/* 0x10 */
+	volatile unsigned int wctrl;	/* 0x14 */
+	int reserved0[2];
+	struct grpwm_pwm_regs pwms[8];	/* 0x20 */
+	int reserved1[(0x8000-0xA0)/4];	/* 0xA0-0x7FFC */
+	volatile unsigned int wram[0x8000/4];	/* 0x8000-0xFFFC Wave form RAM */
+};
+
+/*** REGISTER BIT LAYOUT ***/
+
+/* CTRL REGISTER - 0x0 */
+#define GRPWM_CTRL_EN_BIT 0
+#define GRPWM_CTRL_SCSEL_BIT 8
+#define GRPWM_CTRL_NOUP_BIT 12
+#define GRPWM_CTRL_EN (1<<GRPWM_CTRL_EN_BIT)
+#define GRPWM_CTRL_SCSEL (0x7<<GRPWM_CTRL_SCSEL_BIT)
+#define GRPWM_CTRL_NOUP (0xff<<GRPWM_CTRL_NOUP_BIT)
+
+
+/* CAPABILITY1 REGISTER - 0x0C */
+#define GRPWM_CAP_NPWM_BIT 0
+#define GRPWM_CAP_PBITS_BIT 3
+#define GRPWM_CAP_SBITS_BIT 8
+#define GRPWM_CAP_NSC_BIT 13
+#define GRPWM_CAP_DBB_BIT 16
+#define GRPWM_CAP_DBSC_BIT 21
+#define GRPWM_CAP_ASY_BIT 22
+#define GRPWM_CAP_SYM_BIT 23
+#define GRPWM_CAP_SEP_BIT 25
+#define GRPWM_CAP_DCM_BIT 27
+
+#define GRPWM_CAP_NPWM (0x7<<GRPWM_CAP_NPWM_BIT)
+#define GRPWM_CAP_PBITS (0x1f<<GRPWM_CAP_PBITS_BIT)
+#define GRPWM_CAP_SBITS (0x1f<<GRPWM_CAP_SBITS_BIT)
+#define GRPWM_CAP_NSC (0x7<<GRPWM_CAP_NSC_BIT)
+#define GRPWM_CAP_DBB (0x1f<<GRPWM_CAP_DBB_BIT)
+#define GRPWM_CAP_DBSC (1<<GRPWM_CAP_DBSC_BIT)
+#define GRPWM_CAP_ASY (1<<GRPWM_CAP_ASY_BIT)
+#define GRPWM_CAP_SYM (1<<GRPWM_CAP_SYM_BIT)
+#define GRPWM_CAP_SEP (0x3<<GRPWM_CAP_SEP_BIT)
+#define GRPWM_CAP_DCM (1<<GRPWM_CAP_DCM_BIT)
+
+/* CAPABILITY2 REGISTER - 0x10 */
+#define GRPWM_CAP2_WPWM_BIT 0
+#define GRPWM_CAP2_WDBITS_BIT 1
+#define GRPWM_CAP2_WABITS_BIT 6
+#define GRPWM_CAP2_WSYNC_BIT 10
+
+#define GRPWM_CAP2_WPWM (0x1<<GRPWM_CAP2_WPWM_BIT)
+#define GRPWM_CAP2_WDBITS (0x1f<<GRPWM_CAP2_WDBITS_BIT)
+#define GRPWM_CAP2_WABITS (0xf<<GRPWM_CAP2_WABITS_BIT)
+#define GRPWM_CAP2_WSYNC (1<<GRPWM_CAP2_WSYNC_BIT)
+
+/* WAVE FORM CONFIG REGISTER - 0x14 */
+#define GRPWM_WCTRL_STOP_BIT 0
+#define GRPWM_WCTRL_WSYNC_BIT 16
+#define GRPWM_WCTRL_WSEN_BIT 29
+#define GRPWM_WCTRL_WSYNCCFG_BIT 30
+
+#define GRPWM_WCTRL_STOP (0x1fff<<GRPWM_WCTRL_STOP_BIT)
+#define GRPWM_WCTRL_WSYNC (0x1fff<<GRPWM_WCTRL_WSYNC_BIT)
+#define GRPWM_WCTRL_WSEN (0x1<<GRPWM_WCTRL_WSEN_BIT)
+#define GRPWM_WCTRL_WSYNCCFG (0x3<<GRPWM_WCTRL_WSYNCCFG_BIT)
+
+
+/* PWM CONTROL REGISTER - 0x2C, 0x3C... */
+#define GRPWM_PCTRL_EN_BIT 0
+#define GRPWM_PCTRL_POL_BIT 1
+#define GRPWM_PCTRL_PAIR_BIT 2
+#define GRPWM_PCTRL_FIX_BIT 3
+#define GRPWM_PCTRL_METH_BIT 6
+#define GRPWM_PCTRL_DCEN_BIT 8
+#define GRPWM_PCTRL_WEN_BIT 9
+#define GRPWM_PCTRL_SCSEL_BIT 10
+#define GRPWM_PCTRL_IEN_BIT 13
+#define GRPWM_PCTRL_IT_BIT 14
+#define GRPWM_PCTRL_ISC_BIT 15
+#define GRPWM_PCTRL_DBEN_BIT 21
+#define GRPWM_PCTRL_DBSC_BIT 22
+#define GRPWM_PCTRL_FLIP_BIT 26
+
+#define GRPWM_PCTRL_EN (0x1<<GRPWM_PCTRL_EN_BIT)
+#define GRPWM_PCTRL_POL (0x1<<GRPWM_PCTRL_POL_BIT)
+#define GRPWM_PCTRL_PAIR (0x1<<GRPWM_PCTRL_PAIR_BIT)
+#define GRPWM_PCTRL_FIX (0x7<<GRPWM_PCTRL_FIX_BIT)
+#define GRPWM_PCTRL_METH (0x1<<GRPWM_PCTRL_METH_BIT)
+#define GRPWM_PCTRL_DCEN (0x1<<GRPWM_PCTRL_DCEN_BIT)
+#define GRPWM_PCTRL_WEN (0x1<<GRPWM_PCTRL_WEN_BIT)
+#define GRPWM_PCTRL_SCSEL (0x7<<GRPWM_PCTRL_SCSEL_BIT)
+#define GRPWM_PCTRL_IEN (0x1<<GRPWM_PCTRL_IEN_BIT)
+#define GRPWM_PCTRL_IT (0x1<<GRPWM_PCTRL_IT_BIT)
+#define GRPWM_PCTRL_ISC (0x3f<<GRPWM_PCTRL_ISC_BIT)
+#define GRPWM_PCTRL_DBEN (0x1<<GRPWM_PCTRL_DBEN_BIT)
+#define GRPWM_PCTRL_DBSC (0xf<<GRPWM_PCTRL_DBSC_BIT)
+#define GRPWM_PCTRL_FLIP (0xf<<GRPWM_PCTRL_FLIP_BIT)
+
+/*** DRIVER PRIVATE STRUCTURE ***/
+struct grpwm_priv {
+	struct drvmgr_dev *dev;	/* Driver manager device */
+	struct grpwm_regs *regs;	/* Core register base */
+	char devName[32];	/* Filesystem name, e.g. /dev/grpwm0 */
+	int irq;	/* Base AMBA IRQ number */
+	int open;	/* Non-zero while device is opened (exclusive) */
+
+	/* Driver implementation */
+	char nscalers;	/* Number of scalers */
+	char wave;	/* If Wave form is available */
+	int wlength;	/* Wave Form RAM Length */
+	int channel_cnt;	/* Number of implemented PWM channels (1..8) */
+	struct grpwm_chan_priv *channels[8];
+	rtems_id dev_sem;	/* Protects open/close bookkeeping */
+};
+
+/* Per-channel state: register pointer, IRQ index and user ISR */
+struct grpwm_chan_priv {
+	struct grpwm_priv *common;	/* Back pointer to device state */
+	struct grpwm_pwm_regs *pwmregs;	/* This channel's registers */
+	/* IRQ */
+	int irqindex;	/* IRQ index relative the core's base IRQ */
+	void (*isr)(int channel, void *arg);	/* User ISR, NULL if unset */
+	void *isr_arg;
+};
+
+/******************* Driver Manager Part ***********************/
+
+int grpwm_device_init(struct grpwm_priv *priv);
+int grpwm_register_io(rtems_device_major_number *m);
+static int grpwm_driver_io_registered = 0;
+static rtems_device_major_number grpwm_driver_io_major = 0;
+
+int grpwm_init2(struct drvmgr_dev *dev);
+int grpwm_init3(struct drvmgr_dev *dev);
+
+/* Driver manager operations: stage 2 allocates private data, stage 3
+ * initializes the hardware and registers the I/O device node.
+ */
+struct drvmgr_drv_ops grpwm_ops =
+{
+	.init = {NULL, grpwm_init2, grpwm_init3, NULL},
+	.remove = NULL,
+	.info = NULL
+};
+
+/* AMBA Plug&Play IDs matched by this driver: Gaisler GRPWM cores */
+struct amba_dev_id grpwm_ids[] =
+{
+	{VENDOR_GAISLER, GAISLER_GRPWM},
+	{0, 0}		/* Mark end of table */
+};
+
+/* AMBA bus driver description registered with the driver manager */
+struct amba_drv_info grpwm_drv_info =
+{
+	{
+		DRVMGR_OBJ_DRV,			/* Driver */
+		NULL,				/* Next driver */
+		NULL,				/* Device list */
+		DRIVER_AMBAPP_GAISLER_GRPWM_ID,	/* Driver ID */
+		"GRPWM_DRV",			/* Driver Name */
+		DRVMGR_BUS_TYPE_AMBAPP,		/* Bus Type */
+		&grpwm_ops,
+		NULL,				/* Funcs */
+		0,				/* No devices yet */
+		0,
+	},
+	&grpwm_ids[0]
+};
+
+/* Register the GRPWM driver with the driver manager. Must be called
+ * before driver manager initialization.
+ */
+void grpwm_register_drv (void)
+{
+	DBG("Registering GRPWM driver\n");
+	drvmgr_drv_register(&grpwm_drv_info.general);
+}
+
+/* Driver manager init stage 2: allocate and zero the per-device private
+ * data. Hardware initialization is deferred to grpwm_init3().
+ */
+int grpwm_init2(struct drvmgr_dev *dev)
+{
+	struct grpwm_priv *priv;
+
+	DBG("GRPWM[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+
+	/* grlib_calloc() replaces the previous grlib_malloc() + memset()
+	 * pair, consistent with the allocation in grpwm_device_init().
+	 */
+	priv = dev->priv = grlib_calloc(1, sizeof(*priv));
+	if ( !priv )
+		return DRVMGR_NOMEM;
+	priv->dev = dev;
+
+	/* This core will not find other cores, so we wait for init3() */
+
+	return DRVMGR_OK;
+}
+
+/* Driver manager init stage 3: register the I/O driver (once), initialize
+ * the hardware and create the filesystem device node.
+ */
+int grpwm_init3(struct drvmgr_dev *dev)
+{
+	struct grpwm_priv *priv = dev->priv;
+	char prefix[32];
+	rtems_status_code status;
+
+	if ( !priv )
+		return DRVMGR_FAIL;
+
+	if ( grpwm_driver_io_registered == 0) {
+		/* Register the I/O driver only once for all cores */
+		if ( grpwm_register_io(&grpwm_driver_io_major) ) {
+			/* Failed to register I/O driver. Bug fix: free the
+			 * private data allocated in init2() - previously only
+			 * the pointer was cleared, leaking the allocation.
+			 */
+			free(dev->priv);
+			dev->priv = NULL;
+			return DRVMGR_FAIL;
+		}
+
+		grpwm_driver_io_registered = 1;
+	}
+
+	/* I/O system registered and initialized
+	 * Now we take care of device initialization.
+	 */
+	if ( grpwm_device_init(priv) ) {
+		free(dev->priv);
+		dev->priv = NULL;
+		return DRVMGR_FAIL;
+	}
+
+	/* Get Filesystem name prefix */
+	prefix[0] = '\0';
+	if ( drvmgr_get_dev_prefix(dev, prefix) ) {
+		/* Failed to get prefix, make sure of a unique FS name
+		 * by using the driver minor.
+		 */
+		sprintf(priv->devName, "/dev/grpwm%d", dev->minor_drv);
+	} else {
+		/* Got special prefix, this means we have a bus prefix
+		 * And we should use our "bus minor"
+		 */
+		sprintf(priv->devName, "/dev/%sgrpwm%d", prefix, dev->minor_bus);
+	}
+
+	/* Register Device */
+	status = rtems_io_register_name(priv->devName, grpwm_driver_io_major,
+					dev->minor_drv);
+	if (status != RTEMS_SUCCESSFUL) {
+		/* NOTE: priv is intentionally not freed here - interrupt
+		 * handlers registered by grpwm_device_init() still reference
+		 * it.
+		 */
+		return DRVMGR_FAIL;
+	}
+
+	return DRVMGR_OK;
+}
+
+/******************* Driver Implementation ***********************/
+
+static rtems_device_driver grpwm_initialize(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grpwm_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grpwm_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grpwm_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grpwm_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grpwm_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+
+#define GRPWM_DRIVER_TABLE_ENTRY { grpwm_initialize, grpwm_open, grpwm_close, grpwm_read, grpwm_write, grpwm_ioctl }
+
+static rtems_driver_address_table grpwm_driver = GRPWM_DRIVER_TABLE_ENTRY;
+
+/* Register the RTEMS I/O driver table entry for GRPWM.
+ * Returns 0 on success, -1 on failure.
+ */
+int grpwm_register_io(rtems_device_major_number *m)
+{
+	rtems_status_code rc;
+
+	rc = rtems_io_register_driver(0, &grpwm_driver, m);
+	if (rc == RTEMS_SUCCESSFUL) {
+		DBG("GRPWM driver successfully registered, major: %d\n", *m);
+		return 0;
+	}
+
+	/* Registration failed, report the reason in debug builds */
+	switch (rc) {
+	case RTEMS_TOO_MANY:
+		DBG("GRPWM rtems_io_register_driver failed: RTEMS_TOO_MANY\n");
+		break;
+	case RTEMS_INVALID_NUMBER:
+		DBG("GRPWM rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n");
+		break;
+	case RTEMS_RESOURCE_IN_USE:
+		DBG("GRPWM rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n");
+		break;
+	default:
+		DBG("GRPWM rtems_io_register_driver failed\n");
+		break;
+	}
+	return -1;
+}
+
+/* Load one of the core's scalers with a reload value. The scaler is
+ * selected through the SCSEL field of the control register, then the
+ * scaler register is written.
+ */
+static void grpwm_scaler_set(
+	struct grpwm_regs *regs,
+	int scaler,
+	unsigned int value)
+{
+	unsigned int ctrl;
+
+	/* Point the scaler-select field at the requested scaler */
+	ctrl = regs->ctrl;
+	ctrl &= ~GRPWM_CTRL_SCSEL;
+	ctrl |= scaler << GRPWM_CTRL_SCSEL_BIT;
+	regs->ctrl = ctrl;
+
+	/* Load the selected scaler */
+	regs->scaler = value;
+}
+
+/* Write Wave form RAM.
+ *
+ * data:   words to copy into the RAM, or NULL to zero-fill it
+ * length: number of 32-bit words to write (all callers pass word counts:
+ *         grpwm_hw_reset() passes priv->wlength, grpwm_config_channel()
+ *         passes cfg->wave_data_length which is also used for the STOP
+ *         word index)
+ *
+ * Bug fix: the zero-fill path decremented the count by 4 per word written
+ * while the copy path consumed one count per word, so a NULL-data clear
+ * only zeroed a quarter of the requested length.
+ */
+static void grpwm_write_wram(
+	struct grpwm_regs *regs,
+	unsigned int *data,
+	int length)
+{
+	unsigned int *end;
+	volatile unsigned int *pos;
+
+	pos = &regs->wram[0];
+
+	/* Write RAM */
+	if ( data ) {
+		end = data + length;
+		while ( data < end ) {
+			*pos++ = *data++;
+		}
+	} else {
+		while( length > 0 ) {
+			*pos++ = 0;
+			length--;
+		}
+	}
+}
+
+/* Bring the core to a known disabled state: core disabled, pending
+ * interrupts cleared, all channel registers zeroed, wave form RAM
+ * cleared and every scaler set to the maximum divisor.
+ */
+static void grpwm_hw_reset(struct grpwm_priv *priv)
+{
+	int i;
+	struct grpwm_chan_priv *pwm;
+	struct grpwm_regs *regs = priv->regs;
+
+	/* Disable Core */
+	regs->ctrl = 0;
+
+	/* Clear all registers */
+	regs->ipend = 0xffffffff;
+	regs->wctrl = 0;
+
+	/* Init all PWM channels */
+	for (i=0; i<priv->channel_cnt; i++) {
+		pwm = priv->channels[i];
+		pwm->pwmregs->ctrl = 0;
+		pwm->pwmregs->period = 0;
+		pwm->pwmregs->comp = 0;
+		pwm->pwmregs->dbcomp = 0;
+		pwm->pwmregs->ctrl = 0; /* Twice because METH and POL requires EN=0 */
+	}
+
+	/* Clear RAM */
+	if ( priv->wave ) {
+		grpwm_write_wram(regs, NULL, priv->wlength);
+	}
+
+	/* Set max scaler */
+	for (i=0; i<priv->nscalers; i++) {
+		grpwm_scaler_set(regs, i, 0xffffffff);
+	}
+}
+
+/* Update one Channel but leaves the "Hold update" bit set
+ *
+ * The caller has already set the NOUP (hold update) bits in the core
+ * control register, so the period/compare/dead-band writes below do not
+ * take effect until grpwm_update_active() releases them for all channels
+ * at once.
+ *
+ * A bit mask of updated bits are returned. Currently only GRPWM_PCTRL_EN
+ * is reported, when the channel was enabled or disabled.
+ */
+static unsigned int grpwm_update_prepare_channel(
+	struct grpwm_priv *priv,
+	int channel,
+	struct grpwm_ioctl_update_chan *up
+	)
+{
+	struct grpwm_chan_priv *pwm;
+	struct grpwm_pwm_regs *pwmregs;
+	unsigned int ctrl;
+	unsigned int ret;
+
+	pwm = priv->channels[channel];
+	pwmregs = pwm->pwmregs;
+
+	/* Read channel control register */
+	ctrl = pwmregs->ctrl;
+	ret = 0;
+
+	/* Disable before updating so the new values start a clean cycle */
+	if ( up->options & GRPWM_UPDATE_OPTION_DISABLE ) {
+		ctrl &= ~GRPWM_PCTRL_EN;
+		pwmregs->ctrl = ctrl;
+		ret |= GRPWM_PCTRL_EN;
+	}
+
+	/* Hold the updates */
+	if ( up->options & (GRPWM_UPDATE_OPTION_PERIOD|
+	     GRPWM_UPDATE_OPTION_COMP|GRPWM_UPDATE_OPTION_DBCOMP) ) {
+
+		if ( up->options & (GRPWM_UPDATE_OPTION_PERIOD) ) {
+			DBG("GRPWM: UPDATING 0x%x: 0x%x\n", &pwmregs->period, up->period);
+			pwmregs->period = up->period;
+		}
+		if ( up->options & (GRPWM_UPDATE_OPTION_COMP) ) {
+			DBG("GRPWM: UPDATING 0x%x: 0x%x\n", &pwmregs->comp, up->compare);
+			pwmregs->comp = up->compare;
+		}
+		if ( up->options & (GRPWM_UPDATE_OPTION_DBCOMP) ) {
+			DBG("GRPWM: UPDATING 0x%x: 0x%x\n", &pwmregs->dbcomp, up->dbcomp);
+			pwmregs->dbcomp = up->dbcomp;
+		}
+	}
+
+	if ( up->options & GRPWM_UPDATE_OPTION_ENABLE ) {
+		ret |= GRPWM_PCTRL_EN;
+		pwmregs->ctrl = ctrl | GRPWM_PCTRL_EN;
+	}
+	return ret;
+}
+
+/* Release all "Hold update" bits so the prepared channel updates take
+ * effect simultaneously. When 'enable' is non-zero the core enable bit
+ * is recalculated: set if at least one channel is enabled, cleared when
+ * all channels are disabled.
+ */
+static void grpwm_update_active(struct grpwm_priv *priv, int enable)
+{
+	unsigned int ctrl;
+	int i;
+
+	ctrl = priv->regs->ctrl;
+
+	/* Make all "Update Hold" bits be cleared */
+	ctrl &= ~GRPWM_CTRL_NOUP;
+
+	/* A change in any of the Channel enable/disable bits? */
+	if ( enable ) {
+		ctrl &= ~GRPWM_CTRL_EN;
+		/* Core EN (bit 0) is ORed from the channels' EN bits (also bit 0) */
+		for(i=0; i<priv->channel_cnt; i++) {
+			ctrl |= priv->regs->pwms[i].ctrl & GRPWM_CTRL_EN;
+		}
+	}
+	priv->regs->ctrl = ctrl;
+}
+
+/* Configure the hardware of a channel according to the user configuration.
+ *
+ * Returns RTEMS_RESOURCE_IN_USE if the channel is currently enabled,
+ * RTEMS_INVALID_NAME on invalid options or wave-form parameters, and
+ * RTEMS_SUCCESSFUL on success.
+ */
+static rtems_status_code grpwm_config_channel(
+	struct grpwm_priv *priv,
+	int channel,
+	struct grpwm_ioctl_config *cfg
+	)
+{
+	struct grpwm_chan_priv *pwm;
+	unsigned int pctrl, wctrl=0;
+
+	pwm = priv->channels[channel];
+	/* Bug fix: test against the enable mask (GRPWM_PCTRL_EN), not the
+	 * bit number (GRPWM_PCTRL_EN_BIT == 0) which made this busy check
+	 * always false.
+	 */
+	if ( pwm->pwmregs->ctrl & GRPWM_PCTRL_EN ) {
+		return RTEMS_RESOURCE_IN_USE;
+	}
+	if ( cfg->options & ~GRPWM_CONFIG_OPTION_MASK ) {
+		return RTEMS_INVALID_NAME;
+	}
+	/* Dual compare mode requires the DCM capability bit */
+	if ( (cfg->options & GRPWM_CONFIG_OPTION_DUAL) &&
+	     ((priv->regs->cap1 & GRPWM_CAP_DCM) == 0) ) {
+		return RTEMS_INVALID_NAME;
+	}
+	/* IRQ set up */
+	pwm->isr_arg = cfg->isr_arg;
+	pwm->isr = cfg->isr;
+
+	pctrl = cfg->options |
+		(cfg->dbscaler << GRPWM_PCTRL_DBSC_BIT) |
+		(cfg->irqscaler << GRPWM_PCTRL_ISC_BIT) |
+		(cfg->scaler_index << GRPWM_PCTRL_SCSEL_BIT);
+
+	/* Set Wave form generation if available */
+	if ( !priv->wave || (priv->channel_cnt != (channel+1)) ) {
+		/* Wave Form not available for this channel (or core) */
+		if ( cfg->wave_activate || cfg->wave_data || cfg->wave_data_length ) {
+			return RTEMS_INVALID_NAME;
+		}
+	} else if ( cfg->wave_activate ) {
+		/* Enable Wave form generation */
+		DBG("GRPWM: ENABLING WAVE FORM GENERATION 0x%x\n", cfg->wave_data_length);
+
+		if ( cfg->wave_data ) {
+			grpwm_write_wram(priv->regs, cfg->wave_data, cfg->wave_data_length);
+		}
+
+		/* Write length register, and let user control Wave-Sync functionality */
+		wctrl = (((cfg->wave_data_length-1) << GRPWM_WCTRL_STOP_BIT) & GRPWM_WCTRL_STOP);
+		wctrl |= cfg->wave_synccfg & (GRPWM_WCTRL_WSYNCCFG|GRPWM_WCTRL_WSEN);
+		wctrl |= (cfg->wave_sync << 16) & 0x1fff0000;
+		priv->regs->wctrl = wctrl;
+
+		/* Enable Wave form */
+		pctrl |= GRPWM_PCTRL_WEN;
+	}
+
+	DBG("GRPWM: CONFIG: 0x%x, WAVE CONFIG: 0x%x\n", pctrl, wctrl);
+
+	pwm->pwmregs->ctrl = pctrl;
+
+	return RTEMS_SUCCESSFUL;
+}
+
+/* Interrupt service routine shared by all channels that use the same
+ * IRQ index. Dispatches to each pending channel's user ISR and then
+ * acknowledges the handled interrupts.
+ */
+static void grpwm_isr(void *arg)
+{
+	unsigned int ipend;
+	struct grpwm_chan_priv *pwm = arg;
+	struct grpwm_priv *priv = pwm->common;
+	int i;
+
+	/* Get current pending interrupts */
+	ipend = priv->regs->ipend;
+
+	for (i=0; i<priv->channel_cnt; i++) {
+		if ( ipend & (1<<i) ) {
+			pwm = priv->channels[i];
+			if ( pwm->isr ) {
+				pwm->isr(i, pwm->isr_arg);
+			}
+		}
+	}
+	/* Acknowledge only the interrupts that were seen above */
+	priv->regs->ipend = ipend;
+}
+
+/* I/O driver initialize entry: nothing to do here, all device setup is
+ * performed in the driver manager init stages.
+ */
+static rtems_device_driver grpwm_initialize(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	return RTEMS_SUCCESSFUL;
+}
+
+/* Open the device exclusively: only one user at a time. The dev_sem
+ * semaphore serializes the open-flag test-and-set.
+ */
+static rtems_device_driver grpwm_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	struct grpwm_priv *priv;
+	rtems_device_driver ret;
+	struct drvmgr_dev *dev;
+
+	if ( drvmgr_get_dev(&grpwm_drv_info.general, minor, &dev) ) {
+		DBG("Wrong minor %d\n", minor);
+		return RTEMS_INVALID_NAME;
+	}
+	priv = (struct grpwm_priv *)dev->priv;
+
+	/* Wait until we get semaphore */
+	if ( rtems_semaphore_obtain(priv->dev_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT) !=
+	     RTEMS_SUCCESSFUL ){
+		return RTEMS_INTERNAL_ERROR;
+	}
+
+	/* is device busy/taken? */
+	if ( priv->open ) {
+		ret=RTEMS_RESOURCE_IN_USE;
+		goto out;
+	}
+
+	/* Mark device taken */
+	priv->open = 1;
+
+	ret = RTEMS_SUCCESSFUL;
+out:
+	rtems_semaphore_release(priv->dev_sem);
+	return ret;
+}
+
+/* Close the device: resets the hardware and releases the exclusive-open
+ * flag.
+ *
+ * NOTE(review): unlike open(), this path does not take dev_sem before
+ * clearing the open flag, and it resets the hardware even if the device
+ * was never opened - confirm this is the intended semantics.
+ */
+static rtems_device_driver grpwm_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	struct grpwm_priv *priv;
+	struct drvmgr_dev *dev;
+
+	if ( drvmgr_get_dev(&grpwm_drv_info.general, minor, &dev) ) {
+		return RTEMS_INVALID_NAME;
+	}
+	priv = (struct grpwm_priv *)dev->priv;
+
+	/* Reset Hardware */
+	grpwm_hw_reset(priv);
+
+	/* Mark Device closed */
+	priv->open = 0;
+
+	return RTEMS_SUCCESSFUL;
+}
+
+/* read() is not supported; all access is performed through ioctl() */
+static rtems_device_driver grpwm_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	return RTEMS_UNSATISFIED;
+}
+
+/* write() is not supported; all access is performed through ioctl() */
+static rtems_device_driver grpwm_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	return RTEMS_UNSATISFIED;
+}
+
+/* ioctl() entry: implements capability query, channel configuration,
+ * scaler setup, synchronized channel updates and per-channel IRQ control.
+ */
+static rtems_device_driver grpwm_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+
+	struct grpwm_priv *priv;
+	struct drvmgr_dev *dev;
+	rtems_libio_ioctl_args_t *ioarg = (rtems_libio_ioctl_args_t *)arg;
+
+	if ( drvmgr_get_dev(&grpwm_drv_info.general, minor, &dev) ) {
+		return RTEMS_INVALID_NAME;
+	}
+	priv = (struct grpwm_priv *)dev->priv;
+
+	if (!ioarg)
+		return RTEMS_INVALID_NAME;
+
+	ioarg->ioctl_return = 0;
+	switch(ioarg->command) {
+	default: /* Not a valid command */
+		return RTEMS_NOT_DEFINED;
+
+	case GRPWM_IOCTL_GET_CAP:
+	{
+		struct grpwm_ioctl_cap *cap = (void *)ioarg->buffer;
+		if ( cap == NULL )
+			return RTEMS_INVALID_NAME;
+
+		/* Copy Capability registers to user */
+		cap->channel_cnt = priv->channel_cnt;
+		cap->pwm = priv->regs->cap1;
+		cap->wave = priv->regs->cap2;
+		break;
+	}
+	case GRPWM_IOCTL_SET_CONFIG:
+	{
+		struct grpwm_ioctl_config *cfg = (void *)ioarg->buffer;
+		if ( cfg == NULL )
+			return RTEMS_INVALID_NAME;
+		if ( cfg->channel >= priv->channel_cnt )
+			return RTEMS_INVALID_NAME;
+
+		return grpwm_config_channel(priv, cfg->channel, cfg);
+	}
+	case GRPWM_IOCTL_SET_SCALER:
+	{
+		unsigned int invalid_mask;
+		int i;
+		struct grpwm_ioctl_scaler *sc = ioarg->buffer;
+
+		if ( sc == NULL )
+			return RTEMS_INVALID_NAME;
+
+		/* Test if caller requests to set a scaler not existing */
+		invalid_mask = ~((1 << priv->nscalers) - 1);
+		if ( invalid_mask & sc->index_mask ) {
+			return RTEMS_INVALID_NAME;
+		}
+
+		/* Set scalers requested */
+		for (i=0; i<priv->nscalers; i++) {
+			if ( sc->index_mask & (1<<i) ) {
+				/* Update Scaler 'i' */
+				grpwm_scaler_set(priv->regs, i, sc->values[i]);
+			}
+		}
+		break;
+	}
+	case GRPWM_IOCTL_UPDATE:
+	{
+		struct grpwm_ioctl_update *up = ioarg->buffer;
+		unsigned int invalid_mask, pctrl = 0;
+		int i;
+
+		if ( up == NULL )
+			return RTEMS_INVALID_NAME;
+
+		/* Test if caller requests to update a channel not existing */
+		invalid_mask = ~((1 << priv->channel_cnt) - 1);
+		if ( invalid_mask & up->chanmask ) {
+			return RTEMS_INVALID_NAME;
+		}
+
+		/* In order for the changes to take effect at the same time, the "Hold update"
+		 * bits is set for all PWM channels that will be updated. The hold update bits
+		 * will be cleared at the same time for all channels.
+		 */
+		priv->regs->ctrl = (priv->regs->ctrl & ~GRPWM_CTRL_NOUP) |
+				(up->chanmask << GRPWM_CTRL_NOUP_BIT);
+
+		for (i=0; i<priv->channel_cnt; i++) {
+			if ( up->chanmask & (1<<i) ) {
+				/* Prepare update channel 'i' */
+				pctrl |= grpwm_update_prepare_channel(priv, i, &up->channels[i]);
+			}
+		}
+
+		/* 1. Update all channels requested,
+		 * 2. Enable the core if at least one channel is enabled
+		 * 3. Disable the core if all channels are disabled
+		 */
+		grpwm_update_active(priv, (pctrl & GRPWM_PCTRL_EN));
+
+		break;
+	}
+	case GRPWM_IOCTL_IRQ:
+	{
+		unsigned int data = (unsigned int)ioarg->buffer;
+		int channel = (data >> 8) & 0x7;
+		struct grpwm_chan_priv *pwm;
+		unsigned int pctrl;
+
+		/* Bug fix: reject channels that are not implemented. The 3-bit
+		 * field allows 0..7 but only channel_cnt entries of
+		 * priv->channels[] are allocated - indexing beyond that
+		 * dereferenced a NULL pointer.
+		 */
+		if ( channel >= priv->channel_cnt )
+			return RTEMS_INVALID_NAME;
+
+		pwm = priv->channels[channel];
+
+		if ( data & GRPWM_IRQ_CLEAR ) {
+			priv->regs->ipend |= (1<<channel);
+			drvmgr_interrupt_clear(priv->dev, pwm->irqindex);
+		}
+		if ( (data & 0x3) && !pwm->isr ) {
+			/* Enable IRQ but no ISR */
+			return RTEMS_INVALID_NAME;
+		}
+		pctrl = pwm->pwmregs->ctrl & ~(GRPWM_PCTRL_IEN|GRPWM_PCTRL_IT);
+		pctrl |= ((data & 0x3) << GRPWM_PCTRL_IEN_BIT);
+		pwm->pwmregs->ctrl = pctrl;
+		break;
+	}
+	}
+
+	return RTEMS_SUCCESSFUL;
+}
+
+#define MAX_CHANNEL 8
+/* IRQ index per channel when the core multiplexes channels onto a small
+ * number of interrupt lines (CAP1.SEP == 2).
+ *
+ * NOTE(review): rows cover npwm = 1..8 at array indices 0..7, so callers
+ * must index with (number of PWM channels - 1) to stay inside the table.
+ */
+char grpwm_irqindex_lookup[8][MAX_CHANNEL] =
+{
+/* Channel     1  2  3  4  5  6  7  8 */
+/* npwm 1 */  {0, 0, 0, 0, 0, 0, 0, 0},
+/* npwm 2 */  {0, 1, 0, 0, 0, 0, 0, 0},
+/* npwm 3 */  {0, 0, 0, 0, 0, 0, 0, 0},
+/* npwm 4 */  {0, 0, 0, 1, 0, 0, 0, 0},
+/* npwm 5 */  {0, 0, 0, 1, 2, 0, 0, 0},
+/* npwm 6 */  {0, 0, 0, 1, 1, 1, 0, 0},
+/* npwm 7 */  {0, 0, 0, 1, 1, 1, 2, 0},
+/* npwm 8 */  {0, 0, 0, 1, 1, 1, 2, 3}
+};
+
+/* Initialize one GRPWM device: probe capabilities from Plug&Play and the
+ * capability registers, allocate per-channel state, reset the hardware,
+ * create the device semaphore and register interrupt handlers.
+ * Returns 0 on success, -1 on failure.
+ */
+int grpwm_device_init(struct grpwm_priv *priv)
+{
+	struct amba_dev_info *ambadev;
+	struct ambapp_core *pnpinfo;
+	int mask, i, sepirq;
+	unsigned int wabits;
+	struct grpwm_chan_priv *pwm;
+	struct grpwm_regs *regs;
+
+	/* Get device information from AMBA PnP information */
+	ambadev = (struct amba_dev_info *)priv->dev->businfo;
+	if ( ambadev == NULL ) {
+		return -1;
+	}
+	pnpinfo = &ambadev->info;
+	priv->irq = pnpinfo->irq;
+	regs = priv->regs = (struct grpwm_regs *)pnpinfo->apb_slv->start;
+
+	DBG("GRPWM: 0x%08x irq %d\n", (unsigned int)regs, priv->irq);
+
+	/* Disable Core */
+	regs->ctrl = 0;
+
+	/* Clear all registers */
+	regs->ipend = 0xffffffff;
+	regs->wctrl = 0;
+
+	/* Find the number of PWM channels */
+	priv->channel_cnt = 1 + ((regs->cap1 & GRPWM_CAP_NPWM) >> GRPWM_CAP_NPWM_BIT);
+	pwm = grlib_calloc(priv->channel_cnt, sizeof(*pwm));
+	if ( !pwm )
+		return -1;
+
+	/* Init all PWM channels. The SEP capability tells how channel
+	 * interrupts are routed: one common IRQ (0), one IRQ per channel (1)
+	 * or a core-specific grouping taken from the lookup table (2).
+	 */
+	sepirq = ((regs->cap1 & GRPWM_CAP_SEP) >> GRPWM_CAP_SEP_BIT);
+	for (i=0; i<priv->channel_cnt; i++, pwm++) {
+		priv->channels[i] = pwm;
+		pwm->common = priv;
+		pwm->pwmregs = &regs->pwms[i];
+		if ( sepirq == 0 ) {
+			pwm->irqindex = 0;
+		} else if ( sepirq == 1 ) {
+			pwm->irqindex = i;
+		} else {
+			/* Bug fix: the table rows cover npwm = 1..8 at indices
+			 * 0..7, so index with channel_cnt-1. Indexing with
+			 * channel_cnt read out of bounds for 8-channel cores.
+			 */
+			pwm->irqindex =
+				grpwm_irqindex_lookup[priv->channel_cnt - 1][i];
+		}
+	}
+
+	/* Detect if Wave Form capability is availble for last PWM channel */
+	if ( regs->cap2 & GRPWM_CAP2_WPWM ) {
+		priv->wave = 1;
+
+		/* Wave form RAM length in words: 2^WABITS */
+		wabits = (regs->cap2 & GRPWM_CAP2_WABITS) >> GRPWM_CAP2_WABITS_BIT;
+		priv->wlength = 1 << wabits;
+	}
+	priv->nscalers = 1 + ((regs->cap1 & GRPWM_CAP_NSC) >> GRPWM_CAP_NSC_BIT);
+
+	grpwm_hw_reset(priv);
+
+	/* Device Semaphore created with count = 1 */
+	if ( rtems_semaphore_create(rtems_build_name('G', 'P', 'W', 'M'),
+		1,
+		RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|\
+		RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+		0,
+		&priv->dev_sem) != RTEMS_SUCCESSFUL ) {
+		return -1;
+	}
+
+	/* Register interrupt handler for all PWM channels */
+	mask = 0;
+	for (i=0; i<priv->channel_cnt; i++) {
+		pwm = priv->channels[i];
+		if ( (mask & (1 << pwm->irqindex)) == 0 ) {
+			/* Not registered interrupt handler for this IRQ index before,
+			 * we do it now.
+			 */
+			mask |= (1 << pwm->irqindex);
+			drvmgr_interrupt_register(
+				priv->dev,
+				pwm->irqindex,
+				"grpwm",
+				grpwm_isr,
+				pwm);
+		}
+	}
+
+	return 0;
+}
diff --git a/bsps/shared/grlib/scrub/memscrub.c b/bsps/shared/grlib/scrub/memscrub.c
new file mode 100644
index 0000000000..7c6ceb43e0
--- /dev/null
+++ b/bsps/shared/grlib/scrub/memscrub.c
@@ -0,0 +1,692 @@
+/* Memory Scrubber register driver
+ *
+ * COPYRIGHT (c) 2017.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <rtems/bspIo.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+
+#include <grlib/memscrub.h>
+
+/*#define STATIC*/
+#define STATIC static
+
+#define UNUSED __attribute__((unused))
+
+/*#define DEBUG 1*/
+
+#ifdef DEBUG
+#define DBG(x...) printf(x)
+#else
+#define DBG(x...)
+#endif
+
+#define REG_WRITE(addr, val) (*(volatile uint32_t *)(addr) = (uint32_t)(val))
+#define REG_READ(addr) (*(volatile uint32_t *)(addr))
+
+/*
+ * MEMORYSCRUBBER AHBS register fields
+ * DEFINED IN HEADER FILE
+ */
+
+/*
+ * MEMORYSCRUBBER AHBERC register fields
+ * DEFINED IN HEADER FILE
+ */
+
+/*
+ * MEMORYSCRUBBER STAT register fields
+ * DEFINED IN HEADER FILE
+ */
+
+/*
+ * MEMORYSCRUBBER CONFIG register fields
+ * DEFINED IN HEADER FILE
+ */
+
+/*
+ * MEMORYSCRUBBER ETHRES register fields
+ * DEFINED IN HEADER FILE
+ */
+
+/* MEMORYSCRUBBER Registers layout */
+struct memscrub_regs {
+ volatile uint32_t ahbstatus; /* 0x00 */
+ volatile uint32_t ahbfailing; /* 0x04 */
+ volatile uint32_t ahberc; /* 0x08 */
+ volatile uint32_t resv1; /* 0x0c */
+ volatile uint32_t status; /* 0x10 */
+ volatile uint32_t config; /* 0x14 */
+ volatile uint32_t rangel; /* 0x18 */
+ volatile uint32_t rangeh; /* 0x1c */
+ volatile uint32_t pos; /* 0x20 */
+ volatile uint32_t ethres; /* 0x24 */
+ volatile uint32_t init; /* 0x28 */
+ volatile uint32_t rangel2; /* 0x2c */
+ volatile uint32_t rangeh2; /* 0x30 */
+};
+
+#define DEVNAME_LEN 10
+struct memscrub_priv {
+ struct drvmgr_dev *dev;
+ char devname[DEVNAME_LEN];
+ struct memscrub_regs *regs;
+ int minor;
+ int burstlen;
+ int blockmask;
+ /* Cached error */
+ uint32_t last_status;
+ uint32_t last_address;
+ /* User defined ISR */
+ memscrub_isr_t isr;
+ void *isr_arg;
+};
+static struct memscrub_priv * memscrubpriv = NULL;
+
+STATIC int memscrub_init2(struct drvmgr_dev *dev);
+STATIC int memscrub_init(struct memscrub_priv *priv);
+
+void memscrub_isr(void *arg);
+
+struct drvmgr_drv_ops memscrub_ops =
+{
+ .init = {NULL, memscrub_init2, NULL, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+struct amba_dev_id memscrub_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_MEMSCRUB},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info memscrub_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_MEMSCRUB_ID,/* Driver ID */
+ "MEMSCRUB_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &memscrub_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ sizeof(struct memscrub_priv),
+ },
+ &memscrub_ids[0]
+};
+
+void memscrub_register_drv (void)
+{
+ drvmgr_drv_register(&memscrub_drv_info.general);
+}
+
+/* Probe the MEMSCRUB core: locate its registers via AMBA Plug&Play and
+ * detect burst length and block mask from the hardware.
+ *
+ * Returns MEMSCRUB_ERR_OK on success, MEMSCRUB_ERR_ERROR if no AMBA
+ * bus information is attached to the device.
+ */
+STATIC int memscrub_init(struct memscrub_priv *priv)
+{
+	struct ambapp_ahb_info *ahb;
+	struct amba_dev_info *ainfo = priv->dev->businfo;
+	unsigned int tmp;
+	int i,j;
+
+	/* Get device information from AMBA PnP information */
+	if (ainfo == NULL){
+		return MEMSCRUB_ERR_ERROR;
+	}
+
+	/* Find MEMSCRUB core from Plug&Play information */
+	ahb = ainfo->info.ahb_slv;
+	priv->regs = (struct memscrub_regs *)ahb->start[0];
+
+	DBG("MEMSCRUB regs 0x%08x\n", (unsigned int) priv->regs);
+
+	/* Find MEMSCRUB capabilities.
+	 * burstlen = 2^BURSTLEN, decoded from the status register field.
+	 */
+	tmp = REG_READ(&priv->regs->status);
+	i = (tmp & STAT_BURSTLEN) >> STAT_BURSTLEN_BIT;
+	for (j=1; i>0; i--) j <<= 1;
+	priv->burstlen = j;
+
+
+	/* If scrubber is active, we cannot stop it to read blockmask value.
+	 * blockmask == 0 is later used as "not yet probed" (see
+	 * memscrub_init_start which re-probes on demand).
+	 */
+	if (tmp & STAT_ACTIVE){
+		priv->blockmask = 0;
+	}else{
+		/* Detect block size in bytes and burst length: rangeh only
+		 * implements bits above the block size, so writing 0 and
+		 * reading back yields the block mask.
+		 */
+		tmp = REG_READ(&priv->regs->rangeh);
+		REG_WRITE(&priv->regs->rangeh, 0);
+		priv->blockmask = REG_READ(&priv->regs->rangeh);
+		REG_WRITE(&priv->regs->rangeh, tmp);
+	}
+
+	/* DEBUG print */
+	DBG("MEMSCRUB with following capabilities:\n");
+	DBG(" -Burstlength: %d\n", priv->burstlen);
+
+	return MEMSCRUB_ERR_OK;
+}
+
+/* Driver manager init stage 2: bind the (single) MEMSCRUB device,
+ * probe the hardware and clear any stale status.
+ *
+ * Returns DRVMGR_OK on success, DRVMGR_FAIL if a core is already bound
+ * or probing fails, DRVMGR_NOMEM if no private memory was allocated.
+ */
+STATIC int memscrub_init2(struct drvmgr_dev *dev)
+{
+	struct memscrub_priv *priv = dev->priv;
+
+	DBG("MEMSCRUB[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+
+	/* Driver is a singleton: only one MEMSCRUB core is supported */
+	if (memscrubpriv) {
+		DBG("Driver only supports one MEMSCRUB core\n");
+		return DRVMGR_FAIL;
+	}
+
+	if (priv==NULL){
+		return DRVMGR_NOMEM;
+	}
+
+	/* Assign priv */
+	priv->dev = dev;
+	strncpy(&priv->devname[0], "memscrub0", DEVNAME_LEN);
+	memscrubpriv=priv;
+
+	/* Initialize driver struct. On failure, undo the singleton
+	 * registration above so the user API does not operate on a
+	 * partially initialized device (regs may be unset).
+	 */
+	if (memscrub_init(priv) != MEMSCRUB_ERR_OK){
+		memscrubpriv = NULL;
+		return DRVMGR_FAIL;
+	}
+
+	/* Startup Action:
+	 *	- Clear status
+	 *	- Register ISR
+	 */
+
+	/* Initialize hardware by clearing its status */
+	REG_WRITE(&priv->regs->ahbstatus, 0);
+	REG_WRITE(&priv->regs->status, 0);
+
+	return DRVMGR_OK;
+}
+
+
+/* Start the scrubber in initialization mode: fill the configured range
+ * with `value`.
+ *
+ * value   - data word written to the whole range
+ * delay   - inter-burst delay field of the CONFIG register
+ * options - additional CONFIG flags (mode/delay bits are masked out)
+ *
+ * Returns MEMSCRUB_ERR_OK, or MEMSCRUB_ERR_ERROR if the driver is not
+ * initialized or the scrubber is already active.
+ */
+int memscrub_init_start(uint32_t value, uint8_t delay, int options)
+{
+	struct memscrub_priv *priv = memscrubpriv;
+	uint32_t sts, tmp;
+	int i;
+
+	if (priv==NULL){
+		DBG("MEMSCRUB not init.\n");
+		return MEMSCRUB_ERR_ERROR;
+	}
+
+	/* Check if scrubber is active */
+	sts = REG_READ(&priv->regs->status);
+	if (sts & STAT_ACTIVE){
+		DBG("MEMSCRUB running.\n");
+		return MEMSCRUB_ERR_ERROR;
+	}
+
+	/* Check if we need to probe blockmask (deferred from memscrub_init
+	 * when the scrubber was active at probe time)
+	 */
+	if (priv->blockmask == 0){
+		/* Detect block size in bytes and burst length: rangeh only
+		 * implements bits above the block size, so writing 0 and
+		 * reading back yields the block mask.
+		 */
+		tmp = REG_READ(&priv->regs->rangeh);
+		REG_WRITE(&priv->regs->rangeh, 0);
+		priv->blockmask = REG_READ(&priv->regs->rangeh);
+		REG_WRITE(&priv->regs->rangeh, tmp);
+	}
+
+	/* Set data value: one init-register write per 32-bit word of a
+	 * block (blockmask is the block size in bytes minus one)
+	 */
+	for (i=0; i<priv->blockmask; i+=4){
+		REG_WRITE(&priv->regs->init,value);
+	}
+
+	/* Clear unused bits */
+	options = options & ~(CONFIG_MODE | CONFIG_DELAY);
+
+	/* Enable scrubber */
+	REG_WRITE(&priv->regs->config, options |
+		((delay << CONFIG_DELAY_BIT) & CONFIG_DELAY) |
+		CONFIG_MODE_INIT | CONFIG_SCEN);
+
+	DBG("MEMSCRUB INIT STARTED\n");
+
+	return MEMSCRUB_ERR_OK;
+}
+
+int memscrub_scrub_start(uint8_t delay, int options)
+{
+ struct memscrub_priv *priv = memscrubpriv;
+ uint32_t ctrl,sts;
+
+ if (priv==NULL){
+ DBG("MEMSCRUB not init.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+
+ /* Check if scrubber is active */
+ sts = REG_READ(&priv->regs->status);
+ if (sts & STAT_ACTIVE){
+ /* Check if mode is not init */
+ ctrl = REG_READ(&priv->regs->config);
+ if ((ctrl & CONFIG_MODE)==CONFIG_MODE_INIT){
+ DBG("MEMSCRUB init running.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+ }
+
+ /* Clear unused bits */
+ options = options & ~(CONFIG_MODE | CONFIG_DELAY);
+
+ /* Enable scrubber */
+ REG_WRITE(&priv->regs->config, options |
+ ((delay << CONFIG_DELAY_BIT) & CONFIG_DELAY) |
+ CONFIG_MODE_SCRUB | CONFIG_SCEN);
+
+ DBG("MEMSCRUB SCRUB STARTED\n");
+
+ return MEMSCRUB_ERR_OK;
+}
+
+int memscrub_regen_start(uint8_t delay, int options)
+{
+ struct memscrub_priv *priv = memscrubpriv;
+ uint32_t ctrl,sts;
+
+ if (priv==NULL){
+ DBG("MEMSCRUB not init.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+
+ /* Check if scrubber is active */
+ sts = REG_READ(&priv->regs->status);
+ if (sts & STAT_ACTIVE){
+ /* Check if mode is not init */
+ ctrl = REG_READ(&priv->regs->config);
+ if ((ctrl & CONFIG_MODE)==CONFIG_MODE_INIT){
+ DBG("MEMSCRUB init running.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+ }
+
+ /* Clear unused bits */
+ options = options & ~(CONFIG_MODE | CONFIG_DELAY);
+
+ /* Enable scrubber */
+ REG_WRITE(&priv->regs->config, options |
+ ((delay << CONFIG_DELAY_BIT) & CONFIG_DELAY) |
+ CONFIG_MODE_REGEN | CONFIG_SCEN);
+
+ DBG("MEMSCRUB REGEN STARTED\n");
+
+ return MEMSCRUB_ERR_OK;
+}
+
+/* Disable the scrubber and wait for the current operation to finish.
+ *
+ * Returns MEMSCRUB_ERR_OK, or MEMSCRUB_ERR_ERROR if the driver is not
+ * initialized.
+ */
+int memscrub_stop(void)
+{
+	struct memscrub_priv *priv = memscrubpriv;
+
+	if (priv==NULL){
+		DBG("MEMSCRUB not init.\n");
+		return MEMSCRUB_ERR_ERROR;
+	}
+
+	/* Disable scrubber */
+	REG_WRITE(&priv->regs->config, 0);
+
+	/* Wait until finished. NOTE: busy-waits without a timeout; relies
+	 * on the hardware deasserting STAT_ACTIVE after disable.
+	 */
+	while(REG_READ(&priv->regs->status) & STAT_ACTIVE){};
+
+	DBG("MEMSCRUB STOPPED\n");
+
+	return MEMSCRUB_ERR_OK;
+}
+
+int memscrub_range_set(uint32_t start, uint32_t end)
+{
+ struct memscrub_priv *priv = memscrubpriv;
+
+ if (priv==NULL){
+ DBG("MEMSCRUB not init.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+
+ if (end <= start){
+ DBG("MEMSCRUB wrong address.\n");
+ return MEMSCRUB_ERR_EINVAL;
+ }
+
+ /* Check if scrubber is active */
+ if (REG_READ(&priv->regs->status) & STAT_ACTIVE){
+ DBG("MEMSCRUB running.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+
+ /* Set range */
+ REG_WRITE(&priv->regs->rangel, start);
+ REG_WRITE(&priv->regs->rangeh, end);
+
+ DBG("MEMSCRUB range: 0x%08x-0x%08x\n",
+ (unsigned int) start,
+ (unsigned int) end);
+
+ return MEMSCRUB_ERR_OK;
+}
+
+int memscrub_secondary_range_set(uint32_t start, uint32_t end)
+{
+ struct memscrub_priv *priv = memscrubpriv;
+
+ if (priv==NULL){
+ DBG("MEMSCRUB not init.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+
+ if (end <= start){
+ DBG("MEMSCRUB wrong address.\n");
+ return MEMSCRUB_ERR_EINVAL;
+ }
+
+ /* Check if scrubber is active */
+ if (REG_READ(&priv->regs->status) & STAT_ACTIVE){
+ DBG("MEMSCRUB running.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+
+ /* Set range */
+ REG_WRITE(&priv->regs->rangel2, start);
+ REG_WRITE(&priv->regs->rangeh2, end);
+
+ DBG("MEMSCRUB 2nd range: 0x%08x-0x%08x\n",
+ (unsigned int) start,
+ (unsigned int) end);
+
+ return MEMSCRUB_ERR_OK;
+}
+
+int memscrub_range_get(uint32_t * start, uint32_t * end)
+{
+ struct memscrub_priv *priv = memscrubpriv;
+
+ if (priv==NULL){
+ DBG("MEMSCRUB not init.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+
+ if ((start==NULL) || (end == NULL)){
+ DBG("MEMSCRUB wrong pointer.\n");
+ return MEMSCRUB_ERR_EINVAL;
+ }
+
+ /* Get range */
+ *start = REG_READ(&priv->regs->rangel);
+ *end = REG_READ(&priv->regs->rangeh);
+
+ return MEMSCRUB_ERR_OK;
+}
+
+int memscrub_secondary_range_get(uint32_t * start, uint32_t * end)
+{
+ struct memscrub_priv *priv = memscrubpriv;
+
+ if (priv==NULL){
+ DBG("MEMSCRUB not init.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+
+ if ((start==NULL) || (end == NULL)){
+ DBG("MEMSCRUB wrong pointer.\n");
+ return MEMSCRUB_ERR_EINVAL;
+ }
+
+ /* Get range */
+ *start = REG_READ(&priv->regs->rangel2);
+ *end = REG_READ(&priv->regs->rangeh2);
+
+ return MEMSCRUB_ERR_OK;
+}
+
+int memscrub_ahberror_setup(int uethres, int cethres, int options)
+{
+ struct memscrub_priv *priv = memscrubpriv;
+
+ if (priv==NULL){
+ DBG("MEMSCRUB not init.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+
+ /* Set AHBERR */
+ REG_WRITE(&priv->regs->ahberc,
+ ((cethres << AHBERC_CECNTT_BIT) & AHBERC_CECNTT) |
+ ((uethres << AHBERC_UECNTT_BIT) & AHBERC_UECNTT) |
+ (options & (AHBERC_CECTE | AHBERC_UECTE)));
+
+ DBG("MEMSCRUB ahb err: UE[%d]:%s, CE[%d]:%s\n",
+ (unsigned int) uethres,
+ (options & AHBERC_UECTE)? "enabled":"disabled",
+ (unsigned int) cethres,
+ (options & AHBERC_CECTE)? "enabled":"disabled"
+ );
+
+ return MEMSCRUB_ERR_OK;
+}
+
+int memscrub_scruberror_setup(int blkthres, int runthres, int options)
+{
+ struct memscrub_priv *priv = memscrubpriv;
+
+ if (priv==NULL){
+ DBG("MEMSCRUB not init.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+
+ /* Set ETHRES */
+ REG_WRITE(&priv->regs->ethres,
+ ((blkthres << ETHRES_BECT_BIT) & ETHRES_BECT) |
+ ((runthres << ETHRES_RECT_BIT) & ETHRES_RECT) |
+ (options & (ETHRES_RECTE | ETHRES_BECTE)));
+
+ DBG("MEMSCRUB scrub err: BLK[%d]:%s, RUN[%d]:%s\n",
+ (unsigned int) blkthres,
+ (options & ETHRES_BECTE)? "enabled":"disabled",
+ (unsigned int) runthres,
+ (options & ETHRES_RECTE)? "enabled":"disabled"
+ );
+
+ return MEMSCRUB_ERR_OK;
+}
+
+int memscrub_scrub_position(uint32_t * position)
+{
+ struct memscrub_priv *priv = memscrubpriv;
+
+ if (priv==NULL){
+ DBG("MEMSCRUB not init.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+
+ if (position==NULL){
+ DBG("MEMSCRUB wrong pointer.\n");
+ return MEMSCRUB_ERR_EINVAL;
+ }
+
+ *position = REG_READ(&priv->regs->pos);
+
+ return MEMSCRUB_ERR_OK;
+}
+
+/* Install a user ISR callback for scrubber interrupts.
+ *
+ * isr  - user callback, invoked by memscrub_isr with (arg, failing
+ *        address, ahbstatus, status)
+ * data - opaque argument passed to the callback
+ *
+ * The interrupt enables in ETHRES, AHBERC and CONFIG are temporarily
+ * masked while the handler is swapped, then restored, so no interrupt
+ * can fire with a half-installed handler. The low-level IRQ handler is
+ * registered with the driver manager only on the first call.
+ *
+ * Returns MEMSCRUB_ERR_OK, MEMSCRUB_ERR_ERROR if not initialized, or
+ * MEMSCRUB_ERR_EINVAL on a NULL callback.
+ */
+int memscrub_isr_register(memscrub_isr_t isr, void * data)
+{
+	struct memscrub_priv *priv = memscrubpriv;
+	unsigned int ethres, ahberc, config;
+
+	if (priv==NULL){
+		DBG("MEMSCRUB not init.\n");
+		return MEMSCRUB_ERR_ERROR;
+	}
+
+	if (isr==NULL){
+		DBG("MEMSCRUB wrong pointer.\n");
+		return MEMSCRUB_ERR_EINVAL;
+	}
+
+	/* Mask interrupts */
+	ethres = REG_READ(&priv->regs->ethres);
+	REG_WRITE(&priv->regs->ethres, ethres & ~(ETHRES_RECTE | ETHRES_BECTE));
+
+	ahberc = REG_READ(&priv->regs->ahberc);
+	REG_WRITE(&priv->regs->ahberc, ahberc & ~(AHBERC_CECTE | AHBERC_UECTE));
+
+	/* NOTE(review): CONFIG_IRQD is cleared here and restored below --
+	 * presumably the "interrupt on done" enable; confirm against the
+	 * core documentation.
+	 */
+	config = REG_READ(&priv->regs->config);
+	REG_WRITE(&priv->regs->config, config & ~(CONFIG_IRQD));
+
+	/* Install IRQ handler if needed (first registration only) */
+	if (priv->isr == NULL){
+		drvmgr_interrupt_register(priv->dev, 0, priv->devname, memscrub_isr,
+					priv);
+	}
+
+	/* Install user ISR */
+	priv->isr=isr;
+	priv->isr_arg=data;
+
+	/* Unmask interrupts (restore saved enables) */
+	REG_WRITE(&priv->regs->ethres, ethres);
+
+	REG_WRITE(&priv->regs->ahberc, ahberc);
+
+	REG_WRITE(&priv->regs->config, config);
+
+	return MEMSCRUB_ERR_OK;
+}
+
+int memscrub_isr_unregister(void)
+{
+ struct memscrub_priv *priv = memscrubpriv;
+ unsigned int ethres, ahberc, config;
+
+ if (priv==NULL){
+ DBG("MEMSCRUB not init.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+
+ if (priv->isr==NULL){
+ DBG("MEMSCRUB wrong pointer.\n");
+ return MEMSCRUB_ERR_EINVAL;
+ }
+
+ /* Mask interrupts */
+ ethres = REG_READ(&priv->regs->ethres);
+ REG_WRITE(&priv->regs->ethres, ethres & ~(ETHRES_RECTE | ETHRES_BECTE));
+
+ ahberc = REG_READ(&priv->regs->ahberc);
+ REG_WRITE(&priv->regs->ahberc, ahberc & ~(AHBERC_CECTE | AHBERC_UECTE));
+
+ config = REG_READ(&priv->regs->config);
+ REG_WRITE(&priv->regs->config, config & ~(CONFIG_IRQD));
+
+ /* Uninstall IRQ handler if needed */
+ drvmgr_interrupt_unregister(priv->dev, 0, memscrub_isr, priv);
+
+ /* Uninstall user ISR */
+ priv->isr=NULL;
+ priv->isr_arg=NULL;
+
+ return MEMSCRUB_ERR_OK;
+}
+
+/* Read out and clear the scrubber error status (read-and-clear).
+ *
+ * ahbaccess   - receives the failing AHB address register
+ * ahbstatus   - receives the AHB status register (before clearing)
+ * scrubstatus - receives the scrubber status register (before clearing)
+ *
+ * The CE/UE counters are preserved unless the corresponding threshold
+ * was crossed, so the counts keep accumulating toward their thresholds.
+ *
+ * Returns MEMSCRUB_ERR_OK, MEMSCRUB_ERR_ERROR if not initialized, or
+ * MEMSCRUB_ERR_EINVAL on a NULL output pointer.
+ */
+int memscrub_error_status(uint32_t *ahbaccess, uint32_t *ahbstatus,
+		uint32_t *scrubstatus)
+{
+	struct memscrub_priv *priv = memscrubpriv;
+	uint32_t mask, ahbstatus_val;
+
+	if (priv==NULL){
+		DBG("MEMSCRUB not init.\n");
+		return MEMSCRUB_ERR_ERROR;
+	}
+
+	if ((ahbaccess==NULL) || (ahbstatus==NULL) || (scrubstatus == NULL)){
+		DBG("MEMSCRUB wrong pointer.\n");
+		return MEMSCRUB_ERR_EINVAL;
+	}
+
+	/* Get hardware status */
+	*ahbaccess = REG_READ(&priv->regs->ahbfailing);
+	*ahbstatus = ahbstatus_val = REG_READ(&priv->regs->ahbstatus);
+	*scrubstatus = REG_READ(&priv->regs->status);
+
+	/* Clear error status. Bits kept in `mask` survive the write-back. */
+	mask = 0;
+	/* Clear CECNT only if we crossed the CE threshold*/
+	if ((ahbstatus_val & AHBS_CE) == 0){
+		/* Don't clear the CECNT */
+		mask |= AHBS_CECNT;
+	}
+	/* Clear UECNT only if we crossed the UE threshold*/
+	if ((ahbstatus_val & (AHBS_NE|AHBS_CE|AHBS_SBC|AHBS_SEC)) != AHBS_NE){
+		/* Don't clear the UECNT */
+		mask |= AHBS_UECNT;
+	}
+	REG_WRITE(&priv->regs->ahbstatus, ahbstatus_val & mask);
+	REG_WRITE(&priv->regs->status,0);
+
+	return MEMSCRUB_ERR_OK;
+}
+
+int memscrub_active(void)
+{
+ struct memscrub_priv *priv = memscrubpriv;
+
+ if (priv==NULL){
+ DBG("MEMSCRUB not init.\n");
+ return MEMSCRUB_ERR_ERROR;
+ }
+
+ return REG_READ(&priv->regs->status) & STAT_ACTIVE;
+}
+
+void memscrub_isr(void *arg)
+{
+ struct memscrub_priv *priv = arg;
+ uint32_t fadr, ahbstatus, status, mask;
+
+ /* Get hardware status */
+ ahbstatus = REG_READ(&priv->regs->ahbstatus);
+ if ((ahbstatus & (AHBS_NE|AHBS_DONE)) == 0){
+ return;
+ }
+
+ /* IRQ generated by MEMSCRUB core... handle it here */
+
+ /* Get Failing address */
+ fadr = REG_READ(&priv->regs->ahbfailing);
+
+ /* Get Status */
+ status = REG_READ(&priv->regs->status);
+
+ /* Clear error status */
+ mask = 0;
+ /* Clear CECNT only if we crossed the CE threshold*/
+ if ((ahbstatus & AHBS_CE) == 0){
+ /* Don't clear the CECNT */
+ mask |= AHBS_CECNT;
+ }
+ /* Clear UECNT only if we crossed the UE threshold*/
+ if ((ahbstatus & (AHBS_NE|AHBS_CE|AHBS_SBC|AHBS_SEC)) != AHBS_NE){
+ /* Don't clear the UECNT */
+ mask |= AHBS_UECNT;
+ }
+ REG_WRITE(&priv->regs->ahbstatus, ahbstatus & mask);
+ REG_WRITE(&priv->regs->status,0);
+
+ /* Let user handle error */
+ (priv->isr)(priv->isr_arg, fadr, ahbstatus, status);
+
+ return;
+}
diff --git a/bsps/shared/grlib/slink/grslink.c b/bsps/shared/grlib/slink/grslink.c
new file mode 100644
index 0000000000..5a3b5b5ea6
--- /dev/null
+++ b/bsps/shared/grlib/slink/grslink.c
@@ -0,0 +1,664 @@
+/*
+ * This file contains the RTEMS GRSLINK SLINK master driver
+ *
+ * COPYRIGHT (c) 2009.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ * Comments concerning current driver implementation:
+ *
+ * The SLINK specification says that there are three IO cards that are capable
+ * of transmitting data. But these IO cards can have the address range 0 to 3,
+ * and an 'For information only' comment explains that the current
+ * implementation has receive buffers for ".. x 4 (IO cards)".
+ * Because of this the driver has four queues, one for each IO card 0 - 3.
+ * When the addressing convention used for the IO cards is known, the number of
+ * queues may be lowered to three.
+ *
+ */
+
+#include <stdlib.h>
+
+#include <bsp.h>
+#include <grlib/grslink.h>
+#include <grlib/ambapp.h>
+#include <grlib/grlib.h>
+
+#include <grlib/grlib_impl.h>
+
+#ifndef GAISLER_SLINK
+#define GAISLER_SLINK 0x02F
+#endif
+
+/* Enable debug output? */
+/* #define DEBUG */
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/* Bits and fields in SLINK transmit word */
+#define SLINK_RW (1 << 23)
+#define SLINK_CHAN_POS 16
+
+/* Local types */
+typedef struct {
+ volatile unsigned int clockscale;
+ volatile unsigned int ctrl;
+ volatile unsigned int nullwrd;
+ volatile unsigned int sts;
+ volatile unsigned int msk;
+ volatile unsigned int abase;
+ volatile unsigned int bbase;
+ volatile unsigned int td;
+ volatile unsigned int rd;
+} SLINK_regs;
+
+typedef struct {
+ char readstat; /* Status of READ operation */
+ char seqstat; /* Status of SEQUENCE operation */
+ unsigned char scnt; /* Number of SEQUENCE words transferred */
+} SLINK_status;
+
+typedef struct {
+ int size;
+ unsigned int *buf;
+ unsigned int *first;
+ unsigned int *last;
+ unsigned int *max;
+ int full;
+} SLINK_queue;
+
+typedef struct {
+ SLINK_regs *reg; /* Pointer to core registers */
+ SLINK_status *status; /* Driver status information */
+ void (*slink_irq_handler)(int); /* Handler for INTERRUPT */
+ void (*slink_seq_change)(int); /* Callback on SEQUENCE change */
+ int rword; /* Placeholder for READ response */
+ rtems_id read_sem; /* Semaphore for blocking SLINK_read */
+ SLINK_queue *queues; /* Receive queues */
+#ifdef SLINK_COLLECT_STATISTICS
+ SLINK_stats *stats; /* Core statistics, optional */
+#endif
+} SLINK_cfg;
+
+
+static SLINK_cfg *cfg = NULL;
+
+/**** SLINK driver queues for unsolicited and INTERRUPT requests ****/
+
+/* Function: SLINK_createqueues
+ * Arguments: size: Number of elements in each queue
+ * Returns: 0 on success, -1 on failure
+ * Description: Creates SLINK_NUMQUEUES queues, one for each IO card
+ * that can send data. The pointers to the queues is saved in the driver
+ * config structure.
+ */
+static int SLINK_createqueues(int size)
+{
+	SLINK_queue *q;
+	int i, j;
+
+	if ((q = grlib_malloc(SLINK_NUMQUEUES*sizeof(*q))) == NULL)
+		goto slink_qiniterr1;
+
+	for (i = 0; i < SLINK_NUMQUEUES; i++) {
+		q[i].size = size;
+		if ((q[i].buf = grlib_malloc(size*sizeof(int))) == NULL)
+			goto slink_qiniterr2;
+		q[i].first = q[i].last = q[i].buf;
+		q[i].max = q[i].buf + (size-1);
+		q[i].full = 0;
+	}
+
+	cfg->queues = q;
+
+	return 0;
+
+ slink_qiniterr2:
+	/* Free the buffers of the queues set up so far. (Was free(q[i].buf),
+	 * which freed the just-failed NULL buffer i times and leaked
+	 * buffers 0..i-1.)
+	 */
+	for (j = 0; j < i; j++)
+		free(q[j].buf);
+	free(q);
+ slink_qiniterr1:
+	return -1;
+}
+
+/*
+ * Function: SLINK_destroyqueues
+ * Arguments: None
+ * Returns: Nothing
+ * Description: Frees the memory occupied by the queues in cfg->queues
+ */
+/*
+ static void SLINK_destroyqueues(void)
+ {
+ int i;
+
+ for(i = 0; i < SLINK_NUMQUEUES; i++)
+ free(cfg->queues[i].buf);
+
+ free(cfg->queues);
+}
+*/
+
+/*
+ * Function: SLINK_enqueue
+ * Arguments: Received SLINK word
+ * Returns: Nothing
+ * Description:
+ */
+/*
+ * Function: SLINK_enqueue
+ * Arguments: Received SLINK word
+ * Returns: Nothing
+ * Description: Places the word in its IO card's receive queue, or counts
+ * it as lost if the queue is full or the card number is out of range.
+ */
+static void SLINK_enqueue(unsigned int slink_wrd)
+{
+	SLINK_queue *ioq;
+
+	/* Validate the card number BEFORE using it as a queue index; the
+	 * original code dereferenced cfg->queues[cardnum] first, which is
+	 * an out-of-bounds access for cardnum >= SLINK_NUMQUEUES.
+	 */
+	if (SLINK_WRD_CARDNUM(slink_wrd) < SLINK_NUMQUEUES) {
+		ioq = cfg->queues + SLINK_WRD_CARDNUM(slink_wrd);
+		if (!ioq->full) {
+			*ioq->last = slink_wrd;
+			ioq->last = (ioq->last >= ioq->max) ? ioq->buf : ioq->last+1;
+			ioq->full = ioq->last == ioq->first;
+			return;
+		}
+	}
+#ifdef SLINK_COLLECT_STATISTICS
+	cfg->stats->lostwords++;
+#endif
+}
+
+/**** SLINK driver helper functions ****/
+
+/*
+ * Function: SLINK_getaddr
+ * Arguments: amba_conf
+ * base: assigned to base of core registers
+ * irq: assigned to core irq lines
+ * Returns: Base address and IRQ via arguments, 0 if core is found, else -1
+ * Description: See above.
+ */
+static int SLINK_getaddr(int *base, int *irq)
+{
+ struct ambapp_apb_info c;
+
+ if (ambapp_find_apbslv(&ambapp_plb,VENDOR_GAISLER,GAISLER_SLINK,&c) == 1) {
+ *base = c.start;
+ *irq = c.irq;
+ return 0;
+ }
+ return -1;
+}
+
+/* Function: SLINK_calcscaler
+ * Arguments: sysfreq: System frequency in Hz
+ * Returns: Clock scaler register value
+ * Description: Calculates value for SLINK clock scaler register to attain
+ * a SLINK bus frequency as close to 6 MHz as possible. Please see the IP core
+ * documentation for a description of how clock scaling is implemented.
+ */
+static int SLINK_calcscaler(int sysfreq)
+{
+ int fact = sysfreq / SLINK_FREQ_HZ;
+ return ((fact/2-1) << 16) | (fact % 2 ? fact/2 : fact/2-1);
+}
+
+
+/*
+ * Function: SLINK_getsysfreq
+ * Arguments: None
+ * Returns: System frequency in Hz, or 0 if system timer is not found.
+ * Description: Looks at the timer to determine system frequency. Makes use
+ * of AMBA Plug'n'Play.
+ */
+static int SLINK_getsysfreq(void)
+{
+ struct ambapp_apb_info t;
+ struct gptimer_regs *tregs;
+
+ if (ambapp_find_apbslv(&ambapp_plb,VENDOR_GAISLER,GAISLER_GPTIMER,&t)==1) {
+ tregs = (struct gptimer_regs *)t.start;
+ DBG("SLINK_getsysfreq returning %d\n",
+ (tregs->scaler_reload+1)*1000*1000);
+ return (tregs->scaler_reload+1)*1000*1000;
+ }
+ return 0;
+}
+
+/*
+ * Function: SLINK_interrupt_handler
+ * Arguments: v: not used
+ * Returns: Nothing
+ * Description: Interrupt handles checks RNE, SEQUENCE and error status
+ * bits. Reads word from receive queue and distinguishes between INTERRUPT,
+ * READ responses and SLAVE-WORD-SEND. When an INTERRUPT transfer is detected
+ * the handler calls the user specified slink_irq_handler with the received
+ * word. READ responses are saved and given to SLINK_read via a private
+ * variable. SLAVE-WORD-SEND transfers are placed in the IO card's receive
+ * queue.
+ */
+static rtems_isr SLINK_interrupt_handler(rtems_vector_number v)
+{
+	unsigned int sts;
+	unsigned int wrd;
+
+	/* Read all words from Receive queue */
+	while ((sts = cfg->reg->sts) & SLINK_S_RNE) {
+
+		/* Read first word in receive queue */
+		wrd = cfg->reg->rd;
+
+		/* Check channel value to determine action */
+		switch (SLINK_WRD_CHAN(wrd)) {
+		case 0: /* Interrupt */
+			cfg->slink_irq_handler(wrd);
+#ifdef SLINK_COLLECT_STATISTICS
+			cfg->stats->interrupts++;
+#endif
+			break;
+		case 3: /* Read response, if no active READ, fall-through */
+			if (cfg->status->readstat == SLINK_ACTIVE) {
+				rtems_semaphore_release(cfg->read_sem);
+				cfg->status->readstat = SLINK_COMPLETED;
+				cfg->rword = wrd;
+				break;
+			}
+			/* fall through -- no READ in progress, treat as
+			 * unsolicited and enqueue it */
+		default: /* Unsolicited request */
+			SLINK_enqueue(wrd);
+			break;
+		}
+	}
+
+	/* Check sequence operation */
+	if (sts & SLINK_S_SC) {
+		/* SEQUENCE completed */
+		cfg->status->seqstat = SLINK_COMPLETED;
+		if (cfg->slink_seq_change)
+			cfg->slink_seq_change(SLINK_COMPLETED);
+#ifdef SLINK_COLLECT_STATISTICS
+		cfg->stats->seqcomp++;
+#endif
+	} else if (sts & SLINK_S_SA) {
+		/* SEQUENCE aborted; scnt = number of words transferred */
+		cfg->status->seqstat = SLINK_ABORTED;
+		cfg->status->scnt = (sts >> SLINK_S_SI_POS);
+		if (cfg->slink_seq_change)
+			cfg->slink_seq_change(SLINK_ABORTED);
+	}
+
+	/* Check error conditions */
+	if (sts & SLINK_S_PERR) {
+		/*
+		   Parity error detected, set seqstat if there is an ongoing
+		   sequence so that the calling application can decide if the
+		   sequence should be aborted
+		*/
+		if (cfg->status->seqstat == SLINK_ACTIVE) {
+			cfg->status->seqstat = SLINK_PARERR;
+			if (cfg->slink_seq_change)
+				cfg->slink_seq_change(SLINK_PARERR);
+		}
+		/* Abort READ operation */
+		if (cfg->status->readstat == SLINK_ACTIVE) {
+			cfg->status->readstat = SLINK_PARERR;
+			rtems_semaphore_release(cfg->read_sem);
+		}
+#ifdef SLINK_COLLECT_STATISTICS
+		cfg->stats->parerr++;
+#endif
+	}
+	if (sts & SLINK_S_AERR) {
+		/* AMBA error response, sequence aborted */
+		cfg->status->seqstat = SLINK_AMBAERR;
+		cfg->status->scnt = sts >> SLINK_S_SI_POS;
+		if (cfg->slink_seq_change)
+			cfg->slink_seq_change(SLINK_AMBAERR);
+	}
+	if (sts & SLINK_S_ROV) {
+		/* Receive overflow, abort any ongoing READ */
+		if (cfg->status->readstat == SLINK_ACTIVE) {
+			cfg->status->readstat = SLINK_ROV;
+			rtems_semaphore_release(cfg->read_sem);
+		}
+/* NOTE(review): "SLINK_COLLECT_STATISICS" below is misspelled (missing T),
+ * so this counter is never compiled in; also it increments cfg->status
+ * rather than cfg->stats. Verify the intended field before fixing. */
+#ifdef SLINK_COLLECT_STATISICS
+		cfg->status->recov++;
+#endif
+	}
+
+	/* Clear processed bits */
+	cfg->reg->sts = sts;
+}
+
+/**** SLINK driver interface starts here ****/
+
+/* Function: SLINK_init
+ * Arguments: nullwrd: NULL word
+ * parity: Even (0) or Odd (1) parity
+ * interrupt_trans_handler: Function that handles interrupt requests
+ * sequence_callback: Callback on SEQUENCE status changes
+ * qsize: Size of each receive queue
+ * Returns: 0 on success, -1 on failure
+ * Description: Initializes the SLINK core
+ */
+int SLINK_init(unsigned int nullwrd, int parity, int qsize,
+	       void (*interrupt_trans_handler)(int),
+	       void (*sequence_callback)(int))
+{
+	int base;
+	int irq;
+	rtems_status_code st;
+
+	/* Allocate private config structure (reused if init is retried) */
+	if (cfg == NULL && (cfg = grlib_malloc(sizeof(*cfg))) == NULL) {
+		DBG("SLINK_init: Could not allocate cfg structure\n");
+		goto slink_initerr1;
+	}
+
+	/* Create simple binary semaphore for blocking SLINK_read */
+	st = rtems_semaphore_create(rtems_build_name('S', 'L', 'R', '0'), 0,
+				    (RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|
+				     RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|
+				     RTEMS_NO_PRIORITY_CEILING), 0,
+				    &cfg->read_sem);
+	if (st != RTEMS_SUCCESSFUL) {
+		DBG("SLINK_init: Could not create semaphore\n");
+		goto slink_initerr1;
+	}
+
+	/* Initialize pointer to SLINK core registers and get IRQ line */
+	if (SLINK_getaddr(&base, &irq) == -1) {
+		DBG("SLINK_init: Could not find core\n");
+		goto slink_initerr2;
+	}
+	cfg->reg = (SLINK_regs*)base;
+
+	/* Allocate status structure and initialize members */
+	if ((cfg->status = grlib_calloc(1, sizeof(*cfg->status))) == NULL) {
+		DBG("SLINK_init: Could not allocate status structure\n");
+		goto slink_initerr2;
+	}
+	cfg->status->seqstat = SLINK_COMPLETED;
+	cfg->status->readstat = SLINK_COMPLETED;
+
+#ifdef SLINK_COLLECT_STATISTICS
+	/* Allocate statistics structure and initialize members */
+	if ((cfg->stats = grlib_calloc(1, sizeof(*cfg->stats))) == NULL) {
+		DBG("SLINK_init: Could not allocate statistics structure\n");
+		goto slink_initerr3;
+	}
+#endif
+
+	/* Allocate and initialize queues */
+	if (SLINK_createqueues(qsize) == -1) {
+		DBG("SLINK_init: Could not create queues\n");
+		goto slink_initerr3;
+	}
+
+	/* Configure core registers */
+	cfg->reg->clockscale = SLINK_calcscaler(SLINK_getsysfreq());
+	cfg->reg->ctrl = parity ? SLINK_C_PAR : 0;
+	cfg->reg->nullwrd = nullwrd;
+	cfg->reg->msk = (SLINK_M_PERRE | SLINK_M_AERRE | SLINK_M_ROVE |
+			 SLINK_M_RNEE | SLINK_M_SAE | SLINK_M_SCE);
+
+	/* Set-up INTERRUPT transfer handling */
+	cfg->slink_irq_handler = interrupt_trans_handler;
+
+	/* Save SEQUENCE callback */
+	cfg->slink_seq_change = sequence_callback;
+
+	/* Set-up IRQ handling */
+	set_vector(SLINK_interrupt_handler,irq+0x10,2);
+
+	return 0;
+
+ slink_initerr3:
+#ifdef SLINK_COLLECT_STATISTICS
+	free(cfg->stats);	/* allocated before queue creation failed */
+#endif
+	free(cfg->status);
+ slink_initerr2:
+	/* Release the semaphore created above and reset cfg to NULL:
+	 * the original code freed cfg but left the static pointer
+	 * dangling, so a retry of SLINK_init would use freed memory.
+	 */
+	rtems_semaphore_delete(cfg->read_sem);
+	free(cfg);
+	cfg = NULL;
+ slink_initerr1:
+	return -1;
+}
+
+/* Function: SLINK_start
+ * Description: Enables the core
+ */
+void SLINK_start(void)
+{
+ if (cfg != NULL)
+ cfg->reg->ctrl |= SLINK_C_SLE;
+}
+
+/* Function: SLINK_stop
+ * Description: Disables the core
+ */
+void SLINK_stop(void)
+{
+ if (cfg != NULL)
+ cfg->reg->ctrl &= ~SLINK_C_SLE;
+}
+
+/*
+ * Function: SLINK_read
+ * Arguments: data: Payload of data word
+ * channel: -
+ * reply: Reply from IO card
+ * Returns: 0 on success
+ * -(SLINK_PARERR, SLINK_ROV) on error or -SLINK_QFULL if transmit queue
+ * is full and software should try again.
+ * Description: Reads one word and returns the response in *reply unless there
+ * is an error. This function blocks until the READ operation is
+ * completed or aborted.
+ */
+int SLINK_read(int data, int channel, int *reply)
+{
+	DBG("SLINK_read: called..");
+
+	/* Arm readstat BEFORE writing the transmit register so the ISR can
+	 * match the response; only transmit if the FIFO is not full.
+	 */
+	if (cfg->reg->sts & SLINK_S_TNF) {
+		cfg->status->readstat = SLINK_ACTIVE;
+		cfg->reg->td = SLINK_RW | channel << SLINK_CHAN_POS | data;
+	} else {
+		DBG("queue FULL\n");
+		return -SLINK_QFULL; /* Transmit queue full */
+	}
+
+	/* Block until the operation has completed or has been aborted;
+	 * the semaphore is released by SLINK_interrupt_handler.
+	 */
+	rtems_semaphore_obtain(cfg->read_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+
+	if (cfg->status->readstat == SLINK_COMPLETED) {
+		*reply = cfg->rword;	/* response word saved by the ISR */
+#ifdef SLINK_COLLECT_STATISTICS
+		cfg->stats->reads++;
+#endif
+		DBG("returning 0\n");
+		return 0;
+	} else {
+		/* readstat holds the error cause (SLINK_PARERR/SLINK_ROV) */
+		DBG("returning error code\n");
+		return -cfg->status->readstat;
+	}
+}
+
+/*
+ * Function: SLINK_write
+ * Arguments: data: Payload of SLINK data word
+ * channel: Channel value (bits 22 downto 16) of receive
+ * register word
+ * Returns: 0 if command was placed in transmit queue
+ * -SLINK_QFULL if transmit queue was full (software should retry)
+ * Description: See above.
+ */
+int SLINK_write(int data, int channel)
+{
+ if (cfg->reg->sts & SLINK_S_TNF) {
+ cfg->reg->td = channel << SLINK_CHAN_POS | data;
+#ifdef SLINK_COLLECT_STATISTICS
+ cfg->stats->writes++;
+#endif
+ return 0;
+ }
+
+ return -SLINK_QFULL;
+}
+
+/*
+ * Function: SLINK_sequence
+ * Arguments: a: Array containing sequence commands
+ * b: Array where SEQUENCE responses will be stored
+ * n: Number of commands in a array
+ * channel: Sequence Channel Number
+ * reconly: Set to 1 if the SEQUENCE operation is receive only
+ * Returns: 0 if SEQUENCE could be started (SUCCESS)
+ * -1 if SEQUNCE was not started due to ongoing SEQUENCE
+ */
+int SLINK_seqstart(int *a, int *b, int n, int channel, int reconly)
+{
+ /* Only start a new SEQUENCE of the former SEQUENCE has completed */
+ if (cfg->status->seqstat == SLINK_ACTIVE ||
+ cfg->status->seqstat == SLINK_PARERR)
+ return -1;
+
+ /* Tell core about arrays */
+ cfg->reg->abase = (int)a;
+ cfg->reg->bbase = (int)b;
+
+ /* As far as software is concerned the sequence is now active */
+ cfg->status->seqstat = SLINK_ACTIVE;
+
+ /* Enable SEQUENCE operation with SCN = channel and SLEN = n-1 */
+ if (reconly == 1) {
+ cfg->reg->ctrl = (((n-1) << SLINK_C_SLEN_POS) | SLINK_C_SRO |
+ (channel << SLINK_C_SCN_POS) |
+ SLINK_C_SE | (cfg->reg->ctrl & 0xC000000F));
+ } else {
+ cfg->reg->ctrl = (((n-1) << SLINK_C_SLEN_POS) |
+ (channel << SLINK_C_SCN_POS) |
+ SLINK_C_SE | (cfg->reg->ctrl & 0xC000000F));
+ }
+
+#ifdef SLINK_COLLECT_STATISTICS
+ cfg->stats->sequences++;
+#endif
+
+ return 0;
+}
+
+
+/* Function: SLINK_seqabort
+ * Description: This function aborts an ongoing SEQUENCE. Software can tell
+ * when the SEQUENCE is aborted by polling SLINK_seqstat().
+ */
+void SLINK_seqabort(void)
+{
+ cfg->reg->ctrl = cfg->reg->ctrl | SLINK_C_AS;
+}
+
+
+/*
+ * Function: SLINK_seqstatus
+ * Returns: The current or status of the SEQUENCE operation:
+ * SLINK_COMPLETED, SLINK_ACTIVE, SLINK_PARERR, SLINK_AMBAERR,
+ * SLINK_ABORTED (these are defined in bsp/grslink.h)
+ * Description: Meaning of returned values:
+ * SLINK_ABORTED: Aborted before all operations completed.
+ * SLINK_ACTIVE: The core is busy processing the SEQUENCE
+ * SLINK_AMBAERR: The last SEQUENCE was aborted by an AMBA ERROR
+ * SLINK_COMPLETED: All words were transferred in the last SEQUENCE
+ * SLINK_PARERR: Parity error detected. Software may want to abort
+ *
+ * If the SEQUENCE was aborted SLINK_seqwrds() can be used to
+ * determine the number of completed operations.
+ */
+int SLINK_seqstatus(void)
+{
+ return cfg->status->seqstat;
+}
+
+/*
+ * Function: SLINK_seqwrds
+ * Returns: -1 for ongoing sequence
+ * 0 if all words were transferred in the last sequence
+ * number of words if the last SEQUENCE did not complete
+ * (SLINK_AMBAERR or SLINK_ABORTED is reported ny SLINK_seqstatus())
+ */
+int SLINK_seqwrds(void)
+{
+	switch (cfg->status->seqstat) {
+	case SLINK_COMPLETED: return 0;
+	/* The original "case SLINK_ACTIVE | SLINK_PARERR:" bitwise-ORed the
+	 * two status constants into a single unintended label; list both
+	 * states explicitly so each returns -1 (sequence still ongoing) as
+	 * documented above.
+	 */
+	case SLINK_ACTIVE: return -1;
+	case SLINK_PARERR: return -1;
+	default: return cfg->status->scnt;
+	}
+}
+
+/*
+ * Function: SLINK_hwstatus
+ * Returns: The SLINK core's status register. The register values can be
+ * interpreted with the help of macros defined in bsp/grslink.h.
+ */
+int SLINK_hwstatus(void)
+{
+ return cfg->reg->sts;
+}
+
+/*
+ * Function: SLINK_queuestatus
+ * Arguments: iocard: Queue which to check status for
+ * Returns: Number of elements in queue or -1 on non-existent queue
+ * Description: SLINK_queuestatus(queue) returns the number of elements in
+ * queue 'iocard'
+ */
+int SLINK_queuestatus(int iocard)
+{
+	unsigned int first, last;
+	SLINK_queue *ioq;
+
+	if (iocard >= SLINK_NUMQUEUES)
+		return -1;
+
+	ioq = cfg->queues + iocard;
+
+	/* Fast paths: completely full or completely empty ring */
+	if (ioq->full)
+		return ioq->size;
+	if (ioq->first == ioq->last)
+		return 0;
+
+	/* Convert the byte addresses of the ring pointers to element
+	 * indices so the difference counts queued words directly.
+	 */
+	first = ((unsigned int)ioq->first)/sizeof(unsigned int);
+	last = ((unsigned int)ioq->last)/sizeof(unsigned int);
+
+	/* Account for wrap-around when last has cycled past the buffer end */
+	return first < last ? last - first : ioq->size - first + last;
+}
+
+/*
+ * Function: SLINK_dequeue
+ * Arguments: iocard: IO card number
+ * elem: First element in IO card queue
+ * Returns: 0 on success or -1 on empty or non-existent queue
+ * Description:
+ */
+int SLINK_dequeue(int iocard, int *elem)
+{
+	if (iocard >= SLINK_NUMQUEUES)
+		return -1;
+
+	SLINK_queue *ioq = cfg->queues + iocard;
+
+	/* Non-empty when pointers differ, or when equal AND full flag set */
+	if (ioq->last != ioq->first || ioq->full) {
+		*elem = *ioq->first;
+		/* Advance first with wrap-around; queue can no longer be full */
+		ioq->first = (ioq->first >= ioq->max) ? ioq->buf : ioq->first+1;
+		ioq->full = 0;
+		return 0;
+	}
+	return -1;	/* empty queue */
+}
+
+/*
+ * Function: SLINK_statistics
+ * Returns: If the core has statistics colletion enabled this function returns
+ * a pointer to a struct containing statistics information, otherwise NULL.
+ */
+SLINK_stats *SLINK_statistics(void)
+{
+#ifdef SLINK_COLLECT_STATISTICS
+ return cfg->stats;
+#else
+ return NULL;
+#endif
+}
diff --git a/bsps/shared/grlib/spi/spictrl.c b/bsps/shared/grlib/spi/spictrl.c
new file mode 100644
index 0000000000..0c9f88c10f
--- /dev/null
+++ b/bsps/shared/grlib/spi/spictrl.c
@@ -0,0 +1,1018 @@
+/*
+ * SPICTRL SPI driver implmenetation
+ *
+ * COPYRIGHT (c) 2009.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bsp.h>
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <rtems/bspIo.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/spictrl.h>
+#include <grlib/ambapp.h>
+
+#include <rtems/libi2c.h>
+
+#include <grlib/grlib_impl.h>
+
+/*#define DEBUG 1*/
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#define STATIC
+#else
+#define DBG(x...)
+#define STATIC static
+#endif
+
+/*** CAPABILITY REGISTER 0x00 ***/
+#define SPICTRL_CAP_SSSZ_BIT 24
+#define SPICTRL_CAP_AMODE_BIT 18
+#define SPICTRL_CAP_ASELA_BIT 17
+#define SPICTRL_CAP_SSEN_BIT 16
+#define SPICTRL_CAP_FDEPTH_BIT 8
+#define SPICTRL_CAP_REV_BIT 0
+
+#define SPICTRL_CAP_SSSZ (0xff << SPICTRL_CAP_SSSZ_BIT)
+#define SPICTRL_CAP_AMODE (1<<SPICTRL_CAP_AMODE_BIT)
+#define SPICTRL_CAP_ASELA (1<<SPICTRL_CAP_ASELA_BIT)
+#define SPICTRL_CAP_SSEN (1 << SPICTRL_CAP_SSEN_BIT)
+#define SPICTRL_CAP_FDEPTH (0xff << SPICTRL_CAP_FDEPTH_BIT)
+#define SPICTRL_CAP_REV (0xff << SPICTRL_CAP_REV_BIT)
+
+/*** MODE REGISTER 0x20 ***/
+#define SPICTRL_MODE_AMEN_BIT 31
+#define SPICTRL_MODE_LOOP_BIT 30
+#define SPICTRL_MODE_CPOL_BIT 29
+#define SPICTRL_MODE_CPHA_BIT 28
+#define SPICTRL_MODE_DIV16_BIT 27
+#define SPICTRL_MODE_REV_BIT 26
+#define SPICTRL_MODE_MS_BIT 25
+#define SPICTRL_MODE_EN_BIT 24
+#define SPICTRL_MODE_LEN_BIT 20
+#define SPICTRL_MODE_PM_BIT 16
+#define SPICTRL_MODE_ASEL_BIT 14
+#define SPICTRL_MODE_FACT_BIT 13
+#define SPICTRL_MODE_CG_BIT 7
+#define SPICTRL_MODE_TAC_BIT 4
+
+#define SPICTRL_MODE_AMEN (1 << SPICTRL_MODE_AMEN_BIT)
+#define SPICTRL_MODE_LOOP (1 << SPICTRL_MODE_LOOP_BIT)
+#define SPICTRL_MODE_CPOL (1 << SPICTRL_MODE_CPOL_BIT)
+#define SPICTRL_MODE_CPHA (1 << SPICTRL_MODE_CPHA_BIT)
+#define SPICTRL_MODE_DIV16 (1 << SPICTRL_MODE_DIV16_BIT)
+#define SPICTRL_MODE_REV (1 << SPICTRL_MODE_REV_BIT)
+#define SPICTRL_MODE_MS (1 << SPICTRL_MODE_MS_BIT)
+#define SPICTRL_MODE_EN (1 << SPICTRL_MODE_EN_BIT)
+#define SPICTRL_MODE_LEN (0xf << SPICTRL_MODE_LEN_BIT)
+#define SPICTRL_MODE_PM (0xf << SPICTRL_MODE_PM_BIT)
+#define SPICTRL_MODE_ASEL (1 << SPICTRL_MODE_ASEL_BIT)
+#define SPICTRL_MODE_FACT (1 << SPICTRL_MODE_FACT_BIT)
+#define SPICTRL_MODE_CG (0x1f << SPICTRL_MODE_CG_BIT)
+#define SPICTRL_MODE_TAC (0x1 << SPICTRL_MODE_TAC_BIT)
+
+/*** EVENT REGISTER 0x24 ***/
+#define SPICTRL_EVENT_AT_BIT 15
+#define SPICTRL_EVENT_LT_BIT 14
+#define SPICTRL_EVENT_OV_BIT 12
+#define SPICTRL_EVENT_UN_BIT 11
+#define SPICTRL_EVENT_MME_BIT 10
+#define SPICTRL_EVENT_NE_BIT 9
+#define SPICTRL_EVENT_NF_BIT 8
+
+#define SPICTRL_EVENT_AT (1 << SPICTRL_EVENT_AT_BIT)
+#define SPICTRL_EVENT_LT (1 << SPICTRL_EVENT_LT_BIT)
+#define SPICTRL_EVENT_OV (1 << SPICTRL_EVENT_OV_BIT)
+#define SPICTRL_EVENT_UN (1 << SPICTRL_EVENT_UN_BIT)
+#define SPICTRL_EVENT_MME (1 << SPICTRL_EVENT_MME_BIT)
+#define SPICTRL_EVENT_NE (1 << SPICTRL_EVENT_NE_BIT)
+#define SPICTRL_EVENT_NF (1 << SPICTRL_EVENT_NF_BIT)
+
+/*** MASK REGISTER 0x28 ***/
+#define SPICTRL_MASK_ATE_BIT 15
+#define SPICTRL_MASK_LTE_BIT 14
+#define SPICTRL_MASK_OVE_BIT 12
+#define SPICTRL_MASK_UNE_BIT 11
+#define SPICTRL_MASK_MMEE_BIT 10
+#define SPICTRL_MASK_NEE_BIT 9
+#define SPICTRL_MASK_NFE_BIT 8
+
+#define SPICTRL_MASK_ATE (1 << SPICTRL_MASK_ATE_BIT)
+#define SPICTRL_MASK_LTE (1 << SPICTRL_MASK_LTE_BIT)
+#define SPICTRL_MASK_OVE (1 << SPICTRL_MASK_OVE_BIT)
+#define SPICTRL_MASK_UNE (1 << SPICTRL_MASK_UNE_BIT)
+#define SPICTRL_MASK_MMEE (1 << SPICTRL_MASK_MMEE_BIT)
+#define SPICTRL_MASK_NEE (1 << SPICTRL_MASK_NEE_BIT)
+#define SPICTRL_MASK_NFE (1 << SPICTRL_MASK_NFE_BIT)
+
+/*** COMMAND REGISTER 0x2c ***/
+#define SPICTRL_CMD_LST_BIT 22
+#define SPICTRL_CMD_LST (1 << SPICTRL_CMD_LST_BIT)
+
+/*** TRANSMIT REGISTER 0x30 ***/
+#define SPICTRL_TX_TDATA_BIT 0
+#define SPICTRL_TX_TDATA 0xffffffff
+
+/*** RECEIVE REGISTER 0x34 ***/
+#define SPICTRL_RX_RDATA_BIT 0
+#define SPICTRL_RX_RDATA 0xffffffff
+
+/*** SLAVE SELECT REGISTER 0x38 - VARIABLE ***/
+
+/*** AM CONFIGURATION REGISTER 0x40 ***/
+#define SPICTRL_AMCFG_ERPT_BIT 6
+#define SPICTRL_AMCFG_SEQ_BIT 5
+#define SPICTRL_AMCFG_STRICT_BIT 4
+#define SPICTRL_AMCFG_OVTB_BIT 3
+#define SPICTRL_AMCFG_OVDB_BIT 2
+#define SPICTRL_AMCFG_ACT_BIT 1
+#define SPICTRL_AMCFG_EACT_BIT 0
+
+#define SPICTRL_AMCFG_ERPT (1<<SPICTRL_AMCFG_ERPT_BIT)
+#define SPICTRL_AMCFG_SEQ (1<<SPICTRL_AMCFG_SEQ_BIT)
+#define SPICTRL_AMCFG_STRICT (1<<SPICTRL_AMCFG_STRICT_BIT)
+#define SPICTRL_AMCFG_OVTB (1<<SPICTRL_AMCFG_OVTB_BIT)
+#define SPICTRL_AMCFG_OVDB (1<<SPICTRL_AMCFG_OVDB_BIT)
+#define SPICTRL_AMCFG_ACT (1<<SPICTRL_AMCFG_ACT_BIT)
+#define SPICTRL_AMCFG_EACT (1<<SPICTRL_AMCFG_EACT_BIT)
+
+/* Per-device driver state. The libi2c bus descriptor must remain the first
+ * member: the libi2c callbacks cast the rtems_libi2c_bus_t pointer straight
+ * back to this structure.
+ */
+struct spictrl_priv {
+	rtems_libi2c_bus_t		i2clib_desc;	/* must be first member */
+	struct drvmgr_dev	*dev;		/* driver manager device handle */
+	struct spictrl_regs	*regs;		/* memory-mapped SPICTRL registers */
+	int			irq;
+	int			minor;		/* libi2c bus number */
+	unsigned int		core_freq_hz;	/* APB/core clock used for SCK divider */
+
+	/* Driver */
+	int			fdepth;		/* FIFO depth from capability register */
+	int			bits_per_char;
+	int			lsb_first;
+	int			txshift;	/* left-shift applied before TX reg write */
+	int			rxshift;	/* right-shift applied after RX reg read */
+	unsigned int		idle_char;	/* sent when caller supplies no TX data */
+	int			(*slvSelFunc)(void *regs, uint32_t addr, int select);
+
+	/* Automated Periodic transfers */
+	int			periodic_started;
+	struct spictrl_ioctl_config	periodic_cfg;
+};
+
+/******************* Driver Manager Part ***********************/
+
+int spictrl_device_init(struct spictrl_priv *priv);
+
+int spictrl_init2(struct drvmgr_dev *dev);
+int spictrl_init3(struct drvmgr_dev *dev);
+
+struct drvmgr_drv_ops spictrl_ops =
+{
+ .init = {NULL, spictrl_init2, spictrl_init3, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+struct amba_dev_id spictrl_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_SPICTRL},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info spictrl_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_SPICTRL_ID, /* Driver ID */
+ "SPICTRL_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &spictrl_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+ },
+ &spictrl_ids[0]
+};
+
+void spictrl_register_drv (void)
+{
+	DBG("Registering SPICTRL driver\n");
+	/* Hand the driver description over to the driver manager; devices are
+	 * attached later via the init1/init2/init3 callbacks.
+	 */
+	drvmgr_drv_register(&spictrl_drv_info.general);
+}
+
+/* Driver manager init level 2: allocate and attach per-device state */
+int spictrl_init2(struct drvmgr_dev *dev)
+{
+	struct spictrl_priv *priv;
+
+	DBG("SPICTRL[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+
+	/* Allocate zeroed per-device state and hook it onto the device */
+	priv = grlib_calloc(1, sizeof(*priv));
+	dev->priv = priv;
+	if (priv == NULL)
+		return DRVMGR_NOMEM;
+	priv->dev = dev;
+
+	/* This core will not find other cores, so we wait for init2() */
+
+	return DRVMGR_OK;
+}
+
+/* Driver manager init level 3: initialize the device and register the
+ * SPI bus with the libi2c layer under /dev.
+ */
+int spictrl_init3(struct drvmgr_dev *dev)
+{
+	struct spictrl_priv *priv;
+	char prefix[32];
+	char devName[32];
+	int rc;
+
+	priv = (struct spictrl_priv *)dev->priv;
+
+	/* Do initialization */
+
+	/* Initialize i2c library */
+	rc = rtems_libi2c_initialize();
+	if (rc != 0) {
+		DBG("SPICTRL: rtems_libi2c_initialize failed, exiting...\n");
+		free(dev->priv);
+		dev->priv = NULL;
+		return DRVMGR_FAIL;
+	}
+
+	/* I/O system registered and initialized
+	 * Now we take care of device initialization.
+	 */
+
+	/* Get frequency */
+	if ( drvmgr_freq_get(dev, DEV_APB_SLV, &priv->core_freq_hz) ) {
+		/* Release the private data like the other error paths in this
+		 * function do, otherwise it would be leaked.
+		 */
+		free(dev->priv);
+		dev->priv = NULL;
+		return DRVMGR_FAIL;
+	}
+
+	if ( spictrl_device_init(priv) ) {
+		free(dev->priv);
+		dev->priv = NULL;
+		return DRVMGR_FAIL;
+	}
+
+	/* Get Filesystem name prefix */
+	prefix[0] = '\0';
+	if ( drvmgr_get_dev_prefix(dev, prefix) ) {
+		/* Failed to get prefix, make sure of a unique FS name
+		 * by using the driver minor.
+		 */
+		sprintf(devName, "/dev/spi%d", dev->minor_drv+1);
+	} else {
+		/* Got special prefix, this means we have a bus prefix
+		 * And we should use our "bus minor"
+		 */
+		sprintf(devName, "/dev/%sspi%d", prefix, dev->minor_bus+1);
+	}
+
+	/* Register Bus for this Device */
+	rc = rtems_libi2c_register_bus(devName, &priv->i2clib_desc);
+	if (rc < 0) {
+		DBG("SPICTRL: rtems_libi2c_register_bus(%s) failed\n", devName);
+		free(dev->priv);
+		dev->priv = NULL;
+		return DRVMGR_FAIL;
+	}
+	priv->minor = rc;
+
+	return DRVMGR_OK;
+}
+
+/******************* Driver Implementation ***********************/
+
+STATIC rtems_status_code spictrl_libi2c_send_addr(rtems_libi2c_bus_t *bushdl,
+ uint32_t addr, int rw);
+
+/* Set as high frequency of SCK as possible but not higher than
+ * requested frequency (freq).
+ */
+static int spictrl_set_freq(struct spictrl_priv *priv, unsigned int freq)
+{
+	unsigned int core_freq_hz = priv->core_freq_hz;
+	unsigned int lowest_freq_possible;
+	unsigned int div, div16, pm, fact;
+
+	/* Lowest possible when DIV16 is set and PM is 0xf */
+	lowest_freq_possible = core_freq_hz / (16 * 4 * (0xf + 1));
+
+	if ( freq < lowest_freq_possible ) {
+		DBG("SPICTRL: TOO LOW FREQ %u, CORE FREQ %u, LOWEST FREQ %u\n",
+			freq, core_freq_hz, lowest_freq_possible);
+		return -1;
+	}
+
+	/* Total divider needed to bring core_freq_hz/2 down to the requested
+	 * frequency, rounded up so the resulting SCK never exceeds 'freq'.
+	 */
+	div = ((core_freq_hz / 2) + (freq-1)) / freq;
+	DBG("SPICTRL: DIV=%d, FREQ=%d\n", div, freq);
+
+	/* Is DIV16 neccessary? */
+	if ( div > 16 ) {
+		div = (div + (16 - 1)) / 16;
+		div16 = 1;
+	} else {
+		div16 = 0;
+	}
+
+	/* If the remaining divider still does not fit in the 4-bit PM field,
+	 * clear FACT which inserts another /2 stage in the divider chain.
+	 */
+	if ( div > 0xf ) {
+		fact = 0;	/* FACT adds an factor /2 */
+		div = (div + (2 - 1)) / 2;
+	} else {
+		fact = 1;
+	}
+
+	pm = div-1;
+
+	/* Update hardware */
+	priv->regs->mode =
+		(priv->regs->mode & ~(SPICTRL_MODE_PM|SPICTRL_MODE_DIV16|SPICTRL_MODE_FACT)) |
+		(pm << SPICTRL_MODE_PM_BIT) | (div16 << SPICTRL_MODE_DIV16_BIT) |
+		(fact << SPICTRL_MODE_FACT_BIT);
+
+	DBG("SPICTRL: Effective bit rate %u (requested %u), PM: %x, FACT: %d, div16: %x, core_freq: %u\n",
+		core_freq_hz / (2 * (fact ? 1 : 2) * (div) * (div16 ? 16 : 1)),
+		freq, pm, fact, div16, core_freq_hz);
+
+	return 0;
+}
+
+/* Start Automated Periodic transfers, after this call read can be done */
+static int spictrl_start_periodic(struct spictrl_priv *priv)
+{
+	struct spictrl_ioctl_config *cfg = &priv->periodic_cfg;
+	unsigned int am_cfg;
+
+	/* Clear the events */
+	priv->regs->event = 0xffffffff;
+
+	/* Enable core */
+	priv->regs->mode |= SPICTRL_MODE_EN | SPICTRL_MODE_MS;
+
+	/* Update hardware config from flags and period */
+	priv->regs->am_period = cfg->period;
+
+	/* Remove SPICTRL_PERIOD_FLAGS_ASEL and ACT bit and shift into position */
+	am_cfg = (cfg->period_flags & 0x1f8) >> 1;
+	/* Write the configuration once without ACT/EACT, then activate below */
+	priv->regs->am_cfg = am_cfg;
+
+	/* Start automated periodic transfers */
+	if ( cfg->period_flags & SPICTRL_PERIOD_FLAGS_EACT ) {
+		/* Enable external triggering */
+		priv->regs->am_cfg = am_cfg | SPICTRL_AMCFG_EACT;
+	} else {
+		/* Activate periodic transfers */
+		priv->regs->am_cfg = am_cfg | SPICTRL_AMCFG_ACT;
+	}
+
+	return 0;
+}
+
+/* Stop Automated Periodic transfers */
+static void spictrl_stop_periodic(struct spictrl_priv *priv)
+{
+	/* Clearing the AM config register drops the ACT/EACT activation bits
+	 * set by spictrl_start_periodic(), halting automated transfers.
+	 */
+	priv->regs->am_cfg = 0;
+}
+
+/* Return the status of the SPI controller (the event register),
+ * it may be needed in periodic mode to look at the Not Full bit (NF)
+ * in order not to hang in an infinte loop when read is called.
+ */
+static inline unsigned int spictrl_status(struct spictrl_priv *priv)
+{
+	/* Raw event register value; decode with the SPICTRL_EVENT_* macros */
+	return priv->regs->event;
+}
+
+/* Read back AM mask registers (options bit0) and/or the received words
+ * selected by the masks (options bit1) into rarg->data. Busy-waits for the
+ * Not Empty event before copying; returns 0 on success, -1 on error.
+ */
+static int spictrl_read_periodic(
+	struct spictrl_priv *priv,
+	struct spictrl_period_io *rarg)
+{
+	int i, rxi, rxshift, bits_per_char, reg;
+	unsigned int rx_word, mask;
+	void *rxbuf;
+
+	if ( rarg->options & 0x1 ) {
+		/* Read mask registers */
+		for (i=0; i<4; i++) {
+			rarg->masks[i] = priv->regs->am_mask[i];
+		}
+	}
+
+	if ( rarg->options & 0x2 ) {
+		/* Read receive registers (after updating masks so that the caller can
+		 * read current buffer without knowning of actual register mask).
+		 */
+
+		/* If not started we could be hanging here forever. */
+		if ( !priv->periodic_started )
+			return -1;
+
+		rxshift = priv->rxshift;
+		bits_per_char = priv->bits_per_char;
+		rx_word = 0;
+
+		rxbuf = rarg->data;
+		if ( !rxbuf ) {
+			/* If no data pointer specified we cannot copy data... */
+			return -1;
+		}
+
+		/* Wait until all data is available (if started) */
+		while ( (priv->regs->event & SPICTRL_EVENT_NE) == 0 ) {
+			;
+		}
+
+		/* Each set mask bit selects one AM receive register; copy the
+		 * selected words into the user buffer in ascending bit order.
+		 */
+		rxi = 0;
+		for (i=0; i<4; i++) {
+			mask = rarg->masks[i];
+			reg = 0;
+			while ( mask ) {
+				if ( mask & 1 ) {
+					/* Update Register */
+					rx_word = priv->regs->am_rx[i*32 + reg] >> rxshift;
+
+					/* Store using the width that matches the word length */
+					if ( bits_per_char <= 8 ) {
+						*((unsigned char *)rxbuf + rxi) = rx_word;
+					} else if ( bits_per_char <= 16 ) {
+						*((unsigned short *)rxbuf + rxi) = rx_word;
+					} else {
+						*((unsigned int *)rxbuf + rxi) = rx_word;
+					}
+					rxi++;
+				}
+
+				mask = mask>>1;
+				reg++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* Load the AM transmit registers selected by warg->masks with data from
+ * warg->data (options bit1; idle chars if data is NULL) and/or write the
+ * mask registers themselves (options bit0). Returns 0.
+ */
+static int spictrl_write_periodic(
+	struct spictrl_priv *priv,
+	struct spictrl_period_io *warg)
+{
+	int i, txi, txshift, bits_per_char, reg;
+	unsigned int tx_word, mask;
+	void *txbuf;
+
+	if ( warg->options & 0x2 ) {
+
+		/* Make sure core is enabled, otherwise TX registers writes are lost */
+		priv->regs->mode |= SPICTRL_MODE_EN;
+
+		/* Update Transmit registers (before updating masks so that we do not
+		 * transmit invalid data)
+		 */
+
+		txshift = priv->txshift;
+		bits_per_char = priv->bits_per_char;
+		tx_word = 0;
+
+		txbuf = warg->data;
+		if ( !txbuf ) {
+			/* If no data pointer specified we fill up with
+			 * idle chars.
+			 */
+			tx_word = priv->idle_char << txshift;
+		}
+
+		/* Each set mask bit selects one AM transmit register; fill the
+		 * selected registers from the user buffer in bit order.
+		 */
+		txi = 0;
+		for (i=0; i<4; i++) {
+			mask = warg->masks[i];
+			reg = 0;
+			while ( mask ) {
+				if ( mask & 1 ) {
+					if ( txbuf ) {
+						/* Fetch using the width matching the word length */
+						if ( bits_per_char <= 8 ) {
+							tx_word = *((unsigned char *)txbuf + txi);
+						} else if ( bits_per_char <= 16 ) {
+							tx_word = *((unsigned short *)txbuf + txi);
+						} else {
+							tx_word = *((unsigned int *)txbuf + txi);
+						}
+						tx_word = tx_word << txshift;
+						txi++;
+					}
+
+					/* Update Register */
+					DBG("WRITE 0x%08x to 0x%08x\n", tx_word, &priv->regs->am_tx[i*32 + reg]);
+					priv->regs->am_tx[i*32 + reg] = tx_word;
+				}
+
+				mask = mask>>1;
+				reg++;
+			}
+		}
+	}
+
+	if ( warg->options & 0x1 ) {
+		/* Update mask registers */
+		for (i=0; i<4; i++) {
+			DBG("WRITE 0x%08x to 0x%08x (MSK%d)\n", warg->masks[i], &priv->regs->am_mask[i], i);
+			priv->regs->am_mask[i] = warg->masks[i];
+		}
+	}
+
+	return 0;
+}
+
+/* Full-duplex polled transfer of 'len' bytes: transmits from txbuf (or idle
+ * chars if txbuf is NULL) while receiving into rxbuf (discarded if NULL).
+ * Returns len on success, -1 if the core is configured for periodic mode.
+ */
+static int spictrl_read_write(
+	struct spictrl_priv *priv,
+	void *rxbuf,
+	void *txbuf,
+	int len)
+{
+	unsigned int tx_word, rx_word, tmp;
+	int txshift = priv->txshift;
+	int rxshift = priv->rxshift;
+	int txi, rxi, bits_per_char;
+	int length;
+
+	/* Use IOCTL for periodic reads. The FIFO is not supported in automated
+	 * periodic mode
+	 */
+	if ( priv->periodic_cfg.periodic_mode ) {
+		return -1;
+	}
+
+	bits_per_char = priv->bits_per_char;
+	tx_word = 0;
+	if ( !txbuf ) {
+		/* No user data: transmit the configured idle character */
+		tx_word = priv->idle_char << txshift;
+	}
+
+	/* Clear the events */
+	priv->regs->event = 0xffffffff;
+
+	/* Enable core */
+	priv->regs->mode |= SPICTRL_MODE_EN | SPICTRL_MODE_MS;
+
+	/* Convert byte count to word count for 16- and 32-bit word lengths */
+	length = len;
+	if ( bits_per_char > 8 ) {
+		length = length / 2;
+		if ( bits_per_char > 16 )
+			length = length / 2;
+	}
+	DBG("SPICTRL: LENGTH = %d, Bits/Char: %d, Shift: %d, %d\n", length, bits_per_char, txshift, rxshift);
+
+	/* Interleave TX and RX so the receive FIFO is drained while words are
+	 * still being transmitted, avoiding overruns.
+	 */
+	txi=0;
+	rxi=0;
+	while ( (rxi < length) || (txi < length) ) {
+		/* Get transmit word */
+		if ( length > txi ) {
+			if ( txbuf ) {
+				if ( bits_per_char <= 8 ) {
+					tx_word = *((unsigned char *)txbuf + txi);
+				} else if ( bits_per_char <= 16 ) {
+					tx_word = *((unsigned short *)txbuf + txi);
+				} else {
+					tx_word = *((unsigned int *)txbuf + txi);
+				}
+				tx_word = tx_word << txshift;
+			}
+
+			/* Wait for SPICTRL to get ready for another TX char */
+			while ( (priv->regs->event & SPICTRL_EVENT_NF) == 0 ) {
+				/* Wait for all chars to transmit */
+/* Could implement waiting for SPICTRL IRQ here */
+			}
+
+			DBG("SPICTRL: Writing 0x%x\n", tx_word);
+
+			/* Transmit word */
+			priv->regs->tx = tx_word;
+			txi++;
+		}
+
+		/* Read */
+		while ( priv->regs->event & SPICTRL_EVENT_NE ) {
+			/* Read to avoid overrun */
+			tmp = priv->regs->rx;
+			DBG("SPICTRL: Read 0x%x\n", tmp);
+
+			if ( rxbuf && (length > rxi) ) {
+				/* Copy word to user buffer */
+				rx_word = (tmp >> rxshift);
+
+				DBG("SPICTRL: Receiving 0x%x (0x%x, %d)\n", rx_word, tmp, rxshift);
+
+				if ( bits_per_char <= 8 ) {
+					*((unsigned char *)rxbuf + rxi) = rx_word;
+				} else if ( bits_per_char <= 16 ) {
+					*((unsigned short *)rxbuf + rxi) = rx_word;
+				} else {
+					*((unsigned int *)rxbuf + rxi) = rx_word;
+				}
+
+			}
+			rxi++;
+		}
+	}
+
+	return len;
+}
+
+
+/* libi2c init callback: bring the controller to a known idle state */
+STATIC rtems_status_code spictrl_libi2c_init(rtems_libi2c_bus_t *bushdl)
+{
+	struct spictrl_priv *priv = (struct spictrl_priv *)bushdl;
+	struct spictrl_regs *regs = priv->regs;
+
+	DBG("SPICTRL: spictrl_libi2c_init\n");
+
+	/* Disable SPICTTRL, Select Master mode */
+	regs->mode = SPICTRL_MODE_MS;
+	/* Mask all Interrupts */
+	regs->mask = 0;
+	/* Select no slave */
+	regs->slvsel = 0xffffffff;
+	/* Clear all events */
+	regs->event = 0xffffffff;
+
+	return 0;
+}
+
+/* Nothing to be done in start */
+STATIC rtems_status_code spictrl_libi2c_send_start(rtems_libi2c_bus_t *bushdl)
+{
+	DBG("SPICTRL: spictrl_libi2c_send_start\n");
+
+	/* No hardware action needed; slave selection happens in send_addr() */
+	return 0;
+}
+
+/* Inactivate all chip selects, indicates "End of command" */
+/* libi2c stop callback: deactivate every chip select ("End of command") */
+STATIC rtems_status_code spictrl_libi2c_send_stop(rtems_libi2c_bus_t *bushdl)
+{
+	struct spictrl_priv *priv = (struct spictrl_priv *)bushdl;
+
+	/* Drive all slave select lines inactive */
+	priv->regs->slvsel = 0xffffffff;
+
+	/* Let an optional user-provided hook deselect its slaves as well */
+	if (priv->slvSelFunc != NULL)
+		return priv->slvSelFunc(priv->regs, -1, 0);
+
+	DBG("SPICTRL: spictrl_libi2c_send_stop\n");
+	return 0;
+}
+
+/* Select Slave address by selecting apropriate chip select */
+STATIC rtems_status_code spictrl_libi2c_send_addr(rtems_libi2c_bus_t *bushdl,
+	uint32_t addr, int rw)
+{
+	struct spictrl_priv *priv = (struct spictrl_priv *)bushdl;
+
+	DBG("SPICTRL: spictrl_libi2c_send_addr, %d\n", addr);
+
+	if ( priv->slvSelFunc ) {
+		/* Let user set spi select using for example GPIO */
+		return priv->slvSelFunc(priv->regs, addr, 1);
+	} else if ( priv->regs->capability & SPICTRL_CAP_SSEN ) {
+		unsigned int slaves;
+
+		/* Maximum number of slaves the core support */
+		slaves = (priv->regs->capability & SPICTRL_CAP_SSSZ) >> SPICTRL_CAP_SSSZ_BIT;
+
+		/* Slave addresses are 1..slaves. addr==0 must be rejected here
+		 * as well, otherwise the (1<<(addr-1)) expressions below would
+		 * shift by an out-of-range amount (undefined behaviour).
+		 * 'slaves' is unsigned to avoid a signed/unsigned comparison.
+		 */
+		if ( (addr == 0) || (addr > slaves) )
+			return -1;
+
+		if ( (priv->regs->capability & SPICTRL_CAP_ASELA) &&
+		     (priv->periodic_cfg.period_flags & SPICTRL_PERIOD_FLAGS_ASEL) ) {
+			/* When automatic slave select is supported by hardware and
+			 * enabled by configuration the SPI address is determined by
+			 * the automatic slave select register and the "idle" slave
+			 * select register is set by configuration.
+			 */
+			priv->regs->am_slvsel = ~(1<<(addr-1));
+			priv->regs->slvsel = priv->periodic_cfg.period_slvsel;
+			/* Enable automatic slave select */
+			priv->regs->mode |= SPICTRL_MODE_ASEL;
+		} else {
+			/* Normal mode */
+			priv->regs->slvsel = ~(1<<(addr-1));
+		}
+	}
+
+	return 0;
+}
+
+/* Read a number of bytes */
+STATIC int spictrl_libi2c_read_bytes(rtems_libi2c_bus_t *bushdl,
+	unsigned char *bytes, int nbytes)
+{
+	struct spictrl_priv *priv = (struct spictrl_priv *)bushdl;
+	int ret;
+
+	DBG("SPICTRL: spictrl_libi2c_read_bytes %d\n", nbytes);
+	/* Receive-only: idle characters are clocked out (txbuf == NULL) while
+	 * 'nbytes' bytes are sampled from the slave.
+	 */
+	ret = spictrl_read_write(priv, bytes, NULL, nbytes);
+	if ( ret < 0 ) {
+		printk("SPICTRL: Error Reading\n");
+	}
+#ifdef DEBUG
+	else {
+		int i;
+		for(i=0; i<nbytes; i+=16) {
+			DBG("0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x ",
+				bytes[0+i], bytes[1+i], bytes[2+i], bytes[3+i], bytes[4+i], bytes[5+i], bytes[6+i], bytes[7+i]);
+			DBG("0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+				bytes[8+i], bytes[9+i], bytes[10+i], bytes[11+i], bytes[12+i], bytes[13+i], bytes[14+i], bytes[15+i]);
+		}
+	}
+#endif
+	return ret;
+}
+
+/* Write a number of bytes */
+STATIC int spictrl_libi2c_write_bytes(rtems_libi2c_bus_t *bushdl,
+	unsigned char *bytes, int nbytes)
+{
+	struct spictrl_priv *priv = (struct spictrl_priv *)bushdl;
+
+#ifdef DEBUG
+	int i;
+	DBG("SPICTRL: spictrl_libi2c_write_bytes: %d\n", nbytes);
+
+	for(i=0; i<nbytes; i+=16) {
+		DBG("0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x ",
+			bytes[0+i], bytes[1+i], bytes[2+i], bytes[3+i], bytes[4+i], bytes[5+i], bytes[6+i], bytes[7+i]);
+		DBG("0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+			bytes[8+i], bytes[9+i], bytes[10+i], bytes[11+i], bytes[12+i], bytes[13+i], bytes[14+i], bytes[15+i]);
+	}
+#endif
+
+	/* Transmit-only: received words are discarded (rxbuf == NULL) */
+	return spictrl_read_write(priv, NULL, bytes, nbytes);
+}
+
+/* Configure the interface and do simultaneous READ/WRITE operations */
+/* libi2c ioctl entry point. Handles the generic libi2c transfer-mode and
+ * read/write commands plus the SPICTRL-specific automated periodic mode
+ * commands (configure/start/stop/read/write/status/regs).
+ * Returns 0 or a byte count on success, -1 on error.
+ */
+STATIC int spictrl_libi2c_ioctl(
+	rtems_libi2c_bus_t * bushdl,
+	int   cmd,
+	void *buffer)
+{
+	struct spictrl_priv *priv = (struct spictrl_priv *)bushdl;
+	int ret;
+
+	DBG("SPICTRL: spictrl_libi2c_ioctl(%d, 0x%x)\n", cmd, (unsigned int)buffer);
+
+	switch (cmd) {
+		case RTEMS_LIBI2C_IOCTL_SET_TFRMODE:
+		{
+			rtems_libi2c_tfr_mode_t *trf_mode = buffer;
+			unsigned int mode;
+
+			/* Must disable core to write new values */
+			priv->regs->mode &= ~SPICTRL_MODE_EN;
+
+			/* Change bit frequency */
+			if ( spictrl_set_freq(priv, trf_mode->baudrate) ) {
+				/* Unable to set such a low frequency. */
+				return -1;
+			}
+
+			/* Set Clock Polarity, Clock Phase, Reverse mode and Word Length */
+			mode = (priv->regs->mode &
+				~(SPICTRL_MODE_CPOL|SPICTRL_MODE_CPHA|SPICTRL_MODE_REV|SPICTRL_MODE_LEN));
+			if ( trf_mode->clock_inv )
+				mode |= SPICTRL_MODE_CPOL;
+			if ( trf_mode->clock_phs )
+				mode |= SPICTRL_MODE_CPHA;
+			if ( trf_mode->lsb_first == 0 )
+				mode |= SPICTRL_MODE_REV; /* Set Reverse mode (MSB first) */
+
+			/* Word lengths of 4..16 and 32 bits are accepted */
+			if ( (trf_mode->bits_per_char < 4) ||
+			     ((trf_mode->bits_per_char > 16) && (trf_mode->bits_per_char != 32)) )
+				return -1;
+			/* Precompute the TX/RX data alignment shifts for this length */
+			if ( trf_mode->bits_per_char == 32 ) {
+				priv->txshift = 0;
+				priv->rxshift = 0;
+			} else {
+				mode |= (trf_mode->bits_per_char-1) << SPICTRL_MODE_LEN_BIT;
+				if ( trf_mode->lsb_first == 0 ) {
+					/* REV bit 1 */
+					priv->txshift = 32 - trf_mode->bits_per_char;
+					priv->rxshift = 16;
+				} else {
+					/* REV bit 0 */
+					priv->txshift = 0;
+					priv->rxshift = 16 - trf_mode->bits_per_char;
+				}
+			}
+
+			priv->bits_per_char = trf_mode->bits_per_char;
+			priv->lsb_first = trf_mode->lsb_first;
+			priv->idle_char = trf_mode->idle_char;
+
+			/* Update hardware */
+			priv->regs->mode = mode;
+
+			return 0;
+		}
+
+		case RTEMS_LIBI2C_IOCTL_READ_WRITE:
+		{
+			rtems_libi2c_read_write_t *arg = buffer;
+
+			DBG("SPICTRL: IOCTL READ/WRITE, RX: 0x%x, TX: 0x%x, len: %d\n", arg->rd_buf, arg->wr_buf, arg->byte_cnt);
+#ifdef DEBUG
+			/* Printf out what is going to be transmitted */
+			if ( arg->wr_buf ) {
+				unsigned char *bytes = (unsigned char *)arg->wr_buf;
+				int i;
+				for(i=0; i<arg->byte_cnt; i+=16) {
+					DBG("0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x ",
+						bytes[0+i], bytes[1+i], bytes[2+i], bytes[3+i], bytes[4+i], bytes[5+i], bytes[6+i], bytes[7+i]);
+					DBG("0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+						bytes[8+i], bytes[9+i], bytes[10+i], bytes[11+i], bytes[12+i], bytes[13+i], bytes[14+i], bytes[15+i]);
+				}
+			}
+#endif
+
+			ret = spictrl_read_write(priv, arg->rd_buf, (unsigned char *)arg->wr_buf,
+						arg->byte_cnt);
+#ifdef DEBUG
+			/* Printf out what was read */
+			if ( arg->rd_buf ) {
+				unsigned char *bytes = (unsigned char *)arg->rd_buf;
+				int i;
+				for(i=0; i<arg->byte_cnt; i+=16) {
+					DBG("0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x ",
+						bytes[0+i], bytes[1+i], bytes[2+i], bytes[3+i], bytes[4+i], bytes[5+i], bytes[6+i], bytes[7+i]);
+					DBG("0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+						bytes[8+i], bytes[9+i], bytes[10+i], bytes[11+i], bytes[12+i], bytes[13+i], bytes[14+i], bytes[15+i]);
+				}
+			}
+#endif
+			return ret;
+		}
+
+		/* Enable Periodic mode */
+		case SPICTRL_IOCTL_CONFIG:
+		{
+			struct spictrl_ioctl_config *cfg;
+
+			DBG("SPICTRL: Configuring Periodic mode\n");
+
+			if ( priv->periodic_started ) {
+				DBG("SPICTRL: Periodic mode already started, too late to configure\n");
+				return -1;
+			}
+
+			/* NULL buffer means "reset configuration to defaults" */
+			cfg = buffer;
+			if ( cfg == NULL ) {
+				memset(&priv->periodic_cfg, 0, sizeof(priv->periodic_cfg));
+			} else {
+				priv->periodic_cfg = *cfg;
+			}
+			cfg = &priv->periodic_cfg;
+			if ( cfg->periodic_mode ) {
+				/* Enable Automated Periodic mode */
+				priv->regs->mode |= SPICTRL_MODE_AMEN;
+
+				/* Check that hardware has support for periodic mode
+				 * (the AMEN bit reads back as zero if unimplemented)
+				 */
+				if ( (priv->regs->mode & SPICTRL_MODE_AMEN) == 0 ) {
+					priv->periodic_cfg.periodic_mode = 0;
+					DBG("SPICTRL: Periodic mode not supported by hardware\n");
+					return -1;
+				}
+			} else {
+				/* Disable Periodic mode */
+				priv->regs->mode &= ~SPICTRL_MODE_AMEN;
+			}
+			priv->periodic_started = 0;
+
+			/* Set clockgap and TAC */
+			priv->regs->mode = (priv->regs->mode & ~(SPICTRL_MODE_CG|SPICTRL_MODE_TAC)) |
+					(cfg->clock_gap << SPICTRL_MODE_CG_BIT) |
+					(cfg->flags & SPICTRL_MODE_TAC);
+			return 0;
+		}
+		case SPICTRL_IOCTL_PERIOD_START:
+		{
+			if ( !priv->periodic_cfg.periodic_mode || priv->periodic_started ) {
+				return -1;
+			}
+			if ( spictrl_start_periodic(priv) == 0 ) {
+				priv->periodic_started = 1;
+				return 0;
+			} else
+				return -1;
+		}
+		case SPICTRL_IOCTL_PERIOD_STOP:
+		{
+			if ( !priv->periodic_cfg.periodic_mode || !priv->periodic_started ) {
+				return -1;
+			}
+			spictrl_stop_periodic(priv);
+			priv->periodic_started = 0;
+			return 0;
+		}
+		case SPICTRL_IOCTL_STATUS:
+		{
+			if ( !buffer )
+				return 0;
+			/* Copy the event register value out to the caller */
+			*(unsigned int *)buffer = spictrl_status(priv);
+			return 0;
+		}
+
+		case SPICTRL_IOCTL_PERIOD_WRITE:
+		{
+			if ( !priv->periodic_cfg.periodic_mode || !buffer ) {
+				return -1;
+			}
+			if ( spictrl_write_periodic(priv, (struct spictrl_period_io *)
+						buffer) == 0 ) {
+				return 0;
+			} else
+				return -1;
+		}
+
+		case SPICTRL_IOCTL_PERIOD_READ:
+		{
+			if ( !priv->periodic_cfg.periodic_mode || !buffer ) {
+				return -1;
+			}
+			if ( spictrl_read_periodic(priv, (struct spictrl_period_io *)
+						buffer) == 0 ) {
+				return 0;
+			} else
+				return -1;
+		}
+
+		case SPICTRL_IOCTL_REGS:
+		{
+			/* Copy Register Base Address to user space */
+			if ( !buffer ) {
+				return -1;
+			}
+			*(struct spictrl_regs **)buffer = priv->regs;
+			return 0;
+		}
+
+		default:
+			/* Unknown IOCTL */
+			return -1;
+	}
+
+	return 0;
+}
+
+/* Callback table plugged into the RTEMS libi2c layer for this bus */
+STATIC rtems_libi2c_bus_ops_t spictrl_libi2c_ops =
+{
+	.init		= spictrl_libi2c_init,
+	.send_start	= spictrl_libi2c_send_start,
+	.send_stop	= spictrl_libi2c_send_stop,
+	.send_addr	= spictrl_libi2c_send_addr,
+	.read_bytes	= spictrl_libi2c_read_bytes,
+	.write_bytes	= spictrl_libi2c_write_bytes,
+	.ioctl		= spictrl_libi2c_ioctl
+};
+
+/* Probe the device via AMBA PnP, quiesce the hardware and prepare the
+ * libi2c bus descriptor. Returns 0 on success, -1 if no PnP info exists.
+ */
+int spictrl_device_init(struct spictrl_priv *priv)
+{
+	struct amba_dev_info *ambadev;
+	struct ambapp_core *pnpinfo;
+	union drvmgr_key_value *value;
+
+	/* Get device information from AMBA PnP information */
+	ambadev = (struct amba_dev_info *)priv->dev->businfo;
+	if ( ambadev == NULL ) {
+		return -1;
+	}
+	pnpinfo = &ambadev->info;
+	priv->irq = pnpinfo->irq;
+	priv->regs = (struct spictrl_regs *)pnpinfo->apb_slv->start;
+	/* FIFO depth is reported by the capability register */
+	priv->fdepth = (priv->regs->capability & SPICTRL_CAP_FDEPTH) >> SPICTRL_CAP_FDEPTH_BIT;
+
+	DBG("SPCTRL: 0x%x irq %d, FIFO: %d\n", (unsigned int)priv->regs, priv->irq, priv->fdepth);
+
+	/* Mask all Interrupts */
+	priv->regs->mask = 0;
+
+	/* Disable SPICTTRL */
+	priv->regs->mode = 0;
+
+	/* Get custom slave-select hook, e.g. a GPIO-based select function
+	 * registered as a device resource.
+	 */
+	value = drvmgr_dev_key_get(priv->dev, "slvSelFunc", DRVMGR_KT_POINTER);
+	if ( value ) {
+		priv->slvSelFunc = value->ptr;
+	}
+
+	/* Prepare I2C layer */
+	priv->i2clib_desc.ops = &spictrl_libi2c_ops;
+	priv->i2clib_desc.size = sizeof(spictrl_libi2c_ops);
+	return 0;
+}
diff --git a/bsps/shared/grlib/spw/grspw.c b/bsps/shared/grlib/spw/grspw.c
new file mode 100644
index 0000000000..ca0f63edd8
--- /dev/null
+++ b/bsps/shared/grlib/spw/grspw.c
@@ -0,0 +1,2038 @@
+/*
+ * This file contains the GRSPW SpaceWire Driver for LEON2 and LEON3.
+ *
+ * COPYRIGHT (c) 2006
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bsp.h>
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <ctype.h>
+#include <rtems/bspIo.h>
+#include <grlib/ambapp.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/grspw.h>
+
+#include <grlib/grlib_impl.h>
+
+#define DBGSPW_IOCALLS 1
+#define DBGSPW_TX 2
+#define DBGSPW_RX 4
+#define DBGSPW_IOCTRL 1
+#define DBGSPW_DUMP 16
+#define DEBUG_SPACEWIRE_FLAGS (DBGSPW_IOCALLS | DBGSPW_TX | DBGSPW_RX )
+
+/* #define DEBUG_SPACEWIRE_ONOFF */
+
+#ifdef DEBUG_SPACEWIRE_ONOFF
+#define SPACEWIRE_DBG(fmt, args...) do { { printk(" : %03d @ %18s()]:" fmt , __LINE__,__FUNCTION__,## args); }} while(0)
+#define SPACEWIRE_DBG2(fmt) do { { printk(" : %03d @ %18s()]:" fmt , __LINE__,__FUNCTION__); }} while(0)
+#define SPACEWIRE_DBGC(c,fmt, args...) do { if (DEBUG_SPACEWIRE_FLAGS & c) { printk(" : %03d @ %18s()]:" fmt , __LINE__,__FUNCTION__,## args); }} while(0)
+#else
+#define SPACEWIRE_DBG(fmt, args...)
+#define SPACEWIRE_DBG2(fmt, args...)
+#define SPACEWIRE_DBGC(c, fmt, args...)
+#endif
+
/* GRSPW APB register layout. Offsets follow from member order
 * (4 bytes each, starting at the APB slave base address).
 */
typedef struct {
	volatile unsigned int ctrl;		/* 0x00 control */
	volatile unsigned int status;		/* 0x04 status/interrupt source */
	volatile unsigned int nodeaddr;		/* 0x08 node address (+mask) */
	volatile unsigned int clkdiv;		/* 0x0C clock divisor */
	volatile unsigned int destkey;		/* 0x10 RMAP destination key */
	volatile unsigned int time;		/* 0x14 time-code */
	volatile unsigned int timer;		/* 0x18 timer/disconnect (GRSPW1) */
	volatile unsigned int pad;		/* 0x1C reserved */

	volatile unsigned int dma0ctrl;		/* 0x20 DMA channel 0 control */
	volatile unsigned int dma0rxmax;	/* 0x24 DMA 0 max RX packet length */
	volatile unsigned int dma0txdesc;	/* 0x28 DMA 0 TX descriptor table */
	volatile unsigned int dma0rxdesc;	/* 0x2C DMA 0 RX descriptor table */

	/* For GRSPW core 2 and onwards */
	volatile unsigned int dma0addr;		/* 0x30 DMA 0 address register */

} LEON3_SPACEWIRE_Regs_Map;
+
/* RX descriptor: control/status word plus the data buffer address.
 * Control bits are the SPW_RXBD_* defines below.
 */
typedef struct {
	volatile unsigned int ctrl;
	volatile unsigned int addr;
} SPACEWIRE_RXBD;

/* TX descriptor: control word, header buffer address, data length and
 * data buffer address. Control bits are the SPW_TXBD_* defines below.
 */
typedef struct {
	volatile unsigned int ctrl;
	volatile unsigned int addr_header;
	volatile unsigned int len;
	volatile unsigned int addr_data;
} SPACEWIRE_TXBD;
+
+#define SPACEWIRE_INIT_TIMEOUT 10
+#define SPACEWIRE_BDTABLE_SIZE 0x400
+#define SPACEWIRE_TXD_SIZE 1024
+#define SPACEWIRE_TXH_SIZE 64
+#define SPACEWIRE_RXPCK_SIZE 1024
+#define SPACEWIRE_TXBUFS_NR 64
+#define SPACEWIRE_RXBUFS_NR 128
+
+#define BUFMEM_PER_LINK (SPACEWIRE_TXBUFS_NR*(SPACEWIRE_TXD_SIZE+SPACEWIRE_TXH_SIZE) + SPACEWIRE_RXBUFS_NR*SPACEWIRE_RXPCK_SIZE)
+
/* Per-device driver state for one GRSPW core. One instance is
 * allocated per device in grspw_init2() and stored in dev->priv.
 */
typedef struct {
	/* configuration parameters */
	struct drvmgr_dev *dev;		/* Driver manager device */
	char devName[32];		/* Device Name */
	LEON3_SPACEWIRE_Regs_Map *regs;	/* mapped APB registers */
	spw_config config;		/* user-visible configuration */

	unsigned int tx_all_in_use;	/* 1 when every TX descriptor is queued */
	unsigned int tx_sent;		/* next TX descriptor to reap */
	unsigned int tx_cur;		/* next TX descriptor to fill */
	unsigned int rxcur;		/* current RX descriptor index */
	unsigned int rxbufcur;		/* read offset within current RX packet */
	unsigned int txdbufsize;	/* bytes per TX data buffer */
	unsigned int txhbufsize;	/* bytes per TX header buffer */
	unsigned int rxbufsize;		/* bytes per RX packet buffer */
	unsigned int txbufcnt;		/* number of TX descriptors/buffers */
	unsigned int rxbufcnt;		/* number of RX descriptors/buffers */

	/* DMA Area set by user (0 = allocate from heap; bit0 set = value
	 * is a remote/bus address, see grspw_buffer_alloc()) */
	unsigned int rx_dma_area;
	unsigned int tx_data_dma_area;
	unsigned int tx_hdr_dma_area;
	unsigned int bd_dma_area;

	/* statistics */
	spw_stats stat;

	unsigned int _ptr_rxbuf0;	/* raw allocation, before alignment */
	char *ptr_rxbuf0;		/* aligned RX buffer area (CPU address) */
	char *ptr_txdbuf0;		/* TX data buffer area (CPU address) */
	char *ptr_txhbuf0;		/* TX header buffer area (CPU address) */
	char *_ptr_bd0, *ptr_bd0;	/* raw/used descriptor table area */

	/* same areas as seen from the SpaceWire core (bus addresses) */
	char *ptr_rxbuf0_remote;
	char *ptr_txdbuf0_remote;
	char *ptr_txhbuf0_remote;
	char *ptr_bd0_remote;

	unsigned int irq;		/* interrupt number from PnP info */
	int minor;			/* driver minor, used in device name */
	int core_ver;			/* 1=GRSPW, 2=GRSPW2, 3=GRSPW2_DMA */
	int open;			/* device currently opened */
	int running;			/* link started and DMA enabled */
	unsigned int core_freq_khz;	/* core clock frequency in kHz */
	unsigned int rtimeout;		/* RX blocking timeout (ticks) */

	/* semaphores, signalled from the ISR */
	rtems_id txsp;
	rtems_id rxsp;

	SPACEWIRE_RXBD *rx;		/* RX descriptor table (CPU address) */
	SPACEWIRE_TXBD *tx;		/* TX descriptor table (CPU address) */

	unsigned int rx_remote;		/* RX descriptor table (bus address) */
	unsigned int tx_remote;		/* TX descriptor table (bus address) */
} GRSPW_DEV;
+
+/* Function pointer called upon timecode receive */
+void (*grspw_timecode_callback)
+ (void *pDev, void *regs, int minor, unsigned int tc) = NULL;
+
/* Register/memory read helpers.
 *
 * With GRSPW_DONT_BYPASS_CACHE defined, plain volatile loads are used.
 * Otherwise the loads use the SPARC "lda/lduba [addr] 1" forms, i.e.
 * alternate-space loads from ASI 0x01 (NOTE(review): presumably the
 * cache-bypass/forced-miss ASI on LEON -- confirm against the target
 * LEON manual). This avoids reading stale cached data for registers
 * and DMA-written descriptors/buffers.
 */
#ifdef GRSPW_DONT_BYPASS_CACHE
#define _SPW_READ(address) (*(volatile unsigned int *)(address))
#define _MEM_READ8(address) (*(volatile unsigned char *)(address))
#define _MEM_READ32(address) (*(volatile unsigned int *)(address))
#else
/* 32-bit load via ASI 1 */
static inline unsigned int _SPW_READ(volatile void *addr) {
	unsigned int tmp;
	__asm__ (" lda [%1]1, %0 "
	  : "=r"(tmp)
	  : "r"(addr)
	);
	return tmp;
}

/* 8-bit load via ASI 1, zero-extended */
static inline unsigned int _MEM_READ8(volatile void *addr) {
	unsigned int tmp;
	__asm__ (" lduba [%1]1, %0 "
	  : "=r"(tmp)
	  : "r"(addr)
	);
	return tmp;
}

/* 32-bit load via ASI 1 (same as _SPW_READ, kept for symmetry) */
static inline unsigned int _MEM_READ32(volatile void *addr) {
	unsigned int tmp;
	__asm__ (" lda [%1]1, %0 "
	  : "=r"(tmp)
	  : "r"(addr)
	);
	return tmp;
}
#endif
+
+#define MEM_READ8(addr) _MEM_READ8((volatile void *)(addr))
+#define MEM_READ32(addr) _MEM_READ32((volatile void *)(addr))
+#define SPW_READ(addr) _SPW_READ((volatile void *)(addr))
+#define SPW_WRITE(addr,v) (*(volatile unsigned int *)addr)=v
+
+#define SPW_REG(c,r) (c->regs->r)
+#define SPW_REG_CTRL(c) SPW_REG(c,ctrl)
+#define SPW_REG_STATUS(c) SPW_REG(c,status)
+#define SPW_REG_NODEADDR(c) SPW_REG(c,nodeaddr)
+
+#define SPW_CTRL_READ(c) SPW_READ(&SPW_REG_CTRL(c))
+#define SPW_CTRL_WRITE(c,v) SPW_WRITE(&SPW_REG_CTRL(c),v)
+#define SPW_STATUS_READ(c) SPW_READ(&SPW_REG_STATUS(c))
+#define SPW_STATUS_WRITE(c,v) SPW_WRITE(&SPW_REG_STATUS(c),v)
+
+#define SPW_LINKSTATE(c) (((c) >> 21) & 0x7)
+
+#define SPACEWIRE_RXNR(c) ((c&~(SPACEWIRE_BDTABLE_SIZE-1))>>3)
+#define SPACEWIRE_TXNR(c) ((c&~(SPACEWIRE_BDTABLE_SIZE-1))>>4)
+
+#define SPW_RXBD_LENGTH 0x1ffffff
+#define SPW_RXBD_EN (1 << 25)
+#define SPW_RXBD_WR (1 << 26)
+#define SPW_RXBD_IE (1 << 27)
+
+#define SPW_RXBD_EEP (1 << 28)
+#define SPW_RXBD_EHC (1 << 29)
+#define SPW_RXBD_EDC (1 << 30)
+#define SPW_RXBD_ETR (1 << 31)
+
+#define SPW_RXBD_ERROR (SPW_RXBD_EEP | \
+ SPW_RXBD_ETR)
+
+#define SPW_RXBD_RMAPERROR (SPW_RXBD_EHC | SPW_RXBD_EDC)
+
+#define SPW_TXBD_LENGTH 0xffffff
+
+#define SPW_TXBD_EN (1 << 12)
+#define SPW_TXBD_WR (1 << 13)
+#define SPW_TXBD_IE (1 << 14)
+#define SPW_TXBD_LE (1 << 15)
+#define SPW_TXBD_HC (1 << 16)
+#define SPW_TXBD_DC (1 << 17)
+
+#define SPW_TXBD_ERROR (SPW_TXBD_LE)
+
+#define SPW_CTRL_LINKDISABLED (1 << 0)
+#define SPW_CTRL_LINKSTART (1 << 1)
+#define SPW_CTRL_AUTOSTART (1 << 2)
+#define SPW_CTRL_IE (1 << 3)
+#define SPW_CTRL_TI (1 << 4)
+#define SPW_CTRL_PM (1 << 5)
+#define SPW_CTRL_RESET (1 << 6)
+#define SPW_CTRL_TQ (1 << 8)
+#define SPW_CTRL_LI (1 << 9)
+#define SPW_CTRL_TT (1 << 10)
+#define SPW_CTRL_TR (1 << 11)
+#define SPW_CTRL_RE (1 << 16)
+#define SPW_CTRL_RD (1 << 17)
+
+#define SPW_CTRL_RC (1 << 29)
+#define SPW_CTRL_RX (1 << 30)
+#define SPW_CTRL_RA (1 << 31)
+
+#define SPW_STATUS_TO (1 << 0)
+#define SPW_STATUS_CE (1 << 1)
+#define SPW_STATUS_ER (1 << 2)
+#define SPW_STATUS_DE (1 << 3)
+#define SPW_STATUS_PE (1 << 4)
+#define SPW_STATUS_WE (1 << 6)
+#define SPW_STATUS_IA (1 << 7)
+#define SPW_STATUS_EE (1 << 8)
+
+#define SPW_DMACTRL_TXEN (1 << 0)
+#define SPW_DMACTRL_RXEN (1 << 1)
+#define SPW_DMACTRL_TXIE (1 << 2)
+#define SPW_DMACTRL_RXIE (1 << 3)
+#define SPW_DMACTRL_AI (1 << 4)
+#define SPW_DMACTRL_PS (1 << 5)
+#define SPW_DMACTRL_PR (1 << 6)
+#define SPW_DMACTRL_TA (1 << 7)
+#define SPW_DMACTRL_RA (1 << 8)
+#define SPW_DMACTRL_AT (1 << 9)
+#define SPW_DMACTRL_RX (1 << 10)
+#define SPW_DMACTRL_RD (1 << 11)
+#define SPW_DMACTRL_NS (1 << 12)
+
+#define SPW_PREPAREMASK_TX (SPW_DMACTRL_RXEN | SPW_DMACTRL_RXIE | SPW_DMACTRL_PS | SPW_DMACTRL_TA | SPW_DMACTRL_RD | SPW_DMACTRL_NS)
+#define SPW_PREPAREMASK_RX (SPW_DMACTRL_TXEN | SPW_DMACTRL_TXIE | SPW_DMACTRL_AI | SPW_DMACTRL_PR | SPW_DMACTRL_RA)
+
+static int grspw_hw_init(GRSPW_DEV *pDev);
+static int grspw_hw_send(GRSPW_DEV *pDev, unsigned int hlen, char *hdr, unsigned int dlen, char *data, unsigned int options);
+static int grspw_hw_receive(GRSPW_DEV *pDev,char *b,int c);
+static int grspw_hw_startup (GRSPW_DEV *pDev, int timeout);
+static int grspw_hw_stop (GRSPW_DEV *pDev, int rx, int tx);
+static void grspw_hw_wait_rx_inactive(GRSPW_DEV *pDev);
+static int grspw_hw_waitlink (GRSPW_DEV *pDev, int timeout);
+static void grspw_hw_reset(GRSPW_DEV *pDev);
+static void grspw_hw_read_config(GRSPW_DEV *pDev);
+
+static void check_rx_errors(GRSPW_DEV *pDev, int ctrl);
+static void grspw_rxnext(GRSPW_DEV *pDev);
+static void grspw_interrupt(void *arg);
+static int grspw_buffer_alloc(GRSPW_DEV *pDev);
+
+static rtems_device_driver grspw_initialize(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void * arg
+ );
+
+static rtems_device_driver grspw_open(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void * arg
+ );
+
+static rtems_device_driver grspw_close(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void * arg
+ );
+
+static rtems_device_driver grspw_read(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void * arg
+ );
+
+static rtems_device_driver grspw_write(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void * arg
+ );
+
+static rtems_device_driver grspw_control(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void * arg
+ );
+
+#define GRSPW_DRIVER_TABLE_ENTRY \
+ { grspw_initialize, \
+ grspw_open, \
+ grspw_close, \
+ grspw_read, \
+ grspw_write, \
+ grspw_control }
+
+static rtems_driver_address_table grspw_driver = GRSPW_DRIVER_TABLE_ENTRY;
+static int grspw_driver_io_registered = 0;
+static rtems_device_major_number grspw_driver_io_major = 0;
+
+/******************* Driver manager interface ***********************/
+
+/* Driver prototypes */
+int grspw_register_io(rtems_device_major_number *m);
+int grspw_device_init(GRSPW_DEV *pDev);
+
+int grspw_init2(struct drvmgr_dev *dev);
+int grspw_init3(struct drvmgr_dev *dev);
+
+struct drvmgr_drv_ops grspw_ops =
+{
+ .init = {NULL, grspw_init2, grspw_init3, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+struct amba_dev_id grspw_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_SPW},
+ {VENDOR_GAISLER, GAISLER_SPW2},
+ {VENDOR_GAISLER, GAISLER_SPW2_DMA},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info grspw_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_GRSPW_ID, /* Driver ID */
+ "GRSPW_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &grspw_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+ },
+ &grspw_ids[0]
+};
+
/* Register the GRSPW driver with the driver manager. Presumably called
 * once during BSP/driver-table setup before drvmgr initialization.
 */
void grspw_register_drv (void)
{
	SPACEWIRE_DBG("Registering GRSPW driver\n");
	drvmgr_drv_register(&grspw_drv_info.general);
}
+
+int grspw_init2(struct drvmgr_dev *dev)
+{
+ GRSPW_DEV *priv;
+
+ SPACEWIRE_DBG("GRSPW[%d] on bus %s\n", dev->minor_drv,
+ dev->parent->dev->name);
+ priv = dev->priv = grlib_calloc(1, sizeof(*priv));
+ if ( !priv )
+ return DRVMGR_NOMEM;
+ priv->dev = dev;
+
+ /* This core will not find other cores, so we wait for init2() */
+
+ return DRVMGR_OK;
+}
+
+int grspw_init3(struct drvmgr_dev *dev)
+{
+ GRSPW_DEV *priv;
+ char prefix[32];
+ rtems_status_code status;
+
+ priv = dev->priv;
+
+ /* Do initialization */
+
+ if ( grspw_driver_io_registered == 0) {
+ /* Register the I/O driver only once for all cores */
+ if ( grspw_register_io(&grspw_driver_io_major) ) {
+ /* Failed to register I/O driver */
+ free(dev->priv);
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+
+ grspw_driver_io_registered = 1;
+ }
+
+ /* I/O system registered and initialized
+ * Now we take care of device initialization.
+ */
+
+ /* Get frequency in Hz */
+ if ( drvmgr_freq_get(dev, DEV_APB_SLV, &priv->core_freq_khz) ) {
+ return DRVMGR_FAIL;
+ }
+ /* Convert from Hz -> kHz */
+ priv->core_freq_khz = priv->core_freq_khz / 1000;
+
+ if ( grspw_device_init(priv) ) {
+ return DRVMGR_FAIL;
+ }
+
+ /* Get Filesystem name prefix */
+ prefix[0] = '\0';
+ if ( drvmgr_get_dev_prefix(dev, prefix) ) {
+ /* Failed to get prefix, make sure of a unique FS name
+ * by using the driver minor.
+ */
+ sprintf(priv->devName, "/dev/grspw%d", dev->minor_drv);
+ } else {
+ /* Got special prefix, this means we have a bus prefix
+ * And we should use our "bus minor"
+ */
+ sprintf(priv->devName, "/dev/%sgrspw%d", prefix, dev->minor_bus);
+ }
+
+ /* Register Device */
+ status = rtems_io_register_name(priv->devName, grspw_driver_io_major, dev->minor_drv);
+ if (status != RTEMS_SUCCESSFUL) {
+ return DRVMGR_FAIL;
+ }
+
+ return DRVMGR_OK;
+}
+
+/******************* Driver Implementation ***********************/
+
+int grspw_register_io(rtems_device_major_number *m)
+{
+ rtems_status_code r;
+
+ if ((r = rtems_io_register_driver(0, &grspw_driver, m)) == RTEMS_SUCCESSFUL) {
+ SPACEWIRE_DBG("GRSPW driver successfully registered, major: %d\n", *m);
+ } else {
+ switch(r) {
+ case RTEMS_TOO_MANY:
+ printk("GRSPW rtems_io_register_driver failed: RTEMS_TOO_MANY\n");
+ return -1;
+ case RTEMS_INVALID_NUMBER:
+ printk("GRSPW rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n");
+ return -1;
+ case RTEMS_RESOURCE_IN_USE:
+ printk("GRSPW rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n");
+ return -1;
+ default:
+ printk("GRSPW rtems_io_register_driver failed\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
/* Initialize one GRSPW device from AMBA Plug&Play info: map registers,
 * detect the core generation, apply default and user-overridden buffer
 * configuration, allocate DMA buffers, create the RX/TX semaphores and
 * reset the hardware.
 *
 * Returns 0 on success, non-zero on failure.
 * NOTE(review): failure paths return -1 except the allocation failure
 * which returns RTEMS_NO_MEMORY; callers only test for non-zero, but
 * the mixed conventions are worth unifying.
 */
int grspw_device_init(GRSPW_DEV *pDev)
{
	struct amba_dev_info *ambadev;
	struct ambapp_core *pnpinfo;
	union drvmgr_key_value *value;

	/* Get device information from AMBA PnP information */
	ambadev = (struct amba_dev_info *)pDev->dev->businfo;
	if ( ambadev == NULL ) {
		return -1;
	}
	pnpinfo = &ambadev->info;
	pDev->irq = pnpinfo->irq;
	pDev->regs = (LEON3_SPACEWIRE_Regs_Map *)pnpinfo->apb_slv->start;
	pDev->minor = pDev->dev->minor_drv;

	/* Get SpaceWire core version */
	switch( pnpinfo->device ) {
		case GAISLER_SPW:
			pDev->core_ver = 1;
			break;
		case GAISLER_SPW2:
			pDev->core_ver = 2;
			break;
		case GAISLER_SPW2_DMA:
			pDev->core_ver = 3;
			break;
		default:
			return -1;
	}

	/* initialize the code with some resonable values,
	 * actual initialization is done later using ioctl(fd)
	 * on the opened device */
	pDev->config.rxmaxlen = SPACEWIRE_RXPCK_SIZE;
	pDev->txdbufsize = SPACEWIRE_TXD_SIZE;
	pDev->txhbufsize = SPACEWIRE_TXH_SIZE;
	pDev->rxbufsize = SPACEWIRE_RXPCK_SIZE;
	pDev->txbufcnt = SPACEWIRE_TXBUFS_NR;
	pDev->rxbufcnt = SPACEWIRE_RXBUFS_NR;

	/* No buffers allocated yet */
	pDev->_ptr_rxbuf0 = 0;
	pDev->ptr_rxbuf0 = 0;
	pDev->ptr_txdbuf0 = 0;
	pDev->ptr_txhbuf0 = 0;
	pDev->ptr_bd0 = 0;
	pDev->rx_dma_area = 0;
	pDev->tx_data_dma_area = 0;
	pDev->tx_hdr_dma_area = 0;
	pDev->bd_dma_area = 0;

	/* Get Configuration from Bus resources (Let user override defaults) */

	value = drvmgr_dev_key_get(pDev->dev, "txBdCnt", DRVMGR_KT_INT);
	if ( value )
		pDev->txbufcnt = value->i;

	value = drvmgr_dev_key_get(pDev->dev, "rxBdCnt", DRVMGR_KT_INT);
	if ( value )
		pDev->rxbufcnt = value->i;

	value = drvmgr_dev_key_get(pDev->dev, "txDataSize", DRVMGR_KT_INT);
	if ( value )
		pDev->txdbufsize = value->i;

	value = drvmgr_dev_key_get(pDev->dev, "txHdrSize", DRVMGR_KT_INT);
	if ( value )
		pDev->txhbufsize = value->i;

	value = drvmgr_dev_key_get(pDev->dev, "rxPktSize", DRVMGR_KT_INT);
	if ( value ) {
		pDev->rxbufsize = value->i;
		pDev->config.rxmaxlen = pDev->rxbufsize;
	}

	value = drvmgr_dev_key_get(pDev->dev, "rxDmaArea", DRVMGR_KT_INT);
	if ( value )
		pDev->rx_dma_area = value->i;

	value = drvmgr_dev_key_get(pDev->dev, "txDataDmaArea", DRVMGR_KT_INT);
	if ( value )
		pDev->tx_data_dma_area = value->i;

	value = drvmgr_dev_key_get(pDev->dev, "txHdrDmaArea", DRVMGR_KT_INT);
	if ( value )
		pDev->tx_hdr_dma_area = value->i;

	value = drvmgr_dev_key_get(pDev->dev, "bdDmaArea", DRVMGR_KT_INT);
	if ( value )
		pDev->bd_dma_area = value->i;

	if (grspw_buffer_alloc(pDev))
		return RTEMS_NO_MEMORY;

	/* Create semaphores, signalled by the ISR when descriptors free up.
	 * NOTE(review): the create results are not checked here. */
	rtems_semaphore_create(
		rtems_build_name('T', 'x', 'S', '0' + pDev->minor),
		0,
		RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | RTEMS_NO_INHERIT_PRIORITY | \
		RTEMS_NO_PRIORITY_CEILING,
		0,
		&(pDev->txsp));

	rtems_semaphore_create(
		rtems_build_name('R', 'x', 'S', '0' + pDev->minor),
		0,
		RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | RTEMS_NO_INHERIT_PRIORITY | \
		RTEMS_NO_PRIORITY_CEILING,
		0,
		&(pDev->rxsp));

	grspw_hw_init(pDev);

	return 0;
}
+
/* Return the number of core clock cycles covering at least 6.4 us,
 * truncated to the timer register's 12-bit field.
 */
static unsigned int grspw_calc_timer64(int freq_khz)
{
	/* 6.4 us = 64/10000 ms -> freq_khz*64/10000 cycles, rounded up */
	unsigned int cycles = (freq_khz * 64 + 9999) / 10000;

	return cycles & 0xfff;
}
+
/* Return the disconnect-timeout setting: the number of core clock
 * cycles covering at least 850 ns, minus 3, truncated to the register's
 * 10-bit field.
 *
 * Fix: at very low clock frequencies the cycle count can be below 3;
 * the original unsigned subtraction then wrapped and, after masking,
 * produced a large bogus value (e.g. 0x3fe at 1 MHz). Clamp to 0
 * instead.
 */
static unsigned int grspw_calc_disconnect(int freq_khz)
{
	/* 850 ns = 85/100000 ms -> freq_khz*85/100000 cycles, rounded up */
	unsigned int cycles = (freq_khz * 85 + 99999) / 100000;

	if (cycles < 3)
		return 0;
	return (cycles - 3) & 0x3ff;
}
+
+static int grspw_buffer_alloc(GRSPW_DEV *pDev)
+{
+ /* RX DMA AREA */
+ if (pDev->rx_dma_area & 1) {
+ /* Address given in remote address */
+ pDev->ptr_rxbuf0_remote = (char *)(pDev->rx_dma_area & ~1);
+ drvmgr_translate_check(
+ pDev->dev,
+ DMAMEM_TO_CPU,
+ (void *)pDev->ptr_rxbuf0_remote,
+ (void **)&pDev->ptr_rxbuf0,
+ pDev->rxbufsize * pDev->rxbufcnt);
+
+ } else {
+ if (pDev->rx_dma_area == 0) {
+ if (pDev->_ptr_rxbuf0)
+ free((void *)pDev->_ptr_rxbuf0);
+ pDev->_ptr_rxbuf0 = (unsigned int) grlib_malloc(
+ pDev->rxbufsize * pDev->rxbufcnt+4);
+ pDev->ptr_rxbuf0 = (char *)((pDev->_ptr_rxbuf0+7)&~7);
+ if ( !pDev->ptr_rxbuf0 )
+ return 1;
+ } else {
+ pDev->ptr_rxbuf0 = (char *)pDev->rx_dma_area;
+ }
+ drvmgr_translate_check(
+ pDev->dev,
+ CPUMEM_TO_DMA,
+ (void *)pDev->ptr_rxbuf0,
+ (void **)&pDev->ptr_rxbuf0_remote,
+ pDev->rxbufsize * pDev->rxbufcnt);
+ }
+
+ /* TX-DATA DMA AREA */
+ if (pDev->tx_data_dma_area & 1) {
+ /* Address given in remote address */
+ pDev->ptr_txdbuf0_remote = (char*)(pDev->tx_data_dma_area & ~1);
+ drvmgr_translate_check(
+ pDev->dev,
+ DMAMEM_TO_CPU,
+ (void *)pDev->ptr_txdbuf0_remote,
+ (void **)&pDev->ptr_txdbuf0,
+ pDev->txdbufsize * pDev->txbufcnt);
+ } else {
+ if (pDev->tx_data_dma_area == 0) {
+ if (pDev->ptr_txdbuf0)
+ free(pDev->ptr_txdbuf0);
+ pDev->ptr_txdbuf0 = (char *) grlib_malloc(
+ pDev->txdbufsize * pDev->txbufcnt);
+ if (!pDev->ptr_txdbuf0)
+ return 1;
+ } else {
+ pDev->ptr_txdbuf0 = (char *)pDev->tx_data_dma_area;
+ }
+ drvmgr_translate_check(
+ pDev->dev,
+ CPUMEM_TO_DMA,
+ (void *)pDev->ptr_txdbuf0,
+ (void **)&pDev->ptr_txdbuf0_remote,
+ pDev->txdbufsize * pDev->txbufcnt);
+ }
+
+ /* TX-HEADER DMA AREA */
+ if (pDev->tx_hdr_dma_area & 1) {
+ /* Address given in remote address */
+ pDev->ptr_txhbuf0_remote = (char *)(pDev->tx_hdr_dma_area & ~1);
+ drvmgr_translate_check(
+ pDev->dev,
+ DMAMEM_TO_CPU,
+ (void *)pDev->ptr_txhbuf0_remote,
+ (void **)&pDev->ptr_txhbuf0,
+ pDev->txhbufsize * pDev->txbufcnt);
+ } else {
+ if (pDev->tx_hdr_dma_area == 0) {
+ if (pDev->ptr_txhbuf0)
+ free(pDev->ptr_txhbuf0);
+ pDev->ptr_txhbuf0 = (char *) grlib_malloc(
+ pDev->txhbufsize * pDev->txbufcnt);
+ if (!pDev->ptr_txhbuf0)
+ return 1;
+ } else {
+ pDev->ptr_txhbuf0 = (char *)pDev->tx_hdr_dma_area;
+ }
+ drvmgr_translate_check(
+ pDev->dev,
+ CPUMEM_TO_DMA,
+ (void *)pDev->ptr_txhbuf0,
+ (void **)&pDev->ptr_txhbuf0_remote,
+ pDev->txhbufsize * pDev->txbufcnt);
+ }
+
+ /* DMA DESCRIPTOR TABLES */
+ if (pDev->bd_dma_area & 1) {
+ /* Address given in remote address */
+ pDev->ptr_bd0_remote = (char *)(pDev->bd_dma_area & ~1);
+ drvmgr_translate_check(
+ pDev->dev,
+ DMAMEM_TO_CPU,
+ (void *)pDev->ptr_bd0_remote,
+ (void **)&pDev->ptr_bd0,
+ 2 * SPACEWIRE_BDTABLE_SIZE);
+ } else {
+ if (pDev->bd_dma_area == 0) {
+ if (pDev->_ptr_bd0)
+ free(pDev->_ptr_bd0);
+ pDev->_ptr_bd0 =
+ rtems_heap_allocate_aligned_with_boundary(
+ SPACEWIRE_BDTABLE_SIZE*2, 1024, 0);
+ if (!pDev->_ptr_bd0)
+ return 1;
+ pDev->ptr_bd0 = (char *)pDev->_ptr_bd0;
+ } else {
+ pDev->ptr_bd0 = (char *)pDev->bd_dma_area;
+ }
+ drvmgr_translate_check(
+ pDev->dev,
+ CPUMEM_TO_DMA,
+ (void *)pDev->ptr_bd0,
+ (void **)&pDev->ptr_bd0_remote,
+ 2 * SPACEWIRE_BDTABLE_SIZE);
+ }
+
+ return 0;
+}
+
/* GRSPW interrupt service routine.
 *
 * Handles, in order: time-code reception (first, for lowest latency),
 * status/error accounting, reaping of completed TX descriptors, and RX
 * packet-received notification. Releases the TX/RX semaphores to wake
 * blocked readers/writers.
 */
static void grspw_interrupt(void *arg)
{
	GRSPW_DEV *pDev = (GRSPW_DEV *)arg;
	int dmactrl;
	int status;
	int ctrl;
	unsigned int timecode;

	/* Read and acknowledge the error/status bits (write-1-to-clear) */
	status = SPW_STATUS_READ(pDev);
	/*SPW_STATUS_WRITE(pDev, SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE | SPW_STATUS_WE | SPW_STATUS_IA | SPW_STATUS_EE | SPW_STATUS_TO);*/
	SPW_STATUS_WRITE(pDev, status & (SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE | SPW_STATUS_WE | SPW_STATUS_IA | SPW_STATUS_EE));

	/* Make sure to put the timecode handling first in order to get the smallest
	 * possible interrupt latency
	 */
	if ( (status & SPW_STATUS_TO) && (grspw_timecode_callback != NULL) ) {
		/* Timecode received. Let custom function handle this */
		SPW_STATUS_WRITE(pDev, SPW_STATUS_TO);
		timecode = SPW_READ(&pDev->regs->time);
		(grspw_timecode_callback)(pDev,pDev->regs,pDev->minor,timecode);
	}

	/* Clear SPW_DMACTRL_PR if set (write-back clears the pending bits) */
	dmactrl = SPW_READ(&pDev->regs->dma0ctrl);
	/*SPW_WRITE(&pDev->regs->dma0ctrl, dmactrl | SPW_DMACTRL_PR);*/
	SPW_WRITE(&pDev->regs->dma0ctrl, dmactrl);

	/* If linkinterrupts are enabled check if it was a linkerror irq and then send an event to the
	   process set in the config */
	if (pDev->config.link_err_irq) {
		if (status & (SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE | SPW_STATUS_WE)) {
			rtems_event_send(pDev->config.event_id, SPW_LINKERR_EVENT);
			if (pDev->config.disable_err) {
				/* disable link*/
				SPW_CTRL_WRITE(pDev, (SPW_CTRL_READ(pDev) & 0xFFFFFFFC) | SPW_CTRL_LINKDISABLED);
				pDev->config.linkdisabled = 1;
				pDev->config.linkstart = 0;
				pDev->running = 0;
			}
		}
	}
	/* Update per-error statistics counters */
	if (status & SPW_STATUS_CE) {
		pDev->stat.credit_err++;
	}
	if (status & SPW_STATUS_ER) {
		pDev->stat.escape_err++;
	}
	if (status & SPW_STATUS_DE) {
		pDev->stat.disconnect_err++;
	}
	if (status & SPW_STATUS_PE) {
		pDev->stat.parity_err++;
	}
	if (status & SPW_STATUS_WE) {
		pDev->stat.write_sync_err++;
	}
	if (status & SPW_STATUS_IA) {
		pDev->stat.invalid_address++;
	}
	if (status & SPW_STATUS_EE) {
		pDev->stat.early_ep++;
	}

	/* Check for tx interrupts */
	while( (pDev->tx_sent != pDev->tx_cur) || pDev->tx_all_in_use) {
		/* Has this descriptor been sent? */
		ctrl = SPW_READ((volatile void *)&pDev->tx[pDev->tx_sent].ctrl);
		if ( ctrl & SPW_TXBD_EN ) {
			break;
		}
		/* Yes, increment status counters & tx_sent so we can use this descriptor to send more packets with */
		pDev->stat.packets_sent++;

		rtems_semaphore_release(pDev->txsp);

		if ( ctrl & SPW_TXBD_LE ) {
			pDev->stat.tx_link_err++;
		}

		/* step to next descriptor */
		pDev->tx_sent = (pDev->tx_sent + 1) % pDev->txbufcnt;
		pDev->tx_all_in_use = 0; /* not all of the descriptors can be in use since we just freed one. */
	}

	/* Check for rx interrupts (a packet has been received) */
	if (dmactrl & SPW_DMACTRL_PR) {
		rtems_semaphore_release(pDev->rxsp);
	}
}
+
+static rtems_device_driver grspw_initialize(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void *arg
+)
+{
+ /* Initialize device-common data structures here */
+ return RTEMS_SUCCESSFUL;
+}
+
/* Open entry point: look up the device by minor, mark it open, zero the
 * statistics, reset the runtime configuration to defaults, reset the
 * core and read back its default configuration.
 *
 * Returns RTEMS_INVALID_NAME for a bad minor, RTEMS_RESOURCE_IN_USE if
 * already open, otherwise RTEMS_SUCCESSFUL.
 *
 * NOTE(review): the TX/RX semaphores are created in grspw_device_init()
 * but deleted in grspw_close(); re-opening after a close appears to use
 * deleted semaphore ids -- verify intended open/close lifecycle.
 */
static rtems_device_driver grspw_open(
	rtems_device_major_number major,
	rtems_device_minor_number minor,
	void * arg
	)
{
	GRSPW_DEV *pDev;
	struct drvmgr_dev *dev;
	SPACEWIRE_DBGC(DBGSPW_IOCALLS, "open [%i,%i]\n", major, minor);

	if ( drvmgr_get_dev(&grspw_drv_info.general, minor, &dev) ) {
		SPACEWIRE_DBG("Wrong minor %d\n", minor);
		return RTEMS_INVALID_NAME;
	}
	pDev = (GRSPW_DEV *)dev->priv;

	if ( pDev->open )
		return RTEMS_RESOURCE_IN_USE;

	/* Mark device open */
	pDev->open = 1;

	/* Clear all statistics counters */
	pDev->stat.tx_link_err = 0;
	pDev->stat.rx_rmap_header_crc_err = 0;
	pDev->stat.rx_rmap_data_crc_err = 0;
	pDev->stat.rx_eep_err = 0;
	pDev->stat.rx_truncated = 0;
	pDev->stat.parity_err = 0;
	pDev->stat.escape_err = 0;
	pDev->stat.credit_err = 0;
	pDev->stat.write_sync_err = 0;
	pDev->stat.disconnect_err = 0;
	pDev->stat.early_ep = 0;
	pDev->stat.invalid_address = 0;
	pDev->stat.packets_sent = 0;
	pDev->stat.packets_received = 0;

	/* Reset runtime configuration to defaults; the user sets these
	 * via ioctl() after open */
	pDev->config.rm_prot_id = 0;
	pDev->config.keep_source = 0;
	pDev->config.check_rmap_err = 0;
	pDev->config.tx_blocking = 0;
	pDev->config.tx_block_on_full = 0;
	pDev->config.rx_blocking = 0;
	pDev->config.disable_err = 0;
	pDev->config.link_err_irq = 0;
	pDev->config.event_id = 0;
	pDev->config.rtimeout = 0;

	pDev->running = 0;
	pDev->core_freq_khz = 0;

	/* Reset Core */
	grspw_hw_reset(pDev);

	/* Read default configuration */
	grspw_hw_read_config(pDev);

	return RTEMS_SUCCESSFUL;
}
+
/* Close entry point: delete the blocking semaphores, stop RX/TX DMA,
 * reset the core and mark the device closed.
 *
 * NOTE(review): the semaphores deleted here are created in
 * grspw_device_init(), not in grspw_open() -- see the note on
 * grspw_open() regarding re-open after close.
 */
static rtems_device_driver grspw_close(
	rtems_device_major_number major,
	rtems_device_minor_number minor,
	void * arg
	)
{
	GRSPW_DEV *pDev;
	struct drvmgr_dev *dev;

	if ( drvmgr_get_dev(&grspw_drv_info.general, minor, &dev) ) {
		return RTEMS_INVALID_NAME;
	}
	pDev = (GRSPW_DEV *)dev->priv;

	SPACEWIRE_DBGC(DBGSPW_IOCALLS, "close [%i,%i]\n", major, minor);
	rtems_semaphore_delete(pDev->txsp);
	rtems_semaphore_delete(pDev->rxsp);

	/* Stop both RX and TX DMA channels, then reset the core */
	grspw_hw_stop(pDev,1,1);

	grspw_hw_reset(pDev);

	/* Mark device closed - not open */
	pDev->open = 0;

	return RTEMS_SUCCESSFUL;
}
+
/* Read entry point: copy one received packet into the caller's buffer.
 *
 * Blocks (optionally with pDev->config.rtimeout ticks timeout) on the
 * RX semaphore when configured for blocking reads and no packet is
 * available; otherwise returns RTEMS_RESOURCE_IN_USE immediately.
 * On success the number of bytes copied is stored in
 * rw_args->bytes_moved.
 *
 * Returns RTEMS_INVALID_NAME for bad minor/arguments or when the link
 * is not running, RTEMS_TIMEOUT on timeout, RTEMS_SUCCESSFUL otherwise.
 */
static rtems_device_driver grspw_read(
	rtems_device_major_number major,
	rtems_device_minor_number minor,
	void * arg
	)
{
	rtems_libio_rw_args_t *rw_args;
	unsigned int count = 0;
	GRSPW_DEV *pDev;
	struct drvmgr_dev *dev;
	int status;

	if ( drvmgr_get_dev(&grspw_drv_info.general, minor, &dev) ) {
		return RTEMS_INVALID_NAME;
	}
	pDev = (GRSPW_DEV *)dev->priv;

	rw_args = (rtems_libio_rw_args_t *) arg;

	/* is link up? */
	if ( !pDev->running ) {
		return RTEMS_INVALID_NAME;
	}

	if ((rw_args->count < 1) || (rw_args->buffer == NULL)) {
		return RTEMS_INVALID_NAME;
	}

	SPACEWIRE_DBGC(DBGSPW_IOCALLS, "read [%i,%i]: buf:0x%x len:%i \n", major, minor, (unsigned int)rw_args->buffer, rw_args->count);

	while ( (count = grspw_hw_receive(pDev, rw_args->buffer, rw_args->count)) == 0) {
		/* wait a moment for any descriptors to get available
		 *
		 * Semaphore is signaled by interrupt handler
		 */
		if (pDev->config.rx_blocking) {
			SPACEWIRE_DBG2("Rx blocking\n");
			if ( pDev->config.rtimeout ) {
				status = rtems_semaphore_obtain(pDev->rxsp, RTEMS_WAIT, pDev->config.rtimeout);
				if ( status == RTEMS_TIMEOUT )
					return RTEMS_TIMEOUT;
			} else {
				rtems_semaphore_obtain(pDev->rxsp, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
			}
		} else {
			SPACEWIRE_DBG2("Rx non blocking\n");
			return RTEMS_RESOURCE_IN_USE;
		}
	}

#ifdef DEBUG_SPACEWIRE_ONOFF
	/* Optional hex/ASCII dump of the received packet */
	if (DEBUG_SPACEWIRE_FLAGS & DBGSPW_DUMP) {
		int k;
		for (k = 0; k < count; k++){
			if (k % 16 == 0) {
				printf ("\n");
			}
			printf ("%.2x(%c) ", rw_args->buffer[k] & 0xff, isprint(rw_args->buffer[k] & 0xff) ? rw_args->buffer[k] & 0xff : ' ');
		}
		printf ("\n");
	}
#endif

	rw_args->bytes_moved = count;
	return RTEMS_SUCCESSFUL;
}
+
/* Write entry point: queue one packet (data only, no header) for
 * transmission.
 *
 * When all TX descriptors are busy, blocks on the TX semaphore if
 * tx_block_on_full is set, otherwise returns RTEMS_RESOURCE_IN_USE.
 * The number of queued bytes is stored in rw_args->bytes_moved.
 *
 * Returns RTEMS_INVALID_NAME for bad minor/arguments, when the link is
 * not running, or when count exceeds the TX data buffer size;
 * RTEMS_SUCCESSFUL otherwise.
 */
static rtems_device_driver grspw_write(
	rtems_device_major_number major,
	rtems_device_minor_number minor,
	void * arg
)
{
	rtems_libio_rw_args_t *rw_args;
	GRSPW_DEV *pDev;
	struct drvmgr_dev *dev;

	if ( drvmgr_get_dev(&grspw_drv_info.general, minor, &dev) ) {
		return RTEMS_INVALID_NAME;
	}
	pDev = (GRSPW_DEV *)dev->priv;

	rw_args = (rtems_libio_rw_args_t *) arg;
	SPACEWIRE_DBGC(DBGSPW_IOCALLS, "write [%i,%i]: buf:0x%x len:%i\n", major, minor, (unsigned int)rw_args->buffer, rw_args->count);

	/* is link up? */
	if ( !pDev->running ) {
		return RTEMS_INVALID_NAME;
	}

	if ((rw_args->count > pDev->txdbufsize) || (rw_args->count < 1) || (rw_args->buffer == NULL)) {
		return RTEMS_INVALID_NAME;
	}

	/* grspw_hw_send() returns 0 when no TX descriptor is free;
	 * the semaphore is released by the ISR when one completes */
	while ((rw_args->bytes_moved = grspw_hw_send(pDev, 0, NULL, rw_args->count, rw_args->buffer, 0)) == 0) {
		if (pDev->config.tx_block_on_full == 1) {
			SPACEWIRE_DBG2("Tx Block on full \n");
			rtems_semaphore_obtain(pDev->txsp, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
		} else {
			SPACEWIRE_DBG2("Tx non blocking return when full \n");
			return RTEMS_RESOURCE_IN_USE;
		}
	}
	return RTEMS_SUCCESSFUL;
}
+
+static rtems_device_driver grspw_control(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void * arg
+ )
+{
+ spw_ioctl_pkt_send *args;
+ spw_ioctl_packetsize *ps;
+ int status;
+ unsigned int tmp,mask,nodeaddr,nodemask;
+ int timeout;
+ rtems_device_driver ret;
+ rtems_libio_ioctl_args_t *ioarg = (rtems_libio_ioctl_args_t *) arg;
+ GRSPW_DEV *pDev;
+ struct drvmgr_dev *dev;
+
+ SPACEWIRE_DBGC(DBGSPW_IOCALLS, "ctrl [%i,%i]\n", major, minor);
+
+ if ( drvmgr_get_dev(&grspw_drv_info.general, minor, &dev) ) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev = (GRSPW_DEV *)dev->priv;
+
+ if (!ioarg)
+ return RTEMS_INVALID_NAME;
+
+ ioarg->ioctl_return = 0;
+ switch(ioarg->command) {
+ case SPACEWIRE_IOCTRL_SET_NODEADDR:
+ /*set node address*/
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_NODEADDR %i\n",(unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 255) {
+ return RTEMS_INVALID_NAME;
+ }
+ nodeaddr = ((unsigned int)ioarg->buffer) & 0xff;
+ tmp = SPW_READ(&pDev->regs->nodeaddr);
+ tmp &= 0xffffff00; /* Remove old address */
+ tmp |= nodeaddr;
+ SPW_WRITE(&pDev->regs->nodeaddr, tmp);
+ if ((SPW_READ(&pDev->regs->nodeaddr)&0xff) != nodeaddr) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.nodeaddr = nodeaddr;
+ break;
+ case SPACEWIRE_IOCTRL_SET_NODEMASK:
+ /*set node address*/
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_NODEMASK %i\n",(unsigned int)ioarg->buffer);
+ if ( pDev->core_ver > 1 ){
+ if ((unsigned int)ioarg->buffer > 255) {
+ return RTEMS_INVALID_NAME;
+ }
+ nodemask = ((unsigned int)ioarg->buffer) & 0xff;
+ tmp = SPW_READ(&pDev->regs->nodeaddr);
+ tmp &= 0xffff00ff; /* Remove old mask */
+ tmp |= nodemask<<8;
+ SPW_WRITE(&pDev->regs->nodeaddr, tmp);
+ if (((SPW_READ(&pDev->regs->nodeaddr)>>8)&0xff) != nodemask) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.nodemask = nodemask;
+ }else{
+ SPACEWIRE_DBG("SPACEWIRE_IOCTRL_SET_NODEMASK: not implemented in GRSPW1 HW\n");
+ }
+ break;
+ case SPACEWIRE_IOCTRL_SET_RXBLOCK:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_RXBLOCK %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev->config.rx_blocking = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_DESTKEY:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_DESTKEY %i\n", (unsigned int)ioarg->buffer);
+ if (!pDev->config.is_rmap) {
+ return RTEMS_NOT_IMPLEMENTED;
+ }
+ if ((unsigned int)ioarg->buffer > 255) {
+ return RTEMS_INVALID_NAME;
+ }
+ SPW_WRITE(&pDev->regs->destkey, (unsigned int)ioarg->buffer);
+ if (SPW_READ(&pDev->regs->destkey) != (unsigned int)ioarg->buffer) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.destkey = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_CLKDIV:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_CLKDIV %i\n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 255) {
+ return RTEMS_INVALID_NAME;
+ }
+ if ( pDev->core_ver == 3 )
+ break;
+ tmp = SPW_READ(&pDev->regs->clkdiv);
+ tmp &= ~0xff; /* Remove old Clockdiv Setting */
+ tmp |= ((unsigned int)ioarg->buffer) & 0xff; /* add new clockdiv setting */
+ SPW_WRITE(&pDev->regs->clkdiv, tmp);
+ if (SPW_READ(&pDev->regs->clkdiv) != tmp) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.clkdiv = tmp;
+ break;
+ case SPACEWIRE_IOCTRL_SET_CLKDIVSTART:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_CLKDIVSTART %i\n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 255) {
+ return RTEMS_INVALID_NAME;
+ }
+ if ( pDev->core_ver == 3 )
+ break;
+ tmp = SPW_READ(&pDev->regs->clkdiv);
+ tmp &= ~0xff00; /* Remove old Clockdiv Start Setting */
+ tmp |= (((unsigned int)ioarg->buffer) & 0xff)<<8; /* add new clockdiv start setting */
+ SPW_WRITE(&pDev->regs->clkdiv, tmp);
+ if (SPW_READ(&pDev->regs->clkdiv) != tmp) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.clkdiv = tmp;
+ break;
+ case SPACEWIRE_IOCTRL_SET_TIMER:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_TIMER %i\n", (unsigned int)ioarg->buffer);
+ if ( pDev->core_ver <= 1 ) {
+ if ((unsigned int)ioarg->buffer > 4095) {
+ return RTEMS_INVALID_NAME;
+ }
+ SPW_WRITE(&pDev->regs->timer, (SPW_READ(&pDev->regs->timer) & 0xFFFFF000) | ((unsigned int)ioarg->buffer & 0xFFF));
+ if ((SPW_READ(&pDev->regs->timer) & 0xFFF) != (unsigned int)ioarg->buffer) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.timer = (unsigned int)ioarg->buffer;
+ }else{
+ SPACEWIRE_DBG("SPACEWIRE_IOCTRL_SET_TIMER: removed in GRSPW2 HW\n");
+ }
+ break;
+ case SPACEWIRE_IOCTRL_SET_DISCONNECT:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_DISCONNECT %i\n", (unsigned int)ioarg->buffer);
+ if ( pDev->core_ver <= 1 ) {
+ if ((unsigned int)ioarg->buffer > 1023) {
+ return RTEMS_INVALID_NAME;
+ }
+ SPW_WRITE(&pDev->regs->timer, (SPW_READ(&pDev->regs->timer) & 0xFFC00FFF) | (((unsigned int)ioarg->buffer & 0x3FF) << 12));
+ if (((SPW_READ(&pDev->regs->timer) >> 12) & 0x3FF) != (unsigned int)ioarg->buffer) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.disconnect = (unsigned int)ioarg->buffer;
+ }else{
+ SPACEWIRE_DBG("SPACEWIRE_IOCTRL_SET_DISCONNECT: not implemented for GRSPW2\n");
+ }
+ break;
+ case SPACEWIRE_IOCTRL_SET_PROMISCUOUS:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_PROMISCUOUS %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ SPW_CTRL_WRITE(pDev, SPW_CTRL_READ(pDev) | ((unsigned int)ioarg->buffer << 5));
+ if (((SPW_CTRL_READ(pDev) >> 5) & 1) != (unsigned int)ioarg->buffer) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.promiscuous = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_RMAPEN:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_RMAPEN %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ SPW_CTRL_WRITE(pDev, (SPW_CTRL_READ(pDev) & 0xFFFEFFFF) | ((unsigned int)ioarg->buffer << 16));
+ if (((SPW_CTRL_READ(pDev) >> 16) & 1) != (unsigned int)ioarg->buffer) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.rmapen = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_RMAPBUFDIS:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_RMAPBUFDIS %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ SPW_CTRL_WRITE(pDev, (SPW_CTRL_READ(pDev) & 0xFFFDFFFF) | ((unsigned int)ioarg->buffer << 17));
+ if (((SPW_CTRL_READ(pDev) >> 17) & 1) != (unsigned int)ioarg->buffer) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.rmapbufdis = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_CHECK_RMAP:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_CHECK_RMAP %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev->config.check_rmap_err = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_RM_PROT_ID:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_RM_PROT_ID %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev->config.rm_prot_id = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_KEEP_SOURCE:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_KEEP_SOURCE %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev->config.keep_source = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_TXBLOCK:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_TXBLOCK %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev->config.tx_blocking = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_TXBLOCK_ON_FULL:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_TXBLOCK_ON_FULL %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev->config.tx_block_on_full = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_DISABLE_ERR:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_DISABLE_ERR %i \n", (unsigned int)ioarg->buffer);
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ pDev->config.disable_err = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_LINK_ERR_IRQ:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_LINK_ERR_IRQ %i \n", (unsigned int)ioarg->buffer);
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "CTRL REG: %x\n", SPW_CTRL_READ(pDev));
+ if ((unsigned int)ioarg->buffer > 1) {
+ return RTEMS_INVALID_NAME;
+ }
+ tmp = (SPW_CTRL_READ(pDev) & 0xFFFFFDF7) | ((unsigned int)ioarg->buffer << 9);
+ if (tmp & (SPW_CTRL_LI|SPW_CTRL_TQ))
+ tmp |= SPW_CTRL_IE;
+ SPW_CTRL_WRITE(pDev, tmp);
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "CTRL REG: %x\n", SPW_CTRL_READ(pDev));
+ if (((SPW_CTRL_READ(pDev) >> 9) & 1) != (unsigned int)ioarg->buffer) {
+ return RTEMS_IO_ERROR;
+ }
+ pDev->config.link_err_irq = (unsigned int)ioarg->buffer;
+ break;
+ case SPACEWIRE_IOCTRL_SET_EVENT_ID:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "SPACEWIRE_IOCTRL_SET_EVENT_ID %i \n", (unsigned int)ioarg->buffer);
+ pDev->config.event_id = (rtems_id)ioarg->buffer;
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL, "Event id: %i\n", pDev->config.event_id);
+ break;
+
+ /* Change MAX Packet size by:
+ * - stop RX/TX (if on)
+ * - wait for hw to complete RX DMA (if on)
+ * - reallocate buffers with new size
+ * - tell hw about new size & start RX/TX again (if previously on)
+ */
+ case SPACEWIRE_IOCTRL_SET_PACKETSIZE:
+ if (ioarg->buffer == NULL)
+ return RTEMS_INVALID_NAME;
+ ps = (spw_ioctl_packetsize*) ioarg->buffer;
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_SET_RXPACKETSIZE %i \n", (unsigned int)ioarg->buffer);
+
+ tmp = pDev->running;
+
+ if ( pDev->running ){
+ /* Stop RX */
+ grspw_hw_stop(pDev,1,1);
+
+ /* If packetsize fails it is good to know if in running mode */
+ pDev->running = 0;
+
+			/* Wait for Receiver to finish pending DMA transfers if any */
+ grspw_hw_wait_rx_inactive(pDev);
+ }
+
+ /* Save new buffer sizes */
+ pDev->rxbufsize = ((ps->rxsize+7)&~7);
+ pDev->txdbufsize = ps->txdsize;
+ pDev->txhbufsize = ps->txhsize;
+ pDev->config.rxmaxlen = pDev->rxbufsize;
+
+ /* Free previous buffers & allocate buffers with new size */
+ if (grspw_buffer_alloc(pDev))
+ return RTEMS_NO_MEMORY;
+
+		/* if RX was active before, we reactivate it again */
+ if ( tmp ) {
+ if ( (status = grspw_hw_startup(pDev,-1)) != RTEMS_SUCCESSFUL ) {
+ return status;
+ }
+ pDev->running = 1;
+ }
+#if 0
+ /* Rewrite previous config which was wasted due to reset in hw_startup */
+ SPW_WRITE(&pDev->regs->nodeaddr, pDev->config.nodeaddr);
+ SPW_WRITE(&pDev->regs->destkey, pDev->config.destkey);
+ SPW_WRITE(&pDev->regs->clkdiv, pDev->config.clkdiv);
+ SPW_WRITE(&pDev->regs->timer, pDev->config.timer | ( (pDev->config.disconnect & 0x3FF) << 12) );
+ SPW_CTRL_WRITE(pDev, (SPW_CTRL_READ(pDev) & !(SPW_CTRL_LINKSTART | SPW_CTRL_PM | SPW_CTRL_RE | SPW_CTRL_RD | SPW_CTRL_TT | SPW_CTRL_TR)) | \
+ (pDev->config.promiscuous << 5) | (pDev->config.rmapen << 16) | (pDev->config.rmapbufdis << 17) | \
+ (pDev->config.linkdisabled) | (pDev->config.linkstart << 1));
+#endif
+ break;
+ case SPACEWIRE_IOCTRL_GET_CONFIG:
+ if (ioarg->buffer == NULL)
+ return RTEMS_INVALID_NAME;
+ SPACEWIRE_DBG2("SPACEWIRE_IOCTRL_GET_CONFIG \n");
+ (*(spw_config *)ioarg->buffer).nodeaddr = pDev->config.nodeaddr;
+ (*(spw_config *)ioarg->buffer).nodemask = pDev->config.nodemask;
+ (*(spw_config *)ioarg->buffer).destkey = pDev->config.destkey;
+ (*(spw_config *)ioarg->buffer).clkdiv = pDev->config.clkdiv;
+ (*(spw_config *)ioarg->buffer).rxmaxlen = pDev->config.rxmaxlen;
+ (*(spw_config *)ioarg->buffer).timer = pDev->config.timer;
+ (*(spw_config *)ioarg->buffer).disconnect = pDev->config.disconnect;
+ (*(spw_config *)ioarg->buffer).promiscuous = pDev->config.promiscuous;
+ (*(spw_config *)ioarg->buffer).rmapen = pDev->config.rmapen;
+ (*(spw_config *)ioarg->buffer).rmapbufdis = pDev->config.rmapbufdis;
+ (*(spw_config *)ioarg->buffer).check_rmap_err = pDev->config.check_rmap_err;
+ (*(spw_config *)ioarg->buffer).rm_prot_id = pDev->config.rm_prot_id;
+ (*(spw_config *)ioarg->buffer).tx_blocking = pDev->config.tx_blocking;
+ (*(spw_config *)ioarg->buffer).disable_err = pDev->config.disable_err;
+ (*(spw_config *)ioarg->buffer).link_err_irq = pDev->config.link_err_irq;
+ (*(spw_config *)ioarg->buffer).event_id = pDev->config.event_id;
+ (*(spw_config *)ioarg->buffer).is_rmap = pDev->config.is_rmap;
+ (*(spw_config *)ioarg->buffer).is_rmapcrc = pDev->config.is_rmapcrc;
+ (*(spw_config *)ioarg->buffer).is_rxunaligned = pDev->config.is_rxunaligned;
+ (*(spw_config *)ioarg->buffer).linkdisabled = pDev->config.linkdisabled;
+ (*(spw_config *)ioarg->buffer).linkstart = pDev->config.linkstart;
+ (*(spw_config *)ioarg->buffer).rx_blocking = pDev->config.rx_blocking;
+ (*(spw_config *)ioarg->buffer).tx_block_on_full = pDev->config.tx_block_on_full;
+ (*(spw_config *)ioarg->buffer).keep_source = pDev->config.keep_source;
+ (*(spw_config *)ioarg->buffer).rtimeout = pDev->config.rtimeout;
+ break;
+ case SPACEWIRE_IOCTRL_GET_LINK_STATUS:
+ SPACEWIRE_DBGC(DBGSPW_IOCTRL,"SPACEWIRE_IOCTRL_GET_STATUS=%i \n", (unsigned int)((SPW_STATUS_READ(pDev) >> 21) & 0x7));
+ *(unsigned int *)ioarg->buffer = (unsigned int )((SPW_STATUS_READ(pDev) >> 21) & 0x7);
+ break;
+ case SPACEWIRE_IOCTRL_GET_STATISTICS:
+ if (ioarg->buffer == NULL)
+ return RTEMS_INVALID_NAME;
+ SPACEWIRE_DBG2("SPACEWIRE_IOCTRL_GET_STATISTICS \n");
+ (*(spw_stats *)ioarg->buffer).tx_link_err = pDev->stat.tx_link_err;
+ (*(spw_stats *)ioarg->buffer).rx_rmap_header_crc_err = pDev->stat.rx_rmap_header_crc_err;
+ (*(spw_stats *)ioarg->buffer).rx_rmap_data_crc_err = pDev->stat.rx_rmap_data_crc_err;
+ (*(spw_stats *)ioarg->buffer).rx_eep_err = pDev->stat.rx_eep_err;
+ (*(spw_stats *)ioarg->buffer).rx_truncated = pDev->stat.rx_truncated;
+ (*(spw_stats *)ioarg->buffer).parity_err = pDev->stat.parity_err;
+ (*(spw_stats *)ioarg->buffer).escape_err = pDev->stat.escape_err;
+ (*(spw_stats *)ioarg->buffer).credit_err = pDev->stat.credit_err;
+ (*(spw_stats *)ioarg->buffer).write_sync_err = pDev->stat.write_sync_err;
+ (*(spw_stats *)ioarg->buffer).disconnect_err = pDev->stat.disconnect_err;
+ (*(spw_stats *)ioarg->buffer).early_ep = pDev->stat.early_ep;
+ (*(spw_stats *)ioarg->buffer).invalid_address = pDev->stat.invalid_address;
+ (*(spw_stats *)ioarg->buffer).packets_sent = pDev->stat.packets_sent;
+ (*(spw_stats *)ioarg->buffer).packets_received = pDev->stat.packets_received;
+ break;
+ case SPACEWIRE_IOCTRL_CLR_STATISTICS:
+ SPACEWIRE_DBG2("SPACEWIRE_IOCTRL_CLR_STATISTICS \n");
+ pDev->stat.tx_link_err = 0;
+ pDev->stat.rx_rmap_header_crc_err = 0;
+ pDev->stat.rx_rmap_data_crc_err = 0;
+ pDev->stat.rx_eep_err = 0;
+ pDev->stat.rx_truncated = 0;
+ pDev->stat.parity_err = 0;
+ pDev->stat.escape_err = 0;
+ pDev->stat.credit_err = 0;
+ pDev->stat.write_sync_err = 0;
+ pDev->stat.disconnect_err = 0;
+ pDev->stat.early_ep = 0;
+ pDev->stat.invalid_address = 0;
+ pDev->stat.packets_sent = 0;
+ pDev->stat.packets_received = 0;
+ break;
+ case SPACEWIRE_IOCTRL_SEND:
+ if (ioarg->buffer == NULL)
+ return RTEMS_INVALID_NAME;
+ args = (spw_ioctl_pkt_send *)ioarg->buffer;
+ args->sent = 0;
+
+ /* is link up? */
+ if ( !pDev->running ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ SPACEWIRE_DBGC(DBGSPW_IOCALLS, "write [%i,%i]: hlen: %i hbuf:0x%x dlen:%i dbuf:0x%x\n", major, minor,
+ (unsigned int)args->hlen, (int)args->hdr,(unsigned int)args->dlen, (int)args->data);
+
+ if ((args->hlen > pDev->txhbufsize) || (args->dlen > pDev->txdbufsize) ||
+ ((args->hlen+args->dlen) < 1) ||
+ ((args->hdr == NULL) && (args->hlen != 0)) || ((args->data == NULL) && (args->dlen != 0))) {
+ return RTEMS_INVALID_NAME;
+ }
+ while ((args->sent = grspw_hw_send(pDev, args->hlen, args->hdr, args->dlen, args->data, args->options)) == 0) {
+ if (pDev->config.tx_block_on_full == 1) {
+ SPACEWIRE_DBG2("Tx Block on full \n");
+ rtems_semaphore_obtain(pDev->txsp, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ } else {
+ SPACEWIRE_DBG2("Tx non blocking return when full \n");
+ return RTEMS_RESOURCE_IN_USE;
+ }
+ }
+ SPACEWIRE_DBGC(DBGSPW_IOCALLS, "Tx ioctl return: %i \n", args->sent);
+ break;
+
+ case SPACEWIRE_IOCTRL_LINKDISABLE:
+ pDev->config.linkdisabled = 1;
+ pDev->config.linkstart = 0;
+ if ( pDev->core_ver != 3 ) {
+ SPW_CTRL_WRITE(pDev, (SPW_CTRL_READ(pDev) & 0xFFFFFFFC) | 1);
+ if ((SPW_CTRL_READ(pDev) & 3) != 1) {
+ return RTEMS_IO_ERROR;
+ }
+ }
+ break;
+
+ case SPACEWIRE_IOCTRL_LINKSTART:
+ pDev->config.linkdisabled = 0;
+ pDev->config.linkstart = 1;
+ if ( pDev->core_ver != 3 ) {
+ SPW_CTRL_WRITE(pDev, (SPW_CTRL_READ(pDev) & 0xFFFFFFFC) | 2);
+ if ((SPW_CTRL_READ(pDev) & 3) != 2) {
+ return RTEMS_IO_ERROR;
+ }
+ }
+ break;
+
+ /* Calculate timer register from GRSPW Core frequency
+ * Also possible to set disconnect and timer64 from
+ * - SPACEWIRE_IOCTRL_SET_DISCONNECT
+ * - SPACEWIRE_IOCTRL_SET_TIMER
+ */
+ case SPACEWIRE_IOCTRL_SET_COREFREQ:
+ pDev->core_freq_khz = (unsigned int)ioarg->buffer;
+ if ( pDev->core_freq_khz == 0 ){
+ /* Get GRSPW clock frequency from system clock.
+ * System clock has been read from timer inited
+ * by RTEMS loader (mkprom)
+ */
+ drvmgr_freq_get(pDev->dev, DEV_APB_SLV,
+ &pDev->core_freq_khz);
+ /* Convert from Hz -> kHz */
+ pDev->core_freq_khz = pDev->core_freq_khz / 1000;
+ }
+
+ /* Only GRSPW1 needs the Timer64 and Disconnect values
+ * GRSPW2 and onwards doesn't have this register.
+ */
+ if ( pDev->core_ver <= 1 ){
+ /* Calculate Timer64 & Disconnect */
+ pDev->config.timer = grspw_calc_timer64(pDev->core_freq_khz);
+ pDev->config.disconnect = grspw_calc_disconnect(pDev->core_freq_khz);
+
+ /* Set Timer64 & Disconnect Register */
+ SPW_WRITE(&pDev->regs->timer,
+ (SPW_READ(&pDev->regs->timer) & 0xFFC00000) |
+ ((pDev->config.disconnect & 0x3FF)<<12) |
+ (pDev->config.timer & 0xFFF));
+
+ /* Check that the registers were written successfully */
+ tmp = SPW_READ(&pDev->regs->timer) & 0x003fffff;
+ if ( ((tmp & 0xFFF) != pDev->config.timer) ||
+ (((tmp >> 12) & 0x3FF) != pDev->config.disconnect) ) {
+ return RTEMS_IO_ERROR;
+ }
+ }
+ break;
+
+ case SPACEWIRE_IOCTRL_START:
+ if ( pDev->running ){
+ return RTEMS_INVALID_NAME;
+ }
+
+ /* Get timeout from userspace
+ * timeout:
+		 * - -1 = Default timeout
+		 * - less than -1 = forever
+		 * - 0 = no wait, proceed if link is up
+		 * - positive = specifies number of system clock ticks that
+ * startup will wait for link to enter ready mode.
+ */
+ timeout = (int)ioarg->buffer;
+
+ if ( (ret=grspw_hw_startup(pDev,timeout)) != RTEMS_SUCCESSFUL ) {
+ return ret;
+ }
+ pDev->running = 1;
+ /* Register interrupt routine and unmask IRQ */
+ drvmgr_interrupt_register(pDev->dev, 0, "grspw", grspw_interrupt, pDev);
+
+ break;
+
+ case SPACEWIRE_IOCTRL_STOP:
+ if ( !pDev->running ){
+ return RTEMS_INVALID_NAME;
+ }
+ /* Disable interrupts */
+ drvmgr_interrupt_unregister(dev, 0, grspw_interrupt, pDev);
+
+ pDev->running = 0;
+
+ /* Stop Receiver and transmitter */
+ grspw_hw_stop(pDev,1,1);
+ break;
+
+ /* Set time-code control register bits, and Enables/Disables
+ * Time code interrupt, make sure to connect the callback
+ * grspw_timecode_callback if using interrupts.
+ */
+ case SPACEWIRE_IOCTRL_SET_TCODE_CTRL:
+ tmp = (unsigned int)ioarg->buffer;
+ mask = tmp & (SPACEWIRE_TCODE_CTRL_IE_MSK|SPACEWIRE_TCODE_CTRL_TT_MSK|SPACEWIRE_TCODE_CTRL_TR_MSK);
+ mask <<= 8;
+ tmp &= mask;
+ tmp = (SPW_CTRL_READ(pDev) & ~(mask | SPW_CTRL_IE)) | tmp;
+ if (tmp & (SPW_CTRL_LI|SPW_CTRL_TQ))
+ tmp |= SPW_CTRL_IE;
+ SPW_CTRL_WRITE(pDev, tmp);
+ break;
+
+ /* Set time register and optionaly send a time code */
+ case SPACEWIRE_IOCTRL_SET_TCODE:
+ tmp = (unsigned int)ioarg->buffer;
+ /* Set timecode register */
+ if (tmp & SPACEWIRE_TCODE_SET) {
+ SPW_WRITE(&pDev->regs->time,
+ ((SPW_READ(&pDev->regs->time) & ~(0xff)) |
+ (tmp & SPACEWIRE_TCODE_TCODE)));
+ }
+ /* Send timecode directly (tick-in) ? */
+ if (tmp & SPACEWIRE_TCODE_TX) {
+ SPW_CTRL_WRITE(pDev,
+ ((SPW_CTRL_READ(pDev) & ~(SPW_CTRL_TI)) | SPW_CTRL_TI));
+ }
+ break;
+
+ /* Read time code register and tick-out status bit */
+ case SPACEWIRE_IOCTRL_GET_TCODE:
+ tmp = (unsigned int)ioarg->buffer;
+ if ( !tmp ){
+ return RTEMS_INVALID_NAME;
+ }
+
+ /* Copy timecode register */
+ if (SPW_READ(&pDev->regs->status) & SPW_STATUS_TO) {
+ *(unsigned int *)tmp = (1 << 8) | SPW_READ(&pDev->regs->time);
+ } else {
+ *(unsigned int *)tmp = SPW_READ(&pDev->regs->time);
+ }
+ break;
+
+ case SPACEWIRE_IOCTRL_SET_READ_TIMEOUT:
+ pDev->config.rtimeout = (unsigned int)ioarg->buffer;
+ break;
+
+ default:
+ return RTEMS_NOT_IMPLEMENTED;
+ }
+
+ SPACEWIRE_DBGC(DBGSPW_IOCALLS, "SPW_IOCTRL Return\n");
+ return RTEMS_SUCCESSFUL;
+}
+
+/* ============================================================================== */
+
+/* Program the DMA channel 0 receive max-length register from the cached
+ * configuration and read it back to verify the write took effect.
+ *
+ * Returns 1 on success, 0 when the readback does not match.
+ */
+static int grspw_set_rxmaxlen(GRSPW_DEV *pDev) {
+	unsigned int readback;
+
+	SPW_WRITE(&pDev->regs->dma0rxmax, pDev->config.rxmaxlen); /*set rx maxlength*/
+	readback = SPW_READ(&pDev->regs->dma0rxmax);
+	return (readback == pDev->config.rxmaxlen) ? 1 : 0;
+}
+
+/* One-time hardware discovery: set up the descriptor table pointers (CPU
+ * and remote/DMA views of the same memory) and latch the core's
+ * capability bits from the control register. Always returns 0.
+ */
+static int grspw_hw_init(GRSPW_DEV *pDev) {
+	unsigned int capbits = SPW_CTRL_READ(pDev);
+
+	/* Descriptor tables as seen by the CPU: RX first, TX right after */
+	pDev->rx = (SPACEWIRE_RXBD *) pDev->ptr_bd0;
+	pDev->tx = (SPACEWIRE_TXBD *) (pDev->ptr_bd0 + SPACEWIRE_BDTABLE_SIZE);
+
+	/* The same tables as addressed by the GRSPW DMA engine */
+	pDev->rx_remote = (unsigned int)pDev->ptr_bd0_remote;
+	pDev->tx_remote = pDev->rx_remote + SPACEWIRE_BDTABLE_SIZE;
+
+	SPACEWIRE_DBG("hw_init [minor %i]\n", pDev->minor);
+
+	/* Capability flags reported by the core in the control register */
+	pDev->config.is_rmap = capbits & SPW_CTRL_RA;
+	pDev->config.is_rxunaligned = capbits & SPW_CTRL_RX;
+	pDev->config.is_rmapcrc = capbits & SPW_CTRL_RC;
+	return 0;
+}
+
+/* Poll until the SpaceWire link has reached run-state (state 5).
+ *
+ * timeout (ticks): -1 selects the default timeout, values below -1 mean
+ * wait forever, 0 and positive values bound the wait.
+ * Returns 0 when the link is up, 1 on timeout.
+ */
+static int grspw_hw_waitlink (GRSPW_DEV *pDev, int timeout)
+{
+	int elapsed;
+
+	/* No actual link interface on a DMA-only GRSPW2 connected to the
+	 * SPW router
+	 */
+	if (pDev->core_ver == 3)
+		return 0;
+
+	if ( timeout == -1 ){
+		/* Wait default timeout */
+		timeout = SPACEWIRE_INIT_TIMEOUT;
+	}
+
+	elapsed = 0;
+	while (SPW_LINKSTATE(SPW_STATUS_READ(pDev)) != 5) {
+		/* timeout < -1 means wait forever */
+		if ( (timeout >= -1) && (elapsed >= timeout) ){
+			/* timeout reached, return fail */
+			return 1;
+		}
+
+		/* Sleep for 10 ticks */
+		rtems_task_wake_after(10);
+		elapsed += 10;
+	}
+	return 0;
+}
+
+/* Reset the GRSPW core and restart the link interface.
+ *
+ * NOTE: the status register is intentionally written several times in a
+ * row; the extra bus accesses guarantee that the number of clock cycles
+ * required after reset has passed before the core is touched again.
+ */
+static void grspw_hw_reset(GRSPW_DEV *pDev)
+{
+	SPW_CTRL_WRITE(pDev, SPW_CTRL_RESET); /*reset core*/
+	SPW_STATUS_WRITE(pDev, SPW_STATUS_TO | SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE |
+			 SPW_STATUS_WE | SPW_STATUS_IA | SPW_STATUS_EE); /*clear status*/
+
+	/* Add extra writes to make sure we wait the number of clocks required
+	 * after reset
+	 */
+	SPW_STATUS_WRITE(pDev, SPW_STATUS_TO | SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE |
+			 SPW_STATUS_WE | SPW_STATUS_IA | SPW_STATUS_EE); /*clear status*/
+	SPW_STATUS_WRITE(pDev, SPW_STATUS_TO | SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE |
+			 SPW_STATUS_WE | SPW_STATUS_IA | SPW_STATUS_EE); /*clear status*/
+	SPW_STATUS_WRITE(pDev, SPW_STATUS_TO | SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE |
+			 SPW_STATUS_WE | SPW_STATUS_IA | SPW_STATUS_EE); /*clear status*/
+	SPW_STATUS_WRITE(pDev, SPW_STATUS_TO | SPW_STATUS_CE | SPW_STATUS_ER | SPW_STATUS_DE | SPW_STATUS_PE |
+			 SPW_STATUS_WE | SPW_STATUS_IA | SPW_STATUS_EE); /*clear status*/
+
+	SPW_CTRL_WRITE(pDev, SPW_CTRL_LINKSTART); /*start link core*/
+}
+
+/* Mirror the current hardware register contents into pDev->config so the
+ * software view matches what the core is actually programmed with.
+ */
+static void grspw_hw_read_config(GRSPW_DEV *pDev)
+{
+	unsigned int reg;
+
+	reg = SPW_READ(&pDev->regs->nodeaddr);
+	pDev->config.nodeaddr = reg & 0xFF;
+	pDev->config.nodemask = (reg >> 8) & 0xFF;
+	pDev->config.destkey = SPW_READ(&pDev->regs->destkey) & 0xFF;
+	pDev->config.clkdiv = SPW_READ(&pDev->regs->clkdiv) & 0xFFFF;
+
+	reg = SPW_CTRL_READ(pDev);
+	pDev->config.promiscuous = (reg >> 5) & 1;
+	pDev->config.rmapen = (reg >> 16) & 1;
+	pDev->config.rmapbufdis = (reg >> 17) & 1;
+	pDev->config.is_rmap = (reg >> 31) & 1;
+	pDev->config.is_rxunaligned = (reg >> 30) & 1;
+	pDev->config.is_rmapcrc = (reg >> 29) & 1;
+	pDev->config.linkdisabled = reg & 1;
+	pDev->config.linkstart = (reg >> 1) & 1;
+
+	if ( pDev->core_ver <= 1 ){
+		/* Timer64 and disconnect fields only exist on GRSPW1 */
+		reg = SPW_READ(&pDev->regs->timer);
+		pDev->config.timer = reg & 0xFFF;
+		pDev->config.disconnect = (reg >> 12) & 0x3FF;
+	}else{
+		pDev->config.timer = 0;
+		pDev->config.disconnect = 0;
+	}
+}
+
+/* Bring the device into operation: wait for the link to come up, reset
+ * the DMA channel, (re)initialize the TX and RX descriptor rings and
+ * start the receiver.
+ *
+ * timeout is given in ticks and is passed on to grspw_hw_waitlink()
+ * (-1 = default, < -1 = forever, >= 0 = bounded wait).
+ * Returns RTEMS_SUCCESSFUL, RTEMS_TIMEOUT when the link did not come up,
+ * or RTEMS_IO_ERROR when the DMA control register could not be cleared.
+ */
+static int grspw_hw_startup (GRSPW_DEV *pDev, int timeout)
+{
+	int i;
+	unsigned int dmactrl;
+
+	SPW_WRITE(&pDev->regs->status, (SPW_STATUS_TO|SPW_STATUS_CE|SPW_STATUS_ER|SPW_STATUS_DE|SPW_STATUS_PE|SPW_STATUS_WE|SPW_STATUS_IA|SPW_STATUS_EE)); /*clear status*/
+
+	if (grspw_hw_waitlink(pDev,timeout)) {
+		SPACEWIRE_DBG2("Device open. Link is not up\n");
+		return RTEMS_TIMEOUT;
+	}
+
+	SPW_WRITE(&pDev->regs->dma0ctrl, SPW_DMACTRL_PS | SPW_DMACTRL_PR | SPW_DMACTRL_TA | SPW_DMACTRL_RA); /*clear status, set ctrl*/
+
+	/* Writing 1 to the status bits above clears them; the register is
+	 * expected to read back as all-zero afterwards.
+	 */
+	if ((dmactrl = SPW_READ(&pDev->regs->dma0ctrl)) != 0) {
+		SPACEWIRE_DBG2("DMACtrl is not cleared\n");
+		return RTEMS_IO_ERROR;
+	}
+
+	/* prepare transmit buffers */
+	for (i = 0; i < pDev->txbufcnt; i++) {
+		pDev->tx[i].ctrl = 0;
+		pDev->tx[i].addr_header = ((unsigned int)&pDev->ptr_txhbuf0_remote[0]) + (i * pDev->txhbufsize);
+		pDev->tx[i].addr_data = ((unsigned int)&pDev->ptr_txdbuf0_remote[0]) + (i * pDev->txdbufsize);
+	}
+	pDev->tx_cur = 0;
+	pDev->tx_sent = 0;
+	pDev->tx_all_in_use = 0;
+
+	/* prepare receive buffers */
+	for (i = 0; i < pDev->rxbufcnt; i++) {
+		/* Last descriptor gets the WRAP bit so HW loops back to 0 */
+		if (i+1 == pDev->rxbufcnt) {
+			pDev->rx[i].ctrl = SPW_RXBD_IE | SPW_RXBD_EN | SPW_RXBD_WR;
+		} else {
+			pDev->rx[i].ctrl = SPW_RXBD_IE | SPW_RXBD_EN;
+		}
+		pDev->rx[i].addr = ((unsigned int)&pDev->ptr_rxbuf0_remote[0]) + (i * pDev->rxbufsize);
+	}
+	pDev->rxcur = 0;
+	pDev->rxbufcur = -1;
+	grspw_set_rxmaxlen(pDev);
+
+	/* Tell hardware where the descriptor tables are (DMA view) */
+	SPW_WRITE(&pDev->regs->dma0txdesc, pDev->tx_remote);
+	SPW_WRITE(&pDev->regs->dma0rxdesc, pDev->rx_remote);
+
+	/* start RX */
+	dmactrl = SPW_READ(&pDev->regs->dma0ctrl);
+	SPW_WRITE(&pDev->regs->dma0ctrl, (dmactrl & SPW_PREPAREMASK_RX) | SPW_DMACTRL_RD | SPW_DMACTRL_RXEN | SPW_DMACTRL_NS | SPW_DMACTRL_TXIE | SPW_DMACTRL_RXIE);
+
+	SPACEWIRE_DBGC(DBGSPW_TX,"0x%x: setup complete\n", (unsigned int)pDev->regs);
+	return RTEMS_SUCCESSFUL;
+}
+
+/* Block (yielding one tick per poll) until the receiver DMA engine
+ * reports that it is no longer active.
+ */
+static void grspw_hw_wait_rx_inactive(GRSPW_DEV *pDev)
+{
+	/* Yielding is needed e.g. on a low frequency GRSPW or while a
+	 * very large packet is still streaming in.
+	 */
+	while ( (SPW_READ(&pDev->regs->dma0ctrl) & SPW_DMACTRL_RX) != 0 )
+		rtems_task_wake_after(1);
+}
+
+/* Stop the receiver and/or transmitter of DMA channel 0 by clearing the
+ * corresponding enable (and interrupt-enable) bits.
+ *
+ * rx / tx select which direction(s) to stop. The status flags in the DMA
+ * control register are write-one-to-clear, so they are masked out before
+ * writing back to avoid clearing them as a side effect.
+ * Always returns RTEMS_SUCCESSFUL.
+ */
+static int grspw_hw_stop (GRSPW_DEV *pDev, int rx, int tx)
+{
+	unsigned int ctrl = SPW_READ(&pDev->regs->dma0ctrl);
+
+	if ( rx )
+		ctrl &= ~(SPW_DMACTRL_RXEN|SPW_DMACTRL_RXIE|SPW_DMACTRL_RD);
+	if ( tx )
+		ctrl &= ~(SPW_DMACTRL_TXEN|SPW_DMACTRL_TXIE);
+
+	/* don't clear status flags */
+	ctrl &= ~(SPW_DMACTRL_RA|SPW_DMACTRL_PR|SPW_DMACTRL_AI);
+	SPW_WRITE(&pDev->regs->dma0ctrl, ctrl);
+	return RTEMS_SUCCESSFUL;
+}
+
+
+
+/* Queue one packet (header + data) for transmission on DMA channel 0.
+ *
+ * hlen/hdr   header length and buffer; hdr may be NULL when hlen == 0
+ * dlen/data  data length and buffer; data may be NULL when dlen == 0
+ * options    GRSPW_PKTSEND_OPTION_* flags (CRC generation control)
+ *
+ * Returns hlen+dlen when the packet was handed to hardware, or 0 when
+ * all transmit descriptors are in use. When config.tx_blocking is set
+ * the call blocks until the transmitter has finished.
+ */
+int grspw_hw_send(GRSPW_DEV *pDev, unsigned int hlen, char *hdr, unsigned int dlen, char *data, unsigned int options)
+{
+	unsigned int dmactrl, ctrl;
+#ifdef DEBUG_SPACEWIRE_ONOFF
+	unsigned int k;
+#endif
+	rtems_interrupt_level level;
+	unsigned int cur = pDev->tx_cur, bdctrl;
+	char *txh = pDev->ptr_txhbuf0 + (cur * pDev->txhbufsize);
+	char *txd = pDev->ptr_txdbuf0 + (cur * pDev->txdbufsize);
+	char *txh_remote = pDev->ptr_txhbuf0_remote + (cur * pDev->txhbufsize);
+	char *txd_remote = pDev->ptr_txdbuf0_remote + (cur * pDev->txdbufsize);
+
+	ctrl = SPW_READ((volatile void *)&pDev->tx[cur].ctrl);
+
+	/* Descriptor still enabled means hardware owns it: ring is full */
+	if (ctrl & SPW_TXBD_EN) {
+		return 0;
+	}
+
+	/* The ioctl caller allows NULL together with a zero length, and
+	 * memcpy() with a NULL source pointer is undefined behavior even
+	 * for a zero byte count - guard both copies.
+	 */
+	if (dlen != 0)
+		memcpy(&txd[0], data, dlen);
+	if (hlen != 0)
+		memcpy(&txh[0], hdr, hlen);
+
+#ifdef DEBUG_SPACEWIRE_ONOFF
+	if (DEBUG_SPACEWIRE_FLAGS & DBGSPW_DUMP) {
+		for (k = 0; k < hlen; k++){
+			if (k % 16 == 0) {
+				printf ("\n");
+			}
+			printf ("%.2x(%c) ",txh[k] & 0xff,isprint(txh[k] & 0xff) ? txh[k] & 0xff : ' ');
+		}
+		printf ("\n");
+	}
+	if (DEBUG_SPACEWIRE_FLAGS & DBGSPW_DUMP) {
+		for (k = 0; k < dlen; k++){
+			if (k % 16 == 0) {
+				printf ("\n");
+			}
+			printf ("%.2x(%c) ",txd[k] & 0xff,isprint(txd[k] & 0xff) ? txd[k] & 0xff : ' ');
+		}
+		printf ("\n");
+	}
+#endif
+
+	/* Point the descriptor at the DMA view of the copy buffers */
+	pDev->tx[cur].addr_header = (unsigned int)txh_remote;
+	pDev->tx[cur].len = dlen;
+	pDev->tx[cur].addr_data = (unsigned int)txd_remote;
+
+	/* Build descriptor control word: IRQ + enable + header length */
+	bdctrl = SPW_TXBD_IE | SPW_TXBD_EN | hlen;
+	if ( options & GRSPW_PKTSEND_OPTION_HDR_CRC )
+		bdctrl |= SPW_TXBD_HC;
+	if ( options & GRSPW_PKTSEND_OPTION_DATA_CRC )
+		bdctrl |= SPW_TXBD_DC;
+	bdctrl |= options & GRSPW_PKTSEND_OPTION_NOCRCLEN_MASK;
+
+	/* Update counters. Interrupts are disabled since the TX ring state
+	 * is shared with the interrupt handler.
+	 */
+	rtems_interrupt_disable(level);
+	if (pDev->tx_cur == (pDev->txbufcnt - 1) ) {
+		bdctrl |= SPW_TXBD_WR;
+	}
+	pDev->tx[cur].ctrl = bdctrl;
+
+	dmactrl = SPW_READ(&pDev->regs->dma0ctrl);
+	SPW_WRITE(&pDev->regs->dma0ctrl, (dmactrl & SPW_PREPAREMASK_TX) | SPW_DMACTRL_TXEN | SPW_DMACTRL_TXIE);
+
+	pDev->tx_cur = (pDev->tx_cur + 1) % pDev->txbufcnt;
+	if (pDev->tx_cur == pDev->tx_sent) {
+		pDev->tx_all_in_use = 1;
+	}
+	rtems_interrupt_enable(level);
+
+	/* In blocking mode wait until message is sent */
+	if (pDev->config.tx_blocking) {
+		while ( SPW_READ(&pDev->regs->dma0ctrl) & SPW_DMACTRL_TXEN) {
+			/* if changed to blocking mode */
+			SPACEWIRE_DBGC(DBGSPW_TX, "Tx blocking\n");
+			rtems_semaphore_obtain(pDev->txsp, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+		}
+	}
+	SPACEWIRE_DBGC(DBGSPW_TX, "0x%x: transmitted <%i> bytes\n", (unsigned int) pDev->regs, dlen+hlen);
+	return hlen + dlen;
+}
+
+/* Copy up to c bytes of the packet pending at the current RX descriptor
+ * into buffer b.
+ *
+ * Returns the number of bytes copied; 0 when no packet is pending (the
+ * descriptor is still owned by hardware). Depending on configuration the
+ * source-address byte (and optionally the protocol-id byte) at the start
+ * of the packet is skipped. A packet larger than c is delivered across
+ * multiple calls; pDev->rxbufcur tracks the read offset inside the
+ * current descriptor buffer. Packets with error bits set are dropped and
+ * accounted in the statistics.
+ */
+static int grspw_hw_receive(GRSPW_DEV *pDev, char *b, int c) {
+	unsigned int len, rxlen, ctrl;
+	unsigned int cur;
+	unsigned int tmp;
+	unsigned int dump_start_len;
+	int i;
+	char *rxb;
+
+	/* Number of leading bytes to discard from each packet */
+	if ( pDev->config.promiscuous || pDev->config.keep_source ) {
+		dump_start_len = 0; /* make sure address and prot can be read in promiscuous mode */
+	} else if (pDev->config.rm_prot_id) {
+		dump_start_len = 2; /* skip source address and protocol id */
+	} else {
+		dump_start_len = 1; /* default: skip only source address */
+	}
+
+	rxlen = 0;
+	cur = pDev->rxcur;
+	rxb = pDev->ptr_rxbuf0 + (cur * pDev->rxbufsize);
+
+	SPACEWIRE_DBGC(DBGSPW_RX, "0x%x: waitin packet at pos %i\n", (unsigned int) pDev->regs, cur);
+
+	/* EN still set means hardware owns the descriptor: nothing to read */
+	ctrl = SPW_READ((volatile void *)&pDev->rx[cur].ctrl);
+	if (ctrl & SPW_RXBD_EN) {
+		return rxlen;
+	}
+	SPACEWIRE_DBGC(DBGSPW_RX, "checking packet\n");
+
+	len = SPW_RXBD_LENGTH & ctrl;
+	if (!((ctrl & SPW_RXBD_ERROR) || (pDev->config.check_rmap_err && (ctrl & SPW_RXBD_RMAPERROR)))) {
+		/* rxbufcur == -1 marks a fresh, not yet touched packet */
+		if (pDev->rxbufcur == -1) {
+			SPACEWIRE_DBGC(DBGSPW_RX, "incoming packet len %i\n", len);
+			pDev->stat.packets_received++;
+			pDev->rxbufcur = dump_start_len;
+		}
+		/* tmp = bytes remaining in this packet before clamping to c */
+		rxlen = tmp = len - pDev->rxbufcur;
+		SPACEWIRE_DBGC(DBGSPW_RX, "C %i\n", c);
+		SPACEWIRE_DBGC(DBGSPW_RX, "Dump %i\n", dump_start_len);
+		SPACEWIRE_DBGC(DBGSPW_RX, "Bufcur %i\n", pDev->rxbufcur);
+		SPACEWIRE_DBGC(DBGSPW_RX, "Rxlen %i\n", rxlen );
+		if (rxlen > c) {
+			rxlen = c;
+		}
+		/* With cache snooping a plain memcpy is coherent; otherwise
+		 * read through MEM_READ* to bypass stale cache contents.
+		 */
+		if (CPU_SPARC_HAS_SNOOPING) {
+/*		if ( 1 ) {*/
+			/*printf("RX_MEMCPY(0x%x, 0x%x, 0x%x)\n", (unsigned int)b, (unsigned int)(rxb+pDev->rxbufcur), (unsigned int)rxlen);*/
+			memcpy(b, rxb+pDev->rxbufcur, rxlen);
+		} else {
+			int left = rxlen;
+			/* Copy word wise if Aligned */
+			if ( (((int)b & 3) == 0) && (((int)(rxb+pDev->rxbufcur) & 3) == 0) ){
+				/* 32-byte unrolled word copy */
+				while(left>=32){
+					*(int *)(b+0) = MEM_READ32(rxb+pDev->rxbufcur+0);
+					*(int *)(b+4) = MEM_READ32(rxb+pDev->rxbufcur+4);
+					*(int *)(b+8) = MEM_READ32(rxb+pDev->rxbufcur+8);
+					*(int *)(b+12) = MEM_READ32(rxb+pDev->rxbufcur+12);
+					*(int *)(b+16) = MEM_READ32(rxb+pDev->rxbufcur+16);
+					*(int *)(b+20) = MEM_READ32(rxb+pDev->rxbufcur+20);
+					*(int *)(b+24) = MEM_READ32(rxb+pDev->rxbufcur+24);
+					*(int *)(b+28) = MEM_READ32(rxb+pDev->rxbufcur+28);
+					rxb+=32;
+					b+=32;
+					left-=32;
+				}
+				while(left>=4){
+					*(int *)b = MEM_READ32(rxb+pDev->rxbufcur);
+					rxb+=4;
+					b+=4;
+					left-=4;
+				}
+			}
+			/* Byte-wise tail (and whole copy when unaligned) */
+			for(i = 0; i < left; i++) {
+				b[i] = MEM_READ8(rxb+pDev->rxbufcur+i);
+			}
+		}
+
+		pDev->rxbufcur += rxlen;
+		/* Whole remainder consumed? Then release this descriptor */
+		if (c >= tmp) {
+			SPACEWIRE_DBGC(DBGSPW_RX, "Next descriptor\n");
+			grspw_rxnext(pDev);
+		}
+	} else {
+		/* Errored packet: count it and drop the descriptor */
+		check_rx_errors(pDev, ctrl);
+		grspw_rxnext(pDev);
+	}
+	return rxlen;
+}
+
+/* Hand the current RX descriptor back to hardware, advance to the next
+ * one and (re)enable the receiver. Runs with interrupts disabled since
+ * the descriptor ring state is shared with the interrupt handler.
+ */
+static void grspw_rxnext(GRSPW_DEV *pDev)
+{
+	unsigned int dmactrl;
+	unsigned int idx = pDev->rxcur;
+	rtems_interrupt_level level;
+
+	rtems_interrupt_disable(level);
+
+	if (idx == (pDev->rxbufcnt - 1)) {
+		/* Last entry: set WRAP so hardware returns to entry 0 */
+		pDev->rx[idx].ctrl = SPW_RXBD_EN | SPW_RXBD_IE | SPW_RXBD_WR;
+		idx = 0;
+	} else {
+		pDev->rx[idx].ctrl = SPW_RXBD_EN | SPW_RXBD_IE;
+		idx++;
+	}
+
+	pDev->rxcur = idx;
+	pDev->rxbufcur = -1;
+
+	/* start RX */
+	dmactrl = SPW_READ(&pDev->regs->dma0ctrl);
+	SPW_WRITE(&pDev->regs->dma0ctrl, (dmactrl & SPW_PREPAREMASK_RX) | SPW_DMACTRL_RD | SPW_DMACTRL_RXEN | SPW_DMACTRL_RXIE | SPW_DMACTRL_NS);
+
+	rtems_interrupt_enable(level);
+}
+
+static void check_rx_errors(GRSPW_DEV *pDev, int ctrl)
+{
+ if (ctrl & SPW_RXBD_EEP) {
+ pDev->stat.rx_eep_err++;
+ }
+ if (ctrl & SPW_RXBD_EHC) {
+ if (pDev->config.check_rmap_err) {
+ pDev->stat.rx_rmap_header_crc_err++;
+ }
+ }
+ if (ctrl & SPW_RXBD_EDC) {
+ if (pDev->config.check_rmap_err) {
+ pDev->stat.rx_rmap_data_crc_err++;
+ }
+ }
+ if (ctrl & SPW_RXBD_ETR) {
+ pDev->stat.rx_truncated++;
+ }
+}
+
+
+/* Dump the state of one GRSPW device (registers and descriptor table
+ * addresses) to stdout. The options argument is currently unused.
+ */
+static void grspw_print_dev(struct drvmgr_dev *dev, int options)
+{
+	GRSPW_DEV *pDev = dev->priv;
+
+	/* Print */
+	printf("--- GRSPW %s ---\n", pDev->devName);
+	printf(" REGS:            0x%x\n", (unsigned int)pDev->regs);
+	printf(" IRQ:             %d\n", pDev->irq);
+	printf(" CORE VERSION:    %d\n", pDev->core_ver);
+	printf(" CTRL:            0x%x\n", pDev->regs->ctrl);
+	printf(" STATUS:          0x%x\n", pDev->regs->status);
+	printf(" DMA0CTRL:        0x%x\n", pDev->regs->dma0ctrl);
+	printf(" TXBD:            0x%x\n", (unsigned int)pDev->tx);
+	printf(" RXBD:            0x%x\n", (unsigned int)pDev->rx);
+}
+
+/* Print the state of every GRSPW device registered with this driver. */
+void grspw_print(int options)
+{
+	struct drvmgr_dev *dev;
+
+	for (dev = grspw_drv_info.general.dev; dev != NULL; dev = dev->next_in_drv)
+		grspw_print_dev(dev, options);
+}
diff --git a/bsps/shared/grlib/spw/grspw_pkt.c b/bsps/shared/grlib/spw/grspw_pkt.c
new file mode 100644
index 0000000000..208f5a14f7
--- /dev/null
+++ b/bsps/shared/grlib/spw/grspw_pkt.c
@@ -0,0 +1,3295 @@
+/*
+ * Cobham Gaisler GRSPW/GRSPW2 SpaceWire Kernel Library Interface for RTEMS.
+ *
+ * This driver can be used to implement a standard I/O system "char"-driver
+ * or used directly.
+ *
+ * COPYRIGHT (c) 2011
+ * Cobham Gaisler AB
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <bsp.h>
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <ctype.h>
+#include <rtems/bspIo.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/grspw_pkt.h>
+
+#include <grlib/grlib_impl.h>
+
+/*#define STATIC*/
+#define STATIC static
+
+/*#define GRSPW_DBG(args...) printk(args)*/
+#define GRSPW_DBG(args...)
+
+/* Register layout of one GRSPW DMA channel (0x20 bytes). One instance
+ * per channel is embedded in struct grspw_regs below.
+ */
+struct grspw_dma_regs {
+	volatile unsigned int ctrl;	/* DMA Channel Control */
+	volatile unsigned int rxmax;	/* RX Max Packet Length */
+	volatile unsigned int txdesc;	/* TX Descriptor Base/Current */
+	volatile unsigned int rxdesc;	/* RX Descriptor Base/Current */
+	volatile unsigned int addr;	/* Address Register */
+	volatile unsigned int resv[3];
+};
+
+/* GRSPW/GRSPW2 APB register layout. The first block holds core control
+ * and link registers, followed by up to four DMA channel register sets
+ * and the interrupt-code (distributed interrupt) registers.
+ */
+struct grspw_regs {
+	volatile unsigned int ctrl;
+	volatile unsigned int status;
+	volatile unsigned int nodeaddr;
+	volatile unsigned int clkdiv;
+	volatile unsigned int destkey;
+	volatile unsigned int time;
+	volatile unsigned int timer;	/* Used only in GRSPW1 */
+	volatile unsigned int resv1;
+
+	/* DMA Registers, ctrl.NCH determines number of ports,
+	 * up to 4 channels are supported
+	 */
+	struct grspw_dma_regs dma[4];
+
+	volatile unsigned int icctrl;
+	volatile unsigned int icrx;
+	volatile unsigned int icack;
+	volatile unsigned int ictimeout;
+	volatile unsigned int ictickomask;
+	volatile unsigned int icaamask;
+	volatile unsigned int icrlpresc;
+	volatile unsigned int icrlisr;
+	volatile unsigned int icrlintack;
+	volatile unsigned int resv2;
+	volatile unsigned int icisr;
+	volatile unsigned int resv3;
+};
+
+/* GRSPW - Control Register - 0x00 */
+#define GRSPW_CTRL_RA_BIT 31
+#define GRSPW_CTRL_RX_BIT 30
+#define GRSPW_CTRL_RC_BIT 29
+#define GRSPW_CTRL_NCH_BIT 27
+#define GRSPW_CTRL_PO_BIT 26
+#define GRSPW_CTRL_CC_BIT 25
+#define GRSPW_CTRL_ID_BIT 24
+#define GRSPW_CTRL_LE_BIT 22
+#define GRSPW_CTRL_PS_BIT 21
+#define GRSPW_CTRL_NP_BIT 20
+#define GRSPW_CTRL_RD_BIT 17
+#define GRSPW_CTRL_RE_BIT 16
+#define GRSPW_CTRL_TF_BIT 12
+#define GRSPW_CTRL_TR_BIT 11
+#define GRSPW_CTRL_TT_BIT 10
+#define GRSPW_CTRL_LI_BIT 9
+#define GRSPW_CTRL_TQ_BIT 8
+#define GRSPW_CTRL_RS_BIT 6
+#define GRSPW_CTRL_PM_BIT 5
+#define GRSPW_CTRL_TI_BIT 4
+#define GRSPW_CTRL_IE_BIT 3
+#define GRSPW_CTRL_AS_BIT 2
+#define GRSPW_CTRL_LS_BIT 1
+#define GRSPW_CTRL_LD_BIT 0
+
+#define GRSPW_CTRL_RA (1<<GRSPW_CTRL_RA_BIT)
+#define GRSPW_CTRL_RX (1<<GRSPW_CTRL_RX_BIT)
+#define GRSPW_CTRL_RC (1<<GRSPW_CTRL_RC_BIT)
+#define GRSPW_CTRL_NCH (0x3<<GRSPW_CTRL_NCH_BIT)
+#define GRSPW_CTRL_PO (1<<GRSPW_CTRL_PO_BIT)
+#define GRSPW_CTRL_CC (1<<GRSPW_CTRL_CC_BIT)
+#define GRSPW_CTRL_ID (1<<GRSPW_CTRL_ID_BIT)
+#define GRSPW_CTRL_LE (1<<GRSPW_CTRL_LE_BIT)
+#define GRSPW_CTRL_PS (1<<GRSPW_CTRL_PS_BIT)
+#define GRSPW_CTRL_NP (1<<GRSPW_CTRL_NP_BIT)
+#define GRSPW_CTRL_RD (1<<GRSPW_CTRL_RD_BIT)
+#define GRSPW_CTRL_RE (1<<GRSPW_CTRL_RE_BIT)
+#define GRSPW_CTRL_TF (1<<GRSPW_CTRL_TF_BIT)
+#define GRSPW_CTRL_TR (1<<GRSPW_CTRL_TR_BIT)
+#define GRSPW_CTRL_TT (1<<GRSPW_CTRL_TT_BIT)
+#define GRSPW_CTRL_LI (1<<GRSPW_CTRL_LI_BIT)
+#define GRSPW_CTRL_TQ (1<<GRSPW_CTRL_TQ_BIT)
+#define GRSPW_CTRL_RS (1<<GRSPW_CTRL_RS_BIT)
+#define GRSPW_CTRL_PM (1<<GRSPW_CTRL_PM_BIT)
+#define GRSPW_CTRL_TI (1<<GRSPW_CTRL_TI_BIT)
+#define GRSPW_CTRL_IE (1<<GRSPW_CTRL_IE_BIT)
+#define GRSPW_CTRL_AS (1<<GRSPW_CTRL_AS_BIT)
+#define GRSPW_CTRL_LS (1<<GRSPW_CTRL_LS_BIT)
+#define GRSPW_CTRL_LD (1<<GRSPW_CTRL_LD_BIT)
+
+#define GRSPW_CTRL_IRQSRC_MASK \
+ (GRSPW_CTRL_LI | GRSPW_CTRL_TQ)
+#define GRSPW_ICCTRL_IRQSRC_MASK \
+ (GRSPW_ICCTRL_TQ | GRSPW_ICCTRL_AQ | GRSPW_ICCTRL_IQ)
+
+
+/* GRSPW - Status Register - 0x04 */
+#define GRSPW_STS_LS_BIT 21
+#define GRSPW_STS_AP_BIT 9
+#define GRSPW_STS_EE_BIT 8
+#define GRSPW_STS_IA_BIT 7
+#define GRSPW_STS_WE_BIT 6 /* GRSPW1 */
+#define GRSPW_STS_PE_BIT 4
+#define GRSPW_STS_DE_BIT 3
+#define GRSPW_STS_ER_BIT 2
+#define GRSPW_STS_CE_BIT 1
+#define GRSPW_STS_TO_BIT 0
+
+#define GRSPW_STS_LS (0x7<<GRSPW_STS_LS_BIT)
+#define GRSPW_STS_AP (1<<GRSPW_STS_AP_BIT)
+#define GRSPW_STS_EE (1<<GRSPW_STS_EE_BIT)
+#define GRSPW_STS_IA (1<<GRSPW_STS_IA_BIT)
+#define GRSPW_STS_WE (1<<GRSPW_STS_WE_BIT) /* GRSPW1 */
+#define GRSPW_STS_PE (1<<GRSPW_STS_PE_BIT)
+#define GRSPW_STS_DE (1<<GRSPW_STS_DE_BIT)
+#define GRSPW_STS_ER (1<<GRSPW_STS_ER_BIT)
+#define GRSPW_STS_CE (1<<GRSPW_STS_CE_BIT)
+#define GRSPW_STS_TO (1<<GRSPW_STS_TO_BIT)
+
+/* GRSPW - Default Address Register - 0x08 */
+#define GRSPW_DEF_ADDR_BIT 0
+#define GRSPW_DEF_MASK_BIT 8
+#define GRSPW_DEF_ADDR (0xff<<GRSPW_DEF_ADDR_BIT)
+#define GRSPW_DEF_MASK (0xff<<GRSPW_DEF_MASK_BIT)
+
+/* GRSPW - Clock Divisor Register - 0x0C */
+#define GRSPW_CLKDIV_START_BIT 8
+#define GRSPW_CLKDIV_RUN_BIT 0
+#define GRSPW_CLKDIV_START (0xff<<GRSPW_CLKDIV_START_BIT)
+#define GRSPW_CLKDIV_RUN (0xff<<GRSPW_CLKDIV_RUN_BIT)
+#define GRSPW_CLKDIV_MASK (GRSPW_CLKDIV_START|GRSPW_CLKDIV_RUN)
+
+/* GRSPW - Destination key Register - 0x10 */
+#define GRSPW_DK_DESTKEY_BIT 0
+#define GRSPW_DK_DESTKEY (0xff<<GRSPW_DK_DESTKEY_BIT)
+
+/* GRSPW - Time Register - 0x14 */
+#define GRSPW_TIME_CTRL_BIT 6
+#define GRSPW_TIME_CNT_BIT 0
+#define GRSPW_TIME_CTRL (0x3<<GRSPW_TIME_CTRL_BIT)
+#define GRSPW_TIME_TCNT (0x3f<<GRSPW_TIME_CNT_BIT)
+
+/* GRSPW - DMA Control Register - 0x20*N */
+#define GRSPW_DMACTRL_LE_BIT 16
+#define GRSPW_DMACTRL_SP_BIT 15
+#define GRSPW_DMACTRL_SA_BIT 14
+#define GRSPW_DMACTRL_EN_BIT 13
+#define GRSPW_DMACTRL_NS_BIT 12
+#define GRSPW_DMACTRL_RD_BIT 11
+#define GRSPW_DMACTRL_RX_BIT 10
+#define GRSPW_DMACTRL_AT_BIT 9
+#define GRSPW_DMACTRL_RA_BIT 8
+#define GRSPW_DMACTRL_TA_BIT 7
+#define GRSPW_DMACTRL_PR_BIT 6
+#define GRSPW_DMACTRL_PS_BIT 5
+#define GRSPW_DMACTRL_AI_BIT 4
+#define GRSPW_DMACTRL_RI_BIT 3
+#define GRSPW_DMACTRL_TI_BIT 2
+#define GRSPW_DMACTRL_RE_BIT 1
+#define GRSPW_DMACTRL_TE_BIT 0
+
+#define GRSPW_DMACTRL_LE (1<<GRSPW_DMACTRL_LE_BIT)
+#define GRSPW_DMACTRL_SP (1<<GRSPW_DMACTRL_SP_BIT)
+#define GRSPW_DMACTRL_SA (1<<GRSPW_DMACTRL_SA_BIT)
+#define GRSPW_DMACTRL_EN (1<<GRSPW_DMACTRL_EN_BIT)
+#define GRSPW_DMACTRL_NS (1<<GRSPW_DMACTRL_NS_BIT)
+#define GRSPW_DMACTRL_RD (1<<GRSPW_DMACTRL_RD_BIT)
+#define GRSPW_DMACTRL_RX (1<<GRSPW_DMACTRL_RX_BIT)
+#define GRSPW_DMACTRL_AT (1<<GRSPW_DMACTRL_AT_BIT)
+#define GRSPW_DMACTRL_RA (1<<GRSPW_DMACTRL_RA_BIT)
+#define GRSPW_DMACTRL_TA (1<<GRSPW_DMACTRL_TA_BIT)
+#define GRSPW_DMACTRL_PR (1<<GRSPW_DMACTRL_PR_BIT)
+#define GRSPW_DMACTRL_PS (1<<GRSPW_DMACTRL_PS_BIT)
+#define GRSPW_DMACTRL_AI (1<<GRSPW_DMACTRL_AI_BIT)
+#define GRSPW_DMACTRL_RI (1<<GRSPW_DMACTRL_RI_BIT)
+#define GRSPW_DMACTRL_TI (1<<GRSPW_DMACTRL_TI_BIT)
+#define GRSPW_DMACTRL_RE (1<<GRSPW_DMACTRL_RE_BIT)
+#define GRSPW_DMACTRL_TE (1<<GRSPW_DMACTRL_TE_BIT)
+
+/* GRSPW - DMA Channel Max Packet Length Register - (0x20*N + 0x04) */
+#define GRSPW_DMARXLEN_MAX_BIT 0
+#define GRSPW_DMARXLEN_MAX (0xffffff<<GRSPW_DMARXLEN_MAX_BIT)
+
+/* GRSPW - DMA Channel Address Register - (0x20*N + 0x10) */
+#define GRSPW_DMAADR_ADDR_BIT 0
+#define GRSPW_DMAADR_MASK_BIT 8
+#define GRSPW_DMAADR_ADDR (0xff<<GRSPW_DMAADR_ADDR_BIT)
+#define GRSPW_DMAADR_MASK (0xff<<GRSPW_DMAADR_MASK_BIT)
+
+/* GRSPW - Interrupt code receive register - 0xa4 */
+#define GRSPW_ICCTRL_INUM_BIT 27
+#define GRSPW_ICCTRL_IA_BIT 24
+#define GRSPW_ICCTRL_LE_BIT 23
+#define GRSPW_ICCTRL_PR_BIT 22
+#define GRSPW_ICCTRL_DQ_BIT 21 /* never used */
+#define GRSPW_ICCTRL_TQ_BIT 20
+#define GRSPW_ICCTRL_AQ_BIT 19
+#define GRSPW_ICCTRL_IQ_BIT 18
+#define GRSPW_ICCTRL_IR_BIT 17
+#define GRSPW_ICCTRL_IT_BIT 16
+#define GRSPW_ICCTRL_NUMI_BIT 13
+#define GRSPW_ICCTRL_BIRQ_BIT 8
+#define GRSPW_ICCTRL_ID_BIT 7
+#define GRSPW_ICCTRL_II_BIT 6
+#define GRSPW_ICCTRL_TXIRQ_BIT 0
+#define GRSPW_ICCTRL_INUM (0x1f << GRSPW_ICCTRL_INUM_BIT)
+#define GRSPW_ICCTRL_IA (1 << GRSPW_ICCTRL_IA_BIT)
+#define GRSPW_ICCTRL_LE (1 << GRSPW_ICCTRL_LE_BIT)
+#define GRSPW_ICCTRL_PR (1 << GRSPW_ICCTRL_PR_BIT)
+#define GRSPW_ICCTRL_DQ (1 << GRSPW_ICCTRL_DQ_BIT)
+#define GRSPW_ICCTRL_TQ (1 << GRSPW_ICCTRL_TQ_BIT)
+#define GRSPW_ICCTRL_AQ (1 << GRSPW_ICCTRL_AQ_BIT)
+#define GRSPW_ICCTRL_IQ (1 << GRSPW_ICCTRL_IQ_BIT)
+#define GRSPW_ICCTRL_IR (1 << GRSPW_ICCTRL_IR_BIT)
+#define GRSPW_ICCTRL_IT (1 << GRSPW_ICCTRL_IT_BIT)
+#define GRSPW_ICCTRL_NUMI (0x7 << GRSPW_ICCTRL_NUMI_BIT)
+#define GRSPW_ICCTRL_BIRQ (0x1f << GRSPW_ICCTRL_BIRQ_BIT)
+#define GRSPW_ICCTRL_ID (1 << GRSPW_ICCTRL_ID_BIT)
+#define GRSPW_ICCTRL_II (1 << GRSPW_ICCTRL_II_BIT)
+#define GRSPW_ICCTRL_TXIRQ (0x3f << GRSPW_ICCTRL_TXIRQ_BIT)
+
+/* RX Buffer Descriptor: control/status word plus data buffer address.
+ * Bit definitions are in the GRSPW_RXBD_* macros below.
+ */
+struct grspw_rxbd {
+	volatile unsigned int ctrl;
+	volatile unsigned int addr;
+};
+
+/* TX Buffer Descriptor: control word, header address/length and data
+ * address/length. Bit definitions are in the GRSPW_TXBD_* macros below.
+ */
+struct grspw_txbd {
+	volatile unsigned int ctrl;
+	volatile unsigned int haddr;
+	volatile unsigned int dlen;
+	volatile unsigned int daddr;
+};
+
+/* GRSPW - DMA RXBD Ctrl */
+#define GRSPW_RXBD_LEN_BIT 0
+#define GRSPW_RXBD_LEN (0x1ffffff<<GRSPW_RXBD_LEN_BIT)
+#define GRSPW_RXBD_EN (1<<25)
+#define GRSPW_RXBD_WR (1<<26)
+#define GRSPW_RXBD_IE (1<<27)
+#define GRSPW_RXBD_EP (1<<28)
+#define GRSPW_RXBD_HC (1<<29)
+#define GRSPW_RXBD_DC (1<<30)
+#define GRSPW_RXBD_TR (1<<31)
+
+#define GRSPW_TXBD_HLEN (0xff<<0)
+#define GRSPW_TXBD_NCL (0xf<<8)
+#define GRSPW_TXBD_EN (1<<12)
+#define GRSPW_TXBD_WR (1<<13)
+#define GRSPW_TXBD_IE (1<<14)
+#define GRSPW_TXBD_LE (1<<15)
+#define GRSPW_TXBD_HC (1<<16)
+#define GRSPW_TXBD_DC (1<<17)
+
+/* NOTE: GRSPW_DMAADR_MASK_BIT, GRSPW_DMAADR_ADDR and GRSPW_DMAADR_MASK
+ * are defined in the "DMA Channel Address Register" section above; the
+ * identical duplicate definitions that used to be repeated here were
+ * removed to avoid macro redefinitions.
+ */
+
+
+/* GRSPW Error Condition */
+#define GRSPW_STAT_ERROR (GRSPW_STS_EE | GRSPW_STS_IA | GRSPW_STS_WE | GRSPW_STS_PE | GRSPW_STS_DE | GRSPW_STS_ER | GRSPW_STS_CE)
+#define GRSPW_DMA_STATUS_ERROR (GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA)
+/* GRSPW Link configuration options */
+#define GRSPW_LINK_CFG (GRSPW_CTRL_LI | GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS)
+#define GRSPW_LINKSTATE(status) ((status & GRSPW_CTRL_LS) >> GRSPW_CTRL_LS_BIT)
+
+/* Software Defaults */
+#define DEFAULT_RXMAX 1024 /* 1 KBytes Max RX Packet Size */
+
+/* GRSPW Constants */
+#define GRSPW_TXBD_NR 64 /* Maximum number of TX Descriptors */
+#define GRSPW_RXBD_NR 128 /* Maximum number of RX Descriptors */
+#define GRSPW_TXBD_SIZE 16 /* Size in bytes of one TX descriptor */
+#define GRSPW_RXBD_SIZE 8 /* Size in bytes of one RX descriptor */
+#define BDTAB_SIZE 0x400 /* BD Table Size (RX or TX) */
+#define BDTAB_ALIGN 0x400 /* BD Table Alignment Requirement */
+
+/* Memory and HW Registers Access routines. All 32-bit access routines */
+#define BD_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
+/*#define BD_READ(addr) (*(volatile unsigned int *)(addr))*/
+#define BD_READ(addr) leon_r32_no_cache((unsigned long)(addr))
+#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
+#define REG_READ(addr) (*(volatile unsigned int *)(addr))
+
+/* Generic descriptor-ring entry holding either a TX or an RX descriptor
+ * pointer. NOTE(review): looks like a generic variant of the dedicated
+ * grspw_txring/grspw_rxring types below — confirm it is actually used.
+ */
+struct grspw_ring {
+	struct grspw_ring *next;	/* Next Descriptor */
+	union {
+		struct grspw_txbd *tx;	/* Descriptor Address */
+		struct grspw_rxbd *rx;	/* Descriptor Address */
+	} bd;
+	struct grspw_pkt *pkt;	/* Packet description associated.NULL if none*/
+};
+
+/* An entry in the TX descriptor Ring */
+struct grspw_txring {
+	struct grspw_txring *next;	/* Next Descriptor */
+	struct grspw_txbd *bd;	/* Descriptor Address */
+	struct grspw_pkt *pkt;	/* Packet description associated.NULL if none*/
+};
+
+/* An entry in the RX descriptor Ring */
+struct grspw_rxring {
+	struct grspw_rxring *next;	/* Next Descriptor */
+	struct grspw_rxbd *bd;	/* Descriptor Address */
+	struct grspw_pkt *pkt;	/* Packet description associated.NULL if none*/
+};
+
+
+struct grspw_dma_priv {
+ struct grspw_priv *core; /* GRSPW Core */
+ struct grspw_dma_regs *regs; /* DMA Channel Registers */
+ int index; /* DMA Channel Index @ GRSPW core */
+ int open; /* DMA Channel opened by user */
+ int started; /* DMA Channel activity (start|stop) */
+ rtems_id sem_rxdma; /* DMA Channel RX Semaphore */
+ rtems_id sem_txdma; /* DMA Channel TX Semaphore */
+ struct grspw_dma_stats stats; /* DMA Channel Statistics */
+ struct grspw_dma_config cfg; /* DMA Channel Configuration */
+
+ /*** RX ***/
+
+ /* RX Descriptor Ring */
+ struct grspw_rxbd *rx_bds; /* Descriptor Address */
+ struct grspw_rxbd *rx_bds_hwa; /* Descriptor HW Address */
+ struct grspw_rxring *rx_ring_base;
+ struct grspw_rxring *rx_ring_head; /* Next descriptor to enable */
+ struct grspw_rxring *rx_ring_tail; /* Oldest enabled Descriptor */
+ int rx_irq_en_cnt_curr;
+ struct {
+ int waiting;
+ int ready_cnt;
+ int op;
+ int recv_cnt;
+ rtems_id sem_wait; /* RX Semaphore used to implement RX blocking */
+ } rx_wait;
+
+ /* Queue of Packets READY to be scheduled */
+ struct grspw_list ready;
+ int ready_cnt;
+
+ /* Scheduled RX Packets Queue */
+ struct grspw_list rx_sched;
+ int rx_sched_cnt;
+
+ /* Queue of Packets that has been RECIEVED */
+ struct grspw_list recv;
+ int recv_cnt;
+
+
+ /*** TX ***/
+
+ /* TX Descriptor Ring */
+ struct grspw_txbd *tx_bds; /* Descriptor Address */
+ struct grspw_txbd *tx_bds_hwa; /* Descriptor HW Address */
+ struct grspw_txring *tx_ring_base;
+ struct grspw_txring *tx_ring_head;
+ struct grspw_txring *tx_ring_tail;
+ int tx_irq_en_cnt_curr;
+ struct {
+ int waiting;
+ int send_cnt;
+ int op;
+ int sent_cnt;
+ rtems_id sem_wait; /* TX Semaphore used to implement TX blocking */
+ } tx_wait;
+
+ /* Queue of Packets ready to be scheduled for transmission */
+ struct grspw_list send;
+ int send_cnt;
+
+ /* Scheduled TX Packets Queue */
+ struct grspw_list tx_sched;
+ int tx_sched_cnt;
+
+ /* Queue of Packets that has been SENT */
+ struct grspw_list sent;
+ int sent_cnt;
+};
+
+struct grspw_priv {
+ char devname[8]; /* Device name "grspw%d" */
+ struct drvmgr_dev *dev; /* Device */
+ struct grspw_regs *regs; /* Virtual Address of APB Registers */
+ int irq; /* AMBA IRQ number of core */
+ int index; /* Index in order it was probed */
+ int core_index; /* Core Bus Index */
+ int open; /* If Device is alrady opened (=1) or not (=0) */
+ void *data; /* User private Data for this device instance, set by grspw_initialize_user */
+
+ /* Features supported by Hardware */
+ struct grspw_hw_sup hwsup;
+
+ /* Pointer to an array of Maximally 4 DMA Channels */
+ struct grspw_dma_priv *dma;
+
+ /* Spin-lock ISR protection */
+ SPIN_DECLARE(devlock);
+
+ /* Descriptor Memory Area for TX & RX and all DMA channels */
+ unsigned int bd_mem;
+ unsigned int bd_mem_alloced;
+
+ /*** Time Code Handling ***/
+ void (*tcisr)(void *data, int timecode);
+ void *tcisr_arg;
+
+ /*** Interrupt-code Handling ***/
+ spwpkt_ic_isr_t icisr;
+ void *icisr_arg;
+
+ /* Bit mask representing events which shall cause link disable. */
+ unsigned int dis_link_on_err;
+
+ /* Bit mask for link status bits to clear by ISR */
+ unsigned int stscfg;
+
+ /*** Message Queue Handling ***/
+ struct grspw_work_config wc;
+
+ /* "Core Global" Statistics gathered, not dependent on DMA channel */
+ struct grspw_core_stats stats;
+};
+
+int grspw_initialized = 0;
+int grspw_count = 0;
+rtems_id grspw_sem;
+static struct grspw_priv *priv_tab[GRSPW_MAX];
+
+/* callback to upper layer when devices are discovered/removed */
+void *(*grspw_dev_add)(int) = NULL;
+void (*grspw_dev_del)(int,void*) = NULL;
+
+/* Defaults to do nothing - user can override this function.
+ * Called from work-task.
+ */
+void __attribute__((weak)) grspw_work_event(
+	enum grspw_worktask_ev ev,
+	unsigned int msg)
+{
+	/* Intentionally empty: the default handler ignores all work-task
+	 * events. Applications override this weak symbol to be notified.
+	 */
+
+}
+
+/* USER OVERRIDABLE - The work task priority. Set to -1 to disable creating
+ * the work-task and work-queue to save space.
+ */
+int grspw_work_task_priority __attribute__((weak)) = 100;
+rtems_id grspw_work_task;
+static struct grspw_work_config grspw_wc_def;
+
+STATIC void grspw_hw_stop(struct grspw_priv *priv);
+STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma);
+STATIC void grspw_dma_reset(struct grspw_dma_priv *dma);
+STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma);
+STATIC void grspw_isr(void *data);
+
+/* Open GRSPW device 'dev_no' for exclusive use.
+ *
+ * Under the global driver semaphore: marks the device open, initializes
+ * the per-device spin-lock and per-open state, sets up the TX/RX
+ * descriptor table memory for all DMA channels (user-provided via the
+ * "bdDmaArea" driver resource, or allocated and aligned here), stops the
+ * hardware and registers the ISR.
+ *
+ * Returns an opaque device handle, or NULL if the driver is not
+ * initialized, dev_no is out of range, the device is already open, a
+ * user-provided descriptor area is misaligned, or allocation fails.
+ */
+void *grspw_open(int dev_no)
+{
+	struct grspw_priv *priv;
+	unsigned int bdtabsize, hwa;
+	int i;
+	union drvmgr_key_value *value;
+
+	if (grspw_initialized != 1 || (dev_no >= grspw_count))
+		return NULL;
+
+	priv = priv_tab[dev_no];
+
+	/* Take GRSPW lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return NULL;
+
+	if (priv->open) {
+		/* Already opened by someone else */
+		priv = NULL;
+		goto out;
+	}
+
+	/* Initialize Spin-lock for GRSPW Device. This is to protect
+	 * CTRL and DMACTRL registers from ISR.
+	 */
+	SPIN_INIT(&priv->devlock, priv->devname);
+
+	priv->tcisr = NULL;
+	priv->tcisr_arg = NULL;
+	priv->icisr = NULL;
+	priv->icisr_arg = NULL;
+	priv->stscfg = LINKSTS_MASK;
+
+	/* Default to common work queue and message queue, if not created
+	 * during initialization then its disabled.
+	 */
+	grspw_work_cfg(priv, &grspw_wc_def);
+
+	grspw_stats_clr(priv);
+
+	/* Allocate TX & RX Descriptor memory area for all DMA
+	 * channels. Max-size descriptor area is allocated (or user assigned):
+	 *  - 128 RX descriptors per DMA Channel
+	 *  - 64 TX descriptors per DMA Channel
+	 * Specified address must be in CPU RAM.
+	 */
+	bdtabsize = 2 * BDTAB_SIZE * priv->hwsup.ndma_chans;
+	value = drvmgr_dev_key_get(priv->dev, "bdDmaArea", DRVMGR_KT_INT);
+	if (value) {
+		/* User-provided descriptor area: must meet HW alignment */
+		priv->bd_mem = value->i;
+		priv->bd_mem_alloced = 0;
+		if (priv->bd_mem & (BDTAB_ALIGN-1)) {
+			GRSPW_DBG("GRSPW[%d]: user-def DMA-area not aligned",
+			          priv->index);
+			priv = NULL;
+			goto out;
+		}
+	} else {
+		/* Over-allocate so the table can be aligned manually below */
+		priv->bd_mem_alloced = (unsigned int)grlib_malloc(bdtabsize + BDTAB_ALIGN - 1);
+		if (priv->bd_mem_alloced == 0) {
+			priv = NULL;
+			goto out;
+		}
+		/* Align memory */
+		priv->bd_mem = (priv->bd_mem_alloced + (BDTAB_ALIGN - 1)) &
+		               ~(BDTAB_ALIGN-1);
+	}
+
+	/* Translate into DMA address that HW can use to access DMA
+	 * descriptors
+	 */
+	drvmgr_translate_check(
+		priv->dev,
+		CPUMEM_TO_DMA,
+		(void *)priv->bd_mem,
+		(void **)&hwa,
+		bdtabsize);
+
+	GRSPW_DBG("GRSPW%d DMA descriptor table setup: (alloced:%p, bd_mem:%p, size: %d)\n",
+		priv->index, priv->bd_mem_alloced, priv->bd_mem, bdtabsize + BDTAB_ALIGN - 1);
+	for (i=0; i<priv->hwsup.ndma_chans; i++) {
+		/* Do DMA Channel Init, other variables etc. are inited
+		 * when respective DMA channel is opened.
+		 *
+		 * index & core are initialized by probe function.
+		 */
+		priv->dma[i].open = 0;
+		priv->dma[i].rx_bds = (struct grspw_rxbd *)
+			(priv->bd_mem + i*BDTAB_SIZE*2);
+		priv->dma[i].rx_bds_hwa = (struct grspw_rxbd *)
+			(hwa + BDTAB_SIZE*(2*i));
+		priv->dma[i].tx_bds = (struct grspw_txbd *)
+			(priv->bd_mem + BDTAB_SIZE*(2*i+1));
+		priv->dma[i].tx_bds_hwa = (struct grspw_txbd *)
+			(hwa + BDTAB_SIZE*(2*i+1));
+		GRSPW_DBG(" DMA[%i]: RX %p - %p (%p - %p) TX %p - %p (%p - %p)\n",
+			i,
+			priv->dma[i].rx_bds, (void *)priv->dma[i].rx_bds + BDTAB_SIZE - 1,
+			priv->dma[i].rx_bds_hwa, (void *)priv->dma[i].rx_bds_hwa + BDTAB_SIZE - 1,
+			priv->dma[i].tx_bds, (void *)priv->dma[i].tx_bds + BDTAB_SIZE - 1,
+			priv->dma[i].tx_bds_hwa, (void *)priv->dma[i].tx_bds_hwa + BDTAB_SIZE - 1);
+	}
+
+	/* Basic initialization of hardware, clear some registers but
+	 * keep Link/RMAP/Node-Address registers intact.
+	 */
+	grspw_hw_stop(priv);
+
+	/* Register Interrupt handler and enable IRQ at IRQ ctrl */
+	drvmgr_interrupt_register(priv->dev, 0, priv->devname, grspw_isr, priv);
+
+	/* Take the device */
+	priv->open = 1;
+out:
+	rtems_semaphore_release(grspw_sem);
+	return priv;
+}
+
+/* Close a previously opened GRSPW device.
+ *
+ * Returns 0 on success, -1 if the global semaphore could not be taken,
+ * and 1 if any DMA channel is still open (the caller must close all DMA
+ * channels first). On success the hardware is stopped, the ISR is
+ * unregistered and any driver-allocated descriptor memory is freed.
+ */
+int grspw_close(void *d)
+{
+	struct grspw_priv *priv = d;
+	int i;
+
+	/* Take GRSPW lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return -1;
+
+	/* Check that user has stopped and closed all DMA channels
+	 * appropriately. At this point the Hardware shall not be doing DMA
+	 * or generating Interrupts. We want HW in a "startup-state".
+	 */
+	for (i=0; i<priv->hwsup.ndma_chans; i++) {
+		if (priv->dma[i].open) {
+			rtems_semaphore_release(grspw_sem);
+			return 1;
+		}
+	}
+	grspw_hw_stop(priv);
+
+	/* Uninstall Interrupt handler */
+	drvmgr_interrupt_unregister(priv->dev, 0, grspw_isr, priv);
+
+	/* Free descriptor table memory if allocated using malloc() */
+	if (priv->bd_mem_alloced) {
+		free((void *)priv->bd_mem_alloced);
+		priv->bd_mem_alloced = 0;
+	}
+
+	/* Mark not open */
+	priv->open = 0;
+	rtems_semaphore_release(grspw_sem);
+	return 0;
+}
+
+/* Copy out the hardware capability description of the GRSPW core into
+ * the caller-provided structure.
+ */
+void grspw_hw_support(void *d, struct grspw_hw_sup *hw)
+{
+	struct grspw_priv *priv = d;
+
+	memcpy(hw, &priv->hwsup, sizeof(*hw));
+}
+
+/* Set (when cfg->promiscuous != -1) and read back the node-address
+ * configuration: promiscuous mode, default node address/mask and the
+ * per-DMA-channel node address/mask. Register accesses are protected
+ * from the ISR by the device spin-lock. Entries for DMA channels not
+ * present in hardware are reported as zero.
+ */
+void grspw_addr_ctrl(void *d, struct grspw_addr_config *cfg)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs;
+	unsigned int ctrl, nodeaddr;
+	SPIN_IRQFLAGS(irqflags);
+	int i;
+
+	/* Validate arguments before dereferencing. The previous version
+	 * read priv->regs in an initializer before this check, which is
+	 * undefined behaviour when priv == NULL.
+	 */
+	if (!priv || !cfg)
+		return;
+	regs = priv->regs;
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+	if (cfg->promiscuous != -1) {
+		/* Set Configuration */
+		ctrl = REG_READ(&regs->ctrl);
+		if (cfg->promiscuous)
+			ctrl |= GRSPW_CTRL_PM;
+		else
+			ctrl &= ~GRSPW_CTRL_PM;
+		REG_WRITE(&regs->ctrl, ctrl);
+		REG_WRITE(&regs->nodeaddr, (cfg->def_mask<<8) | cfg->def_addr);
+
+		for (i=0; i<priv->hwsup.ndma_chans; i++) {
+			ctrl = REG_READ(&regs->dma[i].ctrl);
+			ctrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
+			if (cfg->dma_nacfg[i].node_en) {
+				ctrl |= GRSPW_DMACTRL_EN;
+				REG_WRITE(&regs->dma[i].addr,
+				          (cfg->dma_nacfg[i].node_addr & 0xff) |
+				          ((cfg->dma_nacfg[i].node_mask & 0xff)<<8));
+			} else {
+				ctrl &= ~GRSPW_DMACTRL_EN;
+			}
+			REG_WRITE(&regs->dma[i].ctrl, ctrl);
+		}
+	}
+
+	/* Read Current Configuration */
+	cfg->promiscuous = REG_READ(&regs->ctrl) & GRSPW_CTRL_PM;
+	nodeaddr = REG_READ(&regs->nodeaddr);
+	cfg->def_addr = (nodeaddr & GRSPW_DEF_ADDR) >> GRSPW_DEF_ADDR_BIT;
+	cfg->def_mask = (nodeaddr & GRSPW_DEF_MASK) >> GRSPW_DEF_MASK_BIT;
+	for (i=0; i<priv->hwsup.ndma_chans; i++) {
+		cfg->dma_nacfg[i].node_en = REG_READ(&regs->dma[i].ctrl) &
+		                            GRSPW_DMACTRL_EN;
+		ctrl = REG_READ(&regs->dma[i].addr);
+		cfg->dma_nacfg[i].node_addr = (ctrl & GRSPW_DMAADR_ADDR) >>
+		                              GRSPW_DMAADR_ADDR_BIT;
+		cfg->dma_nacfg[i].node_mask = (ctrl & GRSPW_DMAADR_MASK) >>
+		                              GRSPW_DMAADR_MASK_BIT;
+	}
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+	/* Channels not implemented in hardware read back as all zero */
+	for (; i<4; i++) {
+		cfg->dma_nacfg[i].node_en = 0;
+		cfg->dma_nacfg[i].node_addr = 0;
+		cfg->dma_nacfg[i].node_mask = 0;
+	}
+}
+
+/* Return Current DMA CTRL/Status Register */
+/* Return the current DMA CTRL/Status register value of a DMA channel */
+unsigned int grspw_dma_ctrlsts(void *c)
+{
+	struct grspw_dma_priv *dma = c;
+
+	return REG_READ(&dma->regs->ctrl);
+}
+
+/* Return Current Status Register */
+/* Return the current raw Status register value of the core */
+unsigned int grspw_link_status(void *d)
+{
+	struct grspw_priv *priv = d;
+
+	return REG_READ(&priv->regs->status);
+}
+
+/* Clear Status Register bits */
+/* Clear Status Register bits (the status bits are write-one-to-clear,
+ * so the mask is written directly to the register).
+ */
+void grspw_link_status_clr(void *d, unsigned int mask)
+{
+	struct grspw_priv *priv = d;
+
+	REG_WRITE(&priv->regs->status, mask);
+}
+
+/* Return Current Link State */
+/* Return the current Link State, extracted from the LS field (bits
+ * 23..21) of the Status register.
+ */
+spw_link_state_t grspw_link_state(void *d)
+{
+	struct grspw_priv *priv = d;
+	unsigned int status = REG_READ(&priv->regs->status);
+
+	return (status & GRSPW_STS_LS) >> GRSPW_STS_LS_BIT;
+}
+
+/* Enable Global IRQ only if some irq source is set */
+/* Return non-zero if any IRQ source is enabled in CTRL (link-error IRQ,
+ * tick-out IRQ) or in ICCTRL (interrupt-code TQ/AQ/IQ); used to decide
+ * whether the global IE bit should be set.
+ */
+static inline int grspw_is_irqsource_set(unsigned int ctrl, unsigned int icctrl)
+{
+	return (ctrl & GRSPW_CTRL_IRQSRC_MASK) ||
+	       (icctrl & GRSPW_ICCTRL_IRQSRC_MASK);
+}
+
+
+/* options and clkdiv [in/out]: set to -1 to only read current config */
+/* Configure and/or read the link control settings.
+ *
+ * options: link configuration bits (GRSPW_LINK_CFG) plus the
+ *          LINKOPTS_DIS_ON* events that shall disable the link from ISR.
+ * stscfg:  mask of link status bits the ISR is allowed to clear.
+ * clkdiv:  clock divisor register value.
+ * Each pointer may be NULL (ignored) or point to -1 (read-only); on
+ * return each non-NULL pointer holds the current configuration.
+ */
+void grspw_link_ctrl(void *d, int *options, int *stscfg, int *clkdiv)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+	unsigned int ctrl;
+	SPIN_IRQFLAGS(irqflags);
+
+	/* Write? */
+	if (clkdiv) {
+		if (*clkdiv != -1)
+			REG_WRITE(&regs->clkdiv, *clkdiv & GRSPW_CLKDIV_MASK);
+		*clkdiv = REG_READ(&regs->clkdiv) & GRSPW_CLKDIV_MASK;
+	}
+	if (options) {
+		SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+		ctrl = REG_READ(&regs->ctrl);
+		if (*options != -1) {
+			ctrl = (ctrl & ~GRSPW_LINK_CFG) |
+			       (*options & GRSPW_LINK_CFG);
+
+			/* Enable Global IRQ only if some irq source is set */
+			if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
+				ctrl |= GRSPW_CTRL_IE;
+			else
+				ctrl &= ~GRSPW_CTRL_IE;
+
+			REG_WRITE(&regs->ctrl, ctrl);
+			/* Store the link disable events for use in
+			   ISR. The LINKOPTS_DIS_ON_* options are actually the
+			   corresponding bits in the status register, shifted
+			   by 16. */
+			priv->dis_link_on_err = *options &
+				(LINKOPTS_MASK_DIS_ON | LINKOPTS_DIS_ONERR);
+		}
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+		*options = (ctrl & GRSPW_LINK_CFG) | priv->dis_link_on_err;
+	}
+	if (stscfg) {
+		if (*stscfg != -1) {
+			priv->stscfg = *stscfg & LINKSTS_MASK;
+		}
+		*stscfg = priv->stscfg;
+	}
+}
+
+/* Generate Tick-In (increment Time Counter, Send Time Code) */
+/* Generate Tick-In (increment Time Counter, Send Time Code) by setting
+ * the TI bit in the control register under the device spin-lock.
+ */
+void grspw_tc_tx(void *d)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+	SPIN_IRQFLAGS(irqflags);
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+	REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_TI);
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+}
+
+/* Configure and/or read time-code control options. Write when *options
+ * is not -1; always read back the current value. The 0xd mask selects
+ * the TR, TT and TQ control bits (bit offsets 3, 2 and 0 relative to
+ * GRSPW_CTRL_TQ_BIT).
+ */
+void grspw_tc_ctrl(void *d, int *options)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+	unsigned int ctrl;
+	SPIN_IRQFLAGS(irqflags);
+
+	if (options == NULL)
+		return;
+
+	/* Write? */
+	if (*options != -1) {
+		SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+		ctrl = REG_READ(&regs->ctrl);
+		ctrl &= ~(GRSPW_CTRL_TR|GRSPW_CTRL_TT|GRSPW_CTRL_TQ);
+		ctrl |= (*options & 0xd) << GRSPW_CTRL_TQ_BIT;
+
+		/* Enable Global IRQ only if some irq source is set */
+		if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
+			ctrl |= GRSPW_CTRL_IE;
+		else
+			ctrl &= ~GRSPW_CTRL_IE;
+
+		REG_WRITE(&regs->ctrl, ctrl);
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+	} else
+		ctrl = REG_READ(&regs->ctrl);
+	*options = (ctrl >> GRSPW_CTRL_TQ_BIT) & 0xd;
+}
+
+/* Assign ISR Function to TimeCode RX IRQ */
+/* Assign ISR Function to TimeCode RX IRQ. The argument is stored before
+ * the function pointer so a concurrently running ISR never sees the new
+ * handler with the old argument.
+ */
+void grspw_tc_isr(void *d, void (*tcisr)(void *data, int tc), void *data)
+{
+	struct grspw_priv *priv = d;
+
+	priv->tcisr_arg = data;
+	priv->tcisr = tcisr;
+}
+
+/* Read/Write TCTRL and TIMECNT. Write if not -1, always read current value
+ * TCTRL = bits 7 and 6
+ * TIMECNT = bits 5 to 0
+ */
+/* Read/Write TCTRL and TIMECNT. Write if not -1, always read current value
+ * TCTRL = bits 7 and 6
+ * TIMECNT = bits 5 to 0
+ */
+void grspw_tc_time(void *d, int *time)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+
+	if (time == NULL)
+		return;
+	if (*time != -1)
+		REG_WRITE(&regs->time, *time & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL));
+	*time = REG_READ(&regs->time) & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL);
+}
+
+/* Generate Tick-In for the given Interrupt-code and check for generation
+ * error.
+ *
+ * Returns zero on success and non-zero on failure
+ */
+/* Generate Tick-In for the given Interrupt-code 'ic' and check for a
+ * generation error. Returns zero on success and non-zero (the ID bit)
+ * on failure.
+ */
+int grspw_ic_tickin(void *d, int ic)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+	SPIN_IRQFLAGS(irqflags);
+	unsigned int icctrl, mask;
+
+	/* Prepare before turning off IRQ */
+	mask = 0x3f << GRSPW_ICCTRL_TXIRQ_BIT;
+	ic = ((ic << GRSPW_ICCTRL_TXIRQ_BIT) & mask) |
+	     GRSPW_ICCTRL_II | GRSPW_ICCTRL_ID;
+
+	SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+	icctrl = REG_READ(&regs->icctrl);
+	icctrl &= ~mask;
+	icctrl |= ic;
+	REG_WRITE(&regs->icctrl, icctrl); /* Generate SpW Interrupt Tick-In */
+	/* The ID bit is valid after two clocks, so there is no need to
+	 * wait before reading it back here */
+	icctrl = REG_READ(&regs->icctrl); /* Check SpW-Int generation error */
+	SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+	return icctrl & GRSPW_ICCTRL_ID;
+}
+
+#define ICOPTS_CTRL_MASK ICOPTS_EN_FLAGFILTER
+#define ICOPTS_ICCTRL_MASK \
+ (ICOPTS_INTNUM | ICOPTS_EN_SPWIRQ_ON_EE | ICOPTS_EN_SPWIRQ_ON_IA | \
+ ICOPTS_EN_PRIO | ICOPTS_EN_TIMEOUTIRQ | ICOPTS_EN_ACKIRQ | \
+ ICOPTS_EN_TICKOUTIRQ | ICOPTS_EN_RX | ICOPTS_EN_TX | \
+ ICOPTS_BASEIRQ)
+
+/* Control Interrupt-code settings of core
+ * Write if not pointing to -1, always read current value
+ *
+ * TODO: A lot of code duplication with grspw_tc_ctrl
+ */
+/* Control Interrupt-code settings of core
+ * Write if not pointing to -1, always read current value
+ *
+ * The ICOPTS bits map one-to-one onto CTRL.TF and the ICCTRL bits; the
+ * global IE bit is (re)computed from the resulting IRQ source bits.
+ *
+ * TODO: A lot of code duplication with grspw_tc_ctrl
+ */
+void grspw_ic_ctrl(void *d, unsigned int *options)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+	unsigned int ctrl;
+	unsigned int icctrl;
+	SPIN_IRQFLAGS(irqflags);
+
+	if (options == NULL)
+		return;
+
+	if (*options != -1) {
+		SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+		ctrl = REG_READ(&regs->ctrl);
+		ctrl &= ~GRSPW_CTRL_TF; /* Depends on one to one relation between
+					 * irqopts bits and ctrl bits */
+		ctrl |= (*options & ICOPTS_CTRL_MASK) <<
+			(GRSPW_CTRL_TF_BIT - 0);
+
+		icctrl = REG_READ(&regs->icctrl);
+		icctrl &= ~ICOPTS_ICCTRL_MASK; /* Depends on one to one relation between
+						* irqopts bits and icctrl bits */
+		icctrl |= *options & ICOPTS_ICCTRL_MASK;
+
+		/* Enable Global IRQ only if some irq source is set */
+		if (grspw_is_irqsource_set(ctrl, icctrl))
+			ctrl |= GRSPW_CTRL_IE;
+		else
+			ctrl &= ~GRSPW_CTRL_IE;
+
+		REG_WRITE(&regs->ctrl, ctrl);
+		REG_WRITE(&regs->icctrl, icctrl);
+		SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+	}
+	*options = ((REG_READ(&regs->ctrl) & ICOPTS_CTRL_MASK) |
+		    (REG_READ(&regs->icctrl) & ICOPTS_ICCTRL_MASK));
+}
+
+/* Write (rw bit 0) and/or read (rw bit 1) the Interrupt-code
+ * configuration registers: tick-out mask, ack mask, reload prescaler
+ * and the ISR/ACK reload values.
+ */
+void grspw_ic_config(void *d, int rw, struct spwpkt_ic_config *cfg)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+
+	if (!cfg)
+		return;
+
+	if (rw & 1) {
+		REG_WRITE(&regs->ictickomask, cfg->tomask);
+		REG_WRITE(&regs->icaamask, cfg->aamask);
+		REG_WRITE(&regs->icrlpresc, cfg->scaler);
+		REG_WRITE(&regs->icrlisr, cfg->isr_reload);
+		REG_WRITE(&regs->icrlintack, cfg->ack_reload);
+	}
+	if (rw & 2) {
+		cfg->tomask = REG_READ(&regs->ictickomask);
+		cfg->aamask = REG_READ(&regs->icaamask);
+		cfg->scaler = REG_READ(&regs->icrlpresc);
+		cfg->isr_reload = REG_READ(&regs->icrlisr);
+		cfg->ack_reload = REG_READ(&regs->icrlintack);
+	}
+}
+
+/* Read or Write Interrupt-code status registers */
+/* Read or Write Interrupt-code status registers. For each non-NULL
+ * pointer: a non-zero input value is written to the register (clearing
+ * those status bits), a zero input reads the register back instead.
+ */
+void grspw_ic_sts(void *d, unsigned int *rxirq, unsigned int *rxack, unsigned int *intto)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+
+	/* No locking needed since the status bits are clear-on-write */
+
+	if (rxirq) {
+		if (*rxirq != 0)
+			REG_WRITE(&regs->icrx, *rxirq);
+		else
+			*rxirq = REG_READ(&regs->icrx);
+	}
+
+	if (rxack) {
+		if (*rxack != 0)
+			REG_WRITE(&regs->icack, *rxack);
+		else
+			*rxack = REG_READ(&regs->icack);
+	}
+
+	if (intto) {
+		if (*intto != 0)
+			REG_WRITE(&regs->ictimeout, *intto);
+		else
+			*intto = REG_READ(&regs->ictimeout);
+	}
+}
+
+/* Assign handler function to Interrupt-code tick out IRQ */
+/* Assign handler function to Interrupt-code tick out IRQ. The argument
+ * is stored before the function pointer so a concurrent ISR never sees
+ * the new handler with the old argument.
+ */
+void grspw_ic_isr(void *d, spwpkt_ic_isr_t handler, void *data)
+{
+	struct grspw_priv *priv = d;
+
+	priv->icisr_arg = data;
+	priv->icisr = handler;
+}
+
+/* Set (not -1) and/or read RMAP options. */
+/* Set (not -1) and/or read RMAP options and destination key.
+ * Returns 0 on success, -1 if RMAP enable was requested but the
+ * hardware has no RMAP target support.
+ */
+int grspw_rmap_ctrl(void *d, int *options, int *dstkey)
+{
+	struct grspw_priv *priv = d;
+	struct grspw_regs *regs = priv->regs;
+	unsigned int ctrl;
+	SPIN_IRQFLAGS(irqflags);
+
+	if (dstkey) {
+		if (*dstkey != -1)
+			REG_WRITE(&regs->destkey, *dstkey & GRSPW_DK_DESTKEY);
+		*dstkey = REG_READ(&regs->destkey) & GRSPW_DK_DESTKEY;
+	}
+	if (options) {
+		if (*options != -1) {
+			if ((*options & RMAPOPTS_EN_RMAP) && !priv->hwsup.rmap)
+				return -1;
+
+
+			SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+			ctrl = REG_READ(&regs->ctrl);
+			/* Options bits 0..1 map onto CTRL.RE and CTRL.RD */
+			ctrl &= ~(GRSPW_CTRL_RE|GRSPW_CTRL_RD);
+			ctrl |= (*options & 0x3) << GRSPW_CTRL_RE_BIT;
+			REG_WRITE(&regs->ctrl, ctrl);
+			SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+		}
+		*options = (REG_READ(&regs->ctrl) >> GRSPW_CTRL_RE_BIT) & 0x3;
+	}
+
+	return 0;
+}
+
+/* Report the hardware RMAP capabilities: whether an RMAP target is
+ * implemented (rmap) and whether RMAP CRC is supported (rmap_crc).
+ * Either output pointer may be NULL.
+ */
+void grspw_rmap_support(void *d, char *rmap, char *rmap_crc)
+{
+	struct grspw_priv *priv = d;
+
+	if (rmap)
+		*rmap = priv->hwsup.rmap;
+	if (rmap_crc)
+		*rmap_crc = priv->hwsup.rmap_crc;
+}
+
+/* Select port, if
+ * -1=The current selected port is returned
+ * 0=Port 0
+ * 1=Port 1
+ * Others=Both Port0 and Port1
+ *
+ * Returns -1 on NULL argument or when Port 1 is requested on single-port
+ * hardware. On success returns 0 and *port holds the resulting selection
+ * (0, 1, or 3 when hardware selects among both ports).
+ */
+int grspw_port_ctrl(void *d, int *port)
+{
+ struct grspw_priv *priv = d;
+ struct grspw_regs *regs = priv->regs;
+ unsigned int ctrl;
+ SPIN_IRQFLAGS(irqflags);
+
+ if (port == NULL)
+ return -1;
+
+ if ((*port == 1) || (*port == 0)) {
+ /* Select port user selected */
+ if ((*port == 1) && (priv->hwsup.nports < 2))
+ return -1; /* Changing to Port 1, but only one port available */
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+ ctrl = REG_READ(&regs->ctrl);
+ ctrl &= ~(GRSPW_CTRL_NP | GRSPW_CTRL_PS);
+ ctrl |= (*port & 1) << GRSPW_CTRL_PS_BIT;
+ REG_WRITE(&regs->ctrl, ctrl);
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+ } else if (*port > 1) {
+ /* Select both ports */
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+ REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_NP);
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+ }
+
+ /* Get current settings */
+ ctrl = REG_READ(&regs->ctrl);
+ if (ctrl & GRSPW_CTRL_NP) {
+ /* Any port, selected by hardware */
+ if (priv->hwsup.nports > 1)
+ *port = 3;
+ else
+ *port = 0; /* Port0 the only port available */
+ } else {
+ *port = (ctrl & GRSPW_CTRL_PS) >> GRSPW_CTRL_PS_BIT;
+ }
+
+ return 0;
+}
+
+/* Returns Number of ports available in hardware (from probed hwsup) */
+int grspw_port_count(void *d)
+{
+ struct grspw_priv *priv = d;
+
+ return priv->hwsup.nports;
+}
+
+/* Current active port: 0 or 1. Read from the Active-Port (AP) bit of
+ * the status register.
+ */
+int grspw_port_active(void *d)
+{
+ struct grspw_priv *priv = d;
+ unsigned int status;
+
+ status = REG_READ(&priv->regs->status);
+
+ return (status & GRSPW_STS_AP) >> GRSPW_STS_AP_BIT;
+}
+
+/* Copy a snapshot of the core statistics into *sts.
+ * A NULL pointer is silently ignored.
+ */
+void grspw_stats_read(void *d, struct grspw_core_stats *sts)
+{
+ struct grspw_priv *priv = d;
+
+ if (sts == NULL)
+ return;
+ memcpy(sts, &priv->stats, sizeof(priv->stats));
+}
+
+/* Reset the core statistics counters to zero */
+void grspw_stats_clr(void *d)
+{
+ struct grspw_priv *priv = d;
+
+ /* Clear most of the statistics */
+ memset(&priv->stats, 0, sizeof(priv->stats));
+}
+
+/*** DMA Interface ***/
+
+/* Initialize the RX and TX Descriptor Ring, empty of packets.
+ * Links each software ring entry to its hardware descriptor, clears the
+ * hardware descriptors, and closes each ring into a circular list.
+ */
+STATIC void grspw_bdrings_init(struct grspw_dma_priv *dma)
+{
+ struct grspw_ring *r;
+ int i;
+
+ /* Empty BD rings */
+ dma->rx_ring_head = dma->rx_ring_base;
+ dma->rx_ring_tail = dma->rx_ring_base;
+ dma->tx_ring_head = dma->tx_ring_base;
+ dma->tx_ring_tail = dma->tx_ring_base;
+
+ /* Init RX Descriptors */
+ r = (struct grspw_ring *)dma->rx_ring_base;
+ for (i=0; i<GRSPW_RXBD_NR; i++) {
+
+ /* Init Ring Entry */
+ r[i].next = &r[i+1];
+ r[i].bd.rx = &dma->rx_bds[i];
+ r[i].pkt = NULL;
+
+ /* Init HW Descriptor */
+ BD_WRITE(&r[i].bd.rx->ctrl, 0);
+ BD_WRITE(&r[i].bd.rx->addr, 0);
+ }
+ /* Close the ring: last entry's out-of-bounds next is overwritten */
+ r[GRSPW_RXBD_NR-1].next = &r[0];
+
+ /* Init TX Descriptors */
+ r = (struct grspw_ring *)dma->tx_ring_base;
+ for (i=0; i<GRSPW_TXBD_NR; i++) {
+
+ /* Init Ring Entry */
+ r[i].next = &r[i+1];
+ r[i].bd.tx = &dma->tx_bds[i];
+ r[i].pkt = NULL;
+
+ /* Init HW Descriptor */
+ BD_WRITE(&r[i].bd.tx->ctrl, 0);
+ BD_WRITE(&r[i].bd.tx->haddr, 0);
+ BD_WRITE(&r[i].bd.tx->dlen, 0);
+ BD_WRITE(&r[i].bd.tx->daddr, 0);
+ }
+ /* Close the ring: last entry's out-of-bounds next is overwritten */
+ r[GRSPW_TXBD_NR-1].next = &r[0];
+}
+
+/* Try to populate descriptor ring with as many as possible READY unused packet
+ * buffers. The packets assigned to a descriptor are put in the end of
+ * the scheduled list.
+ *
+ * The number of Packets scheduled is returned.
+ *
+ * - READY List -> RX-SCHED List
+ * - Descriptors are initialized and enabled for reception
+ */
+STATIC int grspw_rx_schedule_ready(struct grspw_dma_priv *dma)
+{
+ int cnt;
+ unsigned int ctrl, dmactrl;
+ void *hwaddr;
+ struct grspw_rxring *curr_bd;
+ struct grspw_pkt *curr_pkt, *last_pkt;
+ struct grspw_list lst;
+ SPIN_IRQFLAGS(irqflags);
+
+ /* Is Ready Q empty? */
+ if (grspw_list_is_empty(&dma->ready))
+ return 0;
+
+ cnt = 0;
+ lst.head = curr_pkt = dma->ready.head;
+ curr_bd = dma->rx_ring_head;
+ /* Stop at the first descriptor already in use (pkt assigned) */
+ while (!curr_bd->pkt) {
+
+ /* Assign Packet to descriptor */
+ curr_bd->pkt = curr_pkt;
+
+ /* Prepare descriptor address. */
+ hwaddr = curr_pkt->data;
+ if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
+ drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
+ hwaddr, &hwaddr);
+ if (curr_pkt->data == hwaddr) /* translation needed? */
+ curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
+ }
+ BD_WRITE(&curr_bd->bd->addr, hwaddr);
+
+ ctrl = GRSPW_RXBD_EN;
+ if (curr_bd->next == dma->rx_ring_base) {
+ /* Wrap around (only needed when smaller descriptor
+ * table)
+ */
+ ctrl |= GRSPW_RXBD_WR;
+ }
+
+ /* Is this Packet going to be an interrupt Packet? */
+ if ((--dma->rx_irq_en_cnt_curr) <= 0) {
+ if (dma->cfg.rx_irq_en_cnt == 0) {
+ /* IRQ is disabled. A big number to avoid
+ * equal to zero too often
+ */
+ dma->rx_irq_en_cnt_curr = 0x3fffffff;
+ } else {
+ dma->rx_irq_en_cnt_curr = dma->cfg.rx_irq_en_cnt;
+ ctrl |= GRSPW_RXBD_IE;
+ }
+ }
+
+ if (curr_pkt->flags & RXPKT_FLAG_IE)
+ ctrl |= GRSPW_RXBD_IE;
+
+ /* Enable descriptor */
+ BD_WRITE(&curr_bd->bd->ctrl, ctrl);
+
+ last_pkt = curr_pkt;
+ curr_bd = curr_bd->next;
+ cnt++;
+
+ /* Get Next Packet from Ready Queue */
+ if (curr_pkt == dma->ready.tail) {
+ /* Handled all in ready queue. */
+ curr_pkt = NULL;
+ break;
+ }
+ curr_pkt = curr_pkt->next;
+ }
+
+ /* Has Packets been scheduled? */
+ if (cnt > 0) {
+ /* Prepare list for insertion/deletion */
+ lst.tail = last_pkt;
+
+ /* Remove scheduled packets from ready queue */
+ grspw_list_remove_head_list(&dma->ready, &lst);
+ dma->ready_cnt -= cnt;
+ if (dma->stats.ready_cnt_min > dma->ready_cnt)
+ dma->stats.ready_cnt_min = dma->ready_cnt;
+
+ /* Insert scheduled packets into scheduled queue */
+ grspw_list_append_list(&dma->rx_sched, &lst);
+ dma->rx_sched_cnt += cnt;
+ if (dma->stats.rx_sched_cnt_max < dma->rx_sched_cnt)
+ dma->stats.rx_sched_cnt_max = dma->rx_sched_cnt;
+
+ /* Update RX ring position */
+ dma->rx_ring_head = curr_bd;
+
+ /* Make hardware aware of the newly enabled descriptors
+ * We must protect from ISR which writes RI|TI
+ */
+ SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
+ dmactrl = REG_READ(&dma->regs->ctrl);
+ dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
+ dmactrl |= GRSPW_DMACTRL_RE | GRSPW_DMACTRL_RD;
+ REG_WRITE(&dma->regs->ctrl, dmactrl);
+ SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
+ }
+
+ return cnt;
+}
+
+/* Scans the RX descriptor table for scheduled Packet that has been received,
+ * and moves these Packet from the head of the scheduled queue to the
+ * tail of the recv queue.
+ *
+ * Also, for all packets the status is updated.
+ *
+ * - RX-SCHED List -> RECV List
+ *
+ * Return Value
+ * Number of packets moved
+ */
+STATIC int grspw_rx_process_scheduled(struct grspw_dma_priv *dma)
+{
+ struct grspw_rxring *curr;
+ struct grspw_pkt *last_pkt;
+ int recv_pkt_cnt = 0;
+ unsigned int ctrl;
+ struct grspw_list lst;
+
+ curr = dma->rx_ring_tail;
+
+ /* Step into RX ring to find if packets have been scheduled for
+ * reception.
+ */
+ if (!curr->pkt)
+ return 0; /* No scheduled packets, thus no received, abort */
+
+ /* There has been Packets scheduled ==> scheduled Packets may have been
+ * received and needs to be collected into RECV List.
+ *
+ * A temporary list "lst" with all received packets is created.
+ */
+ lst.head = curr->pkt;
+
+ /* Loop until first enabled "unreceived" SpW Packet is found.
+ * An unused descriptor is indicated by an unassigned pkt field.
+ * EN cleared by hardware means the descriptor has been processed.
+ */
+ while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_RXBD_EN)) {
+ /* Handle one received Packet */
+
+ /* Remember last handled Packet so that insertion/removal from
+ * Packet lists go fast.
+ */
+ last_pkt = curr->pkt;
+
+ /* Get Length of Packet in bytes, and reception options */
+ last_pkt->dlen = (ctrl & GRSPW_RXBD_LEN) >> GRSPW_RXBD_LEN_BIT;
+
+ /* Set flags to indicate error(s) and CRC information,
+ * and Mark Received.
+ */
+ last_pkt->flags = (last_pkt->flags & ~RXPKT_FLAG_OUTPUT_MASK) |
+ ((ctrl >> 20) & RXPKT_FLAG_OUTPUT_MASK) |
+ RXPKT_FLAG_RX;
+
+ /* Packet was Truncated? */
+ if (ctrl & GRSPW_RXBD_TR)
+ dma->stats.rx_err_trunk++;
+
+ /* Error End-Of-Packet? */
+ if (ctrl & GRSPW_RXBD_EP)
+ dma->stats.rx_err_endpkt++;
+ curr->pkt = NULL; /* Mark descriptor unused */
+
+ /* Increment */
+ curr = curr->next;
+ recv_pkt_cnt++;
+ }
+
+ /* 1. Remove all handled packets from scheduled queue
+ * 2. Put all handled packets into recv queue
+ */
+ if (recv_pkt_cnt > 0) {
+
+ /* Update Stats, Number of Received Packets */
+ dma->stats.rx_pkts += recv_pkt_cnt;
+
+ /* Save RX ring position */
+ dma->rx_ring_tail = curr;
+
+ /* Prepare list for insertion/deletion */
+ lst.tail = last_pkt;
+
+ /* Remove received Packets from RX-SCHED queue */
+ grspw_list_remove_head_list(&dma->rx_sched, &lst);
+ dma->rx_sched_cnt -= recv_pkt_cnt;
+ if (dma->stats.rx_sched_cnt_min > dma->rx_sched_cnt)
+ dma->stats.rx_sched_cnt_min = dma->rx_sched_cnt;
+
+ /* Insert received Packets into RECV queue */
+ grspw_list_append_list(&dma->recv, &lst);
+ dma->recv_cnt += recv_pkt_cnt;
+ if (dma->stats.recv_cnt_max < dma->recv_cnt)
+ dma->stats.recv_cnt_max = dma->recv_cnt;
+ }
+
+ return recv_pkt_cnt;
+}
+
+/* Try to populate descriptor ring with as many SEND packets as possible. The
+ * packets assigned to a descriptor are put in the end of
+ * the scheduled list.
+ *
+ * The number of Packets scheduled is returned.
+ *
+ * - SEND List -> TX-SCHED List
+ * - Descriptors are initialized and enabled for transmission
+ */
+STATIC int grspw_tx_schedule_send(struct grspw_dma_priv *dma)
+{
+ int cnt;
+ unsigned int ctrl, dmactrl;
+ void *hwaddr;
+ struct grspw_txring *curr_bd;
+ struct grspw_pkt *curr_pkt, *last_pkt;
+ struct grspw_list lst;
+ SPIN_IRQFLAGS(irqflags);
+
+ /* Is Send Q empty? */
+ if (grspw_list_is_empty(&dma->send))
+ return 0;
+
+ cnt = 0;
+ lst.head = curr_pkt = dma->send.head;
+ curr_bd = dma->tx_ring_head;
+ /* Stop at the first descriptor already in use (pkt assigned) */
+ while (!curr_bd->pkt) {
+
+ /* Assign Packet to descriptor */
+ curr_bd->pkt = curr_pkt;
+
+ /* Set up header transmission */
+ if (curr_pkt->hdr && curr_pkt->hlen) {
+ hwaddr = curr_pkt->hdr;
+ if (curr_pkt->flags & PKT_FLAG_TR_HDR) {
+ drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
+ hwaddr, &hwaddr);
+ /* translation needed? */
+ if (curr_pkt->hdr == hwaddr)
+ curr_pkt->flags &= ~PKT_FLAG_TR_HDR;
+ }
+ BD_WRITE(&curr_bd->bd->haddr, hwaddr);
+ ctrl = GRSPW_TXBD_EN |
+ (curr_pkt->hlen & GRSPW_TXBD_HLEN);
+ } else {
+ ctrl = GRSPW_TXBD_EN;
+ }
+ /* Enable IRQ generation and CRC options as specified
+ * by user.
+ */
+ ctrl |= (curr_pkt->flags & TXPKT_FLAG_INPUT_MASK) << 8;
+
+ if (curr_bd->next == dma->tx_ring_base) {
+ /* Wrap around (only needed when smaller descriptor table) */
+ ctrl |= GRSPW_TXBD_WR;
+ }
+
+ /* Is this Packet going to be an interrupt Packet? */
+ if ((--dma->tx_irq_en_cnt_curr) <= 0) {
+ if (dma->cfg.tx_irq_en_cnt == 0) {
+ /* IRQ is disabled.
+ * A big number to avoid equal to zero too often
+ */
+ dma->tx_irq_en_cnt_curr = 0x3fffffff;
+ } else {
+ dma->tx_irq_en_cnt_curr = dma->cfg.tx_irq_en_cnt;
+ ctrl |= GRSPW_TXBD_IE;
+ }
+ }
+
+ /* Prepare descriptor address. Parts of CTRL is written to
+ * DLEN for debug-only (CTRL is cleared by HW).
+ */
+ if (curr_pkt->data && curr_pkt->dlen) {
+ hwaddr = curr_pkt->data;
+ if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
+ drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
+ hwaddr, &hwaddr);
+ /* translation needed? */
+ if (curr_pkt->data == hwaddr)
+ curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
+ }
+ BD_WRITE(&curr_bd->bd->daddr, hwaddr);
+ BD_WRITE(&curr_bd->bd->dlen, curr_pkt->dlen |
+ ((ctrl & 0x3f000) << 12));
+ } else {
+ BD_WRITE(&curr_bd->bd->daddr, 0);
+ BD_WRITE(&curr_bd->bd->dlen, ((ctrl & 0x3f000) << 12));
+ }
+
+ /* Enable descriptor */
+ BD_WRITE(&curr_bd->bd->ctrl, ctrl);
+
+ last_pkt = curr_pkt;
+ curr_bd = curr_bd->next;
+ cnt++;
+
+ /* Get Next Packet from Send Queue */
+ if (curr_pkt == dma->send.tail) {
+ /* Handled all in send queue. */
+ curr_pkt = NULL;
+ break;
+ }
+ curr_pkt = curr_pkt->next;
+ }
+
+ /* Have Packets been scheduled? */
+ if (cnt > 0) {
+ /* Prepare list for insertion/deletion */
+ lst.tail = last_pkt;
+
+ /* Remove scheduled packets from send queue */
+ grspw_list_remove_head_list(&dma->send, &lst);
+ dma->send_cnt -= cnt;
+ if (dma->stats.send_cnt_min > dma->send_cnt)
+ dma->stats.send_cnt_min = dma->send_cnt;
+
+ /* Insert scheduled packets into scheduled queue */
+ grspw_list_append_list(&dma->tx_sched, &lst);
+ dma->tx_sched_cnt += cnt;
+ if (dma->stats.tx_sched_cnt_max < dma->tx_sched_cnt)
+ dma->stats.tx_sched_cnt_max = dma->tx_sched_cnt;
+
+ /* Update TX ring position */
+ dma->tx_ring_head = curr_bd;
+
+ /* Make hardware aware of the newly enabled descriptors */
+ SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
+ dmactrl = REG_READ(&dma->regs->ctrl);
+ dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
+ dmactrl |= GRSPW_DMACTRL_TE;
+ REG_WRITE(&dma->regs->ctrl, dmactrl);
+ SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
+ }
+ return cnt;
+}
+
+/* Scans the TX descriptor table for transmitted packets, and moves these
+ * packets from the head of the scheduled queue to the tail of the sent queue.
+ *
+ * Also, for all packets the status is updated.
+ *
+ * - TX-SCHED List -> SENT List
+ *
+ * Return Value
+ * Number of packet moved
+ */
+STATIC int grspw_tx_process_scheduled(struct grspw_dma_priv *dma)
+{
+ struct grspw_txring *curr;
+ struct grspw_pkt *last_pkt;
+ int sent_pkt_cnt = 0;
+ unsigned int ctrl;
+ struct grspw_list lst;
+
+ curr = dma->tx_ring_tail;
+
+ /* Step into TX ring to find if packets have been scheduled for
+ * transmission.
+ */
+ if (!curr->pkt)
+ return 0; /* No scheduled packets, thus no sent, abort */
+
+ /* There has been Packets scheduled ==> scheduled Packets may have been
+ * transmitted and needs to be collected into SENT List.
+ *
+ * A temporary list "lst" with all sent packets is created.
+ */
+ lst.head = curr->pkt;
+
+ /* Loop until first enabled "un-transmitted" SpW Packet is found.
+ * An unused descriptor is indicated by an unassigned pkt field.
+ * EN cleared by hardware means the descriptor has been processed.
+ */
+ while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_TXBD_EN)) {
+ /* Handle one sent Packet */
+
+ /* Remember last handled Packet so that insertion/removal from
+ * packet lists go fast.
+ */
+ last_pkt = curr->pkt;
+
+ /* Set flags to indicate error(s) and Mark Sent.
+ */
+ last_pkt->flags = (last_pkt->flags & ~TXPKT_FLAG_OUTPUT_MASK) |
+ (ctrl & TXPKT_FLAG_LINKERR) |
+ TXPKT_FLAG_TX;
+
+ /* Sent packet experienced link error? */
+ if (ctrl & GRSPW_TXBD_LE)
+ dma->stats.tx_err_link++;
+
+ curr->pkt = NULL; /* Mark descriptor unused */
+
+ /* Increment */
+ curr = curr->next;
+ sent_pkt_cnt++;
+ }
+
+ /* 1. Remove all handled packets from TX-SCHED queue
+ * 2. Put all handled packets into SENT queue
+ */
+ if (sent_pkt_cnt > 0) {
+ /* Update Stats, Number of Transmitted Packets */
+ dma->stats.tx_pkts += sent_pkt_cnt;
+
+ /* Save TX ring position */
+ dma->tx_ring_tail = curr;
+
+ /* Prepare list for insertion/deletion */
+ lst.tail = last_pkt;
+
+ /* Remove sent packets from TX-SCHED queue */
+ grspw_list_remove_head_list(&dma->tx_sched, &lst);
+ dma->tx_sched_cnt -= sent_pkt_cnt;
+ if (dma->stats.tx_sched_cnt_min > dma->tx_sched_cnt)
+ dma->stats.tx_sched_cnt_min = dma->tx_sched_cnt;
+
+ /* Insert sent packets into SENT queue */
+ grspw_list_append_list(&dma->sent, &lst);
+ dma->sent_cnt += sent_pkt_cnt;
+ if (dma->stats.sent_cnt_max < dma->sent_cnt)
+ dma->stats.sent_cnt_max = dma->sent_cnt;
+ }
+
+ return sent_pkt_cnt;
+}
+
+/* Open DMA channel chan_no of device d.
+ *
+ * Allocates the channel's descriptor rings and semaphores and applies
+ * the default configuration. Returns a channel handle, or NULL on bad
+ * channel number, channel already open, or resource allocation failure.
+ */
+void *grspw_dma_open(void *d, int chan_no)
+{
+ struct grspw_priv *priv = d;
+ struct grspw_dma_priv *dma;
+ int size;
+
+ if ((chan_no < 0) || (priv->hwsup.ndma_chans <= chan_no))
+ return NULL;
+
+ dma = &priv->dma[chan_no];
+
+ /* Take GRSPW lock */
+ if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return NULL;
+
+ if (dma->open) {
+ dma = NULL;
+ goto out;
+ }
+
+ dma->started = 0;
+
+ /* Set Default Configuration:
+ *
+ * - MAX RX Packet Length = DEFAULT_RXMAX
+ * - Disable IRQ generation
+ * - Disable packet spilling (DMAFLAG_NO_SPILL)
+ */
+ dma->cfg.rxmaxlen = DEFAULT_RXMAX;
+ dma->cfg.rx_irq_en_cnt = 0;
+ dma->cfg.tx_irq_en_cnt = 0;
+ dma->cfg.flags = DMAFLAG_NO_SPILL;
+
+ /* set to NULL so that error exit works correctly */
+ dma->sem_rxdma = RTEMS_ID_NONE;
+ dma->sem_txdma = RTEMS_ID_NONE;
+ dma->rx_wait.sem_wait = RTEMS_ID_NONE;
+ dma->tx_wait.sem_wait = RTEMS_ID_NONE;
+ dma->rx_ring_base = NULL;
+
+ /* DMA Channel Semaphore created with count = 1 */
+ if (rtems_semaphore_create(
+ rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2), 1,
+ RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
+ RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
+ RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_rxdma) != RTEMS_SUCCESSFUL) {
+ dma->sem_rxdma = RTEMS_ID_NONE;
+ goto err;
+ }
+ if (rtems_semaphore_create(
+ rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2+1), 1,
+ RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
+ RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
+ RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_txdma) != RTEMS_SUCCESSFUL) {
+ dma->sem_txdma = RTEMS_ID_NONE;
+ goto err;
+ }
+
+ /* Allocate memory for the two descriptor rings in one allocation;
+ * the TX ring follows the RX ring in the same buffer.
+ */
+ size = sizeof(struct grspw_ring) * (GRSPW_RXBD_NR + GRSPW_TXBD_NR);
+ dma->rx_ring_base = grlib_malloc(size);
+ dma->tx_ring_base = (struct grspw_txring *)&dma->rx_ring_base[GRSPW_RXBD_NR];
+ if (dma->rx_ring_base == NULL)
+ goto err;
+
+ /* Create DMA RX and TX Channel sempahore with count = 0 */
+ if (rtems_semaphore_create(
+ rtems_build_name('S', 'R', '0' + priv->index, '0' + chan_no), 0,
+ RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
+ RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
+ RTEMS_NO_PRIORITY_CEILING, 0, &dma->rx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
+ dma->rx_wait.sem_wait = RTEMS_ID_NONE;
+ goto err;
+ }
+ if (rtems_semaphore_create(
+ rtems_build_name('S', 'T', '0' + priv->index, '0' + chan_no), 0,
+ RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
+ RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
+ RTEMS_NO_PRIORITY_CEILING, 0, &dma->tx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
+ dma->tx_wait.sem_wait = RTEMS_ID_NONE;
+ goto err;
+ }
+
+ /* Reset software structures */
+ grspw_dma_reset(dma);
+
+ /* Take the device */
+ dma->open = 1;
+out:
+ /* Return GRSPW Lock */
+ rtems_semaphore_release(grspw_sem);
+
+ return dma;
+
+ /* initialization error happened */
+err:
+ if (dma->sem_rxdma != RTEMS_ID_NONE)
+ rtems_semaphore_delete(dma->sem_rxdma);
+ if (dma->sem_txdma != RTEMS_ID_NONE)
+ rtems_semaphore_delete(dma->sem_txdma);
+ if (dma->rx_wait.sem_wait != RTEMS_ID_NONE)
+ rtems_semaphore_delete(dma->rx_wait.sem_wait);
+ if (dma->tx_wait.sem_wait != RTEMS_ID_NONE)
+ rtems_semaphore_delete(dma->tx_wait.sem_wait);
+ if (dma->rx_ring_base)
+ free(dma->rx_ring_base);
+ dma = NULL;
+ goto out;
+}
+
+/* Initialize Software Structures:
+ * - Clear all Queues
+ * - init BD ring
+ * - init IRQ counter
+ * - clear statistics counters
+ * - init wait structures and semaphores
+ */
+STATIC void grspw_dma_reset(struct grspw_dma_priv *dma)
+{
+ /* Empty RX and TX queues */
+ grspw_list_clr(&dma->ready);
+ grspw_list_clr(&dma->rx_sched);
+ grspw_list_clr(&dma->recv);
+ grspw_list_clr(&dma->send);
+ grspw_list_clr(&dma->tx_sched);
+ grspw_list_clr(&dma->sent);
+ dma->ready_cnt = 0;
+ dma->rx_sched_cnt = 0;
+ dma->recv_cnt = 0;
+ dma->send_cnt = 0;
+ dma->tx_sched_cnt = 0;
+ dma->sent_cnt = 0;
+
+ dma->rx_irq_en_cnt_curr = 0;
+ dma->tx_irq_en_cnt_curr = 0;
+
+ /* Re-link and clear the hardware descriptor rings */
+ grspw_bdrings_init(dma);
+
+ dma->rx_wait.waiting = 0;
+ dma->tx_wait.waiting = 0;
+
+ grspw_dma_stats_clr(dma);
+}
+
+/* Close a DMA channel previously opened with grspw_dma_open().
+ *
+ * Returns 0 on success (or already closed), -1 if the channel locks
+ * could not be taken, 1 if the channel is still started or has waiters.
+ */
+int grspw_dma_close(void *c)
+{
+ struct grspw_dma_priv *dma = c;
+
+ if (!dma->open)
+ return 0;
+
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return -1;
+ if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ rtems_semaphore_release(dma->sem_rxdma);
+ return -1;
+ }
+
+ /* Can not close active DMA channel. User must stop DMA and make sure
+ * no threads are active/blocked within driver.
+ */
+ if (dma->started || dma->rx_wait.waiting || dma->tx_wait.waiting) {
+ rtems_semaphore_release(dma->sem_txdma);
+ rtems_semaphore_release(dma->sem_rxdma);
+ return 1;
+ }
+
+ /* Free resources */
+ rtems_semaphore_delete(dma->rx_wait.sem_wait);
+ rtems_semaphore_delete(dma->tx_wait.sem_wait);
+ /* Release and delete lock. Operations requiring lock will fail */
+ rtems_semaphore_delete(dma->sem_txdma);
+ rtems_semaphore_delete(dma->sem_rxdma);
+ dma->sem_txdma = RTEMS_ID_NONE;
+ dma->sem_rxdma = RTEMS_ID_NONE;
+
+ /* Free memory */
+ if (dma->rx_ring_base)
+ free(dma->rx_ring_base);
+ dma->rx_ring_base = NULL;
+ dma->tx_ring_base = NULL;
+
+ dma->open = 0;
+ return 0;
+}
+
+/* Re-enable DMA interrupts after interrupt-driven processing.
+ *
+ * rxtx bit0 selects RX, bit1 selects TX; force enables the interrupt
+ * regardless of channel configuration. Returns 1 if the DMA channel is
+ * stopped, 2 on DMA error, otherwise the previously pending PR/PS
+ * status bits of the DMA control register.
+ */
+unsigned int grspw_dma_enable_int(void *c, int rxtx, int force)
+{
+ struct grspw_dma_priv *dma = c;
+ int rc = 0;
+ unsigned int ctrl, ctrl_old;
+ SPIN_IRQFLAGS(irqflags);
+
+ SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
+ if (dma->started == 0) {
+ rc = 1; /* DMA stopped */
+ goto out;
+ }
+ ctrl = REG_READ(&dma->regs->ctrl);
+ ctrl_old = ctrl;
+
+ /* Read/Write DMA error ? */
+ if (ctrl & GRSPW_DMA_STATUS_ERROR) {
+ rc = 2; /* DMA error */
+ goto out;
+ }
+
+ /* DMA has finished a TX/RX packet and user wants work-task to
+ * take care of DMA table processing.
+ */
+ ctrl &= ~GRSPW_DMACTRL_AT;
+
+ if ((rxtx & 1) == 0)
+ ctrl &= ~GRSPW_DMACTRL_PR;
+ else if (force || ((dma->cfg.rx_irq_en_cnt != 0) ||
+ (dma->cfg.flags & DMAFLAG2_RXIE)))
+ ctrl |= GRSPW_DMACTRL_RI;
+
+ if ((rxtx & 2) == 0)
+ ctrl &= ~GRSPW_DMACTRL_PS;
+ else if (force || ((dma->cfg.tx_irq_en_cnt != 0) ||
+ (dma->cfg.flags & DMAFLAG2_TXIE)))
+ ctrl |= GRSPW_DMACTRL_TI;
+
+ REG_WRITE(&dma->regs->ctrl, ctrl);
+ /* Re-enabled interrupts previously enabled */
+ rc = ctrl_old & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS);
+out:
+ SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
+ return rc;
+}
+
+/* Schedule List of packets for transmission at some point in
+ * future.
+ *
+ * 1. Move transmitted packets to SENT List (SCHED->SENT)
+ * 2. Add the requested packets to the SEND List (USER->SEND)
+ * 3. Schedule as many packets as possible (SEND->SCHED)
+ *
+ * opts bit0 skips step 1, bit1 skips step 3. Returns 0 on success,
+ * 1 if the DMA channel is stopped, -1 if the lock could not be taken.
+ */
+int grspw_dma_tx_send(void *c, int opts, struct grspw_list *pkts, int count)
+{
+ struct grspw_dma_priv *dma = c;
+ int ret;
+
+ /* Take DMA channel lock */
+ if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return -1;
+
+ if (dma->started == 0) {
+ ret = 1; /* signal DMA has been stopped */
+ goto out;
+ }
+ ret = 0;
+
+ /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
+ if ((opts & 1) == 0)
+ grspw_tx_process_scheduled(dma);
+
+ /* 2. Add the requested packets to the SEND List (USER->SEND) */
+ if (pkts && (count > 0)) {
+ grspw_list_append_list(&dma->send, pkts);
+ dma->send_cnt += count;
+ if (dma->stats.send_cnt_max < dma->send_cnt)
+ dma->stats.send_cnt_max = dma->send_cnt;
+ }
+
+ /* 3. Schedule as many packets as possible (SEND->SCHED) */
+ if ((opts & 2) == 0)
+ grspw_tx_schedule_send(dma);
+
+out:
+ /* Unlock DMA channel */
+ rtems_semaphore_release(dma->sem_txdma);
+
+ return ret;
+}
+
+/* Reclaim sent packets back to the caller.
+ *
+ * 1. Move transmitted packets to SENT List (SCHED->SENT), unless opts bit0
+ * 2. Move all or *count SENT packets to the caller's list (SENT->USER)
+ * 3. Schedule as many packets as possible (SEND->SCHED), unless opts bit1
+ *
+ * Returns -1 if the lock could not be taken, otherwise 1 when the DMA
+ * channel is stopped and 0 when started.
+ */
+int grspw_dma_tx_reclaim(void *c, int opts, struct grspw_list *pkts, int *count)
+{
+ struct grspw_dma_priv *dma = c;
+ struct grspw_pkt *pkt, *lastpkt;
+ int cnt, started;
+
+ /* Take DMA channel lock */
+ if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return -1;
+
+ /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
+ started = dma->started;
+ if ((started > 0) && ((opts & 1) == 0))
+ grspw_tx_process_scheduled(dma);
+
+ /* Move all/count SENT packet to the callers list (SENT->USER) */
+ if (pkts) {
+ if ((count == NULL) || (*count == -1) ||
+ (*count >= dma->sent_cnt)) {
+ /* Move all SENT Packets */
+ *pkts = dma->sent;
+ grspw_list_clr(&dma->sent);
+ if (count)
+ *count = dma->sent_cnt;
+ dma->sent_cnt = 0;
+ } else {
+ /* Move a number of SENT Packets */
+ pkts->head = pkt = lastpkt = dma->sent.head;
+ cnt = 0;
+ while (cnt < *count) {
+ lastpkt = pkt;
+ pkt = pkt->next;
+ cnt++;
+ }
+ if (cnt > 0) {
+ pkts->tail = lastpkt;
+ grspw_list_remove_head_list(&dma->sent, pkts);
+ dma->sent_cnt -= cnt;
+ } else {
+ grspw_list_clr(pkts);
+ }
+ }
+ } else if (count) {
+ *count = 0;
+ }
+
+ /* 3. Schedule as many packets as possible (SEND->SCHED) */
+ if ((started > 0) && ((opts & 2) == 0))
+ grspw_tx_schedule_send(dma);
+
+ /* Unlock DMA channel */
+ rtems_semaphore_release(dma->sem_txdma);
+
+ return (~started) & 1; /* signal DMA has been stopped */
+}
+
+/* Report TX queue counters: send, scheduled, sent, and the number of
+ * descriptors already processed by hardware. NULL pointers are skipped.
+ * Silently returns if the channel lock cannot be taken.
+ */
+void grspw_dma_tx_count(void *c, int *send, int *sched, int *sent, int *hw)
+{
+ struct grspw_dma_priv *dma = c;
+ int sched_cnt, diff;
+ unsigned int hwbd;
+ struct grspw_txbd *tailbd;
+
+ /* Take device lock - Wait until we get semaphore.
+ * The lock is taken so that the counters are in sync with each other
+ * and that DMA descriptor table and tx_ring_tail is not being updated
+ * during HW counter processing in this function.
+ */
+ if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return;
+
+ if (send)
+ *send = dma->send_cnt;
+ sched_cnt = dma->tx_sched_cnt;
+ if (sched)
+ *sched = sched_cnt;
+ if (sent)
+ *sent = dma->sent_cnt;
+ if (hw) {
+ /* Calculate number of descriptors (processed by HW) between
+ * HW pointer and oldest SW pointer.
+ */
+ hwbd = REG_READ(&dma->regs->txdesc);
+ tailbd = dma->tx_ring_tail->bd;
+ diff = ((hwbd - (unsigned int)tailbd) / GRSPW_TXBD_SIZE) &
+ (GRSPW_TXBD_NR - 1);
+ /* Handle special case when HW and SW pointers are equal
+ * because all TX descriptors have been processed by HW.
+ */
+ if ((diff == 0) && (sched_cnt == GRSPW_TXBD_NR) &&
+ ((BD_READ(&tailbd->ctrl) & GRSPW_TXBD_EN) == 0)) {
+ diff = GRSPW_TXBD_NR;
+ }
+ *hw = diff;
+ }
+
+ /* Unlock DMA channel */
+ rtems_semaphore_release(dma->sem_txdma);
+}
+
+/* Evaluate the TX wait condition set up by grspw_dma_tx_wait().
+ * Returns non-zero when the condition (send-side AND/OR sent-side,
+ * selected by tx_wait.op) is satisfied.
+ */
+static inline int grspw_tx_wait_eval(struct grspw_dma_priv *dma)
+{
+ int send_val, sent_val;
+
+ if (dma->tx_wait.send_cnt >= (dma->send_cnt + dma->tx_sched_cnt))
+ send_val = 1;
+ else
+ send_val = 0;
+
+ if (dma->tx_wait.sent_cnt <= dma->sent_cnt)
+ sent_val = 1;
+ else
+ sent_val = 0;
+
+ /* AND or OR ? */
+ if (dma->tx_wait.op == 0)
+ return send_val & sent_val; /* AND */
+ else
+ return send_val | sent_val; /* OR */
+}
+
+/* Block until send_cnt or fewer packets are Queued in "Send and Scheduled" Q,
+ * op (AND or OR), sent_cnt or more packet "have been sent" (Sent Q) condition
+ * is met.
+ * If a link error occurs and the Stop on Link error is defined, this function
+ * will also return to caller.
+ *
+ * Returns 0 when the condition is met, 1 when the DMA channel is/was
+ * stopped, 2 on timeout, 3 when another thread is already waiting,
+ * -1 on lock or unknown semaphore error.
+ */
+int grspw_dma_tx_wait(void *c, int send_cnt, int op, int sent_cnt, int timeout)
+{
+ struct grspw_dma_priv *dma = c;
+ int ret, rc, initialized = 0;
+
+ if (timeout == 0)
+ timeout = RTEMS_NO_TIMEOUT;
+
+check_condition:
+
+ /* Take DMA channel lock */
+ if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return -1;
+
+ /* Check so that no other thread is waiting, this driver only supports
+ * one waiter at a time.
+ */
+ if (initialized == 0 && dma->tx_wait.waiting) {
+ ret = 3;
+ goto out_release;
+ }
+
+ /* Stop if link error or similar (DMA stopped), abort */
+ if (dma->started == 0) {
+ ret = 1;
+ goto out_release;
+ }
+
+ /* Set up Condition */
+ dma->tx_wait.send_cnt = send_cnt;
+ dma->tx_wait.op = op;
+ dma->tx_wait.sent_cnt = sent_cnt;
+
+ if (grspw_tx_wait_eval(dma) == 0) {
+ /* Prepare Wait */
+ initialized = 1;
+ dma->tx_wait.waiting = 1;
+
+ /* Release DMA channel lock */
+ rtems_semaphore_release(dma->sem_txdma);
+
+ /* Try to take Wait lock, if this fail link may have gone down
+ * or user stopped this DMA channel
+ */
+ rc = rtems_semaphore_obtain(dma->tx_wait.sem_wait, RTEMS_WAIT,
+ timeout);
+ if (rc == RTEMS_TIMEOUT) {
+ ret = 2;
+ goto out;
+ } else if (rc == RTEMS_UNSATISFIED ||
+ rc == RTEMS_OBJECT_WAS_DELETED) {
+ ret = 1; /* sem was flushed/deleted, means DMA stop */
+ goto out;
+ } else if (rc != RTEMS_SUCCESSFUL) {
+ /* Unknown Error */
+ ret = -1;
+ goto out;
+ } else if (dma->started == 0) {
+ ret = 1;
+ goto out;
+ }
+
+ /* Check condition once more */
+ goto check_condition;
+ }
+
+ ret = 0;
+
+out_release:
+ /* Unlock DMA channel */
+ rtems_semaphore_release(dma->sem_txdma);
+
+out:
+ if (initialized)
+ dma->tx_wait.waiting = 0;
+ return ret;
+}
+
+/* Collect received packets back to the caller.
+ *
+ * 1. Move Scheduled packets to RECV List (SCHED->RECV), unless opts bit0
+ * 2. Move all or *count RECV packets to the caller's list (RECV->USER)
+ * 3. Schedule as many free buffers as possible (READY->SCHED), unless
+ *    opts bit1
+ *
+ * Returns -1 if the lock could not be taken, otherwise 1 when the DMA
+ * channel is stopped and 0 when started.
+ */
+int grspw_dma_rx_recv(void *c, int opts, struct grspw_list *pkts, int *count)
+{
+ struct grspw_dma_priv *dma = c;
+ struct grspw_pkt *pkt, *lastpkt;
+ int cnt, started;
+
+ /* Take DMA channel lock */
+ if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return -1;
+
+ /* 1. Move Scheduled packets to RECV List (SCHED->RECV) */
+ started = dma->started;
+ if (((opts & 1) == 0) && (started > 0))
+ grspw_rx_process_scheduled(dma);
+
+ /* Move all RECV packet to the callers list */
+ if (pkts) {
+ if ((count == NULL) || (*count == -1) ||
+ (*count >= dma->recv_cnt)) {
+ /* Move all Received packets */
+ *pkts = dma->recv;
+ grspw_list_clr(&dma->recv);
+ if ( count )
+ *count = dma->recv_cnt;
+ dma->recv_cnt = 0;
+ } else {
+ /* Move a number of RECV Packets */
+ pkts->head = pkt = lastpkt = dma->recv.head;
+ cnt = 0;
+ while (cnt < *count) {
+ lastpkt = pkt;
+ pkt = pkt->next;
+ cnt++;
+ }
+ if (cnt > 0) {
+ pkts->tail = lastpkt;
+ grspw_list_remove_head_list(&dma->recv, pkts);
+ dma->recv_cnt -= cnt;
+ } else {
+ grspw_list_clr(pkts);
+ }
+ }
+ } else if (count) {
+ *count = 0;
+ }
+
+ /* 3. Schedule as many free packet buffers as possible (READY->SCHED) */
+ if (((opts & 2) == 0) && (started > 0))
+ grspw_rx_schedule_ready(dma);
+
+ /* Unlock DMA channel */
+ rtems_semaphore_release(dma->sem_rxdma);
+
+ return (~started) & 1;
+}
+
+/* Hand free/ready packet buffers to the driver for future reception.
+ *
+ * 1. Move Received packets to RECV List (SCHED->RECV), unless opts bit0
+ * 2. Append the caller's buffers to the READY List (USER->READY)
+ * 3. Schedule as many packets as possible (READY->SCHED), unless opts bit1
+ *
+ * Returns 0 on success, 1 if the DMA channel is stopped, -1 if the lock
+ * could not be taken.
+ */
+int grspw_dma_rx_prepare(void *c, int opts, struct grspw_list *pkts, int count)
+{
+ struct grspw_dma_priv *dma = c;
+ int ret;
+
+ /* Take DMA channel lock */
+ if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return -1;
+
+ if (dma->started == 0) {
+ ret = 1;
+ goto out;
+ }
+
+ /* 1. Move Received packets to RECV List (SCHED->RECV) */
+ if ((opts & 1) == 0)
+ grspw_rx_process_scheduled(dma);
+
+ /* 2. Add the "free/ready" packet buffers to the READY List (USER->READY) */
+ if (pkts && (count > 0)) {
+ grspw_list_append_list(&dma->ready, pkts);
+ dma->ready_cnt += count;
+ if (dma->stats.ready_cnt_max < dma->ready_cnt)
+ dma->stats.ready_cnt_max = dma->ready_cnt;
+ }
+
+ /* 3. Schedule as many packets as possible (READY->SCHED) */
+ if ((opts & 2) == 0)
+ grspw_rx_schedule_ready(dma);
+
+ ret = 0;
+out:
+ /* Unlock DMA channel */
+ rtems_semaphore_release(dma->sem_rxdma);
+
+ return ret;
+}
+
+/* Report RX queue counters: ready, scheduled, received, and the number
+ * of descriptors already processed by hardware. NULL pointers are
+ * skipped. Silently returns if the channel lock cannot be taken.
+ */
+void grspw_dma_rx_count(void *c, int *ready, int *sched, int *recv, int *hw)
+{
+ struct grspw_dma_priv *dma = c;
+ int sched_cnt, diff;
+ unsigned int hwbd;
+ struct grspw_rxbd *tailbd;
+
+ /* Take device lock - Wait until we get semaphore.
+ * The lock is taken so that the counters are in sync with each other
+ * and that DMA descriptor table and rx_ring_tail is not being updated
+ * during HW counter processing in this function.
+ */
+ if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return;
+
+ if (ready)
+ *ready = dma->ready_cnt;
+ sched_cnt = dma->rx_sched_cnt;
+ if (sched)
+ *sched = sched_cnt;
+ if (recv)
+ *recv = dma->recv_cnt;
+ if (hw) {
+ /* Calculate number of descriptors (processed by HW) between
+ * HW pointer and oldest SW pointer.
+ */
+ hwbd = REG_READ(&dma->regs->rxdesc);
+ tailbd = dma->rx_ring_tail->bd;
+ diff = ((hwbd - (unsigned int)tailbd) / GRSPW_RXBD_SIZE) &
+ (GRSPW_RXBD_NR - 1);
+ /* Handle special case when HW and SW pointers are equal
+ * because all RX descriptors have been processed by HW.
+ */
+ if ((diff == 0) && (sched_cnt == GRSPW_RXBD_NR) &&
+ ((BD_READ(&tailbd->ctrl) & GRSPW_RXBD_EN) == 0)) {
+ diff = GRSPW_RXBD_NR;
+ }
+ *hw = diff;
+ }
+
+ /* Unlock DMA channel */
+ rtems_semaphore_release(dma->sem_rxdma);
+}
+
+/* Evaluate the RX wait condition stored in dma->rx_wait. Caller must hold
+ * the RX channel lock. Returns non-zero when the condition is met:
+ * - ready condition: at most rx_wait.ready_cnt buffers in READY+SCHED
+ * - recv condition: at least rx_wait.recv_cnt packets in RECV
+ * combined with AND (rx_wait.op == 0) or OR (rx_wait.op != 0).
+ */
+static inline int grspw_rx_wait_eval(struct grspw_dma_priv *dma)
+{
+ int ready_val, recv_val;
+
+ if (dma->rx_wait.ready_cnt >= (dma->ready_cnt + dma->rx_sched_cnt))
+ ready_val = 1;
+ else
+ ready_val = 0;
+
+ if (dma->rx_wait.recv_cnt <= dma->recv_cnt)
+ recv_val = 1;
+ else
+ recv_val = 0;
+
+ /* AND or OR ? */
+ if (dma->rx_wait.op == 0)
+ return ready_val & recv_val; /* AND */
+ else
+ return ready_val | recv_val; /* OR */
+}
+
+/* Block until recv_cnt or more packets are Queued in RECV Q, op (AND or OR),
+ * ready_cnt or fewer packet buffers are available in the "READY and Scheduled" Q,
+ * condition is met.
+ * If a link error occurs and the Stop on Link error is defined, this function
+ * will also return to caller, however with an error.
+ *
+ * Return values:
+ *  0  condition satisfied
+ *  1  DMA channel stopped (link error or user stop)
+ *  2  timeout expired
+ *  3  another task is already waiting on this channel
+ * -1  lock failure or unknown semaphore error
+ */
+int grspw_dma_rx_wait(void *c, int recv_cnt, int op, int ready_cnt, int timeout)
+{
+ struct grspw_dma_priv *dma = c;
+ int ret, rc, initialized = 0;
+
+ /* timeout == 0 means wait forever */
+ if (timeout == 0)
+ timeout = RTEMS_NO_TIMEOUT;
+
+check_condition:
+
+ /* Take DMA channel lock */
+ if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return -1;
+
+ /* Check so that no other thread is waiting, this driver only supports
+ * one waiter at a time.
+ */
+ if (initialized == 0 && dma->rx_wait.waiting) {
+ ret = 3;
+ goto out_release;
+ }
+
+ /* Stop if link error or similar (DMA stopped), abort */
+ if (dma->started == 0) {
+ ret = 1;
+ goto out_release;
+ }
+
+ /* Set up Condition */
+ dma->rx_wait.recv_cnt = recv_cnt;
+ dma->rx_wait.op = op;
+ dma->rx_wait.ready_cnt = ready_cnt;
+
+ if (grspw_rx_wait_eval(dma) == 0) {
+ /* Prepare Wait */
+ initialized = 1;
+ dma->rx_wait.waiting = 1;
+
+ /* Release channel lock */
+ rtems_semaphore_release(dma->sem_rxdma);
+
+ /* Try to take Wait lock, if this fail link may have gone down
+ * or user stopped this DMA channel
+ */
+ rc = rtems_semaphore_obtain(dma->rx_wait.sem_wait, RTEMS_WAIT,
+ timeout);
+ if (rc == RTEMS_TIMEOUT) {
+ ret = 2;
+ goto out;
+ } else if (rc == RTEMS_UNSATISFIED ||
+ rc == RTEMS_OBJECT_WAS_DELETED) {
+ ret = 1; /* sem was flushed/deleted, means DMA stop */
+ goto out;
+ } else if (rc != RTEMS_SUCCESSFUL) {
+ /* Unknown Error */
+ ret = -1;
+ goto out;
+ } else if (dma->started == 0) {
+ ret = 1;
+ goto out;
+ }
+
+ /* Check condition once more */
+ goto check_condition;
+ }
+
+ ret = 0;
+
+out_release:
+ /* Unlock DMA channel */
+ rtems_semaphore_release(dma->sem_rxdma);
+
+out:
+ /* NOTE(review): 'waiting' is cleared here without holding the channel
+ * lock - presumably safe since only one waiter is permitted per
+ * channel; confirm against the work-task wakeup path.
+ */
+ if (initialized)
+ dma->rx_wait.waiting = 0;
+ return ret;
+}
+
+/* Set the DMA channel configuration. Only permitted while the channel is
+ * stopped. Returns 0 on success, -1 when the channel is started, cfg is
+ * NULL, or cfg->flags contains bits outside DMAFLAG_MASK | DMAFLAG2_MASK.
+ */
+int grspw_dma_config(void *c, struct grspw_dma_config *cfg)
+{
+ struct grspw_dma_priv *dma = c;
+
+ if (dma->started || !cfg)
+ return -1;
+
+ if (cfg->flags & ~(DMAFLAG_MASK | DMAFLAG2_MASK))
+ return -1;
+
+ /* Update Configuration */
+ memcpy(&dma->cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
+
+/* Copy the current DMA channel configuration into *cfg (must be non-NULL). */
+void grspw_dma_config_read(void *c, struct grspw_dma_config *cfg)
+{
+ struct grspw_dma_priv *dma = c;
+
+ /* Copy Current Configuration */
+ memcpy(cfg, &dma->cfg, sizeof(*cfg));
+}
+
+/* Copy the current DMA channel statistics into *sts (must be non-NULL). */
+void grspw_dma_stats_read(void *c, struct grspw_dma_stats *sts)
+{
+ struct grspw_dma_priv *dma = c;
+
+ memcpy(sts, &dma->stats, sizeof(dma->stats));
+}
+
+/* Reset the DMA channel statistics. The *_min counters are primed with a
+ * large sentinel so the first min-comparison after clearing updates them.
+ */
+void grspw_dma_stats_clr(void *c)
+{
+ struct grspw_dma_priv *dma = c;
+
+ /* Clear most of the statistics */
+ memset(&dma->stats, 0, sizeof(dma->stats));
+
+ /* Init proper default values so that comparisons will work the
+ * first time.
+ */
+ dma->stats.send_cnt_min = 0x3fffffff;
+ dma->stats.tx_sched_cnt_min = 0x3fffffff;
+ dma->stats.ready_cnt_min = 0x3fffffff;
+ dma->stats.rx_sched_cnt_min = 0x3fffffff;
+}
+
+/* Start a DMA channel: reset the software queue/ring state, program the
+ * TX/RX descriptor table base addresses, the maximum RX packet length and
+ * the DMA control/interrupt flags derived from the channel configuration.
+ * Returns 0, also when the channel was already started.
+ */
+int grspw_dma_start(void *c)
+{
+ struct grspw_dma_priv *dma = c;
+ struct grspw_dma_regs *dregs = dma->regs;
+ unsigned int ctrl;
+ SPIN_IRQFLAGS(irqflags);
+
+ if (dma->started)
+ return 0;
+
+ /* Initialize Software Structures:
+ * - Clear all Queues
+ * - init BD ring
+ * - init IRQ counter
+ * - clear statistics counters
+ * - init wait structures and semaphores
+ */
+ grspw_dma_reset(dma);
+
+ /* RX&RD and TX is not enabled until user fills SEND and READY Queue
+ * with SpaceWire Packet buffers. So we do not have to worry about
+ * IRQs for this channel just yet. However other DMA channels
+ * may be active.
+ *
+ * Some functionality that is not changed during started mode is set up
+ * once and for all here:
+ *
+ * - RX MAX Packet length
+ * - TX Descriptor base address to first BD in TX ring (not enabled)
+ * - RX Descriptor base address to first BD in RX ring (not enabled)
+ * - IRQs (TX DMA, RX DMA, DMA ERROR)
+ * - Strip PID
+ * - Strip Address
+ * - No Spill
+ * - Receiver Enable
+ * - disable on link error (LE)
+ *
+ * Note that the address register and the address enable bit in DMACTRL
+ * register must be left untouched, they are configured on a GRSPW
+ * core level.
+ *
+ * Note that the receiver is enabled here, but since descriptors are
+ * not enabled the GRSPW core may stop/pause RX (if NS bit set) until
+ * descriptors are enabled or it may ignore RX packets (NS=0) until
+ * descriptors are enabled (writing RD bit).
+ */
+ REG_WRITE(&dregs->txdesc, dma->tx_bds_hwa);
+ REG_WRITE(&dregs->rxdesc, dma->rx_bds_hwa);
+
+ /* MAX Packet length */
+ REG_WRITE(&dma->regs->rxmax, dma->cfg.rxmaxlen);
+
+ ctrl = GRSPW_DMACTRL_AI | GRSPW_DMACTRL_PS | GRSPW_DMACTRL_PR |
+ GRSPW_DMACTRL_TA | GRSPW_DMACTRL_RA | GRSPW_DMACTRL_RE |
+ (dma->cfg.flags & DMAFLAG_MASK) << GRSPW_DMACTRL_NS_BIT;
+ if (dma->core->dis_link_on_err & LINKOPTS_DIS_ONERR)
+ ctrl |= GRSPW_DMACTRL_LE;
+ if (dma->cfg.rx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_RXIE)
+ ctrl |= GRSPW_DMACTRL_RI;
+ if (dma->cfg.tx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_TXIE)
+ ctrl |= GRSPW_DMACTRL_TI;
+ /* Preserve the enable bit managed on core level while writing ctrl */
+ SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
+ ctrl |= REG_READ(&dma->regs->ctrl) & GRSPW_DMACTRL_EN;
+ REG_WRITE(&dregs->ctrl, ctrl);
+ SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
+
+ dma->started = 1; /* open up other DMA interfaces */
+
+ return 0;
+}
+
+/* Stop one DMA channel. Caller must hold both the RX and TX channel locks.
+ * All packets - sent or not - are moved to the SENT/RECV queues so the user
+ * can reclaim the buffers, and any threads blocked in the wait functions are
+ * flushed out of their semaphores.
+ */
+STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma)
+{
+ SPIN_IRQFLAGS(irqflags);
+
+ if (dma->started == 0)
+ return;
+ dma->started = 0;
+
+ SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
+ grspw_hw_dma_stop(dma);
+ SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
+
+ /* From here no more packets will be sent, however
+ * there may still exist scheduled packets that has been
+ * sent, and packets in the SEND Queue waiting for free
+ * descriptors. All packets are moved to the SENT Queue
+ * so that the user may get its buffers back, the user
+ * must look at the TXPKT_FLAG_TX in order to determine
+ * if the packet was sent or not.
+ */
+
+ /* Retrieve all scheduled packets that have already been sent */
+ grspw_tx_process_scheduled(dma);
+
+ /* Move un-sent packets in SEND and SCHED queue to the
+ * SENT Queue. (never marked sent)
+ */
+ if (!grspw_list_is_empty(&dma->tx_sched)) {
+ grspw_list_append_list(&dma->sent, &dma->tx_sched);
+ grspw_list_clr(&dma->tx_sched);
+ dma->sent_cnt += dma->tx_sched_cnt;
+ dma->tx_sched_cnt = 0;
+ }
+ if (!grspw_list_is_empty(&dma->send)) {
+ grspw_list_append_list(&dma->sent, &dma->send);
+ grspw_list_clr(&dma->send);
+ dma->sent_cnt += dma->send_cnt;
+ dma->send_cnt = 0;
+ }
+
+ /* Similar for RX */
+ grspw_rx_process_scheduled(dma);
+ if (!grspw_list_is_empty(&dma->rx_sched)) {
+ grspw_list_append_list(&dma->recv, &dma->rx_sched);
+ grspw_list_clr(&dma->rx_sched);
+ dma->recv_cnt += dma->rx_sched_cnt;
+ dma->rx_sched_cnt = 0;
+ }
+ if (!grspw_list_is_empty(&dma->ready)) {
+ grspw_list_append_list(&dma->recv, &dma->ready);
+ grspw_list_clr(&dma->ready);
+ dma->recv_cnt += dma->ready_cnt;
+ dma->ready_cnt = 0;
+ }
+
+ /* Throw out blocked threads */
+ rtems_semaphore_flush(dma->rx_wait.sem_wait);
+ rtems_semaphore_flush(dma->tx_wait.sem_wait);
+}
+
+/* User entry point to stop a DMA channel. Acquires both channel locks
+ * (RX first, then TX) before performing the stop; a no-op when the channel
+ * is not open.
+ */
+void grspw_dma_stop(void *c)
+{
+ struct grspw_dma_priv *dma = c;
+
+ /* If DMA channel is closed we should not access the semaphore */
+ if (!dma->open)
+ return;
+
+ /* Take DMA Channel lock */
+ if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return;
+ if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ rtems_semaphore_release(dma->sem_rxdma);
+ return;
+ }
+
+ grspw_dma_stop_locked(dma);
+
+ rtems_semaphore_release(dma->sem_txdma);
+ rtems_semaphore_release(dma->sem_rxdma);
+}
+
+/* Do general work, invoked indirectly from ISR (runs in work-task context):
+ * stop every DMA channel of the device and then the core itself.
+ */
+static void grspw_work_shutdown_func(struct grspw_priv *priv)
+{
+ int i;
+
+ /* Link is down for some reason, and the user has configured
+ * that we stop all (open) DMA channels and throw out all their
+ * blocked threads.
+ */
+ for (i=0; i<priv->hwsup.ndma_chans; i++)
+ grspw_dma_stop(&priv->dma[i]);
+ grspw_hw_stop(priv);
+}
+
+/* Do DMA work on one channel, invoked indirectly from ISR. 'msg' carries the
+ * RX/TX/error cause bits posted by the ISR. Re-enables the relevant DMA
+ * interrupt source(s), processes completed RX/TX descriptors and wakes a
+ * blocked RX/TX waiter if its wait condition is now fulfilled.
+ */
+static void grspw_work_dma_func(struct grspw_dma_priv *dma, unsigned int msg)
+{
+ int tx_cond_true, rx_cond_true, rxtx;
+
+ /* If DMA channel is closed we should not access the semaphore */
+ if (dma->open == 0)
+ return;
+
+ dma->stats.irq_cnt++;
+
+ /* Look at cause we were woken up and clear source */
+ rxtx = 0;
+ if (msg & WORK_DMA_RX_MASK)
+ rxtx |= 1;
+ if (msg & WORK_DMA_TX_MASK)
+ rxtx |= 2;
+ switch (grspw_dma_enable_int(dma, rxtx, 0)) {
+ case 1:
+ /* DMA stopped */
+ return;
+ case 2:
+ /* DMA error -> Stop DMA channel (both RX and TX) */
+ if (msg & WORK_DMA_ER_MASK) {
+ /* DMA error and user wants work-task to handle error */
+ grspw_dma_stop(dma);
+ grspw_work_event(WORKTASK_EV_DMA_STOP, msg);
+ }
+ return;
+ default:
+ break;
+ }
+ if (msg == 0)
+ return;
+
+ rx_cond_true = 0;
+ tx_cond_true = 0;
+
+ if ((dma->cfg.flags & DMAFLAG2_IRQD_MASK) == DMAFLAG2_IRQD_BOTH) {
+ /* In case both interrupt sources are disabled simultaneously
+ * by the ISR the re-enabling of the interrupt source must also
+ * do so to avoid missing interrupts. Both RX and TX process
+ * will be forced.
+ */
+ msg |= WORK_DMA_RX_MASK | WORK_DMA_TX_MASK;
+ }
+
+ if (msg & WORK_DMA_RX_MASK) {
+ /* Do RX Work */
+
+ /* Take DMA channel RX lock */
+ if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return;
+
+ dma->stats.rx_work_cnt++;
+ grspw_rx_process_scheduled(dma);
+ if (dma->started) {
+ dma->stats.rx_work_enabled +=
+ grspw_rx_schedule_ready(dma);
+ /* Check to see if condition for waking blocked
+ * USER task is fulfilled.
+ */
+ if (dma->rx_wait.waiting)
+ rx_cond_true = grspw_rx_wait_eval(dma);
+ }
+ rtems_semaphore_release(dma->sem_rxdma);
+ }
+
+ if (msg & WORK_DMA_TX_MASK) {
+ /* Do TX Work */
+
+ /* Take DMA channel TX lock */
+ if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return;
+
+ dma->stats.tx_work_cnt++;
+ grspw_tx_process_scheduled(dma);
+ if (dma->started) {
+ dma->stats.tx_work_enabled +=
+ grspw_tx_schedule_send(dma);
+ /* Check to see if condition for waking blocked
+ * USER task is fulfilled.
+ */
+ if (dma->tx_wait.waiting)
+ tx_cond_true = grspw_tx_wait_eval(dma);
+ }
+ rtems_semaphore_release(dma->sem_txdma);
+ }
+
+ /* Wake blocked waiters outside the channel locks */
+ if (rx_cond_true)
+ rtems_semaphore_release(dma->rx_wait.sem_wait);
+
+ if (tx_cond_true)
+ rtems_semaphore_release(dma->tx_wait.sem_wait);
+}
+
+/* Work task is receiving work for the work message queue posted from
+ * the ISR. Each message encodes the device index (WORK_CORE bits) plus
+ * shutdown and per-DMA-channel event bits. A WORK_QUIT_TASK message
+ * terminates the task; WORK_FREE_MSGQ additionally deletes the queue
+ * before exiting.
+ */
+void grspw_work_func(rtems_id msgQ)
+{
+ unsigned int message = 0, msg;
+ size_t size;
+ struct grspw_priv *priv;
+ int i;
+
+ /* Wait for ISR to schedule work */
+ while (rtems_message_queue_receive(msgQ, &message, &size,
+ RTEMS_WAIT, RTEMS_NO_TIMEOUT) == RTEMS_SUCCESSFUL) {
+ if (message & WORK_QUIT_TASK)
+ break;
+
+ /* Handle work */
+ priv = priv_tab[message >> WORK_CORE_BIT];
+ if (message & WORK_SHUTDOWN) {
+ grspw_work_shutdown_func(priv);
+
+ grspw_work_event(WORKTASK_EV_SHUTDOWN, message);
+ } else if (message & WORK_DMA_MASK) {
+ for (i = 0; i < priv->hwsup.ndma_chans; i++) {
+ msg = message &
+ (WORK_CORE_MASK | WORK_DMA_CHAN_MASK(i));
+ if (msg)
+ grspw_work_dma_func(&priv->dma[i], msg);
+ }
+ }
+ message = 0;
+ }
+
+ if (message & WORK_FREE_MSGQ)
+ rtems_message_queue_delete(msgQ);
+
+ grspw_work_event(WORKTASK_EV_QUIT, message);
+ rtems_task_exit();
+}
+
+/* GRSPW interrupt service routine. Handles, in this order: timecode
+ * reception, distributed-interrupt (ICCTRL) sources, link error status and
+ * per-DMA-channel RX/TX/error status. Heavier processing is deferred to the
+ * work task by posting a message through priv->wc.msgisr.
+ */
+STATIC void grspw_isr(void *data)
+{
+ struct grspw_priv *priv = data;
+ unsigned int dma_stat, stat, stat_clrmsk, ctrl, icctrl, timecode, irqs;
+ unsigned int rxirq, rxack, intto;
+ int i, handled = 0, call_user_int_isr;
+ unsigned int message = WORK_NONE, dma_en;
+ SPIN_ISR_IRQFLAGS(irqflags);
+
+ /* Get Status from Hardware */
+ stat = REG_READ(&priv->regs->status);
+ stat_clrmsk = stat & (GRSPW_STS_TO | GRSPW_STAT_ERROR) &
+ (GRSPW_STS_TO | priv->stscfg);
+
+ /* Make sure to put the timecode handling first in order to get the
+ * smallest possible interrupt latency
+ */
+ if ((stat & GRSPW_STS_TO) && (priv->tcisr != NULL)) {
+ ctrl = REG_READ(&priv->regs->ctrl);
+ if (ctrl & GRSPW_CTRL_TQ) {
+ /* Timecode received. Let custom function handle this */
+ timecode = REG_READ(&priv->regs->time) &
+ (GRSPW_TIME_CTRL | GRSPW_TIME_TCNT);
+ (priv->tcisr)(priv->tcisr_arg, timecode);
+ }
+ }
+
+ /* Get Interrupt status from hardware */
+ icctrl = REG_READ(&priv->regs->icctrl);
+ if ((icctrl & GRSPW_ICCTRL_IRQSRC_MASK) && (priv->icisr != NULL)) {
+ call_user_int_isr = 0;
+ rxirq = rxack = intto = 0;
+
+ if ((icctrl & GRSPW_ICCTRL_IQ) &&
+ (rxirq = REG_READ(&priv->regs->icrx)) != 0)
+ call_user_int_isr = 1;
+
+ if ((icctrl & GRSPW_ICCTRL_AQ) &&
+ (rxack = REG_READ(&priv->regs->icack)) != 0)
+ call_user_int_isr = 1;
+
+ if ((icctrl & GRSPW_ICCTRL_TQ) &&
+ (intto = REG_READ(&priv->regs->ictimeout)) != 0)
+ call_user_int_isr = 1;
+
+ /* Let custom functions handle this POTENTIAL SPW interrupt. The
+ * user function is called even if no such IRQ has happened!
+ * User must make sure to clear all interrupts that have been
+ * handled from the three registers by writing a one.
+ */
+ if (call_user_int_isr)
+ priv->icisr(priv->icisr_arg, rxirq, rxack, intto);
+ }
+
+ /* An Error occurred? */
+ if (stat & GRSPW_STAT_ERROR) {
+ /* Wake Global WorkQ */
+ handled = 1;
+
+ if (stat & GRSPW_STS_EE)
+ priv->stats.err_eeop++;
+
+ if (stat & GRSPW_STS_IA)
+ priv->stats.err_addr++;
+
+ if (stat & GRSPW_STS_PE)
+ priv->stats.err_parity++;
+
+ if (stat & GRSPW_STS_DE)
+ priv->stats.err_disconnect++;
+
+ if (stat & GRSPW_STS_ER)
+ priv->stats.err_escape++;
+
+ if (stat & GRSPW_STS_CE)
+ priv->stats.err_credit++;
+
+ if (stat & GRSPW_STS_WE)
+ priv->stats.err_wsync++;
+
+ if (((priv->dis_link_on_err >> 16) & stat) &&
+ (REG_READ(&priv->regs->ctrl) & GRSPW_CTRL_IE)) {
+ /* Disable the link, no more transfers are expected
+ * on any DMA channel.
+ */
+ SPIN_LOCK(&priv->devlock, irqflags);
+ ctrl = REG_READ(&priv->regs->ctrl);
+ REG_WRITE(&priv->regs->ctrl, GRSPW_CTRL_LD |
+ (ctrl & ~(GRSPW_CTRL_IE|GRSPW_CTRL_LS)));
+ SPIN_UNLOCK(&priv->devlock, irqflags);
+ /* Signal to work-thread to stop DMA and clean up */
+ message = WORK_SHUTDOWN;
+ }
+ }
+
+ /* Clear Status Flags */
+ if (stat_clrmsk) {
+ handled = 1;
+ REG_WRITE(&priv->regs->status, stat_clrmsk);
+ }
+
+ /* A DMA transfer or Error occurred? In that case disable more IRQs
+ * from the DMA channel, then invoke the workQ.
+ *
+ * Also the GI interrupt flag may not be available for older
+ * designs (it was added together with multiple DMA channel support).
+ */
+ SPIN_LOCK(&priv->devlock, irqflags);
+ for (i=0; i<priv->hwsup.ndma_chans; i++) {
+ dma_stat = REG_READ(&priv->regs->dma[i].ctrl);
+ /* Check for Errors and if Packets been sent or received if
+ * respective IRQ are enabled
+ */
+ irqs = (((dma_stat << 3) & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS))
+ | GRSPW_DMA_STATUS_ERROR) & dma_stat;
+ if (!irqs)
+ continue;
+
+ handled = 1;
+
+ /* DMA error has priority, if error happens it is assumed that
+ * the common work-queue stops the DMA operation for that
+ * channel and makes the DMA tasks exit from their waiting
+ * functions (both RX and TX tasks).
+ *
+ * Disable Further IRQs (until enabled again)
+ * from this DMA channel. Let the status
+ * bit remain so that they can be handled by
+ * work function.
+ */
+ if (irqs & GRSPW_DMA_STATUS_ERROR) {
+ REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
+ ~(GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI |
+ GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS |
+ GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA |
+ GRSPW_DMACTRL_AT));
+ message |= WORK_DMA_ER(i);
+ } else {
+ /* determine if RX/TX interrupt source(s) shall remain
+ * enabled.
+ */
+ if (priv->dma[i].cfg.flags & DMAFLAG2_IRQD_SRC) {
+ dma_en = ~irqs >> 3;
+ } else {
+ dma_en = priv->dma[i].cfg.flags >>
+ (DMAFLAG2_IRQD_BIT - GRSPW_DMACTRL_TI_BIT);
+ }
+ dma_en &= (GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI);
+ REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
+ (~(GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI |
+ GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS |
+ GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA |
+ GRSPW_DMACTRL_AT) | dma_en));
+ message |= WORK_DMA(i, irqs >> GRSPW_DMACTRL_PS_BIT);
+ }
+ }
+ SPIN_UNLOCK(&priv->devlock, irqflags);
+
+ if (handled != 0)
+ priv->stats.irq_cnt++;
+
+ /* Schedule work by sending message to work thread */
+ if (message != WORK_NONE && priv->wc.msgisr) {
+ int status;
+ message |= WORK_CORE(priv->index);
+ /* func interface compatible with msgQSend() on purpose, but
+ * at the same time the user can assign a custom function to
+ * handle DMA RX/TX operations as indicated by the "message"
+ * and clear the handled bits before given to msgQSend().
+ */
+ status = priv->wc.msgisr(priv->wc.msgisr_arg, &message, 4);
+ if (status != RTEMS_SUCCESSFUL) {
+ printk("grspw_isr(%d): message fail %d (0x%x)\n",
+ priv->index, status, message);
+ }
+ }
+}
+
+/* Abort DMA activity of one channel: keep only the configuration bits
+ * (LE/EN/SP/SA/NS), clear all other control/IRQ bits and set Abort TX (AT).
+ */
+STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma)
+{
+ unsigned int ctrl;
+ struct grspw_dma_regs *dregs = dma->regs;
+
+ ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN |
+ GRSPW_DMACTRL_SP | GRSPW_DMACTRL_SA | GRSPW_DMACTRL_NS);
+ ctrl |= GRSPW_DMACTRL_AT;
+ REG_WRITE(&dregs->ctrl, ctrl);
+}
+
+/* Soft reset of one DMA channel: clear control bits except LE/EN, restore
+ * the default max RX packet length and zero the descriptor base registers.
+ */
+STATIC void grspw_hw_dma_softreset(struct grspw_dma_priv *dma)
+{
+ unsigned int ctrl;
+ struct grspw_dma_regs *dregs = dma->regs;
+
+ ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN);
+ REG_WRITE(&dregs->ctrl, ctrl);
+
+ REG_WRITE(&dregs->rxmax, DEFAULT_RXMAX);
+ REG_WRITE(&dregs->txdesc, 0);
+ REG_WRITE(&dregs->rxdesc, 0);
+}
+
+/* Hardware Action:
+ * - stop DMA
+ * - do not bring down the link (RMAP may be active)
+ * - RMAP settings untouched (RMAP may be active)
+ * - port select untouched (RMAP may be active)
+ * - timecodes are disabled
+ * - IRQ generation disabled
+ * - status not cleared (let user analyze it if requested later on)
+ * - Node address / First DMA channels Node address
+ * is untouched (RMAP may be active)
+ *
+ * Takes the device spin lock itself; must not be called with it held.
+ */
+STATIC void grspw_hw_stop(struct grspw_priv *priv)
+{
+ int i;
+ unsigned int ctrl;
+ SPIN_IRQFLAGS(irqflags);
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ for (i=0; i<priv->hwsup.ndma_chans; i++)
+ grspw_hw_dma_stop(&priv->dma[i]);
+
+ ctrl = REG_READ(&priv->regs->ctrl);
+ REG_WRITE(&priv->regs->ctrl, ctrl & (
+ GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS |
+ GRSPW_CTRL_RE | GRSPW_CTRL_RD |
+ GRSPW_CTRL_NP | GRSPW_CTRL_PS));
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+}
+
+/* Soft reset of GRSPW core registers: reset all DMA channels, clear the
+ * status and time registers and reinitialize the distributed interrupt
+ * (IC) registers while preserving their reset-relevant fields.
+ */
+STATIC void grspw_hw_softreset(struct grspw_priv *priv)
+{
+ int i;
+ unsigned int tmp;
+
+ for (i=0; i<priv->hwsup.ndma_chans; i++)
+ grspw_hw_dma_softreset(&priv->dma[i]);
+
+ REG_WRITE(&priv->regs->status, 0xffffffff);
+ REG_WRITE(&priv->regs->time, 0);
+ /* Clear all but valuable reset values of ICCTRL */
+ tmp = REG_READ(&priv->regs->icctrl);
+ tmp &= GRSPW_ICCTRL_INUM | GRSPW_ICCTRL_BIRQ | GRSPW_ICCTRL_TXIRQ;
+ tmp |= GRSPW_ICCTRL_ID;
+ REG_WRITE(&priv->regs->icctrl, tmp);
+ REG_WRITE(&priv->regs->icrx, 0xffffffff);
+ REG_WRITE(&priv->regs->icack, 0xffffffff);
+ REG_WRITE(&priv->regs->ictimeout, 0xffffffff);
+}
+
+/* Return the number of GRSPW devices registered by this driver. */
+int grspw_dev_count(void)
+{
+ return grspw_count;
+}
+
+/* Register user callbacks invoked when a GRSPW device is found (devfound)
+ * or removed (devremove). If devices have already been probed, devfound is
+ * called retroactively for each of them and its return value is stored as
+ * the device's user data pointer.
+ */
+void grspw_initialize_user(void *(*devfound)(int), void (*devremove)(int,void*))
+{
+ int i;
+ struct grspw_priv *priv;
+
+ /* Set new Device Found Handler */
+ grspw_dev_add = devfound;
+ grspw_dev_del = devremove;
+
+ if (grspw_initialized == 1 && grspw_dev_add) {
+ /* Call callback for every previously found device */
+ for (i=0; i<grspw_count; i++) {
+ priv = priv_tab[i];
+ if (priv)
+ priv->data = grspw_dev_add(i);
+ }
+ }
+}
+
/* Get a value at least 6.4us in number of clock cycles.
 *
 * freq_khz: system clock frequency in kHz.
 * Returns ceil(freq_khz * 64 / 10000) masked to the 12-bit timer64 field.
 */
static unsigned int grspw1_calc_timer64(int freq_khz)
{
	/* 6.4 us equals 64/10000 of a millisecond; the +9999 rounds the
	 * integer division up so the result is never below 6.4 us.
	 */
	int cycles = (freq_khz * 64 + 9999) / 10000;

	return (unsigned int)cycles & 0xfff;
}
+
/* Get a value at least 850ns in number of clock cycles, minus 3.
 *
 * freq_khz: system clock frequency in kHz.
 * Returns (ceil(freq_khz * 85 / 100000) - 3) masked to the 10-bit field.
 */
static unsigned int grspw1_calc_discon(int freq_khz)
{
	/* 850 ns equals 85/100000 of a millisecond; +99999 rounds the
	 * division up before the hardware-defined -3 adjustment.
	 */
	int cycles = (freq_khz * 85 + 99999) / 100000;

	return (unsigned int)(cycles - 3) & 0x3ff;
}
+
+/******************* Driver manager interface ***********************/
+
+/* Driver prototypes */
+static int grspw_common_init(void);
+static int grspw2_init3(struct drvmgr_dev *dev);
+
+/* Only driver-manager init stage 3 is implemented */
+static struct drvmgr_drv_ops grspw2_ops =
+{
+ .init = {NULL, NULL, grspw2_init3, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+/* AMBA plug&play device IDs handled by this driver */
+static struct amba_dev_id grspw2_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_SPW}, /* not yet supported */
+ {VENDOR_GAISLER, GAISLER_SPW2},
+ {VENDOR_GAISLER, GAISLER_SPW2_DMA},
+ {0, 0} /* Mark end of table */
+};
+
+static struct amba_drv_info grspw2_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_GRSPW2_ID,/* Driver ID */
+ "GRSPW_PKT_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &grspw2_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ sizeof(struct grspw_priv), /* Let DrvMgr alloc priv */
+ },
+ &grspw2_ids[0]
+};
+
+/* Register the GRSPW2 packet driver with the driver manager. Must be called
+ * before driver manager initialization.
+ */
+void grspw2_register_drv (void)
+{
+ GRSPW_DBG("Registering GRSPW2 packet driver\n");
+ drvmgr_drv_register(&grspw2_drv_info.general);
+}
+
+/* Driver manager init stage 3: probe one GRSPW device. Reads hardware
+ * capabilities from AMBA PnP info and the control registers, allocates the
+ * per-DMA-channel state, puts the hardware in a stopped/reset state and
+ * announces the device to the user layer. Returns DRVMGR_OK on success.
+ */
+static int grspw2_init3(struct drvmgr_dev *dev)
+{
+ struct grspw_priv *priv;
+ struct amba_dev_info *ambadev;
+ struct ambapp_core *pnpinfo;
+ int i;
+ unsigned int ctrl, icctrl, numi;
+ union drvmgr_key_value *value;
+
+ GRSPW_DBG("GRSPW[%d] on bus %s\n", dev->minor_drv,
+ dev->parent->dev->name);
+
+ if (grspw_count >= GRSPW_MAX)
+ return DRVMGR_ENORES;
+
+ priv = dev->priv;
+ if (priv == NULL)
+ return DRVMGR_NOMEM;
+ priv->dev = dev;
+
+ /* If first device init common part of driver */
+ if (grspw_common_init())
+ return DRVMGR_FAIL;
+
+ /*** Now we take care of device initialization ***/
+
+ /* Get device information from AMBA PnP information */
+ ambadev = (struct amba_dev_info *)dev->businfo;
+ if (ambadev == NULL)
+ return -1; /* NOTE(review): bare -1 rather than a DRVMGR_* code */
+ pnpinfo = &ambadev->info;
+ priv->irq = pnpinfo->irq;
+ priv->regs = (struct grspw_regs *)pnpinfo->apb_slv->start;
+
+ /* Read Hardware Support from Control Register */
+ ctrl = REG_READ(&priv->regs->ctrl);
+ priv->hwsup.rmap = (ctrl & GRSPW_CTRL_RA) >> GRSPW_CTRL_RA_BIT;
+ priv->hwsup.rmap_crc = (ctrl & GRSPW_CTRL_RC) >> GRSPW_CTRL_RC_BIT;
+ priv->hwsup.ccsds_crc = (ctrl & GRSPW_CTRL_CC) >> GRSPW_CTRL_CC_BIT;
+ priv->hwsup.rx_unalign = (ctrl & GRSPW_CTRL_RX) >> GRSPW_CTRL_RX_BIT;
+ priv->hwsup.nports = 1 + ((ctrl & GRSPW_CTRL_PO) >> GRSPW_CTRL_PO_BIT);
+ priv->hwsup.ndma_chans = 1 + ((ctrl & GRSPW_CTRL_NCH) >> GRSPW_CTRL_NCH_BIT);
+ priv->hwsup.irq = ((ctrl & GRSPW_CTRL_ID) >> GRSPW_CTRL_ID_BIT);
+ icctrl = REG_READ(&priv->regs->icctrl);
+ numi = (icctrl & GRSPW_ICCTRL_NUMI) >> GRSPW_ICCTRL_NUMI_BIT;
+ if (numi > 0)
+ priv->hwsup.irq_num = 1 << (numi - 1);
+ else
+ priv->hwsup.irq_num = 0;
+
+ /* Construct hardware version identification */
+ priv->hwsup.hw_version = pnpinfo->device << 16 | pnpinfo->apb_slv->ver;
+
+ if ((pnpinfo->device == GAISLER_SPW2) ||
+ (pnpinfo->device == GAISLER_SPW2_DMA)) {
+ priv->hwsup.strip_adr = 1; /* All GRSPW2 can strip Address */
+ priv->hwsup.strip_pid = 1; /* All GRSPW2 can strip PID */
+ } else {
+ unsigned int apb_hz, apb_khz;
+
+ /* Autodetect GRSPW1 features? */
+ priv->hwsup.strip_adr = 0;
+ priv->hwsup.strip_pid = 0;
+
+ drvmgr_freq_get(dev, DEV_APB_SLV, &apb_hz);
+ apb_khz = apb_hz / 1000;
+
+ /* GRSPW1: program disconnect and timer64 timings from the
+ * APB frequency
+ */
+ REG_WRITE(&priv->regs->timer,
+ ((grspw1_calc_discon(apb_khz) & 0x3FF) << 12) |
+ (grspw1_calc_timer64(apb_khz) & 0xFFF));
+ }
+
+ /* Probe width of SpaceWire Interrupt ISR timers. All have the same
+ * width... so only the first is probed, if no timer result will be
+ * zero.
+ */
+ REG_WRITE(&priv->regs->icrlpresc, 0x7fffffff);
+ ctrl = REG_READ(&priv->regs->icrlpresc);
+ REG_WRITE(&priv->regs->icrlpresc, 0);
+ priv->hwsup.itmr_width = 0;
+ while (ctrl & 1) {
+ priv->hwsup.itmr_width++;
+ ctrl = ctrl >> 1;
+ }
+
+ /* Let user limit the number of DMA channels on this core to save
+ * space. Only the first nDMA channels will be available.
+ */
+ value = drvmgr_dev_key_get(priv->dev, "nDMA", DRVMGR_KT_INT);
+ if (value && (value->i < priv->hwsup.ndma_chans))
+ priv->hwsup.ndma_chans = value->i;
+
+ /* Allocate and init Memory for all DMA channels */
+ priv->dma = grlib_calloc(priv->hwsup.ndma_chans, sizeof(*priv->dma));
+ if (priv->dma == NULL)
+ return DRVMGR_NOMEM;
+ for (i=0; i<priv->hwsup.ndma_chans; i++) {
+ priv->dma[i].core = priv;
+ priv->dma[i].index = i;
+ priv->dma[i].regs = &priv->regs->dma[i];
+ }
+
+ /* Startup Action:
+ * - stop DMA
+ * - do not bring down the link (RMAP may be active)
+ * - RMAP settings untouched (RMAP may be active)
+ * - port select untouched (RMAP may be active)
+ * - timecodes are disabled
+ * - IRQ generation disabled
+ * - status cleared
+ * - Node address / First DMA channels Node address
+ * is untouched (RMAP may be active)
+ */
+ grspw_hw_stop(priv);
+ grspw_hw_softreset(priv);
+
+ /* Register character device in registered region */
+ priv->index = grspw_count;
+ priv_tab[priv->index] = priv;
+ grspw_count++;
+
+ /* Device name */
+ sprintf(priv->devname, "grspw%d", priv->index);
+
+ /* Tell above layer about new device */
+ if (grspw_dev_add)
+ priv->data = grspw_dev_add(priv->index);
+
+ return DRVMGR_OK;
+}
+
+/******************* Driver Implementation ***********************/
+/* Creates a MsgQ (optional) and spawns a worker task associated with the
+ * message Q. The task can also be associated with a custom msgQ if *pMsgQ
+ * is non-zero. prio < 0 selects the default work-task priority; stack below
+ * 0x800 selects the default stack size. On failure OBJECTS_ID_NONE is
+ * returned and a message queue created here is deleted again.
+ */
+rtems_id grspw_work_spawn(int prio, int stack, rtems_id *pMsgQ, int msgMax)
+{
+ rtems_id tid;
+ int created_msgq = 0;
+ /* NOTE(review): the name counter is a non-atomic static - concurrent
+ * spawns could get duplicate object names; confirm callers serialize.
+ */
+ static char work_name = 'A';
+
+ if (pMsgQ == NULL)
+ return OBJECTS_ID_NONE;
+
+ if (*pMsgQ == OBJECTS_ID_NONE) {
+ if (msgMax <= 0)
+ msgMax = 32;
+
+ if (rtems_message_queue_create(
+ rtems_build_name('S', 'G', 'Q', work_name),
+ msgMax, 4, RTEMS_FIFO, pMsgQ) !=
+ RTEMS_SUCCESSFUL)
+ return OBJECTS_ID_NONE;
+ created_msgq = 1;
+ }
+
+ if (prio < 0)
+ prio = grspw_work_task_priority; /* default prio */
+ if (stack < 0x800)
+ stack = RTEMS_MINIMUM_STACK_SIZE; /* default stack size */
+
+ if (rtems_task_create(rtems_build_name('S', 'G', 'T', work_name),
+ prio, stack, RTEMS_PREEMPT | RTEMS_NO_ASR,
+ RTEMS_NO_FLOATING_POINT, &tid) != RTEMS_SUCCESSFUL)
+ tid = OBJECTS_ID_NONE;
+ else if (rtems_task_start(tid, (rtems_task_entry)grspw_work_func, *pMsgQ) !=
+ RTEMS_SUCCESSFUL) {
+ rtems_task_delete(tid);
+ tid = OBJECTS_ID_NONE;
+ }
+
+ if (tid == OBJECTS_ID_NONE && created_msgq) {
+ rtems_message_queue_delete(*pMsgQ);
+ *pMsgQ = OBJECTS_ID_NONE;
+ } else {
+ /* Cycle A..Z for the next spawned task/queue name */
+ if (++work_name > 'Z')
+ work_name = 'A';
+ }
+ return tid;
+}
+
+/* Free task associated with message queue and optionally also the message
+ * queue itself. The message queue is deleted by the work task and is
+ * therefore delayed until the work task resumes its execution. Returns the
+ * status of posting the quit message.
+ */
+rtems_status_code grspw_work_free(rtems_id msgQ, int freeMsgQ)
+{
+ int msg = WORK_QUIT_TASK;
+ if (freeMsgQ)
+ msg |= WORK_FREE_MSGQ;
+ return rtems_message_queue_send(msgQ, &msg, 4);
+}
+
+/* Set the work-task/message-queue configuration of one device; wc == NULL
+ * restores the driver default configuration.
+ */
+void grspw_work_cfg(void *d, struct grspw_work_config *wc)
+{
+ struct grspw_priv *priv = (struct grspw_priv *)d;
+
+ if (wc == NULL)
+ wc = &grspw_wc_def; /* use default config */
+ priv->wc = *wc;
+}
+
+#ifdef RTEMS_SMP
+/* Runtime ISR CPU affinity is not supported by this BSP; always fails. */
+int grspw_isr_affinity(void *d, const cpu_set_t *cpus)
+{
+ return -1; /* BSP support only static configured IRQ affinity */
+}
+#endif
+
+/* One-time driver-wide initialization: create the driver semaphore and,
+ * unless disabled by grspw_work_task_priority == -1, the default work task
+ * and its message queue. Returns 0 on success, negative on error. After a
+ * failed attempt grspw_initialized stays -1 and later calls return -1.
+ */
+static int grspw_common_init(void)
+{
+ if (grspw_initialized == 1)
+ return 0;
+ if (grspw_initialized == -1)
+ return -1;
+ grspw_initialized = -1;
+
+ /* Device Semaphore created with count = 1 */
+ if (rtems_semaphore_create(rtems_build_name('S', 'G', 'L', 'S'), 1,
+ RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
+ RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
+ RTEMS_NO_PRIORITY_CEILING, 0, &grspw_sem) != RTEMS_SUCCESSFUL)
+ return -1;
+
+ /* Work queue, Work thread. Not created if user disables it.
+ * user can disable it when interrupt is not used to save resources
+ */
+ if (grspw_work_task_priority != -1) {
+ grspw_work_task = grspw_work_spawn(-1, 0,
+ (rtems_id *)&grspw_wc_def.msgisr_arg, 0);
+ if (grspw_work_task == OBJECTS_ID_NONE)
+ return -2;
+ grspw_wc_def.msgisr =
+ (grspw_msgqisr_t) rtems_message_queue_send;
+ } else {
+ grspw_wc_def.msgisr = NULL;
+ grspw_wc_def.msgisr_arg = NULL;
+ }
+
+ grspw_initialized = 1;
+ return 0;
+}
diff --git a/bsps/shared/grlib/spw/grspw_router.c b/bsps/shared/grlib/spw/grspw_router.c
new file mode 100644
index 0000000000..d8ba8feef3
--- /dev/null
+++ b/bsps/shared/grlib/spw/grspw_router.c
@@ -0,0 +1,1939 @@
+/* GRSPW ROUTER APB-Register Driver.
+ *
+ * COPYRIGHT (c) 2010-2017.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <rtems/libio.h>
+#include <rtems/bspIo.h>
+#include <stdio.h>
+#include <bsp.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/grspw_router.h>
+
+#include <grlib/grlib_impl.h>
+
+//#define STATIC
+#define STATIC static
+
+#define UNUSED __attribute__((unused))
+
+//#define DEBUG 1
+
+#ifdef DEBUG
+#define DBG(x...) printf(x)
+#else
+#define DBG(x...)
+#endif
+
+#define THREAD_SAFE 1
+
+#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
+#define REG_READ(addr) (*(volatile unsigned int *)(addr))
+
+
+/*
+ * ROUTER RTPMAP register fields
+ */
+#define RTPMAP_PE (0x7fffffff << RTPMAP_PE_BIT)
+#define RTPMAP_PD (0x1 << RTPMAP_PD_BIT)
+
+#define RTPMAP_PE_BIT 1
+#define RTPMAP_PD_BIT 0
+
+/*
+ * ROUTER RTACTRL register fields
+ * DEFINED IN HEADER
+ */
+
+/*
+ * ROUTER PCTRL register fields
+ * DEFINED IN HEADER
+ */
+
+/*
+ * ROUTER PSTSCFG register fields
+ * DEFINED IN HEADER
+ */
+
+/*
+ * ROUTER PSTS register fields
+ * DEFINED IN HEADER
+ */
+
+/*
+ * ROUTER PTIMER register fields
+ */
+#define PTIMER_RL (0xffff << PTIMER_RL_BIT)
+
+#define PTIMER_RL_BIT 0
+
+/*
+ * ROUTER PCTRL2 register fields
+ * DEFINED IN HEADER
+ */
+
+/*
+ * ROUTER RTRCFG register fields
+ */
+#define RTRCFG_SP (0x1f << RTRCFG_SP_BIT)
+#define RTRCFG_AP (0x1f << RTRCFG_AP_BIT)
+#define RTRCFG_FP (0x1f << RTRCFG_FP_BIT)
+#define RTRCFG_SR (0x1 << RTRCFG_SR_BIT)
+#define RTRCFG_PE (0x1 << RTRCFG_PE_BIT)
+#define RTRCFG_IC (0x1 << RTRCFG_IC_BIT)
+#define RTRCFG_IS (0x1 << RTRCFG_IS_BIT)
+#define RTRCFG_IP (0x1 << RTRCFG_IP_BIT)
+#define RTRCFG_AI (0x1 << RTRCFG_AI_BIT)
+#define RTRCFG_AT (0x1 << RTRCFG_AT_BIT)
+#define RTRCFG_IE (0x1 << RTRCFG_IE_BIT)
+#define RTRCFG_RE (0x1 << RTRCFG_RE_BIT)
+#define RTRCFG_EE (0x1 << RTRCFG_EE_BIT)
+#define RTRCFG_LS (0x1 << RTRCFG_LS_BIT)
+#define RTRCFG_SA (0x1 << RTRCFG_SA_BIT)
+#define RTRCFG_TF (0x1 << RTRCFG_TF_BIT)
+#define RTRCFG_ME (0x1 << RTRCFG_ME_BIT)
+#define RTRCFG_TA (0x1 << RTRCFG_TA_BIT)
+#define RTRCFG_PP (0x1 << RTRCFG_PP_BIT)
+#define RTRCFG_WCLEAR (RTRCFG_ME)
+
+#define RTRCFG_SP_BIT 27
+#define RTRCFG_AP_BIT 22
+#define RTRCFG_FP_BIT 17
+#define RTRCFG_SR_BIT 15
+#define RTRCFG_PE_BIT 14
+#define RTRCFG_IC_BIT 13
+#define RTRCFG_IS_BIT 12
+#define RTRCFG_IP_BIT 11
+#define RTRCFG_AI_BIT 10
+#define RTRCFG_AT_BIT 9
+#define RTRCFG_IE_BIT 8
+#define RTRCFG_RE_BIT 7
+#define RTRCFG_EE_BIT 6
+#define RTRCFG_LS_BIT 5
+#define RTRCFG_SA_BIT 4
+#define RTRCFG_TF_BIT 3
+#define RTRCFG_ME_BIT 2
+#define RTRCFG_TA_BIT 1
+#define RTRCFG_PP_BIT 0
+
+/*
+ * ROUTER TC register fields
+ */
+#define TC_RE (0x3f << TC_RE_BIT)
+#define TC_EN (0x3f << TC_EN_BIT)
+#define TC_CF (0x3f << TC_CF_BIT)
+#define TC_TC (0x3f << TC_TC_BIT)
+
+#define TC_RE_BIT 9
+#define TC_EN_BIT 8
+#define TC_CF_BIT 6
+#define TC_TC_BIT 0
+
+/*
+ * ROUTER VER register fields
+ */
+#define VER_MA (0xff << VER_MA_BIT)
+#define VER_MI (0xff << VER_MI_BIT)
+#define VER_PA (0xff << VER_PA_BIT)
+#define VER_ID (0xff << VER_ID_BIT)
+
+#define VER_MA_BIT 24
+#define VER_MI_BIT 16
+#define VER_PA_BIT 8
+#define VER_ID_BIT 0
+
+/*
+ * ROUTER IDIV register fields
+ */
+#define IDIV_ID (0xff << IDIV_ID_BIT)
+
+#define IDIV_ID_BIT 0
+
+/*
+ * ROUTER CFGWE register fields
+ */
+#define CFGWE_WE (0x1 << CFGWE_WE_BIT)
+
+#define CFGWE_WE_BIT 0
+
+/*
+ * ROUTER PRESCALER register fields
+ */
+#define PRESCALER_RL (0xffff << PRESCALER_RL_BIT)
+
+#define PRESCALER_RL_BIT 0
+
+/*
+ * ROUTER IMASK register fields
+ * DEFINED IN HEADER
+ */
+
+/*
+ * ROUTER ICODEGEN register fields
+ * DEFINED IN HEADER
+ */
+
+/*
+ * ROUTER ISRTIMER register fields
+ */
+#define ISRTIMER_RL (0xffff << ISRTIMER_RL_BIT)
+
+#define ISRTIMER_RL_BIT 0
+
+/*
+ * ROUTER AITIMER register fields
+ */
+#define AITIMER_RL (0xffff << AITIMER_RL_BIT)
+
+#define AITIMER_RL_BIT 0
+
+/*
+ * ROUTER ISRCTIMER register fields
+ */
+#define ISRCTIMER_RL (0x1f << ISRCTIMER_RL_BIT)
+
+#define ISRCTIMER_RL_BIT 0
+
+/*
+ * ROUTER CAP register fields
+ */
+#define CAP_AF (0x3 << CAP_AF_BIT)
+#define CAP_PF (0x7 << CAP_PF_BIT)
+#define CAP_RM (0x7 << CAP_RM_BIT)
+#define CAP_AS (0x1 << CAP_AS_BIT)
+#define CAP_AX (0x1 << CAP_AX_BIT)
+#define CAP_DP (0x1 << CAP_DP_BIT)
+#define CAP_ID (0x1 << CAP_ID_BIT)
+#define CAP_SD (0x1 << CAP_SD_BIT)
+#define CAP_PC (0x1f << CAP_PC_BIT)
+#define CAP_CC (0x1f << CAP_CC_BIT)
+
+#define CAP_AF_BIT 24
+#define CAP_PF_BIT 29
+#define CAP_RM_BIT 16
+#define CAP_AS_BIT 14
+#define CAP_AX_BIT 13
+#define CAP_DP_BIT 12
+#define CAP_ID_BIT 11
+#define CAP_SD_BIT 10
+#define CAP_PC_BIT 4
+#define CAP_CC_BIT 0
+
+/*
+ * ROUTER PNPVEND register fields
+ */
+#define PNPVEND_VI (0xffff << PNPVEND_VI_BIT)
+#define PNPVEND_PI (0xffff << PNPVEND_PI_BIT)
+
+#define PNPVEND_VI_BIT 16
+#define PNPVEND_PI_BIT 0
+
+/*
+ * ROUTER PNPUVEND register fields
+ */
+#define PNPUVEND_VI (0xffff << PNPUVEND_VI_BIT)
+#define PNPUVEND_PI (0xffff << PNPUVEND_PI_BIT)
+
+#define PNPUVEND_VI_BIT 16
+#define PNPUVEND_PI_BIT 0
+
+/*
+ * ROUTER MAXPLEN register fields
+ */
+#define MAXPLEN_ML (0xffffff << MAXPLEN_ML_BIT)
+
+#define MAXPLEN_ML_BIT 0
+
+/*
+ * ROUTER CHARO register fields
+ */
+#define CHARO_OR (0x1 << CHARO_OR_BIT)
+#define CHARO_CC (0x7fffffff << CHARO_CC_BIT)
+
+#define CHARO_OR_BIT 31
+#define CHARO_CC_BIT 0
+
+/*
+ * ROUTER CHARI register fields
+ */
+#define CHARI_OR (0x1 << CHARI_OR_BIT)
+#define CHARI_CC (0x7fffffff << CHARI_CC_BIT)
+
+#define CHARI_OR_BIT 31
+#define CHARI_CC_BIT 0
+
+/*
+ * ROUTER PKTO register fields
+ */
+#define PKTO_OR (0x1 << PKTO_OR_BIT)
+#define PKTO_CC (0x7fffffff << PKTO_CC_BIT)
+
+#define PKTO_OR_BIT 31
+#define PKTO_CC_BIT 0
+
+/*
+ * ROUTER PKTI register fields
+ */
+#define PKTI_OR (0x1 << PKTI_OR_BIT)
+#define PKTI_CC (0x7fffffff << PKTI_CC_BIT)
+
+#define PKTI_OR_BIT 31
+#define PKTI_CC_BIT 0
+
+/*
+ * ROUTER CRED register fields
+ */
+#define CRED_OC (0x3f << CRED_OC_BIT)
+#define CRED_IC (0x3f << CRED_IC_BIT)
+
+#define CRED_OC_BIT 6
+#define CRED_IC_BIT 0
+
+/*
+ * ROUTER RTRCOMB register fields
+ */
+#define RTRCOMB_SR (0x1 << RTRCOMB_SR_BIT)
+#define RTRCOMB_EN (0x1 << RTRCOMB_EN_BIT)
+#define RTRCOMB_PR (0x1 << RTRCOMB_PR_BIT)
+#define RTRCOMB_HD (0x1 << RTRCOMB_HD_BIT)
+#define RTRCOMB_PE (0x7ffff << RTRCOMB_PE_BIT)
+#define RTRCOMB_PD (0x1 << RTRCOMB_PD_BIT)
+
+#define RTRCOMB_SR_BIT 31
+#define RTRCOMB_EN_BIT 30
+#define RTRCOMB_PR_BIT 29
+#define RTRCOMB_HD_BIT 28
+#define RTRCOMB_PE_BIT 1
+#define RTRCOMB_PD_BIT 0
+
+struct router_regs {
+ unsigned int resv1; /* 0x000 */
+ unsigned int psetup[255]; /* 0x004 */
+ unsigned int resv2; /* 0x400 */
+ unsigned int routes[255]; /* 0x404 */
+ unsigned int pctrl[32]; /* 0x800 */
+ unsigned int psts[32]; /* 0x880 */
+ unsigned int treload[32]; /* 0x900 */
+ unsigned int pctrl2[32]; /* 0x980 */
+ unsigned int cfgsts; /* 0xA00 */
+ unsigned int timecode; /* 0xA04 */
+ unsigned int ver; /* 0xA08 */
+ unsigned int idiv; /* 0xA0C */
+ unsigned int cfgwe; /* 0xA10 */
+ unsigned int tprescaler; /* 0xA14 */
+ unsigned int imask; /* 0xA18 */
+ unsigned int ipmask; /* 0xA1C */
+ unsigned int pip; /* 0xA20 */
+ unsigned int icodegen; /* 0xA24 */
+ unsigned int isr0; /* 0xA28 */
+ unsigned int isr1; /* 0xA2C */
+ unsigned int isrtimer; /* 0xA30 */
+ unsigned int aitimer; /* 0xA34 */
+ unsigned int isrctimer; /* 0xA38 */
+ unsigned int resv4; /* 0xA3C */
+ unsigned int lrunsts; /* 0xA40 */
+ unsigned int cap; /* 0xA44 */
+ unsigned int resv5[111]; /* 0xA48 */
+ unsigned int charo[31]; /* 0xC04 */ /* TODO check GR718 */
+ unsigned int resv6; /* 0xC80 */
+ unsigned int chari[31]; /* 0xC84 */
+ unsigned int resv7; /* 0xD00 */
+ unsigned int pkto[31]; /* 0xD04 */
+ unsigned int resv8; /* 0xD80 */
+ unsigned int pkti[31]; /* 0xD84 */
+ unsigned int maxplen[32]; /* 0xE00 */
+ unsigned int resv9; /* 0xE80 */
+ unsigned int credcnt[31]; /* 0xE84 */
+ unsigned int resv10[64]; /* 0xF00 */
+ unsigned int resv11; /* 0x1000 */
+ unsigned int rtcomb[255]; /* 0x1004 */
+};
+
+struct router_priv {
+ struct drvmgr_dev *dev;
+
+ /* ROUTER control registers */
+ struct router_regs *regs;
+
+ #ifdef THREAD_SAFE
+ /* ROUTER semaphore */
+ rtems_id sem;
+ #endif
+
+ /* ROUTER driver register */
+ char devname[9];
+ int index; /* Index in order it was probed */
+
+ int minor;
+ int open;
+ struct router_hw_info hwinfo;
+ int nports;
+ int irq_init;
+
+ SPIN_DECLARE(plock[32])
+
+};
+
+int router_count = 0;
+static struct router_priv *priv_tab[ROUTER_MAX];
+
+/* Driver prototypes */
+
+STATIC int router_init(struct router_priv *priv);
+STATIC void router_hwinfo(struct router_priv *priv,
+ struct router_hw_info *hwinfo);
+STATIC int router_acontrol_set(struct router_priv *priv,
+ struct router_route_acontrol *control);
+STATIC int router_acontrol_get(struct router_priv *priv,
+ struct router_route_acontrol *control);
+STATIC int router_portmap_set(struct router_priv *priv,
+ struct router_route_portmap *pmap);
+STATIC int router_portmap_get(struct router_priv *priv,
+ struct router_route_portmap *pmap);
+
+/* -IRQ handler */
+void router_isr(void *arg);
+
+int router_init2(struct drvmgr_dev *dev);
+
+struct drvmgr_drv_ops router_ops =
+{
+ .init = {NULL, router_init2, NULL, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+struct amba_dev_id router_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_SPW_ROUTER},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info router_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_SPW_ROUTER_ID,/* Driver ID */
+ "ROUTER_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &router_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ sizeof(struct router_priv), /* Let DRVMGR allocate for us */
+ },
+ &router_ids[0],
+};
+
+void router_register_drv (void)
+{
+ DBG("Registering SPW ROUTER driver\n");
+ drvmgr_drv_register(&router_drv_info.general);
+}
+
+STATIC void router_hwinfo(struct router_priv *priv,
+ struct router_hw_info *hwinfo)
+{
+ unsigned int tmp;
+
+ /* Find router info */
+ tmp = REG_READ(&priv->regs->cfgsts);
+ hwinfo->nports_spw = (tmp & RTRCFG_SP) >> RTRCFG_SP_BIT;
+ hwinfo->nports_amba = (tmp & RTRCFG_AP) >> RTRCFG_AP_BIT;
+ hwinfo->nports_fifo = (tmp & RTRCFG_FP) >> RTRCFG_FP_BIT;
+ hwinfo->srouting = (tmp & RTRCFG_SR) >> RTRCFG_SR_BIT;
+ hwinfo->pnp_enable = (tmp & RTRCFG_PE) >> RTRCFG_PE_BIT;
+ hwinfo->timers_avail = (tmp & RTRCFG_TA) >> RTRCFG_TA_BIT;
+ hwinfo->pnp_avail = (tmp & RTRCFG_PP) >> RTRCFG_PP_BIT;
+
+ tmp = REG_READ(&priv->regs->ver);
+ hwinfo->ver_major = (tmp & VER_MA) >> VER_MA_BIT;
+ hwinfo->ver_minor = (tmp & VER_MI) >> VER_MI_BIT;
+ hwinfo->ver_patch = (tmp & VER_PA) >> VER_PA_BIT;
+ hwinfo->iid = (tmp & VER_ID) >> VER_ID_BIT;
+
+ /* Find router capabilities */
+ tmp = REG_READ(&priv->regs->cap);
+ hwinfo->amba_port_fifo_size = 4 << ((tmp & CAP_AF) >> CAP_AF_BIT);
+ hwinfo->spw_port_fifo_size = 16 << ((tmp & CAP_PF) >> CAP_PF_BIT);
+ hwinfo->rmap_maxdlen = 4 << ((tmp & CAP_RM) >> CAP_RM_BIT);
+ hwinfo->aux_async = (tmp & CAP_AS) >> CAP_AS_BIT;
+ hwinfo->aux_dist_int_support = (tmp & CAP_AX) >> CAP_AX_BIT;
+ hwinfo->dual_port_support = (tmp & CAP_ID) >> CAP_ID_BIT;
+ hwinfo->dist_int_support = (tmp & CAP_DP) >> CAP_DP_BIT;
+ hwinfo->spwd_support = (tmp & CAP_SD) >> CAP_SD_BIT;
+ hwinfo->pktcnt_support = (tmp & CAP_PC) >> CAP_PC_BIT;
+ hwinfo->charcnt_support = (tmp & CAP_CC) >> CAP_CC_BIT;
+}
+
+STATIC void router_hwinfo_print(struct router_hw_info *hwinfo)
+{
+ DBG(" -PORTS= SPW: %d, AMBA: %d, FIFO: %d\n", hwinfo->nports_spw,
+ hwinfo->nports_amba, hwinfo->nports_fifo);
+ DBG(" -Static routing: %s, Timers: %s\n",
+ (hwinfo->srouting?"Enabled":"Disabled"),
+ (hwinfo->timers_avail?"Available":"N/A"));
+ DBG(" -PnP: %s, %s\n",
+ (hwinfo->pnp_avail?"Available":"N/A"),
+ (hwinfo->pnp_enable?"Enabled":"Disabled"));
+ DBG(" -Version= Major: 0x%02x, Minor: 0x%02x, Patch: 0x%02x, ID: 0x%02x\n",
+ hwinfo->ver_major, hwinfo->ver_minor,
+ hwinfo->ver_patch, hwinfo->iid);
+ DBG(" -Aux: %s, AuxDistInt: %s, DistInt: %s, SPWD: %s, PKTCNT: %s, "
+ "CHARCNT: %s\n",
+ (hwinfo->aux_async?"Async":"Sync"),
+ (hwinfo->aux_dist_int_support?"Supported":"N/A"),
+ (hwinfo->dist_int_support?"Supported":"N/A"),
+ (hwinfo->spwd_support?"Supported":"N/A"),
+ (hwinfo->pktcnt_support?"Supported":"N/A"),
+ (hwinfo->charcnt_support?"Supported":"N/A"));
+}
+
+STATIC int router_acontrol_set(struct router_priv *priv,
+ struct router_route_acontrol *control)
+{
+ int i;
+ for (i=0; i<31; i++) {
+ REG_WRITE(&priv->regs->routes[i], control->control[i]);
+ }
+ for (i=0; i<224; i++) {
+ REG_WRITE(&priv->regs->routes[i+31], control->control_logical[i]);
+ }
+ return ROUTER_ERR_OK;
+}
+
+STATIC int router_acontrol_get(struct router_priv *priv,
+ struct router_route_acontrol *control)
+{
+ int i;
+ for (i=0; i<31; i++) {
+ control->control[i] = REG_READ(&priv->regs->routes[i]);
+ }
+ for (i=0; i<224; i++) {
+ control->control_logical[i] = REG_READ(&priv->regs->routes[i+31]);
+ }
+ return ROUTER_ERR_OK;
+}
+
+STATIC int router_portmap_set(struct router_priv *priv,
+ struct router_route_portmap *pmap)
+{
+ int i;
+ for (i=0; i<31; i++) {
+ REG_WRITE(&priv->regs->psetup[i], pmap->pmap[i]);
+ }
+ for (i=0; i<224; i++) {
+ REG_WRITE(&priv->regs->psetup[i+31], pmap->pmap_logical[i]);
+ }
+ return ROUTER_ERR_OK;
+}
+
+STATIC int router_portmap_get(struct router_priv *priv,
+ struct router_route_portmap *pmap)
+{
+ int i;
+ for (i=0; i<31; i++) {
+ pmap->pmap[i] = REG_READ(&priv->regs->psetup[i]);
+ }
+ for (i=0; i<224; i++) {
+ pmap->pmap_logical[i] = REG_READ(&priv->regs->psetup[i+31]);
+ }
+ return ROUTER_ERR_OK;
+}
+
+STATIC int router_init(struct router_priv *priv)
+{
+ #ifdef THREAD_SAFE
+ int i;
+
+ /* Device Semaphore created with count = 1 */
+ if (rtems_semaphore_create(
+ rtems_build_name('S', 'R', 'O', '0' + priv->index), 1,
+ RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
+ RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
+ RTEMS_NO_PRIORITY_CEILING, 0, &priv->sem) != RTEMS_SUCCESSFUL) {
+ return DRVMGR_FAIL;
+ }
+ #endif
+
+ /* Find router info */
+ router_hwinfo(priv, &priv->hwinfo);
+
+ priv->open = 0;
+ /* Number of ports has to consider the configuration port (1 + SPW + AMBA + FIFO) */
+ priv->nports = 1 + priv->hwinfo.nports_spw + priv->hwinfo.nports_amba +
+ priv->hwinfo.nports_fifo;
+ if ((priv->nports < 2) || (priv->nports > 32)) {
+ return DRVMGR_EIO;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Init port spin-lock memory structures */
+ for (i=0; i<priv->nports; i++) {
+ SPIN_INIT(&priv->plock[i],"portlock");
+ }
+ #endif
+
+ /* DEBUG print */
+ DBG("SPW ROUTER[%d] with following capabilities:\n", priv->index);
+ router_hwinfo_print(&priv->hwinfo);
+
+ return DRVMGR_OK;
+}
+
+int router_init2(struct drvmgr_dev *dev)
+{
+ struct router_priv *priv = dev->priv;
+ struct amba_dev_info *ambadev;
+ struct ambapp_core *pnpinfo;
+ unsigned int tmp;
+ int i;
+ int status;
+
+ DBG("SPW ROUTER[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+
+ if (router_count >= ROUTER_MAX) {
+ return DRVMGR_ENORES;
+ }
+
+ if (priv == NULL) {
+ return DRVMGR_NOMEM;
+ }
+ priv->dev = dev;
+
+ /* Get device information from AMBA PnP information */
+ ambadev = (struct amba_dev_info *)priv->dev->businfo;
+ if (ambadev == NULL) {
+ return DRVMGR_FAIL;
+ }
+ pnpinfo = &ambadev->info;
+ priv->regs = (struct router_regs *)pnpinfo->ahb_slv->start[0];
+ priv->minor = dev->minor_drv;
+
+ /* Initilize driver struct */
+ status = router_init(priv);
+ if (status != DRVMGR_OK) {
+ return status;
+ }
+
+ /* Startup Action:
+ * - Clear interrupts
+ * - Mask interrupts
+ */
+
+ /* Mask interrupts in ROTUER */
+ REG_WRITE(&priv->regs->imask,0);
+ REG_WRITE(&priv->regs->ipmask,0);
+
+ /* Clear interrupts in ROTUER */
+ REG_WRITE(&priv->regs->pip,0xffffffff);
+
+ /* Clear errors in router and ports */
+ tmp = REG_READ(&priv->regs->cfgsts);
+ REG_WRITE(&priv->regs->cfgsts, tmp | RTRCFG_WCLEAR);
+ tmp = REG_READ(&priv->regs->psts[0]);
+ REG_WRITE(&priv->regs->psts[0], (tmp & PSTSCFG_WCLEAR) | PSTSCFG_WCLEAR2);
+ for (i=1; i<priv->nports; i++) {
+ tmp = REG_READ(&priv->regs->psts[i]);
+ REG_WRITE(&priv->regs->psts[i], tmp & PSTS_WCLEAR);
+ }
+
+ /* Register driver internally */
+ priv->index = router_count;
+ priv_tab[priv->index] = priv;
+ router_count++;
+
+ /* Device name */
+ sprintf(priv->devname, "router%d", priv->index);
+
+ return DRVMGR_OK;
+}
+
+void *router_open(unsigned int dev_no)
+{
+ struct router_priv *priv, *ret;
+
+ if (dev_no >= router_count) {
+ DBG("ROUTER Wrong index %u\n", dev_no);
+ return NULL;
+ }
+
+ priv = priv_tab[dev_no];
+
+ if (priv == NULL) {
+ DBG("ROUTER Device not initialized\n");
+ return NULL;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return NULL;
+ }
+ #endif
+
+ if (priv->open) {
+ DBG("ROUTER Device already opened\n");
+ ret = NULL;
+ } else {
+ /* Take the device */
+ priv->open = 1;
+ ret = priv;
+ }
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ret;
+}
+
+int router_close(void *d)
+{
+ struct router_priv *priv = d;
+ int ret;
+
+ if (priv == NULL) {
+ DBG("ROUTER Device not initialized\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ if (priv->open == 0) {
+ DBG("ROUTER Device already closed\n");
+ ret = ROUTER_ERR_ERROR;
+ } else {
+ /* Mark not open */
+ priv->open = 0;
+ ret = ROUTER_ERR_OK;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ret;
+}
+
+STATIC int router_check_open(void *d)
+{
+ struct router_priv *priv = d;
+
+ if (priv == NULL) {
+ DBG("ROUTER Device not initialized\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ if (priv->open == 0) {
+ DBG("ROUTER Device closed\n");
+ return ROUTER_ERR_ERROR;
+ }
+
+ return 0;
+}
+
+STATIC int router_check_port(void *d, int port)
+{
+ int ret = router_check_open(d);
+
+ if (ret == 0) {
+ struct router_priv *priv = d;
+ if((port < 0) || (port >= priv->nports)) {
+ DBG("ROUTER wrong port\n");
+ ret = ROUTER_ERR_EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+STATIC int router_check_distint_support(void *d)
+{
+ int ret = router_check_open(d);
+
+ if (ret == 0) {
+ struct router_priv *priv = d;
+ if (priv->hwinfo.dist_int_support == 0) {
+ DBG("ROUTER Dist interrupts not supported\n");
+ ret = ROUTER_ERR_IMPLEMENTED;
+ }
+ }
+
+ return ret;
+}
+
+int router_hwinfo_get(void *d, struct router_hw_info *hwinfo)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (hwinfo == NULL) {
+ DBG("ROUTER Wrong pointer\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ /* Get hwinfo */
+ router_hwinfo(priv, hwinfo);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_print(void *d)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ /* DEBUG print */
+ DBG("Number of routers: %d\n", router_count);
+ DBG("SPW ROUTER[%d] with following capabilities:\n", priv->index);
+ router_hwinfo_print(&priv->hwinfo);
+
+ return ROUTER_ERR_OK;
+}
+
+/* Configure Router. Leave field NULL in order to skip configuration
+ */
+int router_config_set(void *d, struct router_config *cfg)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (cfg == NULL) {
+ DBG("ROUTER CFG wrong\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ if ((cfg->flags & (ROUTER_FLG_TPRES|ROUTER_FLG_TRLD)) &&
+ !priv->hwinfo.timers_avail) {
+ return ROUTER_ERR_IMPLEMENTED;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ /* Write only configuration bits in Config register */
+ if (cfg->flags & ROUTER_FLG_CFG) {
+ REG_WRITE(&priv->regs->cfgsts, cfg->config & ~(RTRCFG_WCLEAR));
+ }
+
+ /* Write Instance ID to Version Register */
+ if (cfg->flags & ROUTER_FLG_IID) {
+ REG_WRITE(&priv->regs->ver, (cfg->iid << VER_ID_BIT) & VER_ID);
+ }
+
+ /* Write startup-clock-divisor Register */
+ if (cfg->flags & ROUTER_FLG_IDIV) {
+ REG_WRITE(&priv->regs->idiv, (cfg->idiv << IDIV_ID_BIT) & IDIV_ID);
+ }
+
+ /* Write Timer Prescaler Register */
+ if (cfg->flags & ROUTER_FLG_TPRES) {
+ REG_WRITE(&priv->regs->tprescaler,
+ (cfg->timer_prescaler << PRESCALER_RL_BIT) & PRESCALER_RL);
+ }
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ROUTER_ERR_OK;
+}
+
+int router_config_get(void *d, struct router_config *cfg)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (cfg == NULL) {
+ DBG("ROUTER CFG wrong\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ cfg->config = REG_READ(&priv->regs->cfgsts) &
+ ~(RTRCFG_SP|RTRCFG_AP|RTRCFG_FP|RTRCFG_SR|RTRCFG_PE|RTRCFG_ME|
+ RTRCFG_TA|RTRCFG_PP);
+ cfg->iid = (REG_READ(&priv->regs->ver) & VER_ID) >> VER_ID_BIT;
+ cfg->idiv = (REG_READ(&priv->regs->idiv) & IDIV_ID) >> IDIV_ID_BIT;
+ cfg->timer_prescaler =
+ (REG_READ(&priv->regs->tprescaler) & PRESCALER_RL) >> PRESCALER_RL_BIT;
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ROUTER_ERR_OK;
+}
+
+/* Configure Router routing table.
+ * Leave field NULL in order to skip configuration
+ */
+int router_routing_table_set(void *d, struct router_routing_table *cfg)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (cfg == NULL) {
+ DBG("ROUTER CFG wrong\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ /* Write Address control */
+ if (cfg->flags & ROUTER_ROUTE_FLG_CTRL) {
+ router_acontrol_set(priv,&cfg->acontrol);
+ }
+
+ /* Write Port map */
+ if (cfg->flags & ROUTER_ROUTE_FLG_MAP) {
+ router_portmap_set(priv,&cfg->portmap);
+ }
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ROUTER_ERR_OK;
+}
+
+int router_routing_table_get(void *d, struct router_routing_table *cfg)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (cfg == NULL) {
+ DBG("ROUTER CFG wrong\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ /* Read Address control */
+ router_acontrol_get(priv,&cfg->acontrol);
+
+ /* Read Port map */
+ router_portmap_get(priv,&cfg->portmap);
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ROUTER_ERR_OK;
+}
+
+int router_route_set(void *d, struct router_route *route)
+{
+ struct router_priv *priv = d;
+ int i;
+ unsigned int mask;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (route == NULL) {
+ DBG("ROUTER route wrong\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ if (route->from_address < 32) {
+ /* Physical address */
+ if ((route->from_address == 0) ||
+ (route->from_address >= priv->nports)) {
+ DBG("ROUTER wrong physical address\n");
+ return ROUTER_ERR_TOOMANY;
+ }
+ }
+
+ /* Compute port map */
+ mask=0;
+ for (i=0; i < route->count; i++) {
+ if ((route->to_port[i] == 0) || (route->to_port[i] >= priv->nports)) {
+ DBG("ROUTER route wrong destiny port\n");
+ return ROUTER_ERR_EINVAL;
+ }
+ mask |= (0x1 << route->to_port[i]);
+ }
+ if (route->options & ROUTER_ROUTE_PACKETDISTRIBUTION_ENABLE) {
+ mask |= RTPMAP_PD;
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ /* Write port map */
+ REG_WRITE(&priv->regs->psetup[route->from_address-1], mask);
+
+ /* Write Address control */
+ REG_WRITE(&priv->regs->routes[route->from_address-1],
+ route->options & (0xf));
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ROUTER_ERR_OK;
+}
+
+int router_route_get(void *d, struct router_route *route)
+{
+ struct router_priv *priv = d;
+ int i,count;
+ unsigned int mask;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (route == NULL) {
+ DBG("ROUTER route wrong\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ if (route->from_address < 32) {
+ /* Physical address */
+ if ((route->from_address == 0) ||
+ (route->from_address >= priv->nports)) {
+ DBG("ROUTER wrong physical address\n");
+ return ROUTER_ERR_TOOMANY;
+ }
+ }
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ /* Get Address control */
+ route->options =
+ REG_READ(&priv->regs->routes[route->from_address-1]) & (0xf);
+
+ /* Read port map */
+ mask=REG_READ(&priv->regs->psetup[route->from_address-1]);
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ if (mask & RTPMAP_PD) {
+ route->options |= ROUTER_ROUTE_PACKETDISTRIBUTION_ENABLE;
+ }
+
+ /*DBG("ROUTE from address 0x%02x read, PMAP: 0x%08x, CTRL: 0x%08x\n",
+ * (unsigned int) route->from_address, mask,
+ * (unsigned int) route->options);*/
+
+ i=0;
+ count=0;
+ mask &= (RTPMAP_PE);
+ while (mask != 0) {
+ if (mask & 0x1) {
+ route->to_port[count] = i;
+ count++;
+ }
+ mask >>= 1;
+ i++;
+ }
+ route->count=count;
+
+ return ROUTER_ERR_OK;
+}
+
+int router_write_enable(void *d)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ REG_WRITE(&priv->regs->cfgwe, 0x1);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_write_disable(void *d)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ REG_WRITE(&priv->regs->cfgwe, 0x0);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_port_ioc(void *d, int port, struct router_port *cfg)
+{
+ struct router_priv *priv = d;
+ unsigned int ctrl, ctrl2, sts, timer, pktl;
+ SPIN_IRQFLAGS(irqflags);
+ int error = router_check_port(d, port);
+
+ if (error)
+ return error;
+
+ if (cfg == NULL) {
+ DBG("ROUTER Wrong cfg\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ SPIN_LOCK_IRQ(&priv->plock[port], irqflags);
+
+ ctrl = cfg->ctrl;
+ if (cfg->flag & ROUTER_PORT_FLG_GET_CTRL) {
+ ctrl = REG_READ(&priv->regs->pctrl[port]);
+ }
+ ctrl2 = cfg->ctrl;
+ if (cfg->flag & ROUTER_PORT_FLG_GET_CTRL2) {
+ ctrl2 = REG_READ(&priv->regs->pctrl2[port]);
+ }
+ sts = cfg->sts;
+ if (cfg->flag & ROUTER_PORT_FLG_GET_STS) {
+ sts = REG_READ(&priv->regs->psts[port]);
+ }
+ timer = cfg->timer_reload;
+ if (cfg->flag & ROUTER_PORT_FLG_GET_TIMER) {
+ REG_READ(&priv->regs->treload[port]);
+ }
+ pktl = cfg->packet_length;
+ if (cfg->flag & ROUTER_PORT_FLG_GET_PKTLEN) {
+ REG_READ(&priv->regs->maxplen[port]);
+ }
+
+ if (cfg->flag & ROUTER_PORT_FLG_SET_CTRL) {
+ REG_WRITE(&priv->regs->pctrl[port], cfg->ctrl);
+ }
+ if (cfg->flag & ROUTER_PORT_FLG_SET_CTRL2) {
+ REG_WRITE(&priv->regs->pctrl2[port], cfg->ctrl2);
+ }
+ if (cfg->flag & ROUTER_PORT_FLG_SET_STS) {
+ REG_WRITE(&priv->regs->psts[port], cfg->sts);
+ }
+ if (cfg->flag & ROUTER_PORT_FLG_SET_TIMER) {
+ REG_WRITE(&priv->regs->treload[port], cfg->timer_reload & PTIMER_RL);
+ }
+ if (cfg->flag & ROUTER_PORT_FLG_SET_PKTLEN) {
+ REG_WRITE(&priv->regs->maxplen[port], cfg->packet_length & MAXPLEN_ML);
+ }
+
+ SPIN_UNLOCK_IRQ(&priv->plock[port], irqflags);
+
+ cfg->ctrl = ctrl;
+ cfg->ctrl2 = ctrl2;
+ cfg->sts = sts;
+ cfg->timer_reload = timer;
+ cfg->packet_length = pktl;
+
+ return ROUTER_ERR_OK;
+}
+
+int router_port_ctrl_rmw(void *d, int port, uint32_t *oldvalue, uint32_t bitmask, uint32_t value)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+ unsigned int oldctrl, ctrl;
+ SPIN_IRQFLAGS(irqflags);
+
+ if (error)
+ return error;
+
+ SPIN_LOCK_IRQ(&priv->plock[port], irqflags);
+
+ oldctrl = REG_READ(&priv->regs->pctrl[port]);
+ ctrl = ((oldctrl & ~(bitmask)) | (value & bitmask));
+ REG_WRITE(&priv->regs->pctrl[port], ctrl);
+
+ SPIN_UNLOCK_IRQ(&priv->plock[port], irqflags);
+
+ if (oldvalue != NULL) {
+ *oldvalue = oldctrl;
+ }
+
+ return ROUTER_ERR_OK;
+}
+
+int router_port_ctrl2_rmw(void *d, int port, uint32_t *oldvalue, uint32_t bitmask, uint32_t value)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+ unsigned int oldctrl, ctrl;
+ SPIN_IRQFLAGS(irqflags);
+
+ if (error)
+ return error;
+
+ SPIN_LOCK_IRQ(&priv->plock[port], irqflags);
+
+ oldctrl = REG_READ(&priv->regs->pctrl2[port]);
+ ctrl = ((oldctrl & ~(bitmask)) | (value & bitmask));
+ REG_WRITE(&priv->regs->pctrl2[port], ctrl);
+
+ SPIN_UNLOCK_IRQ(&priv->plock[port], irqflags);
+
+ if (oldvalue != NULL) {
+ *oldvalue = oldctrl;
+ }
+
+ return ROUTER_ERR_OK;
+}
+
+/* Read Port Control register */
+int router_port_ctrl_get(void *d, int port, uint32_t *ctrl)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+
+ if (error)
+ return error;
+
+ if (ctrl == NULL) {
+ DBG("ROUTER Wrong ctrl\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ *ctrl = REG_READ(&priv->regs->pctrl[port]);
+ return ROUTER_ERR_OK;
+}
+
+/* Read Port Status register and clear errors if there are */
+int router_port_status(void *d, int port, uint32_t *sts, uint32_t clrmsk)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+ SPIN_IRQFLAGS(irqflags);
+
+ if (error)
+ return error;
+
+ if (sts == NULL) {
+ DBG("ROUTER Wrong sts\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ SPIN_LOCK_IRQ(&priv->plock[port], irqflags);
+ *sts = REG_READ(&priv->regs->psts[port]);
+ if (port == 0) {
+ REG_WRITE(&priv->regs->psts[port], ((*sts) & (PSTSCFG_WCLEAR & clrmsk)) | (PSTSCFG_WCLEAR2 & clrmsk));
+ }else{
+ REG_WRITE(&priv->regs->psts[port], (*sts) & (PSTS_WCLEAR & clrmsk));
+ }
+ SPIN_UNLOCK_IRQ(&priv->plock[port], irqflags);
+ return ROUTER_ERR_OK;
+}
+
+/* Read Port Control2 register */
+int router_port_ctrl2_get(void *d, int port, uint32_t *ctrl2)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+
+ if (error)
+ return error;
+
+ if (ctrl2 == NULL) {
+ DBG("ROUTER Wrong ctrl2\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ *ctrl2 = REG_READ(&priv->regs->pctrl2[port]);
+ return ROUTER_ERR_OK;
+}
+
+/* Write Port Control Register */
+int router_port_ctrl_set(void *d, int port, uint32_t mask, uint32_t ctrl)
+{
+ return router_port_ctrl_rmw(d, port, NULL, mask, ctrl);
+}
+
+/* Write Port Control2 Register */
+int router_port_ctrl2_set(void *d, int port, uint32_t mask, uint32_t ctrl2)
+{
+ return router_port_ctrl_rmw(d, port, NULL, mask, ctrl2);
+}
+
+int router_port_treload_set(void *d, int port, uint32_t reload)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+
+ if (error)
+ return error;
+
+ REG_WRITE(&priv->regs->treload[port], reload & PTIMER_RL);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_port_treload_get(void *d, int port, uint32_t *reload)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+
+ if (error)
+ return error;
+
+ if (reload == NULL) {
+ DBG("ROUTER Wrong reload pointer\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ *reload = REG_READ(&priv->regs->treload[port]) & PTIMER_RL;
+
+ return ROUTER_ERR_OK;
+}
+
+int router_port_maxplen_set(void *d, int port, uint32_t length)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+
+ if (error)
+ return error;
+
+ REG_WRITE(&priv->regs->maxplen[port], length & MAXPLEN_ML);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_port_maxplen_get(void *d, int port, uint32_t *length)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+
+ if (error)
+ return error;
+
+ if (length == NULL) {
+ DBG("ROUTER Wrong length pointer\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ *length = REG_READ(&priv->regs->maxplen[port]);
+
+ return ROUTER_ERR_OK;
+}
+
+/* Get Port Link Status */
+int router_port_link_status(void *d, int port)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+
+ if (error)
+ return error;
+
+ return ((REG_READ(&priv->regs->psts[port]) & PSTS_LS) >> PSTS_LS_BIT);
+}
+
+int router_port_disable(void *d, int port)
+{
+ return router_port_ctrl_rmw(d, port, NULL, PCTRL_DI, PCTRL_DI);
+}
+
+int router_port_enable(void *d, int port)
+{
+ return router_port_ctrl_rmw(d, port, NULL, PCTRL_DI, 0);
+}
+
+int router_port_link_stop(void *d, int port)
+{
+ return router_port_ctrl_rmw(d, port, NULL, PCTRL_LD | PCTRL_LS, PCTRL_LD);
+}
+
+int router_port_link_start(void *d, int port)
+{
+ return router_port_ctrl_rmw(d, port, NULL, PCTRL_LD | PCTRL_LS, PCTRL_LS);
+}
+
/* Spill (discard) any data pending in a port's receive queue.
 *
 * Sets PCTRL_RS and waits for the hardware to clear it, which signals
 * that the spill has completed.
 *
 * NOTE(review): the completion poll runs inside SPIN_LOCK_IRQ, i.e.
 * with interrupts disabled, and has no timeout — if the hardware never
 * clears PCTRL_RS this spins forever with IRQs off. Presumably the
 * spill completes quickly on real hardware; confirm against the router
 * manual before relying on this in time-critical paths.
 */
int router_port_link_receive_spill(void *d, int port)
{
	struct router_priv *priv = d;
	unsigned int ctrl;
	SPIN_IRQFLAGS(irqflags);
	int error = router_check_port(d, port);

	if (error)
		return error;

	SPIN_LOCK_IRQ(&priv->plock[port], irqflags);

	ctrl = REG_READ(&priv->regs->pctrl[port]);
	REG_WRITE(&priv->regs->pctrl[port], (ctrl| (PCTRL_RS)));

	/* Wait until the spill is done */
	while(REG_READ(&priv->regs->pctrl[port]) & PCTRL_RS) {};

	SPIN_UNLOCK_IRQ(&priv->plock[port], irqflags);

	return ROUTER_ERR_OK;
}
+
/* Reset a port's transmit FIFO/credit state.
 *
 * Sets PCTRL_TF and waits for the hardware to clear it, which signals
 * that the transmit reset has completed.
 *
 * NOTE(review): like router_port_link_receive_spill(), the completion
 * poll runs with interrupts disabled and without a timeout.
 */
int router_port_link_transmit_reset(void *d, int port)
{
	struct router_priv *priv = d;
	unsigned int ctrl;
	SPIN_IRQFLAGS(irqflags);
	int error = router_check_port(d, port);

	if (error)
		return error;

	SPIN_LOCK_IRQ(&priv->plock[port], irqflags);

	ctrl = REG_READ(&priv->regs->pctrl[port]);
	REG_WRITE(&priv->regs->pctrl[port], (ctrl| (PCTRL_TF)));

	/* Wait until the transmit reset is done (comment previously said
	 * "spill" — copy-paste from the receive-spill function above) */
	while(REG_READ(&priv->regs->pctrl[port]) & PCTRL_TF) {};

	SPIN_UNLOCK_IRQ(&priv->plock[port], irqflags);

	return ROUTER_ERR_OK;
}
+
+int router_port_cred_get(void *d, int port, uint32_t *cred)
+{
+ struct router_priv *priv = d;
+ int error = router_check_port(d, port);
+
+ if (error)
+ return error;
+
+ if (cred == NULL) {
+ DBG("ROUTER Wrong cred pointer\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ *cred = REG_READ(&priv->regs->credcnt[port]);
+ return ROUTER_ERR_OK;
+}
+
+int router_instance_set(void *d, uint8_t instance)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ REG_WRITE(&priv->regs->ver, (instance << VER_ID_BIT) & VER_ID);
+ return ROUTER_ERR_OK;
+}
+
+int router_idiv_set(void *d, uint8_t idiv)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ REG_WRITE(&priv->regs->idiv, (idiv << IDIV_ID_BIT) & IDIV_ID);
+ return ROUTER_ERR_OK;
+}
+
+int router_tpresc_set(void *d, uint32_t prescaler)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ REG_WRITE(&priv->regs->tprescaler,
+ (prescaler << PRESCALER_RL_BIT) & PRESCALER_RL);
+ return ROUTER_ERR_OK;
+}
+
+int router_instance_get(void *d, uint8_t *instance)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (instance == NULL) {
+ DBG("ROUTER Wrong instance pointer\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ *instance = REG_READ(&priv->regs->ver);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_idiv_get(void *d, uint8_t *idiv)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (idiv == NULL) {
+ DBG("ROUTER Wrong idiv pointer\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ *idiv = REG_READ(&priv->regs->idiv);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_tpresc_get(void *d, uint32_t *prescaler)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (prescaler == NULL) {
+ DBG("ROUTER Wrong prescaler pointer\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ *prescaler = REG_READ(&priv->regs->tprescaler);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_cfgsts_set(void *d, uint32_t cfgsts)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ REG_WRITE(&priv->regs->cfgsts, cfgsts);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_cfgsts_get(void *d, uint32_t *cfgsts)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ if (cfgsts == NULL) {
+ DBG("ROUTER Wrong cfgsts pointer\n");
+ return ROUTER_ERR_EINVAL;
+ }
+
+ *cfgsts = REG_READ(&priv->regs->cfgsts);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_tc_enable(void *d)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ REG_WRITE(&priv->regs->timecode, TC_EN);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_tc_disable(void *d)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ REG_WRITE(&priv->regs->timecode, 0);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_tc_reset(void *d)
+{
+ struct router_priv *priv = d;
+ unsigned int tc;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ tc = REG_READ(&priv->regs->timecode);
+ REG_WRITE(&priv->regs->timecode, tc | TC_RE);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_tc_get(void *d)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ return (REG_READ(&priv->regs->timecode) & (TC_CF | TC_TC)) >> TC_TC_BIT;
+}
+
/* Unmask (enable) the router interrupt sources given in options.
 *
 * The new mask is OR-ed into the current IMASK register value under the
 * device semaphore (when THREAD_SAFE), so concurrent mask updates do not
 * lose bits.
 *
 * NOTE(review): all currently pending interrupts are cleared
 * (PIP <- 0xffffffff) before the new mask is written — events that were
 * pending at unmask time are discarded, not delivered.
 */
int router_interrupt_unmask(void *d, int options)
{
	struct router_priv *priv = d;
	unsigned int mask;
	int error = router_check_open(d);

	if (error)
		return error;

	#ifdef THREAD_SAFE
	/* Take device lock - Wait until we get semaphore */
	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
	    != RTEMS_SUCCESSFUL) {
		DBG("ROUTER Sempahore failed\n");
		return ROUTER_ERR_ERROR;
	}
	#endif

	/* Unmask interrupts in ROUTER */
	/* Get previous mask */
	mask = REG_READ(&priv->regs->imask);

	/* Clear previous interrupts*/
	REG_WRITE(&priv->regs->pip, 0xffffffff);

	/* Set new mask */
	REG_WRITE(&priv->regs->imask, mask | options);

	#ifdef THREAD_SAFE
	/* Unlock dev */
	rtems_semaphore_release(priv->sem);
	#endif

	return ROUTER_ERR_OK;
}
+
/* Mask (disable) the router interrupt sources given in options.
 *
 * The selected bits are cleared in the current IMASK register value
 * under the device semaphore (when THREAD_SAFE).
 *
 * NOTE(review): all currently pending interrupts are cleared
 * (PIP <- 0xffffffff) as part of the update — pending events for
 * still-enabled sources are discarded too.
 */
int router_interrupt_mask(void *d, int options)
{
	struct router_priv *priv = d;
	unsigned int mask;
	int error = router_check_open(d);

	if (error)
		return error;

	#ifdef THREAD_SAFE
	/* Take device lock - Wait until we get semaphore */
	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
	    != RTEMS_SUCCESSFUL) {
		DBG("ROUTER Sempahore failed\n");
		return ROUTER_ERR_ERROR;
	}
	#endif

	/* Mask interrupts in ROUTER */
	/* Get previous mask */
	mask = REG_READ(&priv->regs->imask);

	/* Clear previous interrupts*/
	REG_WRITE(&priv->regs->pip, 0xffffffff);

	/* Set new mask */
	REG_WRITE(&priv->regs->imask, mask & ~(options));

	#ifdef THREAD_SAFE
	/* Unlock dev */
	rtems_semaphore_release(priv->sem);
	#endif

	return ROUTER_ERR_OK;
}
+
+int router_port_interrupt_unmask(void *d, int port)
+{
+ struct router_priv *priv = d;
+ unsigned int mask;
+ int error = router_check_port(d, port);
+
+ if (error)
+ return error;
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ /* Unmask interrupts in ROTUER */
+ /* Get previous mask */
+ mask = REG_READ(&priv->regs->ipmask);
+
+ /* Clear previous interrupts*/
+ REG_WRITE(&priv->regs->pip, (0x1 << port));
+
+ /* Set new mask */
+ REG_WRITE(&priv->regs->ipmask, mask | (0x1 << port));
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ROUTER_ERR_OK;
+}
+
+int router_port_interrupt_mask(void *d, int port)
+{
+ struct router_priv *priv = d;
+ unsigned int mask;
+ int error = router_check_port(d, port);
+
+ if (error)
+ return error;
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ /* Mask interrupts in ROTUER */
+ /* Get previous mask */
+ mask = REG_READ(&priv->regs->ipmask);
+
+ /* Clear previous interrupts*/
+ REG_WRITE(&priv->regs->pip, (0x1 << port));
+
+ /* Set new mask */
+ REG_WRITE(&priv->regs->ipmask, mask & ~(0x1 << port));
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ROUTER_ERR_OK;
+}
+
+int router_reset(void *d)
+{
+ struct router_priv *priv = d;
+ int error = router_check_open(d);
+
+ if (error)
+ return error;
+
+ /* Reset router */
+ REG_WRITE(&priv->regs->cfgsts, RTRCFG_RE);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_icodegen_enable(void *d, uint8_t intn, uint32_t aitimer,
+ int options)
+{
+ struct router_priv *priv = d;
+ int error = router_check_distint_support(d);
+
+ if (error)
+ return error;
+
+ #ifdef THREAD_SAFE
+ /* Take device lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL) {
+ DBG("ROUTER Sempahore failed\n");
+ return ROUTER_ERR_ERROR;
+ }
+ #endif
+
+ REG_WRITE(&priv->regs->icodegen, (options & ~(ICODEGEN_IN)) |
+ ICODEGEN_EN | (intn & ICODEGEN_IN));
+
+ if (options & ICODEGEN_TE) {
+ REG_WRITE(&priv->regs->aitimer, (aitimer & AITIMER_RL));
+ }
+
+ #ifdef THREAD_SAFE
+ /* Unlock dev */
+ rtems_semaphore_release(priv->sem);
+ #endif
+
+ return ROUTER_ERR_OK;
+}
+
+int router_icodegen_disable(void *d)
+{
+ struct router_priv *priv = d;
+ int error = router_check_distint_support(d);
+
+ if (error)
+ return error;
+
+ REG_WRITE(&priv->regs->icodegen, ICODEGEN_TE);
+
+ return ROUTER_ERR_OK;
+}
+
+int router_isrctimer_set(void *d, uint32_t reloadvalue)
+{
+ struct router_priv *priv = d;
+ int error = router_check_distint_support(d);
+
+ if (error)
+ return error;
+
+ /* Set ISRC TIMER */
+ REG_WRITE(&priv->regs->isrctimer, (reloadvalue & (ISRCTIMER_RL)));
+
+ return ROUTER_ERR_OK;
+}
+
+int router_isrtimer_set(void *d, uint32_t reloadvalue)
+{
+ struct router_priv *priv = d;
+ int error = router_check_distint_support(d);
+
+ if (error)
+ return error;
+
+ /* Set ISR TIMER */
+ REG_WRITE(&priv->regs->isrtimer, (reloadvalue & (ISRTIMER_RL)));
+
+ return ROUTER_ERR_OK;
+}
+
/* Read the ISRC timer reload value (masked with ISRCTIMER_RL).
 *
 * Returns ROUTER_ERR_OK, an error from router_check_distint_support()
 * when distributed-interrupt support is absent, or ROUTER_ERR_EINVAL
 * when reloadvalue is NULL.
 */
int router_isrctimer_get(void *d, uint32_t *reloadvalue)
{
	struct router_priv *priv = d;
	int error = router_check_distint_support(d);

	if (error)
		return error;

	if (reloadvalue == NULL) {
		DBG("ROUTER Wrong reloadvalue pointer\n");
		return ROUTER_ERR_EINVAL;
	}

	/* Get ISRC TIMER (comment previously said "Set") */
	*reloadvalue = REG_READ(&priv->regs->isrctimer) & (ISRCTIMER_RL);

	return ROUTER_ERR_OK;
}
+
/* Read the ISR timer reload value (masked with ISRTIMER_RL).
 *
 * Returns ROUTER_ERR_OK, an error from router_check_distint_support()
 * when distributed-interrupt support is absent, or ROUTER_ERR_EINVAL
 * when reloadvalue is NULL.
 */
int router_isrtimer_get(void *d, uint32_t *reloadvalue)
{
	struct router_priv *priv = d;
	int error = router_check_distint_support(d);

	if (error)
		return error;

	if (reloadvalue == NULL) {
		DBG("ROUTER Wrong reloadvalue pointer\n");
		return ROUTER_ERR_EINVAL;
	}

	/* Get ISR TIMER (comment previously said "Set") */
	*reloadvalue = REG_READ(&priv->regs->isrtimer) & (ISRTIMER_RL);

	return ROUTER_ERR_OK;
}
diff --git a/bsps/shared/grlib/spw/spwtdp.c b/bsps/shared/grlib/spw/spwtdp.c
new file mode 100644
index 0000000000..df74675355
--- /dev/null
+++ b/bsps/shared/grlib/spw/spwtdp.c
@@ -0,0 +1,991 @@
+/* SPWTDP - SpaceWire Time Distribution Protocol. The driver provides
+ * device discovery and interrupt management.
+ *
+ * COPYRIGHT (c) 2017.
+ * Cobham Gaisler AB
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <rtems.h>
+#include <rtems/bspIo.h>
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp.h>
+#include <grlib/ambapp_bus.h>
+#include <bsp.h>
+#include <grlib/spwtdp.h>
+
+#include <grlib/grlib_impl.h>
+
+/*#define STATIC*/
+#define STATIC static
+
+/*#define INLINE*/
+#define INLINE inline
+
+/*#define UNUSED*/
+#define UNUSED __attribute__((unused))
+
+#define DEBUG 1
+
+#ifdef DEBUG
+#define DBG(x...) printf(x)
+#else
+#define DBG(x...)
+#endif
+
+/* Memory and HW Registers Access routines. All 32-bit access routines */
+#define REG_WRITE(addr, val) \
+ (*(volatile unsigned int *)(addr) = (unsigned int)(val))
+#define REG_READ(addr) (*(volatile unsigned int *)(addr))
+
+/*
+ * Configuration register definitions
+ * DEFINED in header
+ */
+
+/*
+ * Control register definitions
+ * DEFINED in header
+ */
+
+/*
+ * TSTX Control register definitions
+ */
+#define TSTXCTRL_TSTC (0xff<<TSTXCTRL_TSTC_BIT)
+#define ETCTRL_PF (0xffff<<ETCTRL_PF_BIT)
+
+#define TSTXCTRL_TSTC_BIT 24
+#define ETCTRL_PF_BIT 0
+
+#define DEVNAME_LEN 11
+/* Private structure of SPWTDP driver. */
+struct spwtdp_priv {
+ char devname[DEVNAME_LEN];
+ struct drvmgr_dev *dev; /* Device */
+ struct spwtdp_regs *regs;
+ int open;
+ int index;
+ int initiator; /* Initiator configured */
+ int target; /* Target configured */
+ int freq; /* Frequency configured */
+
+ /* Spin-lock ISR protection */
+ SPIN_DECLARE(devlock);
+
+ /* Driver semaphore */
+ rtems_id sem;
+ spwtdp_isr_t isr;
+ void * isr_arg;
+};
+int spwtdp_count = 0;
+static struct spwtdp_priv *priv_tab[SPWTDP_MAX];
+
+
+STATIC void spwtdp_isr(void *data);
+STATIC int spwtdp_hw_reset(struct spwtdp_priv *priv);
+STATIC int spwtdp_init2(struct drvmgr_dev *dev);
+
+struct drvmgr_drv_ops spwtdp_ops =
+{
+ {NULL, spwtdp_init2, NULL, NULL},
+ NULL,
+ NULL
+};
+
+struct amba_dev_id spwtdp_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_SPWTDP},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info spwtdp_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_SPWTDP_ID,/* Driver ID */
+ "SPWTDP_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &spwtdp_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ sizeof(struct spwtdp_priv), /* Let DrvMgr allocate priv */
+ },
+ &spwtdp_ids[0]
+};
+
+/* Register the SPWTDP Driver */
+void spwtdp_register_drv(void)
+{
+ DBG("Registering SPWTDP driver\n");
+ drvmgr_drv_register(&spwtdp_drv_info.general);
+}
+
+STATIC int spwtdp_init(struct spwtdp_priv *priv)
+{
+ struct amba_dev_info *ainfo = priv->dev->businfo;
+ struct ambapp_apb_info *apb;
+
+ /* Get device information from AMBA PnP information */
+ if (ainfo == NULL) {
+ return -1;
+ }
+ apb = ainfo->info.apb_slv;
+ priv->regs = (struct spwtdp_regs *)apb->start;
+
+ spwtdp_hw_reset(priv);
+ /* Only support 56 bits counter */
+ if (REG_READ(&priv->regs->dat_ctrl) != 0x2f00) {
+ DBG("SPWTDP only supports 56 bit precission counters.\n");
+ return -1;
+ }
+ DBG("SPWTDP driver initialized\n");
+
+ return 0;
+}
+
+/*** INTERFACE TO DRIVER MANAGER ***/
/*** INTERFACE TO DRIVER MANAGER ***/
/* Driver-manager init level 2 entry: register the device in the driver's
 * device table, create the per-device semaphore and initialize the
 * hardware via spwtdp_init().
 *
 * NOTE(review): on spwtdp_init() failure this returns -1, while the
 * other failure paths return DRVMGR_* codes — confirm -1 maps to a
 * valid drvmgr error. Also, neither the created semaphore nor the
 * priv_tab[]/spwtdp_count registration is rolled back on that path.
 */
STATIC int spwtdp_init2(struct drvmgr_dev *dev)
{
	int status;
	struct spwtdp_priv *priv;

	DBG("SPWTDP[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);

	if (spwtdp_count >= SPWTDP_MAX)
		return DRVMGR_ENORES;

	priv = dev->priv;
	if (priv == NULL)
		return DRVMGR_NOMEM;

	/* Register device */
	priv->dev = dev;
	priv->index = spwtdp_count;
	priv_tab[priv->index] = priv;
	snprintf(priv->devname, DEVNAME_LEN, "spwtdp%01u", priv->index);
	spwtdp_count++;

	/* Initialize Semaphore */
	if (rtems_semaphore_create(
		rtems_build_name('S', 'T', 'P', '0' + priv->index), 1,
		RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
		RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
		RTEMS_NO_PRIORITY_CEILING, 0, &priv->sem) != RTEMS_SUCCESSFUL) {
		priv->sem = RTEMS_ID_NONE;
		return DRVMGR_FAIL;
	}

	/* Initialize SPWTDP Hardware */
	status = spwtdp_init(priv);
	if (status) {
		printk("Failed to initialize spwtdp driver %d\n", status);
		return -1;
	}

	return DRVMGR_OK;
}
+
+/* Hardware Reset of SPWTDP */
+STATIC int spwtdp_hw_reset(struct spwtdp_priv *priv)
+{
+ int i = 1000;
+ SPIN_IRQFLAGS(irqflags);
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+
+ /* Clear interrupts */
+ REG_WRITE(&priv->regs->ists, SPWTDP_IRQ_WCLEAR);
+
+ /* Reset the SPWTDP core */
+ REG_WRITE(&priv->regs->conf[0], CONF0_RS);
+
+ /* Wait for reset */
+ while ((REG_READ(&priv->regs->conf[0]) & CONF0_RS) && (i > 0)) {
+ i--;
+ }
+
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ return ((i > 0)? SPWTDP_ERR_OK : SPWTDP_ERR_ERROR);
+}
+
+int spwtdp_reset(void *spwtdp)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ /* Check priv and unregister isr */
+ int ret = spwtdp_isr_unregister(spwtdp);
+ if (ret != SPWTDP_ERR_OK)
+ return ret;
+
+ priv->initiator=0;
+ priv->target=0;
+ priv->freq=0;
+
+ return spwtdp_hw_reset(priv);
+}
+
/* Open SPWTDP device number dev_no and return its handle, or NULL when
 * the index is out of range, the device is not registered, or it is
 * already open.
 *
 * NOTE(review): the open-flag check and set are not performed under a
 * lock, so two tasks racing on the same dev_no could both "open" the
 * device — presumably open/close are only called from one init task;
 * confirm before relying on concurrent opens.
 */
void *spwtdp_open(int dev_no)
{
	struct spwtdp_priv *priv;

	if (dev_no >= spwtdp_count)
		return NULL;

	/* Get Device */
	priv = priv_tab[dev_no];
	if ((priv == NULL)||(priv->open == 1)) {
		return NULL;
	}

	/* Set initial state of software */
	priv->open = 1;

	return priv;
}
+
+int spwtdp_close(void *spwtdp)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ /* Check priv and reset core */
+ int ret = spwtdp_reset(spwtdp);
+ if (ret != SPWTDP_ERR_OK)
+ return ret;
+
+ priv->open = 0;
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_freq_setup(void *spwtdp, uint32_t fsinc, uint32_t cv, uint8_t etinc)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ /* Check priv */
+ if (priv == NULL)
+ return SPWTDP_ERR_NOINIT;
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Take SPWTDP lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return SPWTDP_ERR_ERROR;
+
+ REG_WRITE(&priv->regs->conf[1], fsinc & CONF1_FSINC);
+ REG_WRITE(&priv->regs->conf[2],
+ ((cv<<CONF2_CV_BIT) & CONF2_CV) |
+ ((uint32_t)etinc & CONF2_ETINC));
+
+ rtems_semaphore_release(priv->sem);
+ priv->freq = 1;
+
+ return SPWTDP_ERR_OK;
+}
+
+#define CONF0_INI_MASK (CONF0_EP|CONF0_ET|CONF0_SP|CONF0_SE|CONF0_LE| \
+ CONF0_TD)
+int spwtdp_initiator_conf(void *spwtdp, uint8_t mapping, uint32_t options)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ /* Check priv */
+ if (priv == NULL)
+ return SPWTDP_ERR_NOINIT;
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check if configured as target */
+ if (priv->target == 1)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Take SPWTDP lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return SPWTDP_ERR_ERROR;
+
+ unsigned int conf0 = REG_READ(&priv->regs->conf[0]);
+ conf0 &= ~(CONF0_INI_MASK|CONF0_MAP);
+ REG_WRITE(&priv->regs->conf[0],
+ conf0 | (options & CONF0_INI_MASK) |
+ (((uint32_t)mapping << CONF0_MAP_BIT) & CONF0_MAP));
+
+ priv->initiator = 1;
+
+ rtems_semaphore_release(priv->sem);
+
+ return SPWTDP_ERR_OK;
+}
+
+#define CONF0_TAR_MASK (CONF0_JE|CONF0_ST|CONF0_EP|CONF0_ET|CONF0_SP| \
+ CONF0_SE|CONF0_LE|CONF0_TD|CONF0_ME)
+int spwtdp_target_conf(void *spwtdp, uint8_t mapping, uint32_t options)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ /* Check priv */
+ if (priv == NULL)
+ return SPWTDP_ERR_NOINIT;
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check if configured as initiator */
+ if (priv->initiator == 1)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Take SPWTDP lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return SPWTDP_ERR_ERROR;
+
+ unsigned int conf0 = REG_READ(&priv->regs->conf[0]);
+ conf0 &= ~(CONF0_TAR_MASK|CONF0_MAP);
+ REG_WRITE(&priv->regs->conf[0],
+ conf0 | (options & CONF0_TAR_MASK) |
+ (((uint32_t)mapping << CONF0_MAP_BIT) & CONF0_MAP));
+
+ priv->initiator = 1;
+
+ rtems_semaphore_release(priv->sem);
+
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_initiator_int_conf(void *spwtdp, uint8_t stm, uint8_t inrx,
+ uint8_t intx)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ /* Check priv */
+ if (priv == NULL)
+ return SPWTDP_ERR_NOINIT;
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check if configured as initiator */
+ if (priv->initiator != 1)
+ return SPWTDP_ERR_EINVAL;
+
+ REG_WRITE(&priv->regs->conf[3],
+ (((uint32_t)stm << CONF3_STM_BIT) & CONF3_STM) |
+ (((uint32_t)inrx << CONF3_INRX_BIT) & CONF3_INRX) |
+ (((uint32_t)intx << CONF3_INTX_BIT) & CONF3_INTX));
+
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_target_int_conf(void *spwtdp, uint8_t inrx, uint8_t intx,
+ uint32_t options)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ /* Check priv */
+ if (priv == NULL) {
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check if configured as target */
+ if (priv->target != 1)
+ return SPWTDP_ERR_EINVAL;
+
+ REG_WRITE(&priv->regs->conf[3],
+ (options & CONF3_DI) |
+ (((uint32_t)inrx << CONF3_INRX_BIT) & CONF3_INRX) |
+ (((uint32_t)intx << CONF3_INTX_BIT) & CONF3_INTX));
+
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_initiator_enable(void *spwtdp)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ /* Check priv */
+ if (priv == NULL) {
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check if configured as initiator */
+ if (priv->initiator != 1)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check if frequency is configured */
+ if (priv->freq != 1)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Take SPWTDP lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return SPWTDP_ERR_ERROR;
+
+ unsigned int conf0 = REG_READ(&priv->regs->conf[0]);
+ REG_WRITE(&priv->regs->conf[0], conf0 | CONF0_TE);
+
+ rtems_semaphore_release(priv->sem);
+
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_target_enable(void *spwtdp)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ /* Check priv */
+ if (priv == NULL)
+ return SPWTDP_ERR_NOINIT;
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check if configured as target */
+ if (priv->target != 1)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check if frequency is configured */
+ if (priv->freq != 1)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Take SPWTDP lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return SPWTDP_ERR_ERROR;
+
+ unsigned int conf0 = REG_READ(&priv->regs->conf[0]);
+ REG_WRITE(&priv->regs->conf[0], conf0 | CONF0_RE);
+
+ rtems_semaphore_release(priv->sem);
+
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_initiator_disable(void *spwtdp)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ /* Check priv */
+ if (priv == NULL)
+ return SPWTDP_ERR_NOINIT;
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Take SPWTDP lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return SPWTDP_ERR_ERROR;
+
+ unsigned int conf0 = REG_READ(&priv->regs->conf[0]);
+ REG_WRITE(&priv->regs->conf[0], conf0 & ~(CONF0_TE));
+
+ rtems_semaphore_release(priv->sem);
+
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_target_disable(void *spwtdp)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ /* Check priv */
+ if (priv == NULL)
+ return SPWTDP_ERR_NOINIT;
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Take SPWTDP lock - Wait until we get semaphore */
+ if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+ != RTEMS_SUCCESSFUL)
+ return SPWTDP_ERR_ERROR;
+
+ unsigned int conf0 = REG_READ(&priv->regs->conf[0]);
+ REG_WRITE(&priv->regs->conf[0], conf0 & ~(CONF0_RE));
+
+ rtems_semaphore_release(priv->sem);
+
+ return SPWTDP_ERR_OK;
+}
+
+/* Get and Clear status */
+int spwtdp_status(void *spwtdp, uint32_t *sts, uint32_t clrmask)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ if (priv == NULL) {
+ /* SPWTDP not initialized */
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ unsigned int status = REG_READ(&priv->regs->stat[0]);
+ REG_WRITE(&priv->regs->stat[0], status & clrmask);
+
+ if (sts != NULL)
+ *sts = status;
+
+ return SPWTDP_ERR_OK;
+}
+
+/* Get and Clear interrupts */
+int spwtdp_interrupt_status(void *spwtdp, uint32_t *sts, uint32_t clrmask)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+ SPIN_IRQFLAGS(irqflags);
+
+ if (priv == NULL) {
+ /* SPWTDP not initialized */
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ SPIN_LOCK_IRQ(&priv->devlock, irqflags);
+ unsigned int status = REG_READ(&priv->regs->ists);
+ REG_WRITE(&priv->regs->ists, status & clrmask);
+ SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
+
+ if (sts != NULL)
+ *sts = status;
+
+ return SPWTDP_ERR_OK;
+}
+
+/* Unmask interrupts */
+int spwtdp_interrupt_unmask(void *spwtdp, uint32_t irqmask)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ if (priv == NULL) {
+ /* SPWTDP not initialized */
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ unsigned int ctrl = REG_READ(&priv->regs->ien);
+ REG_WRITE(&priv->regs->ien, ctrl | irqmask);
+
+ return SPWTDP_ERR_OK;
+}
+
+/* Mask interrupts */
+int spwtdp_interrupt_mask(void *spwtdp, uint32_t irqmask)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ if (priv == NULL) {
+ /* SPWTDP not initialized */
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ unsigned int ctrl = REG_READ(&priv->regs->ien);
+ REG_WRITE(&priv->regs->ien, ctrl & ~(irqmask));
+
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_isr_register(void *spwtdp, spwtdp_isr_t func, void *data)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+ SPIN_ISR_IRQFLAGS(irqflags);
+
+ if (priv == NULL) {
+ /* SPWTDP not initialized */
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check isr */
+ if (func == NULL) {
+ /* No ISR */
+ return SPWTDP_ERR_EINVAL;
+ }
+
+ priv->isr = func;
+ priv->isr_arg = data;
+
+ /* Register and Enable Interrupt at Interrupt controller */
+ drvmgr_interrupt_register(priv->dev, 0, "spwtdp", spwtdp_isr, priv);
+
+ /* Enable AMBA Interrupts */
+ SPIN_LOCK(&priv->devlock, irqflags);
+ unsigned int cfg0 = REG_READ(&priv->regs->conf[0]);
+ REG_WRITE(&priv->regs->conf[0], cfg0 | CONF0_AE);
+ SPIN_UNLOCK(&priv->devlock, irqflags);
+
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_isr_unregister(void *spwtdp)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+ SPIN_ISR_IRQFLAGS(irqflags);
+
+ /* Disable IRQS (and check for priv!=NULL) */
+ int ret=spwtdp_interrupt_mask(spwtdp, SPWTDP_IRQ_WCLEAR);
+ if (ret != SPWTDP_ERR_OK)
+ return ret;
+
+ /* Disable AMBA Interrupts */
+ SPIN_LOCK(&priv->devlock, irqflags);
+ unsigned int cfg0 = REG_READ(&priv->regs->conf[0]);
+ REG_WRITE(&priv->regs->conf[0], cfg0 & ~(CONF0_AE));
+ SPIN_UNLOCK(&priv->devlock, irqflags);
+
+ /* Disable Interrupt at Interrupt controller */
+ drvmgr_interrupt_unregister(priv->dev, 0, spwtdp_isr, priv);
+
+ /* Unregister isr */
+ priv->isr = NULL;
+ priv->isr_arg = NULL;
+
+ return SPWTDP_ERR_OK;
+}
+
/* Interrupt service routine for the SPWTDP core.
 *
 * Reads the interrupt status register; bails out when zero (the IRQ
 * line may be shared and another core raised it). Otherwise the handled
 * events are acknowledged (write-clear of ISTS under the device spin
 * lock) and the user-registered callback, if any, is invoked with the
 * captured status.
 */
STATIC void spwtdp_isr(void *arg)
{
	struct spwtdp_priv *priv = arg;
	unsigned int ists = REG_READ(&priv->regs->ists);
	SPIN_ISR_IRQFLAGS(irqflags);

	/* Return if the SPWTDP didn't generate the IRQ */
	if (ists == 0)
		return;

	SPIN_LOCK(&priv->devlock, irqflags);
	REG_WRITE(&priv->regs->ists, ists); /* clear handled interrupt events */
	SPIN_UNLOCK(&priv->devlock, irqflags);

	/* Let user Handle Interrupt */
	if (priv->isr!=NULL)
		priv->isr(ists, priv->isr_arg);

	return;
}
+
+int spwtdp_dat_et_get(void * spwtdp, spwtdp_time_t * val)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ if (priv == NULL) {
+ /* SPWTDP not initialized */
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check pointer */
+ if (val == NULL) {
+ return SPWTDP_ERR_EINVAL;
+ }
+
+ val->preamble = REG_READ(&priv->regs->dat_ctrl) & ETCTRL_PF;
+ unsigned int * buffer = (unsigned int *) val->data;
+ buffer[0] = REG_READ(&priv->regs->dat_et[0]);
+ buffer[1] = REG_READ(&priv->regs->dat_et[1]);
+ buffer[2] = REG_READ(&priv->regs->dat_et[2]);
+ buffer[3] = REG_READ(&priv->regs->dat_et[3]);
+ buffer[4] = REG_READ(&priv->regs->dat_et[4]);
+
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_tsrx_et_get(void * spwtdp, spwtdp_time_t * val)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ if (priv == NULL) {
+ /* SPWTDP not initialized */
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check pointer */
+ if (val == NULL) {
+ return SPWTDP_ERR_EINVAL;
+ }
+
+ val->preamble = REG_READ(&priv->regs->ts_rx_ctrl) & ETCTRL_PF;
+ unsigned int * buffer = (unsigned int *) val->data;
+ buffer[0] = REG_READ(&priv->regs->ts_rx_et[0]);
+ buffer[1] = REG_READ(&priv->regs->ts_rx_et[1]);
+ buffer[2] = REG_READ(&priv->regs->ts_rx_et[2]);
+ buffer[3] = REG_READ(&priv->regs->ts_rx_et[3]);
+ buffer[4] = REG_READ(&priv->regs->ts_rx_et[4]);
+
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_tstx_et_get(void * spwtdp, spwtdp_time_t * val)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ if (priv == NULL) {
+ /* SPWTDP not initialized */
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check pointer */
+ if (val == NULL) {
+ return SPWTDP_ERR_EINVAL;
+ }
+
+ val->preamble = REG_READ(&priv->regs->ts_tx_ctrl) & ETCTRL_PF;
+ unsigned int * buffer = (unsigned int *) val->data;
+ buffer[0] = REG_READ(&priv->regs->ts_tx_et[0]);
+ buffer[1] = REG_READ(&priv->regs->ts_tx_et[1]);
+ buffer[2] = REG_READ(&priv->regs->ts_tx_et[2]);
+ buffer[3] = REG_READ(&priv->regs->ts_tx_et[3]);
+ buffer[4] = REG_READ(&priv->regs->ts_tx_et[4]);
+
+ return SPWTDP_ERR_OK;
+}
+
+int spwtdp_lat_et_get(void * spwtdp, spwtdp_time_t * val)
+{
+ struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+ if (priv == NULL) {
+ /* SPWTDP not initialized */
+ return SPWTDP_ERR_NOINIT;
+ }
+
+ if (priv->open == 0)
+ return SPWTDP_ERR_EINVAL;
+
+ /* Check pointer */
+ if (val == NULL) {
+ return SPWTDP_ERR_EINVAL;
+ }
+
+ val->preamble = REG_READ(&priv->regs->lat_ctrl) & ETCTRL_PF;
+ unsigned int * buffer = (unsigned int *) val->data;
+ buffer[0] = REG_READ(&priv->regs->lat_et[0]);
+ buffer[1] = REG_READ(&priv->regs->lat_et[1]);
+ buffer[2] = REG_READ(&priv->regs->lat_et[2]);
+ buffer[3] = REG_READ(&priv->regs->lat_et[3]);
+ buffer[4] = REG_READ(&priv->regs->lat_et[4]);
+
+ return SPWTDP_ERR_OK;
+}
+
+/* Read back the command elapsed time (preamble + ET). */
+int spwtdp_cmd_et_get(void * spwtdp, spwtdp_time_t * val)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+	unsigned int *word;
+	int i;
+
+	/* Driver must be initialized and the device opened */
+	if (priv == NULL)
+		return SPWTDP_ERR_NOINIT;
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Destination must be valid */
+	if (val == NULL)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Capture the preamble field and the five command ET words */
+	val->preamble = REG_READ(&priv->regs->cmd_ctrl) & ETCTRL_PF;
+	word = (unsigned int *) val->data;
+	for (i = 0; i < 5; i++)
+		word[i] = REG_READ(&priv->regs->cmd_et[i]);
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Program the TSTC field of the TX time-stamp control register on an
+ * initiator-configured SPWTDP core. */
+int spwtdp_initiator_tstx_conf(void * spwtdp, uint8_t tstc)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	/* Driver must be initialized */
+	if (priv == NULL)
+		return SPWTDP_ERR_NOINIT;
+
+	/* Device must be open and set up as initiator */
+	if (priv->open == 0 || priv->initiator != 1)
+		return SPWTDP_ERR_EINVAL;
+
+	REG_WRITE(&priv->regs->ts_tx_ctrl,
+		  (((uint32_t)tstc) << TSTXCTRL_TSTC_BIT) & TSTXCTRL_TSTC);
+
+	return SPWTDP_ERR_OK;
+}
+
+/*
+ * Load the command elapsed time on an initiator-configured SPWTDP core
+ * and signal "new command" (CTRL_NC) so the hardware picks it up.
+ *
+ * val - elapsed time; the five 32-bit ET words are taken from val.data.
+ *
+ * Returns SPWTDP_ERR_OK, or an error if the driver is not initialized,
+ * the device is not open / not an initiator, or the lock cannot be
+ * obtained.
+ */
+int spwtdp_initiator_cmd_et_set(void *spwtdp, spwtdp_time_t val)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	if (priv == NULL) {
+		/* SPWTDP not initialized */
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check if configured as initiator */
+	if (priv->initiator != 1)
+		return SPWTDP_ERR_EINVAL;
+
+	/*
+	 * Write the five command ET words. Earlier revisions wrote these
+	 * into lat_et[], the latched latency-time bank, which is read by
+	 * spwtdp_lat_et_get() and is not part of the command interface;
+	 * cmd_et[] (paired with cmd_ctrl below) is the intended target of
+	 * a "cmd_et_set" operation.
+	 */
+	unsigned int * buffer = (unsigned int *) val.data;
+	REG_WRITE(&priv->regs->cmd_et[0], buffer[0]);
+	REG_WRITE(&priv->regs->cmd_et[1], buffer[1]);
+	REG_WRITE(&priv->regs->cmd_et[2], buffer[2]);
+	REG_WRITE(&priv->regs->cmd_et[3], buffer[3]);
+	REG_WRITE(&priv->regs->cmd_et[4], buffer[4]);
+
+	/* Take SPWTDP lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return SPWTDP_ERR_ERROR;
+
+	/* Signal new command */
+	unsigned int ctrl = REG_READ(&priv->regs->cmd_ctrl);
+	REG_WRITE(&priv->regs->cmd_ctrl, ctrl | CTRL_NC);
+
+	rtems_semaphore_release(priv->sem);
+
+	return SPWTDP_ERR_OK;
+}
+
+/*
+ * Set the SpaceWire time-code (SPWTC) field of the command control
+ * register on an initiator-configured SPWTDP core. All other bits of
+ * cmd_ctrl are preserved.
+ *
+ * Returns SPWTDP_ERR_OK, or an error if the driver is not initialized,
+ * the device is not open / not an initiator, or the lock cannot be
+ * obtained.
+ */
+int spwtdp_initiator_cmd_spwtc_set(void *spwtdp, uint8_t spwtc)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	if (priv == NULL) {
+		/* SPWTDP not initialized */
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check if configured as initiator */
+	if (priv->initiator != 1)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Take SPWTDP lock - Wait until we get semaphore.
+	 * The lock covers the read-modify-write of cmd_ctrl below. */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return SPWTDP_ERR_ERROR;
+
+	/* Clear the old SPWTC field, then merge in the new value */
+	unsigned int ctrl = (REG_READ(&priv->regs->cmd_ctrl) &~ CTRL_SPWTC);
+	REG_WRITE(&priv->regs->cmd_ctrl,
+		ctrl | (((uint32_t)spwtc << CTRL_SPWTC_BIT) & CTRL_SPWTC));
+
+	rtems_semaphore_release(priv->sem);
+
+	return SPWTDP_ERR_OK;
+}
+
+/* Only the NC and IS control bits may be set through "options" below */
+#define CTRL_TAR_MASK (CTRL_NC|CTRL_IS)
+/*
+ * Configure the command control register of a target-configured SPWTDP
+ * core: SpaceWire time-code (SPWTC), CPF field, and the NC/IS option
+ * bits. The register is written as a whole (no read-modify-write).
+ *
+ * Returns SPWTDP_ERR_OK, or an error if the driver is not initialized,
+ * the device is not open / not a target, or the lock cannot be
+ * obtained.
+ */
+int spwtdp_target_cmd_conf(void *spwtdp, uint8_t spwtc, uint16_t cpf,
+			   uint32_t options)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+
+	if (priv == NULL) {
+		/* SPWTDP not initialized */
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Check if configured as target */
+	if (priv->target != 1)
+		return SPWTDP_ERR_EINVAL;
+
+	/* Take SPWTDP lock - Wait until we get semaphore */
+	if (rtems_semaphore_obtain(priv->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
+	    != RTEMS_SUCCESSFUL)
+		return SPWTDP_ERR_ERROR;
+
+	REG_WRITE(&priv->regs->cmd_ctrl,
+		  (options & CTRL_TAR_MASK) |
+		  ((cpf << CTRL_CPF_BIT) & CTRL_CPF) |
+		  (((uint32_t)spwtc << CTRL_SPWTC_BIT) & CTRL_SPWTC));
+
+	rtems_semaphore_release(priv->sem);
+
+	return SPWTDP_ERR_OK;
+}
+
+/*
+ * Decode the coarse/fine time precision (in octets) from the CCSDS
+ * P-field found in the datation control register. Either output
+ * pointer may be NULL if the caller is not interested in that value.
+ *
+ * Returns SPWTDP_ERR_OK, SPWTDP_ERR_NOINIT/EINVAL on state errors, or
+ * SPWTDP_ERR_ERROR when the P-field describes a format this decoder
+ * does not understand.
+ */
+int spwtdp_precision_get(void *spwtdp, uint8_t *fine, uint8_t *coarse)
+{
+	struct spwtdp_priv *priv = (struct spwtdp_priv *)spwtdp;
+	int coarse_precision, fine_precision;
+
+	if (priv == NULL) {
+		/* SPWTDP not initialized */
+		return SPWTDP_ERR_NOINIT;
+	}
+
+	if (priv->open == 0)
+		return SPWTDP_ERR_EINVAL;
+
+	unsigned int preamble = REG_READ(&priv->regs->dat_ctrl);
+
+	/* A set extension flag (bit 7) means a second P-field octet we
+	 * cannot decode */
+	if (preamble & 0x80) {
+		DBG("Pfield second extension set: unknown format");
+		return SPWTDP_ERR_ERROR;
+	}
+	/* Only CCSDS unsegmented code (time-code ID 1 or 2) is handled */
+	if (!((preamble & 0x7000) == 0x2000 || (preamble & 0x7000) == 0x1000)) {
+		DBG(" PField indicates not unsegmented code: unknown format");
+		return SPWTDP_ERR_ERROR;
+	}
+	/*
+	 * coarse_precision = 32;
+	 * fine_precision = 24;
+	 */
+	coarse_precision = ((preamble >> 10) & 0x3) + 1;
+	/* NOTE(review): this and the matching fine-time adjustment below
+	 * are unreachable - (preamble & 0x80) already returned above. */
+	if (preamble & 0x80)
+		coarse_precision += (preamble >> 5) & 0x3;
+	fine_precision = (preamble >> 8) & 0x3;
+	if (preamble & 0x80)
+		fine_precision += (preamble >> 2) & 0x7;
+	if (coarse!=NULL)
+		*coarse = coarse_precision;
+	if (fine!=NULL)
+		*fine = fine_precision;
+
+	return SPWTDP_ERR_OK;
+}
+
diff --git a/bsps/shared/grlib/stat/l4stat.c b/bsps/shared/grlib/stat/l4stat.c
new file mode 100644
index 0000000000..ee4ae7d6a4
--- /dev/null
+++ b/bsps/shared/grlib/stat/l4stat.c
@@ -0,0 +1,626 @@
+/* L4STAT APB-Register Driver.
+ *
+ * COPYRIGHT (c) 2017.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <rtems/libio.h>
+#include <stdio.h>
+#include <bsp.h>
+#include <rtems/bspIo.h> /* printk */
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/l4stat.h>
+
+/*#define STATIC*/
+#define STATIC static
+
+/*#define DEBUG 1*/
+
+#ifdef DEBUG
+#define DBG(x...) printf(x)
+#else
+#define DBG(x...)
+#endif
+
+#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
+#define REG_READ(addr) (*(volatile unsigned int *)(addr))
+
+
+/*
+ * L4STAT CCTRL register fields
+ * DEFINED IN HEADER file
+ */
+
+/* L4STAT/L3STAT APB register map; all registers are 32-bit words and
+ * the comments give the byte offset of each bank from the core base. */
+struct l4stat_regs {
+	unsigned int cval[32];	/* 0x000 - counter value registers */
+	unsigned int cctrl[32];	/* 0x080 - counter control registers */
+	unsigned int cmax[32];	/* 0x100 - counter maximum registers */
+	unsigned int timestamp;	/* 0x180 - timestamp register */
+};
+
+struct l4stat_priv {
+ struct drvmgr_dev *dev;
+
+ /* L4STAT control registers */
+ struct l4stat_regs *regs;
+
+ /* L4STAT driver register */
+ char devname[9];
+
+ int ncpu;
+ int ncnt;
+
+ /* L4stat capabilities */
+ int max_count_support;
+ int internalahb_event_support;
+ int dsu_event_support;
+ int external_event_support;
+ int ahbtrace_event_support;
+};
+
+STATIC struct l4stat_priv *l4statpriv = NULL;
+
+/* Event names */
+#ifdef DEBUG
+#define L4STAT_BAD_CMD "N/A. Wrong event"
+STATIC const char *l4stat_event_names[] = {
+ "Instruction cache miss", /* 0x00 */
+ "Instruction MMU TLB miss", /* 0x01 */
+ "Instruction cache hold", /* 0x02 */
+ "Instruction MMU hold", /* 0x03 */
+ L4STAT_BAD_CMD, /* 0x04 */
+ L4STAT_BAD_CMD, /* 0x05 */
+ L4STAT_BAD_CMD, /* 0x06 */
+ L4STAT_BAD_CMD, /* 0x07 */
+ "Data cache (read) miss", /* 0x08 */
+ "Data MMU TLB miss", /* 0x09 */
+ "Data cache hold", /* 0x0a */
+ "Data MMU hold", /* 0x0b */
+ L4STAT_BAD_CMD, /* 0x0c */
+ L4STAT_BAD_CMD, /* 0x0d */
+ L4STAT_BAD_CMD, /* 0x0e */
+ L4STAT_BAD_CMD, /* 0x0f */
+ "Data write buffer hold", /* 0x10 */
+ "Total instruction count", /* 0x11 */
+ "Integer instruction count", /* 0x12 */
+ "Floating-point unit instruction count", /* 0x13 */
+ "Branch prediction miss", /* 0x14 */
+ "Execution time, exluding debug mode", /* 0x15 */
+ L4STAT_BAD_CMD, /* 0x16 */
+ "AHB utilization (per AHB master)", /* 0x17 */
+ "AHB utilization (total)", /* 0x18 */
+ L4STAT_BAD_CMD, /* 0x19 */
+ L4STAT_BAD_CMD, /* 0x1a */
+ L4STAT_BAD_CMD, /* 0x1b */
+ L4STAT_BAD_CMD, /* 0x1c */
+ L4STAT_BAD_CMD, /* 0x1d */
+ L4STAT_BAD_CMD, /* 0x1e */
+ L4STAT_BAD_CMD, /* 0x1f */
+ L4STAT_BAD_CMD, /* 0x20 */
+ L4STAT_BAD_CMD, /* 0x21 */
+ "Integer branches", /* 0x22 */
+ L4STAT_BAD_CMD, /* 0x23 */
+ L4STAT_BAD_CMD, /* 0x24 */
+ L4STAT_BAD_CMD, /* 0x25 */
+ L4STAT_BAD_CMD, /* 0x26 */
+ L4STAT_BAD_CMD, /* 0x27 */
+ "CALL instructions", /* 0x28 */
+ L4STAT_BAD_CMD, /* 0x29 */
+ L4STAT_BAD_CMD, /* 0x2a */
+ L4STAT_BAD_CMD, /* 0x2b */
+ L4STAT_BAD_CMD, /* 0x2c */
+ L4STAT_BAD_CMD, /* 0x2d */
+ L4STAT_BAD_CMD, /* 0x2e */
+ L4STAT_BAD_CMD, /* 0x2f */
+ "Regular type 2 instructions", /* 0x30 */
+ L4STAT_BAD_CMD, /* 0x31 */
+ L4STAT_BAD_CMD, /* 0x32 */
+ L4STAT_BAD_CMD, /* 0x33 */
+ L4STAT_BAD_CMD, /* 0x34 */
+ L4STAT_BAD_CMD, /* 0x35 */
+ L4STAT_BAD_CMD, /* 0x36 */
+ L4STAT_BAD_CMD, /* 0x37 */
+ "LOAD and STORE instructions", /* 0x38 */
+ "LOAD instructions", /* 0x39 */
+ "STORE instructions", /* 0x3a */
+ L4STAT_BAD_CMD, /* 0x3b */
+ L4STAT_BAD_CMD, /* 0x3c */
+ L4STAT_BAD_CMD, /* 0x3d */
+ L4STAT_BAD_CMD, /* 0x3e */
+ L4STAT_BAD_CMD, /* 0x3f */
+ "AHB IDLE cycles", /* 0x40 */
+ "AHB BUSY cycles", /* 0x41 */
+ "AHB Non-Seq. transfers", /* 0x42 */
+ "AHB Seq. transfers", /* 0x43 */
+ "AHB read accesses", /* 0x44 */
+ "AHB write accesses", /* 0x45 */
+ "AHB byte accesses", /* 0x46 */
+ "AHB half-word accesses", /* 0x47 */
+ "AHB word accesses", /* 0x48 */
+ "AHB double word accesses", /* 0x49 */
+ "AHB quad word accesses", /* 0x4A */
+ "AHB eight word accesses", /* 0x4B */
+ "AHB waitstates", /* 0x4C */
+ "AHB RETRY responses", /* 0x4D */
+ "AHB SPLIT responses", /* 0x4E */
+ "AHB SPLIT delay", /* 0x4F */
+ "AHB bus locked", /* 0x50 */
+ L4STAT_BAD_CMD, /* 0x51 */
+ L4STAT_BAD_CMD, /* 0x52 */
+ L4STAT_BAD_CMD, /* 0x53 */
+ L4STAT_BAD_CMD, /* 0x54 */
+ L4STAT_BAD_CMD, /* 0x55 */
+ L4STAT_BAD_CMD, /* 0x56 */
+ L4STAT_BAD_CMD, /* 0x57 */
+ L4STAT_BAD_CMD, /* 0x58 */
+ L4STAT_BAD_CMD, /* 0x59 */
+ L4STAT_BAD_CMD, /* 0x5a */
+ L4STAT_BAD_CMD, /* 0x5b */
+ L4STAT_BAD_CMD, /* 0x5c */
+ L4STAT_BAD_CMD, /* 0x5d */
+ L4STAT_BAD_CMD, /* 0x5e */
+ L4STAT_BAD_CMD, /* 0x5f */
+ "external event 0", /* 0x60 */
+ "external event 1", /* 0x61 */
+ "external event 2", /* 0x62 */
+ "external event 3", /* 0x63 */
+ "external event 4", /* 0x64 */
+ "external event 5", /* 0x65 */
+ "external event 6", /* 0x66 */
+ "external event 7", /* 0x67 */
+ "external event 8", /* 0x68 */
+ "external event 9", /* 0x69 */
+ "external event 10", /* 0x6A */
+ "external event 11", /* 0x6B */
+ "external event 12", /* 0x6C */
+ "external event 13", /* 0x6D */
+ "external event 14", /* 0x6E */
+ "external event 15", /* 0x6F */
+ "AHB IDLE cycles (2)", /* 0x70 */
+ "AHB BUSY cycles (2)", /* 0x71 */
+ "AHB Non-Seq. transfers (2)", /* 0x72 */
+ "AHB Seq. transfers (2)", /* 0x73 */
+ "AHB read accesses (2)", /* 0x74 */
+ "AHB write accesses (2)", /* 0x75 */
+ "AHB byte accesses (2)", /* 0x76 */
+ "AHB half-word accesses (2)", /* 0x77 */
+ "AHB word accesses (2)", /* 0x78 */
+ "AHB double word accesses (2)", /* 0x79 */
+ "AHB quad word accesses (2)", /* 0x7A */
+ "AHB eight word accesses (2)", /* 0x7B */
+ "AHB waitstates (2)", /* 0x7C */
+ "AHB RETRY responses (2)", /* 0x7D */
+ "AHB SPLIT responses (2)", /* 0x7E */
+ "AHB SPLIT delay (2)", /* 0x7F */
+ "PMC: master 0 has grant", /* 0x80 */
+ "PMC: master 1 has grant", /* 0x81 */
+ "PMC: master 2 has grant", /* 0x82 */
+ "PMC: master 3 has grant", /* 0x83 */
+ "PMC: master 4 has grant", /* 0x84 */
+ "PMC: master 5 has grant", /* 0x85 */
+ "PMC: master 6 has grant", /* 0x86 */
+ "PMC: master 7 has grant", /* 0x87 */
+ "PMC: master 8 has grant", /* 0x88 */
+ "PMC: master 9 has grant", /* 0x89 */
+ "PMC: master 10 has grant", /* 0x8A */
+ "PMC: master 11 has grant", /* 0x8B */
+ "PMC: master 12 has grant", /* 0x8C */
+ "PMC: master 13 has grant", /* 0x8D */
+ "PMC: master 14 has grant", /* 0x8E */
+ "PMC: master 15 has grant", /* 0x8F */
+ "PMC: master 0 lacks grant", /* 0x90 */
+ "PMC: master 1 lacks grant", /* 0x91 */
+ "PMC: master 2 lacks grant", /* 0x92 */
+ "PMC: master 3 lacks grant", /* 0x93 */
+ "PMC: master 4 lacks grant", /* 0x94 */
+ "PMC: master 5 lacks grant", /* 0x95 */
+ "PMC: master 6 lacks grant", /* 0x96 */
+ "PMC: master 7 lacks grant", /* 0x97 */
+ "PMC: master 8 lacks grant", /* 0x98 */
+ "PMC: master 9 lacks grant", /* 0x99 */
+ "PMC: master 10 lacks grant", /* 0x9A */
+ "PMC: master 11 lacks grant", /* 0x9B */
+ "PMC: master 12 lacks grant", /* 0x9C */
+ "PMC: master 13 lacks grant", /* 0x9D */
+ "PMC: master 14 lacks grant", /* 0x9E */
+ "PMC: master 15 lacks grant", /* 0x9F */
+ ""
+};
+#endif /* DEBUG */
+
+/* Driver prototypes */
+
+STATIC int l4stat_init(struct l4stat_priv *priv);
+
+int l4stat_init1(struct drvmgr_dev *dev);
+
+struct drvmgr_drv_ops l4stat_ops =
+{
+ .init = {l4stat_init1, NULL, NULL, NULL},
+ .remove = NULL,
+ .info = NULL
+};
+
+struct amba_dev_id l4stat_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_L4STAT},
+ {VENDOR_GAISLER, GAISLER_L3STAT},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info l4stat_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_L4STAT_ID,/* Driver ID */
+ "L4STAT_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &l4stat_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ sizeof(struct l4stat_priv), /* Let DRVMGR allocate for us */
+ },
+ &l4stat_ids[0],
+};
+
+/* Register the L4STAT driver with the driver manager so matching AMBA
+ * PnP devices (L4STAT/L3STAT) are probed during bus initialization. */
+void l4stat_register_drv (void)
+{
+	DBG("Registering L4STAT driver\n");
+	drvmgr_drv_register(&l4stat_drv_info.general);
+}
+
+STATIC int l4stat_init(struct l4stat_priv *priv)
+{
+ struct ambapp_apb_info *apb;
+ struct amba_dev_info *ainfo = priv->dev->businfo;
+ unsigned int tmp;
+ unsigned short dev_id;
+
+ /* Find L4STAT core from Plug&Play information */
+ apb = ainfo->info.apb_slv;
+
+ /* Check if L4STAT or L3STAT core from Plug&Play information */
+ dev_id = ainfo->id.device;
+
+ /* Check if rev 1 of core (only rev 0 supported) */
+ if (apb->ver != 0) {
+ DBG("L4STAT rev 0 only supported.\n");
+ return L4STAT_ERR_ERROR;
+ }
+
+ /* Found L4STAT core, init private structure */
+ priv->regs = (struct l4stat_regs *)apb->start;
+
+ DBG("L4STAT regs 0x%08x\n", (unsigned int) priv->regs);
+
+ /* Find L4STAT capabilities */
+ tmp = REG_READ(&priv->regs->cctrl[0]);
+ /* The CPU field in the register is just information of the
+ * cpus that are connected to the stat unit, but it is not
+ * really used for anything else. I can still have more masters
+ * on the bus (e.g. IOMMU) that I can collect stats from,
+ * so it makes no sense to limit the cpus to the actual cpus.
+ * Therefore, I will take the maximum number as 16. */
+ /*priv->ncpu = ((tmp & CCTRL_NCPU) >> CCTRL_NCPU_BIT) + 1;*/
+ priv->ncpu = 16;
+ if (dev_id == GAISLER_L3STAT) {
+ priv->ncnt = ((tmp & CCTRL_NCNT_L3STAT) >> CCTRL_NCNT_BIT) + 1;
+ }else{
+ priv->ncnt = ((tmp & CCTRL_NCNT) >> CCTRL_NCNT_BIT) + 1;
+ }
+ priv->max_count_support = (tmp & CCTRL_MC) >> CCTRL_MC_BIT;
+ priv->internalahb_event_support = (tmp & CCTRL_IA) >> CCTRL_IA_BIT;
+ priv->dsu_event_support = (tmp & CCTRL_DS) >> CCTRL_DS_BIT;
+ priv->external_event_support = (tmp & CCTRL_EE) >> CCTRL_EE_BIT;
+ priv->ahbtrace_event_support = (tmp & CCTRL_AE) >> CCTRL_AE_BIT;
+
+ /* DEBUG print */
+ DBG("L4STAT with following capabilities:\n");
+ DBG(" -NCPU: %d, NCNT: %d, MaxCNT: %s\n", priv->ncpu, priv->ncnt,
+ (priv->max_count_support?"Available":"N/A"));
+ DBG(" -Events= InternalAHB: %s, DSU: %s, External: %s, AHBTRACE: %s\n",
+ (priv->internalahb_event_support?"Available":"N/A"),
+ (priv->dsu_event_support?"Available":"N/A"),
+ (priv->external_event_support?"Available":"N/A"),
+ (priv->ahbtrace_event_support?"Available":"N/A"));
+
+ return L4STAT_ERR_OK;
+}
+
+/* Driver manager init level 1: claim the (single) L4STAT device,
+ * probe its capabilities and set the device name. */
+int l4stat_init1(struct drvmgr_dev *dev)
+{
+	struct l4stat_priv *drv = dev->priv;
+
+	DBG("L4STAT[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+
+	/* A single core is supported; reject any further instances */
+	if (l4statpriv) {
+		DBG("Driver only supports one L4STAT core\n");
+		return DRVMGR_FAIL;
+	}
+
+	/* Private memory is allocated for us by the driver manager */
+	if (drv == NULL)
+		return DRVMGR_NOMEM;
+
+	drv->dev = dev;
+	l4statpriv = drv;
+
+	/* Initialize the driver state from hardware */
+	if (l4stat_init(drv) != L4STAT_ERR_OK)
+		return DRVMGR_FAIL;
+
+	/* Startup Action:
+	 *  - None
+	 */
+
+	/* Device name */
+	sprintf(drv->devname, "l4stat0");
+
+	return DRVMGR_OK;
+}
+
+int l4stat_counter_enable(unsigned int counter, int event, int cpu, int options)
+{
+ struct l4stat_priv *priv = l4statpriv;
+ unsigned int ctrl;
+
+ if (priv == NULL) {
+ DBG("L4STAT Device not initialized\n");
+ return L4STAT_ERR_EINVAL;
+ }
+
+ if (counter >= priv->ncnt) {
+ DBG("L4STAT Wrong counter\n");
+ return L4STAT_ERR_EINVAL;
+ }
+
+ if ((cpu < 0) || (cpu >= priv->ncpu)) {
+ DBG("L4STAT Wrong cpu\n");
+ return L4STAT_ERR_EINVAL;
+ }
+
+ if ((options & L4STAT_OPTIONS_MAXIMUM_DURATION) ||
+ (options & L4STAT_OPTIONS_EVENT_LEVEL_ENABLE)) {
+ if (priv->max_count_support == 0) {
+ DBG("L4STAT maximum duration count not supported\n");
+ return L4STAT_ERR_IMPLEMENTED;
+ }
+ }
+
+ /* Check event is supported */
+ if ((event < 0) || (event >= 0x80)) {
+ DBG("L4STAT Wrong event\n");
+ return L4STAT_ERR_EINVAL;
+ }
+ if ((event == 0x18) || (event == 0x17)) {
+ if (priv->internalahb_event_support == 0) {
+ DBG("L4STAT internal ahb event not supported\n");
+ return L4STAT_ERR_IMPLEMENTED;
+ }
+ }
+ if ((event >= 0x40) && (event < 0x60)) {
+ if (priv->dsu_event_support == 0) {
+ DBG("L4STAT dsu event not supported\n");
+ return L4STAT_ERR_IMPLEMENTED;
+ }
+ }
+ if ((event >= 0x60) && (event < 0x70)) {
+ if (priv->external_event_support == 0) {
+ DBG("L4STAT external event not supported\n");
+ return L4STAT_ERR_IMPLEMENTED;
+ }
+ }
+ if ((event >= 0x70) && (event < 0x80)) {
+ if (priv->ahbtrace_event_support == 0) {
+ DBG("L4STAT ahbtrace event not supported\n");
+ return L4STAT_ERR_IMPLEMENTED;
+ }
+ }
+
+ /* Prepare counter control */
+ ctrl = (options & ~(CCTRL_EVENTID | CCTRL_CPUAHBM));
+ /* Put event id */
+ ctrl = (ctrl | ((event << CCTRL_EVENTID_BIT) & CCTRL_EVENTID));
+ /* Put cpu id */
+ ctrl = (ctrl | ((cpu << CCTRL_CPUAHBM_BIT) & CCTRL_CPUAHBM));
+ /* Enable counter */
+ ctrl = (ctrl | CCTRL_EN);
+
+ REG_WRITE(&priv->regs->cctrl[counter], ctrl);
+
+ /* DEBUG print */
+ DBG("L4STAT COUNTER[%d] enabled with event: %s, cpu: %d\n", counter,
+ l4stat_event_names[event],cpu);
+
+ return L4STAT_ERR_OK;
+}
+
+/* Stop a statistics counter by clearing its control register. */
+int l4stat_counter_disable(unsigned int counter)
+{
+	struct l4stat_priv *drv = l4statpriv;
+
+	/* Driver must have been initialized by the driver manager */
+	if (drv == NULL) {
+		DBG("L4STAT Device not initialized\n");
+		return L4STAT_ERR_EINVAL;
+	}
+
+	/* Counter index must be within the implemented range */
+	if (counter >= drv->ncnt) {
+		DBG("L4STAT Wrong counter\n");
+		return L4STAT_ERR_EINVAL;
+	}
+
+	/* A zero control word disables the counter */
+	REG_WRITE(&drv->regs->cctrl[counter], 0);
+
+	DBG("L4STAT COUNTER[%d] disabled\n", counter);
+
+	return L4STAT_ERR_OK;
+}
+
+/* Read the current value of a statistics counter into *val. */
+int l4stat_counter_get(unsigned int counter, uint32_t * val)
+{
+	struct l4stat_priv *drv = l4statpriv;
+
+	/* Driver must have been initialized by the driver manager */
+	if (drv == NULL) {
+		DBG("L4STAT Device not initialized\n");
+		return L4STAT_ERR_EINVAL;
+	}
+
+	/* Counter index must be within the implemented range */
+	if (counter >= drv->ncnt) {
+		DBG("L4STAT Wrong counter\n");
+		return L4STAT_ERR_EINVAL;
+	}
+
+	/* Result pointer must be valid */
+	if (val == NULL) {
+		DBG("L4STAT Wrong pointer\n");
+		return L4STAT_ERR_EINVAL;
+	}
+
+	*val = REG_READ(&drv->regs->cval[counter]);
+
+	return L4STAT_ERR_OK;
+}
+
+int l4stat_counter_set(unsigned int counter, uint32_t val)
+{
+ struct l4stat_priv *priv = l4statpriv;
+
+ if (priv == NULL) {
+ DBG("L4STAT Device not initialized\n");
+ return L4STAT_ERR_EINVAL;
+ }
+
+ if (counter >= priv->ncnt) {
+ DBG("L4STAT Wrong counter\n");
+ return L4STAT_ERR_EINVAL;
+ }
+
+ REG_WRITE(&priv->regs->cval[counter],val);
+
+ return L4STAT_ERR_OK;
+}
+
+int l4stat_counter_max_get(unsigned int counter, uint32_t * val)
+{
+ struct l4stat_priv *priv = l4statpriv;
+
+ if (priv == NULL) {
+ DBG("L4STAT Device not initialized\n");
+ return L4STAT_ERR_EINVAL;
+ }
+
+ if (counter >= priv->ncnt) {
+ DBG("L4STAT Wrong counter\n");
+ return L4STAT_ERR_EINVAL;
+ }
+
+ if (val == NULL) {
+ DBG("L4STAT Wrong pointer\n");
+ return L4STAT_ERR_EINVAL;
+ }
+
+ *val = REG_READ(&priv->regs->cmax[counter]);
+
+ return L4STAT_ERR_OK;
+}
+
+int l4stat_counter_max_set(unsigned int counter, uint32_t val)
+{
+ struct l4stat_priv *priv = l4statpriv;
+
+ if (priv == NULL) {
+ DBG("L4STAT Device not initialized\n");
+ return L4STAT_ERR_EINVAL;
+ }
+
+ if (counter >= priv->ncnt) {
+ DBG("L4STAT Wrong counter\n");
+ return L4STAT_ERR_EINVAL;
+ }
+
+ REG_WRITE(&priv->regs->cmax[counter],val);
+
+ return L4STAT_ERR_OK;
+}
+
+int l4stat_tstamp_get(uint32_t * val)
+{
+ struct l4stat_priv *priv = l4statpriv;
+
+ if (priv == NULL) {
+ DBG("L4STAT Device not initialized\n");
+ return L4STAT_ERR_EINVAL;
+ }
+
+ if (val == NULL) {
+ DBG("L4STAT Wrong pointer\n");
+ return L4STAT_ERR_EINVAL;
+ }
+
+ *val = REG_READ(&priv->regs->timestamp);
+
+ return L4STAT_ERR_OK;
+}
+
+/* Write the timestamp register. */
+int l4stat_tstamp_set(uint32_t val)
+{
+	struct l4stat_priv *drv = l4statpriv;
+
+	/* Driver must have been initialized by the driver manager */
+	if (drv == NULL) {
+		DBG("L4STAT Device not initialized\n");
+		return L4STAT_ERR_EINVAL;
+	}
+
+	REG_WRITE(&drv->regs->timestamp, val);
+
+	return L4STAT_ERR_OK;
+}
+
+int l4stat_counter_print(unsigned int counter)
+{
+#ifdef DEBUG
+ struct l4stat_priv *priv = l4statpriv;
+ unsigned int val;
+ unsigned int ctrl;
+ unsigned int event;
+
+ if (priv == NULL) {
+ DBG("L4STAT Device not initialized\n");
+ return L4STAT_ERR_EINVAL;
+ }
+
+ if (counter >= priv->ncnt) {
+ DBG("L4STAT Wrong counter\n");
+ return L4STAT_ERR_EINVAL;
+ }
+
+ /* Get counter val*/
+ val = REG_READ(&priv->regs->cval[counter]);
+
+ /* Get counter info*/
+ ctrl = REG_READ(&priv->regs->cctrl[counter]);
+ if ((ctrl & CCTRL_EN) == 0) {
+ DBG("L4STAT COUNTER[%d] disabled\n", counter);
+ return L4STAT_ERR_OK;
+ }
+
+ event = (ctrl & CCTRL_EVENTID) >> CCTRL_EVENTID_BIT;
+
+ /* DEBUG print */
+ DBG("L4STAT COUNTER[%d], Event: %s, Count: %d [0x%08x]\n",
+ counter, l4stat_event_names[event],val,val);
+#endif /* DEBUG */
+
+ return L4STAT_ERR_OK;
+}
+
diff --git a/bsps/shared/grlib/time/grctm.c b/bsps/shared/grlib/time/grctm.c
new file mode 100644
index 0000000000..038c3ddf34
--- /dev/null
+++ b/bsps/shared/grlib/time/grctm.c
@@ -0,0 +1,411 @@
+/* GRCTM - CCSDS Time Manager - register driver interface.
+ *
+ * COPYRIGHT (c) 2009.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <grlib/grctm.h>
+
+#include <grlib/grlib_impl.h>
+
+/* Private structure of GRCTM driver */
+struct grctm_priv {
+ struct drvmgr_dev *dev;
+ struct grctm_regs *regs;
+ int open;
+
+ grctm_isr_t user_isr;
+ void *user_isr_arg;
+
+ struct grctm_stats stats;
+};
+
+void grctm_isr(void *data);
+
+struct amba_drv_info grctm_drv_info;
+
+void *grctm_open(int minor)
+{
+ struct grctm_priv *priv;
+ struct drvmgr_dev *dev;
+
+ /* Get Device from Minor */
+ if ( drvmgr_get_dev(&grctm_drv_info.general, minor, &dev) ) {
+ return NULL;
+ }
+
+ priv = dev->priv;
+ if ( (priv == NULL) || priv->open )
+ return NULL;
+
+ /* Set initial state of software */
+ priv->open = 1;
+
+ /* Clear Statistics */
+ grctm_clr_stats(priv);
+ priv->user_isr = NULL;
+ priv->user_isr_arg = NULL;
+
+ return priv;
+}
+
+void grctm_close(void *grctm)
+{
+ struct grctm_priv *priv = (struct grctm_priv *)grctm;
+
+ if ( priv->open == 0 )
+ return;
+
+ /* Reset Hardware */
+ grctm_reset(priv);
+
+ priv->open = 0;
+}
+
+/* Hardware Reset of GRCTM.
+ * Writes the reset request to GRR and busy-waits (bounded) for the
+ * reset bit (bit 0) to self-clear. The 0x55 in the top byte is
+ * presumably a write-protection key - the driver pairs 0x55/0xAA
+ * writes to GCR elsewhere; TODO confirm against GRCTM documentation.
+ * Returns 0 on success, -1 if the core never left reset. */
+int grctm_reset(void *grctm)
+{
+	struct grctm_priv *priv = grctm;
+	struct grctm_regs *r = priv->regs;
+
+	r->grr = 0x55000001;
+
+	/* Bounded poll so a stuck core cannot hang the caller */
+	int i = 1000;
+	while ((r->grr & 1) && i > 0) {
+		i--;
+	}
+
+	return i ? 0 : -1;
+}
+
+void grctm_int_enable(void *grctm)
+{
+ struct grctm_priv *priv = (struct grctm_priv *)grctm;
+
+ /* Register and Enable Interrupt at Interrupt controller */
+ drvmgr_interrupt_register(priv->dev, 0, "grctm", grctm_isr, priv);
+}
+
+void grctm_int_disable(void *grctm)
+{
+ struct grctm_priv *priv = (struct grctm_priv *)grctm;
+
+ /* Enable Interrupt at Interrupt controller */
+ drvmgr_interrupt_unregister(priv->dev, 0, grctm_isr, priv);
+}
+
+void grctm_clr_stats(void *grctm)
+{
+ struct grctm_priv *priv = (struct grctm_priv *)grctm;
+
+ memset(&priv->stats, 0, sizeof(priv->stats));
+}
+
+void grctm_get_stats(void *grctm, struct grctm_stats *stats)
+{
+ struct grctm_priv *priv = (struct grctm_priv *)grctm;
+
+ memcpy(stats, &priv->stats, sizeof(priv->stats));
+}
+
+/* Enable external synchronisation (from grctm) */
+void grctm_enable_ext_sync(void *grctm)
+{
+ struct grctm_priv *priv = grctm;
+
+ priv->regs->gcr |= 0x55<<24 | 1<<9;
+}
+
+/* Disable external synchronisation (from grctm) */
+void grctm_disable_ext_sync(void *grctm)
+{
+ struct grctm_priv *priv = grctm;
+
+ priv->regs->gcr &= ~((0xAA<<24) | 1<<9);
+}
+
+/* Enable TimeWire synchronisation */
+void grctm_enable_tw_sync(void *grctm)
+{
+ struct grctm_priv *priv = grctm;
+
+ priv->regs->gcr |= 0x55<<24 | 1<<8;
+}
+
+/* Disable TimeWire synchronisation */
+void grctm_disable_tw_sync(void *grctm)
+{
+ struct grctm_priv *priv = grctm;
+
+ priv->regs->gcr &= ~((0xAA<<24) | 1<<8);
+}
+
+/* Disable frequency synthesizer from driving ET */
+void grctm_disable_fs(void *grctm)
+{
+ struct grctm_priv *priv = grctm;
+
+ priv->regs->gcr |= 0x55<<24 | 1<<7;
+}
+
+/* Enable frequency synthesizer to drive ET */
+void grctm_enable_fs(void *grctm)
+{
+ struct grctm_priv *priv = grctm;
+
+ priv->regs->gcr &= ~((0xAA<<24) | 1<<7);
+}
+
+/* Return elapsed coarse time */
+unsigned int grctm_get_et_coarse(void *grctm)
+{
+ struct grctm_priv *priv = grctm;
+
+ return priv->regs->etcr;
+}
+
+/* Return elapsed fine time */
+unsigned int grctm_get_et_fine(void *grctm)
+{
+ struct grctm_priv *priv = grctm;
+
+ return (priv->regs->etfr & 0xffffff00) >> 8;
+}
+
+/* Return elapsed time (coarse and fine) as a single value: the 32-bit
+ * coarse time in bits 55:24, the 24-bit fine time in bits 23:0.
+ *
+ * The cast must be to unsigned long long: on the 32-bit targets this
+ * driver serves, "unsigned long" is 32 bits wide, so shifting the
+ * 32-bit coarse value left by 24 would silently discard its upper
+ * 24 bits. */
+unsigned long long grctm_get_et(void *grctm)
+{
+	return (((unsigned long long)grctm_get_et_coarse(grctm)) << 24) |
+	       grctm_get_et_fine(grctm);
+}
+
+
+/* Return 1 if specified datation has been latched */
+int grctm_is_dat_latched(void *grctm, int dat)
+{
+ struct grctm_priv *priv = grctm;
+
+ return (priv->regs->gsr >> dat) & 1;
+}
+
+/* Set triggering edge of datation input */
+void grctm_set_dat_edge(void *grctm, int dat, int edge)
+{
+ struct grctm_priv *priv = grctm;
+
+ priv->regs->gcr &= ~((0xAA<<24) | 1 << (10+dat));
+ priv->regs->gcr |= 0x55<<24 | (edge&1) << (10+dat);
+}
+
+/* Return latched datation coarse time */
+unsigned int grctm_get_dat_coarse(void *grctm, int dat)
+{
+ struct grctm_priv *priv = grctm;
+
+ switch (dat) {
+ case 0 : return priv->regs->dcr0;
+ case 1 : return priv->regs->dcr1;
+ case 2 : return priv->regs->dcr2;
+ default: return -1;
+ }
+}
+
+/* Return latched datation fine time */
+unsigned int grctm_get_dat_fine(void *grctm, int dat)
+{
+ struct grctm_priv *priv = grctm;
+
+ switch (dat) {
+ case 0 : return (priv->regs->dfr0 & 0xffffff00) >> 8;
+ case 1 : return (priv->regs->dfr1 & 0xffffff00) >> 8;
+ case 2 : return (priv->regs->dfr2 & 0xffffff00) >> 8;
+ default: return -1;
+ }
+}
+
+
+/* Return latched datation ET: 32-bit coarse time in bits 55:24,
+ * 24-bit fine time in bits 23:0.
+ *
+ * As in grctm_get_et(), the cast must be to unsigned long long so the
+ * left shift by 24 does not truncate on targets where unsigned long
+ * is 32 bits wide. */
+unsigned long long grctm_get_dat_et(void *grctm, int dat)
+{
+	return (((unsigned long long)grctm_get_dat_coarse(grctm, dat)) << 24) |
+		grctm_get_dat_fine(grctm, dat);
+}
+
+
+/* Return current pulse configuration */
+unsigned int grctm_get_pulse_reg(void *grctm, int pulse)
+{
+ struct grctm_priv *priv = grctm;
+
+ return priv->regs->pdr[pulse];
+}
+
+/* Set pulse register */
+void grctm_set_pulse_reg(void *grctm, int pulse, unsigned int val)
+{
+ struct grctm_priv *priv = grctm;
+
+ priv->regs->pdr[pulse] = val;
+}
+
+/* Configure pulse: pp = period, pw = width, pl = level, en = enable */
+void grctm_cfg_pulse(void *grctm, int pulse, int pp, int pw, int pl, int en)
+{
+ grctm_set_pulse_reg(grctm, pulse, (pp&0xf)<<20 | (pw&0xf)<<16 | (pl&1)<<10 | (en&1)<<1);
+}
+
+/* Enable pulse output */
+void grctm_enable_pulse(void *grctm, int pulse)
+{
+ struct grctm_priv *priv = grctm;
+
+ priv->regs->pdr[pulse] |= 0x2;
+}
+
+/* Disable pulse output */
+void grctm_disable_pulse(void *grctm, int pulse)
+{
+ struct grctm_priv *priv = grctm;
+
+ priv->regs->pdr[pulse] &= ~0x2;
+}
+
+/* Clear interrupts */
+void grctm_clear_irqs(void *grctm, int irqs)
+{
+ struct grctm_priv *priv = grctm;
+
+ priv->regs->picr = irqs;
+}
+
+/* Enable interrupts */
+void grctm_enable_irqs(void *grctm, int irqs)
+{
+ struct grctm_priv *priv = grctm;
+
+ priv->regs->imr = irqs;
+}
+
+/* Set Frequency synthesizer increment */
+void grctm_set_fs_incr(void *grctm, int incr)
+{
+ struct grctm_priv *priv = grctm;
+
+ priv->regs->fsir = incr;
+}
+
+/* Set ET increment */
+void grctm_set_et_incr(void *grctm, int incr)
+{
+ struct grctm_priv *priv = grctm;
+
+ priv->regs->etir = incr;
+}
+
+
+/*
+ * GRCTM interrupt service routine.
+ *
+ * Reads the masked pending-interrupt register (PIMR); zero means the
+ * interrupt was not for this core (e.g. a shared IRQ line) and is
+ * ignored. Otherwise statistics are updated and the user-installed
+ * handler, if any, is called with the pending mask.
+ */
+void grctm_isr(void *data)
+{
+	struct grctm_priv *priv = data;
+	struct grctm_stats *stats = &priv->stats;
+	unsigned int pimr = priv->regs->pimr;
+
+	/* Nothing pending for this core */
+	if ( pimr == 0 )
+		return;
+
+	stats->nirqs++;
+	if (pimr & PULSE0_IRQ )
+		stats->pulse++;
+
+	/* Let user Handle Interrupt */
+	if ( priv->user_isr )
+		priv->user_isr(pimr, priv->user_isr_arg);
+}
+
+struct grctm_regs *grctm_get_regs(void *grctm)
+{
+ struct grctm_priv *priv = (struct grctm_priv *)grctm;
+
+ return priv->regs;
+}
+
+void grctm_int_register(void *grctm, grctm_isr_t func, void *data)
+{
+ struct grctm_priv *priv = (struct grctm_priv *)grctm;
+
+ priv->user_isr = func;
+ priv->user_isr_arg = data;
+}
+
+/*** INTERFACE TO DRIVER MANAGER ***/
+
+/* Driver manager init level 2: allocate the per-device state, locate
+ * the core's registers via AMBA Plug&Play and reset the hardware.
+ * Returns 0 on success, -1 on allocation or probe failure. */
+static int grctm_init2(struct drvmgr_dev *dev)
+{
+	struct amba_dev_info *ambadev;
+	struct ambapp_core *pnpinfo;
+	struct grctm_priv *priv;
+	struct grctm_regs *regs;
+
+	priv = grlib_calloc(1, sizeof(*priv));
+	if ( priv == NULL )
+		return -1;
+	priv->dev = dev;
+	dev->priv = priv;
+
+	/* Get device information from AMBA PnP information */
+	ambadev = (struct amba_dev_info *)dev->businfo;
+	if ( ambadev == NULL ) {
+		/* NOTE(review): priv is not freed here, but it remains
+		 * reachable through dev->priv, so it is not leaked. */
+		return -1;
+	}
+	pnpinfo = &ambadev->info;
+	/* First AHB slave address range holds the register file */
+	regs = (struct grctm_regs *)pnpinfo->ahb_slv->start[0];
+
+	priv->regs = regs;
+
+	grctm_reset(priv);
+
+	return 0;
+}
+
+struct drvmgr_drv_ops grctm_ops =
+{
+ {NULL, grctm_init2, NULL, NULL},
+ NULL,
+ NULL
+};
+
+struct amba_dev_id grctm_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_GRCTM},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info grctm_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_GRCTM_ID, /* Driver ID */
+ "GRCTM_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &grctm_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+ },
+ &grctm_ids[0]
+};
+
+/* Register the grctm Driver */
+void grctm_register(void)
+{
+ drvmgr_drv_register(&grctm_drv_info.general);
+}
diff --git a/bsps/shared/grlib/time/spwcuc.c b/bsps/shared/grlib/time/spwcuc.c
new file mode 100644
index 0000000000..d742f2d225
--- /dev/null
+++ b/bsps/shared/grlib/time/spwcuc.c
@@ -0,0 +1,371 @@
+/* SPWCUC - SpaceWire - CCSDS unsegmented Code Transfer Protocol GRLIB core
+ * register driver interface.
+ *
+ * COPYRIGHT (c) 2009.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <grlib/spwcuc.h>
+
+#include <grlib/grlib_impl.h>
+
+/* Private structure of SPWCUC driver. */
+struct spwcuc_priv {
+ struct drvmgr_dev *dev;
+ struct spwcuc_regs *regs;
+ int open;
+
+ spwcuc_isr_t user_isr;
+ void *user_isr_arg;
+
+ struct spwcuc_stats stats;
+};
+
+void spwcuc_isr(void *data);
+
+struct amba_drv_info spwcuc_drv_info;
+
+/* Hardware Reset of SPWCUC */
+static int spwcuc_hw_reset(struct spwcuc_priv *priv)
+{
+ struct spwcuc_regs *r = priv->regs;
+ int i = 1000;
+
+ r->control = 1;
+
+ while ((r->control & 1) && i > 0) {
+ i--;
+ }
+
+ spwcuc_clear_irqs(priv, -1);
+
+ return i ? 0 : -1;
+}
+
+int spwcuc_reset(void *spwcuc)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ return spwcuc_hw_reset(priv);
+}
+
+void *spwcuc_open(int minor)
+{
+ struct spwcuc_priv *priv;
+ struct drvmgr_dev *dev;
+
+ /* Get Device from Minor */
+ if ( drvmgr_get_dev(&spwcuc_drv_info.general, minor, &dev) ) {
+ return NULL;
+ }
+
+ priv = dev->priv;
+ if ( (priv == NULL) || priv->open )
+ return NULL;
+
+ /* Set initial state of software */
+ priv->open = 1;
+
+ /* Clear Statistics */
+ spwcuc_clr_stats(priv);
+ priv->user_isr = NULL;
+ priv->user_isr_arg = NULL;
+
+ return priv;
+}
+
+void spwcuc_close(void *spwcuc)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ if ( priv->open == 0 )
+ return;
+
+ /* Reset Hardware */
+ spwcuc_hw_reset(priv);
+
+ priv->open = 0;
+}
+
+void spwcuc_int_enable(void *spwcuc)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ /* Register and Enable Interrupt at Interrupt controller */
+ drvmgr_interrupt_register(priv->dev, 0, "spwcuc", spwcuc_isr, priv);
+}
+
+void spwcuc_int_disable(void *spwcuc)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+	/* Unregister and Disable Interrupt at Interrupt controller */
+ drvmgr_interrupt_unregister(priv->dev, 0, spwcuc_isr, priv);
+}
+
+void spwcuc_clr_stats(void *spwcuc)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ memset(&priv->stats, 0, sizeof(priv->stats));
+}
+
+void spwcuc_get_stats(void *spwcuc, struct spwcuc_stats *stats)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ memcpy(stats, &priv->stats, sizeof(priv->stats));
+}
+
+/* Configure the spwcuc core */
+void spwcuc_config(void *spwcuc, struct spwcuc_cfg *cfg)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+ struct spwcuc_regs *r = priv->regs;
+
+ r->config = (cfg->sel_out & 0x1f) << 28 |
+ (cfg->sel_in & 0x1f) << 24 |
+ (cfg->mapping & 0x1f) << 16 |
+ (cfg->tolerance & 0x1f) << 8 |
+ (cfg->tid & 0x7) << 4 |
+ (cfg->ctf & 1) << 1 |
+ (cfg->cp & 1);
+
+ r->control = (cfg->txen & 1) << 1 |
+ (cfg->rxen & 1) << 2 |
+ (cfg->pktsyncen & 1) << 3 |
+ (cfg->pktiniten & 1) << 4 |
+ (cfg->pktrxen & 1) << 5;
+
+ r->dla = (cfg->dla_mask & 0xff)<<8 | (cfg->dla & 0xff);
+
+ r->pid = cfg->pid;
+
+ r->offset = cfg->offset;
+}
+
+/* Return elapsed coarse time */
+unsigned int spwcuc_get_et_coarse(void *spwcuc)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ return priv->regs->etct;
+}
+
+/* Return elapsed fine time */
+unsigned int spwcuc_get_et_fine(void *spwcuc)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ return (priv->regs->etft & 0xffffff) >> 8;
+}
+
+/* Return elapsed time (coarse and fine) */
+unsigned long long spwcuc_get_et(void *spwcuc)
+{
+ return (((unsigned long long)spwcuc_get_et_coarse(spwcuc)) << 24) | spwcuc_get_et_fine(spwcuc);
+}
+
+/* Return next elapsed coarse time (for use when sending SpW time packet) */
+unsigned int spwcuc_get_next_et_coarse(void *spwcuc)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ return priv->regs->etct_next;
+}
+
+/* Return next elapsed fine time (for use when sending SpW time packet) */
+unsigned int spwcuc_get_next_et_fine(void *spwcuc)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ return (priv->regs->etft_next & 0xffffff) >> 8;
+}
+
+/* Return next elapsed time (for use when sending SpW time packet) */
+unsigned long long spwcuc_get_next_et(void *spwcuc)
+{
+ return (((unsigned long long)spwcuc_get_next_et_coarse(spwcuc)) << 24) | spwcuc_get_next_et_fine(spwcuc);
+}
+
+/* Force/Set the elapsed time (coarse 32-bit and fine 24-bit) by writing the
+ * T-Field Time Packet Registers then the FORCE, NEW and INIT bits.
+ * The latter three are needed for the ET to be set with the new value.
+ */
+void spwcuc_force_et(void *spwcuc, unsigned long long time)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+ struct spwcuc_regs *regs = priv->regs;
+
+ regs->etft_next = (time & 0xffffff) << 8;
+ regs->etct_next = (time >> 24) & 0xffffffff;
+ regs->pkt_pf_crc = (1 << 29) | (1 << 30) | (1 << 31);
+}
+
+/* Return received (from time packet) elapsed coarse time */
+unsigned int spwcuc_get_tp_et_coarse(void *spwcuc)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ return priv->regs->pkt_ct;
+}
+
+/* Return received (from time packet) elapsed fine time */
+unsigned int spwcuc_get_tp_et_fine(void *spwcuc)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ return (priv->regs->pkt_ft & 0xffffff) >> 8;
+}
+
+/* Return received (from time packet) elapsed time (coarse and fine) */
+unsigned long long spwcuc_get_tp_et(void *spwcuc)
+{
+ return (((unsigned long long)spwcuc_get_tp_et_coarse(spwcuc)) << 24) | spwcuc_get_tp_et_fine(spwcuc);
+}
+
+/* Clear interrupts */
+void spwcuc_clear_irqs(void *spwcuc, int irqs)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ priv->regs->picr = irqs;
+}
+
+/* Enable interrupts */
+void spwcuc_enable_irqs(void *spwcuc, int irqs)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ priv->regs->imr = irqs;
+}
+
+struct spwcuc_regs *spwcuc_get_regs(void *spwcuc)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ return priv->regs;
+}
+
+void spwcuc_int_register(void *spwcuc, spwcuc_isr_t func, void *data)
+{
+ struct spwcuc_priv *priv = (struct spwcuc_priv *)spwcuc;
+
+ priv->user_isr = func;
+ priv->user_isr_arg = data;
+}
+
+void spwcuc_isr(void *data)
+{
+ struct spwcuc_priv *priv = data;
+ struct spwcuc_stats *stats = &priv->stats;
+ unsigned int pimr = priv->regs->pimr;
+
+ stats->nirqs++;
+
+ if (pimr & PKT_INIT_IRQ)
+ stats->pkt_init++;
+ if (pimr & PKT_ERR_IRQ)
+ stats->pkt_err++;
+ if (pimr & PKT_RX_IRQ)
+ stats->pkt_rx++;
+ if (pimr & WRAP_ERR_IRQ)
+ stats->wraperr++;
+ if (pimr & WRAP_IRQ)
+ stats->wrap++;
+ if (pimr & SYNC_ERR_IRQ)
+ stats->syncerr++;
+ if (pimr & SYNC_IRQ)
+ stats->sync++;
+ if (pimr & TOL_ERR_IRQ)
+ stats->tolerr++;
+ if (pimr & TICK_RX_ERR_IRQ)
+ stats->tick_rx_error++;
+ if (pimr & TICK_RX_WRAP_IRQ)
+ stats->tick_rx_wrap++;
+ if (pimr & TICK_RX_IRQ)
+ stats->tick_rx++;
+ if (pimr & TICK_TX_WRAP_IRQ)
+ stats->tick_tx_wrap++;
+ if (pimr & TICK_TX_IRQ)
+ stats->tick_tx++;
+
+ /* Let user Handle Interrupt */
+ if ( priv->user_isr )
+ priv->user_isr(pimr, priv->user_isr_arg);
+}
+
+/*** INTERFACE TO DRIVER MANAGER ***/
+
+static int spwcuc_init2(struct drvmgr_dev *dev)
+{
+ struct amba_dev_info *ambadev;
+ struct ambapp_core *pnpinfo;
+ struct spwcuc_priv *priv;
+ struct spwcuc_regs *regs;
+
+ priv = grlib_calloc(1, sizeof(*priv));
+ if ( priv == NULL )
+ return -1;
+ priv->dev = dev;
+ dev->priv = priv;
+
+ /* Get device information from AMBA PnP information */
+ ambadev = (struct amba_dev_info *)dev->businfo;
+ if ( ambadev == NULL ) {
+ return -1;
+ }
+ pnpinfo = &ambadev->info;
+ regs = (struct spwcuc_regs *)pnpinfo->apb_slv->start;
+
+ priv->regs = regs;
+
+ spwcuc_hw_reset(priv);
+
+ return 0;
+}
+
+struct drvmgr_drv_ops spwcuc_ops =
+{
+ {NULL, spwcuc_init2, NULL, NULL},
+ NULL,
+ NULL
+};
+
+struct amba_dev_id spwcuc_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_SPWCUC},
+ {0, 0} /* Mark end of table */
+};
+
+struct amba_drv_info spwcuc_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_SPWCUC_ID,/* Driver ID */
+ "SPWCUC_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &spwcuc_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+ },
+ &spwcuc_ids[0]
+};
+
+/* Register the SPWCUC Driver */
+void spwcuc_register(void)
+{
+ drvmgr_drv_register(&spwcuc_drv_info.general);
+}
diff --git a/bsps/shared/grlib/tmtc/grtc.c b/bsps/shared/grlib/tmtc/grtc.c
new file mode 100644
index 0000000000..44e9685c3a
--- /dev/null
+++ b/bsps/shared/grlib/tmtc/grtc.c
@@ -0,0 +1,1984 @@
+/* GRTC Telecommand decoder driver
+ *
+ * COPYRIGHT (c) 2007.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bsp.h>
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <ctype.h>
+#include <rtems/bspIo.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/ambapp.h>
+#include <grlib/grtc.h>
+
+#include <grlib/grlib_impl.h>
+
+/*
+#define DEBUG
+#define DEBUGFUNCS
+*/
+
+#include <grlib/debug_defs.h>
+
+#ifdef DEBUG_ERROR
+#define DEBUG_ERR_LOG(device,error) grtc_log_error(device,error)
+#else
+#define DEBUG_ERR_LOG(device,error)
+#endif
+
+/* GRTC register map */
+struct grtc_regs {
+ volatile unsigned int grst; /* Global Reset Register (GRR 0x00) */
+ volatile unsigned int gctrl; /* Global Control Register (GCR 0x04) */
+ int unused0;
+ volatile unsigned int sir; /* Spacecraft Identifier Register (SIR 0x0c) */
+ volatile unsigned int far; /* Frame Acceptance Report Register (FAR 0x10) */
+
+ volatile unsigned int clcw1; /* CLCW Register 1 (CLCWR1 0x14) */
+ volatile unsigned int clcw2; /* CLCW Register 2 (CLCWR2 0x18) */
+ volatile unsigned int phir; /* Physical Interface Register (PHIR 0x1c) */
+ volatile unsigned int cor; /* Control Register (COR 0x20) */
+
+ volatile unsigned int str; /* Status Register (STR 0x24) */
+ volatile unsigned int asr; /* Address Space Register (ASR 0x28) */
+ volatile unsigned int rp; /* Receive Read Pointer Register (RRP 0x2c) */
+ volatile unsigned int wp; /* Receive Write Pointer Register (RWP 0x30) */
+
+ int unused1[(0x60-0x34)/4];
+
+ volatile unsigned int pimsr; /* Pending Interrupt Masked Status Register (PIMSR 0x60) */
+ volatile unsigned int pimr; /* Pending Interrupt Masked Register (PIMR 0x64) */
+ volatile unsigned int pisr; /* Pending Interrupt Status Register (PISR 0x68) */
+ volatile unsigned int pir; /* Pending Interrupt Register (PIR 0x6c) */
+ volatile unsigned int imr; /* Interrupt Mask Register (IMR 0x70) */
+ volatile unsigned int picr; /* Pending Interrupt Clear Register (PICR 0x74) */
+};
+
+/* Security Byte */
+#define GRTC_SEB 0x55000000
+
+/* Global Reset Register (GRR 0x00) */
+#define GRTC_GRR_SRST 0x1
+#define GRTC_GRR_SRST_BIT 0
+
+/* Global Control Register (GCR 0x04) */
+#define GRTC_GCR_PSR_BIT 10
+#define GRTC_GCR_NRZM_BIT 11
+#define GRTC_GCR_PSS_BIT 12
+
+#define GRTC_GCR_PSR (1<<GRTC_GCR_PSR_BIT)
+#define GRTC_GCR_NRZM (1<<GRTC_GCR_NRZM_BIT)
+#define GRTC_GCR_PSS (1<<GRTC_GCR_PSS_BIT)
+
+/* Spacecraft Identifier Register (SIR 0x0c) */
+
+
+/* Frame Acceptance Report Register (FAR 0x10) */
+#define GRTC_FAR_SCI_BIT 10
+#define GRTC_FAR_CSEC_BIT 11
+#define GRTC_FAR_CAC_BIT 12
+#define GRTC_FAR_SSD_BIT 13
+
+#define GRTC_FAR_SCI (0x7<<GRTC_FAR_SCI_BIT)
+#define GRTC_FAR_CSEC (0x7<<GRTC_FAR_CSEC_BIT)
+#define GRTC_FAR_CAC (0x3f<<GRTC_FAR_CAC_BIT)
+#define GRTC_FAR_SSD (1<<GRTC_FAR_SSD_BIT)
+
+/* CLCW Register 1 (CLCWR1 0x14) */
+/* CLCW Register 2 (CLCWR2 0x18) */
+#define GRTC_CLCW_RVAL_BIT 0
+#define GRTC_CLCW_RTYPE_BIT 8
+#define GRTC_CLCW_FBCO_BIT 9
+#define GRTC_CLCW_RTMI_BIT 11
+#define GRTC_CLCW_WAIT_BIT 12
+#define GRTC_CLCW_LOUT_BIT 13
+#define GRTC_CLCW_NBLO_BIT 14
+#define GRTC_CLCW_NRFA_BIT 15
+#define GRTC_CLCW_VCI_BIT 18
+#define GRTC_CLCW_CIE_BIT 24
+#define GRTC_CLCW_STAF_BIT 26
+#define GRTC_CLCW_VNUM_BIT 29
+#define GRTC_CLCW_CWTY_BIT 31
+
+#define GRTC_CLCW_RVAL (0xff<<GRTC_CLCW_RVAL_BIT)
+#define GRTC_CLCW_RTYPE (1<<GRTC_CLCW_RTYPE_BIT)
+#define GRTC_CLCW_FBCO (0x3<<GRTC_CLCW_FBCO_BIT)
+#define GRTC_CLCW_RTMI (0x3<<GRTC_CLCW_RTMI_BIT)
+#define GRTC_CLCW_WAIT (1<<GRTC_CLCW_WAIT_BIT)
+#define GRTC_CLCW_LOUT (1<<GRTC_CLCW_LOUT_BIT)
+#define GRTC_CLCW_NBLO (1<<GRTC_CLCW_NBLO_BIT)
+#define GRTC_CLCW_NRFA (1<<GRTC_CLCW_NRFA_BIT)
+#define GRTC_CLCW_VCI (0x3f<<GRTC_CLCW_VCI_BIT)
+#define GRTC_CLCW_CIE (0x3<<GRTC_CLCW_CIE_BIT)
+#define GRTC_CLCW_STAF (0x3<<GRTC_CLCW_STAF_BIT)
+#define GRTC_CLCW_VNUM (0x3<<GRTC_CLCW_VNUM_BIT)
+#define GRTC_CLCW_CWTY (1<<GRTC_CLCW_CWTY_BIT)
+
+/* Physical Interface Register (PIR 0x1c) */
+#define GRTC_PIR_BLO_BIT 0
+#define GRTC_PIR_RFA_BIT 8
+
+#define GRTC_PIR_BLO (0xff<<GRTC_PIR_BLO_BIT)
+#define GRTC_PIR_RFA (0xff<<GRTC_PIR_RFA_BIT)
+
+/* Control Register (COR 0x20) */
+#define GRTC_COR_RE_BIT 0
+#define GRTC_COR_CRST_BIT 9
+
+#define GRTC_COR_RE (1<<GRTC_COR_RE_BIT)
+#define GRTC_COR_CRST (1<<GRTC_COR_CRST_BIT)
+
+/* Status Register (STR 0x24) */
+#define GRTC_STR_CR_BIT 0
+#define GRTC_STR_OV_BIT 4
+#define GRTC_STR_RFF_BIT 7
+#define GRTC_STR_RBF_BIT 10
+
+#define GRTC_STR_CR (1<<GRTC_STR_CR_BIT)
+#define GRTC_STR_OV (1<<GRTC_STR_OV_BIT)
+#define GRTC_STR_RFF (1<<GRTC_STR_RFF_BIT)
+#define GRTC_STR_RBF (1<<GRTC_STR_RBF_BIT)
+
+/* Address Space Register (ASR 0x28) */
+#define GRTC_ASR_RXLEN_BIT 0
+#define GRTC_ASR_BUFST_BIT 10
+
+#define GRTC_ASR_RXLEN (0xff<<GRTC_ASR_RXLEN_BIT)
+#define GRTC_ASR_BUFST (0x3fffff<<GRTC_ASR_BUFST_BIT)
+
+/* Receive Read Pointer Register (RRP 0x2c) */
+#define GRTC_RRP_PTR_BIT 0
+
+#define GRTC_RRP_PTR (0xffffff<<GRTC_RRP_PTR_BIT)
+
+/* Receive Write Pointer Register (RWP 0x30) */
+#define GRTC_RWP_PTR_BIT 0
+
+#define GRTC_RWP_PTR (0xffffff<<GRTC_RWP_PTR_BIT)
+
+/* Pending Interrupt Masked Status Register (PIMSR 0x60) */
+/* Pending Interrupt Masked Register (PIMR 0x64) */
+/* Pending Interrupt Status Register (PISR 0x68) */
+/* Pending Interrupt Register (PIR 0x6c) */
+/* Interrupt Mask Register (IMR 0x70) */
+/* Pending Interrupt Clear Register (PICR 0x74) */
+#define GRTC_INT_RFA_BIT 0
+#define GRTC_INT_BLO_BIT 1
+#define GRTC_INT_FAR_BIT 2
+#define GRTC_INT_CR_BIT 3
+#define GRTC_INT_RBF_BIT 4
+#define GRTC_INT_OV_BIT 5
+#define GRTC_INT_CS_BIT 6
+
+#define GRTC_INT_RFA (1<<GRTC_INT_RFA_BIT)
+#define GRTC_INT_BLO (1<<GRTC_INT_BLO_BIT)
+#define GRTC_INT_FAR (1<<GRTC_INT_FAR_BIT)
+#define GRTC_INT_CR (1<<GRTC_INT_CR_BIT)
+#define GRTC_INT_OV (1<<GRTC_INT_OV_BIT)
+#define GRTC_INT_CS (1<<GRTC_INT_CS_BIT)
+
+#define GRTC_INT_ALL (GRTC_INT_RFA|GRTC_INT_BLO|GRTC_INT_FAR|GRTC_INT_CR|GRTC_INT_OV|GRTC_INT_CS)
+
+#define READ_REG(address) (*(volatile unsigned int *)address)
+
+/* Driver functions */
+static rtems_device_driver grtc_initialize(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtc_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtc_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtc_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtc_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtc_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+
+#define GRTC_DRIVER_TABLE_ENTRY { grtc_initialize, grtc_open, grtc_close, grtc_read, grtc_write, grtc_ioctl }
+
+static rtems_driver_address_table grtc_driver = GRTC_DRIVER_TABLE_ENTRY;
+
+enum {
+ FRM_STATE_NONE = 0, /* not started */
+ FRM_STATE_HDR = 1, /* Reading Header (Frame length isn't known) */
+ FRM_STATE_ALLOC = 2, /* Allocate Frame to hold data */
+ FRM_STATE_PAYLOAD = 3, /* Reading Payload (Frame length is known) */
+ FRM_STATE_FILLER = 4, /* Check filler */
+ FRM_STATE_DROP = 5 /* error, drop data until end marker */
+};
+
+/* Frame pool, all frames in pool have the same buffer length (frame mode only) */
+struct grtc_frame_pool {
+ unsigned int frame_len; /* Maximal length of frame (payload+hdr+crc..) */
+ unsigned int frame_cnt; /* Current number of frames in pool (in frms) */
+ struct grtc_frame *frms; /* Chain of frames in pool (this is the pool) */
+};
+
+struct grtc_priv {
+ struct drvmgr_dev *dev; /* Driver manager device */
+ char devName[32]; /* Device Name */
+ struct grtc_regs *regs; /* TC Hardware Register MAP */
+ int irq; /* IRQ number of TC core */
+ SPIN_DECLARE(devlock); /* spin-lock of registers */
+
+ int major; /* Driver major */
+ int minor; /* Device Minor */
+
+ int open; /* Device has been opened by user */
+ int running; /* TC receiver running */
+ int mode; /* RAW or FRAME mode */
+ int overrun_condition; /* Overrun condition */
+ int blocking; /* Blocking/polling mode */
+ rtems_interval timeout; /* Timeout in blocking mode */
+ int wait_for_nbytes;/* Number of bytes to wait for in blocking mode */
+
+ struct grtc_ioc_config config;
+
+/* RAW MODE ONLY */
+ /* Buffer allocation (user provided or driver allocated using malloc) */
+ void *buf;
+ void *buf_remote;
+ void *_buf;
+ int buf_custom; /* 0=no custom buffer, 1=custom buffer (don't free it...) */
+ unsigned int len;
+
+/* FRAME MODE ONLY */
+ /* Frame management when user provides buffers. */
+ int pool_cnt; /* Number of Pools */
+ struct grtc_frame_pool *pools; /* Array of pools */
+
+ struct grtc_list ready; /* Ready queue (received frames) */
+
+ /* Frame read data (Frame mode only) */
+ int frame_state;
+ int filler;
+ unsigned int hdr[2]; /* 5 byte header */
+	struct grtc_frame *frm;	/* Frame currently being copied */
+ int frmlen;
+
+ struct grtc_ioc_stats stats; /* Statistics */
+
+ rtems_id sem_rx;
+
+#ifdef DEBUG_ERROR
+ /* Buffer read/write state */
+ unsigned int rp;
+ unsigned int wp;
+
+ /* Debugging */
+ int last_error[128];
+ int last_error_cnt;
+#endif
+};
+
+/* Prototypes */
+static void grtc_hw_reset(struct grtc_priv *priv);
+static void grtc_interrupt(void *arg);
+
+/* Common Global Variables */
+static rtems_id grtc_dev_sem;
+static int grtc_driver_io_registered = 0;
+static rtems_device_major_number grtc_driver_io_major = 0;
+
+/******************* Driver manager interface ***********************/
+
+/* Driver prototypes */
+static int grtc_register_io(rtems_device_major_number *m);
+static int grtc_device_init(struct grtc_priv *pDev);
+
+static int grtc_init2(struct drvmgr_dev *dev);
+static int grtc_init3(struct drvmgr_dev *dev);
+
+static struct drvmgr_drv_ops grtc_ops =
+{
+ {NULL, grtc_init2, grtc_init3, NULL},
+ NULL,
+ NULL,
+};
+
+static struct amba_dev_id grtc_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_GRTC},
+ {0, 0} /* Mark end of table */
+};
+
+static struct amba_drv_info grtc_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_GRTC_ID, /* Driver ID */
+ "GRTC_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &grtc_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ sizeof(struct grtc_priv),
+ },
+ &grtc_ids[0]
+};
+
+void grtc_register_drv (void)
+{
+ DBG("Registering GRTC driver\n");
+ drvmgr_drv_register(&grtc_drv_info.general);
+}
+
+static int grtc_init2(struct drvmgr_dev *dev)
+{
+ struct grtc_priv *priv;
+
+ DBG("GRTC[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+ priv = dev->priv;
+ if ( !priv )
+ return DRVMGR_NOMEM;
+ priv->dev = dev;
+
+ /* This core will not find other cores, so we wait for init2() */
+
+ return DRVMGR_OK;
+}
+
+static int grtc_init3(struct drvmgr_dev *dev)
+{
+ struct grtc_priv *priv;
+ char prefix[32];
+ rtems_status_code status;
+
+ priv = dev->priv;
+
+ /* Do initialization */
+
+ if ( grtc_driver_io_registered == 0) {
+ /* Register the I/O driver only once for all cores */
+ if ( grtc_register_io(&grtc_driver_io_major) ) {
+ /* Failed to register I/O driver */
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+
+ grtc_driver_io_registered = 1;
+ }
+
+ /* I/O system registered and initialized
+ * Now we take care of device initialization.
+ */
+ if ( grtc_device_init(priv) ) {
+ return DRVMGR_FAIL;
+ }
+
+ /* Get Filesystem name prefix */
+ prefix[0] = '\0';
+ if ( drvmgr_get_dev_prefix(dev, prefix) ) {
+ /* Failed to get prefix, make sure of a unique FS name
+ * by using the driver minor.
+ */
+ sprintf(priv->devName, "/dev/grtc%d", dev->minor_drv);
+ } else {
+ /* Got special prefix, this means we have a bus prefix
+ * And we should use our "bus minor"
+ */
+ sprintf(priv->devName, "/dev/%sgrtc%d", prefix, dev->minor_bus);
+ }
+
+ SPIN_INIT(&priv->devlock, priv->devName);
+
+ /* Register Device */
+ status = rtems_io_register_name(priv->devName, grtc_driver_io_major, dev->minor_drv);
+ if (status != RTEMS_SUCCESSFUL) {
+ return DRVMGR_FAIL;
+ }
+
+ return DRVMGR_OK;
+}
+
+/******************* Driver Implementation ***********************/
+
+static int grtc_register_io(rtems_device_major_number *m)
+{
+ rtems_status_code r;
+
+ if ((r = rtems_io_register_driver(0, &grtc_driver, m)) == RTEMS_SUCCESSFUL) {
+ DBG("GRTC driver successfully registered, major: %d\n", *m);
+ } else {
+ switch(r) {
+ case RTEMS_TOO_MANY:
+ printk("GRTC rtems_io_register_driver failed: RTEMS_TOO_MANY\n");
+ return -1;
+ case RTEMS_INVALID_NUMBER:
+ printk("GRTC rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n");
+ return -1;
+ case RTEMS_RESOURCE_IN_USE:
+ printk("GRTC rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n");
+ return -1;
+ default:
+ printk("GRTC rtems_io_register_driver failed\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int grtc_device_init(struct grtc_priv *pDev)
+{
+ struct amba_dev_info *ambadev;
+ struct ambapp_core *pnpinfo;
+
+ /* Get device information from AMBA PnP information */
+ ambadev = (struct amba_dev_info *)pDev->dev->businfo;
+ if ( ambadev == NULL ) {
+ return -1;
+ }
+ pnpinfo = &ambadev->info;
+ pDev->irq = pnpinfo->irq;
+ pDev->regs = (struct grtc_regs *)pnpinfo->ahb_slv->start[0];
+ pDev->minor = pDev->dev->minor_drv;
+ pDev->open = 0;
+ pDev->running = 0;
+
+ /* Create Binary RX Semaphore with count = 0 */
+ if ( rtems_semaphore_create(rtems_build_name('G', 'R', 'C', '0' + pDev->minor),
+ 0,
+ RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|\
+ RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+ 0,
+ &pDev->sem_rx) != RTEMS_SUCCESSFUL ) {
+ return -1;
+ }
+
+ /* Reset Hardware before attaching IRQ handler */
+ grtc_hw_reset(pDev);
+
+ return 0;
+}
+
+static void grtc_hw_reset(struct grtc_priv *priv)
+{
+ /* Reset Core */
+ priv->regs->grst = GRTC_SEB | GRTC_GRR_SRST;
+}
+
+static void grtc_hw_get_defaults(struct grtc_priv *pDev, struct grtc_ioc_config *config)
+{
+ unsigned int gcr = READ_REG(&pDev->regs->gctrl);
+
+ config->psr_enable = (gcr & GRTC_GCR_PSR) ? 1:0;
+ config->nrzm_enable = (gcr & GRTC_GCR_NRZM) ? 1:0;
+ config->pss_enable = (gcr & GRTC_GCR_PSS) ? 1:0;
+
+ config->crc_calc = 0;
+}
+
+/* bufsize is given in bytes */
+static int __inline__ grtc_hw_data_avail_upper(unsigned int rrp, unsigned rwp, unsigned int bufsize)
+{
+ if ( rrp == rwp )
+ return 0;
+
+ if ( rwp > rrp ) {
+ return rwp-rrp;
+ }
+
+ return (bufsize-rrp);
+}
+
+/* bufsize is given in bytes */
+static int __inline__ grtc_hw_data_avail_lower(unsigned int rrp, unsigned rwp, unsigned int bufsize)
+{
+ if ( rrp == rwp )
+ return 0;
+
+ if ( rwp > rrp ) {
+ return 0;
+ }
+
+ return rwp;
+}
+
+/* bufsize is given in bytes */
+static int __inline__ grtc_hw_data_avail(unsigned int rrp, unsigned rwp, unsigned int bufsize)
+{
+ if ( rrp == rwp )
+ return 0;
+
+ if ( rwp > rrp ) {
+ return rwp-rrp;
+ }
+
+ return rwp+(bufsize-rrp);
+}
+
+/* Reads as much as possible but not more than 'max' bytes from the TC receive buffer.
+ * Number of bytes put into 'buf' is returned.
+ */
+static int grtc_hw_read_try(struct grtc_priv *pDev, char *buf, int max)
+{
+ struct grtc_regs *regs = pDev->regs;
+ unsigned int rp, wp, asr, bufmax, rrp, rwp;
+ unsigned int upper, lower;
+ unsigned int count, cnt, left;
+
+ FUNCDBG();
+
+ if ( max < 1 )
+ return 0;
+
+ rp = READ_REG(&regs->rp);
+ asr = READ_REG(&regs->asr);
+ bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT;
+ bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */
+ wp = READ_REG(&regs->wp);
+
+ /* Relative rp and wp */
+ rrp = rp - (asr & GRTC_ASR_BUFST);
+ rwp = wp - (asr & GRTC_ASR_BUFST);
+
+ lower = grtc_hw_data_avail_lower(rrp,rwp,bufmax);
+ upper = grtc_hw_data_avail_upper(rrp,rwp,bufmax);
+
+ DBG("grtc_hw_read_try: AVAIL: Lower: %d, Upper: %d\n",lower,upper);
+ DBG("grtc_hw_read_try: rp: 0x%x, rrp: 0x%x, wp: 0x%x, rwp: 0x%x, bufmax: %d\n, start: 0x%x\n",
+ rp,rrp,wp,rwp,bufmax,pDev->buffer);
+
+ if ( (upper+lower) == 0 )
+ return 0;
+
+ /* Count bytes will be read */
+ count = (upper+lower) > max ? max : (upper+lower);
+ left = count;
+
+ /* Read from upper part of data buffer */
+ if ( upper > 0 ){
+ if ( left < upper ){
+ cnt = left;
+ }else{
+ cnt = upper; /* Read all upper data available */
+ }
+ DBG("grtc_hw_read_try: COPYING %d from upper\n",cnt);
+ /* Convert from Remote address (RP) into CPU Local address */
+ memcpy(buf, (void *)((rp - (unsigned int)pDev->buf_remote) + (unsigned int)pDev->buf), cnt);
+ buf += cnt;
+ left -= cnt;
+ }
+
+ /* Read from lower part of data buffer */
+ if ( left > 0 ){
+ if ( left < lower ){
+ cnt = left;
+ }else{
+ cnt = lower; /* Read all lower data available */
+ }
+ DBG("grtc_hw_read_try: COPYING %d from lower\n",cnt);
+ memcpy(buf, (void *)pDev->buf, cnt);
+ buf += cnt;
+ left -= cnt;
+ }
+
+ /* Update hardware RP pointer to tell hardware about new space available */
+ if ( (rp+count) >= ((asr&GRTC_ASR_BUFST)+bufmax) ){
+ regs->rp = (rp+count-bufmax);
+ } else {
+ regs->rp = rp+count;
+ }
+
+ return count;
+}
+
+/* Returns the number of bytes currently pending in the TC receive buffer,
+ * computed from the hardware read/write pointers; nothing is copied out.
+ */
+static int grtc_data_avail(struct grtc_priv *pDev)
+{
+ unsigned int rp, wp, asr, bufmax, rrp, rwp;
+ struct grtc_regs *regs = pDev->regs;
+
+ FUNCDBG();
+
+ rp = READ_REG(&regs->rp);
+ asr = READ_REG(&regs->asr);
+ bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT;
+ bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */
+ wp = READ_REG(&regs->wp);
+
+ /* Relative rp and wp */
+ rrp = rp - (asr & GRTC_ASR_BUFST);
+ rwp = wp - (asr & GRTC_ASR_BUFST);
+
+ return grtc_hw_data_avail(rrp,rwp,bufmax);
+}
+
+static void *grtc_memalign(unsigned int boundary, unsigned int length, void *realbuf)
+{
+ *(int *)realbuf = (int)grlib_malloc(length+(~GRTC_ASR_BUFST)+1);
+ DBG("GRTC: Alloced %d (0x%x) bytes, requested: %d\n",length+(~GRTC_ASR_BUFST)+1,length+(~GRTC_ASR_BUFST)+1,length);
+ return (void *)(((*(unsigned int *)realbuf)+(~GRTC_ASR_BUFST)+1) & ~(boundary-1));
+}
+
+static int grtc_start(struct grtc_priv *pDev)
+{
+ struct grtc_regs *regs = pDev->regs;
+ unsigned int tmp;
+
+ if ( !pDev->buf || (((unsigned int)pDev->buf & ~GRTC_ASR_BUFST) != 0) ||
+ (pDev->len>(1024*0x100)) || (pDev->len<1024) || ((pDev->len & (1024-1)) != 0)
+ ) {
+ DBG("GRTC: start: buffer not properly allocated(0x%x,0x%x,0x%x,0x%x)\n",pDev->buf,pDev->len,((unsigned int)pDev->buf & ~GRTC_ASR_BUFST),(pDev->len & ~(1024-1)));
+ return RTEMS_NO_MEMORY;
+ }
+
+ memset(pDev->buf,0,pDev->len);
+
+ /* Software init */
+ pDev->overrun_condition = 0;
+#ifdef DEBUG_ERROR
+ pDev->last_error_cnt = 0;
+ memset(&pDev->last_error[0],0,128*sizeof(int));
+#endif
+ memset(&pDev->stats,0,sizeof(struct grtc_ioc_stats));
+
+ /* Reset the receiver */
+ regs->cor = GRTC_SEB | GRTC_COR_CRST;
+ if ( READ_REG(&regs->cor) & GRTC_COR_CRST ){
+ /* Reset Failed */
+ DBG("GRTC: start: Reseting receiver failed\n");
+ return RTEMS_IO_ERROR;
+ }
+
+ /* make sure the RX semaphore is in the correct state when starting.
+ * In case of a previous overrun condition it could be in incorrect
+ * state (where rtems_semaphore_flush was used).
+ */
+ rtems_semaphore_obtain(pDev->sem_rx, RTEMS_NO_WAIT, 0);
+
+ /* Set operating modes */
+ tmp = 0;
+ if ( pDev->config.psr_enable )
+ tmp |= GRTC_GCR_PSR;
+ if ( pDev->config.nrzm_enable )
+ tmp |= GRTC_GCR_NRZM;
+ if ( pDev->config.pss_enable )
+ tmp |= GRTC_GCR_PSS;
+ regs->gctrl = GRTC_SEB | tmp;
+
+ /* Clear any pending interrupt */
+ tmp = READ_REG(&regs->pir);
+ regs->picr = GRTC_INT_ALL;
+
+ /* Unmask only the Overrun interrupt */
+ regs->imr = GRTC_INT_OV;
+
+ /* Set up DMA registers
+ * 1. Let hardware know about our DMA area (size and location)
+	 * 2. Set DMA read/write positions to zero.
+ */
+ regs->asr = (unsigned int)pDev->buf_remote | ((pDev->len>>10)-1);
+ regs->rp = (unsigned int)pDev->buf_remote;
+
+ /* Mark running before enabling the receiver, we could receive
+ * an interrupt directly after enabling the receiver and it would
+ * then interpret the interrupt as spurious (see interrupt handler)
+ */
+ pDev->running = 1;
+
+ /* Enable receiver */
+ regs->cor = GRTC_SEB | GRTC_COR_RE;
+
+ DBG("GRTC: STARTED\n");
+
+ return 0;
+}
+
+static void grtc_stop(struct grtc_priv *pDev, int overrun)
+{
+ struct grtc_regs *regs = pDev->regs;
+ SPIN_IRQFLAGS(irqflags);
+
+ SPIN_LOCK_IRQ(&pDev->devlock, irqflags);
+
+ /* Disable the receiver */
+ regs->cor = GRTC_SEB;
+
+ /* disable all interrupts and clear them */
+ regs->imr = 0;
+ READ_REG(&regs->pir);
+ regs->picr = GRTC_INT_ALL;
+
+ DBG("GRTC: STOPPED\n");
+
+ if (overrun) {
+ pDev->overrun_condition = 1;
+ } else {
+ pDev->running = 0;
+ }
+
+ SPIN_UNLOCK_IRQ(&pDev->devlock, irqflags);
+
+ /* Flush semaphores in case a thread is stuck waiting for CLTUs (RX data) */
+ rtems_semaphore_flush(pDev->sem_rx);
+}
+
+/* Wait until 'count' bytes are available in receive buffer, or until
+ * the timeout expires.
+ */
+static int grtc_wait_data(struct grtc_priv *pDev, int count, rtems_interval timeout)
+{
+ int avail;
+ int ret;
+ SPIN_IRQFLAGS(irqflags);
+
+ FUNCDBG();
+
+ if ( count < 1 )
+ return 0;
+
+ SPIN_LOCK_IRQ(&pDev->devlock, irqflags);
+
+ /* Enable interrupts when receiving CLTUs, Also clear old pending CLTUs store
+ * interrupts.
+ */
+ pDev->regs->picr = GRTC_INT_CS;
+ pDev->regs->imr = READ_REG(&pDev->regs->imr) | GRTC_INT_CS;
+
+ avail = grtc_data_avail(pDev);
+ if ( avail < count ) {
+ /* Wait for interrupt. */
+
+ SPIN_UNLOCK_IRQ(&pDev->devlock, irqflags);
+
+ if ( timeout == 0 ){
+ timeout = RTEMS_NO_TIMEOUT;
+ }
+ ret = rtems_semaphore_obtain(pDev->sem_rx,RTEMS_WAIT,timeout);
+ /* RTEMS_SUCCESSFUL = interrupt signaled data is available
+ * RTEMS_TIMEOUT = timeout expired, probably not enough data available
+		 * RTEMS_UNSATISFIED = driver has been closed or an error (overrun) occurred
+ * which should cancel this operation.
+ * RTEMS_OBJECT_WAS_DELETED, RTEMS_INVALID_ID = driver error.
+ */
+ SPIN_LOCK_IRQ(&pDev->devlock, irqflags);
+ }else{
+ ret = RTEMS_SUCCESSFUL;
+ }
+
+ /* Disable interrupts when receiving CLTUs */
+ pDev->regs->imr = READ_REG(&pDev->regs->imr) & ~GRTC_INT_CS;
+
+ SPIN_UNLOCK_IRQ(&pDev->devlock, irqflags);
+
+ return ret;
+}
+
+/* Open the GRTC device identified by 'minor'. Exclusive access: only one
+ * open at a time, guarded by the driver-global semaphore grtc_dev_sem.
+ * On success all per-device settings are reset to their defaults and the
+ * default hardware configuration is read back from the (reset) core.
+ */
+static rtems_device_driver grtc_open(
+	rtems_device_major_number major,
+	rtems_device_minor_number minor,
+	void *arg)
+{
+	struct grtc_priv *pDev;
+	struct drvmgr_dev *dev;
+
+	FUNCDBG();
+
+	if ( drvmgr_get_dev(&grtc_drv_info.general, minor, &dev) ) {
+		DBG("Wrong minor %d\n", minor);
+		return RTEMS_INVALID_NUMBER;
+	}
+	pDev = (struct grtc_priv *)dev->priv;
+
+	/* Wait until we get semaphore */
+	if ( rtems_semaphore_obtain(grtc_dev_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT) != RTEMS_SUCCESSFUL ){
+		return RTEMS_INTERNAL_ERROR;
+	}
+
+	/* Is device in use? */
+	if ( pDev->open ){
+		rtems_semaphore_release(grtc_dev_sem);
+		return RTEMS_RESOURCE_IN_USE;
+	}
+
+	/* Mark device taken */
+	pDev->open = 1;
+
+	rtems_semaphore_release(grtc_dev_sem);
+
+	DBG("grtc_open: OPENED minor %d (pDev: 0x%x)\n",pDev->minor,(unsigned int)pDev);
+
+	/* Set defaults */
+	pDev->buf = NULL;
+	pDev->_buf = NULL;
+	pDev->buf_custom = 0;
+	pDev->buf_remote = 0;
+	pDev->len = 0;
+	pDev->timeout = 0; /* no timeout */
+	pDev->blocking = 0; /* polling mode */
+	pDev->mode = GRTC_MODE_RAW; /* Always default to Raw mode */
+	pDev->ready.head = NULL;
+	pDev->ready.tail = NULL;
+	pDev->ready.cnt = 0;
+
+	pDev->running = 0;
+	pDev->overrun_condition = 0;
+
+	memset(&pDev->config,0,sizeof(pDev->config));
+
+	/* The core has been reset when we execute here, so it is possible
+	 * to read out defaults from core.
+	 */
+	grtc_hw_get_defaults(pDev,&pDev->config);
+
+	return RTEMS_SUCCESSFUL;
+}
+
+/* Close the device: stop reception if running, reset the core and mark
+ * the device free so grtc_open() may succeed again.
+ */
+static rtems_device_driver grtc_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	struct grtc_priv *pDev;
+	struct drvmgr_dev *dev;
+
+	FUNCDBG();
+
+	if ( drvmgr_get_dev(&grtc_drv_info.general, minor, &dev) ) {
+		return RTEMS_INVALID_NUMBER;
+	}
+	pDev = (struct grtc_priv *)dev->priv;
+
+	if ( pDev->running ){
+		grtc_stop(pDev, 0);
+	}
+
+	/* Reset core */
+	grtc_hw_reset(pDev);
+
+	/* Mark not open */
+	pDev->open = 0;
+
+	return RTEMS_SUCCESSFUL;
+}
+
+/* Raw-mode read entry point. Copies up to rw_args->count data bytes out of
+ * the DMA receive buffer. In blocking mode the call waits (with optional
+ * timeout) for data; with GRTC_BLKMODE_COMPLETE it waits until the full
+ * requested amount is available, falling back to a partial read on timeout.
+ * Only valid in GRTC_MODE_RAW; frame mode uses the GRTC_IOC_RECV ioctl.
+ */
+static rtems_device_driver grtc_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	struct grtc_priv *pDev;
+	struct drvmgr_dev *dev;
+	int count;
+	int left;
+	int timedout;
+	int err;
+	rtems_interval timeout;
+	rtems_libio_rw_args_t *rw_args;
+
+	FUNCDBG();
+
+	if ( drvmgr_get_dev(&grtc_drv_info.general, minor, &dev) ) {
+		return RTEMS_INVALID_NUMBER;
+	}
+	pDev = (struct grtc_priv *)dev->priv;
+
+	/* Reading is allowed while running, or after an overrun stopped the
+	 * core so that buffered data can still be drained.
+	 */
+	if ( !pDev->running && !pDev->overrun_condition ) {
+		return RTEMS_RESOURCE_IN_USE;
+	}
+
+	if ( pDev->mode != GRTC_MODE_RAW ) {
+		return RTEMS_NOT_DEFINED;
+	}
+
+	rw_args = (rtems_libio_rw_args_t *) arg;
+	left = rw_args->count;
+	timedout = 0;
+	timeout = pDev->timeout;
+
+read_from_buffer:
+	/* Read maximally rw_args->count bytes from receive buffer */
+	count = grtc_hw_read_try(pDev,rw_args->buffer,left);
+
+	left -= count;
+
+	DBG("READ %d bytes from DMA, left: %d\n",count,left);
+
+	if ( !timedout && !pDev->overrun_condition && ((count < 1) || ((count < rw_args->count) && (pDev->blocking == GRTC_BLKMODE_COMPLETE))) ){
+		/* didn't read anything (no data available) or we want to wait for all bytes requested.
+		 *
+		 * Wait for data to arrive only in blocking mode
+		 */
+		if ( pDev->blocking ) {
+			if ( (err=grtc_wait_data(pDev,left,timeout)) != RTEMS_SUCCESSFUL ){
+				/* Some kind of error, closed, overrun etc. */
+				if ( err == RTEMS_TIMEOUT ){
+					/* Got a timeout, we try to read as much as possible */
+					timedout = 1;
+					goto read_from_buffer;
+				}
+				return err;
+			}
+			goto read_from_buffer;
+		}
+		/* Non-blocking mode and no data read. */
+		return RTEMS_TIMEOUT;
+	}
+
+	/* Tell caller how much was read. */
+
+	DBG("READ returning %d bytes, left: %d\n",rw_args->count-left,left);
+
+	rw_args->bytes_moved = rw_args->count - left;
+	if ( rw_args->bytes_moved == 0 ) {
+		if ( pDev->overrun_condition ) {
+			/* signal to the user that overrun has happend when
+			 * no more data can be read out.
+			 */
+			return RTEMS_IO_ERROR;
+		}
+		return RTEMS_TIMEOUT;
+	}
+
+	return RTEMS_SUCCESSFUL;
+}
+
+/* Writing is not supported by this driver (the GRTC core is a
+ * telecommand receiver); always returns RTEMS_NOT_IMPLEMENTED.
+ */
+static rtems_device_driver grtc_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	FUNCDBG();
+	return RTEMS_NOT_IMPLEMENTED;
+}
+
+/* Return a NULL-terminated list of frames to their respective pools.
+ * Each frame must already have frm->pool assigned (see
+ * GRTC_IOC_ASSIGN_FRM_POOL); returns -1 on the first frame without a
+ * pool, 0 on success. Frames up to that point have already been added.
+ */
+static int grtc_pool_add_frms(struct grtc_frame *frms)
+{
+	struct grtc_frame *frm, *next;
+
+	/* Add frames to pools */
+	frm = frms;
+	while(frm){
+
+		if ( !frm->pool ) {
+			/* */
+			DBG("GRTC: Frame not assigned to a pool\n");
+			return -1;
+		}
+		next = frm->next; /* Remember next frame to process */
+
+		DBG("GRTC: adding frame 0x%x to pool %d (%d)\n",frm,frm->pool->frame_len,frm->pool->frame_cnt);
+
+		/* Insert Frame into pool (push on the pool's free list) */
+		frm->next = frm->pool->frms;
+		frm->pool->frms = frm;
+		frm->pool->frame_cnt++;
+
+		frm = next;
+	}
+
+	return 0;
+}
+
+/* Take a free frame able to hold 'frame_len' bytes from the smallest
+ * matching pool (pools are kept sorted by ascending frame length, see
+ * GRTC_IOC_POOLS_SETUP). Returns the frame, or NULL if none is found.
+ * When NULL is returned, *error is set to 1 only if no pool at all is
+ * configured for this frame length (frame too long), and left untouched
+ * if matching pools exist but are currently empty.
+ */
+static struct grtc_frame *grtc_pool_get_frm(struct grtc_priv *pDev, int frame_len, int *error)
+{
+	struct grtc_frame *frm;
+	struct grtc_frame_pool *pool;
+	int i;
+
+	/* Loop through all pools until a pool is found
+	 * with a matching (or larger) frame length
+	 */
+	pool = pDev->pools;
+	for (i=0; i<pDev->pool_cnt; i++,pool++) {
+		if ( pool->frame_len >= frame_len ) {
+			/* Found a good pool ==> get frame */
+			frm = pool->frms;
+			if ( !frm ) {
+				/* not enough frames available for this
+				 * frame length, we try next
+				 *
+				 * If this is a severe error add your handling
+				 * code here.
+				 */
+#if 0
+				if ( error )
+					*error = 0;
+				return 0;
+#endif
+				continue;
+			}
+
+			/* Got a frame, the frame is taken out of the
+			 * pool for usage.
+			 */
+			pool->frms = frm->next;
+			pool->frame_cnt--;
+			return frm;
+		}
+	}
+
+	if ( error )
+		*error = 1;
+
+	/* Didn't find any frames */
+	return NULL;
+}
+
+/* Return number of bytes processed, Stops at the first occurance
+ * of the pattern given in 'pattern'
+ *
+ * The DMA buffer stores each received byte as a 16-bit word: data in the
+ * high byte, a control/status byte in the low byte. 'max' is counted in
+ * DMA bytes (hence the steps of 2); the pattern is matched against the
+ * control byte. *found is set to 1 if the pattern was hit, else 0.
+ */
+static int grtc_scan(unsigned short *src, int max, unsigned char pattern, int *found)
+{
+	unsigned short tmp = 0;
+	unsigned int left = max;
+
+	while ( (left>1) && (((tmp=*src) & 0x00ff) != pattern) ) {
+		src++;
+		left-=2;
+	}
+	if ( (tmp & 0xff) == pattern ) {
+		*found = 1;
+	} else {
+		*found = 0;
+	}
+	return max-left;
+}
+
+/* Copy up to 'cnt' data bytes from the 16-bit-word DMA representation in
+ * 'src' into 'buf', stripping the control byte of every word. Copying
+ * stops early when a word's control byte is neither 0x00 (data) nor 0x01
+ * (start-of-frame marker), i.e. on a receive error/abnormal marker.
+ * Returns the number of data bytes actually copied.
+ */
+static int grtc_copy(unsigned short *src, unsigned char *buf, int cnt)
+{
+	unsigned short tmp;
+	int left = cnt;
+
+	while ( (left>0) && ((((tmp=*src) & 0x00ff) == 0x00) || ((tmp & 0x00ff) == 0x01)) ) {
+		*buf++ = tmp>>8;
+		src++;
+		left--;
+	}
+
+	return cnt-left;
+}
+
+
+/* Advance the hardware read pointer (RP) past any non-frame data until the
+ * next start-of-frame marker (control byte 0x01) in the circular DMA
+ * buffer. Returns 0 when a frame start was found (RP left pointing at it),
+ * 1 when no frame start is present in the currently available data.
+ * The buffer is circular, so the available region may consist of an
+ * "upper" part (from RP to buffer end) and a "lower" part (wrap-around).
+ */
+static int grtc_hw_find_frm(struct grtc_priv *pDev)
+{
+	struct grtc_regs *regs = pDev->regs;
+	unsigned int rp, wp, asr, bufmax, rrp, rwp;
+	unsigned int upper, lower;
+	unsigned int count, cnt;
+	int found;
+
+	FUNCDBG();
+
+	rp = READ_REG(&regs->rp);
+	asr = READ_REG(&regs->asr);
+	wp = READ_REG(&regs->wp);
+
+	/* Quick Check for most common case where Start of frame is at next
+	 * data byte.
+	 */
+	if ( rp != wp ) {
+		/* At least 1 byte in buffer */
+		/* RP is a remote (DMA) address; rebase it into the CPU view of the buffer */
+		if ( ((*(unsigned short *)((rp - (unsigned int)pDev->buf_remote) + (unsigned int)pDev->buf)) & 0x00ff) == 0x01 ) {
+			return 0;
+		}
+	}
+
+	bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT;
+	bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */
+
+	/* Relative rp and wp */
+	rrp = rp - (asr & GRTC_ASR_BUFST);
+	rwp = wp - (asr & GRTC_ASR_BUFST);
+
+	lower = grtc_hw_data_avail_lower(rrp,rwp,bufmax);
+	upper = grtc_hw_data_avail_upper(rrp,rwp,bufmax);
+
+	DBG("grtc_hw_find_frm: AVAIL: Lower: %d, Upper: %d\n",lower,upper);
+	DBG("grtc_hw_find_frm: rp: 0x%x, rrp: 0x%x, wp: 0x%x, rwp: 0x%x, bufmax: %d\n, start: 0x%x\n",
+		rp,rrp,wp,rwp,bufmax,pDev->buf_remote);
+
+	if ( (upper+lower) == 0 )
+		return 1;
+
+	/* Count bytes will be read */
+	count = 0;
+	found = 0;
+
+	/* Read from upper part of data buffer */
+	if ( upper > 0 ){
+		cnt = grtc_scan((unsigned short *)((rp - (unsigned int)pDev->buf_remote) + (unsigned int)pDev->buf), upper, 0x01, &found);
+		count = cnt;
+		if ( found ) {
+			DBG("grtc_hw_find_frm: SCANNED upper %d bytes until found\n",cnt);
+			goto out;
+		}
+
+		DBG("grtc_hw_find_frm: SCANNED all upper %d bytes, not found\n",cnt);
+	}
+
+	/* Read from lower part of data buffer */
+	if ( lower > 0 ){
+		cnt = grtc_scan((unsigned short *)pDev->buf, lower, 0x01, &found);
+		count += cnt;
+
+		if ( found ) {
+			DBG("grtc_hw_find_frm: SCANNED lower %d bytes until found\n",cnt);
+			goto out;
+		}
+
+		DBG("grtc_hw_find_frm: SCANNED all lower %d bytes, not found\n",cnt);
+	}
+
+out:
+	/* Update hardware RP pointer to tell hardware about new space available */
+	if ( count > 0 ) {
+		/* Wrap RP back to buffer start if we stepped past the end */
+		if ( (rp+count) >= ((asr&GRTC_ASR_BUFST)+bufmax) ){
+			regs->rp = (rp+count-bufmax);
+		} else {
+			regs->rp = rp+count;
+		}
+	}
+	if ( found )
+		return 0;
+	return 1;
+
+}
+
+/* Verify 'max' bytes of filler words in 'src' (each 16-bit word must be
+ * 0x5500: filler data 0x55 with control byte 0x00), and, when 'end' is
+ * non-zero, that the word following the filler carries the end-of-frame
+ * control byte 0x02. Returns 0 on success, -1 on any mismatch.
+ */
+static int grtc_check_ending(unsigned short *src, int max, int end)
+{
+	while ( max > 0 ) {
+		/* Check Filler */
+		if ( *src != 0x5500 ) {
+			/* Filler is wrong */
+			return -1;
+		}
+		src++;
+		max-=2;
+	}
+
+	/* Check ending (at least */
+	if ( end ) {
+		if ( (*src & 0x00ff) != 0x02 ) {
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Check that 'max' filler bytes plus the end-of-frame marker are present
+ * and valid at the current read position of the circular DMA buffer, then
+ * advance the hardware RP past them. 'max' is given in data bytes and is
+ * converted to DMA bytes (x2, +2 for the ending marker word) below.
+ * Returns 0 on success or if not enough data is buffered yet, -1 when the
+ * filler/ending is corrupt (RP is still advanced in that case).
+ */
+static int grtc_hw_check_ending(struct grtc_priv *pDev, int max)
+{
+	struct grtc_regs *regs = pDev->regs;
+	unsigned int rp, wp, asr, bufmax, rrp, rwp;
+	unsigned int upper, lower;
+	unsigned int count, cnt, left;
+
+	FUNCDBG();
+
+	if ( max < 1 )
+		return 0;
+	max = max*2;
+	max += 2; /* Check ending also (2 byte extra) */
+
+	rp = READ_REG(&regs->rp);
+	asr = READ_REG(&regs->asr);
+	bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT;
+	bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */
+	wp = READ_REG(&regs->wp);
+
+	/* Relative rp and wp */
+	rrp = rp - (asr & GRTC_ASR_BUFST);
+	rwp = wp - (asr & GRTC_ASR_BUFST);
+
+	lower = grtc_hw_data_avail_lower(rrp,rwp,bufmax);
+	upper = grtc_hw_data_avail_upper(rrp,rwp,bufmax);
+
+	DBG("grtc_hw_check_ending: AVAIL: Lower: %d, Upper: %d\n",lower,upper);
+	DBG("grtc_hw_check_ending: rp: 0x%x, rrp: 0x%x, wp: 0x%x, rwp: 0x%x, bufmax: %d\n, start: 0x%x\n",
+		rp,rrp,wp,rwp,bufmax,pDev->buf_remote);
+
+	/* Wait until the complete filler + ending is in the buffer */
+	if ( (upper+lower) < max )
+		return 0;
+
+	/* Count bytes will be read */
+	count = max;
+	left = count;
+
+	/* Read from upper part of data buffer */
+	if ( upper > 0 ){
+		if ( left <= upper ){
+			/* Everything including the ending marker fits in the upper part */
+			cnt = left;
+			if ( grtc_check_ending((unsigned short *)((rp-(unsigned int)pDev->buf_remote)+(unsigned int)pDev->buf), cnt-2, 1) ) {
+				return -1;
+			}
+		}else{
+			cnt = upper; /* Read all upper data available */
+			if ( grtc_check_ending((unsigned short *)((rp-(unsigned int)pDev->buf_remote)+(unsigned int)pDev->buf), cnt, 0) ) {
+				return -1;
+			}
+		}
+		left -= cnt;
+	}
+
+	/* Read from lower part of data buffer */
+	if ( left > 0 ){
+		cnt = left;
+		if ( grtc_check_ending((unsigned short *)pDev->buf, cnt-2, 1) ) {
+			return -1;
+		}
+		left -= cnt;
+	}
+
+	/* Update hardware RP pointer to tell hardware about new space available */
+	if ( (rp+count) >= ((asr&GRTC_ASR_BUFST)+bufmax) ){
+		regs->rp = (rp+count-bufmax);
+	} else {
+		regs->rp = rp+count;
+	}
+
+	return 0;
+}
+
+/* Copies Data from DMA area to buf, the control bytes are stripped. For
+ * every data byte, in the DMA area, one control byte is stripped.
+ *
+ * 'max' is the number of data bytes wanted. If 'partial' is zero, nothing
+ * is copied unless all 'max' bytes are available. The hardware RP is
+ * advanced past everything consumed (note count*2: one DMA word per data
+ * byte). Returns the number of data bytes copied, or -1 if grtc_copy()
+ * stopped early on a receive-error control byte.
+ */
+static int grtc_hw_copy(struct grtc_priv *pDev, unsigned char *buf, int max, int partial)
+{
+	struct grtc_regs *regs = pDev->regs;
+	unsigned int rp, wp, asr, bufmax, rrp, rwp;
+	unsigned int upper, lower;
+	unsigned int count, cnt, left;
+	int ret, tot, tmp;
+
+	FUNCDBG();
+
+	if ( max < 1 )
+		return 0;
+
+	rp = READ_REG(&regs->rp);
+	asr = READ_REG(&regs->asr);
+	bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT;
+	bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */
+	wp = READ_REG(&regs->wp);
+
+	/* Relative rp and wp */
+	rrp = rp - (asr & GRTC_ASR_BUFST);
+	rwp = wp - (asr & GRTC_ASR_BUFST);
+
+	/* >>1: convert available DMA bytes into data bytes (16-bit word each) */
+	lower = grtc_hw_data_avail_lower(rrp,rwp,bufmax) >> 1;
+	upper = grtc_hw_data_avail_upper(rrp,rwp,bufmax) >> 1;
+
+	DBG("grtc_hw_copy: AVAIL: Lower: %d, Upper: %d\n",lower,upper);
+	DBG("grtc_hw_copy: rp: 0x%x, rrp: 0x%x, wp: 0x%x, rwp: 0x%x, bufmax: %d\n, start: 0x%x\n",
+		rp,rrp,wp,rwp,bufmax,pDev->buf_remote);
+
+	if ( (upper+lower) == 0 || (!partial && ((upper+lower)<max) ) )
+		return 0;
+
+	/* Count bytes will be read */
+	count = (upper+lower) > max ? max : (upper+lower);
+	left = count;
+	tot = 0;
+
+	/* Read from upper part of data buffer */
+	if ( upper > 0 ){
+		if ( left < upper ){
+			cnt = left;
+		}else{
+			cnt = upper; /* Read all upper data available */
+		}
+		DBG("grtc_hw_copy: COPYING %d from upper\n",cnt);
+		if ( (tot=grtc_copy((unsigned short *)((rp-(unsigned int)pDev->buf_remote)+(unsigned int)pDev->buf), buf, cnt)) != cnt ) {
+			/* Failed to copy due to an receive error */
+			DBG("grtc_hw_copy(upper): not all in DMA buffer (%d)\n",tot);
+			count = tot;
+			ret = -1;
+			goto out;
+		}
+		buf += cnt;
+		left -= cnt;
+	}
+
+	/* Read from lower part of data buffer */
+	if ( left > 0 ){
+		if ( left < lower ){
+			cnt = left;
+		}else{
+			cnt = lower; /* Read all lower data available */
+		}
+		DBG("grtc_hw_copy: COPYING %d from lower\n",cnt);
+		if ( (tmp=grtc_copy((unsigned short *)pDev->buf, buf, cnt)) != cnt ) {
+			/* Failed to copy due to an receive error */
+			DBG("grtc_hw_copy(lower): not all in DMA buffer (%d)\n",tot);
+			count = tot+tmp;
+			ret = -1;
+			goto out;
+		}
+		buf += cnt;
+		left -= cnt;
+	}
+	ret = count;
+
+out:
+	count = count*2;
+	/* Update hardware RP pointer to tell hardware about new space available */
+	if ( (rp+count) >= ((asr&GRTC_ASR_BUFST)+bufmax) ){
+		regs->rp = (rp+count-bufmax);
+	} else {
+		regs->rp = rp+count;
+	}
+
+	return ret;
+}
+
+#ifdef DEBUG_ERROR
+/* Debug aid (compiled only with DEBUG_ERROR): stop the receiver by writing
+ * the CODEC reset sequence to COR twice, then record the error code in a
+ * 129-entry circular log so the failure point can be inspected post-mortem.
+ */
+void grtc_log_error(struct grtc_priv *pDev, int err)
+{
+	/* Stop Receiver */
+	*(volatile unsigned int *)&pDev->regs->cor = 0x55000000;
+	*(volatile unsigned int *)&pDev->regs->cor = 0x55000000;
+	pDev->last_error[pDev->last_error_cnt] = err;
+	if ( ++pDev->last_error_cnt > 128 )
+		pDev->last_error_cnt = 0;
+}
+#endif
+
+/* Read one frame from DMA buffer
+ *
+ * Return Values
+ * Zero - nothing more to process
+ * 1 - more to process, no free frames
+ * 2 - more to process, frame received
+ * negative - more to process, frame dropped
+ *
+ * Implemented as a resumable state machine (pDev->frame_state): each case
+ * deliberately FALLS THROUGH to the next once its step completes, and the
+ * function returns 0 mid-frame when the DMA buffer does not yet hold
+ * enough data, resuming at the same state on the next call.
+ */
+static int process_dma(struct grtc_priv *pDev)
+{
+	int ret, err;
+	int left, total_len;
+	unsigned char *dst;
+	struct grtc_frame *frm;
+
+	switch( pDev->frame_state ) {
+	case FRM_STATE_NONE:
+		DBG2("FRAME_STATE_NONE\n");
+
+		/* Find Start of next frame by searching for 0x01 */
+		ret = grtc_hw_find_frm(pDev);
+		if ( ret != 0 ) {
+			/* Frame start not found */
+			return 0;
+		}
+
+		/* Start of frame found, Try to copy header */
+		pDev->frm = NULL;
+		pDev->frame_state = FRM_STATE_HDR;
+
+		/* fallthrough */
+	case FRM_STATE_HDR:
+		DBG2("FRAME_STATE_HDR\n");
+
+		/* Wait for all of header to be in place by setting partial to 0 */
+		ret = grtc_hw_copy(pDev, (unsigned char *)pDev->hdr, 5, 0);
+		if ( ret < 0 ) {
+			/* Error copying header, restart scanning for new frame */
+			DEBUG_ERR_LOG(pDev,1);
+			pDev->stats.err++;
+			pDev->stats.err_hdr++;
+			DBG("FRAME_STATE_HDR: copying failed %d\n",ret);
+			pDev->frame_state = FRM_STATE_NONE;
+			return -1;
+		} else if ( ret != 5 ) {
+			DBG("FRAME_STATE_HDR: no header (%d)\n",ret);
+			/* Not all bytes available, come back later */
+			return 0;
+		}
+
+		/* The complete header has been copied, parse it */
+		/* Frame length field: low 10 bits of the second header halfword, plus 1 */
+		pDev->frmlen = (((unsigned short *)pDev->hdr)[1] & 0x3ff)+1;
+		if ( pDev->frmlen < 5 ) {
+			/* Error: frame length is not correct */
+			pDev->stats.err++;
+			pDev->stats.err_hdr++;
+			DBG("FRAME_STATE_HDR: frame length error: %d\n", pDev->frmlen);
+			pDev->frame_state = FRM_STATE_NONE;
+			return -1;
+		}
+		pDev->frame_state = FRM_STATE_ALLOC;
+
+		/* fallthrough */
+	case FRM_STATE_ALLOC:
+		DBG2("FRAME_STATE_ALLOC\n");
+		/* Header has been read, allocate a frame to put payload and header into */
+
+		/* Allocate Frame matching Frame length */
+		err = 0;
+		frm = grtc_pool_get_frm(pDev,pDev->frmlen,&err);
+		if ( !frm ) {
+			/* Couldn't find frame */
+			DEBUG_ERR_LOG(pDev,2);
+			pDev->stats.dropped++;
+			DBG2("No free frames\n");
+			if ( err == 0 ){
+				/* Frame length exist in pool configuration, but no
+				 * frames are available for that frame length.
+				 */
+				DEBUG_ERR_LOG(pDev,3);
+				pDev->stats.dropped_no_buf++;
+				return 1;
+			} else {
+				/* Frame length of incoming frame is larger than the
+				 * frame length in any of the configured frame pools.
+				 *
+				 * This may be because of an corrupt header. We simply
+				 * scan for the end of frame marker in the DMA buffer
+				 * so we can drop the frame.
+				 */
+				DEBUG_ERR_LOG(pDev,4);
+				pDev->stats.dropped_too_long++;
+				pDev->frame_state = FRM_STATE_NONE;
+				return -2;
+			}
+		}
+		frm->len = 5; /* Only header currently in frame */
+
+		/* Copy Frame Header into frame structure */
+		((unsigned char*)&frm->hdr)[0] = ((unsigned char*)pDev->hdr)[0];
+		((unsigned char*)&frm->hdr)[1] = ((unsigned char*)pDev->hdr)[1];
+		((unsigned char*)&frm->hdr)[2] = ((unsigned char*)pDev->hdr)[2];
+		((unsigned char*)&frm->hdr)[3] = ((unsigned char*)pDev->hdr)[3];
+		((unsigned char*)&frm->hdr)[4] = ((unsigned char*)pDev->hdr)[4];
+
+		/* Calc Total and Filler byte count in frame
+		 * (frames are padded up to a multiple of 7 bytes)
+		 */
+		total_len = pDev->frmlen / 7;
+		total_len = total_len * 7;
+		if ( pDev->frmlen != total_len )
+			total_len += 7;
+
+		pDev->filler = total_len - pDev->frmlen;
+
+		pDev->frame_state = FRM_STATE_PAYLOAD;
+		pDev->frm = frm;
+
+		/* fallthrough */
+	case FRM_STATE_PAYLOAD:
+		DBG2("FRAME_STATE_PAYLOAD\n");
+		/* Parts of payload and the complete header has been read */
+		frm = pDev->frm;
+
+		/* frm->len includes the 5 header bytes already stored in frm->hdr */
+		dst = (unsigned char *)&frm->data[frm->len-5];
+		left = pDev->frmlen-frm->len;
+
+		ret = grtc_hw_copy(pDev,dst,left,1);
+		if ( ret < 0 ) {
+			DEBUG_ERR_LOG(pDev,5);
+			/* Error copying payload, restart scanning for new frame */
+			pDev->frame_state = FRM_STATE_NONE;
+			frm->next = NULL;
+			grtc_pool_add_frms(frm);
+			pDev->frm = NULL;
+			pDev->stats.err++;
+			pDev->stats.err_payload++;
+			return -1;
+		} else if ( ret != left ) {
+			/* Not all bytes available, come back later */
+			frm->len += ret;
+			return 0;
+		}
+		frm->len += ret;
+		pDev->frame_state = FRM_STATE_FILLER;
+
+		/* fallthrough */
+	case FRM_STATE_FILLER:
+		DBG2("FRAME_STATE_FILLER\n");
+		/* check filler data */
+		frm = pDev->frm;
+
+		ret = grtc_hw_check_ending(pDev,pDev->filler);
+		if ( ret != 0 ) {
+			/* Error in frame, drop frame */
+			DEBUG_ERR_LOG(pDev,6);
+			pDev->frame_state = FRM_STATE_NONE;
+			frm->next = NULL;
+			grtc_pool_add_frms(frm);
+			pDev->frm = NULL;
+			pDev->stats.err++;
+			pDev->stats.err_ending++;
+			return -1;
+		}
+
+		/* A complete frame received, put it into received frame queue */
+		if ( pDev->ready.head ) {
+			/* Queue not empty */
+			pDev->ready.tail->next = frm;
+		} else {
+			/* Queue empty */
+			pDev->ready.head = frm;
+		}
+		pDev->ready.tail = frm;
+		frm->next = NULL;
+		pDev->ready.cnt++;
+		pDev->stats.frames_recv++;
+
+		pDev->frame_state = FRM_STATE_NONE;
+		frm->next = NULL;
+		return 2;
+
+#if 0
+	case FRM_STATE_DROP:
+		DBG2("FRAME_STATE_DROP\n");
+		break;
+#endif
+
+	default:
+		printk("GRTC: internal error\n");
+		pDev->frame_state = FRM_STATE_NONE;
+		break;
+	}
+
+	return 0;
+}
+
+/* IOCTL dispatcher for the GRTC driver: start/stop the receiver, configure
+ * the DMA buffer and core, set up and fill frame pools, and deliver
+ * received frames (frame mode) or hardware status/statistics to the user.
+ * Returns RTEMS_SUCCESSFUL, or an error status per command (e.g.
+ * RTEMS_RESOURCE_IN_USE when the operation conflicts with the running
+ * state, RTEMS_INVALID_NAME for bad arguments).
+ */
+static rtems_device_driver grtc_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	struct grtc_priv *pDev;
+	struct drvmgr_dev *dev;
+	rtems_libio_ioctl_args_t *ioarg = (rtems_libio_ioctl_args_t *)arg;
+	unsigned int *data = ioarg->buffer;
+	int status,frm_len,i,ret;
+	struct grtc_ioc_buf_params *buf_arg;
+	struct grtc_ioc_config *cfg;
+	struct grtc_ioc_hw_status *hwregs;
+	struct grtc_ioc_pools_setup *pocfg;
+	struct grtc_ioc_assign_frm_pool *poassign;
+	struct grtc_frame *frm, *frms;
+	struct grtc_frame_pool *pool;
+	struct grtc_list *frmlist;
+	struct grtc_ioc_stats *stats;
+	unsigned int mem;
+	IRQ_LOCAL_DECLARE(oldLevel);
+
+	FUNCDBG();
+
+	if ( drvmgr_get_dev(&grtc_drv_info.general, minor, &dev) ) {
+		return RTEMS_INVALID_NUMBER;
+	}
+	pDev = (struct grtc_priv *)dev->priv;
+
+	if (!ioarg)
+		return RTEMS_INVALID_NAME;
+
+	ioarg->ioctl_return = 0;
+	switch(ioarg->command) {
+	case GRTC_IOC_START:
+		if ( pDev->running ) {
+			return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+		}
+		if ( (status=grtc_start(pDev)) != RTEMS_SUCCESSFUL ){
+			return status;
+		}
+		/* Register ISR and Unmask interrupt */
+		drvmgr_interrupt_register(pDev->dev, 0, "grtc", grtc_interrupt, pDev);
+
+		/* Read and write are now open... */
+		break;
+
+	case GRTC_IOC_STOP:
+		if ( !pDev->running ) {
+			return RTEMS_RESOURCE_IN_USE;
+		}
+		drvmgr_interrupt_unregister(pDev->dev, 0, grtc_interrupt, pDev);
+		grtc_stop(pDev, 0);
+		break;
+
+	case GRTC_IOC_ISSTARTED:
+		if ( !pDev->running ) {
+			return RTEMS_RESOURCE_IN_USE;
+		} else if ( pDev->overrun_condition ) {
+			return RTEMS_IO_ERROR;
+		}
+		break;
+
+	case GRTC_IOC_SET_BLOCKING_MODE:
+		/* The mode is passed by value in the 'buffer' pointer itself */
+		if ( (unsigned int)data > GRTC_BLKMODE_COMPLETE ) {
+			return RTEMS_INVALID_NAME;
+		}
+		DBG("GRTC: Set blocking mode: %d\n",(unsigned int)data);
+		pDev->blocking = (unsigned int)data;
+		break;
+
+	case GRTC_IOC_SET_TIMEOUT:
+		DBG("GRTC: Timeout: %d\n",(unsigned int)data);
+		pDev->timeout = (rtems_interval)data;
+		break;
+
+	case GRTC_IOC_SET_BUF_PARAM:
+		if ( pDev->running ) {
+			return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+		}
+
+		buf_arg = (struct grtc_ioc_buf_params *)data;
+		if ( !buf_arg ) {
+			return RTEMS_INVALID_NAME;
+		}
+
+		DBG("GRTC: IOC_SET_BUF_PARAM: Len: 0x%x, Custom Buffer: 0x%x\n",buf_arg->length,buf_arg->custom_buffer);
+
+		/* Check alignment need, skip bit 0 since that bit only indicates remote address or not */
+		if ( (unsigned int)buf_arg->custom_buffer & (~GRTC_BUF_MASK) & (~0x1) ) {
+			return RTEMS_INVALID_NAME;
+		}
+
+		/* Length is given in 1kByte blocks, max 256kByte buffer */
+		if ( buf_arg->length > 0x100 ){
+			DBG("GRTC: Too big buffer requested\n");
+			return RTEMS_INVALID_NAME;
+		}
+
+		/* If current buffer allocated by driver we must free it */
+		if ( !pDev->buf_custom && pDev->buf ){
+			free(pDev->_buf);
+			pDev->_buf = NULL;
+		}
+		pDev->buf = NULL;
+		pDev->len = buf_arg->length*1024;
+
+		if (pDev->len <= 0)
+			break;
+		mem = (unsigned int)buf_arg->custom_buffer;
+		pDev->buf_custom = mem;
+
+		if (mem & 1) {
+			/* Remote address given, the address is as the GRTC
+			 * core looks at it. Translate the base address into
+			 * an address that the CPU can understand.
+			 */
+			pDev->buf_remote = (void *)(mem & ~0x1);
+			drvmgr_translate_check(pDev->dev, DMAMEM_TO_CPU,
+						(void *)pDev->buf_remote,
+						(void **)&pDev->buf,
+						pDev->len);
+		} else {
+			if (mem == 0) {
+				/* No user buffer, allocate a correctly aligned one ourselves */
+				pDev->buf = grtc_memalign((~GRTC_ASR_BUFST)+1,pDev->len,&pDev->_buf);
+				DBG("grtc_ioctl: SETBUF: new buf: 0x%x(0x%x), Len: %d\n",pDev->buf,pDev->_buf,pDev->len);
+				if (!pDev->buf){
+					pDev->len = 0;
+					pDev->buf_custom = 0;
+					pDev->_buf = NULL;
+					pDev->buf_remote = 0;
+					DBG("GRTC: Failed to allocate memory\n");
+					return RTEMS_NO_MEMORY;
+				}
+			} else{
+				pDev->buf = buf_arg->custom_buffer;
+			}
+
+			/* Translate into a remote address so that GRTC core
+			 * on a remote AMBA bus (for example over the PCI bus)
+			 * gets a valid address
+			 */
+			drvmgr_translate_check(pDev->dev, CPUMEM_TO_DMA,
+						(void *)pDev->buf,
+						(void **)&pDev->buf_remote,
+						pDev->len);
+		}
+		break;
+
+	case GRTC_IOC_GET_BUF_PARAM:
+		if ( pDev->running ) {
+			return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+		}
+
+		buf_arg = (struct grtc_ioc_buf_params *)data;
+		if ( !buf_arg ) {
+			return RTEMS_INVALID_NAME;
+		}
+
+		buf_arg->length = pDev->len >> 10; /* Length in 1kByte blocks */
+		if ( pDev->buf_custom )
+			buf_arg->custom_buffer =(void *)pDev->buf;
+		else
+			buf_arg->custom_buffer = 0; /* Don't reveal internal driver buffer */
+		break;
+
+	case GRTC_IOC_SET_CONFIG:
+		cfg = (struct grtc_ioc_config *)data;
+		if ( !cfg ) {
+			return RTEMS_INVALID_NAME;
+		}
+
+		if ( pDev->running ) {
+			return RTEMS_RESOURCE_IN_USE;
+		}
+
+		pDev->config = *cfg;
+		break;
+
+	case GRTC_IOC_GET_CONFIG:
+		cfg = (struct grtc_ioc_config *)data;
+		if ( !cfg ) {
+			return RTEMS_INVALID_NAME;
+		}
+
+		*cfg = pDev->config;
+		break;
+
+	case GRTC_IOC_GET_HW_STATUS:
+		hwregs = (struct grtc_ioc_hw_status *)data;
+		if ( !hwregs ) {
+			return RTEMS_INVALID_NAME;
+		}
+		/* We disable interrupt on the local CPU in order to get a
+		 * snapshot of the registers.
+		 */
+		IRQ_LOCAL_DISABLE(oldLevel);
+		hwregs->sir	= READ_REG(&pDev->regs->sir);
+		hwregs->far	= READ_REG(&pDev->regs->far);
+		hwregs->clcw1	= READ_REG(&pDev->regs->clcw1);
+		hwregs->clcw2	= READ_REG(&pDev->regs->clcw2);
+		hwregs->phir	= READ_REG(&pDev->regs->phir);
+		hwregs->str	= READ_REG(&pDev->regs->str);
+		IRQ_LOCAL_ENABLE(oldLevel);
+		break;
+
+	case GRTC_IOC_GET_STATS:
+		stats = (struct grtc_ioc_stats *)data;
+		if ( !stats ) {
+			return RTEMS_INVALID_NAME;
+		}
+		memcpy(stats,&pDev->stats,sizeof(struct grtc_ioc_stats));
+		break;
+
+	case GRTC_IOC_CLR_STATS:
+		memset(&pDev->stats,0,sizeof(struct grtc_ioc_stats));
+		break;
+
+	case GRTC_IOC_SET_MODE:
+		if ( pDev->running ) {
+			return RTEMS_RESOURCE_IN_USE;
+		}
+		if ( (int)data == GRTC_MODE_FRAME ) {
+			pDev->mode = GRTC_MODE_FRAME;
+		} else if ( (int)data == GRTC_MODE_RAW ) {
+			pDev->mode = GRTC_MODE_RAW;
+		} else {
+			return RTEMS_INVALID_NAME;
+		}
+		break;
+
+	case GRTC_IOC_POOLS_SETUP:
+		if ( pDev->running ) {
+			return RTEMS_RESOURCE_IN_USE;
+		}
+		pocfg = (struct grtc_ioc_pools_setup *)data;
+		if ( (pDev->mode != GRTC_MODE_FRAME) || !pocfg ) {
+			return RTEMS_INVALID_NAME;
+		}
+
+		/* Check that list is sorted (strictly ascending frame lengths) */
+		frm_len = 0;
+		for(i=0;i<pocfg->pool_cnt;i++){
+			if ( pocfg->pool_frame_len[i] <= frm_len ) {
+				return RTEMS_INVALID_NAME;
+			}
+			frm_len = pocfg->pool_frame_len[i];
+		}
+
+		/* Ok, we trust user. The pool descriptions are allocated
+		 * but not frames, that the user must do self.
+		 */
+		if ( pDev->pools ) {
+			free(pDev->pools);
+		}
+		pDev->pools = grlib_malloc(pocfg->pool_cnt * sizeof(*pDev->pools));
+		if ( !pDev->pools ) {
+			pDev->pool_cnt = 0;
+			return RTEMS_NO_MEMORY;
+		}
+		pDev->pool_cnt = pocfg->pool_cnt;
+		for (i=0;i<pocfg->pool_cnt;i++) {
+			pDev->pools[i].frame_len = pocfg->pool_frame_len[i];
+			pDev->pools[i].frame_cnt = 0;
+			pDev->pools[i].frms = NULL;
+		}
+		break;
+
+	case GRTC_IOC_ASSIGN_FRM_POOL:
+		if ( pDev->running ) {
+			return RTEMS_RESOURCE_IN_USE;
+		}
+
+		if ( (pDev->mode != GRTC_MODE_FRAME) ) {
+			return RTEMS_INVALID_NAME;
+		}
+
+		poassign = (struct grtc_ioc_assign_frm_pool *)data;
+		if ( !poassign ) {
+			return RTEMS_INVALID_NAME;
+		}
+
+		/* Find pool to assign the frames to */
+		pool = NULL;
+		for(i=0; i<pDev->pool_cnt; i++) {
+			if ( pDev->pools[i].frame_len == poassign->frame_len ) {
+				pool = &pDev->pools[i];
+				break;
+			}
+		}
+		if ( !pool ) {
+			/* No Pool matching frame length */
+			return RTEMS_INVALID_NAME;
+		}
+
+		/* Assign frames to pool */
+		frm = poassign->frames;
+		while(frm){
+			frm->pool = pool;	/* Assign Frame to pool */
+			frm = frm->next;
+		}
+		break;
+
+	case GRTC_IOC_ADD_BUFF:
+		frms = (struct grtc_frame *)data;
+
+		if ( (pDev->mode != GRTC_MODE_FRAME) ) {
+			return RTEMS_NOT_DEFINED;
+		}
+		if ( !frms ) {
+			return RTEMS_INVALID_NAME;
+		}
+
+		/* Add frames to respective pools */
+		if ( grtc_pool_add_frms(frms) ) {
+			return RTEMS_INVALID_NAME;
+		}
+		break;
+
+	/* Try to read as much data as possible from DMA area and
+	 * put it into free frames.
+	 *
+	 * If receiver is in stopped mode, let user only read previously
+	 * received frames.
+	 */
+	case GRTC_IOC_RECV:
+
+		if ( (pDev->mode != GRTC_MODE_FRAME) ) {
+			return RTEMS_NOT_DEFINED;
+		}
+
+		/* NOTE: the parentheses around the assignment matter: the
+		 * previous code read (ret=process_dma(pDev) == 2) which
+		 * assigned the comparison result to 'ret', so the -1
+		 * (frame dropped) case never continued the loop.
+		 */
+		while ( pDev->running && (((ret=process_dma(pDev)) == 2) || (ret == -1)) ) {
+			/* Frame received or dropped, process next frame */
+		}
+
+		/* Take frames out from ready queue and put them to user */
+		frmlist = (struct grtc_list *)data;
+		if ( !frmlist ) {
+			return RTEMS_INVALID_NAME;
+		}
+
+		frmlist->head = pDev->ready.head;
+		frmlist->tail = pDev->ready.tail;
+		frmlist->cnt = pDev->ready.cnt;
+
+		/* Empty list */
+		pDev->ready.head = NULL;
+		pDev->ready.tail = NULL;
+		pDev->ready.cnt = 0;
+
+		if ((frmlist->cnt == 0) && pDev->overrun_condition) {
+			/* signal to the user that overrun has happend when
+			 * no more data can be read out.
+			 */
+			return RTEMS_IO_ERROR;
+		}
+		break;
+
+	case GRTC_IOC_GET_CLCW_ADR:
+		if ( !data ) {
+			return RTEMS_INVALID_NAME;
+		}
+		*data = (unsigned int)&pDev->regs->clcw1;
+		break;
+
+	default:
+		return RTEMS_NOT_DEFINED;
+	}
+	return RTEMS_SUCCESSFUL;
+}
+
+/* Interrupt service routine. Reads PISR (which clears pending interrupts),
+ * handles overrun by stopping the core, and on "CLTU stored" wakes a
+ * reader blocked in grtc_wait_data() -- but in COMPLETE blocking mode only
+ * once enough data has accumulated. Finally acknowledges handled bits in
+ * PICR. Runs in interrupt context; uses the ISR spin-lock variant.
+ */
+static void grtc_interrupt(void *arg)
+{
+	struct grtc_priv *pDev = arg;
+	struct grtc_regs *regs = pDev->regs;
+	unsigned int status;
+	SPIN_ISR_IRQFLAGS(irqflags);
+
+	/* Clear interrupt by reading it */
+	status = READ_REG(&regs->pisr);
+
+	/* Spurious Interrupt? */
+	if ( !pDev->running )
+		return;
+
+	if ( status & GRTC_INT_OV ){
+		/* Stop core (Disable receiver, interrupts), set overrun condition,
+		 * Flush semaphore if thread waiting for data in grtc_wait_data().
+		 */
+		grtc_stop(pDev, 1);
+
+		/* No need to handle the rest of the interrupts, we are stopped */
+		goto out;
+	}
+
+	if ( status & GRTC_INT_CS ){
+		SPIN_LOCK(&pDev->devlock, irqflags);
+
+		if ( (pDev->blocking==GRTC_BLKMODE_COMPLETE) && pDev->timeout ){
+			/* Signal to thread only if enough data is available */
+			if ( pDev->wait_for_nbytes > grtc_data_avail(pDev) ){
+				/* Not enough data available */
+				goto procceed_processing_interrupts;
+			}
+
+			/* Enough data is available which means that we should
+			 * wake up the thread sleeping.
+			 */
+		}
+
+		/* Disable further CLTUs Stored interrupts, no point until
+		 * thread waiting for them says it want to wait for more.
+		 */
+		regs->imr = READ_REG(&regs->imr) & ~GRTC_INT_CS;
+		SPIN_UNLOCK(&pDev->devlock, irqflags);
+
+		/* Signal Semaphore to wake waiting thread in read() */
+		rtems_semaphore_release(pDev->sem_rx);
+	}
+
+procceed_processing_interrupts:
+
+	/* The remaining status bits are acknowledged below but currently
+	 * have no handler attached.
+	 */
+	if ( status & GRTC_INT_CR ){
+
+	}
+
+	if ( status & GRTC_INT_FAR ){
+
+	}
+
+	if ( status & GRTC_INT_BLO ){
+
+	}
+
+	if ( status & GRTC_INT_RFA ){
+
+	}
+out:
+	if ( status )
+		regs->picr = status;
+}
+
+/* Driver-level initialization: create the driver-global binary semaphore
+ * (count 1) that serializes grtc_open() across all GRTC devices.
+ * Per-device setup is done by the driver manager, not here.
+ */
+static rtems_device_driver grtc_initialize(
+	rtems_device_major_number major,
+	rtems_device_minor_number unused,
+	void *arg
+	)
+{
+	/* Device Semaphore created with count = 1 */
+	if ( rtems_semaphore_create(rtems_build_name('G', 'R', 'T', 'C'),
+		1,
+		RTEMS_FIFO|RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+		0,
+		&grtc_dev_sem) != RTEMS_SUCCESSFUL ) {
+		return RTEMS_INTERNAL_ERROR;
+	}
+
+	return RTEMS_SUCCESSFUL;
+}
diff --git a/bsps/shared/grlib/tmtc/grtm.c b/bsps/shared/grlib/tmtc/grtm.c
new file mode 100644
index 0000000000..43476aaaad
--- /dev/null
+++ b/bsps/shared/grlib/tmtc/grtm.c
@@ -0,0 +1,1613 @@
+/* GRTM CCSDS Telemetry Encoder driver
+ *
+ * COPYRIGHT (c) 2007.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bsp.h>
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <ctype.h>
+#include <rtems/bspIo.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/grtm.h>
+
+#include <grlib/grlib_impl.h>
+
+/*
+#define DEBUG
+#define DEBUGFUNCS
+*/
+
+#include <grlib/debug_defs.h>
+
+/* GRTM register map */
+struct grtm_regs {
+ volatile unsigned int dma_ctrl; /* DMA Control Register (0x00) */
+ volatile unsigned int dma_status; /* DMA Status Register (0x04) */
+ volatile unsigned int dma_len; /* DMA Length Register (0x08) */
+ volatile unsigned int dma_bd; /* DMA Descriptor Pointer Register (0x0c) */
+
+ volatile unsigned int dma_cfg; /* DMA Configuration Register (0x10) */
+ volatile unsigned int revision; /* GRTM Revision Register (0x14) */
+
+ int unused0[(0x80-0x18)/4];
+
+ volatile unsigned int ctrl; /* TM Control Register (0x80) */
+ volatile unsigned int status; /* TM Status Register (0x84) */
+ volatile unsigned int cfg; /* TM Configuration Register (0x88) */
+ volatile unsigned int size; /* TM Size Register (0x8c) */
+
+ volatile unsigned int phy; /* TM Physical Layer Register (0x90) */
+ volatile unsigned int code; /* TM Coding Sub-Layer Register (0x94) */
+ volatile unsigned int asmr; /* TM Attached Synchronization Marker Register (0x98) */
+
+ int unused1;
+
+ volatile unsigned int all_frm; /* TM All Frames Generation Register (0xa0) */
+ volatile unsigned int mst_frm; /* TM Master Channel Frame Generation Register (0xa4) */
+ volatile unsigned int idle_frm; /* TM Idle Frame Generation Register (0xa8) */
+
+ int unused2[(0xc0-0xac)/4];
+
+ volatile unsigned int fsh[4]; /* TM FSH/Insert Zone Registers (0xc0..0xcc) */
+
+ volatile unsigned int ocf; /* TM Operational Control Field Register (0xd0) */
+};
+
+/* DMA Control Register (0x00) */
+#define GRTM_DMA_CTRL_EN_BIT 0
+#define GRTM_DMA_CTRL_IE_BIT 1
+#define GRTM_DMA_CTRL_TXRST_BIT 2
+#define GRTM_DMA_CTRL_RST_BIT 3
+#define GRTM_DMA_CTRL_TFIE_BIT 4
+
+#define GRTM_DMA_CTRL_EN (1<<GRTM_DMA_CTRL_EN_BIT)
+#define GRTM_DMA_CTRL_IE (1<<GRTM_DMA_CTRL_IE_BIT)
+#define GRTM_DMA_CTRL_TXRST (1<<GRTM_DMA_CTRL_TXRST_BIT)
+#define GRTM_DMA_CTRL_RST (1<<GRTM_DMA_CTRL_RST_BIT)
+#define GRTM_DMA_CTRL_TFIE (1<<GRTM_DMA_CTRL_TFIE_BIT)
+
+/* DMA Status Register (0x04) */
+#define GRTM_DMA_STS_TE_BIT 0
+#define GRTM_DMA_STS_TI_BIT 1
+#define GRTM_DMA_STS_TA_BIT 2
+#define GRTM_DMA_STS_TFF_BIT 3
+#define GRTM_DMA_STS_TFS_BIT 4
+
+#define GRTM_DMA_STS_TE (1<<GRTM_DMA_STS_TE_BIT)
+#define GRTM_DMA_STS_TI (1<<GRTM_DMA_STS_TI_BIT)
+#define GRTM_DMA_STS_TA (1<<GRTM_DMA_STS_TA_BIT)
+#define GRTM_DMA_STS_TFF (1<<GRTM_DMA_STS_TFF_BIT)
+#define GRTM_DMA_STS_TFS (1<<GRTM_DMA_STS_TFS_BIT)
+#define GRTM_DMA_STS_ALL 0x1f
+
+/* DMA Length Register (0x08) */
+#define GRTM_DMA_LEN_LEN_BIT 0
+#define GRTM_DMA_LEN_LIM_BIT 16
+
+#define GRTM_DMA_LEN_LEN (0x7ff<<GRTM_DMA_LEN_LEN_BIT)
+#define GRTM_DMA_LEN_LIM (0x3ff<<GRTM_DMA_LEN_LIM_BIT)
+
+/* DMA Descriptor Pointer Register (0x0c) */
+#define GRTM_DMA_BD_INDEX_BIT 0
+#define GRTM_DMA_BD_BASE_BIT 10
+
+#define GRTM_DMA_BD_INDEX (0x3ff<<GRTM_DMA_BD_INDEX_BIT)
+#define GRTM_DMA_BD_BASE (0xfffffc<<GRTM_DMA_BD_BASE_BIT)
+
+/* DMA Configuration Register (0x10) */
+#define GRTM_DMA_CFG_BLKSZ_BIT 0
+#define GRTM_DMA_CFG_FIFOSZ_BIT 16
+
+#define GRTM_DMA_CFG_BLKSZ (0xffff<<GRTM_DMA_CFG_BLKSZ_BIT)
+#define GRTM_DMA_CFG_FIFOSZ (0xffff<<GRTM_DMA_CFG_FIFOSZ_BIT)
+
+/* TM Control Register (0x80) */
+#define GRTM_CTRL_EN_BIT 0
+
+#define GRTM_CTRL_EN (1<<GRTM_CTRL_EN_BIT)
+
+/* TM Status Register (0x84) - Unused */
+
+/* TM Configuration Register (0x88) */
+#define GRTM_CFG_SC_BIT 0
+#define GRTM_CFG_SP_BIT 1
+#define GRTM_CFG_CE_BIT 2
+#define GRTM_CFG_NRZ_BIT 3
+#define GRTM_CFG_PSR_BIT 4
+#define GRTM_CFG_TE_BIT 5
+#define GRTM_CFG_RSDEP_BIT 6
+#define GRTM_CFG_RS_BIT 9
+#define GRTM_CFG_AASM_BIT 11
+#define GRTM_CFG_FECF_BIT 12
+#define GRTM_CFG_OCF_BIT 13
+#define GRTM_CFG_EVC_BIT 14
+#define GRTM_CFG_IDLE_BIT 15
+#define GRTM_CFG_FSH_BIT 16
+#define GRTM_CFG_MCG_BIT 17
+#define GRTM_CFG_IZ_BIT 18
+#define GRTM_CFG_FHEC_BIT 19
+#define GRTM_CFG_AOS_BIT 20
+#define GRTM_CFG_CIF_BIT 21
+#define GRTM_CFG_OCFB_BIT 22
+
+#define GRTM_CFG_SC (1<<GRTM_CFG_SC_BIT)
+#define GRTM_CFG_SP (1<<GRTM_CFG_SP_BIT)
+#define GRTM_CFG_CE (1<<GRTM_CFG_CE_BIT)
+#define GRTM_CFG_NRZ (1<<GRTM_CFG_NRZ_BIT)
+#define GRTM_CFG_PSR (1<<GRTM_CFG_PSR_BIT)
+#define GRTM_CFG_TE (1<<GRTM_CFG_TE_BIT)
+#define GRTM_CFG_RSDEP (0x7<<GRTM_CFG_RSDEP_BIT)
+#define GRTM_CFG_RS (0x3<<GRTM_CFG_RS_BIT)
+#define GRTM_CFG_AASM (1<<GRTM_CFG_AASM_BIT)
+#define GRTM_CFG_FECF (1<<GRTM_CFG_FECF_BIT)
+#define GRTM_CFG_OCF (1<<GRTM_CFG_OCF_BIT)
+#define GRTM_CFG_EVC (1<<GRTM_CFG_EVC_BIT)
+#define GRTM_CFG_IDLE (1<<GRTM_CFG_IDLE_BIT)
+#define GRTM_CFG_FSH (1<<GRTM_CFG_FSH_BIT)
+#define GRTM_CFG_MCG (1<<GRTM_CFG_MCG_BIT)
+#define GRTM_CFG_IZ (1<<GRTM_CFG_IZ_BIT)
+#define GRTM_CFG_FHEC (1<<GRTM_CFG_FHEC_BIT)
+#define GRTM_CFG_AOS (1<<GRTM_CFG_AOS_BIT)
+#define GRTM_CFG_CIF (1<<GRTM_CFG_CIF_BIT)
+#define GRTM_CFG_OCFB (1<<GRTM_CFG_OCFB_BIT)
+
+/* TM Size Register (0x8c) */
+#define GRTM_SIZE_BLKSZ_BIT 0
+#define GRTM_SIZE_FIFOSZ_BIT 8
+#define GRTM_SIZE_LEN_BIT 20
+
+#define GRTM_SIZE_BLKSZ (0xff<<GRTM_SIZE_BLKSZ_BIT)
+#define GRTM_SIZE_FIFOSZ (0xfff<<GRTM_SIZE_FIFOSZ_BIT)
+#define GRTM_SIZE_LEN (0xfff<<GRTM_SIZE_LEN_BIT)
+
+/* TM Physical Layer Register (0x90) */
+#define GRTM_PHY_SUB_BIT 0
+#define GRTM_PHY_SCF_BIT 15
+#define GRTM_PHY_SYM_BIT 16
+#define GRTM_PHY_SF_BIT 31
+
+#define GRTM_PHY_SUB (0x7fff<<GRTM_PHY_SUB_BIT)
+#define GRTM_PHY_SCF (1<<GRTM_PHY_SCF_BIT)
+#define GRTM_PHY_SYM (0x7fff<<GRTM_PHY_SYM_BIT)
+#define GRTM_PHY_SF (1<<GRTM_PHY_SF_BIT)
+
+/* TM Coding Sub-Layer Register (0x94) */
+#define GRTM_CODE_SC_BIT 0
+#define GRTM_CODE_SP_BIT 1
+#define GRTM_CODE_CERATE_BIT 2
+#define GRTM_CODE_CE_BIT 5
+#define GRTM_CODE_NRZ_BIT 6
+#define GRTM_CODE_PSR_BIT 7
+#define GRTM_CODE_RS8_BIT 11
+#define GRTM_CODE_RSDEP_BIT 12
+#define GRTM_CODE_RS_BIT 15
+#define GRTM_CODE_AASM_BIT 16
+#define GRTM_CODE_CSEL_BIT 17
+
+#define GRTM_CODE_SC (1<<GRTM_CODE_SC_BIT)
+#define GRTM_CODE_SP (1<<GRTM_CODE_SP_BIT)
+#define GRTM_CODE_CERATE (0x7<<GRTM_CODE_CERATE_BIT)
+#define GRTM_CODE_CE (1<<GRTM_CODE_CE_BIT)
+#define GRTM_CODE_NRZ (1<<GRTM_CODE_NRZ_BIT)
+#define GRTM_CODE_PSR (1<<GRTM_CODE_PSR_BIT)
+#define GRTM_CODE_RS8 (1<<GRTM_CODE_RS8_BIT)
+#define GRTM_CODE_RSDEP (0x7<<GRTM_CODE_RSDEP_BIT)
+#define GRTM_CODE_RS (1<<GRTM_CODE_RS_BIT)
+#define GRTM_CODE_AASM (1<<GRTM_CODE_AASM_BIT)
+#define GRTM_CODE_CSEL (0x3<<GRTM_CODE_CSEL_BIT)
+
+/* TM Attached Synchronization Marker Register (0x98) */
+#define GRTM_ASM_BIT 0
+
+#define GRTM_ASM 0xffffffff
+
+/* TM All Frames Generation Register (0xa0) */
+#define GRTM_ALL_LEN_BIT 0
+#define GRTM_ALL_VER_BIT 12
+#define GRTM_ALL_FHEC_BIT 14
+#define GRTM_ALL_FECF_BIT 15
+#define GRTM_ALL_IZ_BIT 16
+#define GRTM_ALL_IZLEN_BIT 17
+
+#define GRTM_ALL_LEN (0x7ff<<GRTM_ALL_LEN_BIT)
+#define GRTM_ALL_VER (0x3<<GRTM_ALL_VER_BIT)
+#define GRTM_ALL_FHEC (1<<GRTM_ALL_FHEC_BIT)
+#define GRTM_ALL_FECF (1<<GRTM_ALL_FECF_BIT)
+#define GRTM_ALL_IZ (1<<GRTM_ALL_IZ_BIT)
+#define GRTM_ALL_IZLEN (0x1f<<GRTM_ALL_IZLEN_BIT)
+
+/* TM Master Channel Frame Generation Register (0xa4) */
+#define GRTM_MST_OW_BIT 0
+#define GRTM_MST_OCF_BIT 1
+#define GRTM_MST_FSH_BIT 2
+#define GRTM_MST_MC_BIT 3
+#define GRTM_MST_MCCNTR_BIT 24
+
+#define GRTM_MST_OW (1<<GRTM_MST_OW_BIT)
+#define GRTM_MST_OCF (1<<GRTM_MST_OCF_BIT)
+#define GRTM_MST_FSH (1<<GRTM_MST_FSH_BIT)
+#define GRTM_MST_MC (0xff<<GRTM_MST_MC_BIT)
+
+/* TM Idle Frame Generation Register (0xa8) */
+#define GRTM_IDLE_SCID_BIT 0
+#define GRTM_IDLE_VCID_BIT 10
+#define GRTM_IDLE_MC_BIT 16
+#define GRTM_IDLE_VCC_BIT 17
+#define GRTM_IDLE_FSH_BIT 18
+#define GRTM_IDLE_EVC_BIT 19
+#define GRTM_IDLE_OCF_BIT 20
+#define GRTM_IDLE_IDLE_BIT 21
+#define GRTM_IDLE_MCCNTR_BIT 24
+
+#define GRTM_IDLE_SCID (0x3ff<<GRTM_IDLE_SCID_BIT)
+#define GRTM_IDLE_VCID (0x3f<<GRTM_IDLE_VCID_BIT)
+#define GRTM_IDLE_MC (1<<GRTM_IDLE_MC_BIT)
+#define GRTM_IDLE_VCC (1<<GRTM_IDLE_VCC_BIT)
+#define GRTM_IDLE_FSH (1<<GRTM_IDLE_FSH_BIT)
+#define GRTM_IDLE_EVC (1<<GRTM_IDLE_EVC_BIT)
+#define GRTM_IDLE_OCF (1<<GRTM_IDLE_OCF_BIT)
+#define GRTM_IDLE_IDLE (1<<GRTM_IDLE_IDLE_BIT)
+#define GRTM_IDLE_MCCNTR (0xff<<GRTM_IDLE_MCCNTR_BIT)
+
+/* TM FSH/Insert Zone Registers (0xc0..0xcc) */
+#define GRTM_FSH_DATA_BIT 0
+
+#define GRTM_FSH_DATA 0xffffffff
+
+
+/* TM Operational Control Field Register (0xd0) */
+#define GRTM_OCF_CLCW_BIT 0
+
+#define GRTM_OCF_CLCW 0xffffffff
+
+
+/* GRTM Revision 0 */
+#define GRTM_REV0_DMA_CTRL_TXRDY_BIT 5
+#define GRTM_REV0_DMA_CTRL_TXRDY (1<<GRTM_REV0_DMA_CTRL_TXRDY_BIT)
+
+/* GRTM Revision 1 */
+#define GRTM_REV1_DMA_STS_TXRDY_BIT 6
+#define GRTM_REV1_DMA_STS_TXSTAT_BIT 7
+#define GRTM_REV1_DMA_STS_TXRDY (1<<GRTM_REV1_DMA_STS_TXRDY_BIT)
+#define GRTM_REV1_DMA_STS_TXSTAT (1<<GRTM_REV1_DMA_STS_TXSTAT_BIT)
+
+#define GRTM_REV1_REV_SREV_BIT 0
+#define GRTM_REV1_REV_MREV_BIT 8
+#define GRTM_REV1_REV_TIRQ_BIT 16
+#define GRTM_REV1_REV_SREV (0xff<<GRTM_REV1_REV_SREV_BIT)
+#define GRTM_REV1_REV_MREV (0xff<<GRTM_REV1_REV_MREV_BIT)
+#define GRTM_REV1_REV_TIRQ (1<<GRTM_REV1_REV_TIRQ_BIT)
+
+
+/* GRTM transmit descriptor (0x400 Alignment need) */
+struct grtm_bd {
+ volatile unsigned int ctrl;
+ unsigned int address;
+};
+
+#define GRTM_BD_EN_BIT 0
+#define GRTM_BD_WR_BIT 1
+#define GRTM_BD_IE_BIT 2
+#define GRTM_BD_FECFB_BIT 3
+#define GRTM_BD_IZB_BIT 4
+#define GRTM_BD_FHECB_BIT 5
+#define GRTM_BD_OCFB_BIT 6
+#define GRTM_BD_FSHB_BIT 7
+#define GRTM_BD_MCB_BIT 8
+#define GRTM_BD_VCE_BIT 9
+#define GRTM_BD_TS_BIT 14
+#define GRTM_BD_UE_BIT 15
+
+#define GRTM_BD_EN (1<<GRTM_BD_EN_BIT)
+#define GRTM_BD_WR (1<<GRTM_BD_WR_BIT)
+#define GRTM_BD_IE (1<<GRTM_BD_IE_BIT)
+#define GRTM_BD_FECFB (1<<GRTM_BD_FECFB_BIT)
+#define GRTM_BD_IZB (1<<GRTM_BD_IZB_BIT)
+#define GRTM_BD_FHECB (1<<GRTM_BD_FHECB_BIT)
+#define GRTM_BD_OCFB (1<<GRTM_BD_OCFB_BIT)
+#define GRTM_BD_FSHB (1<<GRTM_BD_FSHB_BIT)
+#define GRTM_BD_MCB (1<<GRTM_BD_MCB_BIT)
+#define GRTM_BD_VCE (1<<GRTM_BD_VCE_BIT)
+#define GRTM_BD_TS (1<<GRTM_BD_TS_BIT)
+#define GRTM_BD_UE (1<<GRTM_BD_UE_BIT)
+
+/* Load register */
+
+#define READ_REG(address) (*(volatile unsigned int *)address)
+
+/* Driver functions */
+static rtems_device_driver grtm_initialize(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtm_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtm_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtm_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtm_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtm_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+
+#define GRTM_DRIVER_TABLE_ENTRY { grtm_initialize, grtm_open, grtm_close, grtm_read, grtm_write, grtm_ioctl }
+
+static rtems_driver_address_table grtm_driver = GRTM_DRIVER_TABLE_ENTRY;
+
/* Structure that connects BD with SoftWare Frame */
struct grtm_ring {
	struct grtm_ring *next;		/* Next entry in the circular descriptor ring */
	struct grtm_bd *bd;		/* Hardware buffer descriptor backing this entry */
	struct grtm_frame *frm;		/* Frame currently assigned to the BD, NULL when free */
};
+
+struct grtm_priv {
+ struct drvmgr_dev *dev; /* Driver manager device */
+ char devName[32]; /* Device Name */
+ struct grtm_regs *regs;
+ int irq;
+ int minor;
+ int subrev; /* GRTM Revision */
+ SPIN_DECLARE(devlock); /* spin-lock ISR protection */
+
+ int open;
+ int running;
+
+ struct grtm_bd *bds;
+ void *_bds;
+
+ /* Interrupt generation */
+ int enable_cnt_curr;/* Down counter, when 0 the interrupt bit is set for next descriptor */
+ volatile int handling_transmission; /* Tells ISR if user are active changing descriptors/queues */
+
+ struct grtm_ring *_ring; /* Root of ring */
+ struct grtm_ring *ring; /* Next ring to use for new frames to be transmitted */
+ struct grtm_ring *ring_end; /* Oldest activated ring used */
+
+ /* Collections of frames Ready to sent/ Scheduled for transmission/Sent
+ * frames waiting for the user to reclaim
+ */
+ struct grtm_list ready; /* Frames Waiting for free BDs */
+ struct grtm_list scheduled; /* Frames in BDs beeing transmitted */
+ struct grtm_list sent; /* Sent Frames waiting for user to reclaim and reuse */
+
+ /* Number of frames in the lists */
+ int ready_cnt; /* Number of ready frames */
+ int scheduled_cnt; /* Number of scheduled frames */
+ int sent_cnt; /* Number of sent frames */
+
+ struct grtm_ioc_hw hw_avail; /* Hardware support available */
+ struct grtm_ioc_config config;
+ struct grtm_ioc_stats stats;
+
+ rtems_id sem_tx;
+};
+
+/* Prototypes */
+static void *grtm_memalign(unsigned int boundary, unsigned int length, void *realbuf);
+static void grtm_hw_reset(struct grtm_priv *pDev);
+static void grtm_interrupt(void *arg);
+
+/* Common Global Variables */
+static rtems_id grtm_dev_sem;
+static int grtm_driver_io_registered = 0;
+static rtems_device_major_number grtm_driver_io_major = 0;
+
+/******************* Driver manager interface ***********************/
+
+/* Driver prototypes */
+static int grtm_register_io(rtems_device_major_number *m);
+static int grtm_device_init(struct grtm_priv *pDev);
+
+static int grtm_init2(struct drvmgr_dev *dev);
+static int grtm_init3(struct drvmgr_dev *dev);
+
+static struct drvmgr_drv_ops grtm_ops =
+{
+ {NULL, grtm_init2, grtm_init3, NULL},
+ NULL,
+ NULL
+};
+
+static struct amba_dev_id grtm_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_GRTM},
+ {0, 0} /* Mark end of table */
+};
+
+static struct amba_drv_info grtm_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_GRTM_ID, /* Driver ID */
+ "GRTM_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &grtm_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+ },
+ &grtm_ids[0]
+};
+
/* Register the GRTM driver with the driver manager. Called once by the
 * BSP/application before driver manager initialization.
 */
void grtm_register_drv (void)
{
	DBG("Registering GRTM driver\n");
	drvmgr_drv_register(&grtm_drv_info.general);
}
+
+static int grtm_init2(struct drvmgr_dev *dev)
+{
+ struct grtm_priv *priv;
+
+ DBG("GRTM[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+ priv = dev->priv = grlib_calloc(1, sizeof(*priv));
+ if ( !priv )
+ return DRVMGR_NOMEM;
+ priv->dev = dev;
+
+ /* This core will not find other cores, so we wait for init2() */
+
+ return DRVMGR_OK;
+}
+
+static int grtm_init3(struct drvmgr_dev *dev)
+{
+ struct grtm_priv *priv;
+ char prefix[32];
+ rtems_status_code status;
+
+ priv = dev->priv;
+
+ /* Do initialization */
+
+ if ( grtm_driver_io_registered == 0) {
+ /* Register the I/O driver only once for all cores */
+ if ( grtm_register_io(&grtm_driver_io_major) ) {
+ /* Failed to register I/O driver */
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+
+ grtm_driver_io_registered = 1;
+ }
+
+ /* I/O system registered and initialized
+ * Now we take care of device initialization.
+ */
+ if ( grtm_device_init(priv) ) {
+ return DRVMGR_FAIL;
+ }
+
+ /* Get Filesystem name prefix */
+ prefix[0] = '\0';
+ if ( drvmgr_get_dev_prefix(dev, prefix) ) {
+ /* Failed to get prefix, make sure of a unique FS name
+ * by using the driver minor.
+ */
+ sprintf(priv->devName, "/dev/grtm%d", dev->minor_drv);
+ } else {
+ /* Got special prefix, this means we have a bus prefix
+ * And we should use our "bus minor"
+ */
+ sprintf(priv->devName, "/dev/%sgrtm%d", prefix, dev->minor_bus);
+ }
+
+ SPIN_INIT(&priv->devlock, priv->devName);
+
+ /* Register Device */
+ status = rtems_io_register_name(priv->devName, grtm_driver_io_major, dev->minor_drv);
+ if (status != RTEMS_SUCCESSFUL) {
+ return DRVMGR_FAIL;
+ }
+
+ return DRVMGR_OK;
+}
+
+/******************* Driver Implementation ***********************/
+
+static int grtm_register_io(rtems_device_major_number *m)
+{
+ rtems_status_code r;
+
+ if ((r = rtems_io_register_driver(0, &grtm_driver, m)) == RTEMS_SUCCESSFUL) {
+ DBG("GRTM driver successfully registered, major: %d\n", *m);
+ } else {
+ switch(r) {
+ case RTEMS_TOO_MANY:
+ printk("GRTM rtems_io_register_driver failed: RTEMS_TOO_MANY\n");
+ return -1;
+ case RTEMS_INVALID_NUMBER:
+ printk("GRTM rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n");
+ return -1;
+ case RTEMS_RESOURCE_IN_USE:
+ printk("GRTM rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n");
+ return -1;
+ default:
+ printk("GRTM rtems_io_register_driver failed\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
/* One-time initialization of a single GRTM device: extract AMBA PnP
 * information (IRQ, register base), create the TX semaphore, allocate
 * the 0x400-aligned descriptor table and the software ring, reset the
 * core and read out its sub-revision.
 *
 * Returns 0 on success, -1 on failure.
 *
 * NOTE(review): on the later failure paths the semaphore and the
 * descriptor table are not released - presumably acceptable since a
 * failed device init is fatal for this device; confirm against the
 * other GRLIB drivers' conventions.
 */
static int grtm_device_init(struct grtm_priv *pDev)
{
	struct amba_dev_info *ambadev;
	struct ambapp_core *pnpinfo;
	union drvmgr_key_value *value;

	/* Get device information from AMBA PnP information */
	ambadev = (struct amba_dev_info *)pDev->dev->businfo;
	if ( ambadev == NULL ) {
		return -1;
	}
	pnpinfo = &ambadev->info;
	pDev->irq = pnpinfo->irq;
	pDev->regs = (struct grtm_regs *)pnpinfo->apb_slv->start;
	pDev->minor = pDev->dev->minor_drv;
	pDev->open = 0;
	pDev->running = 0;

	/* Create Binary TX Semaphore with count = 0; grtm_interrupt()
	 * releases it to wake a thread blocking on transmission.
	 */
	if ( rtems_semaphore_create(rtems_build_name('G', 'R', 'M', '0' + pDev->minor),
		0,
		RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|\
		RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
		0,
		&pDev->sem_tx) != RTEMS_SUCCESSFUL ) {
		return -1;
	}

	/* Allocate Memory for Buffer Descriptor Table, or let user provide a custom
	 * address. Hardware requires 0x400 alignment of the table.
	 */
	value = drvmgr_dev_key_get(pDev->dev, "bdTabAdr", DRVMGR_KT_POINTER);
	if ( value ) {
		pDev->bds = (struct grtm_bd *)value->ptr;
		pDev->_bds = (void *)value->ptr;
	} else {
		pDev->bds = (struct grtm_bd *)grtm_memalign(0x400, 0x400, &pDev->_bds);
	}
	if ( !pDev->bds ) {
		DBG("GRTM: Failed to allocate descriptor table\n");
		return -1;
	}
	memset(pDev->bds, 0, 0x400);

	/* Software ring mirroring the 128 hardware descriptors */
	pDev->_ring = grlib_malloc(sizeof(*pDev->_ring) * 128);
	if ( !pDev->_ring ) {
		return -1;
	}

	/* Reset Hardware before attaching IRQ handler */
	grtm_hw_reset(pDev);

	/* Read SUB revision number; it decides where the TXRDY bit is
	 * located (see grtm_start()).
	 */
	pDev->subrev = (READ_REG(&pDev->regs->revision) & GRTM_REV1_REV_SREV)
			>> GRTM_REV1_REV_SREV_BIT;

	return 0;
}
+
+
+static inline void grtm_list_clr(struct grtm_list *list)
+{
+ list->head = NULL;
+ list->tail = NULL;
+}
+
/* Put the GRTM core into its reset state by setting the RST bit in the
 * DMA Control Register; this clears all other register settings.
 */
static void grtm_hw_reset(struct grtm_priv *pDev)
{
	/* Reset Core */
	pDev->regs->dma_ctrl = GRTM_DMA_CTRL_RST;
}
+
+static void grtm_hw_get_implementation(struct grtm_priv *pDev, struct grtm_ioc_hw *hwcfg)
+{
+ unsigned int cfg = READ_REG(&pDev->regs->cfg);
+
+ hwcfg->cs = (cfg & GRTM_CFG_SC) ? 1:0;
+ hwcfg->sp = (cfg & GRTM_CFG_SP) ? 1:0;
+ hwcfg->ce = (cfg & GRTM_CFG_CE) ? 1:0;
+ hwcfg->nrz = (cfg & GRTM_CFG_NRZ) ? 1:0;
+ hwcfg->psr = (cfg & GRTM_CFG_PSR) ? 1:0;
+ hwcfg->te = (cfg & GRTM_CFG_TE) ? 1:0;
+ hwcfg->rsdep = (cfg & GRTM_CFG_RSDEP)>>GRTM_CFG_RSDEP_BIT;
+ hwcfg->rs = (cfg & GRTM_CFG_RS)>>GRTM_CFG_RS_BIT;
+ hwcfg->aasm = (cfg & GRTM_CFG_AASM) ? 1:0;
+ hwcfg->fecf = (cfg & GRTM_CFG_FECF) ? 1:0;
+ hwcfg->ocf = (cfg & GRTM_CFG_OCF) ? 1:0;
+ hwcfg->evc = (cfg & GRTM_CFG_EVC) ? 1:0;
+ hwcfg->idle = (cfg & GRTM_CFG_IDLE) ? 1:0;
+ hwcfg->fsh = (cfg & GRTM_CFG_FSH) ? 1:0;
+ hwcfg->mcg = (cfg & GRTM_CFG_MCG) ? 1:0;
+ hwcfg->iz = (cfg & GRTM_CFG_IZ) ? 1:0;
+ hwcfg->fhec = (cfg & GRTM_CFG_FHEC) ? 1:0;
+ hwcfg->aos = (cfg & GRTM_CFG_AOS) ? 1:0;
+ hwcfg->cif = (cfg & GRTM_CFG_CIF) ? 1:0;
+ hwcfg->ocfb = (cfg & GRTM_CFG_OCFB) ? 1:0;
+
+ cfg = READ_REG(&pDev->regs->dma_cfg);
+ hwcfg->blk_size = (cfg & GRTM_DMA_CFG_BLKSZ) >> GRTM_DMA_CFG_BLKSZ_BIT;
+ hwcfg->fifo_size= (cfg & GRTM_DMA_CFG_FIFOSZ) >> GRTM_DMA_CFG_FIFOSZ_BIT;
+}
+
+
+/* TODO: Implement proper default calculation from hardware configuration */
+static void grtm_hw_get_default_modes(struct grtm_ioc_config *cfg, struct grtm_ioc_hw *hwcfg)
+{
+ cfg->mode = GRTM_MODE_TM;
+ cfg->frame_length = 223;
+ cfg->limit = 0; /* Make driver auto configure it on START, user may override with non-zero value */
+ cfg->as_marker = 0x1ACFFC1D;
+
+ /* Physical */
+ cfg->phy_subrate = 1;
+ cfg->phy_symbolrate = 1;
+ cfg->phy_opts = 0;
+
+ /* Coding Layer */
+ cfg->code_rsdep = 1;
+ cfg->code_ce_rate = 0;
+ cfg->code_csel = 0;
+ cfg->code_opts = 0;
+
+ /* All Frame Generation */
+ cfg->all_izlen = 0;
+ cfg->all_opts = GRTM_IOC_ALL_FECF;
+
+ /* Master Channel Frame Generation */
+ if ( hwcfg->mcg ) {
+ cfg->mf_opts = GRTM_IOC_MF_MC;
+ } else {
+ cfg->mf_opts = 0;
+ }
+
+ /* Idle Frame Generation */
+ cfg->idle_scid = 0;
+ cfg->idle_vcid = 0;
+ if ( hwcfg->idle ) {
+ cfg->idle_opts = GRTM_IOC_IDLE_EN;
+ } else {
+ cfg->idle_opts = 0;
+ }
+
+ /* Interrupt options */
+ cfg->blocking = 0; /* non-blocking mode is default */
+ cfg->enable_cnt = 16; /* generate interrupt every 16 descriptor */
+ cfg->isr_desc_proc = 1; /* Let interrupt handler do descriptor processing */
+ cfg->timeout = RTEMS_NO_TIMEOUT;
+
+}
+
+static void *grtm_memalign(unsigned int boundary, unsigned int length, void *realbuf)
+{
+ *(int *)realbuf = (int)grlib_malloc(length+boundary);
+ DBG("GRTM: Alloced %d (0x%x) bytes, requested: %d\n",length+boundary,length+boundary,length);
+ return (void *)(((*(unsigned int *)realbuf)+boundary) & ~(boundary-1));
+}
+
/* Program the GRTM registers from the user configuration in *cfg,
 * using *hwcfg to derive a sensible DMA limit when cfg->limit is 0.
 * The core must be stopped/reset when this is called (see grtm_start()).
 *
 * Always returns 0.
 */
static int grtm_hw_set_config(struct grtm_priv *pDev, struct grtm_ioc_config *cfg, struct grtm_ioc_hw *hwcfg)
{
	struct grtm_regs *regs = pDev->regs;
	unsigned int tmp;
	unsigned int limit;

	if ( cfg->limit == 0 ) {
		/* Calculate Limit: two DMA blocks, unless a whole frame fits
		 * in one block.
		 */
		if ( cfg->frame_length > hwcfg->blk_size ) {
			limit = hwcfg->blk_size*2;
		} else {
			limit = cfg->frame_length;
		}
	} else {
		/* Use user configured limit */
		limit = cfg->limit;
	}

	/* Frame Length and Limit (both stored as value-1 in hardware) */
	regs->dma_len = (((limit-1) << GRTM_DMA_LEN_LIM_BIT) & GRTM_DMA_LEN_LIM)|
			(((cfg->frame_length-1) << GRTM_DMA_LEN_LEN_BIT) & GRTM_DMA_LEN_LEN);

	/* Physical layer options */
	tmp = (cfg->phy_opts & (GRTM_IOC_PHY_SCF|GRTM_IOC_PHY_SF)) |
		(((cfg->phy_symbolrate-1)<<GRTM_PHY_SYM_BIT) & GRTM_PHY_SYM) | (((cfg->phy_subrate-1)<<GRTM_PHY_SUB_BIT) & GRTM_PHY_SUB);
	regs->phy = tmp;

	/* Coding Sub-layer Options */
	tmp = (cfg->code_opts & GRTM_IOC_CODE_ALL) | ((cfg->code_csel<<GRTM_CODE_CSEL_BIT) & GRTM_CODE_CSEL) |
		(((cfg->code_rsdep-1)<<GRTM_CODE_RSDEP_BIT) & GRTM_CODE_RSDEP) | ((cfg->code_ce_rate<<GRTM_CODE_CERATE_BIT) & GRTM_CODE_CERATE);
	regs->code = tmp;

	/* Attached synchronization marker register */
	regs->asmr = cfg->as_marker;

	/* All Frames Generation */
	tmp = ((cfg->all_opts & GRTM_IOC_ALL_ALL)<<14) |
		((cfg->all_izlen<<GRTM_ALL_IZLEN_BIT) & GRTM_ALL_IZLEN) |
		((cfg->mode<<GRTM_ALL_VER_BIT) & GRTM_ALL_VER);
	regs->all_frm = tmp;

	/* Master Frame Generation */
	regs->mst_frm = cfg->mf_opts & GRTM_IOC_MF_ALL;

	/* Idle frame Generation */
	tmp = ((cfg->idle_opts & GRTM_IOC_IDLE_ALL) << 16) |
		((cfg->idle_vcid << GRTM_IDLE_VCID_BIT) & GRTM_IDLE_VCID) |
		((cfg->idle_scid << GRTM_IDLE_SCID_BIT) & GRTM_IDLE_SCID);
	regs->idle_frm = tmp;

	return 0;
}
+
/* Bring the transmitter from stopped to running: clear descriptors,
 * stats and software queues, rebuild the 128-entry descriptor ring,
 * reset the DMA channel, program the configuration, enable the TM
 * transmitter and wait for it to report ready, then enable interrupts.
 *
 * Returns RTEMS_SUCCESSFUL, or RTEMS_IO_ERROR if the configuration is
 * rejected or the transmitter does not come up.
 */
static int grtm_start(struct grtm_priv *pDev)
{
	struct grtm_regs *regs = pDev->regs;
	int i;
	struct grtm_ioc_config *cfg = &pDev->config;
	unsigned int txrdy;

	/* Clear Descriptors */
	memset(pDev->bds,0,0x400);

	/* Clear stats */
	memset(&pDev->stats,0,sizeof(struct grtm_ioc_stats));

	/* Init Descriptor Ring: entry i wraps to entry 0 after 127 */
	memset(pDev->_ring,0,sizeof(struct grtm_ring)*128);
	for(i=0;i<127;i++){
		pDev->_ring[i].next = &pDev->_ring[i+1];
		pDev->_ring[i].bd = &pDev->bds[i];
		pDev->_ring[i].frm = NULL;
	}
	pDev->_ring[127].next = &pDev->_ring[0];
	pDev->_ring[127].bd = &pDev->bds[127];
	pDev->_ring[127].frm = NULL;

	pDev->ring = &pDev->_ring[0];
	pDev->ring_end = &pDev->_ring[0];

	/* Clear Scheduled, Ready and Sent list */
	grtm_list_clr(&pDev->ready);
	grtm_list_clr(&pDev->scheduled);
	grtm_list_clr(&pDev->sent);

	/* Software init */
	pDev->handling_transmission = 0;

	/* Reset the transmitter */
	regs->dma_ctrl = GRTM_DMA_CTRL_TXRST;
	regs->dma_ctrl = 0;	/* Leave Reset */

	/* Clear old interrupts */
	regs->dma_status = GRTM_DMA_STS_ALL;

	/* Set Descriptor Pointer Base register to point to first descriptor,
	 * translating the CPU address to the address seen by the GRTM DMA.
	 */
	drvmgr_translate_check(pDev->dev, CPUMEM_TO_DMA, (void *)pDev->bds,
				(void **)&regs->dma_bd, 0x400);

	/* Set hardware options as defined by config */
	if ( grtm_hw_set_config(pDev, cfg, &pDev->hw_avail) ) {
		return RTEMS_IO_ERROR;
	}

	/* Enable TM Transmitter */
	regs->ctrl = GRTM_CTRL_EN;

	/* Wait for TXRDY to be cleared (fixed busy-wait delay) */
	i=1000;
	while( i > 0 ) {
		asm volatile ("nop"::);
		i--;
	}

	/* Check transmitter startup OK: busy-wait (bounded) for TXRDY */
	i = 1000000;
	do {
		/* Location of TXRDY Bit is different for different revisions */
		if ( pDev->subrev == 0 ) {
			txrdy = READ_REG(&regs->dma_ctrl) &
				GRTM_REV0_DMA_CTRL_TXRDY;
		} else {
			txrdy = READ_REG(&regs->dma_status) &
				GRTM_REV1_DMA_STS_TXRDY;
		}
		if (txrdy != 0)
			break;

		asm volatile ("nop"::);
	} while ( --i > 0 );
	if ( i == 0 ) {
		/* Reset Failed */
		DBG("GRTM: start: Reseting transmitter failed (%d)\n",i);
		return RTEMS_IO_ERROR;
	}
	DBG("GRTM: reset time %d\n",i);

	/* Everything is configured, the TM transmitter is started
	 * and idle frames has been sent.
	 */

	/* Mark running before enabling the DMA transmitter, so the ISR
	 * does not treat the first interrupt as spurious.
	 */
	pDev->running = 1;

	/* Enable interrupts (Error and DMA TX) */
	regs->dma_ctrl = GRTM_DMA_CTRL_IE;

	DBG("GRTM: STARTED\n");

	return RTEMS_SUCCESSFUL;
}
+
/* Stop the DMA transmitter: disable DMA and its interrupts, clear any
 * pending interrupt status, then flush the TX semaphore so a thread
 * blocked waiting for transmission wakes up and sees the stop.
 */
static void grtm_stop(struct grtm_priv *pDev)
{
	struct grtm_regs *regs = pDev->regs;

	/* Disable the transmitter & Interrupts */
	regs->dma_ctrl = 0;

	/* Clear any pending interrupt */
	regs->dma_status = GRTM_DMA_STS_ALL;

	DBG("GRTM: STOPPED\n");

	/* Flush semaphore in case a thread is stuck waiting for TX Interrupts */
	rtems_semaphore_flush(pDev->sem_tx);
}
+
+static rtems_device_driver grtm_open(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void *arg)
+{
+ struct grtm_priv *pDev;
+ struct drvmgr_dev *dev;
+
+ FUNCDBG();
+
+ if ( drvmgr_get_dev(&grtm_drv_info.general, minor, &dev) ) {
+ DBG("Wrong minor %d\n", minor);
+ return RTEMS_INVALID_NUMBER;
+ }
+ pDev = (struct grtm_priv *)dev->priv;
+
+ /* Wait until we get semaphore */
+ if ( rtems_semaphore_obtain(grtm_dev_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT) != RTEMS_SUCCESSFUL ){
+ return RTEMS_INTERNAL_ERROR;
+ }
+
+ /* Is device in use? */
+ if ( pDev->open ){
+ rtems_semaphore_release(grtm_dev_sem);
+ return RTEMS_RESOURCE_IN_USE;
+ }
+
+ /* Mark device taken */
+ pDev->open = 1;
+
+ rtems_semaphore_release(grtm_dev_sem);
+
+ DBG("grtm_open: OPENED minor %d (pDev: 0x%x)\n",pDev->minor,(unsigned int)pDev);
+
+ /* Set defaults */
+ pDev->config.timeout = RTEMS_NO_TIMEOUT; /* no timeout (wait forever) */
+ pDev->config.blocking = 0; /* polling mode */
+
+ pDev->running = 0; /* not in running mode yet */
+
+ memset(&pDev->config,0,sizeof(pDev->config));
+
+ /* The core has been reset when we execute here, so it is possible
+ * to read out what HW is implemented from core.
+ */
+ grtm_hw_get_implementation(pDev, &pDev->hw_avail);
+
+ /* Get default modes */
+ grtm_hw_get_default_modes(&pDev->config,&pDev->hw_avail);
+
+ return RTEMS_SUCCESSFUL;
+}
+
/* Close one GRTM device. If it is still running, the ISR is detached
 * first (so no interrupt can race the shutdown), then the DMA channel
 * is stopped and the core reset. Finally the device is marked free.
 */
static rtems_device_driver grtm_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
{
	struct grtm_priv *pDev;
	struct drvmgr_dev *dev;

	FUNCDBG();

	if ( drvmgr_get_dev(&grtm_drv_info.general, minor, &dev) ) {
		return RTEMS_INVALID_NUMBER;
	}
	pDev = (struct grtm_priv *)dev->priv;

	if ( pDev->running ){
		drvmgr_interrupt_unregister(dev, 0, grtm_interrupt, pDev);
		grtm_stop(pDev);
		pDev->running = 0;
	}

	/* Reset core */
	grtm_hw_reset(pDev);

	/* Clear descriptor area just for sure */
	memset(pDev->bds, 0, 0x400);

	/* Mark not open */
	pDev->open = 0;

	return RTEMS_SUCCESSFUL;
}
+
/* read() is not supported by the GRTM (transmit-only) driver. */
static rtems_device_driver grtm_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
{
	FUNCDBG();
	return RTEMS_NOT_IMPLEMENTED;
}
+
/* write() is not supported; frames are sent via the ioctl() interface. */
static rtems_device_driver grtm_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
{
	FUNCDBG();
	return RTEMS_NOT_IMPLEMENTED;
}
+
/* Scans the descriptor table for scheduled frames that has been sent,
 * and moves these frames from the head of the scheduled queue to the
 * tail of the sent queue.
 *
 * Also, for all frames the status is updated.
 *
 * Return Value
 *	Number of frames freed.
 */
static int grtm_free_sent(struct grtm_priv *pDev)
{
	struct grtm_ring *curr;
	struct grtm_frame *last_frm, *first_frm;
	int freed_frame_cnt=0;
	unsigned int ctrl;

	curr = pDev->ring_end;

	/* Step into TX ring to find sent frames */
	if ( !curr->frm ){
		/* No scheduled frames, abort */
		return 0;
	}

	/* There has been messages scheduled ==> scheduled messages may have been
	 * transmitted and needs to be collected.
	 */

	first_frm = curr->frm;

	/* Loop until first enabled unsent frame is found.
	 * A unused descriptor is indicated by an unassigned frm field.
	 * Note: last_frm is assigned on the first iteration; the loop body
	 * runs at least once whenever freed_frame_cnt ends up > 0, so the
	 * uses of last_frm below are safe.
	 */
	while ( curr->frm && !((ctrl=READ_REG(&curr->bd->ctrl)) & GRTM_BD_EN) ){
		/* Handle one sent Frame */

		/* Remember last handled frame so that insertion/removal from
		 * frames lists go fast.
		 */
		last_frm = curr->frm;

		/* 1. Set flags to indicate error(s) and other information */
		last_frm->flags |= GRTM_FLAGS_SENT; /* Mark sent */

		/* Update Stats */
		pDev->stats.frames_sent++;

		/* Did packet encounter link error? (underrun reported in the
		 * descriptor control word)
		 */
		if ( ctrl & GRTM_BD_UE ) {
			pDev->stats.err_underrun++;
			/* NOTE(review): GRRM_FLAGS_ERR looks like a typo for
			 * GRTM_FLAGS_ERR but matches the name in grtm.h.
			 */
			last_frm->flags |= GRRM_FLAGS_ERR;
		}

		curr->frm = NULL; /* Mark unused */

		/* Increment */
		curr = curr->next;
		freed_frame_cnt++;
	}

	/* 1. Remove all handled frames from scheduled queue
	 * 2. Put all handled frames into sent queue
	 */
	if ( freed_frame_cnt > 0 ){

		/* Save TX ring posistion */
		pDev->ring_end = curr;

		/* Remove all sent frames from scheduled list */
		if ( pDev->scheduled.tail == last_frm ){
			/* All scheduled frames sent... */
			pDev->scheduled.head = NULL;
			pDev->scheduled.tail = NULL;
		}else{
			pDev->scheduled.head = last_frm->next;
		}
		last_frm->next = NULL;

		/* Put all sent frames into "Sent queue" for user to
		 * collect, later on.
		 */
		if ( !pDev->sent.head ){
			/* Sent queue empty */
			pDev->sent.head = first_frm;
			pDev->sent.tail = last_frm;
		}else{
			pDev->sent.tail->next = first_frm;
			pDev->sent.tail = last_frm;
		}
	}
	return freed_frame_cnt;
}
+
+
+/* Moves as many frames in the ready queue (as there are free descriptors for)
+ * to the scheduled queue. The free descriptors are then assigned one frame
+ * each and enabled for transmission.
+ *
+ * Caller must hold the TX lock (see grtm_request_txlock()).
+ *
+ * Return Value
+ * Returns number of frames moved from ready to scheduled queue
+ */
+static int grtm_schedule_ready(struct grtm_priv *pDev)
+{
+	int cnt;
+	unsigned int ctrl, dmactrl;
+	struct grtm_ring *curr_bd;
+	struct grtm_frame *curr_frm, *last_frm;
+
+	if ( !pDev->ready.head ){
+		return 0;
+	}
+
+	cnt=0;
+	curr_frm = pDev->ready.head;
+	curr_bd = pDev->ring;
+	/* Walk the ring; a descriptor with frm==NULL is free for use */
+	while( !curr_bd->frm ){
+		/* Assign frame to descriptor */
+		curr_bd->frm = curr_frm;
+
+		/* Prepare descriptor address. Three cases:
+		 *  - GRTM core on same bus as CPU ==> no translation (Address used by CPU = address used by GRTM)
+		 *  - GRTM core on remote bus, and payload address given as used by CPU ==> Translation needed
+		 *  - GRTM core on remote bus, and payload address given as used by GRTM ==> no translation [ USER does custom translation]
+		 */
+		if ( curr_frm->flags & (GRTM_FLAGS_TRANSLATE|GRTM_FLAGS_TRANSLATE_AND_REMEMBER) ) {
+			/* Do translation */
+			drvmgr_translate(pDev->dev, CPUMEM_TO_DMA, (void *)curr_frm->payload, (void **)&curr_bd->bd->address);
+			if ( curr_frm->flags & GRTM_FLAGS_TRANSLATE_AND_REMEMBER ) {
+				if ( curr_frm->payload != (unsigned int *)curr_bd->bd->address ) {
+					/* Translation needed; remember that for next time */
+					curr_frm->flags &= ~GRTM_FLAGS_TRANSLATE_AND_REMEMBER;
+					curr_frm->flags |= GRTM_FLAGS_TRANSLATE;
+				} else {
+					/* No Translation needed */
+					curr_frm->flags &= ~(GRTM_FLAGS_TRANSLATE|GRTM_FLAGS_TRANSLATE_AND_REMEMBER);
+				}
+			}
+		} else {
+			/* Custom translation or no translation needed */
+			curr_bd->bd->address = (unsigned int)curr_frm->payload;
+		}
+
+		ctrl = GRTM_BD_EN;
+		if ( curr_bd->next == pDev->_ring ){
+			ctrl |= GRTM_BD_WR; /* Wrap around */
+		}
+		/* Apply user options/flags */
+		ctrl |= (curr_frm->flags & GRTM_FLAGS_MASK);
+
+		/* Is this Frame going to be an interrupt Frame?
+		 * enable_cnt==0 means interrupts effectively disabled (huge reload).
+		 */
+		if ( (--pDev->enable_cnt_curr) <= 0 ){
+			if ( pDev->config.enable_cnt == 0 ){
+				pDev->enable_cnt_curr = 0x3fffffff;
+			}else{
+				pDev->enable_cnt_curr = pDev->config.enable_cnt;
+				ctrl |= GRTM_BD_IE;
+			}
+		}
+
+		/* Enable descriptor */
+		curr_bd->bd->ctrl = ctrl;
+
+		last_frm = curr_frm;
+		curr_bd = curr_bd->next;
+		cnt++;
+
+		/* Get Next Frame from Ready Queue */
+		if ( curr_frm == pDev->ready.tail ){
+			/* Handled all in ready queue. */
+			curr_frm = NULL;
+			break;
+		}
+		curr_frm = curr_frm->next;
+	}
+
+	/* Have any frames been scheduled? */
+	if ( cnt > 0 ){
+		/* Make last frame mark end of chain, probably pointless... */
+		last_frm->next = NULL;
+
+		/* Insert scheduled packets into scheduled queue */
+		if ( !pDev->scheduled.head ){
+			/* empty scheduled queue */
+			pDev->scheduled.head = pDev->ready.head;
+			pDev->scheduled.tail = last_frm;
+		}else{
+			pDev->scheduled.tail->next = pDev->ready.head;
+			pDev->scheduled.tail = last_frm;
+		}
+
+		/* Remove scheduled packets from ready queue */
+		pDev->ready.head = curr_frm;
+		if ( !curr_frm ){
+			pDev->ready.tail = NULL;
+		}
+
+		/* Update TX ring position */
+		pDev->ring = curr_bd;
+
+		/* Make hardware aware of the newly enabled descriptors */
+		dmactrl = READ_REG(&pDev->regs->dma_ctrl);
+		dmactrl &= ~(GRTM_DMA_CTRL_TXRST | GRTM_DMA_CTRL_RST);
+		dmactrl |= GRTM_DMA_CTRL_EN;
+		pDev->regs->dma_ctrl = dmactrl;
+	}
+
+	return cnt;
+}
+
+/* Advance the TX state machine: reclaim descriptors whose frames have been
+ * transmitted (SCHEDULED->SENT) and, if the driver is started, assign frames
+ * from the ready queue to free descriptors (READY->SCHEDULED).
+ *
+ * Caller must hold the TX lock (grtm_request_txlock()/_isr()).
+ */
+static void grtm_tx_process(struct grtm_priv *pDev)
+{
+	int num;
+
+	/* Free used descriptors and put the sent frame into the "Sent queue"
+	 * (SCHEDULED->SENT)
+	 */
+	num = grtm_free_sent(pDev);
+	pDev->scheduled_cnt -= num;
+	pDev->sent_cnt += num;
+
+	/* Use all available free descriptors there are frames for
+	 * in the ready queue.
+	 * (READY->SCHEDULED)
+	 */
+	if (pDev->running) {
+		num = grtm_schedule_ready(pDev);
+		pDev->ready_cnt -= num;
+		pDev->scheduled_cnt += num;
+	}
+}
+
+/*
+ * The TX lock protects user tasks from the ISR. If TX DMA interrupt occurs
+ * while the user task is processing the TX DMA descriptors the ISR will
+ * ignore the interrupt request by not processing the DMA table since that
+ * is done by the user task anyway. In SMP, when a user task enters the TX DMA
+ * processing while the ISR (on another CPU) is also processing the user task
+ * will loop waiting for the ISR to complete.
+ */
+
+/* Try to take the TX lock from task context.
+ *
+ * block: when non-zero, spin until the lock is obtained.
+ * Returns 1 when the lock was taken, 0 otherwise (only when block==0).
+ */
+static int grtm_request_txlock(struct grtm_priv *pDev, int block)
+{
+	SPIN_IRQFLAGS(irqflags);
+	int got_lock = 0;
+
+	do {
+		/* The spin lock only guards the test-and-set of the flag;
+		 * the TX processing itself runs outside the spin lock.
+		 */
+		SPIN_LOCK_IRQ(&pDev->devlock, irqflags);
+		if (pDev->handling_transmission == 0) {
+			pDev->handling_transmission = 1;
+			got_lock = 1;
+		}
+		SPIN_UNLOCK_IRQ(&pDev->devlock, irqflags);
+	} while (!got_lock && block);
+
+	return got_lock;
+}
+
+/* Try to take the TX lock from interrupt context. Never blocks.
+ *
+ * Returns 1 when the lock was taken, 0 when a task currently holds it
+ * (the ISR then skips DMA table processing, see lock description above).
+ */
+static inline int grtm_request_txlock_isr(struct grtm_priv *pDev)
+{
+	SPIN_ISR_IRQFLAGS(irqflags);
+	int got_lock = 0;
+
+	SPIN_LOCK(&pDev->devlock, irqflags);
+	if (pDev->handling_transmission == 0) {
+		pDev->handling_transmission = 1;
+		got_lock = 1;
+	}
+	SPIN_UNLOCK(&pDev->devlock, irqflags);
+
+	return got_lock;
+}
+
+/* Release the TX lock. A plain store suffices: only the current holder
+ * writes the flag back to zero, so no spin lock is required here.
+ */
+static inline void grtm_release_txlock(struct grtm_priv *pDev)
+{
+	pDev->handling_transmission = 0;
+}
+
+/* Driver ioctl entry point. Implements start/stop, configuration,
+ * statistics, hardware capability queries and the SEND/RECLAIM frame
+ * queue operations.
+ *
+ * minor selects the GRTM device via the driver manager; arg is a
+ * rtems_libio_ioctl_args_t whose buffer field carries the command
+ * specific input/output data (for some commands the pointer value
+ * itself is the data).
+ */
+static rtems_device_driver grtm_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+	struct grtm_priv *pDev;
+	struct drvmgr_dev *dev;
+	rtems_libio_ioctl_args_t *ioarg = (rtems_libio_ioctl_args_t *)arg;
+	unsigned int *data;
+	int status;
+	struct grtm_ioc_config *cfg;
+	struct grtm_ioc_hw_status *hwregs;
+	struct grtm_list *chain;
+	struct grtm_frame *curr;
+	struct grtm_ioc_hw *hwimpl;
+	struct grtm_ioc_stats *stats;
+	int num,ret;
+
+	FUNCDBG();
+
+	if ( drvmgr_get_dev(&grtm_drv_info.general, minor, &dev) ) {
+		return RTEMS_INVALID_NUMBER;
+	}
+	pDev = (struct grtm_priv *)dev->priv;
+
+	if (!ioarg)
+		return RTEMS_INVALID_NAME;
+
+	data = ioarg->buffer;
+	ioarg->ioctl_return = 0;
+	switch(ioarg->command) {
+	case GRTM_IOC_START:
+		if ( pDev->running ) {
+			return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+		}
+		if ( (status=grtm_start(pDev)) != RTEMS_SUCCESSFUL ){
+			return status;
+		}
+		/* Register ISR & Enable interrupt */
+		drvmgr_interrupt_register(dev, 0, "grtm", grtm_interrupt, pDev);
+
+		/* Read and write are now open... */
+		break;
+
+	case GRTM_IOC_STOP:
+		/* NOTE(review): RTEMS_RESOURCE_IN_USE is also returned when the
+		 * driver is already stopped; kept as-is for API compatibility.
+		 */
+		if ( !pDev->running ) {
+			return RTEMS_RESOURCE_IN_USE;
+		}
+
+		/* Disable interrupts */
+		drvmgr_interrupt_unregister(dev, 0, grtm_interrupt, pDev);
+		grtm_stop(pDev);
+		pDev->running = 0;
+		break;
+
+	case GRTM_IOC_ISSTARTED:
+		if ( !pDev->running ) {
+			return RTEMS_RESOURCE_IN_USE;
+		}
+		break;
+
+	case GRTM_IOC_SET_BLOCKING_MODE:
+		/* The blocking mode is passed by value in the buffer pointer */
+		if ( (unsigned int)data > GRTM_BLKMODE_BLK ) {
+			return RTEMS_INVALID_NAME;
+		}
+		DBG("GRTM: Set blocking mode: %d\n",(unsigned int)data);
+		pDev->config.blocking = (unsigned int)data;
+		break;
+
+	case GRTM_IOC_SET_TIMEOUT:
+		/* The timeout is passed by value in the buffer pointer */
+		DBG("GRTM: Timeout: %d\n",(unsigned int)data);
+		pDev->config.timeout = (rtems_interval)data;
+		break;
+
+	case GRTM_IOC_SET_CONFIG:
+		cfg = (struct grtm_ioc_config *)data;
+		if ( !cfg ) {
+			return RTEMS_INVALID_NAME;
+		}
+
+		/* Configuration may only be changed while stopped */
+		if ( pDev->running ) {
+			return RTEMS_RESOURCE_IN_USE;
+		}
+
+		pDev->config = *cfg;
+		break;
+
+	case GRTM_IOC_GET_STATS:
+		stats = (struct grtm_ioc_stats *)data;
+		if ( !stats ) {
+			return RTEMS_INVALID_NAME;
+		}
+		memcpy(stats,&pDev->stats,sizeof(struct grtm_ioc_stats));
+		break;
+
+	case GRTM_IOC_CLR_STATS:
+		memset(&pDev->stats,0,sizeof(struct grtm_ioc_stats));
+		break;
+
+	case GRTM_IOC_GET_CONFIG:
+		cfg = (struct grtm_ioc_config *)data;
+		if ( !cfg ) {
+			return RTEMS_INVALID_NAME;
+		}
+
+		*cfg = pDev->config;
+		break;
+
+	case GRTM_IOC_GET_OCFREG:
+		if ( !pDev->hw_avail.ocf ) {
+			/* Hardware does not implement the OCF register */
+			return RTEMS_NOT_DEFINED;
+		}
+		if ( !data ) {
+			return RTEMS_INVALID_NAME;
+		}
+		/* Hand the user a pointer directly to the OCF register */
+		*(unsigned int **)data = (unsigned int *)&pDev->regs->ocf;
+		break;
+
+	case GRTM_IOC_GET_HW_IMPL:
+		hwimpl = (struct grtm_ioc_hw *)data;
+		if ( !hwimpl ) {
+			return RTEMS_INVALID_NAME;
+		}
+		*hwimpl = pDev->hw_avail;
+		break;
+
+	case GRTM_IOC_GET_HW_STATUS:
+		hwregs = (struct grtm_ioc_hw_status *)data;
+		if ( !hwregs ) {
+			return RTEMS_INVALID_NAME;
+		}
+		/* We disable interrupt in order to get a snapshot of the registers */
+/* TODO: implement hwregs */
+		break;
+
+	/* Put a chain of frames at the back of the "Ready frames" queue. This
+	 * triggers the driver to put frames from the Ready queue into unused
+	 * available descriptors. (Ready -> Scheduled)
+	 */
+	case GRTM_IOC_SEND:
+		if ( !pDev->running ){
+			return RTEMS_RESOURCE_IN_USE;
+		}
+
+		/* Get pointer to frame chain wished be sent */
+		chain = (struct grtm_list *)ioarg->buffer;
+		if ( !chain ){
+			/* No new frames to send ==> just trigger hardware
+			 * to send previously made ready frames to be sent.
+			 * If someone else is processing the DMA we ignore the
+			 * request.
+			 */
+			if (grtm_request_txlock(pDev, 0)) {
+				grtm_tx_process(pDev);
+				grtm_release_txlock(pDev);
+			}
+			break;
+		}
+		if ( !chain->tail || !chain->head ){
+			return RTEMS_INVALID_NAME;
+		}
+
+		DBG("GRTM_SEND: head: 0x%x, tail: 0x%x\n",chain->head,chain->tail);
+
+		/* Mark ready frames unsent by clearing GRTM_FLAGS_SENT of all frames.
+		 * (The GRRM_FLAGS_ERR spelling comes from the public grtm.h header.)
+		 */
+
+		num = 0;
+		curr = chain->head;
+		while(curr != chain->tail){
+			curr->flags = curr->flags & ~(GRTM_FLAGS_SENT|GRRM_FLAGS_ERR);
+			curr = curr->next;
+			num++;
+		}
+		curr->flags = curr->flags & ~(GRTM_FLAGS_SENT|GRRM_FLAGS_ERR);
+		num++;
+
+		/* wait until we get the device lock */
+		grtm_request_txlock(pDev, 1);
+
+		/* 1. Put frames into ready queue
+		 *    (New Frames->READY)
+		 */
+		if ( pDev->ready.head ){
+			/* Frames already on ready queue (no free descriptors previously) ==>
+			 * Put frames at end of ready queue
+			 */
+			pDev->ready.tail->next = chain->head;
+			pDev->ready.tail = chain->tail;
+			chain->tail->next = NULL;
+		}else{
+			/* All frames is put into the ready queue for later processing */
+			pDev->ready.head = chain->head;
+			pDev->ready.tail = chain->tail;
+			chain->tail->next = NULL;
+		}
+		pDev->ready_cnt += num;	/* Added 'num' frames to ready queue */
+
+		/* 2. SCHEDULED->SENT
+		 * 3. READY->SCHEDULED
+		 */
+		grtm_tx_process(pDev);
+		grtm_release_txlock(pDev);
+		break;
+
+	/* Take all available sent frames from the "Sent frames" queue.
+	 * If no frames has been sent, the thread may get blocked if in blocking
+	 * mode. The blocking mode is not available if driver is not in running mode.
+	 *
+	 * Note this ioctl may return success even if the driver is not in STARTED mode.
+	 * This is because in case of an error (link error or similar) and the driver switch
+	 * from START to STOP mode we must still be able to get our frames back.
+	 *
+	 * Note in case the driver fails to send a frame for some reason (link error),
+	 * the sent flag is set to 0 indicating a failure.
+	 *
+	 */
+	case GRTM_IOC_RECLAIM:
+		/* Get pointer to were to place reaped chain */
+		chain = (struct grtm_list *)ioarg->buffer;
+		if ( !chain ){
+			return RTEMS_INVALID_NAME;
+		}
+
+		/* Lock out interrupt handler */
+		grtm_request_txlock(pDev, 1);
+
+		do {
+			/* Process descriptor table and populate with new
+			 * buffers:
+			 *   * SCHEDULED->SENT
+			 *   * READY->SCHEDULED
+			 */
+			grtm_tx_process(pDev);
+
+			/* Are there any frames on the sent queue waiting to be
+			 * reclaimed?
+			 */
+
+			if ( !pDev->sent.head ){
+				/* No frames to reclaim - no frame in sent queue.
+				 * Instead we block thread until frames have been sent
+				 * if in blocking mode.
+				 */
+				if ( pDev->running && pDev->config.blocking ){
+					ret = rtems_semaphore_obtain(pDev->sem_tx,RTEMS_WAIT,pDev->config.timeout);
+					if ( ret == RTEMS_TIMEOUT ) {
+						grtm_release_txlock(pDev);
+						return RTEMS_TIMEOUT;
+					} else if ( ret == RTEMS_SUCCESSFUL ) {
+						/* There might be frames available, go check */
+						continue;
+					} else {
+						/* any error (driver closed, internal error etc.) */
+						grtm_release_txlock(pDev);
+						return RTEMS_UNSATISFIED;
+					}
+
+				}else{
+					/* non-blocking mode, we quit */
+					chain->head = NULL;
+					chain->tail = NULL;
+					/* do not lock out interrupt handler any more */
+					grtm_release_txlock(pDev);
+					return RTEMS_TIMEOUT;
+				}
+			}else{
+				/* Take all sent frames from sent queue to userspace queue */
+				chain->head = pDev->sent.head;
+				chain->tail = pDev->sent.tail;
+				chain->tail->next = NULL; /* Just for sure */
+
+				/* Mark no Sent */
+				grtm_list_clr(&pDev->sent);
+				pDev->sent_cnt = 0;
+
+				DBG("TX_RECLAIM: head: 0x%x, tail: 0x%x\n",chain->head,chain->tail);
+				break;
+			}
+
+		}while(1);
+
+		/* do not lock out interrupt handler any more */
+		grtm_release_txlock(pDev);
+		break;
+
+	default:
+		return RTEMS_NOT_DEFINED;
+	}
+	return RTEMS_SUCCESSFUL;
+}
+
+/* GRTM TX DMA interrupt service routine.
+ *
+ * Reads and clears the DMA status, updates error statistics and, depending
+ * on configuration, processes the descriptor table from the ISR and/or wakes
+ * a task blocked in ioctl(SEND|RECLAIM).
+ */
+static void grtm_interrupt(void *arg)
+{
+	struct grtm_priv *pDev = arg;
+	struct grtm_regs *regs = pDev->regs;
+	unsigned int status;
+
+	/* Clear interrupt by reading it */
+	status = READ_REG(&regs->dma_status);
+
+	/* Spurious Interrupt? */
+	if ( !pDev->running || !status)
+		return;
+
+	/* Acknowledge the handled status bits */
+	regs->dma_status = status;
+
+	if ( status & GRTM_DMA_STS_TFF ){
+		pDev->stats.err_transfer_frame++;
+	}
+
+	if ( status & GRTM_DMA_STS_TA ){
+		pDev->stats.err_ahb++;
+	}
+
+	if ( status & GRTM_DMA_STS_TE ){
+		pDev->stats.err_tx++;
+	}
+
+	if ( status & GRTM_DMA_STS_TI ){
+
+		if ( pDev->config.isr_desc_proc) {
+			/* Process the DMA table only if no task currently holds
+			 * the TX lock; the task will do the work otherwise.
+			 */
+			if (grtm_request_txlock_isr(pDev)) {
+				grtm_tx_process(pDev);
+				grtm_release_txlock(pDev);
+			}
+
+#if 0
+			if ( (pDev->config.blocking==GRTM_BLKMODE_COMPLETE) && pDev->timeout ){
+				/* Signal to thread only if enough data is available */
+				if ( pDev->wait_for_frames > grtm_data_avail(pDev) ){
+					/* Not enough data available */
+					goto procceed_processing_interrupts;
+				}
+
+				/* Enough number of frames has been transmitted which means that
+				 * the waiting thread should be woken up.
+				 */
+				rtems_semaphore_release(pDev->sem_tx);
+			}
+#endif
+		}
+
+		if ( pDev->config.blocking == GRTM_BLKMODE_BLK ) {
+			/* Blocking mode */
+
+#if 0
+			/* Disable further Interrupts until handled by waiting task. */
+			regs->dma_ctrl = READ_REG(&regs->dma_ctrl) & ~GRTM_DMA_CTRL_IE;
+#endif
+
+			/* Signal Semaphore to wake waiting thread in ioctl(SEND|RECLAIM) */
+			rtems_semaphore_release(pDev->sem_tx);
+		}
+
+	}
+#if 0
+procceed_processing_interrupts:
+	;
+#endif
+}
+
+/* RTEMS I/O driver initialization entry point. Only creates the
+ * driver-global device semaphore; per-device initialization is handled by
+ * the driver manager framework.
+ */
+static rtems_device_driver grtm_initialize(
+	rtems_device_major_number major,
+	rtems_device_minor_number unused,
+	void *arg
+	)
+{
+	/* Device Semaphore created with count = 1 */
+	if ( rtems_semaphore_create(rtems_build_name('G', 'R', 'T', 'M'),
+		1,
+		RTEMS_FIFO|RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+		0,
+		&grtm_dev_sem) != RTEMS_SUCCESSFUL ) {
+		return RTEMS_INTERNAL_ERROR;
+	}
+
+	return RTEMS_SUCCESSFUL;
+}
diff --git a/bsps/shared/grlib/uart/apbuart_cons.c b/bsps/shared/grlib/uart/apbuart_cons.c
new file mode 100644
index 0000000000..8cd8a1ebf0
--- /dev/null
+++ b/bsps/shared/grlib/uart/apbuart_cons.c
@@ -0,0 +1,757 @@
+/* This file contains the driver for the GRLIB APBUART serial port. The driver
+ * is implemented by using the cons.c console layer. Interrupt/Polling/Task
+ * driven mode can be configured using driver resources:
+ *
+ * - mode (0=Polling, 1=Interrupt, 2=Task-Driven-Interrupt Mode)
+ * - syscon (0=Force not Ssystem Console, 1=Suggest System Console)
+ *
+ * The BSP define APBUART_INFO_AVAIL in order to add the info routine
+ * used for debugging.
+ *
+ * COPYRIGHT (c) 2010.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+/******************* Driver manager interface ***********************/
+#include <bsp.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <rtems/bspIo.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <drvmgr/drvmgr.h>
+#include <grlib/ambapp_bus.h>
+#include <grlib/apbuart.h>
+#include <grlib/ambapp.h>
+#include <grlib/grlib.h>
+#include <grlib/cons.h>
+#include <rtems/termiostypes.h>
+#include <grlib/apbuart_cons.h>
+
+/*#define DEBUG 1 */
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/* LEON3 Low level transmit/receive functions provided by debug-uart code */
+#ifdef LEON3
+extern struct apbuart_regs *leon3_debug_uart; /* The debug UART */
+#endif
+
+/* Probed hardware capabilities */
+enum {
+ CAP_FIFO = 0x01, /* FIFO available */
+ CAP_DI = 0x02, /* RX delayed interrupt available */
+};
+/* Per-device driver state. */
+struct apbuart_priv {
+	struct console_dev condev;	/* Must be first: cast to/from
+					 * rtems_termios_device_context/console_dev */
+	struct drvmgr_dev *dev;		/* Driver manager device handle */
+	struct apbuart_regs *regs;	/* APBUART register base address */
+	struct rtems_termios_tty *tty;	/* Set at first_open() */
+	char devName[32];		/* Filesystem name, e.g. /dev/apbuart0 */
+	volatile int sending;		/* Chars handed to TX HW, 0 = idle */
+	int mode;			/* TERMIOS_POLLED/IRQ_DRIVEN/TASK_DRIVEN */
+	int cap;			/* Probed capabilities, CAP_* bitmask */
+};
+
+/* Getters for different interfaces. It happens to be just casting which we do
+ * in one place to avoid getting cast away. */
+/* Convert a termios device context to the embedded console_dev. */
+static struct console_dev *base_get_condev(rtems_termios_device_context *base)
+{
+	return (struct console_dev *) base;
+}
+
+/* Convert a console_dev to its enclosing apbuart_priv (condev is first). */
+static struct apbuart_priv *condev_get_priv(struct console_dev *condev)
+{
+	return (struct apbuart_priv *) condev;
+}
+
+/* Convert a termios device context to the enclosing apbuart_priv. */
+static struct apbuart_priv *base_get_priv(rtems_termios_device_context *base)
+{
+	return condev_get_priv(base_get_condev(base));
+}
+
+/* TERMIOS Layer Callback functions */
+static bool first_open(
+ rtems_termios_tty *tty,
+ rtems_termios_device_context *base,
+ struct termios *term,
+ rtems_libio_open_close_args_t *args
+);
+static void last_close(
+ rtems_termios_tty *tty,
+ rtems_termios_device_context *base,
+ rtems_libio_open_close_args_t *args
+);
+static void write_interrupt(
+ rtems_termios_device_context *base,
+ const char *buf,
+ size_t len
+);
+static bool set_attributes(
+ rtems_termios_device_context *base,
+ const struct termios *t
+);
+static void get_attributes(
+ rtems_termios_device_context *base,
+ struct termios *t
+);
+static int read_polled(rtems_termios_device_context *base);
+static int read_task(rtems_termios_device_context *base);
+static void write_polled(
+ rtems_termios_device_context *base,
+ const char *buf,
+ size_t len
+);
+
+static void apbuart_cons_isr(void *arg);
+int apbuart_get_baud(struct apbuart_priv *uart);
+
+int apbuart_init1(struct drvmgr_dev *dev);
+#ifdef APBUART_INFO_AVAIL
+static int apbuart_info(
+ struct drvmgr_dev *dev,
+ void (*print_line)(void *p, char *str),
+ void *p, int, char *argv[]);
+#define APBUART_INFO_FUNC apbuart_info
+#else
+#define APBUART_INFO_FUNC NULL
+#endif
+
+/* Driver manager operations: only init stage 1 and the optional info
+ * routine are implemented.
+ */
+struct drvmgr_drv_ops apbuart_ops =
+{
+	.init = {apbuart_init1, NULL, NULL, NULL},
+	.remove = NULL,
+	.info = APBUART_INFO_FUNC
+};
+
+/* AMBA plug&play device IDs this driver binds to. */
+static struct amba_dev_id apbuart_ids[] =
+{
+	{VENDOR_GAISLER, GAISLER_APBUART},
+	{0, 0}	/* Mark end of table */
+};
+
+/* Driver description registered with the driver manager. */
+static struct amba_drv_info apbuart_drv_info =
+{
+	{
+		DRVMGR_OBJ_DRV,			/* Driver */
+		NULL,				/* Next driver */
+		NULL,				/* Device list */
+		DRIVER_AMBAPP_GAISLER_APBUART_ID,/* Driver ID */
+		"APBUART_DRV",			/* Driver Name */
+		DRVMGR_BUS_TYPE_AMBAPP,		/* Bus Type */
+		&apbuart_ops,
+		NULL,				/* Funcs */
+		0,				/* No devices yet */
+		sizeof(struct apbuart_priv),	/*DrvMgr alloc private*/
+	},
+	&apbuart_ids[0]
+};
+
+/* Register the APBUART console driver with the driver manager. Called by
+ * the BSP during driver registration.
+ */
+void apbuart_cons_register_drv (void)
+{
+	DBG("Registering APBUART Console driver\n");
+	drvmgr_drv_register(&apbuart_drv_info.general);
+}
+
+/* TERMIOS callbacks for interrupt-driven mode (RX handled in the ISR). */
+static const rtems_termios_device_handler handler_interrupt = {
+	.first_open     = first_open,
+	.last_close     = last_close,
+	.write          = write_interrupt,
+	.set_attributes = set_attributes,
+	.mode           = TERMIOS_IRQ_DRIVEN
+};
+
+/* TERMIOS callbacks for task-driven mode (RX drained by termios daemon). */
+static const rtems_termios_device_handler handler_task = {
+	.first_open     = first_open,
+	.last_close     = last_close,
+	.poll_read      = read_task,
+	.write          = write_interrupt,
+	.set_attributes = set_attributes,
+	.mode           = TERMIOS_TASK_DRIVEN
+};
+
+/* TERMIOS callbacks for polled mode (no interrupts used). */
+static const rtems_termios_device_handler handler_polled = {
+	.first_open     = first_open,
+	.last_close     = last_close,
+	.poll_read      = read_polled,
+	.write          = write_polled,
+	.set_attributes = set_attributes,
+	.mode           = TERMIOS_POLLED
+};
+
+/*
+ * APBUART hardware instantiation is flexible. Probe features here and driver
+ * can select appropriate routines for the hardware. probecap() return value
+ * is a CAP_ bitmask.
+ */
+static int probecap(struct apbuart_regs *regs)
+{
+	int cap = 0;
+
+	/* Probe FIFO */
+	if (regs->ctrl & APBUART_CTRL_FA) {
+		cap |= CAP_FIFO;
+
+		/* Probe RX delayed interrupt: set the DI bit and check that it
+		 * sticks, then restore the original (cleared) state.
+		 */
+		regs->ctrl |= APBUART_CTRL_DI;
+		if (regs->ctrl & APBUART_CTRL_DI) {
+			regs->ctrl &= ~APBUART_CTRL_DI;
+			cap |= CAP_DI;
+		}
+	}
+
+	return cap;
+}
+
+/* Driver manager init stage 1. Probes the hardware, selects operation mode
+ * and system-console status from driver resources, and registers the device
+ * with the console layer.
+ *
+ * Driver resources used:
+ *  - "syscon" (int): override system console selection (non-zero=yes, 0=no)
+ *  - "mode" (int):   0=Polled, 1=Interrupt, 2=Task-Driven-Interrupt Mode
+ *
+ * Returns DRVMGR_OK on success, DRVMGR_NOMEM/DRVMGR_EBUSY/-1 on error.
+ */
+int apbuart_init1(struct drvmgr_dev *dev)
+{
+	struct apbuart_priv *priv;
+	struct amba_dev_info *ambadev;
+	struct ambapp_core *pnpinfo;
+	union drvmgr_key_value *value;
+	char prefix[32];
+	unsigned int db;
+	static int first_uart = 1;
+
+	/* The default operation in AMP is to use APBUART[0] for CPU[0],
+	 * APBUART[1] for CPU[1] and so on. The remaining UARTs is not used
+	 * since we don't know how many CPU-cores there are. Note this only
+	 * affects the on-chip amba bus (the root bus). The user can override
+	 * the default resource sharing by defining driver resources for the
+	 * APBUART devices on each AMP OS instance.
+	 */
+#if defined(RTEMS_MULTIPROCESSING) && defined(LEON3)
+	if (drvmgr_on_rootbus(dev) && dev->minor_drv != LEON3_Cpu_Index &&
+	    drvmgr_keys_get(dev, NULL) != 0) {
+		/* User hasn't configured on-chip APBUART, leave it untouched */
+		return DRVMGR_EBUSY;
+	}
+#endif
+
+	DBG("APBUART[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+	/* Private data was allocated and zeroed by driver manager */
+	priv = dev->priv;
+	if (!priv)
+		return DRVMGR_NOMEM;
+	priv->dev = dev;
+
+	/* Get device information from AMBA PnP information */
+	ambadev = (struct amba_dev_info *)priv->dev->businfo;
+	if (ambadev == NULL)
+		return -1;
+	pnpinfo = &ambadev->info;
+	priv->regs = (struct apbuart_regs *)pnpinfo->apb_slv->start;
+
+	/* Clear HW regs, leave baudrate register as it is */
+	priv->regs->status = 0;
+
+	/* leave Transmitter/receiver if this is the RTEMS debug UART (assume
+	 * it has been setup by boot loader).
+	 */
+	db = 0;
+#ifdef LEON3
+	if (priv->regs == leon3_debug_uart) {
+		db = priv->regs->ctrl & (LEON_REG_UART_CTRL_RE |
+					 LEON_REG_UART_CTRL_TE |
+					 LEON_REG_UART_CTRL_PE |
+					 LEON_REG_UART_CTRL_PS);
+	}
+#endif
+	/* Let UART debug tunnelling be untouched if Flow-control is set.
+	 *
+	 * With old APBUARTs debug is enabled by setting LB and FL, since LB or
+	 * DB are not reset we can not trust them. However since FL is reset we
+	 * guess that we are debugging if FL is already set, the debugger set
+	 * either LB or DB depending on UART capabilities.
+	 */
+	if (priv->regs->ctrl & LEON_REG_UART_CTRL_FL) {
+		db |= priv->regs->ctrl & (LEON_REG_UART_CTRL_DB |
+		      LEON_REG_UART_CTRL_LB | LEON_REG_UART_CTRL_FL);
+	}
+
+	priv->regs->ctrl = db;
+
+	priv->cap = probecap(priv->regs);
+
+	/* The system console and Debug console may depend on this device, so
+	 * initialize it straight away.
+	 *
+	 * We default to have System Console on first APBUART, user may override
+	 * this behaviour by setting the syscon option to 0.
+	 */
+	if (drvmgr_on_rootbus(dev) && first_uart) {
+		priv->condev.flags = CONSOLE_FLAG_SYSCON;
+		first_uart = 0;
+	} else {
+		priv->condev.flags = 0;
+	}
+
+	value = drvmgr_dev_key_get(priv->dev, "syscon", DRVMGR_KT_INT);
+	if (value) {
+		if (value->i)
+			priv->condev.flags |= CONSOLE_FLAG_SYSCON;
+		else
+			priv->condev.flags &= ~CONSOLE_FLAG_SYSCON;
+	}
+
+	/* Select 0=Polled, 1=IRQ, 2=Task-Driven UART Mode */
+	value = drvmgr_dev_key_get(priv->dev, "mode", DRVMGR_KT_INT);
+	if (value)
+		priv->mode = value->i;
+	else
+		priv->mode = TERMIOS_POLLED;
+	/* TERMIOS device handlers */
+	if (priv->mode == TERMIOS_IRQ_DRIVEN) {
+		priv->condev.handler = &handler_interrupt;
+	} else if (priv->mode == TERMIOS_TASK_DRIVEN) {
+		priv->condev.handler = &handler_task;
+	} else {
+		priv->condev.handler = &handler_polled;
+	}
+
+	priv->condev.fsname = NULL;
+	/* Get Filesystem name prefix */
+	prefix[0] = '\0';
+	if (drvmgr_get_dev_prefix(dev, prefix)) {
+		/* Got special prefix, this means we have a bus prefix
+		 * And we should use our "bus minor". Bound the copy with
+		 * snprintf since a long bus prefix (prefix[] holds up to 31
+		 * chars) could otherwise overflow devName[32].
+		 */
+		snprintf(priv->devName, sizeof(priv->devName),
+		         "/dev/%sapbuart%d", prefix, dev->minor_bus);
+		priv->condev.fsname = priv->devName;
+	} else {
+		snprintf(priv->devName, sizeof(priv->devName),
+		         "/dev/apbuart%d", dev->minor_drv);
+	}
+
+	/* Register it as a console device, the console driver will register
+	 * a termios device as well
+	 */
+	console_dev_register(&priv->condev);
+
+	return DRVMGR_OK;
+}
+
+#ifdef APBUART_INFO_AVAIL
+/* Driver manager info routine: prints mode, filesystem name and a snapshot
+ * of the UART registers via the supplied print_line callback.
+ */
+static int apbuart_info(
+	struct drvmgr_dev *dev,
+	void (*print_line)(void *p, char *str),
+	void *p, int argc, char *argv[])
+{
+	struct apbuart_priv *priv = dev->priv;
+	char *str1;
+	char buf[64];
+
+	if (dev->priv == NULL)
+		return -DRVMGR_EINVAL;
+
+	if (priv->mode == TERMIOS_POLLED)
+		str1 = "TERMIOS_POLLED";
+	else if (priv->mode == TERMIOS_IRQ_DRIVEN)
+		str1 = "TERMIOS_IRQ_DRIVEN";
+	else if (priv->mode == TERMIOS_TASK_DRIVEN)
+		str1 = "TERMIOS_TASK_DRIVEN";
+	else
+		str1 = "BAD MODE";
+
+	sprintf(buf, "UART Mode:   %s", str1);
+	print_line(p, buf);
+	if (priv->condev.fsname) {
+		sprintf(buf, "FS Name:     %s", priv->condev.fsname);
+		print_line(p, buf);
+	}
+	sprintf(buf, "STATUS REG:  0x%x", priv->regs->status);
+	print_line(p, buf);
+	sprintf(buf, "CTRL REG:    0x%x", priv->regs->ctrl);
+	print_line(p, buf);
+	sprintf(buf, "SCALER REG:  0x%x  baud rate %d",
+			priv->regs->scaler, apbuart_get_baud(priv));
+	print_line(p, buf);
+
+	return DRVMGR_OK;
+}
+#endif
+
+/* TERMIOS first_open callback: called on the 0->1 open transition.
+ * Inherits hardware settings from the boot loader on the system console,
+ * enables TX/RX and, in non-polled modes, registers the ISR and enables RX
+ * interrupts.
+ *
+ * Returns false if interrupt registration fails, true otherwise.
+ */
+static bool first_open(
+	rtems_termios_tty *tty,
+	rtems_termios_device_context *base,
+	struct termios *term,
+	rtems_libio_open_close_args_t *args
+)
+{
+	struct apbuart_priv *uart = base_get_priv(base);
+
+	uart->tty = tty;
+
+	/* Inherit UART hardware parameters from bootloader on system console */
+	if (uart->condev.flags & CONSOLE_FLAG_SYSCON_GRANT) {
+		get_attributes(base, term);
+		term->c_oflag |= ONLCR;
+		set_attributes(base, term);
+	}
+
+	/* Enable TX/RX */
+	uart->regs->ctrl |= APBUART_CTRL_RE | APBUART_CTRL_TE;
+
+	if (uart->mode != TERMIOS_POLLED) {
+		int ret;
+		uint32_t ctrl;
+
+		/* Register interrupt and enable it */
+		ret = drvmgr_interrupt_register(
+			uart->dev, 0, uart->devName, apbuart_cons_isr, tty
+		);
+		if (ret) {
+			return false;
+		}
+
+		uart->sending = 0;
+
+		/* Turn on RX interrupts */
+		ctrl = uart->regs->ctrl;
+		ctrl |= APBUART_CTRL_RI;
+		if (uart->cap & CAP_DI) {
+			/* Use RX FIFO interrupt only if delayed interrupt available. */
+			ctrl |= (APBUART_CTRL_DI | APBUART_CTRL_RF);
+		}
+		uart->regs->ctrl = ctrl;
+	}
+
+	return true;
+}
+
+/* TERMIOS last_close callback: called on the 1->0 open transition.
+ * In non-polled modes it disables RX interrupts, busy-waits until all
+ * outstanding TX data has left the hardware, and unregisters the ISR.
+ * TX/RX are disabled unless the UART is the LEON3 debug UART.
+ */
+static void last_close(
+	rtems_termios_tty *tty,
+	rtems_termios_device_context *base,
+	rtems_libio_open_close_args_t *args
+)
+{
+	struct apbuart_priv *uart = base_get_priv(base);
+	rtems_interrupt_lock_context lock_context;
+
+	if (uart->mode != TERMIOS_POLLED) {
+		/* Turn off RX interrupts */
+		rtems_termios_device_lock_acquire(base, &lock_context);
+		uart->regs->ctrl &=
+			~(APBUART_CTRL_DI | APBUART_CTRL_RI | APBUART_CTRL_RF);
+		rtems_termios_device_lock_release(base, &lock_context);
+
+		/**** Flush device ****/
+		/* uart->sending is cleared by the ISR/write path when the last
+		 * outstanding characters have been handed over.
+		 */
+		while (uart->sending) {
+			/* Wait until all data has been sent */
+		}
+		while (
+			(uart->regs->ctrl & APBUART_CTRL_TE) &&
+			!(uart->regs->status & APBUART_STATUS_TS)
+		) {
+			/* Wait until all data has left shift register */
+		}
+
+		/* Disable and unregister interrupt handler */
+		drvmgr_interrupt_unregister(uart->dev, 0, apbuart_cons_isr, tty);
+	}
+
+#ifdef LEON3
+	/* Disable TX/RX if not used for DEBUG */
+	if (uart->regs != leon3_debug_uart)
+		uart->regs->ctrl &= ~(APBUART_CTRL_RE | APBUART_CTRL_TE);
+#endif
+}
+
+/* TERMIOS poll_read callback for polled mode.
+ * Returns the next received character, or -1 when none is available.
+ */
+static int read_polled(rtems_termios_device_context *base)
+{
+	struct apbuart_priv *uart = base_get_priv(base);
+
+	return apbuart_inbyte_nonblocking(uart->regs);
+}
+
+/* This function is called from TERMIOS rxdaemon task without device lock.
+ *
+ * Drains the RX FIFO into a stack buffer and pushes the characters to
+ * termios, then re-enables RX interrupts (disabled by the ISR when it woke
+ * the daemon). Always returns EOF since characters are delivered through
+ * rtems_termios_enqueue_raw_characters().
+ */
+static int read_task(rtems_termios_device_context *base)
+{
+	rtems_interrupt_lock_context lock_context;
+	struct apbuart_priv *uart = base_get_priv(base);
+	struct apbuart_regs *regs = uart->regs;
+	int cnt;
+	char buf[33];	/* one more than the 32-char RX FIFO maximum */
+	struct rtems_termios_tty *tty;
+	uint32_t ctrl_add;
+
+	ctrl_add = APBUART_CTRL_RI;
+	if (uart->cap & CAP_DI) {
+		ctrl_add |= (APBUART_CTRL_DI | APBUART_CTRL_RF);
+	}
+	tty = uart->tty;
+	do {
+		cnt = 0;
+		while (
+			(regs->status & APBUART_STATUS_DR) &&
+			(cnt < sizeof(buf))
+		) {
+			buf[cnt] = regs->data;
+			cnt++;
+		}
+		if (0 < cnt) {
+			/* Tell termios layer about new characters */
+			rtems_termios_enqueue_raw_characters(tty, &buf[0], cnt);
+		}
+
+		/*
+		 * Turn on RX interrupts. A new character in FIFO now may not
+		 * cause interrupt so we must check data ready again
+		 * afterwards.
+		 */
+		rtems_termios_device_lock_acquire(base, &lock_context);
+		regs->ctrl |= ctrl_add;
+		rtems_termios_device_lock_release(base, &lock_context);
+	} while (regs->status & APBUART_STATUS_DR);
+
+	return EOF;
+}
+
+/* Calculate the current baud rate from the scaler register and the APB bus
+ * frequency reported by the driver manager.
+ */
+int apbuart_get_baud(struct apbuart_priv *uart)
+{
+	unsigned int core_clk_hz;
+	unsigned int scaler;
+
+	/* Get current scaler setting */
+	scaler = uart->regs->scaler;
+
+	/* Get APBUART core frequency */
+	drvmgr_freq_get(uart->dev, DEV_APB_SLV, &core_clk_hz);
+
+	/* Calculate baud rate from generator "scaler" number */
+	return core_clk_hz / ((scaler + 1) * 8);
+}
+
+/* TERMIOS set_attributes callback: program termios settings into the UART.
+ * Parity and flow-control bits are updated under the device lock; the
+ * baud-rate scaler is written afterwards.
+ *
+ * Returns false when the requested character size is not CS8 (the only size
+ * the hardware supports), true otherwise.
+ */
+static bool set_attributes(
+	rtems_termios_device_context *base,
+	const struct termios *t
+)
+{
+	unsigned int core_clk_hz;
+	unsigned int scaler;
+	unsigned int ctrl;
+	int baud;
+	struct apbuart_priv *uart = base_get_priv(base);
+	rtems_interrupt_lock_context lock_context;
+
+	switch(t->c_cflag & CSIZE) {
+		default:
+		case CS5:
+		case CS6:
+		case CS7:
+			/* Hardware doesn't support other than CS8 */
+			return false;
+		case CS8:
+			break;
+	}
+
+	rtems_termios_device_lock_acquire(base, &lock_context);
+
+	/* Read out current value */
+	ctrl = uart->regs->ctrl;
+
+	switch(t->c_cflag & (PARENB|PARODD)){
+		case (PARENB|PARODD):
+			/* Odd parity */
+			ctrl |= LEON_REG_UART_CTRL_PE|LEON_REG_UART_CTRL_PS;
+			break;
+
+		case PARENB:
+			/* Even parity */
+			ctrl &= ~LEON_REG_UART_CTRL_PS;
+			ctrl |= LEON_REG_UART_CTRL_PE;
+			break;
+
+		default:
+		case 0:
+		case PARODD:
+			/* No Parity */
+			ctrl &= ~(LEON_REG_UART_CTRL_PS|LEON_REG_UART_CTRL_PE);
+	}
+
+	if (!(t->c_cflag & CLOCAL))
+		ctrl |= LEON_REG_UART_CTRL_FL;
+	else
+		ctrl &= ~LEON_REG_UART_CTRL_FL;
+
+	/* Update new settings */
+	uart->regs->ctrl = ctrl;
+
+	rtems_termios_device_lock_release(base, &lock_context);
+
+	/* Baud rate */
+	baud = rtems_termios_baud_to_number(t->c_ospeed);
+	if (baud > 0){
+		/* Get APBUART core frequency */
+		drvmgr_freq_get(uart->dev, DEV_APB_SLV, &core_clk_hz);
+
+		/* Calculate Baud rate generator "scaler" number.
+		 * The *10/-5/10 arithmetic rounds to the nearest value.
+		 */
+		scaler = (((core_clk_hz*10)/(baud*8))-5)/10;
+
+		/* Set new baud rate by setting scaler */
+		uart->regs->scaler = scaler;
+	}
+
+	return true;
+}
+
+/* TERMIOS get_attributes callback: read current hardware settings (parity,
+ * flow control, baud rate) back into a termios structure. Character size is
+ * always reported as CS8, the only size the hardware supports.
+ */
+static void get_attributes(
+	rtems_termios_device_context *base,
+	struct termios *t
+)
+{
+	struct apbuart_priv *uart = base_get_priv(base);
+	unsigned int ctrl;
+
+	t->c_cflag = t->c_cflag & ~(CSIZE|PARENB|PARODD|CLOCAL);
+
+	/* Hardware support only CS8 */
+	t->c_cflag |= CS8;
+
+	/* Read out current parity */
+	ctrl = uart->regs->ctrl;
+	if (ctrl & LEON_REG_UART_CTRL_PE) {
+		if (ctrl & LEON_REG_UART_CTRL_PS)
+			t->c_cflag |= PARENB|PARODD; /* Odd parity */
+		else
+			t->c_cflag |= PARENB;	     /* Even parity */
+	}
+
+	if ((ctrl & LEON_REG_UART_CTRL_FL) == 0)
+		t->c_cflag |= CLOCAL;
+
+	rtems_termios_set_best_baud(t, apbuart_get_baud(uart));
+}
+
+/* TERMIOS write callback for polled mode: transmit the buffer by busy
+ * waiting on the UART, one character at a time.
+ */
+static void write_polled(
+	rtems_termios_device_context *base,
+	const char *buf,
+	size_t len
+)
+{
+	struct apbuart_priv *uart = base_get_priv(base);
+	int nwrite = 0;
+
+	while (nwrite < len) {
+		apbuart_outbyte_polled(uart->regs, *buf++, 0, 0);
+		nwrite++;
+	}
+}
+
+/* TERMIOS write callback for interrupt/task-driven mode.
+ *
+ * len > 0 starts (or refills) a transmission: with a FIFO the FIFO is filled
+ * as far as possible, otherwise a single character is written. len == 0 is
+ * termios' signal that there is no more data, and TX interrupts are turned
+ * off. uart->sending records the outstanding count for the ISR and
+ * last_close().
+ */
+static void write_interrupt(
+	rtems_termios_device_context *base,
+	const char *buf,
+	size_t len
+)
+{
+	struct apbuart_priv *uart = base_get_priv(base);
+	struct apbuart_regs *regs = uart->regs;
+	int sending;
+	unsigned int ctrl;
+
+	ctrl = regs->ctrl;
+
+	if (len > 0) {
+		/*
+		 * sending is used to remember how much we have outstanding so
+		 * we can tell termios later.
+		 */
+		/* Enable TX interrupt (interrupt is edge-triggered) */
+		regs->ctrl = ctrl | APBUART_CTRL_TI;
+
+		if (ctrl & APBUART_CTRL_FA) {
+			/* APBUART with FIFO.. Fill as many as FIFO allows */
+			sending = 0;
+			while (
+				((regs->status & APBUART_STATUS_TF) == 0) &&
+				(sending < len)
+			) {
+				regs->data = *buf;
+				buf++;
+				sending++;
+			}
+		} else {
+			/* start UART TX, this will result in an interrupt when done */
+			regs->data = *buf;
+
+			sending = 1;
+		}
+	} else {
+		/* No more to send, disable TX interrupts */
+		regs->ctrl = ctrl & ~APBUART_CTRL_TI;
+
+		/* Tell close that we sent everything */
+		sending = 0;
+	}
+
+	uart->sending = sending;
+}
+
+/* Handle UART interrupts.
+ *
+ * In task-driven mode RX interrupts are masked and the termios RX daemon is
+ * woken to drain the FIFO (see read_task()). In interrupt mode the FIFO is
+ * drained here directly. In both cases a finished transmission is reported
+ * to termios, which may refill the transmitter via write_interrupt().
+ */
+static void apbuart_cons_isr(void *arg)
+{
+	rtems_termios_tty *tty = arg;
+	rtems_termios_device_context *base;
+	struct console_dev *condev = rtems_termios_get_device_context(tty);
+	struct apbuart_priv *uart = condev_get_priv(condev);
+	struct apbuart_regs *regs = uart->regs;
+	unsigned int status;
+	char buf[33];
+	int cnt;
+
+	if (uart->mode == TERMIOS_TASK_DRIVEN) {
+		if ((status = regs->status) & APBUART_STATUS_DR) {
+			rtems_interrupt_lock_context lock_context;
+
+			/* Turn off RX interrupts; read_task() re-enables them */
+			base = rtems_termios_get_device_context(tty);
+			rtems_termios_device_lock_acquire(base, &lock_context);
+			regs->ctrl &=
+			    ~(APBUART_CTRL_DI | APBUART_CTRL_RI |
+			      APBUART_CTRL_RF);
+			rtems_termios_device_lock_release(base, &lock_context);
+			/* Activate termios RX daemon task */
+			rtems_termios_rxirq_occured(tty);
+		}
+	} else {
+		/*
+		 * Get all new characters from APBUART RX (FIFO) and store them
+		 * on the stack. Then tell termios about the new characters.
+		 * Maximum APBUART RX FIFO size is 32 characters.
+		 */
+		cnt = 0;
+		while (
+			((status=regs->status) & APBUART_STATUS_DR) &&
+			(cnt < sizeof(buf))
+		) {
+			buf[cnt] = regs->data;
+			cnt++;
+		}
+		if (0 < cnt) {
+			/* Tell termios layer about new characters */
+			rtems_termios_enqueue_raw_characters(tty, &buf[0], cnt);
+		}
+	}
+
+	if (uart->sending && (status & APBUART_STATUS_TE)) {
+		/* Tell close that we sent everything */
+		cnt = uart->sending;
+
+		/*
+		 * Tell termios how much we have sent. dequeue() may call
+		 * write_interrupt() to refill the transmitter.
+		 * write_interrupt() will eventually be called with 0 len to
+		 * disable TX interrupts.
+		 */
+		rtems_termios_dequeue_characters(tty, cnt);
+	}
+}
+
diff --git a/bsps/shared/grlib/uart/apbuart_polled.c b/bsps/shared/grlib/uart/apbuart_polled.c
new file mode 100644
index 0000000000..0fbfbc51ba
--- /dev/null
+++ b/bsps/shared/grlib/uart/apbuart_polled.c
@@ -0,0 +1,52 @@
+/*
+ * COPYRIGHT (c) 2010.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <grlib/apbuart.h>
+
+/* Transmit one character by polling the UART.
+ *
+ * regs:             APBUART register block to transmit on
+ * ch:               character to send
+ * do_cr_on_newline: when non-zero, a '\r' is sent before a '\n'
+ * wait_sent:        when non-zero, also busy-wait after writing the
+ *                   character until the transmitter reports empty again
+ */
+void apbuart_outbyte_polled(
+  struct apbuart_regs *regs,
+  unsigned char ch,
+  int do_cr_on_newline,
+  int wait_sent
+)
+{
+send:
+  /* Wait for room in the transmitter */
+  while ( (regs->status & APBUART_STATUS_TE) == 0 ) {
+    /* Lower bus utilization while waiting for UART */
+    __asm__ volatile ("nop"::); __asm__ volatile ("nop"::);
+    __asm__ volatile ("nop"::); __asm__ volatile ("nop"::);
+    __asm__ volatile ("nop"::); __asm__ volatile ("nop"::);
+    __asm__ volatile ("nop"::); __asm__ volatile ("nop"::);
+  }
+
+  /* CR/LF translation: emit '\r' first, then loop back for the '\n' */
+  if ((ch == '\n') && do_cr_on_newline) {
+    regs->data = (unsigned int) '\r';
+    do_cr_on_newline = 0;
+    goto send;
+  }
+  regs->data = (unsigned int) ch;
+
+  /* Wait until the character has been sent?
+   * NOTE(review): this polls APBUART_STATUS_TE (transmitter empty),
+   * which indicates the transmitter can accept data again; confirm
+   * against the APBUART manual whether this also guarantees the byte
+   * has fully left the shift register.
+   */
+  if (wait_sent) {
+    while ((regs->status & APBUART_STATUS_TE) == 0)
+      ;
+  }
+}
+
+/* Non-blocking polled receive.
+ *
+ * Clears any pending error flags, then returns the next received
+ * character, or -1 when no data is ready.
+ */
+int apbuart_inbyte_nonblocking(struct apbuart_regs *regs)
+{
+  /* Clear errors */
+  if (regs->status & APBUART_STATUS_ERR)
+    regs->status = ~APBUART_STATUS_ERR;
+
+  if ((regs->status & APBUART_STATUS_DR) == 0)
+    return -1;
+  else
+    return (int) regs->data;
+}
diff --git a/bsps/shared/grlib/uart/apbuart_termios.c b/bsps/shared/grlib/uart/apbuart_termios.c
new file mode 100644
index 0000000000..81df89c171
--- /dev/null
+++ b/bsps/shared/grlib/uart/apbuart_termios.c
@@ -0,0 +1,259 @@
+/*
+ * COPYRIGHT (c) 1989-1998.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Modified for LEON3 BSP.
+ * COPYRIGHT (c) 2004.
+ * Gaisler Research.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <grlib/apbuart_termios.h>
+#include <grlib/apbuart.h>
+#include <bsp.h>
+
+/* Interrupt handler for the termios APBUART driver.
+ *
+ * Drains the RX FIFO one character at a time into termios, then, if
+ * the transmitter is empty while TX interrupts are enabled (i.e. a
+ * transmission is in progress), asks termios to dequeue the sent
+ * character — which re-enters apbuart_write_support() to refill.
+ */
+static void apbuart_isr(void *arg)
+{
+  rtems_termios_tty *tty = arg;
+  struct apbuart_context *uart = rtems_termios_get_device_context(tty);
+  unsigned int status;
+  char data;
+
+  /* Get all received characters */
+  while ((status=uart->regs->status) & APBUART_STATUS_DR) {
+    /* Data has arrived, get new data */
+    data = uart->regs->data;
+
+    /* Tell termios layer about new character */
+    rtems_termios_enqueue_raw_characters(tty, &data, 1);
+  }
+
+  /* TX empty is only meaningful while APBUART_CTRL_TI is set; that is
+   * how we know a write is outstanding.
+   */
+  if (
+    (status & APBUART_STATUS_TE)
+    && (uart->regs->ctrl & APBUART_CTRL_TI) != 0
+  ) {
+    /* write_interrupt will get called from this function */
+    rtems_termios_dequeue_characters(tty, 1);
+  }
+}
+
+/* Termios interrupt-mode write handler.
+ *
+ * Starts transmission of the first character of buf and enables the
+ * (edge-triggered) TX interrupt; the ISR drives the rest of the
+ * buffer. A call with len == 0 disables TX interrupts and marks the
+ * transmission as finished so last_close can stop waiting.
+ */
+static void apbuart_write_support(
+  rtems_termios_device_context *base,
+  const char *buf,
+  size_t len
+)
+{
+  struct apbuart_context *uart = (struct apbuart_context *) base;
+  int sending;
+
+  if (len > 0) {
+    /* Enable TX interrupt (interrupt is edge-triggered) */
+    uart->regs->ctrl |= APBUART_CTRL_TI;
+
+    /* start UART TX, this will result in an interrupt when done */
+    uart->regs->data = *buf;
+
+    sending = 1;
+  } else {
+    /* No more to send, disable TX interrupts */
+    uart->regs->ctrl &= ~APBUART_CTRL_TI;
+
+    /* Tell close that we sent everything */
+    sending = 0;
+  }
+
+  uart->sending = sending;
+}
+
+/* Termios polled-mode write handler: busy-wait each character out.
+ * No CR injection and no wait-for-completion (both flags 0).
+ */
+static void apbuart_write_polled(
+  rtems_termios_device_context *base,
+  const char *buf,
+  size_t len
+)
+{
+  struct apbuart_context *uart = (struct apbuart_context *) base;
+  size_t nwrite = 0;
+
+  while (nwrite < len) {
+    apbuart_outbyte_polled(uart->regs, *buf++, 0, 0);
+    nwrite++;
+  }
+}
+
+/* Termios polled-mode read handler: next character or -1 if none. */
+static int apbuart_poll_read(rtems_termios_device_context *base)
+{
+  struct apbuart_context *uart = (struct apbuart_context *) base;
+
+  return apbuart_inbyte_nonblocking(uart->regs);
+}
+
+/* Termios set_attributes handler: apply character size, parity, flow
+ * control and baud rate from *t to the hardware.
+ *
+ * Returns false when the requested settings are unsupported (anything
+ * other than 8 data bits), true otherwise. The ctrl register update is
+ * done under the device lock; the scaler write is not locked since it
+ * is a single register store.
+ */
+static bool apbuart_set_attributes(
+  rtems_termios_device_context *base,
+  const struct termios *t
+)
+{
+  struct apbuart_context *uart = (struct apbuart_context *) base;
+  rtems_interrupt_lock_context lock_context;
+  unsigned int scaler;
+  unsigned int ctrl;
+  int baud;
+
+  switch (t->c_cflag & CSIZE) {
+    default:
+    case CS5:
+    case CS6:
+    case CS7:
+      /* Hardware doesn't support other than CS8 */
+      return false;
+    case CS8:
+      break;
+  }
+
+  rtems_termios_device_lock_acquire(base, &lock_context);
+
+  /* Read out current value */
+  ctrl = uart->regs->ctrl;
+
+  switch (t->c_cflag & (PARENB|PARODD)) {
+    case (PARENB|PARODD):
+      /* Odd parity */
+      ctrl |= APBUART_CTRL_PE|APBUART_CTRL_PS;
+      break;
+
+    case PARENB:
+      /* Even parity */
+      ctrl &= ~APBUART_CTRL_PS;
+      ctrl |= APBUART_CTRL_PE;
+      break;
+
+    default:
+    case 0:
+    case PARODD:
+      /* No Parity (PARODD without PARENB also means parity disabled) */
+      ctrl &= ~(APBUART_CTRL_PS|APBUART_CTRL_PE);
+  }
+
+  /* CLOCAL clear means hardware flow control is requested */
+  if (!(t->c_cflag & CLOCAL)) {
+    ctrl |= APBUART_CTRL_FL;
+  } else {
+    ctrl &= ~APBUART_CTRL_FL;
+  }
+
+  /* Update new settings */
+  uart->regs->ctrl = ctrl;
+
+  rtems_termios_device_lock_release(base, &lock_context);
+
+  /* Baud rate */
+  baud = rtems_termios_baud_to_number(t->c_ospeed);
+  if (baud > 0) {
+    /* Calculate Baud rate generator "scaler" number; the *10 / /10
+     * arithmetic rounds freq/(baud*8) - 0.5 to the nearest integer.
+     */
+    scaler = (((uart->freq_hz * 10) / (baud * 8)) - 5) / 10;
+
+    /* Set new baud rate by setting scaler */
+    uart->regs->scaler = scaler;
+  }
+
+  return true;
+}
+
+/* Derive the current baud rate from the hardware scaler register
+ * (inverse of the formula in apbuart_set_attributes) and store the
+ * closest standard rate into *term.
+ */
+static void apbuart_set_best_baud(
+  const struct apbuart_context *uart,
+  struct termios *term
+)
+{
+  uint32_t baud = (uart->freq_hz * 10) / ((uart->regs->scaler * 10 + 5) * 8);
+
+  rtems_termios_set_best_baud(term, baud);
+}
+
+/* Termios first_open handler for polled mode: report the current baud
+ * rate to termios, enable receiver and transmitter and clear status.
+ */
+static bool apbuart_first_open_polled(
+  rtems_termios_tty *tty,
+  rtems_termios_device_context *base,
+  struct termios *term,
+  rtems_libio_open_close_args_t *args
+)
+{
+  struct apbuart_context *uart = (struct apbuart_context *) base;
+
+  apbuart_set_best_baud(uart, term);
+
+  /* Initialize UART on opening */
+  uart->regs->ctrl |= APBUART_CTRL_RE | APBUART_CTRL_TE;
+  uart->regs->status = 0;
+
+  return true;
+}
+
+/* Termios first_open handler for interrupt mode: report the current
+ * baud rate, install the (shared) interrupt handler, then enable
+ * receiver, transmitter and RX interrupts.
+ *
+ * Returns false if the interrupt handler could not be installed.
+ */
+static bool apbuart_first_open_interrupt(
+  rtems_termios_tty *tty,
+  rtems_termios_device_context *base,
+  struct termios *term,
+  rtems_libio_open_close_args_t *args
+)
+{
+  struct apbuart_context *uart = (struct apbuart_context *) base;
+  rtems_status_code sc;
+
+  apbuart_set_best_baud(uart, term);
+
+  /* Register Interrupt handler */
+  sc = rtems_interrupt_handler_install(uart->irq, "console",
+                                       RTEMS_INTERRUPT_SHARED,
+                                       apbuart_isr,
+                                       tty);
+  if (sc != RTEMS_SUCCESSFUL)
+    return false;
+
+  uart->sending = 0;
+  /* Enable Receiver and transmitter and Turn on RX interrupts */
+  uart->regs->ctrl |= APBUART_CTRL_RE | APBUART_CTRL_TE |
+                      APBUART_CTRL_RI;
+  /* Initialize UART on opening
+   * NOTE(review): RE and TE were already set by the statement above,
+   * so this second ctrl update is redundant (harmless but dead).
+   */
+  uart->regs->ctrl |= APBUART_CTRL_RE | APBUART_CTRL_TE;
+  uart->regs->status = 0;
+
+  return true;
+}
+
+/* Termios last_close handler for interrupt mode: mask RX interrupts,
+ * drain any in-flight transmission, then remove the ISR.
+ */
+static void apbuart_last_close_interrupt(
+  rtems_termios_tty *tty,
+  rtems_termios_device_context *base,
+  rtems_libio_open_close_args_t *args
+)
+{
+  struct apbuart_context *uart = (struct apbuart_context *) base;
+  rtems_interrupt_lock_context lock_context;
+
+  /* Turn off RX interrupts */
+  rtems_termios_device_lock_acquire(base, &lock_context);
+  uart->regs->ctrl &= ~(APBUART_CTRL_RI);
+  rtems_termios_device_lock_release(base, &lock_context);
+
+  /**** Flush device ****
+   * apbuart_write_support() clears uart->sending (from ISR context)
+   * once termios has no more data to send.
+   * NOTE(review): this busy-waits on a field that is not declared
+   * volatile/atomic; confirm the compiler re-reads it each iteration.
+   */
+  while (uart->sending) {
+    /* Wait until all data has been sent */
+  }
+
+  /* uninstall ISR */
+  rtems_interrupt_handler_remove(uart->irq, apbuart_isr, tty);
+}
+
+/* Termios device handler table for interrupt-driven operation */
+const rtems_termios_device_handler apbuart_handler_interrupt = {
+  .first_open = apbuart_first_open_interrupt,
+  .last_close = apbuart_last_close_interrupt,
+  .write = apbuart_write_support,
+  .set_attributes = apbuart_set_attributes,
+  .mode = TERMIOS_IRQ_DRIVEN
+};
+
+/* Termios device handler table for polled operation */
+const rtems_termios_device_handler apbuart_handler_polled = {
+  .first_open = apbuart_first_open_polled,
+  .poll_read = apbuart_poll_read,
+  .write = apbuart_write_polled,
+  .set_attributes = apbuart_set_attributes,
+  .mode = TERMIOS_POLLED
+};
diff --git a/bsps/shared/grlib/uart/cons.c b/bsps/shared/grlib/uart/cons.c
new file mode 100644
index 0000000000..5fa41e6914
--- /dev/null
+++ b/bsps/shared/grlib/uart/cons.c
@@ -0,0 +1,137 @@
+/* This file contains the TTY driver for the serial ports. The driver
+ * is layered so that different UART hardware can be used. It is implemented
+ * using the Driver Manager.
+ *
+ * This driver uses the termios pseudo driver.
+ *
+ * COPYRIGHT (c) 2010.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bsp.h>
+#include <stdlib.h>
+#include <string.h>
+#include <grlib/cons.h>
+#include <rtems/console.h>
+
+#ifdef RTEMS_DRVMGR_STARTUP
+
+/* Note that it is not possible to use the interrupt mode of the driver
+ * together with the "old" APBUART and -u to GRMON. However the new
+ * APBUART core (from 1.0.17-b2710) has the GRMON debug bit and can
+ * handle interrupts.
+ */
+
+/* Non-zero once console_initialize() has run; after that, devices that
+ * register are installed into termios immediately instead of deferred.
+ */
+static int console_initialized = 0;
+
+#define FLAG_SYSCON 0x01
+/* Per-console bookkeeping: slot 0 is reserved for the system console */
+struct console_priv {
+	int flags; /* 0x1=SystemConsole */
+	int minor;
+	struct console_dev *dev;
+};
+
+#define CONSOLE_MAX BSP_NUMBER_OF_TERMIOS_PORTS
+struct console_priv cons[CONSOLE_MAX] = {{0,0},};
+
+/* Install one console in the TERMIOS layer.
+ *
+ * The device node name is taken from the driver (con->dev->fsname) if
+ * set; otherwise it is generated: minor 0 (the system console) becomes
+ * "/dev/console", minor N becomes "/dev/console_<letter>" with the
+ * letter 'a' + N. Fatal error on install failure.
+ */
+static void console_dev_init(struct console_priv *con)
+{
+	char name[16], *fsname;
+	rtems_status_code status;
+	int minor;
+
+	minor = con->minor;
+	if (!con->dev->fsname) {
+		strcpy(name, "/dev/console_a");
+		/* Special console name and MINOR for SYSTEM CONSOLE */
+		if (minor == 0)
+			name[12] = '\0'; /* /dev/console */
+		/* NOTE(review): assumes minor <= 25 so that 'a' + minor
+		 * stays within the alphabet — confirm CONSOLE_MAX bound.
+		 */
+		name[13] += minor; /* when minor=0, this has no effect... */
+		fsname = name;
+	} else {
+		fsname = con->dev->fsname;
+	}
+	status = rtems_termios_device_install(
+		fsname,
+		con->dev->handler,
+		NULL,
+		&con->dev->base
+	);
+	if (status != RTEMS_SUCCESSFUL) {
+		rtems_fatal_error_occurred(status);
+	}
+}
+
+/* Called by device driver to register itself to the cons interface.
+ *
+ * A device flagged CONSOLE_FLAG_SYSCON claims slot 0 (the system
+ * console) if still free; all other devices get the first free slot
+ * starting at 1. The CONSOLE_FLAG_SYSCON_GRANT bit is set on the
+ * device iff it actually won slot 0. If no slot is free the device is
+ * silently ignored. When registration happens after
+ * console_initialize(), the termios node is installed immediately.
+ */
+void console_dev_register(struct console_dev *dev)
+{
+	int i, minor = 0;
+	struct console_priv *con = NULL;
+
+	if ((dev->flags & CONSOLE_FLAG_SYSCON) && !cons[0].dev) {
+		con = &cons[0];
+		con->flags = FLAG_SYSCON;
+	} else {
+		/* Find first free non-syscon slot */
+		for (i=1; i<CONSOLE_MAX; i++) {
+			if (!cons[i].dev) {
+				con = &cons[i];
+				con->flags = 0;
+				minor = i;
+				break;
+			}
+		}
+	}
+	if (con == NULL) {
+		/* Not enough console structures */
+		return;
+	}
+	dev->flags &= ~CONSOLE_FLAG_SYSCON_GRANT;
+	if (con->flags & FLAG_SYSCON) {
+		dev->flags |= CONSOLE_FLAG_SYSCON_GRANT;
+	}
+
+	/* Assign Console */
+	con->dev = dev;
+	con->minor = minor;
+
+	if (console_initialized) {
+		/* Console layer is already initialized, that means that we can
+		 * register termios interface directly.
+		 */
+		console_dev_init(con);
+	}
+}
+
+#if 0
+void console_dev_unregister(struct console_dev *dev)
+{
+
+}
+#endif
+
+/* RTEMS console driver entry point.
+ *
+ * Initializes termios and installs a device node for every console
+ * registered so far; consoles registering later are installed directly
+ * by console_dev_register(). Always returns RTEMS_SUCCESSFUL.
+ */
+rtems_device_driver console_initialize(
+	rtems_device_major_number major,
+	rtems_device_minor_number minor,
+	void *arg)
+{
+	int i;
+
+	rtems_termios_initialize();
+
+	/* Register all Console a file system device node */
+	for (i=0; i<CONSOLE_MAX; i++) {
+		if (cons[i].dev)
+			console_dev_init(&cons[i]);
+	}
+
+	console_initialized = 1;
+
+	return RTEMS_SUCCESSFUL;
+}
+
+#endif