author     Daniel Hellstrom <daniel@gaisler.com>    2015-02-23 13:02:39 +0100
committer  Daniel Hellstrom <daniel@gaisler.com>    2015-04-17 01:10:17 +0200
commit     3bb41226e0941b86d58ecb97f7d292677de573c8 (patch)
tree       907aa270343f7c6d1bc08bf73288fb9b10da6197 /c/src/lib/libbsp/sparc/shared/tmtc
parent     LEON: added network device configuration helper function (diff)
LEON: added new drivers to the LEON2/LEON3 BSPs
Most drivers use the Driver Manager for device probing; they work on AMBA-over-PCI systems if PCI is big-endian.

New APIs:
 * GPIO Library, interfaced to GRGPIO
 * GENIRQ, Generic interrupt service implementation helper

New GRLIB Drivers:
 * ACTEL 1553 RT, user interface is similar to 1553 BRM driver
 * GR1553 (1553 BC, RT and BM core)
 * AHBSTAT (AHB error status core)
 * GRADCDAC (Core interfacing to ADC/DAC hardware)
 * GRGPIO (GPIO port accessed from GPIO Library)
 * MCTRL (Memory controller settings configuration)
 * GRETH (10/100/1000 Ethernet driver using Driver manager)
 * GRPWM (Pulse Width Modulation core)
 * SPICTRL (SPI master interface)
 * GRSPW_ROUTER (SpaceWire Router AMBA configuration interface)
 * GRCTM (SpaceCraft on-board Time Management core)
 * SPWCUC (Time distribution over SpaceWire)
 * GRTC (SpaceCraft up-link Tele Command core)
 * GRTM (SpaceCraft down-link Tele Metry core)

GR712RC ASIC specific interfaces:
 * GRASCS
 * CANMUX (select between OCCAN and SATCAN)
 * SATCAN
 * SLINK
Diffstat (limited to 'c/src/lib/libbsp/sparc/shared/tmtc')
-rw-r--r--  c/src/lib/libbsp/sparc/shared/tmtc/grtc.c  | 1962
-rw-r--r--  c/src/lib/libbsp/sparc/shared/tmtc/grtm.c  | 1587
2 files changed, 3549 insertions, 0 deletions
diff --git a/c/src/lib/libbsp/sparc/shared/tmtc/grtc.c b/c/src/lib/libbsp/sparc/shared/tmtc/grtc.c
new file mode 100644
index 0000000000..3794e95d24
--- /dev/null
+++ b/c/src/lib/libbsp/sparc/shared/tmtc/grtc.c
@@ -0,0 +1,1962 @@
+/* GRTC Telecommand decoder driver
+ *
+ * COPYRIGHT (c) 2007.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <bsp.h>
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <ctype.h>
+#include <malloc.h>
+#include <rtems/bspIo.h>
+
+#include <drvmgr/drvmgr.h>
+#include <drvmgr/ambapp_bus.h>
+#include <ambapp.h>
+#include <grtc.h>
+
+#ifndef IRQ_GLOBAL_PREPARE
+ #define IRQ_GLOBAL_PREPARE(level) rtems_interrupt_level level
+#endif
+
+#ifndef IRQ_GLOBAL_DISABLE
+ #define IRQ_GLOBAL_DISABLE(level) rtems_interrupt_disable(level)
+#endif
+
+#ifndef IRQ_GLOBAL_ENABLE
+ #define IRQ_GLOBAL_ENABLE(level) rtems_interrupt_enable(level)
+#endif
+
+/*
+#define DEBUG
+#define DEBUGFUNCS
+*/
+
+#include <debug_defs.h>
+
+#ifdef DEBUG_ERROR
+#define DEBUG_ERR_LOG(device,error) grtc_log_error(device,error)
+#else
+#define DEBUG_ERR_LOG(device,error)
+#endif
+
+/* GRTC register map */
+struct grtc_regs {
+ volatile unsigned int grst; /* Global Reset Register (GRR 0x00) */
+ volatile unsigned int gctrl; /* Global Control Register (GCR 0x04) */
+ int unused0;
+ volatile unsigned int sir; /* Spacecraft Identifier Register (SIR 0x0c) */
+ volatile unsigned int far; /* Frame Acceptance Report Register (FAR 0x10) */
+
+ volatile unsigned int clcw1; /* CLCW Register 1 (CLCWR1 0x14) */
+ volatile unsigned int clcw2; /* CLCW Register 2 (CLCWR2 0x18) */
+ volatile unsigned int phir; /* Physical Interface Register (PHIR 0x1c) */
+ volatile unsigned int cor; /* Control Register (COR 0x20) */
+
+ volatile unsigned int str; /* Status Register (STR 0x24) */
+ volatile unsigned int asr; /* Address Space Register (ASR 0x28) */
+ volatile unsigned int rp; /* Receive Read Pointer Register (RRP 0x2c) */
+ volatile unsigned int wp; /* Receive Write Pointer Register (RWP 0x30) */
+
+ int unused1[(0x60-0x34)/4];
+
+ volatile unsigned int pimsr; /* Pending Interrupt Masked Status Register (PIMSR 0x60) */
+ volatile unsigned int pimr; /* Pending Interrupt Masked Register (PIMR 0x64) */
+ volatile unsigned int pisr; /* Pending Interrupt Status Register (PISR 0x68) */
+ volatile unsigned int pir; /* Pending Interrupt Register (PIR 0x6c) */
+ volatile unsigned int imr; /* Interrupt Mask Register (IMR 0x70) */
+ volatile unsigned int picr; /* Pending Interrupt Clear Register (PICR 0x74) */
+};
+
+/* Security Byte */
+#define GRTC_SEB 0x55000000
+
+/* Global Reset Register (GRR 0x00) */
+#define GRTC_GRR_SRST 0x1
+#define GRTC_GRR_SRST_BIT 0
+
+/* Global Control Register (GCR 0x04) */
+#define GRTC_GCR_PSR_BIT 10
+#define GRTC_GCR_NRZM_BIT 11
+#define GRTC_GCR_PSS_BIT 12
+
+#define GRTC_GCR_PSR (1<<GRTC_GCR_PSR_BIT)
+#define GRTC_GCR_NRZM (1<<GRTC_GCR_NRZM_BIT)
+#define GRTC_GCR_PSS (1<<GRTC_GCR_PSS_BIT)
+
+/* Spacecraft Identifier Register (SIR 0x0c) */
+
+
+/* Frame Acceptance Report Register (FAR 0x10) */
+#define GRTC_FAR_SCI_BIT 10
+#define GRTC_FAR_CSEC_BIT 11
+#define GRTC_FAR_CAC_BIT 12
+#define GRTC_FAR_SSD_BIT 13
+
+#define GRTC_FAR_SCI (0x7<<GRTC_FAR_SCI_BIT)
+#define GRTC_FAR_CSEC (0x7<<GRTC_FAR_CSEC_BIT)
+#define GRTC_FAR_CAC (0x3f<<GRTC_FAR_CAC_BIT)
+#define GRTC_FAR_SSD (1<<GRTC_FAR_SSD_BIT)
+
+/* CLCW Register 1 (CLCWR1 0x14) */
+/* CLCW Register 2 (CLCWR2 0x18) */
+#define GRTC_CLCW_RVAL_BIT 0
+#define GRTC_CLCW_RTYPE_BIT 8
+#define GRTC_CLCW_FBCO_BIT 9
+#define GRTC_CLCW_RTMI_BIT 11
+#define GRTC_CLCW_WAIT_BIT 12
+#define GRTC_CLCW_LOUT_BIT 13
+#define GRTC_CLCW_NBLO_BIT 14
+#define GRTC_CLCW_NRFA_BIT 15
+#define GRTC_CLCW_VCI_BIT 18
+#define GRTC_CLCW_CIE_BIT 24
+#define GRTC_CLCW_STAF_BIT 26
+#define GRTC_CLCW_VNUM_BIT 29
+#define GRTC_CLCW_CWTY_BIT 31
+
+#define GRTC_CLCW_RVAL (0xff<<GRTC_CLCW_RVAL_BIT)
+#define GRTC_CLCW_RTYPE (1<<GRTC_CLCW_RTYPE_BIT)
+#define GRTC_CLCW_FBCO (0x3<<GRTC_CLCW_FBCO_BIT)
+#define GRTC_CLCW_RTMI (0x3<<GRTC_CLCW_RTMI_BIT)
+#define GRTC_CLCW_WAIT (1<<GRTC_CLCW_WAIT_BIT)
+#define GRTC_CLCW_LOUT (1<<GRTC_CLCW_LOUT_BIT)
+#define GRTC_CLCW_NBLO (1<<GRTC_CLCW_NBLO_BIT)
+#define GRTC_CLCW_NRFA (1<<GRTC_CLCW_NRFA_BIT)
+#define GRTC_CLCW_VCI (0x3f<<GRTC_CLCW_VCI_BIT)
+#define GRTC_CLCW_CIE (0x3<<GRTC_CLCW_CIE_BIT)
+#define GRTC_CLCW_STAF (0x3<<GRTC_CLCW_STAF_BIT)
+#define GRTC_CLCW_VNUM (0x3<<GRTC_CLCW_VNUM_BIT)
+#define GRTC_CLCW_CWTY (1<<GRTC_CLCW_CWTY_BIT)
+
+/* Physical Interface Register (PIR 0x1c) */
+#define GRTC_PIR_BLO_BIT 0
+#define GRTC_PIR_RFA_BIT 8
+
+#define GRTC_PIR_BLO (0xff<<GRTC_PIR_BLO_BIT)
+#define GRTC_PIR_RFA (0xff<<GRTC_PIR_RFA_BIT)
+
+/* Control Register (COR 0x20) */
+#define GRTC_COR_RE_BIT 0
+#define GRTC_COR_CRST_BIT 9
+
+#define GRTC_COR_RE (1<<GRTC_COR_RE_BIT)
+#define GRTC_COR_CRST (1<<GRTC_COR_CRST_BIT)
+
+/* Status Register (STR 0x24) */
+#define GRTC_STR_CR_BIT 0
+#define GRTC_STR_OV_BIT 4
+#define GRTC_STR_RFF_BIT 7
+#define GRTC_STR_RBF_BIT 10
+
+#define GRTC_STR_CR (1<<GRTC_STR_CR_BIT)
+#define GRTC_STR_OV (1<<GRTC_STR_OV_BIT)
+#define GRTC_STR_RFF (1<<GRTC_STR_RFF_BIT)
+#define GRTC_STR_RBF (1<<GRTC_STR_RBF_BIT)
+
+/* Address Space Register (ASR 0x28) */
+#define GRTC_ASR_RXLEN_BIT 0
+#define GRTC_ASR_BUFST_BIT 10
+
+#define GRTC_ASR_RXLEN (0xff<<GRTC_ASR_RXLEN_BIT)
+#define GRTC_ASR_BUFST (0x3fffff<<GRTC_ASR_BUFST_BIT)
+
+/* Receive Read Pointer Register (RRP 0x2c) */
+#define GRTC_RRP_PTR_BIT 0
+
+#define GRTC_RRP_PTR (0xffffff<<GRTC_RRP_PTR_BIT)
+
+/* Receive Write Pointer Register (RWP 0x30) */
+#define GRTC_RWP_PTR_BIT 0
+
+#define GRTC_RWP_PTR (0xffffff<<GRTC_RWP_PTR_BIT)
+
+/* Pending Interrupt Masked Status Register (PIMSR 0x60) */
+/* Pending Interrupt Masked Register (PIMR 0x64) */
+/* Pending Interrupt Status Register (PISR 0x68) */
+/* Pending Interrupt Register (PIR 0x6c) */
+/* Interrupt Mask Register (IMR 0x70) */
+/* Pending Interrupt Clear Register (PICR 0x74) */
+#define GRTC_INT_RFA_BIT 0
+#define GRTC_INT_BLO_BIT 1
+#define GRTC_INT_FAR_BIT 2
+#define GRTC_INT_CR_BIT 3
+#define GRTC_INT_RBF_BIT 4
+#define GRTC_INT_OV_BIT 5
+#define GRTC_INT_CS_BIT 6
+
+#define GRTC_INT_RFA (1<<GRTC_INT_RFA_BIT)
+#define GRTC_INT_BLO (1<<GRTC_INT_BLO_BIT)
+#define GRTC_INT_FAR (1<<GRTC_INT_FAR_BIT)
+#define GRTC_INT_CR (1<<GRTC_INT_CR_BIT)
+#define GRTC_INT_OV (1<<GRTC_INT_OV_BIT)
+#define GRTC_INT_CS (1<<GRTC_INT_CS_BIT)
+
+#define GRTC_INT_ALL (GRTC_INT_RFA|GRTC_INT_BLO|GRTC_INT_FAR|GRTC_INT_CR|GRTC_INT_OV|GRTC_INT_CS)
+
+#define READ_REG(address) (*(volatile unsigned int *)address)
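
The register field defines above come in <FIELD>_BIT/<FIELD> pairs, so a field is read by masking with <FIELD> and shifting right by <FIELD>_BIT. A minimal illustration (the ASR value is made up) of how the driver later derives the DMA buffer size from the RXLEN field:

    unsigned int asr    = 0x40000003;  /* hypothetical ASR contents */
    unsigned int blocks = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT;
    unsigned int bytes  = (blocks + 1) << 10;  /* RXLEN counts 1 kbyte blocks: 4 kbyte here */
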
+
+/* Driver functions */
+static rtems_device_driver grtc_initialize(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtc_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtc_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtc_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtc_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtc_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+
+#define GRTC_DRIVER_TABLE_ENTRY { grtc_initialize, grtc_open, grtc_close, grtc_read, grtc_write, grtc_ioctl }
+
+static rtems_driver_address_table grtc_driver = GRTC_DRIVER_TABLE_ENTRY;
+
+enum {
+ FRM_STATE_NONE = 0, /* not started */
+ FRM_STATE_HDR = 1, /* Reading Header (Frame length isn't known) */
+ FRM_STATE_ALLOC = 2, /* Allocate Frame to hold data */
+ FRM_STATE_PAYLOAD = 3, /* Reading Payload (Frame length is known) */
+ FRM_STATE_FILLER = 4, /* Check filler */
+ FRM_STATE_DROP = 5 /* error, drop data until end marker */
+};
+
+/* Frame pool, all frames in pool have the same buffer length (frame mode only) */
+struct grtc_frame_pool {
+ unsigned int frame_len; /* Maximal length of frame (payload+hdr+crc..) */
+ unsigned int frame_cnt; /* Current number of frames in pool (in frms) */
+ struct grtc_frame *frms; /* Chain of frames in pool (this is the pool) */
+};
+
+struct grtc_priv {
+ struct drvmgr_dev *dev; /* Driver manager device */
+ char devName[32]; /* Device Name */
+ struct grtc_regs *regs; /* TC Hardware Register MAP */
+ int irq; /* IRQ number of TC core */
+
+ int major; /* Driver major */
+ int minor; /* Device Minor */
+
+ int open; /* Device has been opened by user */
+ int running; /* TC receiver running */
+ int mode; /* RAW or FRAME mode */
+ int overrun_condition; /* Overrun condition */
+ int blocking; /* Blocking/polling mode */
+ rtems_interval timeout; /* Timeout in blocking mode */
+ int wait_for_nbytes;/* Number of bytes to wait for in blocking mode */
+
+ struct grtc_ioc_config config;
+
+/* RAW MODE ONLY */
+ /* Buffer allocation (user provided or driver allocated using malloc) */
+ void *buf;
+ void *buf_remote;
+ void *_buf;
+ int buf_custom; /* 0=no custom buffer, 1=custom buffer (don't free it...) */
+ unsigned int len;
+
+/* FRAME MODE ONLY */
+ /* Frame management when user provides buffers. */
+ int pool_cnt; /* Number of Pools */
+ struct grtc_frame_pool *pools; /* Array of pools */
+
+ struct grtc_list ready; /* Ready queue (received frames) */
+
+ /* Frame read data (Frame mode only) */
+ int frame_state;
+ int filler;
+ unsigned char hdr[5] __attribute__((aligned(2)));
+	struct grtc_frame *frm;		/* Frame currently being copied */
+ int frmlen;
+
+ struct grtc_ioc_stats stats; /* Statistics */
+
+ rtems_id sem_rx;
+
+#ifdef DEBUG_ERROR
+ /* Buffer read/write state */
+ unsigned int rp;
+ unsigned int wp;
+
+ /* Debugging */
+ int last_error[128];
+ int last_error_cnt;
+#endif
+};
+
+/* Prototypes */
+static void grtc_hw_reset(struct grtc_priv *priv);
+static void grtc_interrupt(void *arg);
+
+/* Common Global Variables */
+static rtems_id grtc_dev_sem;
+static int grtc_driver_io_registered = 0;
+static rtems_device_major_number grtc_driver_io_major = 0;
+
+/******************* Driver manager interface ***********************/
+
+/* Driver prototypes */
+static int grtc_register_io(rtems_device_major_number *m);
+static int grtc_device_init(struct grtc_priv *pDev);
+
+static int grtc_init2(struct drvmgr_dev *dev);
+static int grtc_init3(struct drvmgr_dev *dev);
+
+static struct drvmgr_drv_ops grtc_ops =
+{
+ {NULL, grtc_init2, grtc_init3, NULL},
+ NULL,
+ NULL,
+};
+
+static struct amba_dev_id grtc_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_GRTC},
+ {0, 0} /* Mark end of table */
+};
+
+static struct amba_drv_info grtc_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_GRTC_ID, /* Driver ID */
+ "GRTC_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &grtc_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ sizeof(struct grtc_priv),
+ },
+ &grtc_ids[0]
+};
+
+void grtc_register_drv (void)
+{
+ DBG("Registering GRTC driver\n");
+ drvmgr_drv_register(&grtc_drv_info.general);
+}
+
+static int grtc_init2(struct drvmgr_dev *dev)
+{
+ struct grtc_priv *priv;
+
+ DBG("GRTC[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+ priv = dev->priv;
+ if ( !priv )
+ return DRVMGR_NOMEM;
+ priv->dev = dev;
+
+	/* This core will not find other cores, so we wait for init3() */
+
+ return DRVMGR_OK;
+}
+
+static int grtc_init3(struct drvmgr_dev *dev)
+{
+ struct grtc_priv *priv;
+ char prefix[32];
+ rtems_status_code status;
+
+ priv = dev->priv;
+
+ /* Do initialization */
+
+ if ( grtc_driver_io_registered == 0) {
+ /* Register the I/O driver only once for all cores */
+ if ( grtc_register_io(&grtc_driver_io_major) ) {
+ /* Failed to register I/O driver */
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+
+ grtc_driver_io_registered = 1;
+ }
+
+ /* I/O system registered and initialized
+ * Now we take care of device initialization.
+ */
+ if ( grtc_device_init(priv) ) {
+ return DRVMGR_FAIL;
+ }
+
+ /* Get Filesystem name prefix */
+ prefix[0] = '\0';
+ if ( drvmgr_get_dev_prefix(dev, prefix) ) {
+ /* Failed to get prefix, make sure of a unique FS name
+ * by using the driver minor.
+ */
+ sprintf(priv->devName, "/dev/grtc%d", dev->minor_drv);
+ } else {
+		/* Got a prefix, this means we have a bus prefix
+		 * and we should use our "bus minor"
+		 */
+ sprintf(priv->devName, "/dev/%sgrtc%d", prefix, dev->minor_bus);
+ }
+
+ /* Register Device */
+ status = rtems_io_register_name(priv->devName, grtc_driver_io_major, dev->minor_drv);
+ if (status != RTEMS_SUCCESSFUL) {
+ return DRVMGR_FAIL;
+ }
+
+ return DRVMGR_OK;
+}
+
+/******************* Driver Implementation ***********************/
+
+static int grtc_register_io(rtems_device_major_number *m)
+{
+ rtems_status_code r;
+
+ if ((r = rtems_io_register_driver(0, &grtc_driver, m)) == RTEMS_SUCCESSFUL) {
+ DBG("GRTC driver successfully registered, major: %d\n", *m);
+ } else {
+ switch(r) {
+ case RTEMS_TOO_MANY:
+ printk("GRTC rtems_io_register_driver failed: RTEMS_TOO_MANY\n");
+ return -1;
+ case RTEMS_INVALID_NUMBER:
+ printk("GRTC rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n");
+ return -1;
+ case RTEMS_RESOURCE_IN_USE:
+ printk("GRTC rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n");
+ return -1;
+ default:
+ printk("GRTC rtems_io_register_driver failed\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int grtc_device_init(struct grtc_priv *pDev)
+{
+ struct amba_dev_info *ambadev;
+ struct ambapp_core *pnpinfo;
+
+ /* Get device information from AMBA PnP information */
+ ambadev = (struct amba_dev_info *)pDev->dev->businfo;
+ if ( ambadev == NULL ) {
+ return -1;
+ }
+ pnpinfo = &ambadev->info;
+ pDev->irq = pnpinfo->irq;
+ pDev->regs = (struct grtc_regs *)pnpinfo->ahb_slv->start[0];
+ pDev->minor = pDev->dev->minor_drv;
+ pDev->open = 0;
+ pDev->running = 0;
+
+ /* Create Binary RX Semaphore with count = 0 */
+ if ( rtems_semaphore_create(rtems_build_name('G', 'R', 'C', '0' + pDev->minor),
+ 0,
+ RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|\
+ RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+ 0,
+ &pDev->sem_rx) != RTEMS_SUCCESSFUL ) {
+ return -1;
+ }
+
+ /* Reset Hardware before attaching IRQ handler */
+ grtc_hw_reset(pDev);
+
+ return 0;
+}
+
+static void grtc_hw_reset(struct grtc_priv *priv)
+{
+ /* Reset Core */
+ priv->regs->grst = GRTC_SEB | GRTC_GRR_SRST;
+}
+
+static void grtc_hw_get_defaults(struct grtc_priv *pDev, struct grtc_ioc_config *config)
+{
+ unsigned int gcr = READ_REG(&pDev->regs->gctrl);
+
+ config->psr_enable = (gcr & GRTC_GCR_PSR) ? 1:0;
+ config->nrzm_enable = (gcr & GRTC_GCR_NRZM) ? 1:0;
+ config->pss_enable = (gcr & GRTC_GCR_PSS) ? 1:0;
+
+ config->crc_calc = 0;
+}
+
+/* bufsize is given in bytes */
+static int __inline__ grtc_hw_data_avail_upper(unsigned int rrp, unsigned rwp, unsigned int bufsize)
+{
+ if ( rrp == rwp )
+ return 0;
+
+ if ( rwp > rrp ) {
+ return rwp-rrp;
+ }
+
+ return (bufsize-rrp);
+}
+
+/* bufsize is given in bytes */
+static int __inline__ grtc_hw_data_avail_lower(unsigned int rrp, unsigned rwp, unsigned int bufsize)
+{
+ if ( rrp == rwp )
+ return 0;
+
+ if ( rwp > rrp ) {
+ return 0;
+ }
+
+ return rwp;
+}
+
+/* bufsize is given in bytes */
+static int __inline__ grtc_hw_data_avail(unsigned int rrp, unsigned rwp, unsigned int bufsize)
+{
+ if ( rrp == rwp )
+ return 0;
+
+ if ( rwp > rrp ) {
+ return rwp-rrp;
+ }
+
+ return rwp+(bufsize-rrp);
+}
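
The three helpers above treat the receive DMA area as a circular buffer: "upper" is the contiguous data from the read pointer up to the write pointer (or to the end of the buffer if the write pointer has wrapped), and "lower" is the wrapped-around data from the start of the buffer up to the write pointer. As a worked example with made-up values, bufsize = 4096, rrp = 3000 and rwp = 500 (wrapped) give grtc_hw_data_avail_upper() = 4096 - 3000 = 1096, grtc_hw_data_avail_lower() = 500 and grtc_hw_data_avail() = 1596.
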
+
+/* Reads as much as possible but not more than 'max' bytes from the TC receive buffer.
+ * Number of bytes put into 'buf' is returned.
+ */
+static int grtc_hw_read_try(struct grtc_priv *pDev, char *buf, int max)
+{
+ struct grtc_regs *regs = pDev->regs;
+ unsigned int rp, wp, asr, bufmax, rrp, rwp;
+ unsigned int upper, lower;
+ unsigned int count, cnt, left;
+
+ FUNCDBG();
+
+ if ( max < 1 )
+ return 0;
+
+ rp = READ_REG(&regs->rp);
+ asr = READ_REG(&regs->asr);
+ bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT;
+ bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */
+ wp = READ_REG(&regs->wp);
+
+ /* Relative rp and wp */
+ rrp = rp - (asr & GRTC_ASR_BUFST);
+ rwp = wp - (asr & GRTC_ASR_BUFST);
+
+ lower = grtc_hw_data_avail_lower(rrp,rwp,bufmax);
+ upper = grtc_hw_data_avail_upper(rrp,rwp,bufmax);
+
+ DBG("grtc_hw_read_try: AVAIL: Lower: %d, Upper: %d\n",lower,upper);
+ DBG("grtc_hw_read_try: rp: 0x%x, rrp: 0x%x, wp: 0x%x, rwp: 0x%x, bufmax: %d\n, start: 0x%x\n",
+		rp,rrp,wp,rwp,bufmax,pDev->buf);
+
+ if ( (upper+lower) == 0 )
+ return 0;
+
+	/* Number of bytes that will be read */
+ count = (upper+lower) > max ? max : (upper+lower);
+ left = count;
+
+ /* Read from upper part of data buffer */
+ if ( upper > 0 ){
+ if ( left < upper ){
+ cnt = left;
+ }else{
+ cnt = upper; /* Read all upper data available */
+ }
+ DBG("grtc_hw_read_try: COPYING %d from upper\n",cnt);
+ /* Convert from Remote address (RP) into CPU Local address */
+ memcpy(buf, (void *)((rp - (unsigned int)pDev->buf_remote) + (unsigned int)pDev->buf), cnt);
+ buf += cnt;
+ left -= cnt;
+ }
+
+ /* Read from lower part of data buffer */
+ if ( left > 0 ){
+ if ( left < lower ){
+ cnt = left;
+ }else{
+ cnt = lower; /* Read all lower data available */
+ }
+ DBG("grtc_hw_read_try: COPYING %d from lower\n",cnt);
+ memcpy(buf, (void *)pDev->buf, cnt);
+ buf += cnt;
+ left -= cnt;
+ }
+
+ /* Update hardware RP pointer to tell hardware about new space available */
+ if ( (rp+count) >= ((asr&GRTC_ASR_BUFST)+bufmax) ){
+ regs->rp = (rp+count-bufmax);
+ } else {
+ regs->rp = rp+count;
+ }
+
+ return count;
+}
+
+/* Returns the number of bytes currently available for reading in the
+ * TC receive (DMA) buffer.
+ */
+static int grtc_data_avail(struct grtc_priv *pDev)
+{
+ unsigned int rp, wp, asr, bufmax, rrp, rwp;
+ struct grtc_regs *regs = pDev->regs;
+
+ FUNCDBG();
+
+ rp = READ_REG(&regs->rp);
+ asr = READ_REG(&regs->asr);
+ bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT;
+ bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */
+ wp = READ_REG(&regs->wp);
+
+ /* Relative rp and wp */
+ rrp = rp - (asr & GRTC_ASR_BUFST);
+ rwp = wp - (asr & GRTC_ASR_BUFST);
+
+ return grtc_hw_data_avail(rrp,rwp,bufmax);
+}
+
+static void *grtc_memalign(unsigned int boundary, unsigned int length, void *realbuf)
+{
+ *(int *)realbuf = (int)malloc(length+(~GRTC_ASR_BUFST)+1);
+ DBG("GRTC: Alloced %d (0x%x) bytes, requested: %d\n",length+(~GRTC_ASR_BUFST)+1,length+(~GRTC_ASR_BUFST)+1,length);
+ return (void *)(((*(unsigned int *)realbuf)+(~GRTC_ASR_BUFST)+1) & ~(boundary-1));
+}
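
grtc_memalign() over-allocates by one alignment boundary, saves the raw malloc() pointer in *realbuf (this is the pointer later handed back to free(), see the handling of pDev->_buf in grtc_ioctl()), and returns the first boundary-aligned address inside the allocation. For example, with the 1 kbyte boundary used by this driver ((~GRTC_ASR_BUFST)+1 = 0x400), a raw pointer of 0x40001234 gives the aligned buffer address (0x40001234 + 0x400) & ~0x3ff = 0x40001400.
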
+
+static int grtc_start(struct grtc_priv *pDev)
+{
+ struct grtc_regs *regs = pDev->regs;
+ unsigned int tmp;
+
+ if ( !pDev->buf || (((unsigned int)pDev->buf & ~GRTC_ASR_BUFST) != 0) ||
+ (pDev->len>(1024*0x100)) || (pDev->len<1024) || ((pDev->len & (1024-1)) != 0)
+ ) {
+ DBG("GRTC: start: buffer not properly allocated(0x%x,0x%x,0x%x,0x%x)\n",pDev->buf,pDev->len,((unsigned int)pDev->buf & ~GRTC_ASR_BUFST),(pDev->len & ~(1024-1)));
+ return RTEMS_NO_MEMORY;
+ }
+
+ memset(pDev->buf,0,pDev->len);
+
+ /* Software init */
+ pDev->overrun_condition = 0;
+#ifdef DEBUG_ERROR
+ pDev->last_error_cnt = 0;
+ memset(&pDev->last_error[0],0,128*sizeof(int));
+#endif
+ memset(&pDev->stats,0,sizeof(struct grtc_ioc_stats));
+
+ /* Reset the receiver */
+ regs->cor = GRTC_SEB | GRTC_COR_CRST;
+ if ( READ_REG(&regs->cor) & GRTC_COR_CRST ){
+ /* Reset Failed */
+		DBG("GRTC: start: Resetting receiver failed\n");
+ return RTEMS_IO_ERROR;
+ }
+
+ /* Set operating modes */
+ tmp = 0;
+ if ( pDev->config.psr_enable )
+ tmp |= GRTC_GCR_PSR;
+ if ( pDev->config.nrzm_enable )
+ tmp |= GRTC_GCR_NRZM;
+ if ( pDev->config.pss_enable )
+ tmp |= GRTC_GCR_PSS;
+ regs->gctrl = GRTC_SEB | tmp;
+
+ /* Clear any pending interrupt */
+ tmp = READ_REG(&regs->pir);
+ regs->picr = GRTC_INT_ALL;
+
+ /* Unmask only the Overrun interrupt */
+ regs->imr = GRTC_INT_OV;
+
+ /* Set up DMA registers
+ * 1. Let hardware know about our DMA area (size and location)
+	 * 2. Set DMA read/write positions to zero.
+ */
+ regs->asr = (unsigned int)pDev->buf_remote | ((pDev->len>>10)-1);
+ regs->rp = (unsigned int)pDev->buf_remote;
+
+	/* Mark running before enabling the receiver. We could get an
+	 * interrupt directly after enabling the receiver, and the interrupt
+	 * handler would otherwise treat it as spurious (see interrupt handler).
+	 */
+ pDev->running = 1;
+
+ /* Enable receiver */
+ regs->cor = GRTC_SEB | GRTC_COR_RE;
+
+ DBG("GRTC: STARTED\n");
+
+ return 0;
+}
+
+static void grtc_stop(struct grtc_priv *pDev)
+{
+ struct grtc_regs *regs = pDev->regs;
+ unsigned int tmp;
+
+ /* Disable the receiver */
+ regs->cor = GRTC_SEB;
+
+ /* disable all interrupts and clear them */
+ regs->imr = 0;
+ tmp = READ_REG(&regs->pir);
+ regs->picr = GRTC_INT_ALL;
+
+ DBG("GRTC: STOPPED\n");
+
+ /* Flush semaphores in case a thread is stuck waiting for CLTUs (RX data) */
+ rtems_semaphore_flush(pDev->sem_rx);
+}
+
+/* Wait until 'count' bytes are available in receive buffer, or until
+ * the timeout expires.
+ */
+static int grtc_wait_data(struct grtc_priv *pDev, int count, rtems_interval timeout)
+{
+ int avail;
+ int ret;
+ IRQ_GLOBAL_PREPARE(oldLevel);
+
+ FUNCDBG();
+
+ if ( count < 1 )
+ return 0;
+
+ IRQ_GLOBAL_DISABLE(oldLevel);
+
+	/* Enable the CLTU-stored interrupt and clear any old pending
+	 * CLTU-stored interrupts first.
+	 */
+ pDev->regs->picr = GRTC_INT_CS;
+ pDev->regs->imr = READ_REG(&pDev->regs->imr) | GRTC_INT_CS;
+
+ avail = grtc_data_avail(pDev);
+ if ( avail < count ) {
+ /* Wait for interrupt. */
+
+ IRQ_GLOBAL_ENABLE(oldLevel);
+
+ if ( timeout == 0 ){
+ timeout = RTEMS_NO_TIMEOUT;
+ }
+ ret = rtems_semaphore_obtain(pDev->sem_rx,RTEMS_WAIT,timeout);
+ /* RTEMS_SUCCESSFUL = interrupt signaled data is available
+ * RTEMS_TIMEOUT = timeout expired, probably not enough data available
+	 * RTEMS_UNSATISFIED = driver has been closed or an error (overrun) occurred
+ * which should cancel this operation.
+ * RTEMS_OBJECT_WAS_DELETED, RTEMS_INVALID_ID = driver error.
+ */
+ IRQ_GLOBAL_DISABLE(oldLevel);
+ }else{
+ ret = RTEMS_SUCCESSFUL;
+ }
+
+ /* Disable interrupts when receiving CLTUs */
+ pDev->regs->imr = READ_REG(&pDev->regs->imr) & ~GRTC_INT_CS;
+
+ IRQ_GLOBAL_ENABLE(oldLevel);
+
+ return ret;
+}
+
+static rtems_device_driver grtc_open(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void *arg)
+{
+ struct grtc_priv *pDev;
+ struct drvmgr_dev *dev;
+
+ FUNCDBG();
+
+ if ( drvmgr_get_dev(&grtc_drv_info.general, minor, &dev) ) {
+ DBG("Wrong minor %d\n", minor);
+ return RTEMS_INVALID_NUMBER;
+ }
+ pDev = (struct grtc_priv *)dev->priv;
+
+ /* Wait until we get semaphore */
+ if ( rtems_semaphore_obtain(grtc_dev_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT) != RTEMS_SUCCESSFUL ){
+ return RTEMS_INTERNAL_ERROR;
+ }
+
+ /* Is device in use? */
+ if ( pDev->open ){
+ rtems_semaphore_release(grtc_dev_sem);
+ return RTEMS_RESOURCE_IN_USE;
+ }
+
+ /* Mark device taken */
+ pDev->open = 1;
+
+ rtems_semaphore_release(grtc_dev_sem);
+
+ DBG("grtc_open: OPENED minor %d (pDev: 0x%x)\n",pDev->minor,(unsigned int)pDev);
+
+ /* Set defaults */
+ pDev->buf = NULL;
+ pDev->_buf = NULL;
+ pDev->buf_custom = 0;
+ pDev->buf_remote = 0;
+ pDev->len = 0;
+ pDev->timeout = 0; /* no timeout */
+ pDev->blocking = 0; /* polling mode */
+ pDev->mode = GRTC_MODE_RAW; /* Always default to Raw mode */
+ pDev->ready.head = NULL;
+ pDev->ready.tail = NULL;
+ pDev->ready.cnt = 0;
+
+ pDev->running = 0;
+
+ memset(&pDev->config,0,sizeof(pDev->config));
+
+ /* The core has been reset when we execute here, so it is possible
+	 * to read out the defaults from the core.
+ */
+ grtc_hw_get_defaults(pDev,&pDev->config);
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_device_driver grtc_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ struct grtc_priv *pDev;
+ struct drvmgr_dev *dev;
+
+ FUNCDBG();
+
+ if ( drvmgr_get_dev(&grtc_drv_info.general, minor, &dev) ) {
+ return RTEMS_INVALID_NUMBER;
+ }
+ pDev = (struct grtc_priv *)dev->priv;
+
+ if ( pDev->running ){
+ grtc_stop(pDev);
+ pDev->running = 0;
+ }
+
+ /* Reset core */
+ grtc_hw_reset(pDev);
+
+ /* Mark not open */
+ pDev->open = 0;
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_device_driver grtc_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ struct grtc_priv *pDev;
+ struct drvmgr_dev *dev;
+ int count;
+ int left;
+ int timedout;
+ int err;
+ rtems_interval timeout;
+ rtems_libio_rw_args_t *rw_args;
+
+ FUNCDBG();
+
+ if ( drvmgr_get_dev(&grtc_drv_info.general, minor, &dev) ) {
+ return RTEMS_INVALID_NUMBER;
+ }
+ pDev = (struct grtc_priv *)dev->priv;
+
+ if ( !pDev->running && !pDev->overrun_condition ) {
+ return RTEMS_RESOURCE_IN_USE;
+ }
+
+ if ( pDev->mode != GRTC_MODE_RAW ) {
+ return RTEMS_NOT_DEFINED;
+ }
+
+ rw_args = (rtems_libio_rw_args_t *) arg;
+ left = rw_args->count;
+ timedout = 0;
+ timeout = pDev->timeout;
+
+read_from_buffer:
+ /* Read maximally rw_args->count bytes from receive buffer */
+ count = grtc_hw_read_try(pDev,rw_args->buffer,left);
+
+ left -= count;
+
+ DBG("READ %d bytes from DMA, left: %d\n",count,left);
+
+ if ( !timedout && !pDev->overrun_condition && ((count < 1) || ((count < rw_args->count) && (pDev->blocking == GRTC_BLKMODE_COMPLETE))) ){
+ /* didn't read anything (no data available) or we want to wait for all bytes requested.
+ *
+ * Wait for data to arrive only in blocking mode
+ */
+ if ( pDev->blocking ) {
+ if ( (err=grtc_wait_data(pDev,left,timeout)) != RTEMS_SUCCESSFUL ){
+ /* Some kind of error, closed, overrun etc. */
+ if ( err == RTEMS_TIMEOUT ){
+ /* Got a timeout, we try to read as much as possible */
+ timedout = 1;
+ goto read_from_buffer;
+ }
+ return err;
+ }
+ goto read_from_buffer;
+ }
+ /* Non-blocking mode and no data read. */
+ return RTEMS_TIMEOUT;
+ }
+
+ /* Tell caller how much was read. */
+
+ DBG("READ returning %d bytes, left: %d\n",rw_args->count-left,left);
+
+ rw_args->bytes_moved = rw_args->count - left;
+ if ( rw_args->bytes_moved == 0 ){
+ return RTEMS_TIMEOUT;
+ }
+
+ return RTEMS_SUCCESSFUL;
+}
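
A minimal RAW-mode usage sketch from an application's point of view; the device name, DMA area size and read length are illustrative assumptions and error handling is omitted, but the ioctl commands and structures are the ones declared in grtc.h and handled by grtc_ioctl() below:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <grtc.h>

    /* Illustration only: receive raw CLTU data, blocking until the whole
     * request has been filled (GRTC_BLKMODE_COMPLETE).
     */
    void tc_raw_sketch(void)
    {
        char data[256];
        int fd = open("/dev/grtc0", O_RDWR);
        struct grtc_ioc_buf_params bufpar = {
            .length = 16,           /* DMA area size in 1 kbyte blocks (16 kbyte) */
            .custom_buffer = NULL,  /* let the driver allocate the DMA area */
        };

        ioctl(fd, GRTC_IOC_SET_BUF_PARAM, &bufpar);
        ioctl(fd, GRTC_IOC_SET_BLOCKING_MODE, GRTC_BLKMODE_COMPLETE);
        ioctl(fd, GRTC_IOC_START, 0);

        read(fd, data, sizeof(data));   /* returns when 256 bytes have been received */

        ioctl(fd, GRTC_IOC_STOP, 0);
        close(fd);
    }
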
+
+static rtems_device_driver grtc_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ FUNCDBG();
+ return RTEMS_NOT_IMPLEMENTED;
+}
+
+static int grtc_pool_add_frms(struct grtc_frame *frms)
+{
+ struct grtc_frame *frm, *next;
+
+ /* Add frames to pools */
+ frm = frms;
+ while(frm){
+
+ if ( !frm->pool ) {
+ /* */
+ DBG("GRTC: Frame not assigned to a pool\n");
+ return -1;
+ }
+ next = frm->next; /* Remember next frame to process */
+
+ DBG("GRTC: adding frame 0x%x to pool %d (%d)\n",frm,frm->pool->frame_len,frm->pool->frame_cnt);
+
+ /* Insert Frame into pool */
+ frm->next = frm->pool->frms;
+ frm->pool->frms = frm;
+ frm->pool->frame_cnt++;
+
+ frm = next;
+ }
+
+ return 0;
+}
+
+static struct grtc_frame *grtc_pool_get_frm(struct grtc_priv *pDev, int frame_len, int *error)
+{
+ struct grtc_frame *frm;
+ struct grtc_frame_pool *pool;
+ int i;
+
+ /* Loop through all pools until a pool is found
+ * with a matching (or larger) frame length
+ */
+ pool = pDev->pools;
+ for (i=0; i<pDev->pool_cnt; i++,pool++) {
+ if ( pool->frame_len >= frame_len ) {
+ /* Found a good pool ==> get frame */
+ frm = pool->frms;
+ if ( !frm ) {
+ /* not enough frames available for this
+ * frame length, we try next
+ *
+ * If this is a severe error add your handling
+ * code here.
+ */
+#if 0
+ if ( error )
+ *error = 0;
+ return 0;
+#endif
+ continue;
+ }
+
+ /* Got a frame, the frame is taken out of the
+ * pool for usage.
+ */
+ pool->frms = frm->next;
+ pool->frame_cnt--;
+ return frm;
+ }
+ }
+
+ if ( error )
+ *error = 1;
+
+ /* Didn't find any frames */
+ return NULL;
+}
+
+/* Returns the number of bytes processed. Stops at the first occurrence
+ * of the pattern given in 'pattern'.
+ */
+static int grtc_scan(unsigned short *src, int max, unsigned char pattern, int *found)
+{
+ unsigned short tmp = 0;
+ unsigned int left = max;
+
+ while ( (left>1) && (((tmp=*src) & 0x00ff) != pattern) ) {
+ src++;
+ left-=2;
+ }
+ if ( (tmp & 0xff) == pattern ) {
+ *found = 1;
+ } else {
+ *found = 0;
+ }
+ return max-left;
+}
+
+static int grtc_copy(unsigned short *src, unsigned char *buf, int cnt)
+{
+ unsigned short tmp;
+ int left = cnt;
+
+ while ( (left>0) && ((((tmp=*src) & 0x00ff) == 0x00) || ((tmp & 0x00ff) == 0x01)) ) {
+ *buf++ = tmp>>8;
+ src++;
+ left--;
+ }
+
+ return cnt-left;
+}
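
grtc_scan() and grtc_copy() reflect how the core stores received data in the DMA area: every received byte occupies a 16-bit word whose upper byte is the data and whose lower byte is a control code. grtc_copy() accepts words with a low byte of 0x00 or 0x01, grtc_hw_find_frm() below searches for a low byte of 0x01 to locate a frame start, and grtc_check_ending() expects 0x5500 filler words followed by a word with a low byte of 0x02 (the end marker). As a worked example, the DMA words 0x4101 0x4200 0x4300 are copied out by grtc_copy() as the three data bytes 0x41 0x42 0x43.
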
+
+
+static int grtc_hw_find_frm(struct grtc_priv *pDev)
+{
+ struct grtc_regs *regs = pDev->regs;
+ unsigned int rp, wp, asr, bufmax, rrp, rwp;
+ unsigned int upper, lower;
+ unsigned int count, cnt;
+ int found;
+
+ FUNCDBG();
+
+ rp = READ_REG(&regs->rp);
+ asr = READ_REG(&regs->asr);
+ wp = READ_REG(&regs->wp);
+
+ /* Quick Check for most common case where Start of frame is at next
+ * data byte.
+ */
+ if ( rp != wp ) {
+ /* At least 1 byte in buffer */
+ if ( ((*(unsigned short *)((rp - (unsigned int)pDev->buf_remote) + (unsigned int)pDev->buf)) & 0x00ff) == 0x01 ) {
+ return 0;
+ }
+ }
+
+ bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT;
+ bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */
+
+ /* Relative rp and wp */
+ rrp = rp - (asr & GRTC_ASR_BUFST);
+ rwp = wp - (asr & GRTC_ASR_BUFST);
+
+ lower = grtc_hw_data_avail_lower(rrp,rwp,bufmax);
+ upper = grtc_hw_data_avail_upper(rrp,rwp,bufmax);
+
+ DBG("grtc_hw_find_frm: AVAIL: Lower: %d, Upper: %d\n",lower,upper);
+ DBG("grtc_hw_find_frm: rp: 0x%x, rrp: 0x%x, wp: 0x%x, rwp: 0x%x, bufmax: %d\n, start: 0x%x\n",
+ rp,rrp,wp,rwp,bufmax,pDev->buf_remote);
+
+ if ( (upper+lower) == 0 )
+ return 1;
+
+	/* Number of bytes that will be skipped */
+ count = 0;
+ found = 0;
+
+ /* Read from upper part of data buffer */
+ if ( upper > 0 ){
+ cnt = grtc_scan((unsigned short *)((rp - (unsigned int)pDev->buf_remote) + (unsigned int)pDev->buf), upper, 0x01, &found);
+ count = cnt;
+ if ( found ) {
+ DBG("grtc_hw_find_frm: SCANNED upper %d bytes until found\n",cnt);
+ goto out;
+ }
+
+ DBG("grtc_hw_find_frm: SCANNED all upper %d bytes, not found\n",cnt);
+ }
+
+ /* Read from lower part of data buffer */
+ if ( lower > 0 ){
+ cnt = grtc_scan((unsigned short *)pDev->buf, lower, 0x01, &found);
+ count += cnt;
+
+ if ( found ) {
+ DBG("grtc_hw_find_frm: SCANNED lower %d bytes until found\n",cnt);
+ goto out;
+ }
+
+ DBG("grtc_hw_find_frm: SCANNED all lower %d bytes, not found\n",cnt);
+ }
+
+out:
+ /* Update hardware RP pointer to tell hardware about new space available */
+ if ( count > 0 ) {
+ if ( (rp+count) >= ((asr&GRTC_ASR_BUFST)+bufmax) ){
+ regs->rp = (rp+count-bufmax);
+ } else {
+ regs->rp = rp+count;
+ }
+ }
+ if ( found )
+ return 0;
+ return 1;
+
+}
+
+static int grtc_check_ending(unsigned short *src, int max, int end)
+{
+ while ( max > 0 ) {
+ /* Check Filler */
+ if ( *src != 0x5500 ) {
+ /* Filler is wrong */
+ return -1;
+ }
+ src++;
+ max-=2;
+ }
+
+	/* Check the end marker (only if requested) */
+ if ( end ) {
+ if ( (*src & 0x00ff) != 0x02 ) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int grtc_hw_check_ending(struct grtc_priv *pDev, int max)
+{
+ struct grtc_regs *regs = pDev->regs;
+ unsigned int rp, wp, asr, bufmax, rrp, rwp;
+ unsigned int upper, lower;
+ unsigned int count, cnt, left;
+ int tot;
+
+ FUNCDBG();
+
+ if ( max < 1 )
+ return 0;
+ max = max*2;
+ max += 2; /* Check ending also (2 byte extra) */
+
+ rp = READ_REG(&regs->rp);
+ asr = READ_REG(&regs->asr);
+ bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT;
+ bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */
+ wp = READ_REG(&regs->wp);
+
+ /* Relative rp and wp */
+ rrp = rp - (asr & GRTC_ASR_BUFST);
+ rwp = wp - (asr & GRTC_ASR_BUFST);
+
+ lower = grtc_hw_data_avail_lower(rrp,rwp,bufmax);
+ upper = grtc_hw_data_avail_upper(rrp,rwp,bufmax);
+
+ DBG("grtc_hw_check_ending: AVAIL: Lower: %d, Upper: %d\n",lower,upper);
+ DBG("grtc_hw_check_ending: rp: 0x%x, rrp: 0x%x, wp: 0x%x, rwp: 0x%x, bufmax: %d\n, start: 0x%x\n",
+ rp,rrp,wp,rwp,bufmax,pDev->buf_remote);
+
+ if ( (upper+lower) < max )
+ return 0;
+
+	/* Number of bytes that will be read */
+ count = max;
+ left = count;
+ tot = 0;
+
+ /* Read from upper part of data buffer */
+ if ( upper > 0 ){
+ if ( left <= upper ){
+ cnt = left;
+ if ( grtc_check_ending((unsigned short *)((rp-(unsigned int)pDev->buf_remote)+(unsigned int)pDev->buf), cnt-2, 1) ) {
+ return -1;
+ }
+ }else{
+ cnt = upper; /* Read all upper data available */
+ if ( grtc_check_ending((unsigned short *)((rp-(unsigned int)pDev->buf_remote)+(unsigned int)pDev->buf), cnt, 0) ) {
+ return -1;
+ }
+ }
+ left -= cnt;
+ }
+
+ /* Read from lower part of data buffer */
+ if ( left > 0 ){
+ cnt = left;
+ if ( grtc_check_ending((unsigned short *)pDev->buf, cnt-2, 1) ) {
+ return -1;
+ }
+ left -= cnt;
+ }
+
+ /* Update hardware RP pointer to tell hardware about new space available */
+ if ( (rp+count) >= ((asr&GRTC_ASR_BUFST)+bufmax) ){
+ regs->rp = (rp+count-bufmax);
+ } else {
+ regs->rp = rp+count;
+ }
+
+ return 0;
+}
+
+/* Copies data from the DMA area to buf, stripping the control bytes. For
+ * every data byte in the DMA area, one control byte is stripped.
+ */
+static int grtc_hw_copy(struct grtc_priv *pDev, unsigned char *buf, int max, int partial)
+{
+ struct grtc_regs *regs = pDev->regs;
+ unsigned int rp, wp, asr, bufmax, rrp, rwp;
+ unsigned int upper, lower;
+ unsigned int count, cnt, left;
+ int ret, tot, tmp;
+
+ FUNCDBG();
+
+ if ( max < 1 )
+ return 0;
+
+ rp = READ_REG(&regs->rp);
+ asr = READ_REG(&regs->asr);
+ bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT;
+ bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */
+ wp = READ_REG(&regs->wp);
+
+ /* Relative rp and wp */
+ rrp = rp - (asr & GRTC_ASR_BUFST);
+ rwp = wp - (asr & GRTC_ASR_BUFST);
+
+ lower = grtc_hw_data_avail_lower(rrp,rwp,bufmax) >> 1;
+ upper = grtc_hw_data_avail_upper(rrp,rwp,bufmax) >> 1;
+
+ DBG("grtc_hw_copy: AVAIL: Lower: %d, Upper: %d\n",lower,upper);
+ DBG("grtc_hw_copy: rp: 0x%x, rrp: 0x%x, wp: 0x%x, rwp: 0x%x, bufmax: %d\n, start: 0x%x\n",
+ rp,rrp,wp,rwp,bufmax,pDev->buf_remote);
+
+ if ( (upper+lower) == 0 || (!partial && ((upper+lower)<max) ) )
+ return 0;
+
+	/* Number of bytes that will be read */
+ count = (upper+lower) > max ? max : (upper+lower);
+ left = count;
+ tot = 0;
+
+ /* Read from upper part of data buffer */
+ if ( upper > 0 ){
+ if ( left < upper ){
+ cnt = left;
+ }else{
+ cnt = upper; /* Read all upper data available */
+ }
+ DBG("grtc_hw_copy: COPYING %d from upper\n",cnt);
+ if ( (tot=grtc_copy((unsigned short *)((rp-(unsigned int)pDev->buf_remote)+(unsigned int)pDev->buf), buf, cnt)) != cnt ) {
+			/* Failed to copy due to a receive error */
+ DBG("grtc_hw_copy(upper): not all in DMA buffer (%d)\n",tot);
+ count = tot;
+ ret = -1;
+ goto out;
+ }
+ buf += cnt;
+ left -= cnt;
+ }
+
+ /* Read from lower part of data buffer */
+ if ( left > 0 ){
+ if ( left < lower ){
+ cnt = left;
+ }else{
+ cnt = lower; /* Read all lower data available */
+ }
+ DBG("grtc_hw_copy: COPYING %d from lower\n",cnt);
+ if ( (tmp=grtc_copy((unsigned short *)pDev->buf, buf, cnt)) != cnt ) {
+			/* Failed to copy due to a receive error */
+			DBG("grtc_hw_copy(lower): not all in DMA buffer (%d)\n",tmp);
+ count = tot+tmp;
+ ret = -1;
+ goto out;
+ }
+ buf += cnt;
+ left -= cnt;
+ }
+ ret = count;
+
+out:
+ count = count*2;
+ /* Update hardware RP pointer to tell hardware about new space available */
+ if ( (rp+count) >= ((asr&GRTC_ASR_BUFST)+bufmax) ){
+ regs->rp = (rp+count-bufmax);
+ } else {
+ regs->rp = rp+count;
+ }
+
+ return ret;
+}
+
+#ifdef DEBUG_ERROR
+void grtc_log_error(struct grtc_priv *pDev, int err)
+{
+ /* Stop Receiver */
+ *(volatile unsigned int *)&pDev->regs->cor = 0x55000000;
+ *(volatile unsigned int *)&pDev->regs->cor = 0x55000000;
+ pDev->last_error[pDev->last_error_cnt] = err;
+	if ( ++pDev->last_error_cnt >= 128 )
+ pDev->last_error_cnt = 0;
+}
+#endif
+
+/* Read one frame from DMA buffer
+ *
+ * Return Values
+ * Zero - nothing more to process
+ * 1 - more to process, no free frames
+ * 2 - more to process, frame received
+ * negative - more to process, frame dropped
+ */
+static int process_dma(struct grtc_priv *pDev)
+{
+ int ret, err;
+ int left, total_len;
+ unsigned char *dst;
+ struct grtc_frame *frm;
+
+ switch( pDev->frame_state ) {
+ case FRM_STATE_NONE:
+ DBG2("FRAME_STATE_NONE\n");
+
+ /* Find Start of next frame by searching for 0x01 */
+ ret = grtc_hw_find_frm(pDev);
+ if ( ret != 0 ) {
+ /* Frame start not found */
+ return 0;
+ }
+
+ /* Start of frame found, Try to copy header */
+ pDev->frm = NULL;
+ pDev->frame_state = FRM_STATE_HDR;
+
+ case FRM_STATE_HDR:
+ DBG2("FRAME_STATE_HDR\n");
+
+ /* Wait for all of header to be in place by setting partial to 0 */
+ ret = grtc_hw_copy(pDev,pDev->hdr,5,0);
+ if ( ret < 0 ) {
+ /* Error copying header, restart scanning for new frame */
+ DEBUG_ERR_LOG(pDev,1);
+ pDev->stats.err++;
+ pDev->stats.err_hdr++;
+ DBG("FRAME_STATE_HDR: copying failed %d\n",ret);
+ pDev->frame_state = FRM_STATE_NONE;
+ return -1;
+ } else if ( ret != 5 ) {
+ DBG("FRAME_STATE_HDR: no header (%d)\n",ret);
+ /* Not all bytes available, come back later */
+ return 0;
+ }
+
+ /* The complete header has been copied, parse it */
+ pDev->frmlen = ((*(unsigned short *)&pDev->hdr[2]) & 0x3ff)+1;
+ if ( pDev->frmlen < 5 ) {
+ /* Error: frame length is not correct */
+ pDev->stats.err++;
+ pDev->stats.err_hdr++;
+ DBG("FRAME_STATE_HDR: frame length error: %d\n", pDev->frmlen);
+ pDev->frame_state = FRM_STATE_NONE;
+ return -1;
+ }
+ pDev->frame_state = FRM_STATE_ALLOC;
+
+ case FRM_STATE_ALLOC:
+ DBG2("FRAME_STATE_ALLOC\n");
+ /* Header has been read, allocate a frame to put payload and header into */
+
+ /* Allocate Frame matching Frame length */
+ err = 0;
+ frm = grtc_pool_get_frm(pDev,pDev->frmlen,&err);
+ if ( !frm ) {
+ /* Couldn't find frame */
+ DEBUG_ERR_LOG(pDev,2);
+ pDev->stats.dropped++;
+ DBG2("No free frames\n");
+ if ( err == 0 ){
+				/* Frame length exists in pool configuration, but no
+ * frames are available for that frame length.
+ */
+ DEBUG_ERR_LOG(pDev,3);
+ pDev->stats.dropped_no_buf++;
+ return 1;
+ } else {
+ /* Frame length of incoming frame is larger than the
+ * frame length in any of the configured frame pools.
+ *
+				 * This may be because of a corrupt header. We simply
+ * scan for the end of frame marker in the DMA buffer
+ * so we can drop the frame.
+ */
+ DEBUG_ERR_LOG(pDev,4);
+ pDev->stats.dropped_too_long++;
+ pDev->frame_state = FRM_STATE_NONE;
+ return -2;
+ }
+ }
+		frm->len = 5; /* Only header currently in frame */
+
+ /* Copy Frame Header into frame structure */
+ *((unsigned char *)&frm->hdr + 0) = pDev->hdr[0];
+ *((unsigned char *)&frm->hdr + 1) = pDev->hdr[1];
+ *((unsigned char *)&frm->hdr + 2) = pDev->hdr[2];
+ *((unsigned char *)&frm->hdr + 3) = pDev->hdr[3];
+ *((unsigned char *)&frm->hdr + 4) = pDev->hdr[4];
+
+ /* Calc Total and Filler byte count in frame */
+ total_len = pDev->frmlen / 7;
+ total_len = total_len * 7;
+ if ( pDev->frmlen != total_len )
+ total_len += 7;
+
+ pDev->filler = total_len - pDev->frmlen;
+
+ pDev->frame_state = FRM_STATE_PAYLOAD;
+ pDev->frm = frm;
+
+ case FRM_STATE_PAYLOAD:
+ DBG2("FRAME_STATE_PAYLOAD\n");
+		/* The complete header and parts of the payload have been read */
+ frm = pDev->frm;
+
+ dst = (unsigned char *)&frm->data[frm->len-5];
+ left = pDev->frmlen-frm->len;
+
+ ret = grtc_hw_copy(pDev,dst,left,1);
+ if ( ret < 0 ) {
+ DEBUG_ERR_LOG(pDev,5);
+			/* Error copying payload, restart scanning for a new frame */
+ pDev->frame_state = FRM_STATE_NONE;
+ frm->next = NULL;
+ grtc_pool_add_frms(frm);
+ pDev->frm = NULL;
+ pDev->stats.err++;
+ pDev->stats.err_payload++;
+ return -1;
+ } else if ( ret != left ) {
+ /* Not all bytes available, come back later */
+ frm->len += ret;
+ return 0;
+ }
+ frm->len += ret;
+ pDev->frame_state = FRM_STATE_FILLER;
+
+ case FRM_STATE_FILLER:
+ DBG2("FRAME_STATE_FILLER\n");
+ /* check filler data */
+ frm = pDev->frm;
+
+ ret = grtc_hw_check_ending(pDev,pDev->filler);
+ if ( ret != 0 ) {
+ /* Error in frame, drop frame */
+ DEBUG_ERR_LOG(pDev,6);
+ pDev->frame_state = FRM_STATE_NONE;
+ frm->next = NULL;
+ grtc_pool_add_frms(frm);
+ pDev->frm = NULL;
+ pDev->stats.err++;
+ pDev->stats.err_ending++;
+ return -1;
+ }
+
+ /* A complete frame received, put it into received frame queue */
+ if ( pDev->ready.head ) {
+ /* Queue not empty */
+ pDev->ready.tail->next = frm;
+ } else {
+ /* Queue empty */
+ pDev->ready.head = frm;
+ }
+ pDev->ready.tail = frm;
+ frm->next = NULL;
+ pDev->ready.cnt++;
+ pDev->stats.frames_recv++;
+
+ pDev->frame_state = FRM_STATE_NONE;
+ frm->next = NULL;
+ return 2;
+
+#if 0
+ case FRM_STATE_DROP:
+ DBG2("FRAME_STATE_DROP\n");
+ break;
+#endif
+
+ default:
+ printk("GRTC: internal error\n");
+ pDev->frame_state = FRM_STATE_NONE;
+ break;
+ }
+
+ return 0;
+}
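
The length parsed in FRM_STATE_HDR and the padding computed in FRM_STATE_ALLOC mean that frames are stored in whole multiples of 7 data bytes: for example, a parsed frame length of 26 is rounded up to total_len = 28, so pDev->filler = 2 and FRM_STATE_FILLER verifies two filler bytes plus the end marker after the payload.
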
+
+static rtems_device_driver grtc_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ struct grtc_priv *pDev;
+ struct drvmgr_dev *dev;
+ rtems_libio_ioctl_args_t *ioarg = (rtems_libio_ioctl_args_t *)arg;
+ unsigned int *data = ioarg->buffer;
+ int status,frm_len,i,ret;
+ struct grtc_ioc_buf_params *buf_arg;
+ struct grtc_ioc_config *cfg;
+ struct grtc_ioc_hw_status *hwregs;
+ struct grtc_ioc_pools_setup *pocfg;
+ struct grtc_ioc_assign_frm_pool *poassign;
+ struct grtc_frame *frm, *frms;
+ struct grtc_frame_pool *pool;
+ struct grtc_list *frmlist;
+ struct grtc_ioc_stats *stats;
+ unsigned int mem;
+
+ IRQ_GLOBAL_PREPARE(oldLevel);
+
+ FUNCDBG();
+
+ if ( drvmgr_get_dev(&grtc_drv_info.general, minor, &dev) ) {
+ return RTEMS_INVALID_NUMBER;
+ }
+ pDev = (struct grtc_priv *)dev->priv;
+
+ if (!ioarg)
+ return RTEMS_INVALID_NAME;
+
+ ioarg->ioctl_return = 0;
+ switch(ioarg->command) {
+ case GRTC_IOC_START:
+ if ( pDev->running ) {
+ return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+ }
+ if ( (status=grtc_start(pDev)) != RTEMS_SUCCESSFUL ){
+ return status;
+ }
+ /* Register ISR and Unmask interrupt */
+ drvmgr_interrupt_register(pDev->dev, 0, "grtc", grtc_interrupt, pDev);
+
+ /* Read and write are now open... */
+ break;
+
+ case GRTC_IOC_STOP:
+ if ( !pDev->running ) {
+ return RTEMS_RESOURCE_IN_USE;
+ }
+ drvmgr_interrupt_unregister(pDev->dev, 0, grtc_interrupt, pDev);
+ grtc_stop(pDev);
+ pDev->running = 0;
+ break;
+
+ case GRTC_IOC_ISSTARTED:
+ if ( !pDev->running ) {
+ return RTEMS_RESOURCE_IN_USE;
+ }
+ break;
+
+ case GRTC_IOC_SET_BLOCKING_MODE:
+ if ( (unsigned int)data > GRTC_BLKMODE_COMPLETE ) {
+ return RTEMS_INVALID_NAME;
+ }
+ DBG("GRTC: Set blocking mode: %d\n",(unsigned int)data);
+ pDev->blocking = (unsigned int)data;
+ break;
+
+ case GRTC_IOC_SET_TIMEOUT:
+ DBG("GRTC: Timeout: %d\n",(unsigned int)data);
+ pDev->timeout = (rtems_interval)data;
+ break;
+
+ case GRTC_IOC_SET_BUF_PARAM:
+ if ( pDev->running ) {
+ return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+ }
+
+ buf_arg = (struct grtc_ioc_buf_params *)data;
+ if ( !buf_arg ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ DBG("GRTC: IOC_SET_BUF_PARAM: Len: 0x%x, Custom Buffer: 0x%x\n",buf_arg->length,buf_arg->custom_buffer);
+
+ /* Check alignment need, skip bit 0 since that bit only indicates remote address or not */
+ if ( (unsigned int)buf_arg->custom_buffer & (~GRTC_BUF_MASK) & (~0x1) ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ if ( buf_arg->length > 0x100 ){
+ DBG("GRTC: Too big buffer requested\n");
+ return RTEMS_INVALID_NAME;
+ }
+
+ /* If current buffer allocated by driver we must free it */
+ if ( !pDev->buf_custom && pDev->buf ){
+ free(pDev->_buf);
+ pDev->_buf = NULL;
+ }
+ pDev->buf = NULL;
+ pDev->len = buf_arg->length*1024;
+
+ if (pDev->len <= 0)
+ break;
+ mem = (unsigned int)buf_arg->custom_buffer;
+ pDev->buf_custom = mem;
+
+ if (mem & 1) {
+ /* Remote address given, the address is as the GRTC
+ * core looks at it. Translate the base address into
+ * an address that the CPU can understand.
+ */
+ pDev->buf_remote = (void *)(mem & ~0x1);
+ drvmgr_translate_check(pDev->dev, DMAMEM_TO_CPU,
+ (void *)pDev->buf_remote,
+ (void **)&pDev->buf,
+ pDev->len);
+ } else {
+ if (mem == 0) {
+ pDev->buf = grtc_memalign((~GRTC_ASR_BUFST)+1,pDev->len,&pDev->_buf);
+ DBG("grtc_ioctl: SETBUF: new buf: 0x%x(0x%x), Len: %d\n",pDev->buf,pDev->_buf,pDev->len);
+ if (!pDev->buf){
+ pDev->len = 0;
+ pDev->buf_custom = 0;
+ pDev->_buf = NULL;
+ pDev->buf_remote = 0;
+ DBG("GRTC: Failed to allocate memory\n");
+ return RTEMS_NO_MEMORY;
+ }
+ } else{
+ pDev->buf = buf_arg->custom_buffer;
+ }
+
+ /* Translate into a remote address so that GRTC core
+ * on a remote AMBA bus (for example over the PCI bus)
+ * gets a valid address
+ */
+ drvmgr_translate_check(pDev->dev, CPUMEM_TO_DMA,
+ (void *)pDev->buf,
+ (void **)&pDev->buf_remote,
+ pDev->len);
+ }
+ break;
+
+ case GRTC_IOC_GET_BUF_PARAM:
+ if ( pDev->running ) {
+ return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+ }
+
+ buf_arg = (struct grtc_ioc_buf_params *)data;
+ if ( !buf_arg ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ buf_arg->length = pDev->len >> 10; /* Length in 1kByte blocks */
+ if ( pDev->buf_custom )
+ buf_arg->custom_buffer =(void *)pDev->buf;
+ else
+ buf_arg->custom_buffer = 0; /* Don't reveal internal driver buffer */
+ break;
+
+ case GRTC_IOC_SET_CONFIG:
+ cfg = (struct grtc_ioc_config *)data;
+ if ( !cfg ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ if ( pDev->running ) {
+ return RTEMS_RESOURCE_IN_USE;
+ }
+
+ pDev->config = *cfg;
+ break;
+
+ case GRTC_IOC_GET_CONFIG:
+ cfg = (struct grtc_ioc_config *)data;
+ if ( !cfg ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ *cfg = pDev->config;
+ break;
+
+ case GRTC_IOC_GET_HW_STATUS:
+ hwregs = (struct grtc_ioc_hw_status *)data;
+ if ( !hwregs ) {
+ return RTEMS_INVALID_NAME;
+ }
+ /* We disable interrupt in order to get a snapshot of the registers */
+ IRQ_GLOBAL_DISABLE(oldLevel);
+ hwregs->sir = READ_REG(&pDev->regs->sir);
+ hwregs->far = READ_REG(&pDev->regs->far);
+ hwregs->clcw1 = READ_REG(&pDev->regs->clcw1);
+ hwregs->clcw2 = READ_REG(&pDev->regs->clcw2);
+ hwregs->phir = READ_REG(&pDev->regs->phir);
+ hwregs->str = READ_REG(&pDev->regs->str);
+ IRQ_GLOBAL_ENABLE(oldLevel);
+ break;
+
+ case GRTC_IOC_GET_STATS:
+ stats = (struct grtc_ioc_stats *)data;
+ if ( !stats ) {
+ return RTEMS_INVALID_NAME;
+ }
+ memcpy(stats,&pDev->stats,sizeof(struct grtc_ioc_stats));
+ break;
+
+ case GRTC_IOC_CLR_STATS:
+ memset(&pDev->stats,0,sizeof(struct grtc_ioc_stats));
+ break;
+
+ case GRTC_IOC_SET_MODE:
+ if ( pDev->running ) {
+ return RTEMS_RESOURCE_IN_USE;
+ }
+ if ( (int)data == GRTC_MODE_FRAME ) {
+ pDev->mode = GRTC_MODE_FRAME;
+ } else if ( (int)data == GRTC_MODE_RAW ) {
+ pDev->mode = GRTC_MODE_RAW;
+ } else {
+ return RTEMS_INVALID_NAME;
+ }
+ break;
+
+ case GRTC_IOC_POOLS_SETUP:
+ if ( pDev->running ) {
+ return RTEMS_RESOURCE_IN_USE;
+ }
+ pocfg = (struct grtc_ioc_pools_setup *)data;
+ if ( (pDev->mode != GRTC_MODE_FRAME) || !pocfg ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ /* Check that list is sorted */
+ frm_len = 0;
+ for(i=0;i<pocfg->pool_cnt;i++){
+ if ( pocfg->pool_frame_len[i] <= frm_len ) {
+ return RTEMS_INVALID_NAME;
+ }
+ frm_len = pocfg->pool_frame_len[i];
+ }
+
+		/* OK, we trust the user. The pool descriptions are allocated
+		 * here, but not the frames; allocating frames is up to the user.
+		 */
+ if ( pDev->pools ) {
+ free(pDev->pools);
+ }
+ pDev->pools = malloc(pocfg->pool_cnt * sizeof(struct grtc_frame_pool));
+ if ( !pDev->pools ) {
+ pDev->pool_cnt = 0;
+ return RTEMS_NO_MEMORY;
+ }
+ pDev->pool_cnt = pocfg->pool_cnt;
+ for (i=0;i<pocfg->pool_cnt;i++) {
+ pDev->pools[i].frame_len = pocfg->pool_frame_len[i];
+ pDev->pools[i].frame_cnt = 0;
+ pDev->pools[i].frms = NULL;
+ }
+ break;
+
+ case GRTC_IOC_ASSIGN_FRM_POOL:
+ if ( pDev->running ) {
+ return RTEMS_RESOURCE_IN_USE;
+ }
+
+ if ( (pDev->mode != GRTC_MODE_FRAME) ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ poassign = (struct grtc_ioc_assign_frm_pool *)data;
+ if ( !poassign ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ /* Find pool to assign the frames to */
+ pool = NULL;
+ for(i=0; i<pDev->pool_cnt; i++) {
+ if ( pDev->pools[i].frame_len == poassign->frame_len ) {
+ pool = &pDev->pools[i];
+ break;
+ }
+ }
+ if ( !pool ) {
+ /* No Pool matching frame length */
+ return RTEMS_INVALID_NAME;
+ }
+
+ /* Assign frames to pool */
+ frm = poassign->frames;
+ while(frm){
+ frm->pool = pool; /* Assign Frame to pool */
+ frm = frm->next;
+ }
+ break;
+
+ case GRTC_IOC_ADD_BUFF:
+ frms = (struct grtc_frame *)data;
+
+ if ( (pDev->mode != GRTC_MODE_FRAME) ) {
+ return RTEMS_NOT_DEFINED;
+ }
+ if ( !frms ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+		/* Add frames to respective pools */
+ if ( grtc_pool_add_frms(frms) ) {
+ return RTEMS_INVALID_NAME;
+ }
+ break;
+
+ /* Try to read as much data as possible from DMA area and
+ * put it into free frames.
+ *
+ * If receiver is in stopped mode, let user only read previously
+ * received frames.
+ */
+ case GRTC_IOC_RECV:
+
+ if ( (pDev->mode != GRTC_MODE_FRAME) ) {
+ return RTEMS_NOT_DEFINED;
+ }
+
+		while ( pDev->running && (((ret=process_dma(pDev)) == 2) || (ret == -1)) ) {
+ /* Frame received or dropped, process next frame */
+ }
+
+ /* Take frames out from ready queue and put them to user */
+ frmlist = (struct grtc_list *)data;
+ if ( !frmlist ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ frmlist->head = pDev->ready.head;
+ frmlist->tail = pDev->ready.tail;
+ frmlist->cnt = pDev->ready.cnt;
+
+ /* Empty list */
+ pDev->ready.head = NULL;
+ pDev->ready.tail = NULL;
+ pDev->ready.cnt = 0;
+ break;
+
+ case GRTC_IOC_GET_CLCW_ADR:
+ if ( !data ) {
+ return RTEMS_INVALID_NAME;
+ }
+ *data = (unsigned int)&pDev->regs->clcw1;
+ break;
+
+ default:
+ return RTEMS_NOT_DEFINED;
+ }
+ return RTEMS_SUCCESSFUL;
+}
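
To complement the RAW-mode sketch earlier, a FRAME-mode setup sketch using the ioctl commands handled above; the pool size, the frame length and the pre-allocated, ->next-linked chain of struct grtc_frame are illustrative assumptions (same includes as the RAW-mode sketch, error handling omitted):

    /* Illustration only: FRAME-mode setup. 'frames' is an application-allocated
     * chain of struct grtc_frame, each with room for a 1024 byte frame. A DMA
     * area must also be configured with GRTC_IOC_SET_BUF_PARAM before START.
     */
    void tc_frame_sketch(int fd, struct grtc_frame *frames)
    {
        struct grtc_ioc_pools_setup pools;
        struct grtc_ioc_assign_frm_pool assign;
        struct grtc_list list;

        ioctl(fd, GRTC_IOC_SET_MODE, GRTC_MODE_FRAME);

        pools.pool_cnt = 1;
        pools.pool_frame_len[0] = 1024;        /* one pool, for frames up to 1024 bytes */
        ioctl(fd, GRTC_IOC_POOLS_SETUP, &pools);

        assign.frame_len = 1024;               /* must match a configured pool length */
        assign.frames = frames;
        ioctl(fd, GRTC_IOC_ASSIGN_FRM_POOL, &assign);
        ioctl(fd, GRTC_IOC_ADD_BUFF, frames);  /* hand the pool-assigned frames to the driver */

        ioctl(fd, GRTC_IOC_START, 0);

        ioctl(fd, GRTC_IOC_RECV, &list);       /* list.head/.tail/.cnt describe completed frames */
    }
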
+
+static void grtc_interrupt(void *arg)
+{
+ struct grtc_priv *pDev = arg;
+ struct grtc_regs *regs = pDev->regs;
+ unsigned int status;
+
+ /* Clear interrupt by reading it */
+ status = READ_REG(&regs->pisr);
+
+ /* Spurious Interrupt? */
+ if ( !pDev->running )
+ return;
+
+ if ( status & GRTC_INT_OV ){
+
+ /* Stop core (Disable receiver, interrupts), set overrun condition,
+ * Flush semaphore if thread waiting for data in grtc_wait_data().
+ */
+ pDev->overrun_condition = 1;
+
+ grtc_stop(pDev);
+
+		/* No need to handle the rest of the interrupts; the receiver is stopped. */
+ goto out;
+ }
+
+ if ( status & GRTC_INT_CS ){
+ if ( (pDev->blocking==GRTC_BLKMODE_COMPLETE) && pDev->timeout ){
+ /* Signal to thread only if enough data is available */
+ if ( pDev->wait_for_nbytes > grtc_data_avail(pDev) ){
+ /* Not enough data available */
+ goto procceed_processing_interrupts;
+ }
+
+			/* Enough data is available, so wake up the sleeping
+			 * thread.
+			 */
+ }
+
+		/* Disable further CLTU-stored interrupts; there is no point in them
+		 * until the waiting thread asks to wait for more data.
+		 */
+ regs->imr = READ_REG(&regs->imr) & ~GRTC_INT_CS;
+
+ /* Signal Semaphore to wake waiting thread in read() */
+ rtems_semaphore_release(pDev->sem_rx);
+ }
+
+procceed_processing_interrupts:
+
+ if ( status & GRTC_INT_CR ){
+
+ }
+
+ if ( status & GRTC_INT_FAR ){
+
+ }
+
+ if ( status & GRTC_INT_BLO ){
+
+ }
+
+ if ( status & GRTC_INT_RFA ){
+
+ }
+out:
+ if ( status )
+ regs->picr = status;
+}
+
+static rtems_device_driver grtc_initialize(
+ rtems_device_major_number major,
+ rtems_device_minor_number unused,
+ void *arg
+ )
+{
+ /* Device Semaphore created with count = 1 */
+ if ( rtems_semaphore_create(rtems_build_name('G', 'R', 'T', 'C'),
+ 1,
+ RTEMS_FIFO|RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+ 0,
+ &grtc_dev_sem) != RTEMS_SUCCESSFUL ) {
+ return RTEMS_INTERNAL_ERROR;
+ }
+
+ return RTEMS_SUCCESSFUL;
+}
diff --git a/c/src/lib/libbsp/sparc/shared/tmtc/grtm.c b/c/src/lib/libbsp/sparc/shared/tmtc/grtm.c
new file mode 100644
index 0000000000..de1df727ad
--- /dev/null
+++ b/c/src/lib/libbsp/sparc/shared/tmtc/grtm.c
@@ -0,0 +1,1587 @@
+/* GRTM CCSDS Telemetry Encoder driver
+ *
+ * COPYRIGHT (c) 2007.
+ * Cobham Gaisler AB.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <bsp.h>
+#include <rtems/libio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <ctype.h>
+#include <malloc.h>
+#include <rtems/bspIo.h>
+
+#include <drvmgr/drvmgr.h>
+#include <ambapp.h>
+#include <drvmgr/ambapp_bus.h>
+#include <grtm.h>
+
+#ifndef IRQ_GLOBAL_PREPARE
+ #define IRQ_GLOBAL_PREPARE(level) rtems_interrupt_level level
+#endif
+
+#ifndef IRQ_GLOBAL_DISABLE
+ #define IRQ_GLOBAL_DISABLE(level) rtems_interrupt_disable(level)
+#endif
+
+#ifndef IRQ_GLOBAL_ENABLE
+ #define IRQ_GLOBAL_ENABLE(level) rtems_interrupt_enable(level)
+#endif
+
+/*
+#define DEBUG
+#define DEBUGFUNCS
+*/
+
+#include <debug_defs.h>
+
+/* GRTM register map */
+struct grtm_regs {
+ volatile unsigned int dma_ctrl; /* DMA Control Register (0x00) */
+ volatile unsigned int dma_status; /* DMA Status Register (0x04) */
+ volatile unsigned int dma_len; /* DMA Length Register (0x08) */
+ volatile unsigned int dma_bd; /* DMA Descriptor Pointer Register (0x0c) */
+
+ volatile unsigned int dma_cfg; /* DMA Configuration Register (0x10) */
+ volatile unsigned int revision; /* GRTM Revision Register (0x14) */
+
+ int unused0[(0x80-0x18)/4];
+
+ volatile unsigned int ctrl; /* TM Control Register (0x80) */
+ volatile unsigned int status; /* TM Status Register (0x84) */
+ volatile unsigned int cfg; /* TM Configuration Register (0x88) */
+ volatile unsigned int size; /* TM Size Register (0x8c) */
+
+ volatile unsigned int phy; /* TM Physical Layer Register (0x90) */
+ volatile unsigned int code; /* TM Coding Sub-Layer Register (0x94) */
+ volatile unsigned int asmr; /* TM Attached Synchronization Marker Register (0x98) */
+
+ int unused1;
+
+ volatile unsigned int all_frm; /* TM All Frames Generation Register (0xa0) */
+ volatile unsigned int mst_frm; /* TM Master Channel Frame Generation Register (0xa4) */
+ volatile unsigned int idle_frm; /* TM Idle Frame Generation Register (0xa8) */
+
+ int unused2[(0xc0-0xac)/4];
+
+ volatile unsigned int fsh[4]; /* TM FSH/Insert Zone Registers (0xc0..0xcc) */
+
+ volatile unsigned int ocf; /* TM Operational Control Field Register (0xd0) */
+};
+
+/* DMA Control Register (0x00) */
+#define GRTM_DMA_CTRL_EN_BIT 0
+#define GRTM_DMA_CTRL_IE_BIT 1
+#define GRTM_DMA_CTRL_TXRST_BIT 2
+#define GRTM_DMA_CTRL_RST_BIT 3
+#define GRTM_DMA_CTRL_TFIE_BIT 4
+
+#define GRTM_DMA_CTRL_EN (1<<GRTM_DMA_CTRL_EN_BIT)
+#define GRTM_DMA_CTRL_IE (1<<GRTM_DMA_CTRL_IE_BIT)
+#define GRTM_DMA_CTRL_TXRST (1<<GRTM_DMA_CTRL_TXRST_BIT)
+#define GRTM_DMA_CTRL_RST (1<<GRTM_DMA_CTRL_RST_BIT)
+#define GRTM_DMA_CTRL_TFIE (1<<GRTM_DMA_CTRL_TFIE_BIT)
+
+/* DMA Status Register (0x04) */
+#define GRTM_DMA_STS_TE_BIT 0
+#define GRTM_DMA_STS_TI_BIT 1
+#define GRTM_DMA_STS_TA_BIT 2
+#define GRTM_DMA_STS_TFF_BIT 3
+#define GRTM_DMA_STS_TFS_BIT 4
+
+#define GRTM_DMA_STS_TE (1<<GRTM_DMA_STS_TE_BIT)
+#define GRTM_DMA_STS_TI (1<<GRTM_DMA_STS_TI_BIT)
+#define GRTM_DMA_STS_TA (1<<GRTM_DMA_STS_TA_BIT)
+#define GRTM_DMA_STS_TFF (1<<GRTM_DMA_STS_TFF_BIT)
+#define GRTM_DMA_STS_TFS (1<<GRTM_DMA_STS_TFS_BIT)
+#define GRTM_DMA_STS_ALL 0x1f
+
+/* DMA Length Register (0x08) */
+#define GRTM_DMA_LEN_LEN_BIT 0
+#define GRTM_DMA_LEN_LIM_BIT 16
+
+#define GRTM_DMA_LEN_LEN (0x7ff<<GRTM_DMA_LEN_LEN_BIT)
+#define GRTM_DMA_LEN_LIM (0x3ff<<GRTM_DMA_LEN_LIM_BIT)
+
+/* DMA Descriptor Pointer Register (0x0c) */
+#define GRTM_DMA_BD_INDEX_BIT 0
+#define GRTM_DMA_BD_BASE_BIT 10
+
+#define GRTM_DMA_BD_INDEX (0x3ff<<GRTM_DMA_BD_INDEX_BIT)
+#define GRTM_DMA_BD_BASE (0xfffffc<<GRTM_DMA_BD_BASE_BIT)
+
+/* DMA Configuration Register (0x10) */
+#define GRTM_DMA_CFG_BLKSZ_BIT 0
+#define GRTM_DMA_CFG_FIFOSZ_BIT 16
+
+#define GRTM_DMA_CFG_BLKSZ (0xffff<<GRTM_DMA_CFG_BLKSZ_BIT)
+#define GRTM_DMA_CFG_FIFOSZ (0xffff<<GRTM_DMA_CFG_FIFOSZ_BIT)
+
+/* TM Control Register (0x80) */
+#define GRTM_CTRL_EN_BIT 0
+
+#define GRTM_CTRL_EN (1<<GRTM_CTRL_EN_BIT)
+
+/* TM Status Register (0x84) - Unused */
+
+/* TM Configuration Register (0x88) */
+#define GRTM_CFG_SC_BIT 0
+#define GRTM_CFG_SP_BIT 1
+#define GRTM_CFG_CE_BIT 2
+#define GRTM_CFG_NRZ_BIT 3
+#define GRTM_CFG_PSR_BIT 4
+#define GRTM_CFG_TE_BIT 5
+#define GRTM_CFG_RSDEP_BIT 6
+#define GRTM_CFG_RS_BIT 9
+#define GRTM_CFG_AASM_BIT 11
+#define GRTM_CFG_FECF_BIT 12
+#define GRTM_CFG_OCF_BIT 13
+#define GRTM_CFG_EVC_BIT 14
+#define GRTM_CFG_IDLE_BIT 15
+#define GRTM_CFG_FSH_BIT 16
+#define GRTM_CFG_MCG_BIT 17
+#define GRTM_CFG_IZ_BIT 18
+#define GRTM_CFG_FHEC_BIT 19
+#define GRTM_CFG_AOS_BIT 20
+#define GRTM_CFG_CIF_BIT 21
+#define GRTM_CFG_OCFB_BIT 22
+
+#define GRTM_CFG_SC (1<<GRTM_CFG_SC_BIT)
+#define GRTM_CFG_SP (1<<GRTM_CFG_SP_BIT)
+#define GRTM_CFG_CE (1<<GRTM_CFG_CE_BIT)
+#define GRTM_CFG_NRZ (1<<GRTM_CFG_NRZ_BIT)
+#define GRTM_CFG_PSR (1<<GRTM_CFG_PSR_BIT)
+#define GRTM_CFG_TE (1<<GRTM_CFG_TE_BIT)
+#define GRTM_CFG_RSDEP (0x7<<GRTM_CFG_RSDEP_BIT)
+#define GRTM_CFG_RS (0x3<<GRTM_CFG_RS_BIT)
+#define GRTM_CFG_AASM (1<<GRTM_CFG_AASM_BIT)
+#define GRTM_CFG_FECF (1<<GRTM_CFG_FECF_BIT)
+#define GRTM_CFG_OCF (1<<GRTM_CFG_OCF_BIT)
+#define GRTM_CFG_EVC (1<<GRTM_CFG_EVC_BIT)
+#define GRTM_CFG_IDLE (1<<GRTM_CFG_IDLE_BIT)
+#define GRTM_CFG_FSH (1<<GRTM_CFG_FSH_BIT)
+#define GRTM_CFG_MCG (1<<GRTM_CFG_MCG_BIT)
+#define GRTM_CFG_IZ (1<<GRTM_CFG_IZ_BIT)
+#define GRTM_CFG_FHEC (1<<GRTM_CFG_FHEC_BIT)
+#define GRTM_CFG_AOS (1<<GRTM_CFG_AOS_BIT)
+#define GRTM_CFG_CIF (1<<GRTM_CFG_CIF_BIT)
+#define GRTM_CFG_OCFB (1<<GRTM_CFG_OCFB_BIT)
+
+/* TM Size Register (0x8c) */
+#define GRTM_SIZE_BLKSZ_BIT 0
+#define GRTM_SIZE_FIFOSZ_BIT 8
+#define GRTM_SIZE_LEN_BIT 20
+
+#define GRTM_SIZE_BLKSZ (0xff<<GRTM_SIZE_BLKSZ_BIT)
+#define GRTM_SIZE_FIFOSZ (0xfff<<GRTM_SIZE_FIFOSZ_BIT)
+#define GRTM_SIZE_LEN (0xfff<<GRTM_SIZE_LEN_BIT)
+
+/* TM Physical Layer Register (0x90) */
+#define GRTM_PHY_SUB_BIT 0
+#define GRTM_PHY_SCF_BIT 15
+#define GRTM_PHY_SYM_BIT 16
+#define GRTM_PHY_SF_BIT 31
+
+#define GRTM_PHY_SUB (0x7fff<<GRTM_PHY_SUB_BIT)
+#define GRTM_PHY_SCF (1<<GRTM_PHY_SCF_BIT)
+#define GRTM_PHY_SYM (0x7fff<<GRTM_PHY_SYM_BIT)
+#define GRTM_PHY_SF (1<<GRTM_PHY_SF_BIT)
+
+/* TM Coding Sub-Layer Register (0x94) */
+#define GRTM_CODE_SC_BIT 0
+#define GRTM_CODE_SP_BIT 1
+#define GRTM_CODE_CERATE_BIT 2
+#define GRTM_CODE_CE_BIT 5
+#define GRTM_CODE_NRZ_BIT 6
+#define GRTM_CODE_PSR_BIT 7
+#define GRTM_CODE_RS8_BIT 11
+#define GRTM_CODE_RSDEP_BIT 12
+#define GRTM_CODE_RS_BIT 15
+#define GRTM_CODE_AASM_BIT 16
+#define GRTM_CODE_CSEL_BIT 17
+
+#define GRTM_CODE_SC (1<<GRTM_CODE_SC_BIT)
+#define GRTM_CODE_SP (1<<GRTM_CODE_SP_BIT)
+#define GRTM_CODE_CERATE (0x7<<GRTM_CODE_CERATE_BIT)
+#define GRTM_CODE_CE (1<<GRTM_CODE_CE_BIT)
+#define GRTM_CODE_NRZ (1<<GRTM_CODE_NRZ_BIT)
+#define GRTM_CODE_PSR (1<<GRTM_CODE_PSR_BIT)
+#define GRTM_CODE_RS8 (1<<GRTM_CODE_RS8_BIT)
+#define GRTM_CODE_RSDEP (0x7<<GRTM_CODE_RSDEP_BIT)
+#define GRTM_CODE_RS (1<<GRTM_CODE_RS_BIT)
+#define GRTM_CODE_AASM (1<<GRTM_CODE_AASM_BIT)
+#define GRTM_CODE_CSEL (0x3<<GRTM_CODE_CSEL_BIT)
+
+/* TM Attached Synchronization Marker Register (0x98) */
+#define GRTM_ASM_BIT 0
+
+#define GRTM_ASM 0xffffffff
+
+/* TM All Frames Generation Register (0xa0) */
+#define GRTM_ALL_LEN_BIT 0
+#define GRTM_ALL_VER_BIT 12
+#define GRTM_ALL_FHEC_BIT 14
+#define GRTM_ALL_FECF_BIT 15
+#define GRTM_ALL_IZ_BIT 16
+#define GRTM_ALL_IZLEN_BIT 17
+
+#define GRTM_ALL_LEN (0x7ff<<GRTM_ALL_LEN_BIT)
+#define GRTM_ALL_VER (0x3<<GRTM_ALL_VER_BIT)
+#define GRTM_ALL_FHEC (1<<GRTM_ALL_FHEC_BIT)
+#define GRTM_ALL_FECF (1<<GRTM_ALL_FECF_BIT)
+#define GRTM_ALL_IZ (1<<GRTM_ALL_IZ_BIT)
+#define GRTM_ALL_IZLEN (0x1f<<GRTM_ALL_IZLEN_BIT)
+
+/* TM Master Channel Frame Generation Register (0xa4) */
+#define GRTM_MST_OW_BIT 0
+#define GRTM_MST_OCF_BIT 1
+#define GRTM_MST_FSH_BIT 2
+#define GRTM_MST_MC_BIT 3
+#define GRTM_MST_MCCNTR_BIT 24
+
+#define GRTM_MST_OW (1<<GRTM_MST_OW_BIT)
+#define GRTM_MST_OCF (1<<GRTM_MST_OCF_BIT)
+#define GRTM_MST_FSH (1<<GRTM_MST_FSH_BIT)
+#define GRTM_MST_MC (0xff<<GRTM_MST_MC_BIT)
+
+/* TM Idle Frame Generation Register (0xa8) */
+#define GRTM_IDLE_SCID_BIT 0
+#define GRTM_IDLE_VCID_BIT 10
+#define GRTM_IDLE_MC_BIT 16
+#define GRTM_IDLE_VCC_BIT 17
+#define GRTM_IDLE_FSH_BIT 18
+#define GRTM_IDLE_EVC_BIT 19
+#define GRTM_IDLE_OCF_BIT 20
+#define GRTM_IDLE_IDLE_BIT 21
+#define GRTM_IDLE_MCCNTR_BIT 24
+
+#define GRTM_IDLE_SCID (0x3ff<<GRTM_IDLE_SCID_BIT)
+#define GRTM_IDLE_VCID (0x3f<<GRTM_IDLE_VCID_BIT)
+#define GRTM_IDLE_MC (1<<GRTM_IDLE_MC_BIT)
+#define GRTM_IDLE_VCC (1<<GRTM_IDLE_VCC_BIT)
+#define GRTM_IDLE_FSH (1<<GRTM_IDLE_FSH_BIT)
+#define GRTM_IDLE_EVC (1<<GRTM_IDLE_EVC_BIT)
+#define GRTM_IDLE_OCF (1<<GRTM_IDLE_OCF_BIT)
+#define GRTM_IDLE_IDLE (1<<GRTM_IDLE_IDLE_BIT)
+#define GRTM_IDLE_MCCNTR (0xff<<GRTM_IDLE_MCCNTR_BIT)
+
+/* TM FSH/Insert Zone Registers (0xc0..0xcc) */
+#define GRTM_FSH_DATA_BIT 0
+
+#define GRTM_FSH_DATA 0xffffffff
+
+
+/* TM Operational Control Field Register (0xd0) */
+#define GRTM_OCF_CLCW_BIT 0
+
+#define GRTM_OCF_CLCW 0xffffffff
+
+
+/* GRTM Revision 0 */
+#define GRTM_REV0_DMA_CTRL_TXRDY_BIT 5
+#define GRTM_REV0_DMA_CTRL_TXRDY (1<<GRTM_REV0_DMA_CTRL_TXRDY_BIT)
+
+/* GRTM Revision 1 */
+#define GRTM_REV1_DMA_STS_TXRDY_BIT 6
+#define GRTM_REV1_DMA_STS_TXSTAT_BIT 7
+#define GRTM_REV1_DMA_STS_TXRDY (1<<GRTM_REV1_DMA_STS_TXRDY_BIT)
+#define GRTM_REV1_DMA_STS_TXSTAT (1<<GRTM_REV1_DMA_STS_TXSTAT_BIT)
+
+#define GRTM_REV1_REV_SREV_BIT 0
+#define GRTM_REV1_REV_MREV_BIT 8
+#define GRTM_REV1_REV_TIRQ_BIT 16
+#define GRTM_REV1_REV_SREV (0xff<<GRTM_REV1_REV_SREV_BIT)
+#define GRTM_REV1_REV_MREV (0xff<<GRTM_REV1_REV_MREV_BIT)
+#define GRTM_REV1_REV_TIRQ (1<<GRTM_REV1_REV_TIRQ_BIT)
+
+
+/* GRTM transmit descriptor (0x400 alignment required) */
+struct grtm_bd {
+ volatile unsigned int ctrl;
+ unsigned int address;
+};
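+
+/* Each descriptor holds a control word (enable, wrap-around, interrupt-enable
+ * and per-frame option bits, defined below) and the address of the frame payload.
+ */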
+
+#define GRTM_BD_EN_BIT 0
+#define GRTM_BD_WR_BIT 1
+#define GRTM_BD_IE_BIT 2
+#define GRTM_BD_FECFB_BIT 3
+#define GRTM_BD_IZB_BIT 4
+#define GRTM_BD_FHECB_BIT 5
+#define GRTM_BD_OCFB_BIT 6
+#define GRTM_BD_FSHB_BIT 7
+#define GRTM_BD_MCB_BIT 8
+#define GRTM_BD_VCE_BIT 9
+#define GRTM_BD_TS_BIT 14
+#define GRTM_BD_UE_BIT 15
+
+#define GRTM_BD_EN (1<<GRTM_BD_EN_BIT)
+#define GRTM_BD_WR (1<<GRTM_BD_WR_BIT)
+#define GRTM_BD_IE (1<<GRTM_BD_IE_BIT)
+#define GRTM_BD_FECFB (1<<GRTM_BD_FECFB_BIT)
+#define GRTM_BD_IZB (1<<GRTM_BD_IZB_BIT)
+#define GRTM_BD_FHECB (1<<GRTM_BD_FHECB_BIT)
+#define GRTM_BD_OCFB (1<<GRTM_BD_OCFB_BIT)
+#define GRTM_BD_FSHB (1<<GRTM_BD_FSHB_BIT)
+#define GRTM_BD_MCB (1<<GRTM_BD_MCB_BIT)
+#define GRTM_BD_VCE (1<<GRTM_BD_VCE_BIT)
+#define GRTM_BD_TS (1<<GRTM_BD_TS_BIT)
+#define GRTM_BD_UE (1<<GRTM_BD_UE_BIT)
+
+/* Read a hardware register through a volatile access */
+
+#define READ_REG(address) (*(volatile unsigned int *)address)
+
+/* Driver functions */
+static rtems_device_driver grtm_initialize(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtm_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtm_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtm_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtm_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+static rtems_device_driver grtm_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
+
+#define GRTM_DRIVER_TABLE_ENTRY { grtm_initialize, grtm_open, grtm_close, grtm_read, grtm_write, grtm_ioctl }
+
+static rtems_driver_address_table grtm_driver = GRTM_DRIVER_TABLE_ENTRY;
+
+/* Structure that connects a BD with a software frame */
+struct grtm_ring {
+ struct grtm_ring *next;
+ struct grtm_bd *bd;
+ struct grtm_frame *frm;
+};
+
+struct grtm_priv {
+ struct drvmgr_dev *dev; /* Driver manager device */
+ char devName[32]; /* Device Name */
+ struct grtm_regs *regs;
+ int irq;
+ int minor;
+ int subrev; /* GRTM Revision */
+
+ int open;
+ int running;
+
+ struct grtm_bd *bds;
+ void *_bds;
+
+ /* Interrupt generation */
+	int enable_cnt_curr;/* Down counter; when it reaches 0 the interrupt bit is set for the next descriptor */
+	volatile int handling_transmission; /* Tells the ISR whether the user is actively changing descriptors/queues */
+
+ struct grtm_ring *_ring; /* Root of ring */
+ struct grtm_ring *ring; /* Next ring to use for new frames to be transmitted */
+ struct grtm_ring *ring_end; /* Oldest activated ring used */
+
+	/* Collections of frames: ready to be sent, scheduled for transmission,
+	 * and sent frames waiting for the user to reclaim.
+	 */
+	struct grtm_list ready; /* Frames waiting for free BDs */
+	struct grtm_list scheduled; /* Frames in BDs being transmitted */
+	struct grtm_list sent; /* Sent frames waiting for user to reclaim and reuse */
+
+ /* Number of frames in the lists */
+ int ready_cnt; /* Number of ready frames */
+ int scheduled_cnt; /* Number of scheduled frames */
+ int sent_cnt; /* Number of sent frames */
+
+ struct grtm_ioc_hw hw_avail; /* Hardware support available */
+ struct grtm_ioc_config config;
+ struct grtm_ioc_stats stats;
+
+ rtems_id sem_tx;
+};
+
+/* Prototypes */
+static void *grtm_memalign(unsigned int boundary, unsigned int length, void *realbuf);
+static void grtm_hw_reset(struct grtm_priv *pDev);
+static void grtm_interrupt(void *arg);
+
+/* Common Global Variables */
+static rtems_id grtm_dev_sem;
+static int grtm_driver_io_registered = 0;
+static rtems_device_major_number grtm_driver_io_major = 0;
+
+/******************* Driver manager interface ***********************/
+
+/* Driver prototypes */
+static int grtm_register_io(rtems_device_major_number *m);
+static int grtm_device_init(struct grtm_priv *pDev);
+
+static int grtm_init2(struct drvmgr_dev *dev);
+static int grtm_init3(struct drvmgr_dev *dev);
+
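+/* Driver manager init stages: init1 is not used, init2 allocates the
+ * per-device private structure and init3 performs device initialization
+ * and registers the device node.
+ */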
+static struct drvmgr_drv_ops grtm_ops =
+{
+ {NULL, grtm_init2, grtm_init3, NULL},
+ NULL,
+ NULL
+};
+
+static struct amba_dev_id grtm_ids[] =
+{
+ {VENDOR_GAISLER, GAISLER_GRTM},
+ {0, 0} /* Mark end of table */
+};
+
+static struct amba_drv_info grtm_drv_info =
+{
+ {
+ DRVMGR_OBJ_DRV, /* Driver */
+ NULL, /* Next driver */
+ NULL, /* Device list */
+ DRIVER_AMBAPP_GAISLER_GRTM_ID, /* Driver ID */
+ "GRTM_DRV", /* Driver Name */
+ DRVMGR_BUS_TYPE_AMBAPP, /* Bus Type */
+ &grtm_ops,
+ NULL, /* Funcs */
+ 0, /* No devices yet */
+ 0,
+ },
+ &grtm_ids[0]
+};
+
+void grtm_register_drv (void)
+{
+ DBG("Registering GRTM driver\n");
+ drvmgr_drv_register(&grtm_drv_info.general);
+}
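+
+/* Minimal application usage sketch (not part of the driver): the ioctl
+ * commands below are the ones implemented by grtm_ioctl() further down;
+ * 'cfg' and 'chain' are application-owned grtm_ioc_config / grtm_list objects.
+ *
+ *   int fd = open("/dev/grtm0", O_RDWR);
+ *   ioctl(fd, GRTM_IOC_SET_CONFIG, &cfg);   // optional, defaults are set at open()
+ *   ioctl(fd, GRTM_IOC_START, NULL);
+ *   ioctl(fd, GRTM_IOC_SEND, &chain);       // chain of struct grtm_frame
+ *   ioctl(fd, GRTM_IOC_RECLAIM, &chain);    // collect sent frames for reuse
+ *   ioctl(fd, GRTM_IOC_STOP, NULL);
+ */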
+
+static int grtm_init2(struct drvmgr_dev *dev)
+{
+ struct grtm_priv *priv;
+
+ DBG("GRTM[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
+ priv = dev->priv = malloc(sizeof(struct grtm_priv));
+ if ( !priv )
+ return DRVMGR_NOMEM;
+ memset(priv, 0, sizeof(*priv));
+ priv->dev = dev;
+
+	/* This core will not find other cores, so device initialization is deferred to init3() */
+
+ return DRVMGR_OK;
+}
+
+static int grtm_init3(struct drvmgr_dev *dev)
+{
+ struct grtm_priv *priv;
+ char prefix[32];
+ rtems_status_code status;
+
+ priv = dev->priv;
+
+ /* Do initialization */
+
+ if ( grtm_driver_io_registered == 0) {
+ /* Register the I/O driver only once for all cores */
+ if ( grtm_register_io(&grtm_driver_io_major) ) {
+ /* Failed to register I/O driver */
+ dev->priv = NULL;
+ return DRVMGR_FAIL;
+ }
+
+ grtm_driver_io_registered = 1;
+ }
+
+ /* I/O system registered and initialized
+ * Now we take care of device initialization.
+ */
+ if ( grtm_device_init(priv) ) {
+ return DRVMGR_FAIL;
+ }
+
+ /* Get Filesystem name prefix */
+ prefix[0] = '\0';
+ if ( drvmgr_get_dev_prefix(dev, prefix) ) {
+		/* Failed to get prefix, ensure a unique FS name
+		 * by using the driver minor.
+		 */
+ sprintf(priv->devName, "/dev/grtm%d", dev->minor_drv);
+ } else {
+		/* Got a special prefix, meaning we have a bus prefix
+		 * and should use our "bus minor"
+		 */
+ sprintf(priv->devName, "/dev/%sgrtm%d", prefix, dev->minor_bus);
+ }
+
+ /* Register Device */
+ status = rtems_io_register_name(priv->devName, grtm_driver_io_major, dev->minor_drv);
+ if (status != RTEMS_SUCCESSFUL) {
+ return DRVMGR_FAIL;
+ }
+
+ return DRVMGR_OK;
+}
+
+/******************* Driver Implementation ***********************/
+
+static int grtm_register_io(rtems_device_major_number *m)
+{
+ rtems_status_code r;
+
+ if ((r = rtems_io_register_driver(0, &grtm_driver, m)) == RTEMS_SUCCESSFUL) {
+ DBG("GRTM driver successfully registered, major: %d\n", *m);
+ } else {
+ switch(r) {
+ case RTEMS_TOO_MANY:
+ printk("GRTM rtems_io_register_driver failed: RTEMS_TOO_MANY\n");
+ return -1;
+ case RTEMS_INVALID_NUMBER:
+ printk("GRTM rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n");
+ return -1;
+ case RTEMS_RESOURCE_IN_USE:
+ printk("GRTM rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n");
+ return -1;
+ default:
+ printk("GRTM rtems_io_register_driver failed\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int grtm_device_init(struct grtm_priv *pDev)
+{
+ struct amba_dev_info *ambadev;
+ struct ambapp_core *pnpinfo;
+ union drvmgr_key_value *value;
+
+ /* Get device information from AMBA PnP information */
+ ambadev = (struct amba_dev_info *)pDev->dev->businfo;
+ if ( ambadev == NULL ) {
+ return -1;
+ }
+ pnpinfo = &ambadev->info;
+ pDev->irq = pnpinfo->irq;
+ pDev->regs = (struct grtm_regs *)pnpinfo->apb_slv->start;
+ pDev->minor = pDev->dev->minor_drv;
+ pDev->open = 0;
+ pDev->running = 0;
+
+ /* Create Binary RX Semaphore with count = 0 */
+ if ( rtems_semaphore_create(rtems_build_name('G', 'R', 'M', '0' + pDev->minor),
+ 0,
+ RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|\
+ RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+ 0,
+ &pDev->sem_tx) != RTEMS_SUCCESSFUL ) {
+ return -1;
+ }
+
+ /* Allocate Memory for Buffer Descriptor Table, or let user provide a custom
+ * address.
+ */
+ value = drvmgr_dev_key_get(pDev->dev, "bdTabAdr", KEY_TYPE_POINTER);
+ if ( value ) {
+ pDev->bds = (struct grtm_bd *)value->ptr;
+ pDev->_bds = (void *)value->ptr;
+ } else {
+ pDev->bds = (struct grtm_bd *)grtm_memalign(0x400, 0x400, &pDev->_bds);
+ }
+ if ( !pDev->bds ) {
+ DBG("GRTM: Failed to allocate descriptor table\n");
+ return -1;
+ }
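+	/* 128 descriptors x 8 bytes = 1 kbyte (0x400) descriptor table */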
+ memset(pDev->bds, 0, 0x400);
+
+ pDev->_ring = malloc(sizeof(struct grtm_ring) * 128);
+ if ( !pDev->_ring ) {
+ return -1;
+ }
+
+ /* Reset Hardware before attaching IRQ handler */
+ grtm_hw_reset(pDev);
+
+	/* Read SUB revision number; it decides where the TXRDY bit is located */
+ pDev->subrev = (READ_REG(&pDev->regs->revision) & GRTM_REV1_REV_SREV)
+ >> GRTM_REV1_REV_SREV_BIT;
+
+ return 0;
+}
+
+
+static inline void grtm_list_clr(struct grtm_list *list)
+{
+ list->head = NULL;
+ list->tail = NULL;
+}
+
+static void grtm_hw_reset(struct grtm_priv *pDev)
+{
+ /* Reset Core */
+ pDev->regs->dma_ctrl = GRTM_DMA_CTRL_RST;
+}
+
+static void grtm_hw_get_implementation(struct grtm_priv *pDev, struct grtm_ioc_hw *hwcfg)
+{
+ unsigned int cfg = READ_REG(&pDev->regs->cfg);
+
+ hwcfg->cs = (cfg & GRTM_CFG_SC) ? 1:0;
+ hwcfg->sp = (cfg & GRTM_CFG_SP) ? 1:0;
+ hwcfg->ce = (cfg & GRTM_CFG_CE) ? 1:0;
+ hwcfg->nrz = (cfg & GRTM_CFG_NRZ) ? 1:0;
+ hwcfg->psr = (cfg & GRTM_CFG_PSR) ? 1:0;
+ hwcfg->te = (cfg & GRTM_CFG_TE) ? 1:0;
+ hwcfg->rsdep = (cfg & GRTM_CFG_RSDEP)>>GRTM_CFG_RSDEP_BIT;
+ hwcfg->rs = (cfg & GRTM_CFG_RS)>>GRTM_CFG_RS_BIT;
+ hwcfg->aasm = (cfg & GRTM_CFG_AASM) ? 1:0;
+ hwcfg->fecf = (cfg & GRTM_CFG_FECF) ? 1:0;
+ hwcfg->ocf = (cfg & GRTM_CFG_OCF) ? 1:0;
+ hwcfg->evc = (cfg & GRTM_CFG_EVC) ? 1:0;
+ hwcfg->idle = (cfg & GRTM_CFG_IDLE) ? 1:0;
+ hwcfg->fsh = (cfg & GRTM_CFG_FSH) ? 1:0;
+ hwcfg->mcg = (cfg & GRTM_CFG_MCG) ? 1:0;
+ hwcfg->iz = (cfg & GRTM_CFG_IZ) ? 1:0;
+ hwcfg->fhec = (cfg & GRTM_CFG_FHEC) ? 1:0;
+ hwcfg->aos = (cfg & GRTM_CFG_AOS) ? 1:0;
+ hwcfg->cif = (cfg & GRTM_CFG_CIF) ? 1:0;
+ hwcfg->ocfb = (cfg & GRTM_CFG_OCFB) ? 1:0;
+
+ cfg = READ_REG(&pDev->regs->dma_cfg);
+ hwcfg->blk_size = (cfg & GRTM_DMA_CFG_BLKSZ) >> GRTM_DMA_CFG_BLKSZ_BIT;
+ hwcfg->fifo_size= (cfg & GRTM_DMA_CFG_FIFOSZ) >> GRTM_DMA_CFG_FIFOSZ_BIT;
+}
+
+#warning Extra: Implement proper default calculation from hardware configuration
+static void grtm_hw_get_default_modes(struct grtm_ioc_config *cfg, struct grtm_ioc_hw *hwcfg)
+{
+ cfg->mode = GRTM_MODE_TM;
+ cfg->frame_length = 223;
+	cfg->limit = 0;	/* Let driver auto-configure it on START; user may override with a non-zero value */
+ cfg->as_marker = 0x1ACFFC1D;
+
+ /* Physical */
+ cfg->phy_subrate = 1;
+ cfg->phy_symbolrate = 1;
+ cfg->phy_opts = 0;
+
+ /* Coding Layer */
+ cfg->code_rsdep = 1;
+ cfg->code_ce_rate = 0;
+ cfg->code_csel = 0;
+ cfg->code_opts = 0;
+
+ /* All Frame Generation */
+ cfg->all_izlen = 0;
+ cfg->all_opts = GRTM_IOC_ALL_FECF;
+
+ /* Master Channel Frame Generation */
+ if ( hwcfg->mcg ) {
+ cfg->mf_opts = GRTM_IOC_MF_MC;
+ } else {
+ cfg->mf_opts = 0;
+ }
+
+ /* Idle Frame Generation */
+ cfg->idle_scid = 0;
+ cfg->idle_vcid = 0;
+ if ( hwcfg->idle ) {
+ cfg->idle_opts = GRTM_IOC_IDLE_EN;
+ } else {
+ cfg->idle_opts = 0;
+ }
+
+ /* Interrupt options */
+ cfg->blocking = 0; /* non-blocking mode is default */
+	cfg->enable_cnt = 16;	/* generate an interrupt every 16th descriptor */
+ cfg->isr_desc_proc = 1; /* Let interrupt handler do descriptor processing */
+ cfg->timeout = RTEMS_NO_TIMEOUT;
+
+}
+
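+/* Allocate 'length' bytes aligned to 'boundary' (a power of two). The helper
+ * over-allocates by 'boundary' bytes and rounds the returned pointer up; the
+ * original malloc() pointer is stored in *realbuf so it can be freed later.
+ */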
+static void *grtm_memalign(unsigned int boundary, unsigned int length, void *realbuf)
+{
+ *(int *)realbuf = (int)malloc(length+boundary);
+	DBG("GRTM: Allocated %d (0x%x) bytes, requested: %d\n",length+boundary,length+boundary,length);
+ return (void *)(((*(unsigned int *)realbuf)+boundary) & ~(boundary-1));
+}
+
+static int grtm_hw_set_config(struct grtm_priv *pDev, struct grtm_ioc_config *cfg, struct grtm_ioc_hw *hwcfg)
+{
+ struct grtm_regs *regs = pDev->regs;
+ unsigned int tmp;
+ unsigned int limit;
+
+ if ( cfg->limit == 0 ) {
+ /* Calculate Limit */
+ if ( cfg->frame_length > hwcfg->blk_size ) {
+ limit = hwcfg->blk_size*2;
+ } else {
+ limit = cfg->frame_length;
+ }
+ } else {
+ /* Use user configured limit */
+ limit = cfg->limit;
+ }
+
+ /* Frame Length and Limit */
+ regs->dma_len = (((limit-1) << GRTM_DMA_LEN_LIM_BIT) & GRTM_DMA_LEN_LIM)|
+ (((cfg->frame_length-1) << GRTM_DMA_LEN_LEN_BIT) & GRTM_DMA_LEN_LEN);
+
+ /* Physical layer options */
+ tmp = (cfg->phy_opts & (GRTM_IOC_PHY_SCF|GRTM_IOC_PHY_SF)) |
+ (((cfg->phy_symbolrate-1)<<GRTM_PHY_SYM_BIT) & GRTM_PHY_SYM) | (((cfg->phy_subrate-1)<<GRTM_PHY_SUB_BIT) & GRTM_PHY_SUB);
+ regs->phy = tmp;
+
+ /* Coding Sub-layer Options */
+ tmp = (cfg->code_opts & GRTM_IOC_CODE_ALL) | ((cfg->code_csel<<GRTM_CODE_CSEL_BIT) & GRTM_CODE_CSEL) |
+ (((cfg->code_rsdep-1)<<GRTM_CODE_RSDEP_BIT) & GRTM_CODE_RSDEP) | ((cfg->code_ce_rate<<GRTM_CODE_CERATE_BIT) & GRTM_CODE_CERATE);
+ regs->code = tmp;
+
+ /* Attached synchronization marker register */
+ regs->asmr = cfg->as_marker;
+
+ /* All Frames Generation */
+ tmp = ((cfg->all_opts & GRTM_IOC_ALL_ALL)<<14) |
+ ((cfg->all_izlen<<GRTM_ALL_IZLEN_BIT) & GRTM_ALL_IZLEN) |
+ ((cfg->mode<<GRTM_ALL_VER_BIT) & GRTM_ALL_VER);
+ regs->all_frm = tmp;
+
+ /* Master Frame Generation */
+ regs->mst_frm = cfg->mf_opts & GRTM_IOC_MF_ALL;
+
+ /* Idle frame Generation */
+ tmp = ((cfg->idle_opts & GRTM_IOC_IDLE_ALL) << 16) |
+ ((cfg->idle_vcid << GRTM_IDLE_VCID_BIT) & GRTM_IDLE_VCID) |
+ ((cfg->idle_scid << GRTM_IDLE_SCID_BIT) & GRTM_IDLE_SCID);
+ regs->idle_frm = tmp;
+
+ return 0;
+}
+
+static int grtm_start(struct grtm_priv *pDev)
+{
+ struct grtm_regs *regs = pDev->regs;
+ int i;
+ struct grtm_ioc_config *cfg = &pDev->config;
+ volatile unsigned int *txrdy_reg;
+ unsigned int txrdy_mask;
+
+ /* Clear Descriptors */
+ memset(pDev->bds,0,0x400);
+
+ /* Clear stats */
+ memset(&pDev->stats,0,sizeof(struct grtm_ioc_stats));
+
+ /* Init Descriptor Ring */
+ memset(pDev->_ring,0,sizeof(struct grtm_ring)*128);
+ for(i=0;i<127;i++){
+ pDev->_ring[i].next = &pDev->_ring[i+1];
+ pDev->_ring[i].bd = &pDev->bds[i];
+ pDev->_ring[i].frm = NULL;
+ }
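+	/* Close the circular ring: the last entry wraps back to the first */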
+ pDev->_ring[127].next = &pDev->_ring[0];
+ pDev->_ring[127].bd = &pDev->bds[127];
+ pDev->_ring[127].frm = NULL;
+
+ pDev->ring = &pDev->_ring[0];
+ pDev->ring_end = &pDev->_ring[0];
+
+ /* Clear Scheduled, Ready and Sent list */
+ grtm_list_clr(&pDev->ready);
+ grtm_list_clr(&pDev->scheduled);
+ grtm_list_clr(&pDev->sent);
+
+ /* Software init */
+ pDev->handling_transmission = 0;
+
+ /* Reset the transmitter */
+ regs->dma_ctrl = GRTM_DMA_CTRL_TXRST;
+ regs->dma_ctrl = 0; /* Leave Reset */
+
+ /* Clear old interrupts */
+ regs->dma_status = GRTM_DMA_STS_ALL;
+
+ /* Set Descriptor Pointer Base register to point to first descriptor */
+ drvmgr_translate_check(pDev->dev, CPUMEM_TO_DMA, (void *)pDev->bds,
+ (void **)&regs->dma_bd, 0x400);
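+	/* The CPU address of the table is translated to the address seen by the
+	 * GRTM DMA engine; they differ when the core sits on a remote bus (see
+	 * the payload address handling in grtm_schedule_ready()).
+	 */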
+
+ /* Set hardware options as defined by config */
+ if ( grtm_hw_set_config(pDev, cfg, &pDev->hw_avail) ) {
+ return RTEMS_IO_ERROR;
+ }
+
+ /* Enable TM Transmitter */
+ regs->ctrl = GRTM_CTRL_EN;
+
+	/* Give the transmitter a short while to start before polling TXRDY */
+ i=1000;
+ while( i > 0 ) {
+ asm volatile ("nop"::);
+ i--;
+ }
+
+ /* Location of TXRDY Bit is different for different revisions */
+ if ( pDev->subrev == 0 ) {
+ txrdy_reg = &regs->dma_ctrl;
+ txrdy_mask = GRTM_REV0_DMA_CTRL_TXRDY;
+ } else {
+ txrdy_reg = &regs->dma_status;
+ txrdy_mask = GRTM_REV1_DMA_STS_TXRDY;
+ }
+
+ /* Check transmitter startup OK */
+ i=0;
+ while( !(READ_REG(txrdy_reg) & txrdy_mask) && (i<1000000) ){
+ i++;
+ }
+ if ( !(READ_REG(txrdy_reg) & txrdy_mask) ){
+ /* Reset Failed */
+		DBG("GRTM: start: Resetting transmitter failed (%d)\n",i);
+ return RTEMS_IO_ERROR;
+ }
+ DBG("GRTM: reset time %d\n",i);
+
+ /* Everything is configured, the TM transmitter is started
+	 * and idle frames have been sent.
+ */
+
+ /* Mark running before enabling the DMA transmitter */
+ pDev->running = 1;
+
+ /* Enable interrupts (Error and DMA TX) */
+ regs->dma_ctrl = GRTM_DMA_CTRL_IE;
+
+ DBG("GRTM: STARTED\n");
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static void grtm_stop(struct grtm_priv *pDev)
+{
+ struct grtm_regs *regs = pDev->regs;
+
+ /* Disable the transmitter & Interrupts */
+ regs->dma_ctrl = 0;
+
+ /* Clear any pending interrupt */
+ regs->dma_status = GRTM_DMA_STS_ALL;
+
+ DBG("GRTM: STOPPED\n");
+
+ /* Flush semaphore in case a thread is stuck waiting for TX Interrupts */
+ rtems_semaphore_flush(pDev->sem_tx);
+}
+
+static rtems_device_driver grtm_open(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void *arg)
+{
+ struct grtm_priv *pDev;
+ struct drvmgr_dev *dev;
+
+ FUNCDBG();
+
+ if ( drvmgr_get_dev(&grtm_drv_info.general, minor, &dev) ) {
+ DBG("Wrong minor %d\n", minor);
+ return RTEMS_INVALID_NUMBER;
+ }
+ pDev = (struct grtm_priv *)dev->priv;
+
+ /* Wait until we get semaphore */
+ if ( rtems_semaphore_obtain(grtm_dev_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT) != RTEMS_SUCCESSFUL ){
+ return RTEMS_INTERNAL_ERROR;
+ }
+
+ /* Is device in use? */
+ if ( pDev->open ){
+ rtems_semaphore_release(grtm_dev_sem);
+ return RTEMS_RESOURCE_IN_USE;
+ }
+
+ /* Mark device taken */
+ pDev->open = 1;
+
+ rtems_semaphore_release(grtm_dev_sem);
+
+ DBG("grtm_open: OPENED minor %d (pDev: 0x%x)\n",pDev->minor,(unsigned int)pDev);
+
+ /* Set defaults */
+ pDev->config.timeout = RTEMS_NO_TIMEOUT; /* no timeout (wait forever) */
+ pDev->config.blocking = 0; /* polling mode */
+
+ pDev->running = 0; /* not in running mode yet */
+
+ memset(&pDev->config,0,sizeof(pDev->config));
+
+ /* The core has been reset when we execute here, so it is possible
+	 * to read out which hardware features are implemented in the core.
+ */
+ grtm_hw_get_implementation(pDev, &pDev->hw_avail);
+
+ /* Get default modes */
+ grtm_hw_get_default_modes(&pDev->config,&pDev->hw_avail);
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_device_driver grtm_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ struct grtm_priv *pDev;
+ struct drvmgr_dev *dev;
+
+ FUNCDBG();
+
+ if ( drvmgr_get_dev(&grtm_drv_info.general, minor, &dev) ) {
+ return RTEMS_INVALID_NUMBER;
+ }
+ pDev = (struct grtm_priv *)dev->priv;
+
+ if ( pDev->running ){
+ grtm_stop(pDev);
+ pDev->running = 0;
+ }
+
+ /* Reset core */
+ grtm_hw_reset(pDev);
+
+ /* Clear descriptor area just for sure */
+ memset(pDev->bds, 0, 0x400);
+
+ /* Mark not open */
+ pDev->open = 0;
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static rtems_device_driver grtm_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ FUNCDBG();
+ return RTEMS_NOT_IMPLEMENTED;
+}
+
+static rtems_device_driver grtm_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ FUNCDBG();
+ return RTEMS_NOT_IMPLEMENTED;
+}
+
+/* Scans the descriptor table for scheduled frames that have been sent,
+ * and moves these frames from the head of the scheduled queue to the
+ * tail of the sent queue.
+ *
+ * Also, the status of all handled frames is updated.
+ *
+ * Return Value
+ * Number of frames freed.
+ */
+static int grtm_free_sent(struct grtm_priv *pDev)
+{
+ struct grtm_ring *curr;
+ struct grtm_frame *last_frm, *first_frm;
+ int freed_frame_cnt=0;
+ unsigned int ctrl;
+
+ curr = pDev->ring_end;
+
+ /* Step into TX ring to find sent frames */
+ if ( !curr->frm ){
+ /* No scheduled frames, abort */
+ return 0;
+ }
+
+	/* Messages have been scheduled ==> scheduled messages may have been
+	 * transmitted and need to be collected.
+ */
+
+ first_frm = curr->frm;
+
+	/* Loop until the first still-enabled (unsent) frame is found.
+	 * An unused descriptor is indicated by an unassigned frm field.
+ */
+ while ( curr->frm && !((ctrl=READ_REG(&curr->bd->ctrl)) & GRTM_BD_EN) ){
+ /* Handle one sent Frame */
+
+ /* Remember last handled frame so that insertion/removal from
+ * frames lists go fast.
+ */
+ last_frm = curr->frm;
+
+ /* 1. Set flags to indicate error(s) and other information */
+ last_frm->flags |= GRTM_FLAGS_SENT; /* Mark sent */
+
+ /* Update Stats */
+ pDev->stats.frames_sent++;
+
+ /* Did packet encounter link error? */
+ if ( ctrl & GRTM_BD_UE ) {
+ pDev->stats.err_underrun++;
+ last_frm->flags |= GRRM_FLAGS_ERR;
+ }
+
+ curr->frm = NULL; /* Mark unused */
+
+ /* Increment */
+ curr = curr->next;
+ freed_frame_cnt++;
+ }
+
+ /* 1. Remove all handled frames from scheduled queue
+ * 2. Put all handled frames into sent queue
+ */
+ if ( freed_frame_cnt > 0 ){
+
+		/* Save TX ring position */
+ pDev->ring_end = curr;
+
+ /* Remove all sent frames from scheduled list */
+ if ( pDev->scheduled.tail == last_frm ){
+ /* All scheduled frames sent... */
+ pDev->scheduled.head = NULL;
+ pDev->scheduled.tail = NULL;
+ }else{
+ pDev->scheduled.head = last_frm->next;
+ }
+ last_frm->next = NULL;
+
+ /* Put all sent frames into "Sent queue" for user to
+ * collect, later on.
+ */
+ if ( !pDev->sent.head ){
+ /* Sent queue empty */
+ pDev->sent.head = first_frm;
+ pDev->sent.tail = last_frm;
+ }else{
+ pDev->sent.tail->next = first_frm;
+ pDev->sent.tail = last_frm;
+ }
+ }
+ return freed_frame_cnt;
+}
+
+
+/* Moves as many frames from the ready queue to the scheduled queue as there
+ * are free descriptors for. Each free descriptor is assigned one frame
+ * and enabled for transmission.
+ *
+ * Return Value
+ * Number of frames moved from the ready queue to the scheduled queue.
+ */
+static int grtm_schedule_ready(struct grtm_priv *pDev, int ints_off)
+{
+ int cnt;
+ unsigned int ctrl, dmactrl;
+ struct grtm_ring *curr_bd;
+ struct grtm_frame *curr_frm, *last_frm;
+ IRQ_GLOBAL_PREPARE(oldLevel);
+
+ if ( !pDev->ready.head ){
+ return 0;
+ }
+
+ cnt=0;
+ curr_frm = pDev->ready.head;
+ curr_bd = pDev->ring;
+ while( !curr_bd->frm ){
+ /* Assign frame to descriptor */
+ curr_bd->frm = curr_frm;
+
+ /* Prepare descriptor address. Three cases:
+ * - GRTM core on same bus as CPU ==> no translation (Address used by CPU = address used by GRTM)
+ * - GRTM core on remote bus, and payload address given as used by CPU ==> Translation needed
+ * - GRTM core on remote bus, and payload address given as used by GRTM ==> no translation [ USER does custom translation]
+ */
+ if ( curr_frm->flags & (GRTM_FLAGS_TRANSLATE|GRTM_FLAGS_TRANSLATE_AND_REMEMBER) ) {
+ /* Do translation */
+ drvmgr_translate(pDev->dev, CPUMEM_TO_DMA, (void *)curr_frm->payload, (void **)&curr_bd->bd->address);
+ if ( curr_frm->flags & GRTM_FLAGS_TRANSLATE_AND_REMEMBER ) {
+ if ( curr_frm->payload != curr_bd->bd->address ) {
+ /* Translation needed */
+ curr_frm->flags &= ~GRTM_FLAGS_TRANSLATE_AND_REMEMBER;
+ curr_frm->flags |= GRTM_FLAGS_TRANSLATE;
+ } else {
+				/* No translation needed */
+ curr_frm->flags &= ~(GRTM_FLAGS_TRANSLATE|GRTM_FLAGS_TRANSLATE_AND_REMEMBER);
+ }
+ }
+ } else {
+ /* Custom translation or no translation needed */
+ curr_bd->bd->address = (unsigned int)curr_frm->payload;
+ }
+
+ ctrl = GRTM_BD_EN;
+ if ( curr_bd->next == pDev->_ring ){
+ ctrl |= GRTM_BD_WR; /* Wrap around */
+ }
+ /* Apply user options/flags */
+ ctrl |= (curr_frm->flags & GRTM_FLAGS_MASK);
+
+ /* Is this Frame going to be an interrupt Frame? */
+ if ( (--pDev->enable_cnt_curr) <= 0 ){
+ if ( pDev->config.enable_cnt == 0 ){
+ pDev->enable_cnt_curr = 0x3fffffff;
+ }else{
+ pDev->enable_cnt_curr = pDev->config.enable_cnt;
+ ctrl |= GRTM_BD_IE;
+ }
+ }
+
+ /* Enable descriptor */
+ curr_bd->bd->ctrl = ctrl;
+
+ last_frm = curr_frm;
+ curr_bd = curr_bd->next;
+ cnt++;
+
+ /* Get Next Frame from Ready Queue */
+ if ( curr_frm == pDev->ready.tail ){
+ /* Handled all in ready queue. */
+ curr_frm = NULL;
+ break;
+ }
+ curr_frm = curr_frm->next;
+ }
+
+	/* Have any frames been scheduled? */
+ if ( cnt > 0 ){
+ /* Make last frame mark end of chain, probably pointless... */
+ last_frm->next = NULL;
+
+ /* Insert scheduled packets into scheduled queue */
+ if ( !pDev->scheduled.head ){
+ /* empty scheduled queue */
+ pDev->scheduled.head = pDev->ready.head;
+ pDev->scheduled.tail = last_frm;
+ }else{
+ pDev->scheduled.tail->next = pDev->ready.head;
+ pDev->scheduled.tail = last_frm;
+ }
+
+ /* Remove scheduled packets from ready queue */
+ pDev->ready.head = curr_frm;
+ if ( !curr_frm ){
+ pDev->ready.tail = NULL;
+ }
+
+		/* Update TX ring position */
+ pDev->ring = curr_bd;
+ if ( !ints_off ) {
+ IRQ_GLOBAL_DISABLE(oldLevel);
+ }
+
+ /* Make hardware aware of the newly enabled descriptors */
+ dmactrl = READ_REG(&pDev->regs->dma_ctrl);
+ dmactrl &= ~(GRTM_DMA_CTRL_TXRST | GRTM_DMA_CTRL_RST);
+ dmactrl |= GRTM_DMA_CTRL_EN;
+ pDev->regs->dma_ctrl = dmactrl;
+
+ if ( !ints_off ) {
+ IRQ_GLOBAL_ENABLE(oldLevel);
+ }
+ }
+ return cnt;
+}
+
+
+static rtems_device_driver grtm_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
+{
+ struct grtm_priv *pDev;
+ struct drvmgr_dev *dev;
+ rtems_libio_ioctl_args_t *ioarg = (rtems_libio_ioctl_args_t *)arg;
+ unsigned int *data = ioarg->buffer;
+ int status;
+ struct grtm_ioc_config *cfg;
+ struct grtm_ioc_hw_status *hwregs;
+ IRQ_GLOBAL_PREPARE(oldLevel);
+ struct grtm_list *chain;
+ struct grtm_frame *curr;
+ struct grtm_ioc_hw *hwimpl;
+ struct grtm_ioc_stats *stats;
+ int num,ret;
+
+ FUNCDBG();
+
+ if ( drvmgr_get_dev(&grtm_drv_info.general, minor, &dev) ) {
+ return RTEMS_INVALID_NUMBER;
+ }
+ pDev = (struct grtm_priv *)dev->priv;
+
+ if (!ioarg)
+ return RTEMS_INVALID_NAME;
+
+ ioarg->ioctl_return = 0;
+ switch(ioarg->command) {
+ case GRTM_IOC_START:
+ if ( pDev->running ) {
+ return RTEMS_RESOURCE_IN_USE; /* EBUSY */
+ }
+ if ( (status=grtm_start(pDev)) != RTEMS_SUCCESSFUL ){
+ return status;
+ }
+ /* Register ISR & Enable interrupt */
+ drvmgr_interrupt_register(dev, 0, "grtm", grtm_interrupt, pDev);
+
+ /* Read and write are now open... */
+ break;
+
+ case GRTM_IOC_STOP:
+ if ( !pDev->running ) {
+ return RTEMS_RESOURCE_IN_USE;
+ }
+
+ /* Disable interrupts */
+ drvmgr_interrupt_unregister(dev, 0, grtm_interrupt, pDev);
+ grtm_stop(pDev);
+ pDev->running = 0;
+ break;
+
+ case GRTM_IOC_ISSTARTED:
+ if ( !pDev->running ) {
+ return RTEMS_RESOURCE_IN_USE;
+ }
+ break;
+
+ case GRTM_IOC_SET_BLOCKING_MODE:
+ if ( (unsigned int)data > GRTM_BLKMODE_BLK ) {
+ return RTEMS_INVALID_NAME;
+ }
+ DBG("GRTM: Set blocking mode: %d\n",(unsigned int)data);
+ pDev->config.blocking = (unsigned int)data;
+ break;
+
+ case GRTM_IOC_SET_TIMEOUT:
+ DBG("GRTM: Timeout: %d\n",(unsigned int)data);
+ pDev->config.timeout = (rtems_interval)data;
+ break;
+
+ case GRTM_IOC_SET_CONFIG:
+ cfg = (struct grtm_ioc_config *)data;
+ if ( !cfg ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ if ( pDev->running ) {
+ return RTEMS_RESOURCE_IN_USE;
+ }
+
+ pDev->config = *cfg;
+ break;
+
+ case GRTM_IOC_GET_STATS:
+ stats = (struct grtm_ioc_stats *)data;
+ if ( !stats ) {
+ return RTEMS_INVALID_NAME;
+ }
+ memcpy(stats,&pDev->stats,sizeof(struct grtm_ioc_stats));
+ break;
+
+ case GRTM_IOC_CLR_STATS:
+ memset(&pDev->stats,0,sizeof(struct grtm_ioc_stats));
+ break;
+
+ case GRTM_IOC_GET_CONFIG:
+ cfg = (struct grtm_ioc_config *)data;
+ if ( !cfg ) {
+ return RTEMS_INVALID_NAME;
+ }
+
+ *cfg = pDev->config;
+ break;
+
+ case GRTM_IOC_GET_OCFREG:
+ if ( !pDev->hw_avail.ocf ) {
+ /* Hardware does not implement the OCF register */
+ return RTEMS_NOT_DEFINED;
+ }
+ if ( !data ) {
+ return RTEMS_INVALID_NAME;
+ }
+ *(unsigned int **)data = (unsigned int *)&pDev->regs->ocf;
+ break;
+
+ case GRTM_IOC_GET_HW_IMPL:
+ hwimpl = (struct grtm_ioc_hw *)data;
+ if ( !hwimpl ) {
+ return RTEMS_INVALID_NAME;
+ }
+ *hwimpl = pDev->hw_avail;
+ break;
+
+ case GRTM_IOC_GET_HW_STATUS:
+ hwregs = (struct grtm_ioc_hw_status *)data;
+ if ( !hwregs ) {
+ return RTEMS_INVALID_NAME;
+ }
+ /* We disable interrupt in order to get a snapshot of the registers */
+ IRQ_GLOBAL_DISABLE(oldLevel);
+#warning IMPLEMENT HWREGS
+ IRQ_GLOBAL_ENABLE(oldLevel);
+ break;
+
+ /* Put a chain of frames at the back of the "Ready frames" queue. This
+ * triggers the driver to put frames from the Ready queue into unused
+ * available descriptors. (Ready -> Scheduled)
+ */
+
+ case GRTM_IOC_SEND:
+ if ( !pDev->running ){
+ return RTEMS_RESOURCE_IN_USE;
+ }
+ num=0;
+
+ /* Get pointer to frame chain wished be sent */
+ chain = (struct grtm_list *)ioarg->buffer;
+ if ( !chain ){
+			/* No new frames to send ==> just trigger the hardware
+			 * to transmit frames previously placed in the ready queue.
+ */
+ pDev->handling_transmission = 1;
+ goto trigger_transmission;
+ }
+ if ( !chain->tail || !chain->head ){
+ return RTEMS_INVALID_NAME;
+ }
+
+ DBG("GRTM_SEND: head: 0x%x, tail: 0x%x\n",chain->head,chain->tail);
+
+		/* Mark frames unsent by clearing GRTM_FLAGS_SENT from all frames in the chain */
+
+ curr = chain->head;
+ while(curr != chain->tail){
+ curr->flags = curr->flags & ~(GRTM_FLAGS_SENT|GRRM_FLAGS_ERR);
+ curr = curr->next;
+ num++;
+ }
+ curr->flags = curr->flags & ~(GRTM_FLAGS_SENT|GRRM_FLAGS_ERR);
+ num++;
+
+ pDev->handling_transmission = 1;
+ /* 1. Put frames into ready queue
+ * (New Frames->READY)
+ */
+ if ( pDev->ready.head ){
+ /* Frames already on ready queue (no free descriptors previously) ==>
+ * Put frames at end of ready queue
+ */
+ pDev->ready.tail->next = chain->head;
+ pDev->ready.tail = chain->tail;
+ chain->tail->next = NULL;
+ }else{
+			/* All frames are put into the ready queue for later processing */
+ pDev->ready.head = chain->head;
+ pDev->ready.tail = chain->tail;
+ chain->tail->next = NULL;
+ }
+ pDev->ready_cnt += num; /* Added 'num' frames to ready queue */
+trigger_transmission:
+ /* 2. Free used descriptors and put the sent frame into the "Sent queue"
+ * (SCHEDULED->SENT)
+ */
+ num = grtm_free_sent(pDev);
+ pDev->scheduled_cnt -= num;
+ pDev->sent_cnt += num;
+
+ /* 3. Use all available free descriptors there are frames for
+ * in the ready queue.
+ * (READY->SCHEDULED)
+ */
+ num = grtm_schedule_ready(pDev,0);
+ pDev->ready_cnt -= num;
+ pDev->scheduled_cnt += num;
+
+ pDev->handling_transmission = 0;
+ break;
+
+ /* Take all available sent frames from the "Sent frames" queue.
+	 * If no frames have been sent, the thread may block if in blocking
+	 * mode. Blocking mode is not available if the driver is not in running mode.
+	 *
+	 * Note that this ioctl may return success even if the driver is not in STARTED mode.
+	 * This is because in case of an error (link error or similar) where the driver switches
+	 * from START to STOP mode, we must still be able to get our frames back.
+	 *
+	 * Note that if the driver fails to send a frame for some reason (link error),
+	 * the sent flag is set to 0 to indicate a failure.
+ */
+ case GRTM_IOC_RECLAIM:
+		/* Get pointer to where to place the reclaimed chain */
+ chain = (struct grtm_list *)ioarg->buffer;
+ if ( !chain ){
+ return RTEMS_INVALID_NAME;
+ }
+
+ /* Lock out interrupt handler */
+ pDev->handling_transmission = 1;
+
+ do {
+ /* Move sent frames from descriptors to Sent queue. This makes more
+ * descriptors (BDs) available.
+ */
+ num = grtm_free_sent(pDev);
+ pDev->scheduled_cnt -= num;
+ pDev->sent_cnt += num;
+
+
+ if ( pDev->running ){
+ /* Fill descriptors with as many frames from the ready list
+ * as possible.
+ */
+ num = grtm_schedule_ready(pDev,0);
+ pDev->ready_cnt -= num;
+ pDev->scheduled_cnt += num;
+ }
+
+ /* Are there any frames on the sent queue waiting to be
+ * reclaimed?
+ */
+
+ if ( !pDev->sent.head ){
+ /* No frames to reclaim - no frame in sent queue.
+ * Instead we block thread until frames have been sent
+ * if in blocking mode.
+ */
+ if ( pDev->running && pDev->config.blocking ){
+ ret = rtems_semaphore_obtain(pDev->sem_tx,RTEMS_WAIT,pDev->config.timeout);
+ if ( ret == RTEMS_TIMEOUT ) {
+ pDev->handling_transmission = 0;
+ return RTEMS_TIMEOUT;
+ } else if ( ret == RTEMS_SUCCESSFUL ) {
+ /* There might be frames available, go check */
+ continue;
+ } else {
+ /* any error (driver closed, internal error etc.) */
+ pDev->handling_transmission = 0;
+ return RTEMS_UNSATISFIED;
+ }
+
+ }else{
+ /* non-blocking mode, we quit */
+ chain->head = NULL;
+ chain->tail = NULL;
+ /* do not lock out interrupt handler any more */
+ pDev->handling_transmission = 0;
+ return RTEMS_TIMEOUT;
+ }
+ }else{
+			/* Take all sent frames from the sent queue to the userspace queue */
+ chain->head = pDev->sent.head;
+ chain->tail = pDev->sent.tail;
+ chain->tail->next = NULL; /* Just for sure */
+
+ /* Mark no Sent */
+ grtm_list_clr(&pDev->sent);
+ pDev->sent_cnt = 0;
+
+ DBG("TX_RECLAIM: head: 0x%x, tail: 0x%x\n",chain->head,chain->tail);
+ break;
+ }
+
+ }while(1);
+
+ /* do not lock out interrupt handler any more */
+ pDev->handling_transmission = 0;
+ break;
+
+ default:
+ return RTEMS_NOT_DEFINED;
+ }
+ return RTEMS_SUCCESSFUL;
+}
+
+static void grtm_interrupt(void *arg)
+{
+ struct grtm_priv *pDev = arg;
+ struct grtm_regs *regs = pDev->regs;
+ unsigned int status;
+ int num;
+
+	/* Read interrupt status */
+ status = READ_REG(&regs->dma_status);
+
+ /* Spurious Interrupt? */
+ if ( !pDev->running )
+ return;
+
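+	/* Acknowledge the asserted status bits by writing them back */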
+ if ( status )
+ regs->dma_status = status;
+
+ if ( status & GRTM_DMA_STS_TFF ){
+ pDev->stats.err_transfer_frame++;
+ }
+
+ if ( status & GRTM_DMA_STS_TA ){
+ pDev->stats.err_ahb++;
+ }
+
+ if ( status & GRTM_DMA_STS_TE ){
+ pDev->stats.err_tx++;
+ }
+
+ if ( status & GRTM_DMA_STS_TI ){
+
+ if ( pDev->config.isr_desc_proc && !pDev->handling_transmission ) {
+ /* Free used descriptors and put the sent frame into the "Sent queue"
+ * (SCHEDULED->SENT)
+ */
+ num = grtm_free_sent(pDev);
+ pDev->scheduled_cnt -= num;
+ pDev->sent_cnt += num;
+
+ /* Use all available free descriptors there are frames for
+ * in the ready queue.
+ * (READY->SCHEDULED)
+ */
+ num = grtm_schedule_ready(pDev,1);
+ pDev->ready_cnt -= num;
+ pDev->scheduled_cnt += num;
+
+#if 0
+ if ( (pDev->config.blocking==GRTM_BLKMODE_COMPLETE) && pDev->timeout ){
+ /* Signal to thread only if enough data is available */
+ if ( pDev->wait_for_frames > grtm_data_avail(pDev) ){
+ /* Not enough data available */
+ goto procceed_processing_interrupts;
+ }
+
+ /* Enough number of frames has been transmitted which means that
+ * the waiting thread should be woken up.
+ */
+ rtems_semaphore_release(pDev->sem_tx);
+ }
+#endif
+ }
+
+ if ( pDev->config.blocking == GRTM_BLKMODE_BLK ) {
+ /* Blocking mode */
+
+#if 0
+ /* Disable further Interrupts until handled by waiting task. */
+ regs->dma_ctrl = READ_REG(&regs->dma_ctrl) & ~GRTM_DMA_CTRL_IE;
+#endif
+
+ /* Signal Semaphore to wake waiting thread in ioctl(SEND|RECLAIM) */
+ rtems_semaphore_release(pDev->sem_tx);
+ }
+
+ }
+
+procceed_processing_interrupts:
+ ;
+}
+
+static rtems_device_driver grtm_initialize(
+ rtems_device_major_number major,
+ rtems_device_minor_number unused,
+ void *arg
+ )
+{
+ /* Device Semaphore created with count = 1 */
+ if ( rtems_semaphore_create(rtems_build_name('G', 'R', 'T', 'M'),
+ 1,
+ RTEMS_FIFO|RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
+ 0,
+ &grtm_dev_sem) != RTEMS_SUCCESSFUL ) {
+ return RTEMS_INTERNAL_ERROR;
+ }
+
+ return RTEMS_SUCCESSFUL;
+}