From 7eb606d393306da25fd6e6aa7f8595ffb2e924fc Mon Sep 17 00:00:00 2001 From: Sebastian Huber Date: Sat, 22 Dec 2018 18:31:04 +0100 Subject: grlib: Move source files Update #3678. --- bsps/shared/grlib/tmtc/grtc.c | 1984 +++++++++++++++++++++++++++++++++++++++++ bsps/shared/grlib/tmtc/grtm.c | 1613 +++++++++++++++++++++++++++++++++ 2 files changed, 3597 insertions(+) create mode 100644 bsps/shared/grlib/tmtc/grtc.c create mode 100644 bsps/shared/grlib/tmtc/grtm.c (limited to 'bsps/shared/grlib/tmtc') diff --git a/bsps/shared/grlib/tmtc/grtc.c b/bsps/shared/grlib/tmtc/grtc.c new file mode 100644 index 0000000000..44e9685c3a --- /dev/null +++ b/bsps/shared/grlib/tmtc/grtc.c @@ -0,0 +1,1984 @@ +/* GRTC Telecommand decoder driver + * + * COPYRIGHT (c) 2007. + * Cobham Gaisler AB. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +/* +#define DEBUG +#define DEBUGFUNCS +*/ + +#include + +#ifdef DEBUG_ERROR +#define DEBUG_ERR_LOG(device,error) grtc_log_error(device,error) +#else +#define DEBUG_ERR_LOG(device,error) +#endif + +/* GRTC register map */ +struct grtc_regs { + volatile unsigned int grst; /* Global Reset Register (GRR 0x00) */ + volatile unsigned int gctrl; /* Global Control Register (GCR 0x04) */ + int unused0; + volatile unsigned int sir; /* Spacecraft Identifier Register (SIR 0x0c) */ + volatile unsigned int far; /* Frame Acceptance Report Register (FAR 0x10) */ + + volatile unsigned int clcw1; /* CLCW Register 1 (CLCWR1 0x14) */ + volatile unsigned int clcw2; /* CLCW Register 2 (CLCWR2 0x18) */ + volatile unsigned int phir; /* Physical Interface Register (PHIR 0x1c) */ + volatile unsigned int cor; /* Control Register (COR 0x20) */ + + volatile unsigned int str; /* Status Register (STR 0x24) */ + volatile 
unsigned int asr; /* Address Space Register (ASR 0x28) */ + volatile unsigned int rp; /* Receive Read Pointer Register (RRP 0x2c) */ + volatile unsigned int wp; /* Receive Write Pointer Register (RWP 0x30) */ + + int unused1[(0x60-0x34)/4]; + + volatile unsigned int pimsr; /* Pending Interrupt Masked Status Register (PIMSR 0x60) */ + volatile unsigned int pimr; /* Pending Interrupt Masked Register (PIMR 0x64) */ + volatile unsigned int pisr; /* Pending Interrupt Status Register (PISR 0x68) */ + volatile unsigned int pir; /* Pending Interrupt Register (PIR 0x6c) */ + volatile unsigned int imr; /* Interrupt Mask Register (IMR 0x70) */ + volatile unsigned int picr; /* Pending Interrupt Clear Register (PICR 0x74) */ +}; + +/* Security Byte */ +#define GRTC_SEB 0x55000000 + +/* Global Reset Register (GRR 0x00) */ +#define GRTC_GRR_SRST 0x1 +#define GRTC_GRR_SRST_BIT 0 + +/* Global Control Register (GCR 0x04) */ +#define GRTC_GCR_PSR_BIT 10 +#define GRTC_GCR_NRZM_BIT 11 +#define GRTC_GCR_PSS_BIT 12 + +#define GRTC_GCR_PSR (1<minor_drv, dev->parent->dev->name); + priv = dev->priv; + if ( !priv ) + return DRVMGR_NOMEM; + priv->dev = dev; + + /* This core will not find other cores, so we wait for init2() */ + + return DRVMGR_OK; +} + +static int grtc_init3(struct drvmgr_dev *dev) +{ + struct grtc_priv *priv; + char prefix[32]; + rtems_status_code status; + + priv = dev->priv; + + /* Do initialization */ + + if ( grtc_driver_io_registered == 0) { + /* Register the I/O driver only once for all cores */ + if ( grtc_register_io(&grtc_driver_io_major) ) { + /* Failed to register I/O driver */ + dev->priv = NULL; + return DRVMGR_FAIL; + } + + grtc_driver_io_registered = 1; + } + + /* I/O system registered and initialized + * Now we take care of device initialization. 
+ */ + if ( grtc_device_init(priv) ) { + return DRVMGR_FAIL; + } + + /* Get Filesystem name prefix */ + prefix[0] = '\0'; + if ( drvmgr_get_dev_prefix(dev, prefix) ) { + /* Failed to get prefix, make sure of a unique FS name + * by using the driver minor. + */ + sprintf(priv->devName, "/dev/grtc%d", dev->minor_drv); + } else { + /* Got special prefix, this means we have a bus prefix + * And we should use our "bus minor" + */ + sprintf(priv->devName, "/dev/%sgrtc%d", prefix, dev->minor_bus); + } + + SPIN_INIT(&priv->devlock, priv->devName); + + /* Register Device */ + status = rtems_io_register_name(priv->devName, grtc_driver_io_major, dev->minor_drv); + if (status != RTEMS_SUCCESSFUL) { + return DRVMGR_FAIL; + } + + return DRVMGR_OK; +} + +/******************* Driver Implementation ***********************/ + +static int grtc_register_io(rtems_device_major_number *m) +{ + rtems_status_code r; + + if ((r = rtems_io_register_driver(0, &grtc_driver, m)) == RTEMS_SUCCESSFUL) { + DBG("GRTC driver successfully registered, major: %d\n", *m); + } else { + switch(r) { + case RTEMS_TOO_MANY: + printk("GRTC rtems_io_register_driver failed: RTEMS_TOO_MANY\n"); + return -1; + case RTEMS_INVALID_NUMBER: + printk("GRTC rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n"); + return -1; + case RTEMS_RESOURCE_IN_USE: + printk("GRTC rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n"); + return -1; + default: + printk("GRTC rtems_io_register_driver failed\n"); + return -1; + } + } + return 0; +} + +static int grtc_device_init(struct grtc_priv *pDev) +{ + struct amba_dev_info *ambadev; + struct ambapp_core *pnpinfo; + + /* Get device information from AMBA PnP information */ + ambadev = (struct amba_dev_info *)pDev->dev->businfo; + if ( ambadev == NULL ) { + return -1; + } + pnpinfo = &ambadev->info; + pDev->irq = pnpinfo->irq; + pDev->regs = (struct grtc_regs *)pnpinfo->ahb_slv->start[0]; + pDev->minor = pDev->dev->minor_drv; + pDev->open = 0; + pDev->running = 0; + + /* 
Create Binary RX Semaphore with count = 0 */ + if ( rtems_semaphore_create(rtems_build_name('G', 'R', 'C', '0' + pDev->minor), + 0, + RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|\ + RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING, + 0, + &pDev->sem_rx) != RTEMS_SUCCESSFUL ) { + return -1; + } + + /* Reset Hardware before attaching IRQ handler */ + grtc_hw_reset(pDev); + + return 0; +} + +static void grtc_hw_reset(struct grtc_priv *priv) +{ + /* Reset Core */ + priv->regs->grst = GRTC_SEB | GRTC_GRR_SRST; +} + +static void grtc_hw_get_defaults(struct grtc_priv *pDev, struct grtc_ioc_config *config) +{ + unsigned int gcr = READ_REG(&pDev->regs->gctrl); + + config->psr_enable = (gcr & GRTC_GCR_PSR) ? 1:0; + config->nrzm_enable = (gcr & GRTC_GCR_NRZM) ? 1:0; + config->pss_enable = (gcr & GRTC_GCR_PSS) ? 1:0; + + config->crc_calc = 0; +} + +/* bufsize is given in bytes */ +static int __inline__ grtc_hw_data_avail_upper(unsigned int rrp, unsigned rwp, unsigned int bufsize) +{ + if ( rrp == rwp ) + return 0; + + if ( rwp > rrp ) { + return rwp-rrp; + } + + return (bufsize-rrp); +} + +/* bufsize is given in bytes */ +static int __inline__ grtc_hw_data_avail_lower(unsigned int rrp, unsigned rwp, unsigned int bufsize) +{ + if ( rrp == rwp ) + return 0; + + if ( rwp > rrp ) { + return 0; + } + + return rwp; +} + +/* bufsize is given in bytes */ +static int __inline__ grtc_hw_data_avail(unsigned int rrp, unsigned rwp, unsigned int bufsize) +{ + if ( rrp == rwp ) + return 0; + + if ( rwp > rrp ) { + return rwp-rrp; + } + + return rwp+(bufsize-rrp); +} + +/* Reads as much as possible but not more than 'max' bytes from the TC receive buffer. + * Number of bytes put into 'buf' is returned. 
+ */ +static int grtc_hw_read_try(struct grtc_priv *pDev, char *buf, int max) +{ + struct grtc_regs *regs = pDev->regs; + unsigned int rp, wp, asr, bufmax, rrp, rwp; + unsigned int upper, lower; + unsigned int count, cnt, left; + + FUNCDBG(); + + if ( max < 1 ) + return 0; + + rp = READ_REG(®s->rp); + asr = READ_REG(®s->asr); + bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT; + bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */ + wp = READ_REG(®s->wp); + + /* Relative rp and wp */ + rrp = rp - (asr & GRTC_ASR_BUFST); + rwp = wp - (asr & GRTC_ASR_BUFST); + + lower = grtc_hw_data_avail_lower(rrp,rwp,bufmax); + upper = grtc_hw_data_avail_upper(rrp,rwp,bufmax); + + DBG("grtc_hw_read_try: AVAIL: Lower: %d, Upper: %d\n",lower,upper); + DBG("grtc_hw_read_try: rp: 0x%x, rrp: 0x%x, wp: 0x%x, rwp: 0x%x, bufmax: %d\n, start: 0x%x\n", + rp,rrp,wp,rwp,bufmax,pDev->buffer); + + if ( (upper+lower) == 0 ) + return 0; + + /* Count bytes will be read */ + count = (upper+lower) > max ? 
max : (upper+lower); + left = count; + + /* Read from upper part of data buffer */ + if ( upper > 0 ){ + if ( left < upper ){ + cnt = left; + }else{ + cnt = upper; /* Read all upper data available */ + } + DBG("grtc_hw_read_try: COPYING %d from upper\n",cnt); + /* Convert from Remote address (RP) into CPU Local address */ + memcpy(buf, (void *)((rp - (unsigned int)pDev->buf_remote) + (unsigned int)pDev->buf), cnt); + buf += cnt; + left -= cnt; + } + + /* Read from lower part of data buffer */ + if ( left > 0 ){ + if ( left < lower ){ + cnt = left; + }else{ + cnt = lower; /* Read all lower data available */ + } + DBG("grtc_hw_read_try: COPYING %d from lower\n",cnt); + memcpy(buf, (void *)pDev->buf, cnt); + buf += cnt; + left -= cnt; + } + + /* Update hardware RP pointer to tell hardware about new space available */ + if ( (rp+count) >= ((asr&GRTC_ASR_BUFST)+bufmax) ){ + regs->rp = (rp+count-bufmax); + } else { + regs->rp = rp+count; + } + + return count; +} + +/* Reads as much as possible but not more than 'max' bytes from the TC receive buffer. + * Number of bytes put into 'buf' is returned. 
+ */ +static int grtc_data_avail(struct grtc_priv *pDev) +{ + unsigned int rp, wp, asr, bufmax, rrp, rwp; + struct grtc_regs *regs = pDev->regs; + + FUNCDBG(); + + rp = READ_REG(®s->rp); + asr = READ_REG(®s->asr); + bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT; + bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */ + wp = READ_REG(®s->wp); + + /* Relative rp and wp */ + rrp = rp - (asr & GRTC_ASR_BUFST); + rwp = wp - (asr & GRTC_ASR_BUFST); + + return grtc_hw_data_avail(rrp,rwp,bufmax); +} + +static void *grtc_memalign(unsigned int boundary, unsigned int length, void *realbuf) +{ + *(int *)realbuf = (int)grlib_malloc(length+(~GRTC_ASR_BUFST)+1); + DBG("GRTC: Alloced %d (0x%x) bytes, requested: %d\n",length+(~GRTC_ASR_BUFST)+1,length+(~GRTC_ASR_BUFST)+1,length); + return (void *)(((*(unsigned int *)realbuf)+(~GRTC_ASR_BUFST)+1) & ~(boundary-1)); +} + +static int grtc_start(struct grtc_priv *pDev) +{ + struct grtc_regs *regs = pDev->regs; + unsigned int tmp; + + if ( !pDev->buf || (((unsigned int)pDev->buf & ~GRTC_ASR_BUFST) != 0) || + (pDev->len>(1024*0x100)) || (pDev->len<1024) || ((pDev->len & (1024-1)) != 0) + ) { + DBG("GRTC: start: buffer not properly allocated(0x%x,0x%x,0x%x,0x%x)\n",pDev->buf,pDev->len,((unsigned int)pDev->buf & ~GRTC_ASR_BUFST),(pDev->len & ~(1024-1))); + return RTEMS_NO_MEMORY; + } + + memset(pDev->buf,0,pDev->len); + + /* Software init */ + pDev->overrun_condition = 0; +#ifdef DEBUG_ERROR + pDev->last_error_cnt = 0; + memset(&pDev->last_error[0],0,128*sizeof(int)); +#endif + memset(&pDev->stats,0,sizeof(struct grtc_ioc_stats)); + + /* Reset the receiver */ + regs->cor = GRTC_SEB | GRTC_COR_CRST; + if ( READ_REG(®s->cor) & GRTC_COR_CRST ){ + /* Reset Failed */ + DBG("GRTC: start: Reseting receiver failed\n"); + return RTEMS_IO_ERROR; + } + + /* make sure the RX semaphore is in the correct state when starting. 
+ * In case of a previous overrun condition it could be in incorrect + * state (where rtems_semaphore_flush was used). + */ + rtems_semaphore_obtain(pDev->sem_rx, RTEMS_NO_WAIT, 0); + + /* Set operating modes */ + tmp = 0; + if ( pDev->config.psr_enable ) + tmp |= GRTC_GCR_PSR; + if ( pDev->config.nrzm_enable ) + tmp |= GRTC_GCR_NRZM; + if ( pDev->config.pss_enable ) + tmp |= GRTC_GCR_PSS; + regs->gctrl = GRTC_SEB | tmp; + + /* Clear any pending interrupt */ + tmp = READ_REG(®s->pir); + regs->picr = GRTC_INT_ALL; + + /* Unmask only the Overrun interrupt */ + regs->imr = GRTC_INT_OV; + + /* Set up DMA registers + * 1. Let hardware know about our DMA area (size and location) + * 2. Set DMA read/write posistions to zero. + */ + regs->asr = (unsigned int)pDev->buf_remote | ((pDev->len>>10)-1); + regs->rp = (unsigned int)pDev->buf_remote; + + /* Mark running before enabling the receiver, we could receive + * an interrupt directly after enabling the receiver and it would + * then interpret the interrupt as spurious (see interrupt handler) + */ + pDev->running = 1; + + /* Enable receiver */ + regs->cor = GRTC_SEB | GRTC_COR_RE; + + DBG("GRTC: STARTED\n"); + + return 0; +} + +static void grtc_stop(struct grtc_priv *pDev, int overrun) +{ + struct grtc_regs *regs = pDev->regs; + SPIN_IRQFLAGS(irqflags); + + SPIN_LOCK_IRQ(&pDev->devlock, irqflags); + + /* Disable the receiver */ + regs->cor = GRTC_SEB; + + /* disable all interrupts and clear them */ + regs->imr = 0; + READ_REG(®s->pir); + regs->picr = GRTC_INT_ALL; + + DBG("GRTC: STOPPED\n"); + + if (overrun) { + pDev->overrun_condition = 1; + } else { + pDev->running = 0; + } + + SPIN_UNLOCK_IRQ(&pDev->devlock, irqflags); + + /* Flush semaphores in case a thread is stuck waiting for CLTUs (RX data) */ + rtems_semaphore_flush(pDev->sem_rx); +} + +/* Wait until 'count' bytes are available in receive buffer, or until + * the timeout expires. 
+ */ +static int grtc_wait_data(struct grtc_priv *pDev, int count, rtems_interval timeout) +{ + int avail; + int ret; + SPIN_IRQFLAGS(irqflags); + + FUNCDBG(); + + if ( count < 1 ) + return 0; + + SPIN_LOCK_IRQ(&pDev->devlock, irqflags); + + /* Enable interrupts when receiving CLTUs, Also clear old pending CLTUs store + * interrupts. + */ + pDev->regs->picr = GRTC_INT_CS; + pDev->regs->imr = READ_REG(&pDev->regs->imr) | GRTC_INT_CS; + + avail = grtc_data_avail(pDev); + if ( avail < count ) { + /* Wait for interrupt. */ + + SPIN_UNLOCK_IRQ(&pDev->devlock, irqflags); + + if ( timeout == 0 ){ + timeout = RTEMS_NO_TIMEOUT; + } + ret = rtems_semaphore_obtain(pDev->sem_rx,RTEMS_WAIT,timeout); + /* RTEMS_SUCCESSFUL = interrupt signaled data is available + * RTEMS_TIMEOUT = timeout expired, probably not enough data available + * RTEMS_UNSATISFIED = driver has been closed or an error (overrun) occured + * which should cancel this operation. + * RTEMS_OBJECT_WAS_DELETED, RTEMS_INVALID_ID = driver error. + */ + SPIN_LOCK_IRQ(&pDev->devlock, irqflags); + }else{ + ret = RTEMS_SUCCESSFUL; + } + + /* Disable interrupts when receiving CLTUs */ + pDev->regs->imr = READ_REG(&pDev->regs->imr) & ~GRTC_INT_CS; + + SPIN_UNLOCK_IRQ(&pDev->devlock, irqflags); + + return ret; +} + +static rtems_device_driver grtc_open( + rtems_device_major_number major, + rtems_device_minor_number minor, + void *arg) +{ + struct grtc_priv *pDev; + struct drvmgr_dev *dev; + + FUNCDBG(); + + if ( drvmgr_get_dev(&grtc_drv_info.general, minor, &dev) ) { + DBG("Wrong minor %d\n", minor); + return RTEMS_INVALID_NUMBER; + } + pDev = (struct grtc_priv *)dev->priv; + + /* Wait until we get semaphore */ + if ( rtems_semaphore_obtain(grtc_dev_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT) != RTEMS_SUCCESSFUL ){ + return RTEMS_INTERNAL_ERROR; + } + + /* Is device in use? 
*/ + if ( pDev->open ){ + rtems_semaphore_release(grtc_dev_sem); + return RTEMS_RESOURCE_IN_USE; + } + + /* Mark device taken */ + pDev->open = 1; + + rtems_semaphore_release(grtc_dev_sem); + + DBG("grtc_open: OPENED minor %d (pDev: 0x%x)\n",pDev->minor,(unsigned int)pDev); + + /* Set defaults */ + pDev->buf = NULL; + pDev->_buf = NULL; + pDev->buf_custom = 0; + pDev->buf_remote = 0; + pDev->len = 0; + pDev->timeout = 0; /* no timeout */ + pDev->blocking = 0; /* polling mode */ + pDev->mode = GRTC_MODE_RAW; /* Always default to Raw mode */ + pDev->ready.head = NULL; + pDev->ready.tail = NULL; + pDev->ready.cnt = 0; + + pDev->running = 0; + pDev->overrun_condition = 0; + + memset(&pDev->config,0,sizeof(pDev->config)); + + /* The core has been reset when we execute here, so it is possible + * to read out defualts from core. + */ + grtc_hw_get_defaults(pDev,&pDev->config); + + return RTEMS_SUCCESSFUL; +} + +static rtems_device_driver grtc_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg) +{ + struct grtc_priv *pDev; + struct drvmgr_dev *dev; + + FUNCDBG(); + + if ( drvmgr_get_dev(&grtc_drv_info.general, minor, &dev) ) { + return RTEMS_INVALID_NUMBER; + } + pDev = (struct grtc_priv *)dev->priv; + + if ( pDev->running ){ + grtc_stop(pDev, 0); + } + + /* Reset core */ + grtc_hw_reset(pDev); + + /* Mark not open */ + pDev->open = 0; + + return RTEMS_SUCCESSFUL; +} + +static rtems_device_driver grtc_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg) +{ + struct grtc_priv *pDev; + struct drvmgr_dev *dev; + int count; + int left; + int timedout; + int err; + rtems_interval timeout; + rtems_libio_rw_args_t *rw_args; + + FUNCDBG(); + + if ( drvmgr_get_dev(&grtc_drv_info.general, minor, &dev) ) { + return RTEMS_INVALID_NUMBER; + } + pDev = (struct grtc_priv *)dev->priv; + + if ( !pDev->running && !pDev->overrun_condition ) { + return RTEMS_RESOURCE_IN_USE; + } + + if ( pDev->mode != GRTC_MODE_RAW ) { + return 
RTEMS_NOT_DEFINED; + } + + rw_args = (rtems_libio_rw_args_t *) arg; + left = rw_args->count; + timedout = 0; + timeout = pDev->timeout; + +read_from_buffer: + /* Read maximally rw_args->count bytes from receive buffer */ + count = grtc_hw_read_try(pDev,rw_args->buffer,left); + + left -= count; + + DBG("READ %d bytes from DMA, left: %d\n",count,left); + + if ( !timedout && !pDev->overrun_condition && ((count < 1) || ((count < rw_args->count) && (pDev->blocking == GRTC_BLKMODE_COMPLETE))) ){ + /* didn't read anything (no data available) or we want to wait for all bytes requested. + * + * Wait for data to arrive only in blocking mode + */ + if ( pDev->blocking ) { + if ( (err=grtc_wait_data(pDev,left,timeout)) != RTEMS_SUCCESSFUL ){ + /* Some kind of error, closed, overrun etc. */ + if ( err == RTEMS_TIMEOUT ){ + /* Got a timeout, we try to read as much as possible */ + timedout = 1; + goto read_from_buffer; + } + return err; + } + goto read_from_buffer; + } + /* Non-blocking mode and no data read. */ + return RTEMS_TIMEOUT; + } + + /* Tell caller how much was read. */ + + DBG("READ returning %d bytes, left: %d\n",rw_args->count-left,left); + + rw_args->bytes_moved = rw_args->count - left; + if ( rw_args->bytes_moved == 0 ) { + if ( pDev->overrun_condition ) { + /* signal to the user that overrun has happend when + * no more data can be read out. 
+ */ + return RTEMS_IO_ERROR; + } + return RTEMS_TIMEOUT; + } + + return RTEMS_SUCCESSFUL; +} + +static rtems_device_driver grtc_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg) +{ + FUNCDBG(); + return RTEMS_NOT_IMPLEMENTED; +} + +static int grtc_pool_add_frms(struct grtc_frame *frms) +{ + struct grtc_frame *frm, *next; + + /* Add frames to pools */ + frm = frms; + while(frm){ + + if ( !frm->pool ) { + /* */ + DBG("GRTC: Frame not assigned to a pool\n"); + return -1; + } + next = frm->next; /* Remember next frame to process */ + + DBG("GRTC: adding frame 0x%x to pool %d (%d)\n",frm,frm->pool->frame_len,frm->pool->frame_cnt); + + /* Insert Frame into pool */ + frm->next = frm->pool->frms; + frm->pool->frms = frm; + frm->pool->frame_cnt++; + + frm = next; + } + + return 0; +} + +static struct grtc_frame *grtc_pool_get_frm(struct grtc_priv *pDev, int frame_len, int *error) +{ + struct grtc_frame *frm; + struct grtc_frame_pool *pool; + int i; + + /* Loop through all pools until a pool is found + * with a matching (or larger) frame length + */ + pool = pDev->pools; + for (i=0; ipool_cnt; i++,pool++) { + if ( pool->frame_len >= frame_len ) { + /* Found a good pool ==> get frame */ + frm = pool->frms; + if ( !frm ) { + /* not enough frames available for this + * frame length, we try next + * + * If this is a severe error add your handling + * code here. + */ +#if 0 + if ( error ) + *error = 0; + return 0; +#endif + continue; + } + + /* Got a frame, the frame is taken out of the + * pool for usage. 
+ */ + pool->frms = frm->next; + pool->frame_cnt--; + return frm; + } + } + + if ( error ) + *error = 1; + + /* Didn't find any frames */ + return NULL; +} + +/* Return number of bytes processed, Stops at the first occurance + * of the pattern given in 'pattern' + */ +static int grtc_scan(unsigned short *src, int max, unsigned char pattern, int *found) +{ + unsigned short tmp = 0; + unsigned int left = max; + + while ( (left>1) && (((tmp=*src) & 0x00ff) != pattern) ) { + src++; + left-=2; + } + if ( (tmp & 0xff) == pattern ) { + *found = 1; + } else { + *found = 0; + } + return max-left; +} + +static int grtc_copy(unsigned short *src, unsigned char *buf, int cnt) +{ + unsigned short tmp; + int left = cnt; + + while ( (left>0) && ((((tmp=*src) & 0x00ff) == 0x00) || ((tmp & 0x00ff) == 0x01)) ) { + *buf++ = tmp>>8; + src++; + left--; + } + + return cnt-left; +} + + +static int grtc_hw_find_frm(struct grtc_priv *pDev) +{ + struct grtc_regs *regs = pDev->regs; + unsigned int rp, wp, asr, bufmax, rrp, rwp; + unsigned int upper, lower; + unsigned int count, cnt; + int found; + + FUNCDBG(); + + rp = READ_REG(®s->rp); + asr = READ_REG(®s->asr); + wp = READ_REG(®s->wp); + + /* Quick Check for most common case where Start of frame is at next + * data byte. 
+ */ + if ( rp != wp ) { + /* At least 1 byte in buffer */ + if ( ((*(unsigned short *)((rp - (unsigned int)pDev->buf_remote) + (unsigned int)pDev->buf)) & 0x00ff) == 0x01 ) { + return 0; + } + } + + bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT; + bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */ + + /* Relative rp and wp */ + rrp = rp - (asr & GRTC_ASR_BUFST); + rwp = wp - (asr & GRTC_ASR_BUFST); + + lower = grtc_hw_data_avail_lower(rrp,rwp,bufmax); + upper = grtc_hw_data_avail_upper(rrp,rwp,bufmax); + + DBG("grtc_hw_find_frm: AVAIL: Lower: %d, Upper: %d\n",lower,upper); + DBG("grtc_hw_find_frm: rp: 0x%x, rrp: 0x%x, wp: 0x%x, rwp: 0x%x, bufmax: %d\n, start: 0x%x\n", + rp,rrp,wp,rwp,bufmax,pDev->buf_remote); + + if ( (upper+lower) == 0 ) + return 1; + + /* Count bytes will be read */ + count = 0; + found = 0; + + /* Read from upper part of data buffer */ + if ( upper > 0 ){ + cnt = grtc_scan((unsigned short *)((rp - (unsigned int)pDev->buf_remote) + (unsigned int)pDev->buf), upper, 0x01, &found); + count = cnt; + if ( found ) { + DBG("grtc_hw_find_frm: SCANNED upper %d bytes until found\n",cnt); + goto out; + } + + DBG("grtc_hw_find_frm: SCANNED all upper %d bytes, not found\n",cnt); + } + + /* Read from lower part of data buffer */ + if ( lower > 0 ){ + cnt = grtc_scan((unsigned short *)pDev->buf, lower, 0x01, &found); + count += cnt; + + if ( found ) { + DBG("grtc_hw_find_frm: SCANNED lower %d bytes until found\n",cnt); + goto out; + } + + DBG("grtc_hw_find_frm: SCANNED all lower %d bytes, not found\n",cnt); + } + +out: + /* Update hardware RP pointer to tell hardware about new space available */ + if ( count > 0 ) { + if ( (rp+count) >= ((asr&GRTC_ASR_BUFST)+bufmax) ){ + regs->rp = (rp+count-bufmax); + } else { + regs->rp = rp+count; + } + } + if ( found ) + return 0; + return 1; + +} + +static int grtc_check_ending(unsigned short *src, int max, int end) +{ + while ( max > 0 ) { + /* Check Filler */ + if ( *src != 0x5500 ) { + /* 
Filler is wrong */ + return -1; + } + src++; + max-=2; + } + + /* Check ending (at least */ + if ( end ) { + if ( (*src & 0x00ff) != 0x02 ) { + return -1; + } + } + + return 0; +} + +static int grtc_hw_check_ending(struct grtc_priv *pDev, int max) +{ + struct grtc_regs *regs = pDev->regs; + unsigned int rp, wp, asr, bufmax, rrp, rwp; + unsigned int upper, lower; + unsigned int count, cnt, left; + + FUNCDBG(); + + if ( max < 1 ) + return 0; + max = max*2; + max += 2; /* Check ending also (2 byte extra) */ + + rp = READ_REG(®s->rp); + asr = READ_REG(®s->asr); + bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT; + bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */ + wp = READ_REG(®s->wp); + + /* Relative rp and wp */ + rrp = rp - (asr & GRTC_ASR_BUFST); + rwp = wp - (asr & GRTC_ASR_BUFST); + + lower = grtc_hw_data_avail_lower(rrp,rwp,bufmax); + upper = grtc_hw_data_avail_upper(rrp,rwp,bufmax); + + DBG("grtc_hw_check_ending: AVAIL: Lower: %d, Upper: %d\n",lower,upper); + DBG("grtc_hw_check_ending: rp: 0x%x, rrp: 0x%x, wp: 0x%x, rwp: 0x%x, bufmax: %d\n, start: 0x%x\n", + rp,rrp,wp,rwp,bufmax,pDev->buf_remote); + + if ( (upper+lower) < max ) + return 0; + + /* Count bytes will be read */ + count = max; + left = count; + + /* Read from upper part of data buffer */ + if ( upper > 0 ){ + if ( left <= upper ){ + cnt = left; + if ( grtc_check_ending((unsigned short *)((rp-(unsigned int)pDev->buf_remote)+(unsigned int)pDev->buf), cnt-2, 1) ) { + return -1; + } + }else{ + cnt = upper; /* Read all upper data available */ + if ( grtc_check_ending((unsigned short *)((rp-(unsigned int)pDev->buf_remote)+(unsigned int)pDev->buf), cnt, 0) ) { + return -1; + } + } + left -= cnt; + } + + /* Read from lower part of data buffer */ + if ( left > 0 ){ + cnt = left; + if ( grtc_check_ending((unsigned short *)pDev->buf, cnt-2, 1) ) { + return -1; + } + left -= cnt; + } + + /* Update hardware RP pointer to tell hardware about new space available */ + if ( (rp+count) >= 
((asr&GRTC_ASR_BUFST)+bufmax) ){ + regs->rp = (rp+count-bufmax); + } else { + regs->rp = rp+count; + } + + return 0; +} + +/* Copies Data from DMA area to buf, the control bytes are stripped. For + * every data byte, in the DMA area, one control byte is stripped. + */ +static int grtc_hw_copy(struct grtc_priv *pDev, unsigned char *buf, int max, int partial) +{ + struct grtc_regs *regs = pDev->regs; + unsigned int rp, wp, asr, bufmax, rrp, rwp; + unsigned int upper, lower; + unsigned int count, cnt, left; + int ret, tot, tmp; + + FUNCDBG(); + + if ( max < 1 ) + return 0; + + rp = READ_REG(®s->rp); + asr = READ_REG(®s->asr); + bufmax = (asr & GRTC_ASR_RXLEN) >> GRTC_ASR_RXLEN_BIT; + bufmax = (bufmax+1) << 10; /* Convert from 1kbyte blocks into bytes */ + wp = READ_REG(®s->wp); + + /* Relative rp and wp */ + rrp = rp - (asr & GRTC_ASR_BUFST); + rwp = wp - (asr & GRTC_ASR_BUFST); + + lower = grtc_hw_data_avail_lower(rrp,rwp,bufmax) >> 1; + upper = grtc_hw_data_avail_upper(rrp,rwp,bufmax) >> 1; + + DBG("grtc_hw_copy: AVAIL: Lower: %d, Upper: %d\n",lower,upper); + DBG("grtc_hw_copy: rp: 0x%x, rrp: 0x%x, wp: 0x%x, rwp: 0x%x, bufmax: %d\n, start: 0x%x\n", + rp,rrp,wp,rwp,bufmax,pDev->buf_remote); + + if ( (upper+lower) == 0 || (!partial && ((upper+lower) max ? 
max : (upper+lower); + left = count; + tot = 0; + + /* Read from upper part of data buffer */ + if ( upper > 0 ){ + if ( left < upper ){ + cnt = left; + }else{ + cnt = upper; /* Read all upper data available */ + } + DBG("grtc_hw_copy: COPYING %d from upper\n",cnt); + if ( (tot=grtc_copy((unsigned short *)((rp-(unsigned int)pDev->buf_remote)+(unsigned int)pDev->buf), buf, cnt)) != cnt ) { + /* Failed to copy due to an receive error */ + DBG("grtc_hw_copy(upper): not all in DMA buffer (%d)\n",tot); + count = tot; + ret = -1; + goto out; + } + buf += cnt; + left -= cnt; + } + + /* Read from lower part of data buffer */ + if ( left > 0 ){ + if ( left < lower ){ + cnt = left; + }else{ + cnt = lower; /* Read all lower data available */ + } + DBG("grtc_hw_copy: COPYING %d from lower\n",cnt); + if ( (tmp=grtc_copy((unsigned short *)pDev->buf, buf, cnt)) != cnt ) { + /* Failed to copy due to an receive error */ + DBG("grtc_hw_copy(lower): not all in DMA buffer (%d)\n",tot); + count = tot+tmp; + ret = -1; + goto out; + } + buf += cnt; + left -= cnt; + } + ret = count; + +out: + count = count*2; + /* Update hardware RP pointer to tell hardware about new space available */ + if ( (rp+count) >= ((asr&GRTC_ASR_BUFST)+bufmax) ){ + regs->rp = (rp+count-bufmax); + } else { + regs->rp = rp+count; + } + + return ret; +} + +#ifdef DEBUG_ERROR +void grtc_log_error(struct grtc_priv *pDev, int err) +{ + /* Stop Receiver */ + *(volatile unsigned int *)&pDev->regs->cor = 0x55000000; + *(volatile unsigned int *)&pDev->regs->cor = 0x55000000; + pDev->last_error[pDev->last_error_cnt] = err; + if ( ++pDev->last_error_cnt > 128 ) + pDev->last_error_cnt = 0; +} +#endif + +/* Read one frame from DMA buffer + * + * Return Values + * Zero - nothing more to process + * 1 - more to process, no free frames + * 2 - more to process, frame received + * negative - more to process, frame dropped + */ +static int process_dma(struct grtc_priv *pDev) +{ + int ret, err; + int left, total_len; + unsigned char 
*dst; + struct grtc_frame *frm; + + switch( pDev->frame_state ) { + case FRM_STATE_NONE: + DBG2("FRAME_STATE_NONE\n"); + + /* Find Start of next frame by searching for 0x01 */ + ret = grtc_hw_find_frm(pDev); + if ( ret != 0 ) { + /* Frame start not found */ + return 0; + } + + /* Start of frame found, Try to copy header */ + pDev->frm = NULL; + pDev->frame_state = FRM_STATE_HDR; + + case FRM_STATE_HDR: + DBG2("FRAME_STATE_HDR\n"); + + /* Wait for all of header to be in place by setting partial to 0 */ + ret = grtc_hw_copy(pDev, (unsigned char *)pDev->hdr, 5, 0); + if ( ret < 0 ) { + /* Error copying header, restart scanning for new frame */ + DEBUG_ERR_LOG(pDev,1); + pDev->stats.err++; + pDev->stats.err_hdr++; + DBG("FRAME_STATE_HDR: copying failed %d\n",ret); + pDev->frame_state = FRM_STATE_NONE; + return -1; + } else if ( ret != 5 ) { + DBG("FRAME_STATE_HDR: no header (%d)\n",ret); + /* Not all bytes available, come back later */ + return 0; + } + + /* The complete header has been copied, parse it */ + pDev->frmlen = (((unsigned short *)pDev->hdr)[1] & 0x3ff)+1; + if ( pDev->frmlen < 5 ) { + /* Error: frame length is not correct */ + pDev->stats.err++; + pDev->stats.err_hdr++; + DBG("FRAME_STATE_HDR: frame length error: %d\n", pDev->frmlen); + pDev->frame_state = FRM_STATE_NONE; + return -1; + } + pDev->frame_state = FRM_STATE_ALLOC; + + case FRM_STATE_ALLOC: + DBG2("FRAME_STATE_ALLOC\n"); + /* Header has been read, allocate a frame to put payload and header into */ + + /* Allocate Frame matching Frame length */ + err = 0; + frm = grtc_pool_get_frm(pDev,pDev->frmlen,&err); + if ( !frm ) { + /* Couldn't find frame */ + DEBUG_ERR_LOG(pDev,2); + pDev->stats.dropped++; + DBG2("No free frames\n"); + if ( err == 0 ){ + /* Frame length exist in pool configuration, but no + * frames are available for that frame length. 
+ */ + DEBUG_ERR_LOG(pDev,3); + pDev->stats.dropped_no_buf++; + return 1; + } else { + /* Frame length of incoming frame is larger than the + * frame length in any of the configured frame pools. + * + * This may be because of an corrupt header. We simply + * scan for the end of frame marker in the DMA buffer + * so we can drop the frame. + */ + DEBUG_ERR_LOG(pDev,4); + pDev->stats.dropped_too_long++; + pDev->frame_state = FRM_STATE_NONE; + return -2; + } + } + frm->len = 5; /* Only header currenlty in frame */ + + /* Copy Frame Header into frame structure */ + ((unsigned char*)&frm->hdr)[0] = ((unsigned char*)pDev->hdr)[0]; + ((unsigned char*)&frm->hdr)[1] = ((unsigned char*)pDev->hdr)[1]; + ((unsigned char*)&frm->hdr)[2] = ((unsigned char*)pDev->hdr)[2]; + ((unsigned char*)&frm->hdr)[3] = ((unsigned char*)pDev->hdr)[3]; + ((unsigned char*)&frm->hdr)[4] = ((unsigned char*)pDev->hdr)[4]; + + /* Calc Total and Filler byte count in frame */ + total_len = pDev->frmlen / 7; + total_len = total_len * 7; + if ( pDev->frmlen != total_len ) + total_len += 7; + + pDev->filler = total_len - pDev->frmlen; + + pDev->frame_state = FRM_STATE_PAYLOAD; + pDev->frm = frm; + + case FRM_STATE_PAYLOAD: + DBG2("FRAME_STATE_PAYLOAD\n"); + /* Parts of payload and the complete header has been read */ + frm = pDev->frm; + + dst = (unsigned char *)&frm->data[frm->len-5]; + left = pDev->frmlen-frm->len; + + ret = grtc_hw_copy(pDev,dst,left,1); + if ( ret < 0 ) { + DEBUG_ERR_LOG(pDev,5); + /* Error copying header, restart scanning for new frame */ + pDev->frame_state = FRM_STATE_NONE; + frm->next = NULL; + grtc_pool_add_frms(frm); + pDev->frm = NULL; + pDev->stats.err++; + pDev->stats.err_payload++; + return -1; + } else if ( ret != left ) { + /* Not all bytes available, come back later */ + frm->len += ret; + return 0; + } + frm->len += ret; + pDev->frame_state = FRM_STATE_FILLER; + + case FRM_STATE_FILLER: + DBG2("FRAME_STATE_FILLER\n"); + /* check filler data */ + frm = pDev->frm; + + ret = 
grtc_hw_check_ending(pDev,pDev->filler); + if ( ret != 0 ) { + /* Error in frame, drop frame */ + DEBUG_ERR_LOG(pDev,6); + pDev->frame_state = FRM_STATE_NONE; + frm->next = NULL; + grtc_pool_add_frms(frm); + pDev->frm = NULL; + pDev->stats.err++; + pDev->stats.err_ending++; + return -1; + } + + /* A complete frame received, put it into received frame queue */ + if ( pDev->ready.head ) { + /* Queue not empty */ + pDev->ready.tail->next = frm; + } else { + /* Queue empty */ + pDev->ready.head = frm; + } + pDev->ready.tail = frm; + frm->next = NULL; + pDev->ready.cnt++; + pDev->stats.frames_recv++; + + pDev->frame_state = FRM_STATE_NONE; + frm->next = NULL; + return 2; + +#if 0 + case FRM_STATE_DROP: + DBG2("FRAME_STATE_DROP\n"); + break; +#endif + + default: + printk("GRTC: internal error\n"); + pDev->frame_state = FRM_STATE_NONE; + break; + } + + return 0; +} + +static rtems_device_driver grtc_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg) +{ + struct grtc_priv *pDev; + struct drvmgr_dev *dev; + rtems_libio_ioctl_args_t *ioarg = (rtems_libio_ioctl_args_t *)arg; + unsigned int *data = ioarg->buffer; + int status,frm_len,i,ret; + struct grtc_ioc_buf_params *buf_arg; + struct grtc_ioc_config *cfg; + struct grtc_ioc_hw_status *hwregs; + struct grtc_ioc_pools_setup *pocfg; + struct grtc_ioc_assign_frm_pool *poassign; + struct grtc_frame *frm, *frms; + struct grtc_frame_pool *pool; + struct grtc_list *frmlist; + struct grtc_ioc_stats *stats; + unsigned int mem; + IRQ_LOCAL_DECLARE(oldLevel); + + FUNCDBG(); + + if ( drvmgr_get_dev(&grtc_drv_info.general, minor, &dev) ) { + return RTEMS_INVALID_NUMBER; + } + pDev = (struct grtc_priv *)dev->priv; + + if (!ioarg) + return RTEMS_INVALID_NAME; + + ioarg->ioctl_return = 0; + switch(ioarg->command) { + case GRTC_IOC_START: + if ( pDev->running ) { + return RTEMS_RESOURCE_IN_USE; /* EBUSY */ + } + if ( (status=grtc_start(pDev)) != RTEMS_SUCCESSFUL ){ + return status; + } + /* Register ISR and 
Unmask interrupt */ + drvmgr_interrupt_register(pDev->dev, 0, "grtc", grtc_interrupt, pDev); + + /* Read and write are now open... */ + break; + + case GRTC_IOC_STOP: + if ( !pDev->running ) { + return RTEMS_RESOURCE_IN_USE; + } + drvmgr_interrupt_unregister(pDev->dev, 0, grtc_interrupt, pDev); + grtc_stop(pDev, 0); + break; + + case GRTC_IOC_ISSTARTED: + if ( !pDev->running ) { + return RTEMS_RESOURCE_IN_USE; + } else if ( pDev->overrun_condition ) { + return RTEMS_IO_ERROR; + } + break; + + case GRTC_IOC_SET_BLOCKING_MODE: + if ( (unsigned int)data > GRTC_BLKMODE_COMPLETE ) { + return RTEMS_INVALID_NAME; + } + DBG("GRTC: Set blocking mode: %d\n",(unsigned int)data); + pDev->blocking = (unsigned int)data; + break; + + case GRTC_IOC_SET_TIMEOUT: + DBG("GRTC: Timeout: %d\n",(unsigned int)data); + pDev->timeout = (rtems_interval)data; + break; + + case GRTC_IOC_SET_BUF_PARAM: + if ( pDev->running ) { + return RTEMS_RESOURCE_IN_USE; /* EBUSY */ + } + + buf_arg = (struct grtc_ioc_buf_params *)data; + if ( !buf_arg ) { + return RTEMS_INVALID_NAME; + } + + DBG("GRTC: IOC_SET_BUF_PARAM: Len: 0x%x, Custom Buffer: 0x%x\n",buf_arg->length,buf_arg->custom_buffer); + + /* Check alignment need, skip bit 0 since that bit only indicates remote address or not */ + if ( (unsigned int)buf_arg->custom_buffer & (~GRTC_BUF_MASK) & (~0x1) ) { + return RTEMS_INVALID_NAME; + } + + if ( buf_arg->length > 0x100 ){ + DBG("GRTC: Too big buffer requested\n"); + return RTEMS_INVALID_NAME; + } + + /* If current buffer allocated by driver we must free it */ + if ( !pDev->buf_custom && pDev->buf ){ + free(pDev->_buf); + pDev->_buf = NULL; + } + pDev->buf = NULL; + pDev->len = buf_arg->length*1024; + + if (pDev->len <= 0) + break; + mem = (unsigned int)buf_arg->custom_buffer; + pDev->buf_custom = mem; + + if (mem & 1) { + /* Remote address given, the address is as the GRTC + * core looks at it. Translate the base address into + * an address that the CPU can understand. 
+ */ + pDev->buf_remote = (void *)(mem & ~0x1); + drvmgr_translate_check(pDev->dev, DMAMEM_TO_CPU, + (void *)pDev->buf_remote, + (void **)&pDev->buf, + pDev->len); + } else { + if (mem == 0) { + pDev->buf = grtc_memalign((~GRTC_ASR_BUFST)+1,pDev->len,&pDev->_buf); + DBG("grtc_ioctl: SETBUF: new buf: 0x%x(0x%x), Len: %d\n",pDev->buf,pDev->_buf,pDev->len); + if (!pDev->buf){ + pDev->len = 0; + pDev->buf_custom = 0; + pDev->_buf = NULL; + pDev->buf_remote = 0; + DBG("GRTC: Failed to allocate memory\n"); + return RTEMS_NO_MEMORY; + } + } else{ + pDev->buf = buf_arg->custom_buffer; + } + + /* Translate into a remote address so that GRTC core + * on a remote AMBA bus (for example over the PCI bus) + * gets a valid address + */ + drvmgr_translate_check(pDev->dev, CPUMEM_TO_DMA, + (void *)pDev->buf, + (void **)&pDev->buf_remote, + pDev->len); + } + break; + + case GRTC_IOC_GET_BUF_PARAM: + if ( pDev->running ) { + return RTEMS_RESOURCE_IN_USE; /* EBUSY */ + } + + buf_arg = (struct grtc_ioc_buf_params *)data; + if ( !buf_arg ) { + return RTEMS_INVALID_NAME; + } + + buf_arg->length = pDev->len >> 10; /* Length in 1kByte blocks */ + if ( pDev->buf_custom ) + buf_arg->custom_buffer =(void *)pDev->buf; + else + buf_arg->custom_buffer = 0; /* Don't reveal internal driver buffer */ + break; + + case GRTC_IOC_SET_CONFIG: + cfg = (struct grtc_ioc_config *)data; + if ( !cfg ) { + return RTEMS_INVALID_NAME; + } + + if ( pDev->running ) { + return RTEMS_RESOURCE_IN_USE; + } + + pDev->config = *cfg; + break; + + case GRTC_IOC_GET_CONFIG: + cfg = (struct grtc_ioc_config *)data; + if ( !cfg ) { + return RTEMS_INVALID_NAME; + } + + *cfg = pDev->config; + break; + + case GRTC_IOC_GET_HW_STATUS: + hwregs = (struct grtc_ioc_hw_status *)data; + if ( !hwregs ) { + return RTEMS_INVALID_NAME; + } + /* We disable interrupt on the local CPU in order to get a + * snapshot of the registers. 
+ */ + IRQ_LOCAL_DISABLE(oldLevel); + hwregs->sir = READ_REG(&pDev->regs->sir); + hwregs->far = READ_REG(&pDev->regs->far); + hwregs->clcw1 = READ_REG(&pDev->regs->clcw1); + hwregs->clcw2 = READ_REG(&pDev->regs->clcw2); + hwregs->phir = READ_REG(&pDev->regs->phir); + hwregs->str = READ_REG(&pDev->regs->str); + IRQ_LOCAL_ENABLE(oldLevel); + break; + + case GRTC_IOC_GET_STATS: + stats = (struct grtc_ioc_stats *)data; + if ( !stats ) { + return RTEMS_INVALID_NAME; + } + memcpy(stats,&pDev->stats,sizeof(struct grtc_ioc_stats)); + break; + + case GRTC_IOC_CLR_STATS: + memset(&pDev->stats,0,sizeof(struct grtc_ioc_stats)); + break; + + case GRTC_IOC_SET_MODE: + if ( pDev->running ) { + return RTEMS_RESOURCE_IN_USE; + } + if ( (int)data == GRTC_MODE_FRAME ) { + pDev->mode = GRTC_MODE_FRAME; + } else if ( (int)data == GRTC_MODE_RAW ) { + pDev->mode = GRTC_MODE_RAW; + } else { + return RTEMS_INVALID_NAME; + } + break; + + case GRTC_IOC_POOLS_SETUP: + if ( pDev->running ) { + return RTEMS_RESOURCE_IN_USE; + } + pocfg = (struct grtc_ioc_pools_setup *)data; + if ( (pDev->mode != GRTC_MODE_FRAME) || !pocfg ) { + return RTEMS_INVALID_NAME; + } + + /* Check that list is sorted */ + frm_len = 0; + for(i=0;ipool_cnt;i++){ + if ( pocfg->pool_frame_len[i] <= frm_len ) { + return RTEMS_INVALID_NAME; + } + frm_len = pocfg->pool_frame_len[i]; + } + + /* Ok, we trust user. The pool descriptions are allocated + * but not frames, that the user must do self. 
+ */ + if ( pDev->pools ) { + free(pDev->pools); + } + pDev->pools = grlib_malloc(pocfg->pool_cnt * sizeof(*pDev->pools)); + if ( !pDev->pools ) { + pDev->pool_cnt = 0; + return RTEMS_NO_MEMORY; + } + pDev->pool_cnt = pocfg->pool_cnt; + for (i=0;ipool_cnt;i++) { + pDev->pools[i].frame_len = pocfg->pool_frame_len[i]; + pDev->pools[i].frame_cnt = 0; + pDev->pools[i].frms = NULL; + } + break; + + case GRTC_IOC_ASSIGN_FRM_POOL: + if ( pDev->running ) { + return RTEMS_RESOURCE_IN_USE; + } + + if ( (pDev->mode != GRTC_MODE_FRAME) ) { + return RTEMS_INVALID_NAME; + } + + poassign = (struct grtc_ioc_assign_frm_pool *)data; + if ( !poassign ) { + return RTEMS_INVALID_NAME; + } + + /* Find pool to assign the frames to */ + pool = NULL; + for(i=0; ipool_cnt; i++) { + if ( pDev->pools[i].frame_len == poassign->frame_len ) { + pool = &pDev->pools[i]; + break; + } + } + if ( !pool ) { + /* No Pool matching frame length */ + return RTEMS_INVALID_NAME; + } + + /* Assign frames to pool */ + frm = poassign->frames; + while(frm){ + frm->pool = pool; /* Assign Frame to pool */ + frm = frm->next; + } + break; + + case GRTC_IOC_ADD_BUFF: + frms = (struct grtc_frame *)data; + + if ( (pDev->mode != GRTC_MODE_FRAME) ) { + return RTEMS_NOT_DEFINED; + } + if ( !frms ) { + return RTEMS_INVALID_NAME; + } + + /* Add frames to respicative pools */ + if ( grtc_pool_add_frms(frms) ) { + return RTEMS_INVALID_NAME; + } + break; + + /* Try to read as much data as possible from DMA area and + * put it into free frames. + * + * If receiver is in stopped mode, let user only read previously + * received frames. 
+ */ + case GRTC_IOC_RECV: + + if ( (pDev->mode != GRTC_MODE_FRAME) ) { + return RTEMS_NOT_DEFINED; + } + + while ( pDev->running && ((ret=process_dma(pDev) == 2) || (ret == -1)) ) { + /* Frame received or dropped, process next frame */ + } + + /* Take frames out from ready queue and put them to user */ + frmlist = (struct grtc_list *)data; + if ( !frmlist ) { + return RTEMS_INVALID_NAME; + } + + frmlist->head = pDev->ready.head; + frmlist->tail = pDev->ready.tail; + frmlist->cnt = pDev->ready.cnt; + + /* Empty list */ + pDev->ready.head = NULL; + pDev->ready.tail = NULL; + pDev->ready.cnt = 0; + + if ((frmlist->cnt == 0) && pDev->overrun_condition) { + /* signal to the user that overrun has happend when + * no more data can be read out. + */ + return RTEMS_IO_ERROR; + } + break; + + case GRTC_IOC_GET_CLCW_ADR: + if ( !data ) { + return RTEMS_INVALID_NAME; + } + *data = (unsigned int)&pDev->regs->clcw1; + break; + + default: + return RTEMS_NOT_DEFINED; + } + return RTEMS_SUCCESSFUL; +} + +static void grtc_interrupt(void *arg) +{ + struct grtc_priv *pDev = arg; + struct grtc_regs *regs = pDev->regs; + unsigned int status; + SPIN_ISR_IRQFLAGS(irqflags); + + /* Clear interrupt by reading it */ + status = READ_REG(®s->pisr); + + /* Spurious Interrupt? */ + if ( !pDev->running ) + return; + + if ( status & GRTC_INT_OV ){ + /* Stop core (Disable receiver, interrupts), set overrun condition, + * Flush semaphore if thread waiting for data in grtc_wait_data(). 
+ */ + grtc_stop(pDev, 1); + + /* No need to handle the reset of interrupts, we are still */ + goto out; + } + + if ( status & GRTC_INT_CS ){ + SPIN_LOCK(&pDev->devlock, irqflags); + + if ( (pDev->blocking==GRTC_BLKMODE_COMPLETE) && pDev->timeout ){ + /* Signal to thread only if enough data is available */ + if ( pDev->wait_for_nbytes > grtc_data_avail(pDev) ){ + /* Not enough data available */ + goto procceed_processing_interrupts; + } + + /* Enough data is available which means that we should + * wake up the thread sleeping. + */ + } + + /* Disable further CLTUs Stored interrupts, no point until + * thread waiting for them says it want to wait for more. + */ + regs->imr = READ_REG(®s->imr) & ~GRTC_INT_CS; + SPIN_UNLOCK(&pDev->devlock, irqflags); + + /* Signal Semaphore to wake waiting thread in read() */ + rtems_semaphore_release(pDev->sem_rx); + } + +procceed_processing_interrupts: + + if ( status & GRTC_INT_CR ){ + + } + + if ( status & GRTC_INT_FAR ){ + + } + + if ( status & GRTC_INT_BLO ){ + + } + + if ( status & GRTC_INT_RFA ){ + + } +out: + if ( status ) + regs->picr = status; +} + +static rtems_device_driver grtc_initialize( + rtems_device_major_number major, + rtems_device_minor_number unused, + void *arg + ) +{ + /* Device Semaphore created with count = 1 */ + if ( rtems_semaphore_create(rtems_build_name('G', 'R', 'T', 'C'), + 1, + RTEMS_FIFO|RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING, + 0, + &grtc_dev_sem) != RTEMS_SUCCESSFUL ) { + return RTEMS_INTERNAL_ERROR; + } + + return RTEMS_SUCCESSFUL; +} diff --git a/bsps/shared/grlib/tmtc/grtm.c b/bsps/shared/grlib/tmtc/grtm.c new file mode 100644 index 0000000000..43476aaaad --- /dev/null +++ b/bsps/shared/grlib/tmtc/grtm.c @@ -0,0 +1,1613 @@ +/* GRTM CCSDS Telemetry Encoder driver + * + * COPYRIGHT (c) 2007. + * Cobham Gaisler AB. 
+ * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +/* +#define DEBUG +#define DEBUGFUNCS +*/ + +#include + +/* GRTM register map */ +struct grtm_regs { + volatile unsigned int dma_ctrl; /* DMA Control Register (0x00) */ + volatile unsigned int dma_status; /* DMA Status Register (0x04) */ + volatile unsigned int dma_len; /* DMA Length Register (0x08) */ + volatile unsigned int dma_bd; /* DMA Descriptor Pointer Register (0x0c) */ + + volatile unsigned int dma_cfg; /* DMA Configuration Register (0x10) */ + volatile unsigned int revision; /* GRTM Revision Register (0x14) */ + + int unused0[(0x80-0x18)/4]; + + volatile unsigned int ctrl; /* TM Control Register (0x80) */ + volatile unsigned int status; /* TM Status Register (0x84) */ + volatile unsigned int cfg; /* TM Configuration Register (0x88) */ + volatile unsigned int size; /* TM Size Register (0x8c) */ + + volatile unsigned int phy; /* TM Physical Layer Register (0x90) */ + volatile unsigned int code; /* TM Coding Sub-Layer Register (0x94) */ + volatile unsigned int asmr; /* TM Attached Synchronization Marker Register (0x98) */ + + int unused1; + + volatile unsigned int all_frm; /* TM All Frames Generation Register (0xa0) */ + volatile unsigned int mst_frm; /* TM Master Channel Frame Generation Register (0xa4) */ + volatile unsigned int idle_frm; /* TM Idle Frame Generation Register (0xa8) */ + + int unused2[(0xc0-0xac)/4]; + + volatile unsigned int fsh[4]; /* TM FSH/Insert Zone Registers (0xc0..0xcc) */ + + volatile unsigned int ocf; /* TM Operational Control Field Register (0xd0) */ +}; + +/* DMA Control Register (0x00) */ +#define GRTM_DMA_CTRL_EN_BIT 0 +#define GRTM_DMA_CTRL_IE_BIT 1 +#define GRTM_DMA_CTRL_TXRST_BIT 2 +#define GRTM_DMA_CTRL_RST_BIT 3 +#define 
GRTM_DMA_CTRL_TFIE_BIT 4 + +#define GRTM_DMA_CTRL_EN (1<minor_drv, dev->parent->dev->name); + priv = dev->priv = grlib_calloc(1, sizeof(*priv)); + if ( !priv ) + return DRVMGR_NOMEM; + priv->dev = dev; + + /* This core will not find other cores, so we wait for init2() */ + + return DRVMGR_OK; +} + +static int grtm_init3(struct drvmgr_dev *dev) +{ + struct grtm_priv *priv; + char prefix[32]; + rtems_status_code status; + + priv = dev->priv; + + /* Do initialization */ + + if ( grtm_driver_io_registered == 0) { + /* Register the I/O driver only once for all cores */ + if ( grtm_register_io(&grtm_driver_io_major) ) { + /* Failed to register I/O driver */ + dev->priv = NULL; + return DRVMGR_FAIL; + } + + grtm_driver_io_registered = 1; + } + + /* I/O system registered and initialized + * Now we take care of device initialization. + */ + if ( grtm_device_init(priv) ) { + return DRVMGR_FAIL; + } + + /* Get Filesystem name prefix */ + prefix[0] = '\0'; + if ( drvmgr_get_dev_prefix(dev, prefix) ) { + /* Failed to get prefix, make sure of a unique FS name + * by using the driver minor. 
+ */ + sprintf(priv->devName, "/dev/grtm%d", dev->minor_drv); + } else { + /* Got special prefix, this means we have a bus prefix + * And we should use our "bus minor" + */ + sprintf(priv->devName, "/dev/%sgrtm%d", prefix, dev->minor_bus); + } + + SPIN_INIT(&priv->devlock, priv->devName); + + /* Register Device */ + status = rtems_io_register_name(priv->devName, grtm_driver_io_major, dev->minor_drv); + if (status != RTEMS_SUCCESSFUL) { + return DRVMGR_FAIL; + } + + return DRVMGR_OK; +} + +/******************* Driver Implementation ***********************/ + +static int grtm_register_io(rtems_device_major_number *m) +{ + rtems_status_code r; + + if ((r = rtems_io_register_driver(0, &grtm_driver, m)) == RTEMS_SUCCESSFUL) { + DBG("GRTM driver successfully registered, major: %d\n", *m); + } else { + switch(r) { + case RTEMS_TOO_MANY: + printk("GRTM rtems_io_register_driver failed: RTEMS_TOO_MANY\n"); + return -1; + case RTEMS_INVALID_NUMBER: + printk("GRTM rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n"); + return -1; + case RTEMS_RESOURCE_IN_USE: + printk("GRTM rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n"); + return -1; + default: + printk("GRTM rtems_io_register_driver failed\n"); + return -1; + } + } + return 0; +} + +static int grtm_device_init(struct grtm_priv *pDev) +{ + struct amba_dev_info *ambadev; + struct ambapp_core *pnpinfo; + union drvmgr_key_value *value; + + /* Get device information from AMBA PnP information */ + ambadev = (struct amba_dev_info *)pDev->dev->businfo; + if ( ambadev == NULL ) { + return -1; + } + pnpinfo = &ambadev->info; + pDev->irq = pnpinfo->irq; + pDev->regs = (struct grtm_regs *)pnpinfo->apb_slv->start; + pDev->minor = pDev->dev->minor_drv; + pDev->open = 0; + pDev->running = 0; + + /* Create Binary RX Semaphore with count = 0 */ + if ( rtems_semaphore_create(rtems_build_name('G', 'R', 'M', '0' + pDev->minor), + 0, + RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|\ + 
RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING, + 0, + &pDev->sem_tx) != RTEMS_SUCCESSFUL ) { + return -1; + } + + /* Allocate Memory for Buffer Descriptor Table, or let user provide a custom + * address. + */ + value = drvmgr_dev_key_get(pDev->dev, "bdTabAdr", DRVMGR_KT_POINTER); + if ( value ) { + pDev->bds = (struct grtm_bd *)value->ptr; + pDev->_bds = (void *)value->ptr; + } else { + pDev->bds = (struct grtm_bd *)grtm_memalign(0x400, 0x400, &pDev->_bds); + } + if ( !pDev->bds ) { + DBG("GRTM: Failed to allocate descriptor table\n"); + return -1; + } + memset(pDev->bds, 0, 0x400); + + pDev->_ring = grlib_malloc(sizeof(*pDev->_ring) * 128); + if ( !pDev->_ring ) { + return -1; + } + + /* Reset Hardware before attaching IRQ handler */ + grtm_hw_reset(pDev); + + /* Read SUB revision number, ignore */ + pDev->subrev = (READ_REG(&pDev->regs->revision) & GRTM_REV1_REV_SREV) + >> GRTM_REV1_REV_SREV_BIT; + + return 0; +} + + +static inline void grtm_list_clr(struct grtm_list *list) +{ + list->head = NULL; + list->tail = NULL; +} + +static void grtm_hw_reset(struct grtm_priv *pDev) +{ + /* Reset Core */ + pDev->regs->dma_ctrl = GRTM_DMA_CTRL_RST; +} + +static void grtm_hw_get_implementation(struct grtm_priv *pDev, struct grtm_ioc_hw *hwcfg) +{ + unsigned int cfg = READ_REG(&pDev->regs->cfg); + + hwcfg->cs = (cfg & GRTM_CFG_SC) ? 1:0; + hwcfg->sp = (cfg & GRTM_CFG_SP) ? 1:0; + hwcfg->ce = (cfg & GRTM_CFG_CE) ? 1:0; + hwcfg->nrz = (cfg & GRTM_CFG_NRZ) ? 1:0; + hwcfg->psr = (cfg & GRTM_CFG_PSR) ? 1:0; + hwcfg->te = (cfg & GRTM_CFG_TE) ? 1:0; + hwcfg->rsdep = (cfg & GRTM_CFG_RSDEP)>>GRTM_CFG_RSDEP_BIT; + hwcfg->rs = (cfg & GRTM_CFG_RS)>>GRTM_CFG_RS_BIT; + hwcfg->aasm = (cfg & GRTM_CFG_AASM) ? 1:0; + hwcfg->fecf = (cfg & GRTM_CFG_FECF) ? 1:0; + hwcfg->ocf = (cfg & GRTM_CFG_OCF) ? 1:0; + hwcfg->evc = (cfg & GRTM_CFG_EVC) ? 1:0; + hwcfg->idle = (cfg & GRTM_CFG_IDLE) ? 1:0; + hwcfg->fsh = (cfg & GRTM_CFG_FSH) ? 1:0; + hwcfg->mcg = (cfg & GRTM_CFG_MCG) ? 
1:0; + hwcfg->iz = (cfg & GRTM_CFG_IZ) ? 1:0; + hwcfg->fhec = (cfg & GRTM_CFG_FHEC) ? 1:0; + hwcfg->aos = (cfg & GRTM_CFG_AOS) ? 1:0; + hwcfg->cif = (cfg & GRTM_CFG_CIF) ? 1:0; + hwcfg->ocfb = (cfg & GRTM_CFG_OCFB) ? 1:0; + + cfg = READ_REG(&pDev->regs->dma_cfg); + hwcfg->blk_size = (cfg & GRTM_DMA_CFG_BLKSZ) >> GRTM_DMA_CFG_BLKSZ_BIT; + hwcfg->fifo_size= (cfg & GRTM_DMA_CFG_FIFOSZ) >> GRTM_DMA_CFG_FIFOSZ_BIT; +} + + +/* TODO: Implement proper default calculation from hardware configuration */ +static void grtm_hw_get_default_modes(struct grtm_ioc_config *cfg, struct grtm_ioc_hw *hwcfg) +{ + cfg->mode = GRTM_MODE_TM; + cfg->frame_length = 223; + cfg->limit = 0; /* Make driver auto configure it on START, user may override with non-zero value */ + cfg->as_marker = 0x1ACFFC1D; + + /* Physical */ + cfg->phy_subrate = 1; + cfg->phy_symbolrate = 1; + cfg->phy_opts = 0; + + /* Coding Layer */ + cfg->code_rsdep = 1; + cfg->code_ce_rate = 0; + cfg->code_csel = 0; + cfg->code_opts = 0; + + /* All Frame Generation */ + cfg->all_izlen = 0; + cfg->all_opts = GRTM_IOC_ALL_FECF; + + /* Master Channel Frame Generation */ + if ( hwcfg->mcg ) { + cfg->mf_opts = GRTM_IOC_MF_MC; + } else { + cfg->mf_opts = 0; + } + + /* Idle Frame Generation */ + cfg->idle_scid = 0; + cfg->idle_vcid = 0; + if ( hwcfg->idle ) { + cfg->idle_opts = GRTM_IOC_IDLE_EN; + } else { + cfg->idle_opts = 0; + } + + /* Interrupt options */ + cfg->blocking = 0; /* non-blocking mode is default */ + cfg->enable_cnt = 16; /* generate interrupt every 16 descriptor */ + cfg->isr_desc_proc = 1; /* Let interrupt handler do descriptor processing */ + cfg->timeout = RTEMS_NO_TIMEOUT; + +} + +static void *grtm_memalign(unsigned int boundary, unsigned int length, void *realbuf) +{ + *(int *)realbuf = (int)grlib_malloc(length+boundary); + DBG("GRTM: Alloced %d (0x%x) bytes, requested: %d\n",length+boundary,length+boundary,length); + return (void *)(((*(unsigned int *)realbuf)+boundary) & ~(boundary-1)); +} + +static int 
grtm_hw_set_config(struct grtm_priv *pDev, struct grtm_ioc_config *cfg, struct grtm_ioc_hw *hwcfg) +{ + struct grtm_regs *regs = pDev->regs; + unsigned int tmp; + unsigned int limit; + + if ( cfg->limit == 0 ) { + /* Calculate Limit */ + if ( cfg->frame_length > hwcfg->blk_size ) { + limit = hwcfg->blk_size*2; + } else { + limit = cfg->frame_length; + } + } else { + /* Use user configured limit */ + limit = cfg->limit; + } + + /* Frame Length and Limit */ + regs->dma_len = (((limit-1) << GRTM_DMA_LEN_LIM_BIT) & GRTM_DMA_LEN_LIM)| + (((cfg->frame_length-1) << GRTM_DMA_LEN_LEN_BIT) & GRTM_DMA_LEN_LEN); + + /* Physical layer options */ + tmp = (cfg->phy_opts & (GRTM_IOC_PHY_SCF|GRTM_IOC_PHY_SF)) | + (((cfg->phy_symbolrate-1)<phy_subrate-1)<phy = tmp; + + /* Coding Sub-layer Options */ + tmp = (cfg->code_opts & GRTM_IOC_CODE_ALL) | ((cfg->code_csel<code_rsdep-1)<code_ce_rate<code = tmp; + + /* Attached synchronization marker register */ + regs->asmr = cfg->as_marker; + + /* All Frames Generation */ + tmp = ((cfg->all_opts & GRTM_IOC_ALL_ALL)<<14) | + ((cfg->all_izlen<mode<all_frm = tmp; + + /* Master Frame Generation */ + regs->mst_frm = cfg->mf_opts & GRTM_IOC_MF_ALL; + + /* Idle frame Generation */ + tmp = ((cfg->idle_opts & GRTM_IOC_IDLE_ALL) << 16) | + ((cfg->idle_vcid << GRTM_IDLE_VCID_BIT) & GRTM_IDLE_VCID) | + ((cfg->idle_scid << GRTM_IDLE_SCID_BIT) & GRTM_IDLE_SCID); + regs->idle_frm = tmp; + + return 0; +} + +static int grtm_start(struct grtm_priv *pDev) +{ + struct grtm_regs *regs = pDev->regs; + int i; + struct grtm_ioc_config *cfg = &pDev->config; + unsigned int txrdy; + + /* Clear Descriptors */ + memset(pDev->bds,0,0x400); + + /* Clear stats */ + memset(&pDev->stats,0,sizeof(struct grtm_ioc_stats)); + + /* Init Descriptor Ring */ + memset(pDev->_ring,0,sizeof(struct grtm_ring)*128); + for(i=0;i<127;i++){ + pDev->_ring[i].next = &pDev->_ring[i+1]; + pDev->_ring[i].bd = &pDev->bds[i]; + pDev->_ring[i].frm = NULL; + } + pDev->_ring[127].next = 
&pDev->_ring[0]; + pDev->_ring[127].bd = &pDev->bds[127]; + pDev->_ring[127].frm = NULL; + + pDev->ring = &pDev->_ring[0]; + pDev->ring_end = &pDev->_ring[0]; + + /* Clear Scheduled, Ready and Sent list */ + grtm_list_clr(&pDev->ready); + grtm_list_clr(&pDev->scheduled); + grtm_list_clr(&pDev->sent); + + /* Software init */ + pDev->handling_transmission = 0; + + /* Reset the transmitter */ + regs->dma_ctrl = GRTM_DMA_CTRL_TXRST; + regs->dma_ctrl = 0; /* Leave Reset */ + + /* Clear old interrupts */ + regs->dma_status = GRTM_DMA_STS_ALL; + + /* Set Descriptor Pointer Base register to point to first descriptor */ + drvmgr_translate_check(pDev->dev, CPUMEM_TO_DMA, (void *)pDev->bds, + (void **)®s->dma_bd, 0x400); + + /* Set hardware options as defined by config */ + if ( grtm_hw_set_config(pDev, cfg, &pDev->hw_avail) ) { + return RTEMS_IO_ERROR; + } + + /* Enable TM Transmitter */ + regs->ctrl = GRTM_CTRL_EN; + + /* Wait for TXRDY to be cleared */ + i=1000; + while( i > 0 ) { + asm volatile ("nop"::); + i--; + } + + /* Check transmitter startup OK */ + i = 1000000; + do { + /* Location of TXRDY Bit is different for different revisions */ + if ( pDev->subrev == 0 ) { + txrdy = READ_REG(®s->dma_ctrl) & + GRTM_REV0_DMA_CTRL_TXRDY; + } else { + txrdy = READ_REG(®s->dma_status) & + GRTM_REV1_DMA_STS_TXRDY; + } + if (txrdy != 0) + break; + + asm volatile ("nop"::); + } while ( --i > 0 ); + if ( i == 0 ) { + /* Reset Failed */ + DBG("GRTM: start: Reseting transmitter failed (%d)\n",i); + return RTEMS_IO_ERROR; + } + DBG("GRTM: reset time %d\n",i); + + /* Everything is configured, the TM transmitter is started + * and idle frames has been sent. 
+ */ + + /* Mark running before enabling the DMA transmitter */ + pDev->running = 1; + + /* Enable interrupts (Error and DMA TX) */ + regs->dma_ctrl = GRTM_DMA_CTRL_IE; + + DBG("GRTM: STARTED\n"); + + return RTEMS_SUCCESSFUL; +} + +static void grtm_stop(struct grtm_priv *pDev) +{ + struct grtm_regs *regs = pDev->regs; + + /* Disable the transmitter & Interrupts */ + regs->dma_ctrl = 0; + + /* Clear any pending interrupt */ + regs->dma_status = GRTM_DMA_STS_ALL; + + DBG("GRTM: STOPPED\n"); + + /* Flush semaphore in case a thread is stuck waiting for TX Interrupts */ + rtems_semaphore_flush(pDev->sem_tx); +} + +static rtems_device_driver grtm_open( + rtems_device_major_number major, + rtems_device_minor_number minor, + void *arg) +{ + struct grtm_priv *pDev; + struct drvmgr_dev *dev; + + FUNCDBG(); + + if ( drvmgr_get_dev(&grtm_drv_info.general, minor, &dev) ) { + DBG("Wrong minor %d\n", minor); + return RTEMS_INVALID_NUMBER; + } + pDev = (struct grtm_priv *)dev->priv; + + /* Wait until we get semaphore */ + if ( rtems_semaphore_obtain(grtm_dev_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT) != RTEMS_SUCCESSFUL ){ + return RTEMS_INTERNAL_ERROR; + } + + /* Is device in use? */ + if ( pDev->open ){ + rtems_semaphore_release(grtm_dev_sem); + return RTEMS_RESOURCE_IN_USE; + } + + /* Mark device taken */ + pDev->open = 1; + + rtems_semaphore_release(grtm_dev_sem); + + DBG("grtm_open: OPENED minor %d (pDev: 0x%x)\n",pDev->minor,(unsigned int)pDev); + + /* Set defaults */ + pDev->config.timeout = RTEMS_NO_TIMEOUT; /* no timeout (wait forever) */ + pDev->config.blocking = 0; /* polling mode */ + + pDev->running = 0; /* not in running mode yet */ + + memset(&pDev->config,0,sizeof(pDev->config)); + + /* The core has been reset when we execute here, so it is possible + * to read out what HW is implemented from core. 
+ */ + grtm_hw_get_implementation(pDev, &pDev->hw_avail); + + /* Get default modes */ + grtm_hw_get_default_modes(&pDev->config,&pDev->hw_avail); + + return RTEMS_SUCCESSFUL; +} + +static rtems_device_driver grtm_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg) +{ + struct grtm_priv *pDev; + struct drvmgr_dev *dev; + + FUNCDBG(); + + if ( drvmgr_get_dev(&grtm_drv_info.general, minor, &dev) ) { + return RTEMS_INVALID_NUMBER; + } + pDev = (struct grtm_priv *)dev->priv; + + if ( pDev->running ){ + drvmgr_interrupt_unregister(dev, 0, grtm_interrupt, pDev); + grtm_stop(pDev); + pDev->running = 0; + } + + /* Reset core */ + grtm_hw_reset(pDev); + + /* Clear descriptor area just for sure */ + memset(pDev->bds, 0, 0x400); + + /* Mark not open */ + pDev->open = 0; + + return RTEMS_SUCCESSFUL; +} + +static rtems_device_driver grtm_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg) +{ + FUNCDBG(); + return RTEMS_NOT_IMPLEMENTED; +} + +static rtems_device_driver grtm_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg) +{ + FUNCDBG(); + return RTEMS_NOT_IMPLEMENTED; +} + +/* Scans the desciptor table for scheduled frames that has been sent, + * and moves these frames from the head of the scheduled queue to the + * tail of the sent queue. + * + * Also, for all frames the status is updated. + * + * Return Value + * Number of frames freed. + */ +static int grtm_free_sent(struct grtm_priv *pDev) +{ + struct grtm_ring *curr; + struct grtm_frame *last_frm, *first_frm; + int freed_frame_cnt=0; + unsigned int ctrl; + + curr = pDev->ring_end; + + /* Step into TX ring to find sent frames */ + if ( !curr->frm ){ + /* No scheduled frames, abort */ + return 0; + } + + /* There has been messages scheduled ==> scheduled messages may have been + * transmitted and needs to be collected. + */ + + first_frm = curr->frm; + + /* Loop until first enabled unsent frame is found. 
+ * A unused descriptor is indicated by an unassigned frm field + */ + while ( curr->frm && !((ctrl=READ_REG(&curr->bd->ctrl)) & GRTM_BD_EN) ){ + /* Handle one sent Frame */ + + /* Remember last handled frame so that insertion/removal from + * frames lists go fast. + */ + last_frm = curr->frm; + + /* 1. Set flags to indicate error(s) and other information */ + last_frm->flags |= GRTM_FLAGS_SENT; /* Mark sent */ + + /* Update Stats */ + pDev->stats.frames_sent++; + + /* Did packet encounter link error? */ + if ( ctrl & GRTM_BD_UE ) { + pDev->stats.err_underrun++; + last_frm->flags |= GRRM_FLAGS_ERR; + } + + curr->frm = NULL; /* Mark unused */ + + /* Increment */ + curr = curr->next; + freed_frame_cnt++; + } + + /* 1. Remove all handled frames from scheduled queue + * 2. Put all handled frames into sent queue + */ + if ( freed_frame_cnt > 0 ){ + + /* Save TX ring posistion */ + pDev->ring_end = curr; + + /* Remove all sent frames from scheduled list */ + if ( pDev->scheduled.tail == last_frm ){ + /* All scheduled frames sent... */ + pDev->scheduled.head = NULL; + pDev->scheduled.tail = NULL; + }else{ + pDev->scheduled.head = last_frm->next; + } + last_frm->next = NULL; + + /* Put all sent frames into "Sent queue" for user to + * collect, later on. + */ + if ( !pDev->sent.head ){ + /* Sent queue empty */ + pDev->sent.head = first_frm; + pDev->sent.tail = last_frm; + }else{ + pDev->sent.tail->next = first_frm; + pDev->sent.tail = last_frm; + } + } + return freed_frame_cnt; +} + + +/* Moves as many frames in the ready queue (as there are free descriptors for) + * to the scheduled queue. The free descriptors are then assigned one frame + * each and enabled for transmission. 
 *
 * Return Value
 * Returns number of frames moved from ready to scheduled queue
 */
static int grtm_schedule_ready(struct grtm_priv *pDev)
{
	int cnt;
	unsigned int ctrl, dmactrl;
	struct grtm_ring *curr_bd;
	struct grtm_frame *curr_frm, *last_frm;

	/* Nothing to schedule if the ready queue is empty */
	if ( !pDev->ready.head ){
		return 0;
	}

	cnt=0;
	curr_frm = pDev->ready.head;
	curr_bd = pDev->ring;
	/* Walk the descriptor ring from the current head. A descriptor is
	 * free when its frm pointer is unassigned; stop at the first
	 * still-occupied descriptor or when the ready queue runs out.
	 */
	while( !curr_bd->frm ){
		/* Assign frame to descriptor */
		curr_bd->frm = curr_frm;

		/* Prepare descriptor address. Three cases:
		 *  - GRTM core on same bus as CPU ==> no translation (Address used by CPU = address used by GRTM)
		 *  - GRTM core on remote bus, and payload address given as used by CPU ==> Translation needed
		 *  - GRTM core on remote bus, and payload address given as used by GRTM ==> no translation [ USER does custom translation]
		 */
		if ( curr_frm->flags & (GRTM_FLAGS_TRANSLATE|GRTM_FLAGS_TRANSLATE_AND_REMEMBER) ) {
			/* Do translation */
			drvmgr_translate(pDev->dev, CPUMEM_TO_DMA, (void *)curr_frm->payload, (void **)&curr_bd->bd->address);
			if ( curr_frm->flags & GRTM_FLAGS_TRANSLATE_AND_REMEMBER ) {
				if ( curr_frm->payload != (unsigned int *)curr_bd->bd->address ) {
					/* Translation needed. Remember the outcome in
					 * the frame flags so the lookup result is
					 * reused next time this frame is sent.
					 */
					curr_frm->flags &= ~GRTM_FLAGS_TRANSLATE_AND_REMEMBER;
					curr_frm->flags |= GRTM_FLAGS_TRANSLATE;
				} else {
					/* No translation needed */
					curr_frm->flags &= ~(GRTM_FLAGS_TRANSLATE|GRTM_FLAGS_TRANSLATE_AND_REMEMBER);
				}
			}
		} else {
			/* Custom translation or no translation needed */
			curr_bd->bd->address = (unsigned int)curr_frm->payload;
		}

		ctrl = GRTM_BD_EN;
		if ( curr_bd->next == pDev->_ring ){
			ctrl |= GRTM_BD_WR; /* Wrap around */
		}
		/* Apply user options/flags */
		ctrl |= (curr_frm->flags & GRTM_FLAGS_MASK);

		/* Is this Frame going to be an interrupt Frame? An IRQ is
		 * requested every config.enable_cnt scheduled frames;
		 * enable_cnt == 0 disables TX interrupts by pushing the
		 * countdown far into the future.
		 */
		if ( (--pDev->enable_cnt_curr) <= 0 ){
			if ( pDev->config.enable_cnt == 0 ){
				pDev->enable_cnt_curr = 0x3fffffff;
			}else{
				pDev->enable_cnt_curr = pDev->config.enable_cnt;
				ctrl |= GRTM_BD_IE;
			}
		}

		/* Enable descriptor. Written last so the hardware never sees a
		 * half-initialized descriptor with EN set.
		 */
		curr_bd->bd->ctrl = ctrl;

		last_frm = curr_frm;
		curr_bd = curr_bd->next;
		cnt++;

		/* Get Next Frame from Ready Queue */
		if ( curr_frm == pDev->ready.tail ){
			/* Handled all in ready queue. */
			curr_frm = NULL;
			break;
		}
		curr_frm = curr_frm->next;
	}

	/* Have any frames been scheduled? */
	if ( cnt > 0 ){
		/* Make last frame mark end of chain, probably pointless... */
		last_frm->next = NULL;

		/* Insert scheduled packets into scheduled queue */
		if ( !pDev->scheduled.head ){
			/* empty scheduled queue */
			pDev->scheduled.head = pDev->ready.head;
			pDev->scheduled.tail = last_frm;
		}else{
			pDev->scheduled.tail->next = pDev->ready.head;
			pDev->scheduled.tail = last_frm;
		}

		/* Remove scheduled packets from ready queue; curr_frm is NULL
		 * when the whole ready queue was consumed.
		 */
		pDev->ready.head = curr_frm;
		if ( !curr_frm ){
			pDev->ready.tail = NULL;
		}

		/* Update TX ring position */
		pDev->ring = curr_bd;

		/* Make hardware aware of the newly enabled descriptors. The
		 * reset bits are masked out so a pending reset is not retriggered.
		 */
		dmactrl = READ_REG(&pDev->regs->dma_ctrl);
		dmactrl &= ~(GRTM_DMA_CTRL_TXRST | GRTM_DMA_CTRL_RST);
		dmactrl |= GRTM_DMA_CTRL_EN;
		pDev->regs->dma_ctrl = dmactrl;
	}

	return cnt;
}

/* Advance the TX state machine in two steps:
 *  1. reclaim descriptors of finished frames (SCHEDULED->SENT),
 *  2. if the driver is started, refill free descriptors from the
 *     ready queue (READY->SCHEDULED).
 * All call sites in this file take the TX lock (grtm_request_txlock or
 * the ISR variant) before calling this function.
 */
static void grtm_tx_process(struct grtm_priv *pDev)
{
	int num;

	/* Free used descriptors and put the sent frame into the "Sent queue"
	 * (SCHEDULED->SENT)
	 */
	num = grtm_free_sent(pDev);
	pDev->scheduled_cnt -= num;
	pDev->sent_cnt += num;

	/* Use all available free descriptors there are frames for
	 * in the ready queue.
	 * (READY->SCHEDULED)
	 */
	if (pDev->running) {
		num = grtm_schedule_ready(pDev);
		pDev->ready_cnt -= num;
		pDev->scheduled_cnt += num;
	}
}

/*
 * The TX lock protects user tasks from the ISR.
 If TX DMA interrupt occurs
 * while the user task is processing the TX DMA descriptors the ISR will
 * ignore the interrupt request by not processing the DMA table since that
 * is done by the user task anyway. In SMP, when a user task enters the TX DMA
 * processing while the ISR (on another CPU) is also processing the user task
 * will loop waiting for the ISR to complete.
 */
static int grtm_request_txlock(struct grtm_priv *pDev, int block)
{
	SPIN_IRQFLAGS(irqflags);
	int got_lock = 0;

	/* Try to claim the handling_transmission flag under the device
	 * spin-lock. With block != 0 this loops (busy-waits) until the
	 * flag could be taken; with block == 0 it is a single try-lock.
	 */
	do {
		SPIN_LOCK_IRQ(&pDev->devlock, irqflags);
		if (pDev->handling_transmission == 0) {
			pDev->handling_transmission = 1;
			got_lock = 1;
		}
		SPIN_UNLOCK_IRQ(&pDev->devlock, irqflags);
	} while (!got_lock && block);

	return got_lock;
}

/* Try-lock variant for interrupt context; never blocks.
 * Returns non-zero when the TX lock was acquired.
 */
static inline int grtm_request_txlock_isr(struct grtm_priv *pDev)
{
	SPIN_ISR_IRQFLAGS(irqflags);
	int got_lock = 0;

	SPIN_LOCK(&pDev->devlock, irqflags);
	if (pDev->handling_transmission == 0) {
		pDev->handling_transmission = 1;
		got_lock = 1;
	}
	SPIN_UNLOCK(&pDev->devlock, irqflags);

	return got_lock;
}

/* Release the TX lock taken by grtm_request_txlock[_isr]().
 * A plain store is used; the flag was claimed under the spin-lock.
 */
static inline void grtm_release_txlock(struct grtm_priv *pDev)
{
	pDev->handling_transmission = 0;
}

/* ioctl entry point of the GRTM char device: start/stop control,
 * configuration get/set, statistics, and the SEND/RECLAIM frame-queue
 * operations that drive the TX DMA descriptor table.
 */
static rtems_device_driver grtm_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
{
	struct grtm_priv *pDev;
	struct drvmgr_dev *dev;
	rtems_libio_ioctl_args_t *ioarg = (rtems_libio_ioctl_args_t *)arg;
	unsigned int *data;
	int status;
	struct grtm_ioc_config *cfg;
	struct grtm_ioc_hw_status *hwregs;
	struct grtm_list *chain;
	struct grtm_frame *curr;
	struct grtm_ioc_hw *hwimpl;
	struct grtm_ioc_stats *stats;
	int num,ret;

	FUNCDBG();

	/* Map minor number to the driver-manager device of this core */
	if ( drvmgr_get_dev(&grtm_drv_info.general, minor, &dev) ) {
		return RTEMS_INVALID_NUMBER;
	}
	pDev = (struct grtm_priv *)dev->priv;

	if (!ioarg)
		return RTEMS_INVALID_NAME;

	/* data carries either a scalar cast to a pointer or a pointer to a
	 * user structure, depending on the command below.
	 */
	data = ioarg->buffer;
	ioarg->ioctl_return = 0;
	switch(ioarg->command) {
	case GRTM_IOC_START:
		if ( pDev->running ) {
			return RTEMS_RESOURCE_IN_USE; /* EBUSY */
		}
		if ( (status=grtm_start(pDev)) != RTEMS_SUCCESSFUL ){
			return status;
		}
		/* Register ISR & Enable interrupt.
		 * NOTE(review): return value of drvmgr_interrupt_register is
		 * not checked - a failed registration goes unnoticed; confirm
		 * whether this is acceptable for this BSP.
		 */
		drvmgr_interrupt_register(dev, 0, "grtm", grtm_interrupt, pDev);

		/* Read and write are now open... */
		break;

	case GRTM_IOC_STOP:
		if ( !pDev->running ) {
			return RTEMS_RESOURCE_IN_USE;
		}

		/* Disable interrupts */
		drvmgr_interrupt_unregister(dev, 0, grtm_interrupt, pDev);
		grtm_stop(pDev);
		pDev->running = 0;
		break;

	case GRTM_IOC_ISSTARTED:
		/* Success only when the driver is in started mode */
		if ( !pDev->running ) {
			return RTEMS_RESOURCE_IN_USE;
		}
		break;

	case GRTM_IOC_SET_BLOCKING_MODE:
		/* Blocking mode is passed as a scalar in the buffer pointer */
		if ( (unsigned int)data > GRTM_BLKMODE_BLK ) {
			return RTEMS_INVALID_NAME;
		}
		DBG("GRTM: Set blocking mode: %d\n",(unsigned int)data);
		pDev->config.blocking = (unsigned int)data;
		break;

	case GRTM_IOC_SET_TIMEOUT:
		/* Timeout (ticks) for blocking RECLAIM, passed as a scalar */
		DBG("GRTM: Timeout: %d\n",(unsigned int)data);
		pDev->config.timeout = (rtems_interval)data;
		break;

	case GRTM_IOC_SET_CONFIG:
		cfg = (struct grtm_ioc_config *)data;
		if ( !cfg ) {
			return RTEMS_INVALID_NAME;
		}

		/* Reconfiguration is only allowed while stopped */
		if ( pDev->running ) {
			return RTEMS_RESOURCE_IN_USE;
		}

		pDev->config = *cfg;
		break;

	case GRTM_IOC_GET_STATS:
		stats = (struct grtm_ioc_stats *)data;
		if ( !stats ) {
			return RTEMS_INVALID_NAME;
		}
		memcpy(stats,&pDev->stats,sizeof(struct grtm_ioc_stats));
		break;

	case GRTM_IOC_CLR_STATS:
		memset(&pDev->stats,0,sizeof(struct grtm_ioc_stats));
		break;

	case GRTM_IOC_GET_CONFIG:
		cfg = (struct grtm_ioc_config *)data;
		if ( !cfg ) {
			return RTEMS_INVALID_NAME;
		}

		*cfg = pDev->config;
		break;

	case GRTM_IOC_GET_OCFREG:
		if ( !pDev->hw_avail.ocf ) {
			/* Hardware does not implement the OCF register */
			return RTEMS_NOT_DEFINED;
		}
		if ( !data ) {
			return RTEMS_INVALID_NAME;
		}
		/* Hand the user a pointer to the hardware OCF register */
		*(unsigned int **)data = (unsigned int *)&pDev->regs->ocf;
		break;

	case GRTM_IOC_GET_HW_IMPL:
		hwimpl = (struct grtm_ioc_hw *)data;
		if ( !hwimpl ) {
			return RTEMS_INVALID_NAME;
		}
		*hwimpl = pDev->hw_avail;
		break;

	case GRTM_IOC_GET_HW_STATUS:
		hwregs = (struct grtm_ioc_hw_status *)data;
		if ( !hwregs ) {
			return RTEMS_INVALID_NAME;
		}
		/* We disable interrupt in order to get a snapshot of the registers */
/* TODO: implement hwregs */
		break;

	/* Put a chain of frames at the back of the "Ready frames" queue. This
	 * triggers the driver to put frames from the Ready queue into unused
	 * available descriptors. (Ready -> Scheduled)
	 */
	case GRTM_IOC_SEND:
		if ( !pDev->running ){
			return RTEMS_RESOURCE_IN_USE;
		}

		/* Get pointer to frame chain wished to be sent */
		chain = (struct grtm_list *)ioarg->buffer;
		if ( !chain ){
			/* No new frames to send ==> just trigger hardware
			 * to send previously made ready frames to be sent.
			 * If someone else is processing the DMA we ignore the
			 * request.
			 */
			if (grtm_request_txlock(pDev, 0)) {
				grtm_tx_process(pDev);
				grtm_release_txlock(pDev);
			}
			break;
		}
		if ( !chain->tail || !chain->head ){
			return RTEMS_INVALID_NAME;
		}

		DBG("GRTM_SEND: head: 0x%x, tail: 0x%x\n",chain->head,chain->tail);

		/* Mark ready frames unsent by clearing GRTM_FLAGS_SENT of all
		 * frames, counting them in 'num' as we go. The tail element is
		 * handled separately after the loop.
		 */
		num = 0;
		curr = chain->head;
		while(curr != chain->tail){
			curr->flags = curr->flags & ~(GRTM_FLAGS_SENT|GRRM_FLAGS_ERR);
			curr = curr->next;
			num++;
		}
		curr->flags = curr->flags & ~(GRTM_FLAGS_SENT|GRRM_FLAGS_ERR);
		num++;

		/* wait until we get the device lock */
		grtm_request_txlock(pDev, 1);

		/* 1. Put frames into ready queue
		 *    (New Frames->READY)
		 */
		if ( pDev->ready.head ){
			/* Frames already on ready queue (no free descriptors previously) ==>
			 * Put frames at end of ready queue
			 */
			pDev->ready.tail->next = chain->head;
			pDev->ready.tail = chain->tail;
			chain->tail->next = NULL;
		}else{
			/* All frames are put into the ready queue for later processing */
			pDev->ready.head = chain->head;
			pDev->ready.tail = chain->tail;
			chain->tail->next = NULL;
		}
		pDev->ready_cnt += num;	/* Added 'num' frames to ready queue */

		/* 2. SCHEDULED->SENT
		 * 3. READY->SCHEDULED
		 */
		grtm_tx_process(pDev);
		grtm_release_txlock(pDev);
		break;

	/* Take all available sent frames from the "Sent frames" queue.
	 * If no frames have been sent, the thread may get blocked if in blocking
	 * mode. The blocking mode is not available if driver is not in running mode.
	 *
	 * Note this ioctl may return success even if the driver is not in STARTED mode.
	 * This is because in case of an error (link error or similar) and the driver switch
	 * from START to STOP mode we must still be able to get our frames back.
	 *
	 * Note in case the driver fails to send a frame for some reason (link error),
	 * the sent flag is set to 0 indicating a failure.
	 *
	 */
	case GRTM_IOC_RECLAIM:
		/* Get pointer to where to place reaped chain */
		chain = (struct grtm_list *)ioarg->buffer;
		if ( !chain ){
			return RTEMS_INVALID_NAME;
		}

		/* Lock out interrupt handler */
		grtm_request_txlock(pDev, 1);

		do {
			/* Process descriptor table and populate with new
			 * buffers:
			 *  * SCHEDULED->SENT
			 *  * READY->SCHEDULED
			 */
			grtm_tx_process(pDev);

			/* Are there any frames on the sent queue waiting to be
			 * reclaimed?
			 */

			if ( !pDev->sent.head ){
				/* No frames to reclaim - no frame in sent queue.
				 * Instead we block thread until frames have been sent
				 * if in blocking mode.
				 * NOTE(review): the TX lock stays held while waiting
				 * on sem_tx; the ISR still releases the semaphore in
				 * blocking mode, but ISR descriptor processing is
				 * skipped during the wait - presumably by design.
				 */
				if ( pDev->running && pDev->config.blocking ){
					ret = rtems_semaphore_obtain(pDev->sem_tx,RTEMS_WAIT,pDev->config.timeout);
					if ( ret == RTEMS_TIMEOUT ) {
						grtm_release_txlock(pDev);
						return RTEMS_TIMEOUT;
					} else if ( ret == RTEMS_SUCCESSFUL ) {
						/* There might be frames available, go check */
						continue;
					} else {
						/* any error (driver closed, internal error etc.) */
						grtm_release_txlock(pDev);
						return RTEMS_UNSATISFIED;
					}

				}else{
					/* non-blocking mode, we quit */
					chain->head = NULL;
					chain->tail = NULL;
					/* do not lock out interrupt handler any more */
					grtm_release_txlock(pDev);
					return RTEMS_TIMEOUT;
				}
			}else{
				/* Take all sent frames from sent queue to userspace queue */
				chain->head = pDev->sent.head;
				chain->tail = pDev->sent.tail;
				chain->tail->next = NULL; /* Just to be sure */

				/* Mark no Sent */
				grtm_list_clr(&pDev->sent);
				pDev->sent_cnt = 0;

				DBG("TX_RECLAIM: head: 0x%x, tail: 0x%x\n",chain->head,chain->tail);
				break;
			}

		}while(1);

		/* do not lock out interrupt handler any more */
		grtm_release_txlock(pDev);
		break;

	default:
		return RTEMS_NOT_DEFINED;
	}
	return RTEMS_SUCCESSFUL;
}

/* TX DMA interrupt service routine: accounts error statistics, optionally
 * processes the descriptor table in interrupt context, and wakes blocked
 * RECLAIM/SEND callers.
 */
static void grtm_interrupt(void *arg)
{
	struct grtm_priv *pDev = arg;
	struct grtm_regs *regs = pDev->regs;
	unsigned int status;

	/* Clear interrupt by reading it */
	status = READ_REG(&regs->dma_status);

	/* Spurious Interrupt?
 */
	if ( !pDev->running || !status)
		return;

	/* Acknowledge the handled interrupt sources (write-back of status) */
	regs->dma_status = status;

	/* Error statistics: transfer-frame, AHB and generic TX errors */
	if ( status & GRTM_DMA_STS_TFF ){
		pDev->stats.err_transfer_frame++;
	}

	if ( status & GRTM_DMA_STS_TA ){
		pDev->stats.err_ahb++;
	}

	if ( status & GRTM_DMA_STS_TE ){
		pDev->stats.err_tx++;
	}

	if ( status & GRTM_DMA_STS_TI ){

		/* TX interrupt: process descriptors here only when configured
		 * to do so, and only if no user task currently holds the TX
		 * lock (try-lock, never spins in ISR context).
		 */
		if ( pDev->config.isr_desc_proc) {
			if (grtm_request_txlock_isr(pDev)) {
				grtm_tx_process(pDev);
				grtm_release_txlock(pDev);
			}

#if 0
			if ( (pDev->config.blocking==GRTM_BLKMODE_COMPLETE) && pDev->timeout ){
				/* Signal to thread only if enough data is available */
				if ( pDev->wait_for_frames > grtm_data_avail(pDev) ){
					/* Not enough data available */
					goto procceed_processing_interrupts;
				}

				/* Enough number of frames has been transmitted which means that
				 * the waiting thread should be woken up.
				 */
				rtems_semaphore_release(pDev->sem_tx);
			}
#endif
		}

		if ( pDev->config.blocking == GRTM_BLKMODE_BLK ) {
			/* Blocking mode */

#if 0
			/* Disable further Interrupts until handled by waiting task. */
			regs->dma_ctrl = READ_REG(&regs->dma_ctrl) & ~GRTM_DMA_CTRL_IE;
#endif

			/* Signal Semaphore to wake waiting thread in ioctl(SEND|RECLAIM) */
			rtems_semaphore_release(pDev->sem_tx);
		}

	}
#if 0
procceed_processing_interrupts:
	;
#endif
}

/* RTEMS I/O driver initialize entry point. Creates the single device
 * semaphore shared by all GRTM cores; the major/minor arguments are not
 * used here.
 */
static rtems_device_driver grtm_initialize(
	rtems_device_major_number major,
	rtems_device_minor_number unused,
	void *arg
	)
{
	/* Device Semaphore created with count = 1 */
	if ( rtems_semaphore_create(rtems_build_name('G', 'R', 'T', 'M'),
		1,
		RTEMS_FIFO|RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
		0,
		&grtm_dev_sem) != RTEMS_SUCCESSFUL ) {
		return RTEMS_INTERNAL_ERROR;
	}

	return RTEMS_SUCCESSFUL;
}
-- cgit v1.2.3