From 0c10bd5da2b9c98f7d71284c81146813db03743d Mon Sep 17 00:00:00 2001
From: Sebastian Huber <sebastian.huber@embedded-brains.de>
Date: Thu, 26 Mar 2015 14:25:44 +0100
Subject: if_dwc: Import from FreeBSD

---
 Makefile                     |    1 +
 freebsd-to-rtems.py          |    2 +
 freebsd/sys/dev/dwc/if_dwc.c | 1323 ++++++++++++++++++++++++++++++++++++++++++
 freebsd/sys/dev/dwc/if_dwc.h |  262 +++++++++
 4 files changed, 1588 insertions(+)
 create mode 100644 freebsd/sys/dev/dwc/if_dwc.c
 create mode 100644 freebsd/sys/dev/dwc/if_dwc.h

diff --git a/Makefile b/Makefile
index 59e1db0d..d8a758b2 100644
--- a/Makefile
+++ b/Makefile
@@ -419,6 +419,7 @@ LIB_C_FILES += freebsd/sys/dev/mii/ukphy.c
 LIB_C_FILES += freebsd/sys/dev/mii/ukphy_subr.c
 LIB_C_FILES += freebsd/sys/dev/tsec/if_tsec.c
 LIB_C_FILES += freebsd/sys/dev/cadence/if_cgem.c
+LIB_C_FILES += freebsd/sys/dev/dwc/if_dwc.c
 LIB_C_FILES += freebsd/sys/arm/xilinx/zy7_slcr.c
 LIB_C_FILES += freebsd/sys/dev/usb/usb_busdma.c
 LIB_C_FILES += freebsd/sys/dev/usb/usb_core.c
diff --git a/freebsd-to-rtems.py b/freebsd-to-rtems.py
index 01f78f55..b24c1496 100755
--- a/freebsd-to-rtems.py
+++ b/freebsd-to-rtems.py
@@ -1290,6 +1290,7 @@ devNet.addKernelSpaceHeaderFiles(
 		'sys/dev/tsec/if_tsec.h',
 		'sys/dev/tsec/if_tsecreg.h',
 		'sys/dev/cadence/if_cgem_hw.h',
+		'sys/dev/dwc/if_dwc.h',
 		'sys/arm/xilinx/zy7_slcr.h',
 	]
 )
@@ -1306,6 +1307,7 @@ devNet.addKernelSpaceSourceFiles(
 		'sys/dev/mii/ukphy_subr.c',
 		'sys/dev/tsec/if_tsec.c',
 		'sys/dev/cadence/if_cgem.c',
+		'sys/dev/dwc/if_dwc.c',
 		'sys/arm/xilinx/zy7_slcr.c',
 	]
 )
diff --git a/freebsd/sys/dev/dwc/if_dwc.c b/freebsd/sys/dev/dwc/if_dwc.c
new file mode 100644
index 00000000..48711a34
--- /dev/null
+++ b/freebsd/sys/dev/dwc/if_dwc.c
@@ -0,0 +1,1323 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Ethernet media access controller (EMAC)
+ * Chapter 17, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
+ *
+ * EMAC is an instance of the Synopsys DesignWare 3504-0
+ * Universal 10/100/1000 Ethernet MAC (DWC_gmac).
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#define READ4(_sc, _reg) \ + bus_read_4((_sc)->res[0], _reg) +#define WRITE4(_sc, _reg, _val) \ + bus_write_4((_sc)->res[0], _reg, _val) + +#define MAC_RESET_TIMEOUT 100 +#define WATCHDOG_TIMEOUT_SECS 5 +#define STATS_HARVEST_INTERVAL 2 +#define MII_CLK_VAL 2 + +#include + +#define DWC_LOCK(sc) mtx_lock(&(sc)->mtx) +#define DWC_UNLOCK(sc) mtx_unlock(&(sc)->mtx) +#define DWC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED); +#define DWC_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED); + +#define DDESC_TDES0_OWN (1 << 31) +#define DDESC_TDES0_TXINT (1 << 30) +#define DDESC_TDES0_TXLAST (1 << 29) +#define DDESC_TDES0_TXFIRST (1 << 28) +#define DDESC_TDES0_TXCRCDIS (1 << 27) +#define DDESC_TDES0_TXRINGEND (1 << 21) +#define DDESC_TDES0_TXCHAIN (1 << 20) + +#define DDESC_RDES0_OWN (1 << 31) +#define DDESC_RDES0_FL_MASK 0x3fff +#define DDESC_RDES0_FL_SHIFT 16 /* Frame Length */ +#define DDESC_RDES1_CHAINED (1 << 14) + +struct dwc_bufmap { + bus_dmamap_t map; + struct mbuf *mbuf; +}; + +/* + * A hardware buffer descriptor. Rx and Tx buffers have the same descriptor + * layout, but the bits in the flags field have different meanings. + */ +struct dwc_hwdesc +{ + uint32_t tdes0; + uint32_t tdes1; + uint32_t addr; /* pointer to buffer data */ + uint32_t addr_next; /* link to next descriptor */ +}; + +/* + * Driver data and defines. + */ +#define RX_DESC_COUNT 1024 +#define RX_DESC_SIZE (sizeof(struct dwc_hwdesc) * RX_DESC_COUNT) +#define TX_DESC_COUNT 1024 +#define TX_DESC_SIZE (sizeof(struct dwc_hwdesc) * TX_DESC_COUNT) + +/* + * The hardware imposes alignment restrictions on various objects involved in + * DMA transfers. These values are expressed in bytes (not bits). 
+ */ +#define DWC_DESC_RING_ALIGN 2048 + +struct dwc_softc { + struct resource *res[2]; + bus_space_tag_t bst; + bus_space_handle_t bsh; + device_t dev; + int mii_clk; + device_t miibus; + struct mii_data * mii_softc; + struct ifnet *ifp; + int if_flags; + struct mtx mtx; + void * intr_cookie; + struct callout dwc_callout; + uint8_t phy_conn_type; + uint8_t mactype; + boolean_t link_is_up; + boolean_t is_attached; + boolean_t is_detaching; + int tx_watchdog_count; + int stats_harvest_count; + + /* RX */ + bus_dma_tag_t rxdesc_tag; + bus_dmamap_t rxdesc_map; + struct dwc_hwdesc *rxdesc_ring; + bus_addr_t rxdesc_ring_paddr; + bus_dma_tag_t rxbuf_tag; + struct dwc_bufmap rxbuf_map[RX_DESC_COUNT]; + uint32_t rx_idx; + + /* TX */ + bus_dma_tag_t txdesc_tag; + bus_dmamap_t txdesc_map; + struct dwc_hwdesc *txdesc_ring; + bus_addr_t txdesc_ring_paddr; + bus_dma_tag_t txbuf_tag; + struct dwc_bufmap txbuf_map[RX_DESC_COUNT]; + uint32_t tx_idx_head; + uint32_t tx_idx_tail; + int txcount; +}; + +static struct resource_spec dwc_spec[] = { + { SYS_RES_MEMORY, 0, RF_ACTIVE }, + { SYS_RES_IRQ, 0, RF_ACTIVE }, + { -1, 0 } +}; + +static void dwc_txfinish_locked(struct dwc_softc *sc); +static void dwc_rxfinish_locked(struct dwc_softc *sc); +static void dwc_stop_locked(struct dwc_softc *sc); +static void dwc_setup_rxfilter(struct dwc_softc *sc); + +static inline uint32_t +next_rxidx(struct dwc_softc *sc, uint32_t curidx) +{ + + return ((curidx + 1) % RX_DESC_COUNT); +} + +static inline uint32_t +next_txidx(struct dwc_softc *sc, uint32_t curidx) +{ + + return ((curidx + 1) % TX_DESC_COUNT); +} + +static void +dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) +{ + + if (error != 0) + return; + *(bus_addr_t *)arg = segs[0].ds_addr; +} + +inline static uint32_t +dwc_setup_txdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr, + uint32_t len) +{ + uint32_t flags; + uint32_t nidx; + + nidx = next_txidx(sc, idx); + + /* Addr/len 0 means we're clearing the descriptor after xmit done. 
*/ + if (paddr == 0 || len == 0) { + flags = 0; + --sc->txcount; + } else { + flags = DDESC_TDES0_TXCHAIN | DDESC_TDES0_TXFIRST + | DDESC_TDES0_TXLAST | DDESC_TDES0_TXINT; + ++sc->txcount; + } + + sc->txdesc_ring[idx].addr = (uint32_t)(paddr); + sc->txdesc_ring[idx].tdes0 = flags; + sc->txdesc_ring[idx].tdes1 = len; + + if (paddr && len) { + wmb(); + sc->txdesc_ring[idx].tdes0 |= DDESC_TDES0_OWN; + wmb(); + } + + return (nidx); +} + +static int +dwc_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp) +{ + struct bus_dma_segment seg; + int error, nsegs; + struct mbuf * m; + + if ((m = m_defrag(*mp, M_NOWAIT)) == NULL) + return (ENOMEM); + *mp = m; + + error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map, + m, &seg, &nsegs, 0); + if (error != 0) { + return (ENOMEM); + } + + KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); + + bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map, + BUS_DMASYNC_PREWRITE); + + sc->txbuf_map[idx].mbuf = m; + + dwc_setup_txdesc(sc, idx, seg.ds_addr, seg.ds_len); + + return (0); +} + +static void +dwc_txstart_locked(struct dwc_softc *sc) +{ + struct ifnet *ifp; + struct mbuf *m; + int enqueued; + + DWC_ASSERT_LOCKED(sc); + + if (!sc->link_is_up) + return; + + ifp = sc->ifp; + + if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { + return; + } + + enqueued = 0; + + for (;;) { + if (sc->txcount == (TX_DESC_COUNT-1)) { + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + break; + } + + IFQ_DRV_DEQUEUE(&ifp->if_snd, m); + if (m == NULL) + break; + if (dwc_setup_txbuf(sc, sc->tx_idx_head, &m) != 0) { + IFQ_DRV_PREPEND(&ifp->if_snd, m); + break; + } + BPF_MTAP(ifp, m); + sc->tx_idx_head = next_txidx(sc, sc->tx_idx_head); + ++enqueued; + } + + if (enqueued != 0) { + WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1); + sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS; + } +} + +static void +dwc_txstart(struct ifnet *ifp) +{ + struct dwc_softc *sc = ifp->if_softc; + + DWC_LOCK(sc); + dwc_txstart_locked(sc); + DWC_UNLOCK(sc); +} + +static void +dwc_stop_locked(struct dwc_softc *sc) +{ + struct ifnet *ifp; + int reg; + + DWC_ASSERT_LOCKED(sc); + + ifp = sc->ifp; + ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + sc->tx_watchdog_count = 0; + sc->stats_harvest_count = 0; + + callout_stop(&sc->dwc_callout); + + /* Stop DMA TX */ + reg = READ4(sc, OPERATION_MODE); + reg &= ~(MODE_ST); + WRITE4(sc, OPERATION_MODE, reg); + + /* Flush TX */ + reg = READ4(sc, OPERATION_MODE); + reg |= (MODE_FTF); + WRITE4(sc, OPERATION_MODE, reg); + + /* Stop transmitters */ + reg = READ4(sc, MAC_CONFIGURATION); + reg &= ~(CONF_TE | CONF_RE); + WRITE4(sc, MAC_CONFIGURATION, reg); + + /* Stop DMA RX */ + reg = READ4(sc, OPERATION_MODE); + reg &= ~(MODE_SR); + WRITE4(sc, OPERATION_MODE, reg); +} + +static void dwc_clear_stats(struct dwc_softc *sc) +{ + int reg; + + reg = READ4(sc, MMC_CONTROL); + reg |= (MMC_CONTROL_CNTRST); + WRITE4(sc, MMC_CONTROL, reg); +} + +static void +dwc_harvest_stats(struct dwc_softc *sc) +{ + struct ifnet *ifp; + + /* We don't need to harvest too often. 
*/ + if (++sc->stats_harvest_count < STATS_HARVEST_INTERVAL) + return; + + sc->stats_harvest_count = 0; + ifp = sc->ifp; + + if_inc_counter(ifp, IFCOUNTER_IPACKETS, READ4(sc, RXFRAMECOUNT_GB)); + if_inc_counter(ifp, IFCOUNTER_IMCASTS, READ4(sc, RXMULTICASTFRAMES_G)); + if_inc_counter(ifp, IFCOUNTER_IERRORS, + READ4(sc, RXOVERSIZE_G) + READ4(sc, RXUNDERSIZE_G) + + READ4(sc, RXCRCERROR) + READ4(sc, RXALIGNMENTERROR) + + READ4(sc, RXRUNTERROR) + READ4(sc, RXJABBERERROR) + + READ4(sc, RXLENGTHERROR)); + + if_inc_counter(ifp, IFCOUNTER_OPACKETS, READ4(sc, TXFRAMECOUNT_G)); + if_inc_counter(ifp, IFCOUNTER_OMCASTS, READ4(sc, TXMULTICASTFRAMES_G)); + if_inc_counter(ifp, IFCOUNTER_OERRORS, + READ4(sc, TXOVERSIZE_G) + READ4(sc, TXEXCESSDEF) + + READ4(sc, TXCARRIERERR) + READ4(sc, TXUNDERFLOWERROR)); + + if_inc_counter(ifp, IFCOUNTER_COLLISIONS, + READ4(sc, TXEXESSCOL) + READ4(sc, TXLATECOL)); + + dwc_clear_stats(sc); +} + +static void +dwc_tick(void *arg) +{ + struct dwc_softc *sc; + struct ifnet *ifp; + int link_was_up; + + sc = arg; + + DWC_ASSERT_LOCKED(sc); + + ifp = sc->ifp; + + if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) + return; + + /* + * Typical tx watchdog. If this fires it indicates that we enqueued + * packets for output and never got a txdone interrupt for them. Maybe + * it's a missed interrupt somehow, just pretend we got one. + */ + if (sc->tx_watchdog_count > 0) { + if (--sc->tx_watchdog_count == 0) { + dwc_txfinish_locked(sc); + } + } + + /* Gather stats from hardware counters. */ + dwc_harvest_stats(sc); + + /* Check the media status. */ + link_was_up = sc->link_is_up; + mii_tick(sc->mii_softc); + if (sc->link_is_up && !link_was_up) + dwc_txstart_locked(sc); + + /* Schedule another check one second from now. */ + callout_reset(&sc->dwc_callout, hz, dwc_tick, sc); +} + +static void +dwc_init_locked(struct dwc_softc *sc) +{ + struct ifnet *ifp = sc->ifp; + int reg; + + DWC_ASSERT_LOCKED(sc); + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + return; + + ifp->if_drv_flags |= IFF_DRV_RUNNING; + + dwc_setup_rxfilter(sc); + + /* Initializa DMA and enable transmitters */ + reg = READ4(sc, OPERATION_MODE); + reg |= (MODE_TSF | MODE_OSF | MODE_FUF); + reg &= ~(MODE_RSF); + reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT); + WRITE4(sc, OPERATION_MODE, reg); + + WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT); + + /* Start DMA */ + reg = READ4(sc, OPERATION_MODE); + reg |= (MODE_ST | MODE_SR); + WRITE4(sc, OPERATION_MODE, reg); + + /* Enable transmitters */ + reg = READ4(sc, MAC_CONFIGURATION); + reg |= (CONF_JD | CONF_ACS | CONF_BE); + reg |= (CONF_TE | CONF_RE); + WRITE4(sc, MAC_CONFIGURATION, reg); + + /* + * Call mii_mediachg() which will call back into dwc_miibus_statchg() + * to set up the remaining config registers based on current media. 
+ */ + mii_mediachg(sc->mii_softc); + callout_reset(&sc->dwc_callout, hz, dwc_tick, sc); +} + +static void +dwc_init(void *if_softc) +{ + struct dwc_softc *sc = if_softc; + + DWC_LOCK(sc); + dwc_init_locked(sc); + DWC_UNLOCK(sc); +} + +inline static uint32_t +dwc_setup_rxdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr) +{ + uint32_t nidx; + + sc->rxdesc_ring[idx].addr = (uint32_t)paddr; + nidx = next_rxidx(sc, idx); + sc->rxdesc_ring[idx].addr_next = sc->rxdesc_ring_paddr + \ + (nidx * sizeof(struct dwc_hwdesc)); + sc->rxdesc_ring[idx].tdes1 = DDESC_RDES1_CHAINED | MCLBYTES; + + wmb(); + sc->rxdesc_ring[idx].tdes0 = DDESC_RDES0_OWN; + wmb(); + + return (nidx); +} + +static int +dwc_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m) +{ + struct bus_dma_segment seg; + int error, nsegs; + + m_adj(m, ETHER_ALIGN); + + error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map, + m, &seg, &nsegs, 0); + if (error != 0) { + return (error); + } + + KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); + + bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map, + BUS_DMASYNC_PREREAD); + + sc->rxbuf_map[idx].mbuf = m; + dwc_setup_rxdesc(sc, idx, seg.ds_addr); + + return (0); +} + +static struct mbuf * +dwc_alloc_mbufcl(struct dwc_softc *sc) +{ + struct mbuf *m; + + m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); + m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; + + return (m); +} + +static void +dwc_media_status(struct ifnet * ifp, struct ifmediareq *ifmr) +{ + struct dwc_softc *sc; + struct mii_data *mii; + + sc = ifp->if_softc; + mii = sc->mii_softc; + DWC_LOCK(sc); + mii_pollstat(mii); + ifmr->ifm_active = mii->mii_media_active; + ifmr->ifm_status = mii->mii_media_status; + DWC_UNLOCK(sc); +} + +static int +dwc_media_change_locked(struct dwc_softc *sc) +{ + + return (mii_mediachg(sc->mii_softc)); +} + +static int +dwc_media_change(struct ifnet * ifp) +{ + struct dwc_softc *sc; + int error; + + sc = ifp->if_softc; + + DWC_LOCK(sc); + error = dwc_media_change_locked(sc); + DWC_UNLOCK(sc); + return (error); +} + +static const uint8_t nibbletab[] = { + /* 0x0 0000 -> 0000 */ 0x0, + /* 0x1 0001 -> 1000 */ 0x8, + /* 0x2 0010 -> 0100 */ 0x4, + /* 0x3 0011 -> 1100 */ 0xc, + /* 0x4 0100 -> 0010 */ 0x2, + /* 0x5 0101 -> 1010 */ 0xa, + /* 0x6 0110 -> 0110 */ 0x6, + /* 0x7 0111 -> 1110 */ 0xe, + /* 0x8 1000 -> 0001 */ 0x1, + /* 0x9 1001 -> 1001 */ 0x9, + /* 0xa 1010 -> 0101 */ 0x5, + /* 0xb 1011 -> 1101 */ 0xd, + /* 0xc 1100 -> 0011 */ 0x3, + /* 0xd 1101 -> 1011 */ 0xb, + /* 0xe 1110 -> 0111 */ 0x7, + /* 0xf 1111 -> 1111 */ 0xf, }; + +static uint8_t +bitreverse(uint8_t x) +{ + + return (nibbletab[x & 0xf] << 4) | nibbletab[x >> 4]; +} + +static void +dwc_setup_rxfilter(struct dwc_softc *sc) +{ + struct ifmultiaddr *ifma; + struct ifnet *ifp; + uint8_t *eaddr; + uint32_t crc; + uint8_t val; + int hashbit; + int hashreg; + int ffval; + int reg; + int lo; + int hi; + + DWC_ASSERT_LOCKED(sc); + + ifp = sc->ifp; + + /* + * Set the multicast (group) filter hash. 
+ */ + if ((ifp->if_flags & IFF_ALLMULTI)) + ffval = (FRAME_FILTER_PM); + else { + ffval = (FRAME_FILTER_HMC); + if_maddr_rlock(ifp); + TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { + if (ifma->ifma_addr->sa_family != AF_LINK) + continue; + crc = ether_crc32_le(LLADDR((struct sockaddr_dl *) + ifma->ifma_addr), ETHER_ADDR_LEN); + + /* Take lower 8 bits and reverse it */ + val = bitreverse(~crc & 0xff); + hashreg = (val >> 5); + hashbit = (val & 31); + + reg = READ4(sc, HASH_TABLE_REG(hashreg)); + reg |= (1 << hashbit); + WRITE4(sc, HASH_TABLE_REG(hashreg), reg); + } + if_maddr_runlock(ifp); + } + + /* + * Set the individual address filter hash. + */ + if (ifp->if_flags & IFF_PROMISC) + ffval |= (FRAME_FILTER_PR); + + /* + * Set the primary address. + */ + eaddr = IF_LLADDR(ifp); + lo = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) | + (eaddr[3] << 24); + hi = eaddr[4] | (eaddr[5] << 8); + WRITE4(sc, MAC_ADDRESS_LOW(0), lo); + WRITE4(sc, MAC_ADDRESS_HIGH(0), hi); + WRITE4(sc, MAC_FRAME_FILTER, ffval); +} + +static int +dwc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) +{ + struct dwc_softc *sc; + struct mii_data *mii; + struct ifreq *ifr; + int mask, error; + + sc = ifp->if_softc; + ifr = (struct ifreq *)data; + + error = 0; + switch (cmd) { + case SIOCSIFFLAGS: + DWC_LOCK(sc); + if (ifp->if_flags & IFF_UP) { + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + if ((ifp->if_flags ^ sc->if_flags) & + (IFF_PROMISC | IFF_ALLMULTI)) + dwc_setup_rxfilter(sc); + } else { + if (!sc->is_detaching) + dwc_init_locked(sc); + } + } else { + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + dwc_stop_locked(sc); + } + sc->if_flags = ifp->if_flags; + DWC_UNLOCK(sc); + break; + case SIOCADDMULTI: + case SIOCDELMULTI: + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + DWC_LOCK(sc); + dwc_setup_rxfilter(sc); + DWC_UNLOCK(sc); + } + break; + case SIOCSIFMEDIA: + case SIOCGIFMEDIA: + mii = sc->mii_softc; + error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); + break; + case SIOCSIFCAP: + mask = ifp->if_capenable ^ ifr->ifr_reqcap; + if (mask & IFCAP_VLAN_MTU) { + /* No work to do except acknowledge the change took */ + ifp->if_capenable ^= IFCAP_VLAN_MTU; + } + break; + + default: + error = ether_ioctl(ifp, cmd, data); + break; + } + + return (error); +} + +static void +dwc_txfinish_locked(struct dwc_softc *sc) +{ + struct dwc_bufmap *bmap; + struct dwc_hwdesc *desc; + struct ifnet *ifp; + + DWC_ASSERT_LOCKED(sc); + + ifp = sc->ifp; + + while (sc->tx_idx_tail != sc->tx_idx_head) { + desc = &sc->txdesc_ring[sc->tx_idx_tail]; + if ((desc->tdes0 & DDESC_TDES0_OWN) != 0) + break; + bmap = &sc->txbuf_map[sc->tx_idx_tail]; + bus_dmamap_sync(sc->txbuf_tag, bmap->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->txbuf_tag, bmap->map); + m_freem(bmap->mbuf); + bmap->mbuf = NULL; + dwc_setup_txdesc(sc, sc->tx_idx_tail, 0, 0); + sc->tx_idx_tail = next_txidx(sc, sc->tx_idx_tail); + } + + /* If there are no buffers outstanding, muzzle the watchdog. 
*/ + if (sc->tx_idx_tail == sc->tx_idx_head) { + sc->tx_watchdog_count = 0; + } +} + +static void +dwc_rxfinish_locked(struct dwc_softc *sc) +{ + struct ifnet *ifp; + struct mbuf *m0; + struct mbuf *m; + int error; + int rdes0; + int idx; + int len; + + ifp = sc->ifp; + + for (;;) { + idx = sc->rx_idx; + + rdes0 = sc->rxdesc_ring[idx].tdes0; + if ((rdes0 & DDESC_RDES0_OWN) != 0) + break; + + bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc->rxbuf_tag, sc->rxbuf_map[idx].map); + + len = (rdes0 >> DDESC_RDES0_FL_SHIFT) & DDESC_RDES0_FL_MASK; + if (len != 0) { + m = sc->rxbuf_map[idx].mbuf; + m->m_pkthdr.rcvif = ifp; + m->m_pkthdr.len = len; + m->m_len = len; + if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); + + DWC_UNLOCK(sc); + (*ifp->if_input)(ifp, m); + DWC_LOCK(sc); + } else { + /* XXX Zero-length packet ? */ + } + + if ((m0 = dwc_alloc_mbufcl(sc)) != NULL) { + if ((error = dwc_setup_rxbuf(sc, idx, m0)) != 0) { + /* + * XXX Now what? + * We've got a hole in the rx ring. + */ + } + } else + if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1); + + sc->rx_idx = next_rxidx(sc, sc->rx_idx); + } +} + +static void +dwc_intr(void *arg) +{ + struct dwc_softc *sc; + uint32_t reg; + + sc = arg; + + DWC_LOCK(sc); + + reg = READ4(sc, INTERRUPT_STATUS); + if (reg) { + mii_mediachg(sc->mii_softc); + READ4(sc, SGMII_RGMII_SMII_CTRL_STATUS); + } + + reg = READ4(sc, DMA_STATUS); + if (reg & DMA_STATUS_NIS) { + if (reg & DMA_STATUS_RI) + dwc_rxfinish_locked(sc); + + if (reg & DMA_STATUS_TI) + dwc_txfinish_locked(sc); + } + + if (reg & DMA_STATUS_AIS) { + if (reg & DMA_STATUS_FBI) { + /* Fatal bus error */ + device_printf(sc->dev, + "Ethernet DMA error, restarting controller.\n"); + dwc_stop_locked(sc); + dwc_init_locked(sc); + } + } + + WRITE4(sc, DMA_STATUS, reg & DMA_STATUS_INTR_MASK); + DWC_UNLOCK(sc); +} + +static int +setup_dma(struct dwc_softc *sc) +{ + struct mbuf *m; + int error; + int nidx; + int idx; + + /* + * Set up TX descriptor ring, descriptors, and dma maps. + */ + error = bus_dma_tag_create( + bus_get_dma_tag(sc->dev), /* Parent tag. */ + DWC_DESC_RING_ALIGN, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + TX_DESC_SIZE, 1, /* maxsize, nsegments */ + TX_DESC_SIZE, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->txdesc_tag); + if (error != 0) { + device_printf(sc->dev, + "could not create TX ring DMA tag.\n"); + goto out; + } + + error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring, + BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, + &sc->txdesc_map); + if (error != 0) { + device_printf(sc->dev, + "could not allocate TX descriptor ring.\n"); + goto out; + } + + error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map, + sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr, + &sc->txdesc_ring_paddr, 0); + if (error != 0) { + device_printf(sc->dev, + "could not load TX descriptor ring map.\n"); + goto out; + } + + for (idx = 0; idx < TX_DESC_COUNT; idx++) { + sc->txdesc_ring[idx].tdes0 = DDESC_TDES0_TXCHAIN; + sc->txdesc_ring[idx].tdes1 = 0; + nidx = next_txidx(sc, idx); + sc->txdesc_ring[idx].addr_next = sc->txdesc_ring_paddr + \ + (nidx * sizeof(struct dwc_hwdesc)); + } + + error = bus_dma_tag_create( + bus_get_dma_tag(sc->dev), /* Parent tag. 
*/ + 1, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MCLBYTES, 1, /* maxsize, nsegments */ + MCLBYTES, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->txbuf_tag); + if (error != 0) { + device_printf(sc->dev, + "could not create TX ring DMA tag.\n"); + goto out; + } + + for (idx = 0; idx < TX_DESC_COUNT; idx++) { + error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT, + &sc->txbuf_map[idx].map); + if (error != 0) { + device_printf(sc->dev, + "could not create TX buffer DMA map.\n"); + goto out; + } + dwc_setup_txdesc(sc, idx, 0, 0); + } + + /* + * Set up RX descriptor ring, descriptors, dma maps, and mbufs. + */ + error = bus_dma_tag_create( + bus_get_dma_tag(sc->dev), /* Parent tag. */ + DWC_DESC_RING_ALIGN, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + RX_DESC_SIZE, 1, /* maxsize, nsegments */ + RX_DESC_SIZE, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->rxdesc_tag); + if (error != 0) { + device_printf(sc->dev, + "could not create RX ring DMA tag.\n"); + goto out; + } + + error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring, + BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, + &sc->rxdesc_map); + if (error != 0) { + device_printf(sc->dev, + "could not allocate RX descriptor ring.\n"); + goto out; + } + + error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map, + sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr, + &sc->rxdesc_ring_paddr, 0); + if (error != 0) { + device_printf(sc->dev, + "could not load RX descriptor ring map.\n"); + goto out; + } + + error = bus_dma_tag_create( + bus_get_dma_tag(sc->dev), /* Parent tag. */ + 1, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MCLBYTES, 1, /* maxsize, nsegments */ + MCLBYTES, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->rxbuf_tag); + if (error != 0) { + device_printf(sc->dev, + "could not create RX buf DMA tag.\n"); + goto out; + } + + for (idx = 0; idx < RX_DESC_COUNT; idx++) { + error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT, + &sc->rxbuf_map[idx].map); + if (error != 0) { + device_printf(sc->dev, + "could not create RX buffer DMA map.\n"); + goto out; + } + if ((m = dwc_alloc_mbufcl(sc)) == NULL) { + device_printf(sc->dev, "Could not alloc mbuf\n"); + error = ENOMEM; + goto out; + } + if ((error = dwc_setup_rxbuf(sc, idx, m)) != 0) { + device_printf(sc->dev, + "could not create new RX buffer.\n"); + goto out; + } + } + +out: + if (error != 0) + return (ENXIO); + + return (0); +} + +static int +dwc_get_hwaddr(struct dwc_softc *sc, uint8_t *hwaddr) +{ + int rnd; + int lo; + int hi; + + /* + * Try to recover a MAC address from the running hardware. If there's + * something non-zero there, assume the bootloader did the right thing + * and just use it. + * + * Otherwise, set the address to a convenient locally assigned address, + * 'bsd' + random 24 low-order bits. 'b' is 0x62, which has the locally + * assigned bit set, and the broadcast/multicast bit clear. 
+ */ + lo = READ4(sc, MAC_ADDRESS_LOW(0)); + hi = READ4(sc, MAC_ADDRESS_HIGH(0)) & 0xffff; + if ((lo != 0xffffffff) || (hi != 0xffff)) { + hwaddr[0] = (lo >> 0) & 0xff; + hwaddr[1] = (lo >> 8) & 0xff; + hwaddr[2] = (lo >> 16) & 0xff; + hwaddr[3] = (lo >> 24) & 0xff; + hwaddr[4] = (hi >> 0) & 0xff; + hwaddr[5] = (hi >> 8) & 0xff; + } else { + rnd = arc4random() & 0x00ffffff; + hwaddr[0] = 'b'; + hwaddr[1] = 's'; + hwaddr[2] = 'd'; + hwaddr[3] = rnd >> 16; + hwaddr[4] = rnd >> 8; + hwaddr[5] = rnd >> 0; + } + + return (0); +} + +static int +dwc_probe(device_t dev) +{ + + if (!ofw_bus_status_okay(dev)) + return (ENXIO); + + if (!ofw_bus_is_compatible(dev, "snps,dwmac")) + return (ENXIO); + + device_set_desc(dev, "Gigabit Ethernet Controller"); + return (BUS_PROBE_DEFAULT); +} + +static int +dwc_attach(device_t dev) +{ + uint8_t macaddr[ETHER_ADDR_LEN]; + struct dwc_softc *sc; + struct ifnet *ifp; + int error; + int reg; + int i; + + sc = device_get_softc(dev); + sc->dev = dev; + sc->mii_clk = MII_CLK_VAL; + sc->rx_idx = 0; + + sc->txcount = TX_DESC_COUNT; + + if (bus_alloc_resources(dev, dwc_spec, sc->res)) { + device_printf(dev, "could not allocate resources\n"); + return (ENXIO); + } + + /* Memory interface */ + sc->bst = rman_get_bustag(sc->res[0]); + sc->bsh = rman_get_bushandle(sc->res[0]); + + /* Read MAC before reset */ + if (dwc_get_hwaddr(sc, macaddr)) { + device_printf(sc->dev, "can't get mac\n"); + return (ENXIO); + } + + /* Reset */ + reg = READ4(sc, BUS_MODE); + reg |= (BUS_MODE_SWR); + WRITE4(sc, BUS_MODE, reg); + + for (i = 0; i < MAC_RESET_TIMEOUT; i++) { + if ((READ4(sc, BUS_MODE) & BUS_MODE_SWR) == 0) + break; + DELAY(10); + } + if (i >= MAC_RESET_TIMEOUT) { + device_printf(sc->dev, "Can't reset DWC.\n"); + return (ENXIO); + } + + reg = READ4(sc, BUS_MODE); + reg |= (BUS_MODE_EIGHTXPBL); + reg |= (BUS_MODE_PBL_BEATS_8 << BUS_MODE_PBL_SHIFT); + WRITE4(sc, BUS_MODE, reg); + + /* + * DMA must be stop while changing descriptor list addresses. + */ + reg = READ4(sc, OPERATION_MODE); + reg &= ~(MODE_ST | MODE_SR); + WRITE4(sc, OPERATION_MODE, reg); + + if (setup_dma(sc)) + return (ENXIO); + + /* Setup addresses */ + WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr); + WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr); + + mtx_init(&sc->mtx, device_get_nameunit(sc->dev), + MTX_NETWORK_LOCK, MTX_DEF); + + callout_init_mtx(&sc->dwc_callout, &sc->mtx, 0); + + /* Setup interrupt handler. */ + error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE, + NULL, dwc_intr, sc, &sc->intr_cookie); + if (error != 0) { + device_printf(dev, "could not setup interrupt handler.\n"); + return (ENXIO); + } + + /* Set up the ethernet interface. */ + sc->ifp = ifp = if_alloc(IFT_ETHER); + + ifp->if_softc = sc; + if_initname(ifp, device_get_name(dev), device_get_unit(dev)); + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; + ifp->if_capabilities = IFCAP_VLAN_MTU; + ifp->if_capenable = ifp->if_capabilities; + ifp->if_start = dwc_txstart; + ifp->if_ioctl = dwc_ioctl; + ifp->if_init = dwc_init; + IFQ_SET_MAXLEN(&ifp->if_snd, TX_DESC_COUNT - 1); + ifp->if_snd.ifq_drv_maxlen = TX_DESC_COUNT - 1; + IFQ_SET_READY(&ifp->if_snd); + ifp->if_hdrlen = sizeof(struct ether_vlan_header); + + /* Attach the mii driver. 
*/ + error = mii_attach(dev, &sc->miibus, ifp, dwc_media_change, + dwc_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, + MII_OFFSET_ANY, 0); + + if (error != 0) { + device_printf(dev, "PHY attach failed\n"); + return (ENXIO); + } + sc->mii_softc = device_get_softc(sc->miibus); + + /* All ready to run, attach the ethernet interface. */ + ether_ifattach(ifp, macaddr); + sc->is_attached = true; + + return (0); +} + +static int +dwc_miibus_read_reg(device_t dev, int phy, int reg) +{ + struct dwc_softc *sc; + uint16_t mii; + size_t cnt; + int rv = 0; + + sc = device_get_softc(dev); + + mii = ((phy & GMII_ADDRESS_PA_MASK) << GMII_ADDRESS_PA_SHIFT) + | ((reg & GMII_ADDRESS_GR_MASK) << GMII_ADDRESS_GR_SHIFT) + | (sc->mii_clk << GMII_ADDRESS_CR_SHIFT) + | GMII_ADDRESS_GB; /* Busy flag */ + + WRITE4(sc, GMII_ADDRESS, mii); + + for (cnt = 0; cnt < 1000; cnt++) { + if (!(READ4(sc, GMII_ADDRESS) & GMII_ADDRESS_GB)) { + rv = READ4(sc, GMII_DATA); + break; + } + DELAY(10); + } + + return rv; +} + +static int +dwc_miibus_write_reg(device_t dev, int phy, int reg, int val) +{ + struct dwc_softc *sc; + uint16_t mii; + size_t cnt; + + sc = device_get_softc(dev); + + mii = ((phy & GMII_ADDRESS_PA_MASK) << GMII_ADDRESS_PA_SHIFT) + | ((reg & GMII_ADDRESS_GR_MASK) << GMII_ADDRESS_GR_SHIFT) + | (sc->mii_clk << GMII_ADDRESS_CR_SHIFT) + | GMII_ADDRESS_GB | GMII_ADDRESS_GW; + + WRITE4(sc, GMII_DATA, val); + WRITE4(sc, GMII_ADDRESS, mii); + + for (cnt = 0; cnt < 1000; cnt++) { + if (!(READ4(sc, GMII_ADDRESS) & GMII_ADDRESS_GB)) { + break; + } + DELAY(10); + } + + return (0); +} + +static void +dwc_miibus_statchg(device_t dev) +{ + struct dwc_softc *sc; + struct mii_data *mii; + int reg; + + /* + * Called by the MII bus driver when the PHY establishes + * link to set the MAC interface registers. + */ + + sc = device_get_softc(dev); + + DWC_ASSERT_LOCKED(sc); + + mii = sc->mii_softc; + + if (mii->mii_media_status & IFM_ACTIVE) + sc->link_is_up = true; + else + sc->link_is_up = false; + + reg = READ4(sc, MAC_CONFIGURATION); + switch (IFM_SUBTYPE(mii->mii_media_active)) { + case IFM_1000_T: + case IFM_1000_SX: + reg &= ~(CONF_FES | CONF_PS); + break; + case IFM_100_TX: + reg |= (CONF_FES | CONF_PS); + break; + case IFM_10_T: + reg &= ~(CONF_FES); + reg |= (CONF_PS); + break; + case IFM_NONE: + sc->link_is_up = false; + return; + default: + sc->link_is_up = false; + device_printf(dev, "Unsupported media %u\n", + IFM_SUBTYPE(mii->mii_media_active)); + return; + } + if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) + reg |= (CONF_DM); + else + reg &= ~(CONF_DM); + WRITE4(sc, MAC_CONFIGURATION, reg); +} + +static device_method_t dwc_methods[] = { + DEVMETHOD(device_probe, dwc_probe), + DEVMETHOD(device_attach, dwc_attach), + + /* MII Interface */ + DEVMETHOD(miibus_readreg, dwc_miibus_read_reg), + DEVMETHOD(miibus_writereg, dwc_miibus_write_reg), + DEVMETHOD(miibus_statchg, dwc_miibus_statchg), + + { 0, 0 } +}; + +static driver_t dwc_driver = { + "dwc", + dwc_methods, + sizeof(struct dwc_softc), +}; + +static devclass_t dwc_devclass; + +DRIVER_MODULE(dwc, simplebus, dwc_driver, dwc_devclass, 0, 0); +DRIVER_MODULE(miibus, dwc, miibus_driver, miibus_devclass, 0, 0); + +MODULE_DEPEND(dwc, ether, 1, 1, 1); +MODULE_DEPEND(dwc, miibus, 1, 1, 1); diff --git a/freebsd/sys/dev/dwc/if_dwc.h b/freebsd/sys/dev/dwc/if_dwc.h new file mode 100644 index 00000000..918ef008 --- /dev/null +++ b/freebsd/sys/dev/dwc/if_dwc.h @@ -0,0 +1,262 @@ +/*- + * Copyright (c) 2014 Ruslan Bukin + * All rights reserved. 
+ * + * This software was developed by SRI International and the University of + * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) + * ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +/* + * Register names were taken almost as is from the documentation. + */ + +#define MAC_CONFIGURATION 0x0 +#define CONF_JD (1 << 22) /* jabber timer disable */ +#define CONF_BE (1 << 21) /* Frame Burst Enable */ +#define CONF_PS (1 << 15) /* GMII/MII */ +#define CONF_FES (1 << 14) /* MII speed select */ +#define CONF_DM (1 << 11) /* Full Duplex Enable */ +#define CONF_ACS (1 << 7) +#define CONF_TE (1 << 3) +#define CONF_RE (1 << 2) +#define MAC_FRAME_FILTER 0x4 +#define FRAME_FILTER_RA (1 << 31) /* Receive All */ +#define FRAME_FILTER_HPF (1 << 10) /* Hash or Perfect Filter */ +#define FRAME_FILTER_PM (1 << 4) /* Pass multicast */ +#define FRAME_FILTER_HMC (1 << 2) +#define FRAME_FILTER_HUC (1 << 1) +#define FRAME_FILTER_PR (1 << 0) /* All Incoming Frames */ +#define GMII_ADDRESS 0x10 +#define GMII_ADDRESS_PA_MASK 0x1f /* Phy device */ +#define GMII_ADDRESS_PA_SHIFT 11 +#define GMII_ADDRESS_GR_MASK 0x1f /* Phy register */ +#define GMII_ADDRESS_GR_SHIFT 6 +#define GMII_ADDRESS_CR_MASK 0xf +#define GMII_ADDRESS_CR_SHIFT 2 /* Clock */ +#define GMII_ADDRESS_GW (1 << 1) /* Write operation */ +#define GMII_ADDRESS_GB (1 << 0) /* Busy */ +#define GMII_DATA 0x14 +#define FLOW_CONTROL 0x18 +#define GMAC_VLAN_TAG 0x1C +#define VERSION 0x20 +#define DEBUG 0x24 +#define LPI_CONTROL_STATUS 0x30 +#define LPI_TIMERS_CONTROL 0x34 +#define INTERRUPT_STATUS 0x38 +#define INTERRUPT_MASK 0x3C +#define MAC_ADDRESS_HIGH(n) ((n > 15 ? 0x800 : 0x40) + 0x8 * n) +#define MAC_ADDRESS_LOW(n) ((n > 15 ? 
0x804 : 0x44) + 0x8 * n) + +#define SGMII_RGMII_SMII_CTRL_STATUS 0xD8 +#define MMC_CONTROL 0x100 +#define MMC_CONTROL_CNTRST (1 << 0) +#define MMC_RECEIVE_INTERRUPT 0x104 +#define MMC_TRANSMIT_INTERRUPT 0x108 +#define MMC_RECEIVE_INTERRUPT_MASK 0x10C +#define MMC_TRANSMIT_INTERRUPT_MASK 0x110 +#define TXOCTETCOUNT_GB 0x114 +#define TXFRAMECOUNT_GB 0x118 +#define TXBROADCASTFRAMES_G 0x11C +#define TXMULTICASTFRAMES_G 0x120 +#define TX64OCTETS_GB 0x124 +#define TX65TO127OCTETS_GB 0x128 +#define TX128TO255OCTETS_GB 0x12C +#define TX256TO511OCTETS_GB 0x130 +#define TX512TO1023OCTETS_GB 0x134 +#define TX1024TOMAXOCTETS_GB 0x138 +#define TXUNICASTFRAMES_GB 0x13C +#define TXMULTICASTFRAMES_GB 0x140 +#define TXBROADCASTFRAMES_GB 0x144 +#define TXUNDERFLOWERROR 0x148 +#define TXSINGLECOL_G 0x14C +#define TXMULTICOL_G 0x150 +#define TXDEFERRED 0x154 +#define TXLATECOL 0x158 +#define TXEXESSCOL 0x15C +#define TXCARRIERERR 0x160 +#define TXOCTETCNT 0x164 +#define TXFRAMECOUNT_G 0x168 +#define TXEXCESSDEF 0x16C +#define TXPAUSEFRAMES 0x170 +#define TXVLANFRAMES_G 0x174 +#define TXOVERSIZE_G 0x178 +#define RXFRAMECOUNT_GB 0x180 +#define RXOCTETCOUNT_GB 0x184 +#define RXOCTETCOUNT_G 0x188 +#define RXBROADCASTFRAMES_G 0x18C +#define RXMULTICASTFRAMES_G 0x190 +#define RXCRCERROR 0x194 +#define RXALIGNMENTERROR 0x198 +#define RXRUNTERROR 0x19C +#define RXJABBERERROR 0x1A0 +#define RXUNDERSIZE_G 0x1A4 +#define RXOVERSIZE_G 0x1A8 +#define RX64OCTETS_GB 0x1AC +#define RX65TO127OCTETS_GB 0x1B0 +#define RX128TO255OCTETS_GB 0x1B4 +#define RX256TO511OCTETS_GB 0x1B8 +#define RX512TO1023OCTETS_GB 0x1BC +#define RX1024TOMAXOCTETS_GB 0x1C0 +#define RXUNICASTFRAMES_G 0x1C4 +#define RXLENGTHERROR 0x1C8 +#define RXOUTOFRANGETYPE 0x1CC +#define RXPAUSEFRAMES 0x1D0 +#define RXFIFOOVERFLOW 0x1D4 +#define RXVLANFRAMES_GB 0x1D8 +#define RXWATCHDOGERROR 0x1DC +#define RXRCVERROR 0x1E0 +#define RXCTRLFRAMES_G 0x1E4 +#define MMC_IPC_RECEIVE_INT_MASK 0x200 +#define MMC_IPC_RECEIVE_INT 0x208 +#define RXIPV4_GD_FRMS 0x210 +#define RXIPV4_HDRERR_FRMS 0x214 +#define RXIPV4_NOPAY_FRMS 0x218 +#define RXIPV4_FRAG_FRMS 0x21C +#define RXIPV4_UDSBL_FRMS 0x220 +#define RXIPV6_GD_FRMS 0x224 +#define RXIPV6_HDRERR_FRMS 0x228 +#define RXIPV6_NOPAY_FRMS 0x22C +#define RXUDP_GD_FRMS 0x230 +#define RXUDP_ERR_FRMS 0x234 +#define RXTCP_GD_FRMS 0x238 +#define RXTCP_ERR_FRMS 0x23C +#define RXICMP_GD_FRMS 0x240 +#define RXICMP_ERR_FRMS 0x244 +#define RXIPV4_GD_OCTETS 0x250 +#define RXIPV4_HDRERR_OCTETS 0x254 +#define RXIPV4_NOPAY_OCTETS 0x258 +#define RXIPV4_FRAG_OCTETS 0x25C +#define RXIPV4_UDSBL_OCTETS 0x260 +#define RXIPV6_GD_OCTETS 0x264 +#define RXIPV6_HDRERR_OCTETS 0x268 +#define RXIPV6_NOPAY_OCTETS 0x26C +#define RXUDP_GD_OCTETS 0x270 +#define RXUDP_ERR_OCTETS 0x274 +#define RXTCP_GD_OCTETS 0x278 +#define RXTCPERROCTETS 0x27C +#define RXICMP_GD_OCTETS 0x280 +#define RXICMP_ERR_OCTETS 0x284 +#define L3_L4_CONTROL0 0x400 +#define LAYER4_ADDRESS0 0x404 +#define LAYER3_ADDR0_REG0 0x410 +#define LAYER3_ADDR1_REG0 0x414 +#define LAYER3_ADDR2_REG0 0x418 +#define LAYER3_ADDR3_REG0 0x41C +#define L3_L4_CONTROL1 0x430 +#define LAYER4_ADDRESS1 0x434 +#define LAYER3_ADDR0_REG1 0x440 +#define LAYER3_ADDR1_REG1 0x444 +#define LAYER3_ADDR2_REG1 0x448 +#define LAYER3_ADDR3_REG1 0x44C +#define L3_L4_CONTROL2 0x460 +#define LAYER4_ADDRESS2 0x464 +#define LAYER3_ADDR0_REG2 0x470 +#define LAYER3_ADDR1_REG2 0x474 +#define LAYER3_ADDR2_REG2 0x478 +#define LAYER3_ADDR3_REG2 0x47C +#define L3_L4_CONTROL3 0x490 +#define LAYER4_ADDRESS3 0x494 +#define LAYER3_ADDR0_REG3 
0x4A0 +#define LAYER3_ADDR1_REG3 0x4A4 +#define LAYER3_ADDR2_REG3 0x4A8 +#define LAYER3_ADDR3_REG3 0x4AC +#define HASH_TABLE_REG(n) 0x500 + (0x4 * n) +#define VLAN_INCL_REG 0x584 +#define VLAN_HASH_TABLE_REG 0x588 +#define TIMESTAMP_CONTROL 0x700 +#define SUB_SECOND_INCREMENT 0x704 +#define SYSTEM_TIME_SECONDS 0x708 +#define SYSTEM_TIME_NANOSECONDS 0x70C +#define SYSTEM_TIME_SECONDS_UPDATE 0x710 +#define SYSTEM_TIME_NANOSECONDS_UPDATE 0x714 +#define TIMESTAMP_ADDEND 0x718 +#define TARGET_TIME_SECONDS 0x71C +#define TARGET_TIME_NANOSECONDS 0x720 +#define SYSTEM_TIME_HIGHER_WORD_SECONDS 0x724 +#define TIMESTAMP_STATUS 0x728 +#define PPS_CONTROL 0x72C +#define AUXILIARY_TIMESTAMP_NANOSECONDS 0x730 +#define AUXILIARY_TIMESTAMP_SECONDS 0x734 +#define PPS0_INTERVAL 0x760 +#define PPS0_WIDTH 0x764 + +/* DMA */ +#define BUS_MODE 0x1000 +#define BUS_MODE_EIGHTXPBL (1 << 24) /* Multiplies PBL by 8 */ +#define BUS_MODE_PBL_SHIFT 8 /* Single block transfer size */ +#define BUS_MODE_PBL_BEATS_8 8 +#define BUS_MODE_SWR (1 << 0) /* Reset */ +#define TRANSMIT_POLL_DEMAND 0x1004 +#define RECEIVE_POLL_DEMAND 0x1008 +#define RX_DESCR_LIST_ADDR 0x100C +#define TX_DESCR_LIST_ADDR 0x1010 +#define DMA_STATUS 0x1014 +#define DMA_STATUS_NIS (1 << 16) +#define DMA_STATUS_AIS (1 << 15) +#define DMA_STATUS_FBI (1 << 13) +#define DMA_STATUS_RI (1 << 6) +#define DMA_STATUS_TI (1 << 0) +#define DMA_STATUS_INTR_MASK 0x1ffff +#define OPERATION_MODE 0x1018 +#define MODE_RSF (1 << 25) /* RX Full Frame */ +#define MODE_TSF (1 << 21) /* TX Full Frame */ +#define MODE_FTF (1 << 20) /* Flush TX FIFO */ +#define MODE_ST (1 << 13) /* Start DMA TX */ +#define MODE_FUF (1 << 6) /* TX frames < 64bytes */ +#define MODE_RTC_LEV32 0x1 +#define MODE_RTC_SHIFT 3 +#define MODE_OSF (1 << 2) /* Process Second frame */ +#define MODE_SR (1 << 1) /* Start DMA RX */ +#define INTERRUPT_ENABLE 0x101C +#define INT_EN_NIE (1 << 16) /* Normal/Summary */ +#define INT_EN_AIE (1 << 15) /* Abnormal/Summary */ +#define INT_EN_ERE (1 << 14) /* Early receive */ +#define INT_EN_FBE (1 << 13) /* Fatal bus error */ +#define INT_EN_ETE (1 << 10) /* Early transmit */ +#define INT_EN_RWE (1 << 9) /* Receive watchdog */ +#define INT_EN_RSE (1 << 8) /* Receive stopped */ +#define INT_EN_RUE (1 << 7) /* Recv buf unavailable */ +#define INT_EN_RIE (1 << 6) /* Receive interrupt */ +#define INT_EN_UNE (1 << 5) /* Tx underflow */ +#define INT_EN_OVE (1 << 4) /* Receive overflow */ +#define INT_EN_TJE (1 << 3) /* Transmit jabber */ +#define INT_EN_TUE (1 << 2) /* Tx. buf unavailable */ +#define INT_EN_TSE (1 << 1) /* Transmit stopped */ +#define INT_EN_TIE (1 << 0) /* Transmit interrupt */ +#define INT_EN_DEFAULT (INT_EN_TIE|INT_EN_RIE| \ + INT_EN_NIE|INT_EN_AIE| \ + INT_EN_FBE|INT_EN_UNE) + +#define MISSED_FRAMEBUF_OVERFLOW_CNTR 0x1020 +#define RECEIVE_INT_WATCHDOG_TMR 0x1024 +#define AXI_BUS_MODE 0x1028 +#define AHB_OR_AXI_STATUS 0x102C +#define CURRENT_HOST_TRANSMIT_DESCR 0x1048 +#define CURRENT_HOST_RECEIVE_DESCR 0x104C +#define CURRENT_HOST_TRANSMIT_BUF_ADDR 0x1050 +#define CURRENT_HOST_RECEIVE_BUF_ADDR 0x1054 +#define HW_FEATURE 0x1058 -- cgit v1.2.3
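
Two small standalone C sketches follow for readers studying the imported driver; neither is part of the FreeBSD sources above.

The multicast filter setup in dwc_setup_rxfilter() is compact but non-obvious: each group address is hashed with ether_crc32_le(), the low CRC byte is inverted and bit-reversed, and the top three bits of that value select one of the eight HASH_TABLE_REG(n) registers while the low five bits select a bit inside it. The sketch below reproduces that mapping in plain userspace C; crc32_le() is a local stand-in assumed to behave like the kernel's ether_crc32_le() (reflected CRC-32, polynomial 0xedb88320, no final inversion).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in assumed equivalent to the kernel's ether_crc32_le(). */
static uint32_t
crc32_le(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xffffffff;

	for (size_t i = 0; i < len; i++) {
		uint8_t data = buf[i];

		for (int bit = 0; bit < 8; bit++, data >>= 1) {
			uint32_t carry = (crc ^ data) & 1;

			crc >>= 1;
			if (carry)
				crc ^= 0xedb88320;
		}
	}
	return (crc);
}

/* Same result as the nibbletab-based bitreverse() in if_dwc.c. */
static uint8_t
bitreverse(uint8_t x)
{

	x = (uint8_t)((x & 0xf0) >> 4 | (x & 0x0f) << 4);
	x = (uint8_t)((x & 0xcc) >> 2 | (x & 0x33) << 2);
	x = (uint8_t)((x & 0xaa) >> 1 | (x & 0x55) << 1);
	return (x);
}

int
main(void)
{
	/* Example group address: the IPv4 all-hosts multicast MAC. */
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = crc32_le(mac, sizeof(mac));
	uint8_t val = bitreverse((uint8_t)(~crc & 0xff));
	int hashreg = val >> 5;		/* which HASH_TABLE_REG(n), n = 0..7 */
	int hashbit = val & 31;		/* which bit inside that register */

	printf("crc 0x%08x -> set bit %d in HASH_TABLE_REG(%d)\n",
	    (unsigned)crc, hashbit, hashreg);
	return (0);
}

The driver then ORs (1 << hashbit) into the selected hash-table register, exactly as the loop in dwc_setup_rxfilter() does.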
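The comment in dwc_get_hwaddr() explains the fallback station address: if the bootloader left MAC_ADDRESS_LOW(0)/MAC_ADDRESS_HIGH(0) at their all-ones reset value, the driver builds a locally administered address from 'b' 's' 'd' plus 24 random bits ('b' is 0x62, so the locally-administered bit is set and the multicast bit is clear). The sketch below mirrors that byte layout with the two register values passed in as plain integers rather than read via READ4(); rand() is only a placeholder for the driver's arc4random().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Mirror of dwc_get_hwaddr(): the low register holds bytes 0-3 of the
 * station address, the high register bytes 4-5.  All-ones means nothing
 * was programmed, so a 'b' 's' 'd' + random address is generated.
 */
static void
derive_hwaddr(uint32_t lo, uint32_t hi, uint8_t hwaddr[6])
{

	hi &= 0xffff;
	if (lo != 0xffffffff || hi != 0xffff) {
		hwaddr[0] = (lo >> 0) & 0xff;
		hwaddr[1] = (lo >> 8) & 0xff;
		hwaddr[2] = (lo >> 16) & 0xff;
		hwaddr[3] = (lo >> 24) & 0xff;
		hwaddr[4] = (hi >> 0) & 0xff;
		hwaddr[5] = (hi >> 8) & 0xff;
	} else {
		uint32_t rnd = (uint32_t)rand() & 0x00ffffff; /* arc4random() in the driver */

		hwaddr[0] = 'b';	/* 0x62: locally administered, not multicast */
		hwaddr[1] = 's';
		hwaddr[2] = 'd';
		hwaddr[3] = rnd >> 16;
		hwaddr[4] = rnd >> 8;
		hwaddr[5] = rnd >> 0;
	}
}

int
main(void)
{
	uint8_t addr[6];

	/* Register values as they read back from an unprogrammed MAC. */
	derive_hwaddr(0xffffffff, 0xffff, addr);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	    addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return (0);
}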