From 70bb42ba19d2cae1a3a8cff226dc4a01bff592f5 Mon Sep 17 00:00:00 2001 From: Sebastian Huber Date: Wed, 13 Nov 2013 10:24:18 +0100 Subject: if_tsec: Add from FreeBSD --- Makefile | 1 + freebsd-to-rtems.py | 4 + freebsd/sys/dev/ofw/openfirm.h | 144 +++ freebsd/sys/dev/tsec/if_tsec.c | 1926 ++++++++++++++++++++++++++++++++ freebsd/sys/dev/tsec/if_tsec.h | 375 +++++++ freebsd/sys/dev/tsec/if_tsecreg.h | 390 +++++++ rtemsbsd/include/machine/ofw_machdep.h | 37 + 7 files changed, 2877 insertions(+) create mode 100644 freebsd/sys/dev/ofw/openfirm.h create mode 100644 freebsd/sys/dev/tsec/if_tsec.c create mode 100644 freebsd/sys/dev/tsec/if_tsec.h create mode 100644 freebsd/sys/dev/tsec/if_tsecreg.h create mode 100644 rtemsbsd/include/machine/ofw_machdep.h diff --git a/Makefile b/Makefile index 1181ab6a..10c45b95 100644 --- a/Makefile +++ b/Makefile @@ -414,6 +414,7 @@ LIB_C_FILES += freebsd/sys/dev/mii/mii_bitbang.c LIB_C_FILES += freebsd/sys/dev/mii/mii_physubr.c LIB_C_FILES += freebsd/sys/dev/mii/icsphy.c LIB_C_FILES += freebsd/sys/dev/mii/brgphy.c +LIB_C_FILES += freebsd/sys/dev/tsec/if_tsec.c LIB_C_FILES += freebsd/sys/dev/usb/usb_busdma.c LIB_C_FILES += freebsd/sys/dev/usb/usb_core.c LIB_C_FILES += freebsd/sys/dev/usb/usb_debug.c diff --git a/freebsd-to-rtems.py b/freebsd-to-rtems.py index b29bcb19..b440d73d 100755 --- a/freebsd-to-rtems.py +++ b/freebsd-to-rtems.py @@ -1179,6 +1179,9 @@ devNet.addHeaderFiles( 'sys/net/if_types.h', 'sys/net/if_var.h', 'sys/net/vnet.h', + 'sys/dev/ofw/openfirm.h', + 'sys/dev/tsec/if_tsec.h', + 'sys/dev/tsec/if_tsecreg.h', ] ) devNet.addSourceFiles( @@ -1188,6 +1191,7 @@ devNet.addSourceFiles( 'sys/dev/mii/mii_physubr.c', 'sys/dev/mii/icsphy.c', 'sys/dev/mii/brgphy.c', + 'sys/dev/tsec/if_tsec.c', ] ) diff --git a/freebsd/sys/dev/ofw/openfirm.h b/freebsd/sys/dev/ofw/openfirm.h new file mode 100644 index 00000000..18b2694c --- /dev/null +++ b/freebsd/sys/dev/ofw/openfirm.h @@ -0,0 +1,144 @@ +/* $NetBSD: openfirm.h,v 1.1 1998/05/15 10:16:00 tsubai Exp $ */ + +/*- + * Copyright (C) 1995, 1996 Wolfgang Solfrank. + * Copyright (C) 1995, 1996 TooLs GmbH. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by TooLs GmbH. + * 4. The name of TooLs GmbH may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* + * Copyright (C) 2000 Benno Rice. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _DEV_OPENFIRM_H_ +#define _DEV_OPENFIRM_H_ + +#include + +/* + * Prototypes for Open Firmware Interface Routines + */ + +typedef uint32_t ihandle_t; +typedef uint32_t phandle_t; +typedef uint32_t pcell_t; + +#ifdef _KERNEL +#include + +#include + +MALLOC_DECLARE(M_OFWPROP); + +/* + * Open Firmware interface initialization. OF_install installs the named + * interface as the Open Firmware access mechanism, OF_init initializes it. 
+ */ + +boolean_t OF_install(char *name, int prio); +int OF_init(void *cookie); + +/* + * Known Open Firmware interface names + */ + +#define OFW_STD_DIRECT "ofw_std" /* Standard OF interface */ +#define OFW_STD_REAL "ofw_real" /* Real-mode OF interface */ +#define OFW_STD_32BIT "ofw_32bit" /* 32-bit OF interface */ +#define OFW_FDT "ofw_fdt" /* Flattened Device Tree */ + +/* Generic functions */ +int OF_test(const char *name); +void OF_printf(const char *fmt, ...); + +/* Device tree functions */ +phandle_t OF_peer(phandle_t node); +phandle_t OF_child(phandle_t node); +phandle_t OF_parent(phandle_t node); +ssize_t OF_getproplen(phandle_t node, const char *propname); +ssize_t OF_getprop(phandle_t node, const char *propname, void *buf, + size_t len); +ssize_t OF_searchprop(phandle_t node, const char *propname, void *buf, + size_t len); +ssize_t OF_getprop_alloc(phandle_t node, const char *propname, + int elsz, void **buf); +int OF_nextprop(phandle_t node, const char *propname, char *buf, + size_t len); +int OF_setprop(phandle_t node, const char *name, const void *buf, + size_t len); +ssize_t OF_canon(const char *path, char *buf, size_t len); +phandle_t OF_finddevice(const char *path); +ssize_t OF_package_to_path(phandle_t node, char *buf, size_t len); + +/* Device I/O functions */ +ihandle_t OF_open(const char *path); +void OF_close(ihandle_t instance); +ssize_t OF_read(ihandle_t instance, void *buf, size_t len); +ssize_t OF_write(ihandle_t instance, const void *buf, size_t len); +int OF_seek(ihandle_t instance, uint64_t where); + +phandle_t OF_instance_to_package(ihandle_t instance); +ssize_t OF_instance_to_path(ihandle_t instance, char *buf, size_t len); +int OF_call_method(const char *method, ihandle_t instance, + int nargs, int nreturns, ...); + +/* Memory functions */ +void *OF_claim(void *virtrequest, size_t size, u_int align); +void OF_release(void *virt, size_t size); + +/* Control transfer functions */ +void OF_enter(void); +void OF_exit(void) __attribute__((noreturn)); + +/* User interface functions */ +int OF_interpret(const char *cmd, int nreturns, ...); + +#endif /* _KERNEL */ +#endif /* _DEV_OPENFIRM_H_ */ diff --git a/freebsd/sys/dev/tsec/if_tsec.c b/freebsd/sys/dev/tsec/if_tsec.c new file mode 100644 index 00000000..c1da1b8b --- /dev/null +++ b/freebsd/sys/dev/tsec/if_tsec.c @@ -0,0 +1,1926 @@ +#include + +/*- + * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski + * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver. + */ +#include +__FBSDID("$FreeBSD$"); + +#ifdef HAVE_KERNEL_OPTION_HEADERS +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include + +#include +#include + +static int tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, + bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr, + const char *dname); +static void tsec_dma_ctl(struct tsec_softc *sc, int state); +static int tsec_encap(struct tsec_softc *sc, struct mbuf *m_head, + int fcb_inserted); +static void tsec_free_dma(struct tsec_softc *sc); +static void tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr); +static int tsec_ifmedia_upd(struct ifnet *ifp); +static void tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); +static int tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, + struct mbuf **mbufp, uint32_t *paddr); +static void tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, + int nseg, int error); +static void tsec_intrs_ctl(struct tsec_softc *sc, int state); +static void tsec_init(void *xsc); +static void tsec_init_locked(struct tsec_softc *sc); +static int tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data); +static void tsec_reset_mac(struct tsec_softc *sc); +static void tsec_setfilter(struct tsec_softc *sc); +static void tsec_set_mac_address(struct tsec_softc *sc); +static void tsec_start(struct ifnet *ifp); +static void tsec_start_locked(struct ifnet *ifp); +static void tsec_stop(struct tsec_softc *sc); +static void tsec_tick(void *arg); +static void tsec_watchdog(struct tsec_softc *sc); +static void tsec_add_sysctls(struct tsec_softc *sc); +static int tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS); +static int tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS); +static void tsec_set_rxic(struct tsec_softc *sc); +static void tsec_set_txic(struct tsec_softc *sc); +static int tsec_receive_intr_locked(struct tsec_softc *sc, int count); +static void tsec_transmit_intr_locked(struct tsec_softc *sc); +static void tsec_error_intr_locked(struct tsec_softc *sc, int count); +static void tsec_offload_setup(struct tsec_softc *sc); +static void tsec_offload_process_frame(struct tsec_softc *sc, + struct mbuf *m); +static void tsec_setup_multicast(struct tsec_softc *sc); +static int tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu); + +devclass_t tsec_devclass; +DRIVER_MODULE(miibus, tsec, miibus_driver, miibus_devclass, 0, 0); +MODULE_DEPEND(tsec, ether, 1, 1, 1); +MODULE_DEPEND(tsec, miibus, 1, 1, 1); + +int +tsec_attach(struct tsec_softc *sc) +{ + uint8_t hwaddr[ETHER_ADDR_LEN]; + struct ifnet *ifp; + bus_dmamap_t *map_ptr; + bus_dmamap_t **map_pptr; + int error = 0; + int i; + + /* Reset all TSEC counters */ + TSEC_TX_RX_COUNTERS_INIT(sc); + + /* Stop DMA engine if enabled by firmware */ + 
tsec_dma_ctl(sc, 0); + + /* Reset MAC */ + tsec_reset_mac(sc); + + /* Disable interrupts for now */ + tsec_intrs_ctl(sc, 0); + + /* Configure defaults for interrupts coalescing */ + sc->rx_ic_time = 768; + sc->rx_ic_count = 16; + sc->tx_ic_time = 768; + sc->tx_ic_count = 16; + tsec_set_rxic(sc); + tsec_set_txic(sc); + tsec_add_sysctls(sc); + + /* Allocate a busdma tag and DMA safe memory for TX descriptors. */ + error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag, + &sc->tsec_tx_dmap, sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC, + (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX"); + + if (error) { + tsec_detach(sc); + return (ENXIO); + } + + /* Allocate a busdma tag and DMA safe memory for RX descriptors. */ + error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag, + &sc->tsec_rx_dmap, sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC, + (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX"); + if (error) { + tsec_detach(sc); + return (ENXIO); + } + + /* Allocate a busdma tag for TX mbufs. */ + error = bus_dma_tag_create(NULL, /* parent */ + TSEC_TXBUFFER_ALIGNMENT, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filtfunc, filtfuncarg */ + MCLBYTES * (TSEC_TX_NUM_DESC - 1), /* maxsize */ + TSEC_TX_NUM_DESC - 1, /* nsegments */ + MCLBYTES, 0, /* maxsegsz, flags */ + NULL, NULL, /* lockfunc, lockfuncarg */ + &sc->tsec_tx_mtag); /* dmat */ + if (error) { + device_printf(sc->dev, "failed to allocate busdma tag " + "(tx mbufs)\n"); + tsec_detach(sc); + return (ENXIO); + } + + /* Allocate a busdma tag for RX mbufs. */ + error = bus_dma_tag_create(NULL, /* parent */ + TSEC_RXBUFFER_ALIGNMENT, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filtfunc, filtfuncarg */ + MCLBYTES, /* maxsize */ + 1, /* nsegments */ + MCLBYTES, 0, /* maxsegsz, flags */ + NULL, NULL, /* lockfunc, lockfuncarg */ + &sc->tsec_rx_mtag); /* dmat */ + if (error) { + device_printf(sc->dev, "failed to allocate busdma tag " + "(rx mbufs)\n"); + tsec_detach(sc); + return (ENXIO); + } + + /* Create TX busdma maps */ + map_ptr = sc->tx_map_data; + map_pptr = sc->tx_map_unused_data; + + for (i = 0; i < TSEC_TX_NUM_DESC; i++) { + map_pptr[i] = &map_ptr[i]; + error = bus_dmamap_create(sc->tsec_tx_mtag, 0, map_pptr[i]); + if (error) { + device_printf(sc->dev, "failed to init TX ring\n"); + tsec_detach(sc); + return (ENXIO); + } + } + + /* Create RX busdma maps and zero mbuf handlers */ + for (i = 0; i < TSEC_RX_NUM_DESC; i++) { + error = bus_dmamap_create(sc->tsec_rx_mtag, 0, + &sc->rx_data[i].map); + if (error) { + device_printf(sc->dev, "failed to init RX ring\n"); + tsec_detach(sc); + return (ENXIO); + } + sc->rx_data[i].mbuf = NULL; + } + + /* Create mbufs for RX buffers */ + for (i = 0; i < TSEC_RX_NUM_DESC; i++) { + error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map, + &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr); + if (error) { + device_printf(sc->dev, "can't load rx DMA map %d, " + "error = %d\n", i, error); + tsec_detach(sc); + return (error); + } + } + + /* Create network interface for upper layers */ + ifp = sc->tsec_ifp = if_alloc(IFT_ETHER); + if (ifp == NULL) { + device_printf(sc->dev, "if_alloc() failed\n"); + tsec_detach(sc); + return (ENOMEM); + } + + ifp->if_softc = sc; + if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); + ifp->if_mtu = ETHERMTU; + ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST; + ifp->if_init = tsec_init; 
+ ifp->if_start = tsec_start; + ifp->if_ioctl = tsec_ioctl; + + IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1); + ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1; + IFQ_SET_READY(&ifp->if_snd); + + ifp->if_capabilities = IFCAP_VLAN_MTU; + if (sc->is_etsec) + ifp->if_capabilities |= IFCAP_HWCSUM; + + ifp->if_capenable = ifp->if_capabilities; + +#ifdef DEVICE_POLLING + /* Advertise that polling is supported */ + ifp->if_capabilities |= IFCAP_POLLING; +#endif + + /* Attach PHY(s) */ + error = mii_attach(sc->dev, &sc->tsec_miibus, ifp, tsec_ifmedia_upd, + tsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->phyaddr, MII_OFFSET_ANY, + 0); + if (error) { + device_printf(sc->dev, "attaching PHYs failed\n"); + if_free(ifp); + sc->tsec_ifp = NULL; + tsec_detach(sc); + return (error); + } + sc->tsec_mii = device_get_softc(sc->tsec_miibus); + + /* Set MAC address */ + tsec_get_hwaddr(sc, hwaddr); + ether_ifattach(ifp, hwaddr); + + return (0); +} + +int +tsec_detach(struct tsec_softc *sc) +{ + + if (sc->tsec_ifp != NULL) { +#ifdef DEVICE_POLLING + if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) + ether_poll_deregister(sc->tsec_ifp); +#endif + + /* Stop TSEC controller and free TX queue */ + if (sc->sc_rres) + tsec_shutdown(sc->dev); + + /* Detach network interface */ + ether_ifdetach(sc->tsec_ifp); + if_free(sc->tsec_ifp); + sc->tsec_ifp = NULL; + } + + /* Free DMA resources */ + tsec_free_dma(sc); + + return (0); +} + +int +tsec_shutdown(device_t dev) +{ + struct tsec_softc *sc; + + sc = device_get_softc(dev); + + TSEC_GLOBAL_LOCK(sc); + tsec_stop(sc); + TSEC_GLOBAL_UNLOCK(sc); + return (0); +} + +int +tsec_suspend(device_t dev) +{ + + /* TODO not implemented! */ + return (0); +} + +int +tsec_resume(device_t dev) +{ + + /* TODO not implemented! */ + return (0); +} + +static void +tsec_init(void *xsc) +{ + struct tsec_softc *sc = xsc; + + TSEC_GLOBAL_LOCK(sc); + tsec_init_locked(sc); + TSEC_GLOBAL_UNLOCK(sc); +} + +static void +tsec_init_locked(struct tsec_softc *sc) +{ + struct tsec_desc *tx_desc = sc->tsec_tx_vaddr; + struct tsec_desc *rx_desc = sc->tsec_rx_vaddr; + struct ifnet *ifp = sc->tsec_ifp; + uint32_t timeout, val, i; + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + return; + + TSEC_GLOBAL_LOCK_ASSERT(sc); + tsec_stop(sc); + + /* + * These steps are according to the MPC8555E PowerQUICCIII RM: + * 14.7 Initialization/Application Information + */ + + /* Step 1: soft reset MAC */ + tsec_reset_mac(sc); + + /* Step 2: Initialize MACCFG2 */ + TSEC_WRITE(sc, TSEC_REG_MACCFG2, + TSEC_MACCFG2_FULLDUPLEX | /* Full Duplex = 1 */ + TSEC_MACCFG2_PADCRC | /* PAD/CRC append */ + TSEC_MACCFG2_GMII | /* I/F Mode bit */ + TSEC_MACCFG2_PRECNT /* Preamble count = 7 */ + ); + + /* Step 3: Initialize ECNTRL + * While the documentation states that R100M is ignored if RPM is + * not set, it does seem to be needed to get the orange boxes to + * work (which have a Marvell 88E1111 PHY). Go figure. + */ + + /* + * XXX kludge - use circumstancial evidence to program ECNTRL + * correctly. Ideally we need some board information to guide + * us here. + */ + i = TSEC_READ(sc, TSEC_REG_ID2); + val = (i & 0xffff) + ? 
(TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM) /* Sumatra */ + : TSEC_ECNTRL_R100M; /* Orange + CDS */ + TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val); + + /* Step 4: Initialize MAC station address */ + tsec_set_mac_address(sc); + + /* + * Step 5: Assign a Physical address to the TBI so as to not conflict + * with the external PHY physical address + */ + TSEC_WRITE(sc, TSEC_REG_TBIPA, 5); + + /* Step 6: Reset the management interface */ + TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT); + + /* Step 7: Setup the MII Mgmt clock speed */ + TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28); + + /* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */ + timeout = TSEC_READ_RETRY; + while (--timeout && (TSEC_READ(sc->phy_sc, TSEC_REG_MIIMIND) & + TSEC_MIIMIND_BUSY)) + DELAY(TSEC_READ_DELAY); + if (timeout == 0) { + if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n"); + return; + } + + /* Step 9: Setup the MII Mgmt */ + mii_mediachg(sc->tsec_mii); + + /* Step 10: Clear IEVENT register */ + TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff); + + /* Step 11: Enable interrupts */ +#ifdef DEVICE_POLLING + /* + * ...only if polling is not turned on. Disable interrupts explicitly + * if polling is enabled. + */ + if (ifp->if_capenable & IFCAP_POLLING ) + tsec_intrs_ctl(sc, 0); + else +#endif /* DEVICE_POLLING */ + tsec_intrs_ctl(sc, 1); + + /* Step 12: Initialize IADDRn */ + TSEC_WRITE(sc, TSEC_REG_IADDR0, 0); + TSEC_WRITE(sc, TSEC_REG_IADDR1, 0); + TSEC_WRITE(sc, TSEC_REG_IADDR2, 0); + TSEC_WRITE(sc, TSEC_REG_IADDR3, 0); + TSEC_WRITE(sc, TSEC_REG_IADDR4, 0); + TSEC_WRITE(sc, TSEC_REG_IADDR5, 0); + TSEC_WRITE(sc, TSEC_REG_IADDR6, 0); + TSEC_WRITE(sc, TSEC_REG_IADDR7, 0); + + /* Step 13: Initialize GADDRn */ + TSEC_WRITE(sc, TSEC_REG_GADDR0, 0); + TSEC_WRITE(sc, TSEC_REG_GADDR1, 0); + TSEC_WRITE(sc, TSEC_REG_GADDR2, 0); + TSEC_WRITE(sc, TSEC_REG_GADDR3, 0); + TSEC_WRITE(sc, TSEC_REG_GADDR4, 0); + TSEC_WRITE(sc, TSEC_REG_GADDR5, 0); + TSEC_WRITE(sc, TSEC_REG_GADDR6, 0); + TSEC_WRITE(sc, TSEC_REG_GADDR7, 0); + + /* Step 14: Initialize RCTRL */ + TSEC_WRITE(sc, TSEC_REG_RCTRL, 0); + + /* Step 15: Initialize DMACTRL */ + tsec_dma_ctl(sc, 1); + + /* Step 16: Initialize FIFO_PAUSE_CTRL */ + TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN); + + /* + * Step 17: Initialize transmit/receive descriptor rings. + * Initialize TBASE and RBASE. + */ + TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr); + TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr); + + for (i = 0; i < TSEC_TX_NUM_DESC; i++) { + tx_desc[i].bufptr = 0; + tx_desc[i].length = 0; + tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC - 1) ? + TSEC_TXBD_W : 0); + } + bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + for (i = 0; i < TSEC_RX_NUM_DESC; i++) { + rx_desc[i].bufptr = sc->rx_data[i].paddr; + rx_desc[i].length = 0; + rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I | + ((i == TSEC_RX_NUM_DESC - 1) ? 
TSEC_RXBD_W : 0); + } + bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + /* Step 18: Initialize the maximum receive buffer length */ + TSEC_WRITE(sc, TSEC_REG_MRBLR, MCLBYTES); + + /* Step 19: Configure ethernet frame sizes */ + TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_MIN_FRAME_SIZE); + tsec_set_mtu(sc, ifp->if_mtu); + + /* Step 20: Enable Rx and RxBD sdata snooping */ + TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN); + TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0); + + /* Step 21: Reset collision counters in hardware */ + TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0); + TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0); + TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0); + TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0); + TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0); + + /* Step 22: Mask all CAM interrupts */ + TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff); + TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff); + + /* Step 23: Enable Rx and Tx */ + val = TSEC_READ(sc, TSEC_REG_MACCFG1); + val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN); + TSEC_WRITE(sc, TSEC_REG_MACCFG1, val); + + /* Step 24: Reset TSEC counters for Tx and Rx rings */ + TSEC_TX_RX_COUNTERS_INIT(sc); + + /* Step 25: Setup TCP/IP Off-Load engine */ + if (sc->is_etsec) + tsec_offload_setup(sc); + + /* Step 26: Setup multicast filters */ + tsec_setup_multicast(sc); + + /* Step 27: Activate network interface */ + ifp->if_drv_flags |= IFF_DRV_RUNNING; + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + sc->tsec_if_flags = ifp->if_flags; + sc->tsec_watchdog = 0; + + /* Schedule watchdog timeout */ + callout_reset(&sc->tsec_callout, hz, tsec_tick, sc); +} + +static void +tsec_set_mac_address(struct tsec_softc *sc) +{ + uint32_t macbuf[2] = { 0, 0 }; + char *macbufp, *curmac; + int i; + + TSEC_GLOBAL_LOCK_ASSERT(sc); + + KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)), + ("tsec_set_mac_address: (%d <= %d", ETHER_ADDR_LEN, + sizeof(macbuf))); + + macbufp = (char *)macbuf; + curmac = (char *)IF_LLADDR(sc->tsec_ifp); + + /* Correct order of MAC address bytes */ + for (i = 1; i <= ETHER_ADDR_LEN; i++) + macbufp[ETHER_ADDR_LEN-i] = curmac[i-1]; + + /* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */ + TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]); + TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]); +} + +/* + * DMA control function, if argument state is: + * 0 - DMA engine will be disabled + * 1 - DMA engine will be enabled + */ +static void +tsec_dma_ctl(struct tsec_softc *sc, int state) +{ + device_t dev; + uint32_t dma_flags, timeout; + + dev = sc->dev; + + dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL); + + switch (state) { + case 0: + /* Temporarily clear stop graceful stop bits. 
*/ + tsec_dma_ctl(sc, 1000); + + /* Set it again */ + dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS); + break; + case 1000: + case 1: + /* Set write with response (WWR), wait (WOP) and snoop bits */ + dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN | + DMACTRL_WWR | DMACTRL_WOP); + + /* Clear graceful stop bits */ + dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS); + break; + default: + device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n", + state); + } + + TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags); + + switch (state) { + case 0: + /* Wait for DMA stop */ + timeout = TSEC_READ_RETRY; + while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) & + (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC)))) + DELAY(TSEC_READ_DELAY); + + if (timeout == 0) + device_printf(dev, "tsec_dma_ctl(): timeout!\n"); + break; + case 1: + /* Restart transmission function */ + TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); + } +} + +/* + * Interrupts control function, if argument state is: + * 0 - all TSEC interrupts will be masked + * 1 - all TSEC interrupts will be unmasked + */ +static void +tsec_intrs_ctl(struct tsec_softc *sc, int state) +{ + device_t dev; + + dev = sc->dev; + + switch (state) { + case 0: + TSEC_WRITE(sc, TSEC_REG_IMASK, 0); + break; + case 1: + TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN | + TSEC_IMASK_RXCEN | TSEC_IMASK_BSYEN | TSEC_IMASK_EBERREN | + TSEC_IMASK_BTEN | TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN | + TSEC_IMASK_TXFEN | TSEC_IMASK_XFUNEN | TSEC_IMASK_RXFEN); + break; + default: + device_printf(dev, "tsec_intrs_ctl(): unknown state value: %d\n", + state); + } +} + +static void +tsec_reset_mac(struct tsec_softc *sc) +{ + uint32_t maccfg1_flags; + + /* Set soft reset bit */ + maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1); + maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET; + TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags); + + /* Clear soft reset bit */ + maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1); + maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET; + TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags); +} + +static void +tsec_watchdog(struct tsec_softc *sc) +{ + struct ifnet *ifp; + + TSEC_GLOBAL_LOCK_ASSERT(sc); + + if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0) + return; + + ifp = sc->tsec_ifp; + ifp->if_oerrors++; + if_printf(ifp, "watchdog timeout\n"); + + tsec_stop(sc); + tsec_init_locked(sc); +} + +static void +tsec_start(struct ifnet *ifp) +{ + struct tsec_softc *sc = ifp->if_softc; + + TSEC_TRANSMIT_LOCK(sc); + tsec_start_locked(ifp); + TSEC_TRANSMIT_UNLOCK(sc); +} + +static void +tsec_start_locked(struct ifnet *ifp) +{ + struct tsec_softc *sc; + struct mbuf *m0, *mtmp; + struct tsec_tx_fcb *tx_fcb; + unsigned int queued = 0; + int csum_flags, fcb_inserted = 0; + + sc = ifp->if_softc; + + TSEC_TRANSMIT_LOCK_ASSERT(sc); + + if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != + IFF_DRV_RUNNING) + return; + + if (sc->tsec_link == 0) + return; + + bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + + while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { + /* Get packet from the queue */ + IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); + if (m0 == NULL) + break; + + /* Insert TCP/IP Off-load frame control block */ + csum_flags = m0->m_pkthdr.csum_flags; + if (csum_flags) { + + M_PREPEND(m0, sizeof(struct tsec_tx_fcb), M_NOWAIT); + if (m0 == NULL) + break; + + tx_fcb = mtod(m0, struct tsec_tx_fcb *); + tx_fcb->flags = 0; + tx_fcb->l3_offset = ETHER_HDR_LEN; + tx_fcb->l4_offset = sizeof(struct ip); + + if (csum_flags & CSUM_IP) + 
tx_fcb->flags |= TSEC_TX_FCB_IP4 | + TSEC_TX_FCB_CSUM_IP; + + if (csum_flags & CSUM_TCP) + tx_fcb->flags |= TSEC_TX_FCB_TCP | + TSEC_TX_FCB_CSUM_TCP_UDP; + + if (csum_flags & CSUM_UDP) + tx_fcb->flags |= TSEC_TX_FCB_UDP | + TSEC_TX_FCB_CSUM_TCP_UDP; + + fcb_inserted = 1; + } + + mtmp = m_defrag(m0, M_NOWAIT); + if (mtmp) + m0 = mtmp; + + if (tsec_encap(sc, m0, fcb_inserted)) { + IFQ_DRV_PREPEND(&ifp->if_snd, m0); + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + break; + } + queued++; + BPF_MTAP(ifp, m0); + } + bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + if (queued) { + /* Enable transmitter and watchdog timer */ + TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); + sc->tsec_watchdog = 5; + } +} + +static int +tsec_encap(struct tsec_softc *sc, struct mbuf *m0, int fcb_inserted) +{ + struct tsec_desc *tx_desc = NULL; + struct ifnet *ifp; + bus_dma_segment_t segs[TSEC_TX_NUM_DESC]; + bus_dmamap_t *mapp; + int csum_flag = 0, error, seg, nsegs; + + TSEC_TRANSMIT_LOCK_ASSERT(sc); + + ifp = sc->tsec_ifp; + + if (TSEC_FREE_TX_DESC(sc) == 0) { + /* No free descriptors */ + return (-1); + } + + /* Fetch unused map */ + mapp = TSEC_ALLOC_TX_MAP(sc); + + /* Create mapping in DMA memory */ + error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, + *mapp, m0, segs, &nsegs, BUS_DMA_NOWAIT); + if (error != 0 || nsegs > TSEC_FREE_TX_DESC(sc) || nsegs <= 0) { + bus_dmamap_unload(sc->tsec_tx_mtag, *mapp); + TSEC_FREE_TX_MAP(sc, mapp); + return ((error != 0) ? error : -1); + } + bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_PREWRITE); + + if ((ifp->if_flags & IFF_DEBUG) && (nsegs > 1)) + if_printf(ifp, "TX buffer has %d segments\n", nsegs); + + if (fcb_inserted) + csum_flag = TSEC_TXBD_TOE; + + /* Everything is ok, now we can send buffers */ + for (seg = 0; seg < nsegs; seg++) { + tx_desc = TSEC_GET_CUR_TX_DESC(sc); + + tx_desc->length = segs[seg].ds_len; + tx_desc->bufptr = segs[seg].ds_addr; + + /* + * Set flags: + * - wrap + * - checksum + * - ready to send + * - transmit the CRC sequence after the last data byte + * - interrupt after the last buffer + */ + tx_desc->flags = + (tx_desc->flags & TSEC_TXBD_W) | + ((seg == 0) ? csum_flag : 0) | TSEC_TXBD_R | TSEC_TXBD_TC | + ((seg == nsegs - 1) ? 
TSEC_TXBD_L | TSEC_TXBD_I : 0); + } + + /* Save mbuf and DMA mapping for release at later stage */ + TSEC_PUT_TX_MBUF(sc, m0); + TSEC_PUT_TX_MAP(sc, mapp); + + return (0); +} + +static void +tsec_setfilter(struct tsec_softc *sc) +{ + struct ifnet *ifp; + uint32_t flags; + + ifp = sc->tsec_ifp; + flags = TSEC_READ(sc, TSEC_REG_RCTRL); + + /* Promiscuous mode */ + if (ifp->if_flags & IFF_PROMISC) + flags |= TSEC_RCTRL_PROM; + else + flags &= ~TSEC_RCTRL_PROM; + + TSEC_WRITE(sc, TSEC_REG_RCTRL, flags); +} + +#ifdef DEVICE_POLLING +static poll_handler_t tsec_poll; + +static int +tsec_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) +{ + uint32_t ie; + struct tsec_softc *sc = ifp->if_softc; + int rx_npkts; + + rx_npkts = 0; + + TSEC_GLOBAL_LOCK(sc); + if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { + TSEC_GLOBAL_UNLOCK(sc); + return (rx_npkts); + } + + if (cmd == POLL_AND_CHECK_STATUS) { + tsec_error_intr_locked(sc, count); + + /* Clear all events reported */ + ie = TSEC_READ(sc, TSEC_REG_IEVENT); + TSEC_WRITE(sc, TSEC_REG_IEVENT, ie); + } + + tsec_transmit_intr_locked(sc); + + TSEC_GLOBAL_TO_RECEIVE_LOCK(sc); + + rx_npkts = tsec_receive_intr_locked(sc, count); + + TSEC_RECEIVE_UNLOCK(sc); + + return (rx_npkts); +} +#endif /* DEVICE_POLLING */ + +static int +tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data) +{ + struct tsec_softc *sc = ifp->if_softc; + struct ifreq *ifr = (struct ifreq *)data; + device_t dev; + int mask, error = 0; + + dev = sc->dev; + + switch (command) { + case SIOCSIFMTU: + TSEC_GLOBAL_LOCK(sc); + if (tsec_set_mtu(sc, ifr->ifr_mtu)) + ifp->if_mtu = ifr->ifr_mtu; + else + error = EINVAL; + TSEC_GLOBAL_UNLOCK(sc); + break; + case SIOCSIFFLAGS: + TSEC_GLOBAL_LOCK(sc); + if (ifp->if_flags & IFF_UP) { + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + if ((sc->tsec_if_flags ^ ifp->if_flags) & + IFF_PROMISC) + tsec_setfilter(sc); + + if ((sc->tsec_if_flags ^ ifp->if_flags) & + IFF_ALLMULTI) + tsec_setup_multicast(sc); + } else + tsec_init_locked(sc); + } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) + tsec_stop(sc); + + sc->tsec_if_flags = ifp->if_flags; + TSEC_GLOBAL_UNLOCK(sc); + break; + case SIOCADDMULTI: + case SIOCDELMULTI: + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + TSEC_GLOBAL_LOCK(sc); + tsec_setup_multicast(sc); + TSEC_GLOBAL_UNLOCK(sc); + } + case SIOCGIFMEDIA: + case SIOCSIFMEDIA: + error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media, + command); + break; + case SIOCSIFCAP: + mask = ifp->if_capenable ^ ifr->ifr_reqcap; + if ((mask & IFCAP_HWCSUM) && sc->is_etsec) { + TSEC_GLOBAL_LOCK(sc); + ifp->if_capenable &= ~IFCAP_HWCSUM; + ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap; + tsec_offload_setup(sc); + TSEC_GLOBAL_UNLOCK(sc); + } +#ifdef DEVICE_POLLING + if (mask & IFCAP_POLLING) { + if (ifr->ifr_reqcap & IFCAP_POLLING) { + error = ether_poll_register(tsec_poll, ifp); + if (error) + return (error); + + TSEC_GLOBAL_LOCK(sc); + /* Disable interrupts */ + tsec_intrs_ctl(sc, 0); + ifp->if_capenable |= IFCAP_POLLING; + TSEC_GLOBAL_UNLOCK(sc); + } else { + error = ether_poll_deregister(ifp); + TSEC_GLOBAL_LOCK(sc); + /* Enable interrupts */ + tsec_intrs_ctl(sc, 1); + ifp->if_capenable &= ~IFCAP_POLLING; + TSEC_GLOBAL_UNLOCK(sc); + } + } +#endif + break; + + default: + error = ether_ioctl(ifp, command, data); + } + + /* Flush buffers if not empty */ + if (ifp->if_flags & IFF_UP) + tsec_start(ifp); + return (error); +} + +static int +tsec_ifmedia_upd(struct ifnet *ifp) +{ + struct tsec_softc *sc = ifp->if_softc; + struct mii_data *mii; + + 
TSEC_TRANSMIT_LOCK(sc); + + mii = sc->tsec_mii; + mii_mediachg(mii); + + TSEC_TRANSMIT_UNLOCK(sc); + return (0); +} + +static void +tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) +{ + struct tsec_softc *sc = ifp->if_softc; + struct mii_data *mii; + + TSEC_TRANSMIT_LOCK(sc); + + mii = sc->tsec_mii; + mii_pollstat(mii); + + ifmr->ifm_active = mii->mii_media_active; + ifmr->ifm_status = mii->mii_media_status; + + TSEC_TRANSMIT_UNLOCK(sc); +} + +static int +tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp, + uint32_t *paddr) +{ + struct mbuf *new_mbuf; + bus_dma_segment_t seg[1]; + int error, nsegs; + + KASSERT(mbufp != NULL, ("NULL mbuf pointer!")); + + new_mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES); + if (new_mbuf == NULL) + return (ENOBUFS); + new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size; + + if (*mbufp) { + bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(tag, map); + } + + error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs, + BUS_DMA_NOWAIT); + KASSERT(nsegs == 1, ("Too many segments returned!")); + if (nsegs != 1 || error) + panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error); + +#if 0 + if (error) { + printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n", + error); + m_freem(new_mbuf); + return (ENOBUFS); + } +#endif + +#if 0 + KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0, + ("Wrong alignment of RX buffer!")); +#endif + bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD); + + (*mbufp) = new_mbuf; + (*paddr) = seg->ds_addr; + return (0); +} + +static void +tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) +{ + u_int32_t *paddr; + + KASSERT(nseg == 1, ("wrong number of segments, should be 1")); + paddr = arg; + *paddr = segs->ds_addr; +} + +static int +tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap, + bus_size_t dsize, void **vaddr, void *raddr, const char *dname) +{ + int error; + + /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. 
*/ + error = bus_dma_tag_create(NULL, /* parent */ + PAGE_SIZE, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filtfunc, filtfuncarg */ + dsize, 1, /* maxsize, nsegments */ + dsize, 0, /* maxsegsz, flags */ + NULL, NULL, /* lockfunc, lockfuncarg */ + dtag); /* dmat */ + + if (error) { + device_printf(dev, "failed to allocate busdma %s tag\n", + dname); + (*vaddr) = NULL; + return (ENXIO); + } + + error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO, + dmap); + if (error) { + device_printf(dev, "failed to allocate %s DMA safe memory\n", + dname); + bus_dma_tag_destroy(*dtag); + (*vaddr) = NULL; + return (ENXIO); + } + + error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize, + tsec_map_dma_addr, raddr, BUS_DMA_NOWAIT); + if (error) { + device_printf(dev, "cannot get address of the %s " + "descriptors\n", dname); + bus_dmamem_free(*dtag, *vaddr, *dmap); + bus_dma_tag_destroy(*dtag); + (*vaddr) = NULL; + return (ENXIO); + } + + return (0); +} + +static void +tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr) +{ + + if (vaddr == NULL) + return; + + /* Unmap descriptors from DMA memory */ + bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD | + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(dtag, dmap); + + /* Free descriptors memory */ + bus_dmamem_free(dtag, vaddr, dmap); + + /* Destroy descriptors tag */ + bus_dma_tag_destroy(dtag); +} + +static void +tsec_free_dma(struct tsec_softc *sc) +{ + int i; + + /* Free TX maps */ + for (i = 0; i < TSEC_TX_NUM_DESC; i++) + if (sc->tx_map_data[i] != NULL) + bus_dmamap_destroy(sc->tsec_tx_mtag, + sc->tx_map_data[i]); + /* Destroy tag for TX mbufs */ + bus_dma_tag_destroy(sc->tsec_tx_mtag); + + /* Free RX mbufs and maps */ + for (i = 0; i < TSEC_RX_NUM_DESC; i++) { + if (sc->rx_data[i].mbuf) { + /* Unload buffer from DMA */ + bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc->tsec_rx_mtag, + sc->rx_data[i].map); + + /* Free buffer */ + m_freem(sc->rx_data[i].mbuf); + } + /* Destroy map for this buffer */ + if (sc->rx_data[i].map != NULL) + bus_dmamap_destroy(sc->tsec_rx_mtag, + sc->rx_data[i].map); + } + /* Destroy tag for RX mbufs */ + bus_dma_tag_destroy(sc->tsec_rx_mtag); + + /* Unload TX/RX descriptors */ + tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap, + sc->tsec_tx_vaddr); + tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap, + sc->tsec_rx_vaddr); +} + +static void +tsec_stop(struct tsec_softc *sc) +{ + struct ifnet *ifp; + struct mbuf *m0; + bus_dmamap_t *mapp; + uint32_t tmpval; + + TSEC_GLOBAL_LOCK_ASSERT(sc); + + ifp = sc->tsec_ifp; + + /* Disable interface and watchdog timer */ + callout_stop(&sc->tsec_callout); + ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + sc->tsec_watchdog = 0; + + /* Disable all interrupts and stop DMA */ + tsec_intrs_ctl(sc, 0); + tsec_dma_ctl(sc, 0); + + /* Remove pending data from TX queue */ + while (!TSEC_EMPTYQ_TX_MBUF(sc)) { + m0 = TSEC_GET_TX_MBUF(sc); + mapp = TSEC_GET_TX_MAP(sc); + + bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->tsec_tx_mtag, *mapp); + + TSEC_FREE_TX_MAP(sc, mapp); + m_freem(m0); + } + + /* Disable RX and TX */ + tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1); + tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN); + TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval); + DELAY(10); +} + +static void +tsec_tick(void *arg) +{ + struct tsec_softc *sc = arg; + struct ifnet *ifp; + int link; + 
+ TSEC_GLOBAL_LOCK(sc); + + tsec_watchdog(sc); + + ifp = sc->tsec_ifp; + link = sc->tsec_link; + + mii_tick(sc->tsec_mii); + + if (link == 0 && sc->tsec_link == 1 && + (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))) + tsec_start_locked(ifp); + + /* Schedule another timeout one second from now. */ + callout_reset(&sc->tsec_callout, hz, tsec_tick, sc); + + TSEC_GLOBAL_UNLOCK(sc); +} + +/* + * This is the core RX routine. It replenishes mbufs in the descriptor and + * sends data which have been dma'ed into host memory to upper layer. + * + * Loops at most count times if count is > 0, or until done if count < 0. + */ +static int +tsec_receive_intr_locked(struct tsec_softc *sc, int count) +{ + struct tsec_desc *rx_desc; + struct ifnet *ifp; + struct rx_data_type *rx_data; + struct mbuf *m; + device_t dev; + uint32_t i; + int c, rx_npkts; + uint16_t flags; + + TSEC_RECEIVE_LOCK_ASSERT(sc); + + ifp = sc->tsec_ifp; + rx_data = sc->rx_data; + dev = sc->dev; + rx_npkts = 0; + + bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + + for (c = 0; ; c++) { + if (count >= 0 && count-- == 0) + break; + + rx_desc = TSEC_GET_CUR_RX_DESC(sc); + flags = rx_desc->flags; + + /* Check if there is anything to receive */ + if ((flags & TSEC_RXBD_E) || (c >= TSEC_RX_NUM_DESC)) { + /* + * Avoid generating another interrupt + */ + if (flags & TSEC_RXBD_E) + TSEC_WRITE(sc, TSEC_REG_IEVENT, + TSEC_IEVENT_RXB | TSEC_IEVENT_RXF); + /* + * We didn't consume current descriptor and have to + * return it to the queue + */ + TSEC_BACK_CUR_RX_DESC(sc); + break; + } + + if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO | + TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) { + + rx_desc->length = 0; + rx_desc->flags = (rx_desc->flags & + ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E | TSEC_RXBD_I; + + if (sc->frame != NULL) { + m_free(sc->frame); + sc->frame = NULL; + } + + continue; + } + + /* Ok... process frame */ + i = TSEC_GET_CUR_RX_DESC_CNT(sc); + m = rx_data[i].mbuf; + m->m_len = rx_desc->length; + + if (sc->frame != NULL) { + if ((flags & TSEC_RXBD_L) != 0) + m->m_len -= m_length(sc->frame, NULL); + + m->m_flags &= ~M_PKTHDR; + m_cat(sc->frame, m); + } else { + sc->frame = m; + } + + m = NULL; + + if ((flags & TSEC_RXBD_L) != 0) { + m = sc->frame; + sc->frame = NULL; + } + + if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map, + &rx_data[i].mbuf, &rx_data[i].paddr)) { + ifp->if_ierrors++; + /* + * We ran out of mbufs; didn't consume current + * descriptor and have to return it to the queue. + */ + TSEC_BACK_CUR_RX_DESC(sc); + break; + } + + /* Attach new buffer to descriptor and clear flags */ + rx_desc->bufptr = rx_data[i].paddr; + rx_desc->length = 0; + rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) | + TSEC_RXBD_E | TSEC_RXBD_I; + + if (m != NULL) { + m->m_pkthdr.rcvif = ifp; + + m_fixhdr(m); + m_adj(m, -ETHER_CRC_LEN); + + if (sc->is_etsec) + tsec_offload_process_frame(sc, m); + + TSEC_RECEIVE_UNLOCK(sc); + (*ifp->if_input)(ifp, m); + TSEC_RECEIVE_LOCK(sc); + rx_npkts++; + } + } + + bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + /* + * Make sure TSEC receiver is not halted. + * + * Various conditions can stop the TSEC receiver, but not all are + * signaled and handled by error interrupt, so make sure the receiver + * is running. Writing to TSEC_REG_RSTAT restarts the receiver when + * halted, and is harmless if already running. 
+ */ + TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT); + return (rx_npkts); +} + +void +tsec_receive_intr(void *arg) +{ + struct tsec_softc *sc = arg; + + TSEC_RECEIVE_LOCK(sc); + +#ifdef DEVICE_POLLING + if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) { + TSEC_RECEIVE_UNLOCK(sc); + return; + } +#endif + + /* Confirm the interrupt was received by driver */ + TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF); + tsec_receive_intr_locked(sc, -1); + + TSEC_RECEIVE_UNLOCK(sc); +} + +static void +tsec_transmit_intr_locked(struct tsec_softc *sc) +{ + struct tsec_desc *tx_desc; + struct ifnet *ifp; + struct mbuf *m0; + bus_dmamap_t *mapp; + int send = 0; + + TSEC_TRANSMIT_LOCK_ASSERT(sc); + + ifp = sc->tsec_ifp; + + /* Update collision statistics */ + ifp->if_collisions += TSEC_READ(sc, TSEC_REG_MON_TNCL); + + /* Reset collision counters in hardware */ + TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0); + TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0); + TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0); + TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0); + TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0); + + bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + + while (TSEC_CUR_DIFF_DIRTY_TX_DESC(sc)) { + tx_desc = TSEC_GET_DIRTY_TX_DESC(sc); + if (tx_desc->flags & TSEC_TXBD_R) { + TSEC_BACK_DIRTY_TX_DESC(sc); + break; + } + + if ((tx_desc->flags & TSEC_TXBD_L) == 0) + continue; + + /* + * This is the last buf in this packet, so unmap and free it. + */ + m0 = TSEC_GET_TX_MBUF(sc); + mapp = TSEC_GET_TX_MAP(sc); + + bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->tsec_tx_mtag, *mapp); + + TSEC_FREE_TX_MAP(sc, mapp); + m_freem(m0); + + ifp->if_opackets++; + send = 1; + } + bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + if (send) { + /* Now send anything that was pending */ + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + tsec_start_locked(ifp); + + /* Stop wathdog if all sent */ + if (TSEC_EMPTYQ_TX_MBUF(sc)) + sc->tsec_watchdog = 0; + } +} + +void +tsec_transmit_intr(void *arg) +{ + struct tsec_softc *sc = arg; + + TSEC_TRANSMIT_LOCK(sc); + +#ifdef DEVICE_POLLING + if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) { + TSEC_TRANSMIT_UNLOCK(sc); + return; + } +#endif + /* Confirm the interrupt was received by driver */ + TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF); + tsec_transmit_intr_locked(sc); + + TSEC_TRANSMIT_UNLOCK(sc); +} + +static void +tsec_error_intr_locked(struct tsec_softc *sc, int count) +{ + struct ifnet *ifp; + uint32_t eflags; + + TSEC_GLOBAL_LOCK_ASSERT(sc); + + ifp = sc->tsec_ifp; + + eflags = TSEC_READ(sc, TSEC_REG_IEVENT); + + /* Clear events bits in hardware */ + TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY | + TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT | + TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC | + TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN); + + /* Check transmitter errors */ + if (eflags & TSEC_IEVENT_TXE) { + ifp->if_oerrors++; + + if (eflags & TSEC_IEVENT_LC) + ifp->if_collisions++; + + TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); + } + + /* Check receiver errors */ + if (eflags & TSEC_IEVENT_BSY) { + ifp->if_ierrors++; + ifp->if_iqdrops++; + + /* Get data from RX buffers */ + tsec_receive_intr_locked(sc, count); + } + + if (ifp->if_flags & IFF_DEBUG) + if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n", + eflags); + + if (eflags & TSEC_IEVENT_EBERR) { + if_printf(ifp, "System bus error occurred during" + 
"DMA transaction (flags: 0x%x)\n", eflags); + tsec_init_locked(sc); + } + + if (eflags & TSEC_IEVENT_BABT) + ifp->if_oerrors++; + + if (eflags & TSEC_IEVENT_BABR) + ifp->if_ierrors++; +} + +void +tsec_error_intr(void *arg) +{ + struct tsec_softc *sc = arg; + + TSEC_GLOBAL_LOCK(sc); + tsec_error_intr_locked(sc, -1); + TSEC_GLOBAL_UNLOCK(sc); +} + +int +tsec_miibus_readreg(device_t dev, int phy, int reg) +{ + struct tsec_softc *sc; + uint32_t timeout; + + sc = device_get_softc(dev); + + TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMADD, (phy << 8) | reg); + TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCOM, 0); + TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE); + + timeout = TSEC_READ_RETRY; + while (--timeout && TSEC_READ(sc->phy_sc, TSEC_REG_MIIMIND) & + (TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY)) + DELAY(TSEC_READ_DELAY); + + if (timeout == 0) + device_printf(dev, "Timeout while reading from PHY!\n"); + + return (TSEC_READ(sc->phy_sc, TSEC_REG_MIIMSTAT)); +} + +int +tsec_miibus_writereg(device_t dev, int phy, int reg, int value) +{ + struct tsec_softc *sc; + uint32_t timeout; + + sc = device_get_softc(dev); + + TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMADD, (phy << 8) | reg); + TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCON, value); + + timeout = TSEC_READ_RETRY; + while (--timeout && (TSEC_READ(sc->phy_sc, TSEC_REG_MIIMIND) & + TSEC_MIIMIND_BUSY)) + DELAY(TSEC_READ_DELAY); + + if (timeout == 0) + device_printf(dev, "Timeout while writing to PHY!\n"); + + return (0); +} + +void +tsec_miibus_statchg(device_t dev) +{ + struct tsec_softc *sc; + struct mii_data *mii; + uint32_t ecntrl, id, tmp; + int link; + + sc = device_get_softc(dev); + mii = sc->tsec_mii; + link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0); + + tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF; + + if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) + tmp |= TSEC_MACCFG2_FULLDUPLEX; + else + tmp &= ~TSEC_MACCFG2_FULLDUPLEX; + + switch (IFM_SUBTYPE(mii->mii_media_active)) { + case IFM_1000_T: + case IFM_1000_SX: + tmp |= TSEC_MACCFG2_GMII; + sc->tsec_link = link; + break; + case IFM_100_TX: + case IFM_10_T: + tmp |= TSEC_MACCFG2_MII; + sc->tsec_link = link; + break; + case IFM_NONE: + if (link) + device_printf(dev, "No speed selected but link " + "active!\n"); + sc->tsec_link = 0; + return; + default: + sc->tsec_link = 0; + device_printf(dev, "Unknown speed (%d), link %s!\n", + IFM_SUBTYPE(mii->mii_media_active), + ((link) ? "up" : "down")); + return; + } + TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp); + + /* XXX kludge - use circumstantial evidence for reduced mode. */ + id = TSEC_READ(sc, TSEC_REG_ID2); + if (id & 0xffff) { + ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M; + ecntrl |= (tmp & TSEC_MACCFG2_MII) ? 
TSEC_ECNTRL_R100M : 0; + TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl); + } +} + +static void +tsec_add_sysctls(struct tsec_softc *sc) +{ + struct sysctl_ctx_list *ctx; + struct sysctl_oid_list *children; + struct sysctl_oid *tree; + + ctx = device_get_sysctl_ctx(sc->dev); + children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); + tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal", + CTLFLAG_RD, 0, "TSEC Interrupts coalescing"); + children = SYSCTL_CHILDREN(tree); + + SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time", + CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_RX, tsec_sysctl_ic_time, + "I", "IC RX time threshold (0-65535)"); + SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_count", + CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_RX, tsec_sysctl_ic_count, + "I", "IC RX frame count threshold (0-255)"); + + SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time", + CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_TX, tsec_sysctl_ic_time, + "I", "IC TX time threshold (0-65535)"); + SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_count", + CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_TX, tsec_sysctl_ic_count, + "I", "IC TX frame count threshold (0-255)"); +} + +/* + * With Interrupt Coalescing (IC) active, a transmit/receive frame + * interrupt is raised either upon: + * + * - threshold-defined period of time elapsed, or + * - threshold-defined number of frames is received/transmitted, + * whichever occurs first. + * + * The following sysctls regulate IC behaviour (for TX/RX separately): + * + * dev.tsec..int_coal.rx_time + * dev.tsec..int_coal.rx_count + * dev.tsec..int_coal.tx_time + * dev.tsec..int_coal.tx_count + * + * Values: + * + * - 0 for either time or count disables IC on the given TX/RX path + * + * - count: 1-255 (expresses frame count number; note that value of 1 is + * effectively IC off) + * + * - time: 1-65535 (value corresponds to a real time period and is + * expressed in units equivalent to 64 TSEC interface clocks, i.e. one timer + * threshold unit is 26.5 us, 2.56 us, or 512 ns, corresponding to 10 Mbps, + * 100 Mbps, or 1Gbps, respectively. For detailed discussion consult the + * TSEC reference manual. + */ +static int +tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS) +{ + int error; + uint32_t time; + struct tsec_softc *sc = (struct tsec_softc *)arg1; + + time = (arg2 == TSEC_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time; + + error = sysctl_handle_int(oidp, &time, 0, req); + if (error != 0) + return (error); + + if (time > 65535) + return (EINVAL); + + TSEC_IC_LOCK(sc); + if (arg2 == TSEC_IC_RX) { + sc->rx_ic_time = time; + tsec_set_rxic(sc); + } else { + sc->tx_ic_time = time; + tsec_set_txic(sc); + } + TSEC_IC_UNLOCK(sc); + + return (0); +} + +static int +tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS) +{ + int error; + uint32_t count; + struct tsec_softc *sc = (struct tsec_softc *)arg1; + + count = (arg2 == TSEC_IC_RX) ? 
sc->rx_ic_count : sc->tx_ic_count; + + error = sysctl_handle_int(oidp, &count, 0, req); + if (error != 0) + return (error); + + if (count > 255) + return (EINVAL); + + TSEC_IC_LOCK(sc); + if (arg2 == TSEC_IC_RX) { + sc->rx_ic_count = count; + tsec_set_rxic(sc); + } else { + sc->tx_ic_count = count; + tsec_set_txic(sc); + } + TSEC_IC_UNLOCK(sc); + + return (0); +} + +static void +tsec_set_rxic(struct tsec_softc *sc) +{ + uint32_t rxic_val; + + if (sc->rx_ic_count == 0 || sc->rx_ic_time == 0) + /* Disable RX IC */ + rxic_val = 0; + else { + rxic_val = 0x80000000; + rxic_val |= (sc->rx_ic_count << 21); + rxic_val |= sc->rx_ic_time; + } + + TSEC_WRITE(sc, TSEC_REG_RXIC, rxic_val); +} + +static void +tsec_set_txic(struct tsec_softc *sc) +{ + uint32_t txic_val; + + if (sc->tx_ic_count == 0 || sc->tx_ic_time == 0) + /* Disable TX IC */ + txic_val = 0; + else { + txic_val = 0x80000000; + txic_val |= (sc->tx_ic_count << 21); + txic_val |= sc->tx_ic_time; + } + + TSEC_WRITE(sc, TSEC_REG_TXIC, txic_val); +} + +static void +tsec_offload_setup(struct tsec_softc *sc) +{ + struct ifnet *ifp = sc->tsec_ifp; + uint32_t reg; + + TSEC_GLOBAL_LOCK_ASSERT(sc); + + reg = TSEC_READ(sc, TSEC_REG_TCTRL); + reg |= TSEC_TCTRL_IPCSEN | TSEC_TCTRL_TUCSEN; + + if (ifp->if_capenable & IFCAP_TXCSUM) + ifp->if_hwassist = TSEC_CHECKSUM_FEATURES; + else + ifp->if_hwassist = 0; + + TSEC_WRITE(sc, TSEC_REG_TCTRL, reg); + + reg = TSEC_READ(sc, TSEC_REG_RCTRL); + reg &= ~(TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | TSEC_RCTRL_PRSDEP); + reg |= TSEC_RCTRL_PRSDEP_PARSE_L2 | TSEC_RCTRL_VLEX; + + if (ifp->if_capenable & IFCAP_RXCSUM) + reg |= TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | + TSEC_RCTRL_PRSDEP_PARSE_L234; + + TSEC_WRITE(sc, TSEC_REG_RCTRL, reg); +} + + +static void +tsec_offload_process_frame(struct tsec_softc *sc, struct mbuf *m) +{ + struct tsec_rx_fcb rx_fcb; + int csum_flags = 0; + int protocol, flags; + + TSEC_RECEIVE_LOCK_ASSERT(sc); + + m_copydata(m, 0, sizeof(struct tsec_rx_fcb), (caddr_t)(&rx_fcb)); + flags = rx_fcb.flags; + protocol = rx_fcb.protocol; + + if (TSEC_RX_FCB_IP_CSUM_CHECKED(flags)) { + csum_flags |= CSUM_IP_CHECKED; + + if ((flags & TSEC_RX_FCB_IP_CSUM_ERROR) == 0) + csum_flags |= CSUM_IP_VALID; + } + + if ((protocol == IPPROTO_TCP || protocol == IPPROTO_UDP) && + TSEC_RX_FCB_TCP_UDP_CSUM_CHECKED(flags) && + (flags & TSEC_RX_FCB_TCP_UDP_CSUM_ERROR) == 0) { + + csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; + m->m_pkthdr.csum_data = 0xFFFF; + } + + m->m_pkthdr.csum_flags = csum_flags; + + if (flags & TSEC_RX_FCB_VLAN) { + m->m_pkthdr.ether_vtag = rx_fcb.vlan; + m->m_flags |= M_VLANTAG; + } + + m_adj(m, sizeof(struct tsec_rx_fcb)); +} + +static void +tsec_setup_multicast(struct tsec_softc *sc) +{ + uint32_t hashtable[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; + struct ifnet *ifp = sc->tsec_ifp; + struct ifmultiaddr *ifma; + uint32_t h; + int i; + + TSEC_GLOBAL_LOCK_ASSERT(sc); + + if (ifp->if_flags & IFF_ALLMULTI) { + for (i = 0; i < 8; i++) + TSEC_WRITE(sc, TSEC_REG_GADDR(i), 0xFFFFFFFF); + + return; + } + + if_maddr_rlock(ifp); + TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { + + if (ifma->ifma_addr->sa_family != AF_LINK) + continue; + + h = (ether_crc32_be(LLADDR((struct sockaddr_dl *) + ifma->ifma_addr), ETHER_ADDR_LEN) >> 24) & 0xFF; + + hashtable[(h >> 5)] |= 1 << (0x1F - (h & 0x1F)); + } + if_maddr_runlock(ifp); + + for (i = 0; i < 8; i++) + TSEC_WRITE(sc, TSEC_REG_GADDR(i), hashtable[i]); +} + +static int +tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu) +{ + + mtu += ETHER_HDR_LEN + 
ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN; + + TSEC_GLOBAL_LOCK_ASSERT(sc); + + if (mtu >= TSEC_MIN_FRAME_SIZE && mtu <= TSEC_MAX_FRAME_SIZE) { + TSEC_WRITE(sc, TSEC_REG_MAXFRM, mtu); + return (mtu); + } + + return (0); +} diff --git a/freebsd/sys/dev/tsec/if_tsec.h b/freebsd/sys/dev/tsec/if_tsec.h new file mode 100644 index 00000000..cfba3f6b --- /dev/null +++ b/freebsd/sys/dev/tsec/if_tsec.h @@ -0,0 +1,375 @@ +/*- + * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _IF_TSEC_H +#define _IF_TSEC_H + +#include + +#define TSEC_RX_NUM_DESC 256 +#define TSEC_TX_NUM_DESC 256 + +/* Interrupt Coalescing types */ +#define TSEC_IC_RX 0 +#define TSEC_IC_TX 1 + +/* eTSEC ID */ +#define TSEC_ETSEC_ID 0x0124 + +/* Frame sizes */ +#define TSEC_MIN_FRAME_SIZE 64 +#define TSEC_MAX_FRAME_SIZE 9600 + +struct tsec_softc { + /* XXX MII bus requires that struct ifnet is first!!! 
+	struct ifnet *tsec_ifp;
+
+	struct mtx transmit_lock; /* transmitter lock */
+	struct mtx receive_lock; /* receiver lock */
+
+	phandle_t node;
+	device_t dev;
+	device_t tsec_miibus;
+	struct mii_data *tsec_mii; /* MII media control */
+	int tsec_link;
+
+	bus_dma_tag_t tsec_tx_dtag; /* TX descriptors tag */
+	bus_dmamap_t tsec_tx_dmap; /* TX descriptors map */
+	struct tsec_desc *tsec_tx_vaddr;/* vadress of TX descriptors */
+	uint32_t tsec_tx_raddr; /* real adress of TX descriptors */
+
+	bus_dma_tag_t tsec_rx_dtag; /* RX descriptors tag */
+	bus_dmamap_t tsec_rx_dmap; /* RX descriptors map */
+	struct tsec_desc *tsec_rx_vaddr; /* vadress of RX descriptors */
+	uint32_t tsec_rx_raddr; /* real adress of RX descriptors */
+
+	bus_dma_tag_t tsec_tx_mtag; /* TX mbufs tag */
+	bus_dma_tag_t tsec_rx_mtag; /* TX mbufs tag */
+
+	struct rx_data_type {
+		bus_dmamap_t map; /* mbuf map */
+		struct mbuf *mbuf;
+		uint32_t paddr; /* DMA addres of buffer */
+	} rx_data[TSEC_RX_NUM_DESC];
+
+	uint32_t tx_cur_desc_cnt;
+	uint32_t tx_dirty_desc_cnt;
+	uint32_t rx_cur_desc_cnt;
+
+	struct resource *sc_rres; /* register resource */
+	int sc_rrid; /* register rid */
+	struct {
+		bus_space_tag_t bst;
+		bus_space_handle_t bsh;
+	} sc_bas;
+
+	struct resource *sc_transmit_ires;
+	void *sc_transmit_ihand;
+	int sc_transmit_irid;
+	struct resource *sc_receive_ires;
+	void *sc_receive_ihand;
+	int sc_receive_irid;
+	struct resource *sc_error_ires;
+	void *sc_error_ihand;
+	int sc_error_irid;
+
+	int tsec_if_flags;
+	int is_etsec;
+
+	/* Watchdog and MII tick related */
+	struct callout tsec_callout;
+	int tsec_watchdog;
+
+	/* TX maps */
+	bus_dmamap_t tx_map_data[TSEC_TX_NUM_DESC];
+
+	/* unused TX maps data */
+	uint32_t tx_map_unused_get_cnt;
+	uint32_t tx_map_unused_put_cnt;
+	bus_dmamap_t *tx_map_unused_data[TSEC_TX_NUM_DESC];
+
+	/* used TX maps data */
+	uint32_t tx_map_used_get_cnt;
+	uint32_t tx_map_used_put_cnt;
+	bus_dmamap_t *tx_map_used_data[TSEC_TX_NUM_DESC];
+
+	/* mbufs in TX queue */
+	uint32_t tx_mbuf_used_get_cnt;
+	uint32_t tx_mbuf_used_put_cnt;
+	struct mbuf *tx_mbuf_used_data[TSEC_TX_NUM_DESC];
+
+	/* interrupt coalescing */
+	struct mtx ic_lock;
+	uint32_t rx_ic_time; /* RW, valid values 0..65535 */
+	uint32_t rx_ic_count; /* RW, valid values 0..255 */
+	uint32_t tx_ic_time;
+	uint32_t tx_ic_count;
+
+	/* currently received frame */
+	struct mbuf *frame;
+
+	int phyaddr;
+	struct tsec_softc *phy_sc;
+};
+
+/* interface to get/put generic objects */
+#define TSEC_CNT_INIT(cnt, wrap) ((cnt) = ((wrap) - 1))
+
+#define TSEC_INC(count, wrap) (count = ((count) + 1) & ((wrap) - 1))
+
+#define TSEC_GET_GENERIC(hand, tab, count, wrap) \
+	((hand)->tab[TSEC_INC((hand)->count, wrap)])
+
+#define TSEC_PUT_GENERIC(hand, tab, count, wrap, val) \
+	((hand)->tab[TSEC_INC((hand)->count, wrap)] = val)
+
+#define TSEC_BACK_GENERIC(sc, count, wrap) do { \
+	if ((sc)->count > 0) \
+		(sc)->count--; \
+	else \
+		(sc)->count = (wrap) - 1; \
+} while (0)
+
+/* TX maps interface */
+#define TSEC_TX_MAP_CNT_INIT(sc) do { \
+	TSEC_CNT_INIT((sc)->tx_map_unused_get_cnt, TSEC_TX_NUM_DESC); \
+	TSEC_CNT_INIT((sc)->tx_map_unused_put_cnt, TSEC_TX_NUM_DESC); \
+	TSEC_CNT_INIT((sc)->tx_map_used_get_cnt, TSEC_TX_NUM_DESC); \
+	TSEC_CNT_INIT((sc)->tx_map_used_put_cnt, TSEC_TX_NUM_DESC); \
+} while (0)
+
+/* interface to get/put unused TX maps */
+#define TSEC_ALLOC_TX_MAP(sc) \
+	TSEC_GET_GENERIC(sc, tx_map_unused_data, tx_map_unused_get_cnt, \
+	TSEC_TX_NUM_DESC)
+
+#define TSEC_FREE_TX_MAP(sc, val) \
+	TSEC_PUT_GENERIC(sc, tx_map_unused_data, tx_map_unused_put_cnt, \
+	TSEC_TX_NUM_DESC, val)
+
+/* interface to get/put used TX maps */
+#define TSEC_GET_TX_MAP(sc) \
+	TSEC_GET_GENERIC(sc, tx_map_used_data, tx_map_used_get_cnt, \
+	TSEC_TX_NUM_DESC)
+
+#define TSEC_PUT_TX_MAP(sc, val) \
+	TSEC_PUT_GENERIC(sc, tx_map_used_data, tx_map_used_put_cnt, \
+	TSEC_TX_NUM_DESC, val)
+
+/* interface to get/put TX mbufs in send queue */
+#define TSEC_TX_MBUF_CNT_INIT(sc) do { \
+	TSEC_CNT_INIT((sc)->tx_mbuf_used_get_cnt, TSEC_TX_NUM_DESC); \
+	TSEC_CNT_INIT((sc)->tx_mbuf_used_put_cnt, TSEC_TX_NUM_DESC); \
+} while (0)
+
+#define TSEC_GET_TX_MBUF(sc) \
+	TSEC_GET_GENERIC(sc, tx_mbuf_used_data, tx_mbuf_used_get_cnt, \
+	TSEC_TX_NUM_DESC)
+
+#define TSEC_PUT_TX_MBUF(sc, val) \
+	TSEC_PUT_GENERIC(sc, tx_mbuf_used_data, tx_mbuf_used_put_cnt, \
+	TSEC_TX_NUM_DESC, val)
+
+#define TSEC_EMPTYQ_TX_MBUF(sc) \
+	((sc)->tx_mbuf_used_get_cnt == (sc)->tx_mbuf_used_put_cnt)
+
+/* interface for manage tx tsec_desc */
+#define TSEC_TX_DESC_CNT_INIT(sc) do { \
+	TSEC_CNT_INIT((sc)->tx_cur_desc_cnt, TSEC_TX_NUM_DESC); \
+	TSEC_CNT_INIT((sc)->tx_dirty_desc_cnt, TSEC_TX_NUM_DESC); \
+} while (0)
+
+#define TSEC_GET_CUR_TX_DESC(sc) \
+	&TSEC_GET_GENERIC(sc, tsec_tx_vaddr, tx_cur_desc_cnt, \
+	TSEC_TX_NUM_DESC)
+
+#define TSEC_GET_DIRTY_TX_DESC(sc) \
+	&TSEC_GET_GENERIC(sc, tsec_tx_vaddr, tx_dirty_desc_cnt, \
+	TSEC_TX_NUM_DESC)
+
+#define TSEC_BACK_DIRTY_TX_DESC(sc) \
+	TSEC_BACK_GENERIC(sc, tx_dirty_desc_cnt, TSEC_TX_NUM_DESC)
+
+#define TSEC_CUR_DIFF_DIRTY_TX_DESC(sc) \
+	((sc)->tx_cur_desc_cnt != (sc)->tx_dirty_desc_cnt)
+
+#define TSEC_FREE_TX_DESC(sc) \
+	(((sc)->tx_cur_desc_cnt < (sc)->tx_dirty_desc_cnt) ? \
+	((sc)->tx_dirty_desc_cnt - (sc)->tx_cur_desc_cnt - 1) \
+	: \
+	(TSEC_TX_NUM_DESC - (sc)->tx_cur_desc_cnt \
+	+ (sc)->tx_dirty_desc_cnt - 1))
+
+/* interface for manage rx tsec_desc */
+#define TSEC_RX_DESC_CNT_INIT(sc) do { \
+	TSEC_CNT_INIT((sc)->rx_cur_desc_cnt, TSEC_RX_NUM_DESC); \
+} while (0)
+
+#define TSEC_GET_CUR_RX_DESC(sc) \
+	&TSEC_GET_GENERIC(sc, tsec_rx_vaddr, rx_cur_desc_cnt, \
+	TSEC_RX_NUM_DESC)
+
+#define TSEC_BACK_CUR_RX_DESC(sc) \
+	TSEC_BACK_GENERIC(sc, rx_cur_desc_cnt, TSEC_RX_NUM_DESC)
+
+#define TSEC_GET_CUR_RX_DESC_CNT(sc) \
+	((sc)->rx_cur_desc_cnt)
+
+/* init all counters (for init only!) */
+#define TSEC_TX_RX_COUNTERS_INIT(sc) do { \
+	TSEC_TX_MAP_CNT_INIT(sc); \
+	TSEC_TX_MBUF_CNT_INIT(sc); \
+	TSEC_TX_DESC_CNT_INIT(sc); \
+	TSEC_RX_DESC_CNT_INIT(sc); \
+} while (0)
+
+/* read/write bus functions */
+#define TSEC_READ(sc, reg) \
+	bus_space_read_4((sc)->sc_bas.bst, (sc)->sc_bas.bsh, (reg))
+#define TSEC_WRITE(sc, reg, val) \
+	bus_space_write_4((sc)->sc_bas.bst, (sc)->sc_bas.bsh, (reg), (val))
+
+/* Lock for transmitter */
+#define TSEC_TRANSMIT_LOCK(sc) do { \
+	mtx_assert(&(sc)->receive_lock, MA_NOTOWNED); \
+	mtx_lock(&(sc)->transmit_lock); \
+} while (0)
+
+#define TSEC_TRANSMIT_UNLOCK(sc) mtx_unlock(&(sc)->transmit_lock)
+#define TSEC_TRANSMIT_LOCK_ASSERT(sc) mtx_assert(&(sc)->transmit_lock, MA_OWNED)
+
+/* Lock for receiver */
+#define TSEC_RECEIVE_LOCK(sc) do { \
+	mtx_assert(&(sc)->transmit_lock, MA_NOTOWNED); \
+	mtx_lock(&(sc)->receive_lock); \
+} while (0)
+
+#define TSEC_RECEIVE_UNLOCK(sc) mtx_unlock(&(sc)->receive_lock)
+#define TSEC_RECEIVE_LOCK_ASSERT(sc) mtx_assert(&(sc)->receive_lock, MA_OWNED)
+
+/* Lock for interrupts coalescing */
+#define TSEC_IC_LOCK(sc) do { \
+	mtx_assert(&(sc)->ic_lock, MA_NOTOWNED); \
+	mtx_lock(&(sc)->ic_lock); \
+} while (0)
+
+#define TSEC_IC_UNLOCK(sc) mtx_unlock(&(sc)->ic_lock)
+#define TSEC_IC_LOCK_ASSERT(sc) mtx_assert(&(sc)->ic_lock, MA_OWNED)
+
+/* Global tsec lock (with all locks) */
+#define TSEC_GLOBAL_LOCK(sc) do { \
+	if ((mtx_owned(&(sc)->transmit_lock) ? 1 : 0) != \
+	    (mtx_owned(&(sc)->receive_lock) ? 1 : 0)) { \
+		panic("tsec deadlock possibility detection!"); \
+	} \
+	mtx_lock(&(sc)->transmit_lock); \
+	mtx_lock(&(sc)->receive_lock); \
+} while (0)
+
+#define TSEC_GLOBAL_UNLOCK(sc) do { \
+	TSEC_RECEIVE_UNLOCK(sc); \
+	TSEC_TRANSMIT_UNLOCK(sc); \
+} while (0)
+
+#define TSEC_GLOBAL_LOCK_ASSERT(sc) do { \
+	TSEC_TRANSMIT_LOCK_ASSERT(sc); \
+	TSEC_RECEIVE_LOCK_ASSERT(sc); \
+} while (0)
+
+/* From global to {transmit,receive} */
+#define TSEC_GLOBAL_TO_TRANSMIT_LOCK(sc) do { \
+	mtx_unlock(&(sc)->receive_lock);\
+} while (0)
+
+#define TSEC_GLOBAL_TO_RECEIVE_LOCK(sc) do { \
+	mtx_unlock(&(sc)->transmit_lock);\
+} while (0)
+
+struct tsec_desc {
+	volatile uint16_t flags; /* descriptor flags */
+	volatile uint16_t length; /* buffer length */
+	volatile uint32_t bufptr; /* buffer pointer */
+};
+
+#define TSEC_READ_RETRY 10000
+#define TSEC_READ_DELAY 100
+
+/* Structures and defines for TCP/IP Off-load */
+struct tsec_tx_fcb {
+	volatile uint16_t flags;
+	volatile uint8_t l4_offset;
+	volatile uint8_t l3_offset;
+	volatile uint16_t ph_chsum;
+	volatile uint16_t vlan;
+};
+
+struct tsec_rx_fcb {
+	volatile uint16_t flags;
+	volatile uint8_t rq_index;
+	volatile uint8_t protocol;
+	volatile uint16_t unused;
+	volatile uint16_t vlan;
+};
+
+#define TSEC_CHECKSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
+
+#define TSEC_TX_FCB_IP4 TSEC_TX_FCB_L3_IS_IP
+#define TSEC_TX_FCB_IP6 (TSEC_TX_FCB_L3_IS_IP | TSEC_TX_FCB_L3_IS_IP6)
+
+#define TSEC_TX_FCB_TCP TSEC_TX_FCB_L4_IS_TCP_UDP
+#define TSEC_TX_FCB_UDP (TSEC_TX_FCB_L4_IS_TCP_UDP | TSEC_TX_FCB_L4_IS_UDP)
+
+#define TSEC_RX_FCB_IP_CSUM_CHECKED(flags) \
+	((flags & (TSEC_RX_FCB_IP_FOUND | TSEC_RX_FCB_IP6_FOUND | \
+	TSEC_RX_FCB_IP_CSUM | TSEC_RX_FCB_PARSE_ERROR)) \
+	== (TSEC_RX_FCB_IP_FOUND | TSEC_RX_FCB_IP_CSUM))
+
+#define TSEC_RX_FCB_TCP_UDP_CSUM_CHECKED(flags) \
+	((flags & (TSEC_RX_FCB_TCP_UDP_FOUND | TSEC_RX_FCB_TCP_UDP_CSUM \
+	| TSEC_RX_FCB_PARSE_ERROR)) \
+	== (TSEC_RX_FCB_TCP_UDP_FOUND | TSEC_RX_FCB_TCP_UDP_CSUM))
+
+/* Prototypes */
+extern devclass_t tsec_devclass;
+
+int tsec_attach(struct tsec_softc *sc);
+int tsec_detach(struct tsec_softc *sc);
+
+void tsec_error_intr(void *arg);
+void tsec_receive_intr(void *arg);
+void tsec_transmit_intr(void *arg);
+
+int tsec_miibus_readreg(device_t dev, int phy, int reg);
+int tsec_miibus_writereg(device_t dev, int phy, int reg, int value);
+void tsec_miibus_statchg(device_t dev);
+int tsec_resume(device_t dev); /* XXX */
+int tsec_shutdown(device_t dev);
+int tsec_suspend(device_t dev); /* XXX */
+
+void tsec_get_hwaddr(struct tsec_softc *sc, uint8_t *addr);
+
+#endif /* _IF_TSEC_H */
diff --git a/freebsd/sys/dev/tsec/if_tsecreg.h b/freebsd/sys/dev/tsec/if_tsecreg.h
new file mode 100644
index 00000000..4ba1997e
--- /dev/null
+++ b/freebsd/sys/dev/tsec/if_tsecreg.h
@@ -0,0 +1,390 @@
+/*-
+ * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik
+ * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#define TSEC_REG_ID 0x000 /* Controller ID register #1. */
+#define TSEC_REG_ID2 0x004 /* Controller ID register #2. */
+
+/* TSEC General Control and Status Registers */
+#define TSEC_REG_IEVENT 0x010 /* Interrupt event register */
+#define TSEC_REG_IMASK 0x014 /* Interrupt mask register */
+#define TSEC_REG_EDIS 0x018 /* Error disabled register */
+#define TSEC_REG_ECNTRL 0x020 /* Ethernet control register */
+#define TSEC_REG_MINFLR 0x024 /* Minimum frame length register */
+#define TSEC_REG_PTV 0x028 /* Pause time value register */
+#define TSEC_REG_DMACTRL 0x02c /* DMA control register */
+#define TSEC_REG_TBIPA 0x030 /* TBI PHY address register */
+
+/* TSEC FIFO Control and Status Registers */
+#define TSEC_REG_FIFO_PAUSE_CTRL 0x04c /* FIFO pause control register */
+#define TSEC_REG_FIFO_TX_THR 0x08c /* FIFO transmit threshold register */
+#define TSEC_REG_FIFO_TX_STARVE 0x098 /* FIFO transmit starve register */
+#define TSEC_REG_FIFO_TX_STARVE_SHUTOFF 0x09c /* FIFO transmit starve shutoff
+	* register */
+
+/* TSEC Transmit Control and Status Registers */
+#define TSEC_REG_TCTRL 0x100 /* Transmit control register */
+#define TSEC_REG_TSTAT 0x104 /* Transmit Status Register */
+#define TSEC_REG_TBDLEN 0x10c /* TxBD data length register */
+#define TSEC_REG_TXIC 0x110 /* Transmit interrupt coalescing
+	* configuration register */
+#define TSEC_REG_CTBPTR 0x124 /* Current TxBD pointer register */
+#define TSEC_REG_TBPTR 0x184 /* TxBD pointer register */
+#define TSEC_REG_TBASE 0x204 /* TxBD base address register */
+#define TSEC_REG_OSTBD 0x2b0 /* Out-of-sequence TxBD register */
+#define TSEC_REG_OSTBDP 0x2b4 /* Out-of-sequence Tx data buffer pointer
+	* register */
+
+/* TSEC Receive Control and Status Registers */
+#define TSEC_REG_RCTRL 0x300 /* Receive control register */
+#define TSEC_REG_RSTAT 0x304 /* Receive status register */
+#define TSEC_REG_RBDLEN 0x30c /* RxBD data length register */
+#define TSEC_REG_RXIC 0x310 /* Receive interrupt coalescing
+	* configuration register */
+#define TSEC_REG_CRBPTR 0x324 /* Current RxBD pointer register */
+#define TSEC_REG_MRBLR 0x340 /* Maximum receive buffer length register */
+#define TSEC_REG_RBPTR 0x384 /* RxBD pointer register */
+#define TSEC_REG_RBASE 0x404 /* RxBD base address register */
+
+/* TSEC MAC Registers */
+#define TSEC_REG_MACCFG1 0x500 /* MAC configuration 1 register */
+#define TSEC_REG_MACCFG2 0x504 /* MAC configuration 2 register */
+#define TSEC_REG_IPGIFG 0x508 /* Inter-packet gap/inter-frame gap
+	* register */
+#define TSEC_REG_HAFDUP 0x50c /* Half-duplex register */
+#define TSEC_REG_MAXFRM 0x510 /* Maximum frame length register */
+#define TSEC_REG_MIIMCFG 0x520 /* MII Management configuration register */
+#define TSEC_REG_MIIMCOM 0x524 /* MII Management command register */
+#define TSEC_REG_MIIMADD 0x528 /* MII Management address register */
+#define TSEC_REG_MIIMCON 0x52c /* MII Management control register */
+#define TSEC_REG_MIIMSTAT 0x530 /* MII Management status register */
+#define TSEC_REG_MIIMIND 0x534 /* MII Management indicator register */
+#define TSEC_REG_IFSTAT 0x53c /* Interface status register */
+#define TSEC_REG_MACSTNADDR1 0x540 /* Station address register, part 1 */
+#define TSEC_REG_MACSTNADDR2 0x544 /* Station address register, part 2 */
+
+/* TSEC Transmit and Receive Counters */
+#define TSEC_REG_MON_TR64 0x680 /* Transmit and receive 64-byte
+	* frame counter register */
+#define TSEC_REG_MON_TR127 0x684 /* Transmit and receive 65-127 byte
+	* frame counter register */
+#define TSEC_REG_MON_TR255 0x688 /* Transmit and receive 128-255 byte
+	* frame counter register */
+#define TSEC_REG_MON_TR511 0x68c /* Transmit and receive 256-511 byte
+	* frame counter register */
+#define TSEC_REG_MON_TR1K 0x690 /* Transmit and receive 512-1023 byte
+	* frame counter register */
+#define TSEC_REG_MON_TRMAX 0x694 /* Transmit and receive 1024-1518 byte
+	* frame counter register */
+#define TSEC_REG_MON_TRMGV 0x698 /* Transmit and receive 1519-1522 byte
+	* good VLAN frame counter register */
+
+/* TSEC Receive Counters */
+#define TSEC_REG_MON_RBYT 0x69c /* Receive byte counter register */
+#define TSEC_REG_MON_RPKT 0x6a0 /* Receive packet counter register */
+#define TSEC_REG_MON_RFCS 0x6a4 /* Receive FCS error counter register */
+#define TSEC_REG_MON_RMCA 0x6a8 /* Receive multicast packet counter
+	* register */
+#define TSEC_REG_MON_RBCA 0x6ac /* Receive broadcast packet counter
+	* register */
+#define TSEC_REG_MON_RXCF 0x6b0 /* Receive control frame packet counter
+	* register */
+#define TSEC_REG_MON_RXPF 0x6b4 /* Receive pause frame packet counter
+	* register */
+#define TSEC_REG_MON_RXUO 0x6b8 /* Receive unknown OP code counter
+	* register */
+#define TSEC_REG_MON_RALN 0x6bc /* Receive alignment error counter
+	* register */
+#define TSEC_REG_MON_RFLR 0x6c0 /* Receive frame length error counter
+	* register */
+#define TSEC_REG_MON_RCDE 0x6c4 /* Receive code error counter register */
+#define TSEC_REG_MON_RCSE 0x6c8 /* Receive carrier sense error counter
+	* register */
+#define TSEC_REG_MON_RUND 0x6cc /* Receive undersize packet counter
+	* register */
+#define TSEC_REG_MON_ROVR 0x6d0 /* Receive oversize packet counter
+	* register */
+#define TSEC_REG_MON_RFRG 0x6d4 /* Receive fragments counter register */
+#define TSEC_REG_MON_RJBR 0x6d8 /* Receive jabber counter register */
+#define TSEC_REG_MON_RDRP 0x6dc /* Receive drop counter register */
+
+/* TSEC Transmit Counters */
+#define TSEC_REG_MON_TBYT 0x6e0 /* Transmit byte counter register */
+#define TSEC_REG_MON_TPKT 0x6e4 /* Transmit packet counter register */
+#define TSEC_REG_MON_TMCA 0x6e8 /* Transmit multicast packet counter
+	* register */
+#define TSEC_REG_MON_TBCA 0x6ec /* Transmit broadcast packet counter
+	* register */
+#define TSEC_REG_MON_TXPF 0x6f0 /* Transmit PAUSE control frame counter
+	* register */
+#define TSEC_REG_MON_TDFR 0x6f4 /* Transmit deferral packet counter
+	* register */
+#define TSEC_REG_MON_TEDF 0x6f8 /* Transmit excessive deferral packet
+	* counter register */
+#define TSEC_REG_MON_TSCL 0x6fc /* Transmit single collision packet counter
+	* register */
+#define TSEC_REG_MON_TMCL 0x700 /* Transmit multiple collision packet counter
+	* register */
+#define TSEC_REG_MON_TLCL 0x704 /* Transmit late collision packet counter
+	* register */
+#define TSEC_REG_MON_TXCL 0x708 /* Transmit excessive collision packet
+	* counter register */
+#define TSEC_REG_MON_TNCL 0x70c /* Transmit total collision counter
+	* register */
+#define TSEC_REG_MON_TDRP 0x714 /* Transmit drop frame counter register */
+#define TSEC_REG_MON_TJBR 0x718 /* Transmit jabber frame counter register */
+#define TSEC_REG_MON_TFCS 0x71c /* Transmit FCS error counter register */
+#define TSEC_REG_MON_TXCF 0x720 /* Transmit control frame counter register */
+#define TSEC_REG_MON_TOVR 0x724 /* Transmit oversize frame counter
+	* register */
+#define TSEC_REG_MON_TUND 0x728 /* Transmit undersize frame counter
+	* register */
+#define TSEC_REG_MON_TFRG 0x72c /* Transmit fragments frame counter
+	* register */
+
+/* TSEC General Registers */
+#define TSEC_REG_MON_CAR1 0x730 /* Carry register one register */
+#define TSEC_REG_MON_CAR2 0x734 /* Carry register two register */
+#define TSEC_REG_MON_CAM1 0x738 /* Carry register one mask register */
+#define TSEC_REG_MON_CAM2 0x73c /* Carry register two mask register */
+
+/* TSEC Hash Function Registers */
+#define TSEC_REG_IADDR0 0x800 /* Indivdual address register 0 */
+#define TSEC_REG_IADDR1 0x804 /* Indivdual address register 1 */
+#define TSEC_REG_IADDR2 0x808 /* Indivdual address register 2 */
+#define TSEC_REG_IADDR3 0x80c /* Indivdual address register 3 */
+#define TSEC_REG_IADDR4 0x810 /* Indivdual address register 4 */
+#define TSEC_REG_IADDR5 0x814 /* Indivdual address register 5 */
+#define TSEC_REG_IADDR6 0x818 /* Indivdual address register 6 */
+#define TSEC_REG_IADDR7 0x81c /* Indivdual address register 7 */
+#define TSEC_REG_GADDR0 0x880 /* Group address register 0 */
+#define TSEC_REG_GADDR1 0x884 /* Group address register 1 */
+#define TSEC_REG_GADDR2 0x888 /* Group address register 2 */
+#define TSEC_REG_GADDR3 0x88c /* Group address register 3 */
+#define TSEC_REG_GADDR4 0x890 /* Group address register 4 */
+#define TSEC_REG_GADDR5 0x894 /* Group address register 5 */
+#define TSEC_REG_GADDR6 0x898 /* Group address register 6 */
+#define TSEC_REG_GADDR7 0x89c /* Group address register 7 */
+#define TSEC_REG_IADDR(n) (TSEC_REG_IADDR0 + (n << 2))
+#define TSEC_REG_GADDR(n) (TSEC_REG_GADDR0 + (n << 2))
+
+/* TSEC attribute registers */
+#define TSEC_REG_ATTR 0xbf8 /* Attributes Register */
+#define TSEC_REG_ATTRELI 0xbfc /* Attributes EL & EI register */
+
+/* Size of TSEC registers area */
+#define TSEC_IO_SIZE 0x1000
+
+/* reg bits */
+#define TSEC_FIFO_PAUSE_CTRL_EN 0x0002
+
+#define TSEC_DMACTRL_TDSEN 0x00000080 /* Tx Data snoop enable */
+#define TSEC_DMACTRL_TBDSEN 0x00000040 /* TxBD snoop enable */
+#define TSEC_DMACTRL_GRS 0x00000010 /* Graceful receive stop */
+#define TSEC_DMACTRL_GTS 0x00000008 /* Graceful transmit stop */
+#define DMACTRL_WWR 0x00000002 /* Write with response */
+#define DMACTRL_WOP 0x00000001 /* Wait or poll */
+
+#define TSEC_RCTRL_VLEX 0x00002000 /* Enable automatic VLAN tag
+	* extraction and deletion
+	* from Ethernet frames */
+#define TSEC_RCTRL_IPCSEN 0x00000200 /* IP Checksum verification enable */
+#define TSEC_RCTRL_TUCSEN 0x00000100 /* TCP or UDP Checksum verification enable */
+#define TSEC_RCTRL_PRSDEP 0x000000C0 /* Parser control */
+#define TSEC_RCRTL_PRSFM 0x00000020 /* FIFO-mode parsing */
+#define TSEC_RCTRL_BC_REJ 0x00000010 /* Broadcast frame reject */
+#define TSEC_RCTRL_PROM 0x00000008 /* Promiscuous mode */
+#define TSEC_RCTRL_RSF 0x00000004 /* Receive short frame mode */
+
+#define TSEC_RCTRL_PRSDEP_PARSER_OFF 0x00000000 /* Parser Disabled */
+#define TSEC_RCTRL_PRSDEP_PARSE_L2 0x00000040 /* Parse L2 */
+#define TSEC_RCTRL_PRSDEP_PARSE_L23 0x00000080 /* Parse L2 and L3 */
+#define TSEC_RCTRL_PRSDEP_PARSE_L234 0x000000C0 /* Parse L2, L3 and L4 */
+
+#define TSEC_TCTRL_IPCSEN 0x00004000 /* IP header checksum generation enable */
+#define TSEC_TCTRL_TUCSEN 0x00002000 /* TCP/UDP header checksum generation enable */
+
+#define TSEC_TSTAT_THLT 0x80000000 /* Transmit halt */
+#define TSEC_RSTAT_QHLT 0x00800000 /* RxBD queue is halted */
+
+#define TSEC_IEVENT_BABR 0x80000000 /* Babbling receive error */
+#define TSEC_IEVENT_RXC 0x40000000 /* Receive control interrupt */
+#define TSEC_IEVENT_BSY 0x20000000 /* Busy condition interrupt */
+#define TSEC_IEVENT_EBERR 0x10000000 /* Ethernet bus error */
+#define TSEC_IEVENT_MSRO 0x04000000 /* MSTAT Register Overflow */
+#define TSEC_IEVENT_GTSC 0x02000000 /* Graceful transmit stop complete */
+#define TSEC_IEVENT_BABT 0x01000000 /* Babbling transmit error */
+#define TSEC_IEVENT_TXC 0x00800000 /* Transmit control interrupt */
+#define TSEC_IEVENT_TXE 0x00400000 /* Transmit error */
+#define TSEC_IEVENT_TXB 0x00200000 /* Transmit buffer */
+#define TSEC_IEVENT_TXF 0x00100000 /* Transmit frame interrupt */
+#define TSEC_IEVENT_LC 0x00040000 /* Late collision */
+#define TSEC_IEVENT_CRL 0x00020000 /* Collision retry limit/excessive
+	* defer abort */
+#define TSEC_IEVENT_XFUN 0x00010000 /* Transmit FIFO underrun */
+#define TSEC_IEVENT_RXB 0x00008000 /* Receive buffer */
+#define TSEC_IEVENT_MMRD 0x00000400 /* MII management read completion */
+#define TSEC_IEVENT_MMWR 0x00000200 /* MII management write completion */
+#define TSEC_IEVENT_GRSC 0x00000100 /* Graceful receive stop complete */
+#define TSEC_IEVENT_RXF 0x00000080 /* Receive frame interrupt */
+
+#define TSEC_IMASK_BREN 0x80000000 /* Babbling receiver interrupt */
+#define TSEC_IMASK_RXCEN 0x40000000 /* Receive control interrupt */
+#define TSEC_IMASK_BSYEN 0x20000000 /* Busy interrupt */
+#define TSEC_IMASK_EBERREN 0x10000000 /* Ethernet controller bus error */
+#define TSEC_IMASK_MSROEN 0x04000000 /* MSTAT register overflow interrupt */
+#define TSEC_IMASK_GTSCEN 0x02000000 /* Graceful transmit stop complete interrupt */
+#define TSEC_IMASK_BTEN 0x01000000 /* Babbling transmitter interrupt */
+#define TSEC_IMASK_TXCEN 0x00800000 /* Transmit control interrupt */
+#define TSEC_IMASK_TXEEN 0x00400000 /* Transmit error interrupt */
+#define TSEC_IMASK_TXBEN 0x00200000 /* Transmit buffer interrupt */
+#define TSEC_IMASK_TXFEN 0x00100000 /* Transmit frame interrupt */
+#define TSEC_IMASK_LCEN 0x00040000 /* Late collision */
+#define TSEC_IMASK_CRLEN 0x00020000 /* Collision retry limit/excessive defer */
+#define TSEC_IMASK_XFUNEN 0x00010000 /* Transmit FIFO underrun */
+#define TSEC_IMASK_RXBEN 0x00008000 /* Receive buffer interrupt */
+#define TSEC_IMASK_MMRD 0x00000400 /* MII management read completion */
+#define TSEC_IMASK_MMWR 0x00000200 /* MII management write completion */
+#define TSEC_IMASK_GRSCEN 0x00000100 /* Graceful receive stop complete interrupt */
+#define TSEC_IMASK_RXFEN 0x00000080 /* Receive frame interrupt */
+
+#define TSEC_ATTR_ELCWT 0x00004000 /* Write extracted data to L2 cache */
+#define TSEC_ATTR_BDLWT 0x00000800 /* Write buffer descriptor to L2 cache */
+#define TSEC_ATTR_RDSEN 0x00000080 /* Rx data snoop enable */
+#define TSEC_ATTR_RBDSEN 0x00000040 /* RxBD snoop enable */
+
+#define TSEC_MACCFG1_SOFT_RESET 0x80000000 /* Soft reset */
+#define TSEC_MACCFG1_RESET_RX_MC 0x00080000 /* Reset receive MAC control block */
+#define TSEC_MACCFG1_RESET_TX_MC 0x00040000 /* Reset transmit MAC control block */
+#define TSEC_MACCFG1_RESET_RX_FUN 0x00020000 /* Reset receive function block */
+#define TSEC_MACCFG1_RESET_TX_FUN 0x00010000 /* Reset transmit function block */
+#define TSEC_MACCFG1_LOOPBACK 0x00000100 /* Loopback */
+#define TSEC_MACCFG1_RX_FLOW 0x00000020 /* Receive flow */
+#define TSEC_MACCFG1_TX_FLOW 0x00000010 /* Transmit flow */
+#define TSEC_MACCFG1_SYNCD_RX_EN 0x00000008 /* Receive enable synchronized
+	* to the receive stream (Read-only) */
+#define TSEC_MACCFG1_RX_EN 0x00000004 /* Receive enable */
+#define TSEC_MACCFG1_SYNCD_TX_EN 0x00000002 /* Transmit enable synchronized
+	* to the transmit stream (Read-only) */
+#define TSEC_MACCFG1_TX_EN 0x00000001 /* Transmit enable */
+
+#define TSEC_MACCFG2_PRECNT 0x00007000 /* Preamble Length (0x7) */
+#define TSEC_MACCFG2_IF 0x00000300 /* Determines the type of interface
+	* to which the MAC is connected */
+#define TSEC_MACCFG2_MII 0x00000100 /* Nibble mode (MII) */
+#define TSEC_MACCFG2_GMII 0x00000200 /* Byte mode (GMII/TBI) */
+#define TSEC_MACCFG2_HUGEFRAME 0x00000020 /* Huge frame enable */
+#define TSEC_MACCFG2_LENGTHCHECK 0x00000010 /* Length check */
+#define TSEC_MACCFG2_PADCRC 0x00000004 /* Pad and append CRC */
+#define TSEC_MACCFG2_CRCEN 0x00000002 /* CRC enable */
+#define TSEC_MACCFG2_FULLDUPLEX 0x00000001 /* Full duplex configure */
+
+#define TSEC_ECNTRL_STEN 0x00001000 /* Statistics enabled */
+#define TSEC_ECNTRL_GMIIM 0x00000040 /* GMII I/F mode */
+#define TSEC_ECNTRL_TBIM 0x00000020 /* Ten-bit I/F mode */
+#define TSEC_ECNTRL_R100M 0x00000008 /* RGMII/RMII 100 mode */
+#define TSEC_ECNTRL_RMM 0x00000004 /* Reduced-pin mode */
+#define TSEC_ECNTRL_SGMIIM 0x00000002 /* Serial GMII mode */
+
+#define TSEC_MIIMCFG_RESETMGMT 0x80000000 /* Reset management */
+#define TSEC_MIIMCFG_NOPRE 0x00000010 /* Preamble suppress */
+#define TSEC_MIIMCFG_CLKDIV28 0x00000007 /* source clock divided by 28 */
+#define TSEC_MIIMCFG_CLKDIV20 0x00000006 /* source clock divided by 20 */
+#define TSEC_MIIMCFG_CLKDIV14 0x00000005 /* source clock divided by 14 */
+#define TSEC_MIIMCFG_CLKDIV10 0x00000004 /* source clock divided by 10 */
+#define TSEC_MIIMCFG_CLKDIV8 0x00000003 /* source clock divided by 8 */
+#define TSEC_MIIMCFG_CLKDIV6 0x00000002 /* source clock divided by 6 */
+#define TSEC_MIIMCFG_CLKDIV4 0x00000001 /* source clock divided by 4 */
+
+#define TSEC_MIIMIND_NOTVALID 0x00000004 /* Not valid */
+#define TSEC_MIIMIND_SCAN 0x00000002 /* Scan in progress */
+#define TSEC_MIIMIND_BUSY 0x00000001 /* Busy */
+
+#define TSEC_MIIMCOM_SCANCYCLE 0x00000002 /* Scan cycle */
+#define TSEC_MIIMCOM_READCYCLE 0x00000001 /* Read cycle */
+
+/* Transmit Data Buffer Descriptor (TxBD) Field Descriptions */
+#define TSEC_TXBD_R 0x8000 /* Ready */
+#define TSEC_TXBD_PADCRC 0x4000 /* PAD/CRC */
+#define TSEC_TXBD_W 0x2000 /* Wrap */
+#define TSEC_TXBD_I 0x1000 /* Interrupt */
+#define TSEC_TXBD_L 0x0800 /* Last in frame */
+#define TSEC_TXBD_TC 0x0400 /* Tx CRC */
+#define TSEC_TXBD_DEF 0x0200 /* Defer indication */
+#define TSEC_TXBD_TO1 0x0100 /* Transmit software ownership */
+#define TSEC_TXBD_HFE 0x0080 /* Huge frame enable (written by user) */
+#define TSEC_TXBD_LC 0x0080 /* Late collision (written by TSEC) */
+#define TSEC_TXBD_RL 0x0040 /* Retransmission Limit */
+#define TSEC_TXBD_TOE 0x0002 /* TCP/IP Offload Enable */
+#define TSEC_TXBD_UN 0x0002 /* Underrun */
+#define TSEC_TXBD_TXTRUNC 0x0001 /* TX truncation */
+
+/* Receive Data Buffer Descriptor (RxBD) Field Descriptions */
+#define TSEC_RXBD_E 0x8000 /* Empty */
+#define TSEC_RXBD_RO1 0x4000 /* Receive software ownership bit */
+#define TSEC_RXBD_W 0x2000 /* Wrap */
+#define TSEC_RXBD_I 0x1000 /* Interrupt */
+#define TSEC_RXBD_L 0x0800 /* Last in frame */
+#define TSEC_RXBD_F 0x0400 /* First in frame */
+#define TSEC_RXBD_M 0x0100 /* Miss - The frame was received because
+	* of promiscuous mode. */
+#define TSEC_RXBD_B 0x0080 /* Broadcast */
+#define TSEC_RXBD_MC 0x0040 /* Multicast */
+#define TSEC_RXBD_LG 0x0020 /* Large - Rx frame length violation */
+#define TSEC_RXBD_NO 0x0010 /* Rx non-octet aligned frame */
+#define TSEC_RXBD_SH 0x0008 /* Short frame */
+#define TSEC_RXBD_CR 0x0004 /* Rx CRC error */
+#define TSEC_RXBD_OV 0x0002 /* Overrun */
+#define TSEC_RXBD_TR 0x0001 /* Truncation */
+#define TSEC_RXBD_ZEROONINIT (TSEC_RXBD_TR | TSEC_RXBD_OV | TSEC_RXBD_CR | \
+	TSEC_RXBD_SH | TSEC_RXBD_NO | TSEC_RXBD_LG | TSEC_RXBD_MC | \
+	TSEC_RXBD_B | TSEC_RXBD_M)
+
+#define TSEC_TXBUFFER_ALIGNMENT 64
+#define TSEC_RXBUFFER_ALIGNMENT 64
+
+/* Transmit Path Off-Load Frame Control Block flags */
+#define TSEC_TX_FCB_VLAN 0x8000 /* VLAN control word valid */
+#define TSEC_TX_FCB_L3_IS_IP 0x4000 /* Layer 3 header is an IP header */
+#define TSEC_TX_FCB_L3_IS_IP6 0x2000 /* IP header is IP version 6 */
+#define TSEC_TX_FCB_L4_IS_TCP_UDP 0x1000 /* Layer 4 header is a TCP or UDP header */
+#define TSEC_TX_FCB_L4_IS_UDP 0x0800 /* UDP protocol at layer 4 */
+#define TSEC_TX_FCB_CSUM_IP 0x0400 /* Checksum IP header enable */
+#define TSEC_TX_FCB_CSUM_TCP_UDP 0x0200 /* Checksum TCP or UDP header enable */
+#define TSEC_TX_FCB_FLAG_NO_PH_CSUM 0x0100 /* Disable pseudo-header checksum */
+#define TSEC_TX_FCB_FLAG_PTP 0x0001 /* This is a PTP packet */
+
+/* Receive Path Off-Load Frame Control Block flags */
+#define TSEC_RX_FCB_VLAN 0x8000 /* VLAN tag recognized */
+#define TSEC_RX_FCB_IP_FOUND 0x4000 /* IP header found at layer 3 */
+#define TSEC_RX_FCB_IP6_FOUND 0x2000 /* IP version 6 header found at layer 3 */
+#define TSEC_RX_FCB_TCP_UDP_FOUND 0x1000 /* TCP or UDP header found at layer 4 */
+#define TSEC_RX_FCB_IP_CSUM 0x0800 /* IPv4 header checksum checked */
+#define TSEC_RX_FCB_TCP_UDP_CSUM 0x0400 /* TCP or UDP header checksum checked */
+#define TSEC_RX_FCB_IP_CSUM_ERROR 0x0200 /* IPv4 header checksum verification error */
+#define TSEC_RX_FCB_TCP_UDP_CSUM_ERROR 0x0100 /* TCP or UDP header checksum verification error */
+#define TSEC_RX_FCB_PARSE_ERROR 0x000C /* Parse error */
diff --git a/rtemsbsd/include/machine/ofw_machdep.h b/rtemsbsd/include/machine/ofw_machdep.h
new file mode 100644
index 00000000..11797da4
--- /dev/null
+++ b/rtemsbsd/include/machine/ofw_machdep.h
@@ -0,0 +1,37 @@
+/*-
+ * Copyright (c) 2009 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_OFW_MACHDEP_H_
+#define _MACHINE_OFW_MACHDEP_H_
+
+typedef uint32_t cell_t;
+
+#endif /* _MACHINE_OFW_MACHDEP_H_ */
--
cgit v1.2.3