summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--INSTALL53
-rw-r--r--Makefile.am16
-rwxr-xr-xbootstrap45
-rw-r--r--bsd_eth_drivers/.cvsignore1
-rw-r--r--bsd_eth_drivers/ChangeLog113
-rw-r--r--bsd_eth_drivers/Makefile.am2
-rw-r--r--bsd_eth_drivers/if_bge/.cvsignore1
-rw-r--r--bsd_eth_drivers/if_bge/Makefile.am22
-rw-r--r--bsd_eth_drivers/if_bge/if_bge.c4959
-rw-r--r--bsd_eth_drivers/if_bge/if_bgereg.h2591
-rw-r--r--bsd_eth_drivers/if_em/.cvsignore1
-rw-r--r--bsd_eth_drivers/if_em/Makefile.am1
-rw-r--r--bsd_eth_drivers/if_em/e1000_osdep.h227
-rw-r--r--bsd_eth_drivers/if_em/if_em.c17
-rw-r--r--bsd_eth_drivers/if_fxp/.cvsignore1
-rw-r--r--bsd_eth_drivers/if_fxp/Makefile.am22
-rw-r--r--bsd_eth_drivers/if_fxp/if_fxp.c2909
-rw-r--r--bsd_eth_drivers/if_fxp/if_fxpreg.h473
-rw-r--r--bsd_eth_drivers/if_fxp/if_fxpvar.h206
-rw-r--r--bsd_eth_drivers/if_fxp/rcvbundl.h1257
-rw-r--r--bsd_eth_drivers/if_le/.cvsignore1
-rw-r--r--bsd_eth_drivers/if_pcn/.cvsignore1
-rw-r--r--bsd_eth_drivers/if_pcn/if_pcn.c17
-rw-r--r--bsd_eth_drivers/if_re/.cvsignore1
-rw-r--r--bsd_eth_drivers/if_re/Makefile.am2
-rw-r--r--bsd_eth_drivers/if_re/if_re.c2167
-rw-r--r--bsd_eth_drivers/if_re/if_rl.c2351
-rw-r--r--bsd_eth_drivers/if_re/if_rlreg.h224
-rw-r--r--bsd_eth_drivers/libbsdport/.cvsignore1
-rw-r--r--bsd_eth_drivers/libbsdport/Makefile.am27
-rw-r--r--bsd_eth_drivers/libbsdport/alldrv.c30
-rw-r--r--bsd_eth_drivers/libbsdport/bus.h121
-rw-r--r--bsd_eth_drivers/libbsdport/callout.h44
-rw-r--r--bsd_eth_drivers/libbsdport/devicet.c73
-rw-r--r--bsd_eth_drivers/libbsdport/ifmedia.c10
-rw-r--r--bsd_eth_drivers/libbsdport/libbsdport.h50
-rw-r--r--bsd_eth_drivers/libbsdport/libbsdport_api.h16
-rw-r--r--bsd_eth_drivers/libbsdport/libbsdport_post.h35
-rw-r--r--bsd_eth_drivers/libbsdport/miistuff.c58
-rw-r--r--bsd_eth_drivers/libbsdport/misc.c179
-rw-r--r--bsd_eth_drivers/libbsdport/mutex.h3
-rw-r--r--bsd_eth_drivers/libbsdport/rtems_callout.c31
-rw-r--r--bsd_eth_drivers/libbsdport/sysbus.c60
-rw-r--r--bsd_eth_drivers/libbsdport/taskqueue.h2
-rw-r--r--bsd_eth_drivers/links.am12
-rw-r--r--config.h.in48
-rw-r--r--configure.ac376
-rw-r--r--m4/acinclude.m416
-rw-r--r--m4/config-if-present.m423
-rw-r--r--m4/cvstag.m441
-rw-r--r--m4/multilib-fix.m478
-rw-r--r--m4/multilib-installdir.m421
-rw-r--r--m4/rtems-bsp-postlink.m432
-rw-r--r--m4/rtems-bsplist.m441
-rw-r--r--m4/rtems-check-libargs.m49
-rw-r--r--m4/rtems-checkprog.m49
-rw-r--r--m4/rtems-checktool.m411
-rw-r--r--m4/rtems-checktop.m423
-rw-r--r--m4/rtems-fixup-prefix.m459
-rw-r--r--m4/rtems-isml.m411
-rw-r--r--m4/rtems-ismultibsp.m423
-rw-r--r--m4/rtems-isrtems.m423
-rw-r--r--m4/rtems-makevars.m4142
-rw-r--r--m4/rtems-multilib.m428
-rw-r--r--m4/rtems-options.m441
-rw-r--r--m4/rtems-setup-recurse.m4224
-rw-r--r--m4/rtems-tools.m415
-rw-r--r--m4/rtems-trim-builddir.m426
-rw-r--r--m4/rtems-verscheck.m436
-rw-r--r--makefile.top.am12
-rw-r--r--makefile.top.in670
-rw-r--r--rtems-pre.am5
-rw-r--r--rtems.am41
-rw-r--r--ssrlApps.components.in1
74 files changed, 17269 insertions, 3249 deletions
diff --git a/INSTALL b/INSTALL
deleted file mode 100644
index 38c11f6..0000000
--- a/INSTALL
+++ /dev/null
@@ -1,53 +0,0 @@
-HOW TO BUILD THE SSRL RTEMS-APPLICATIONS
-
-$Id$
-
-Till Straumann, 2008/10/3
-
-***************************************************
-THIS FILE HAS NOT BEEN WRITTEN YET, SORRY!
-
-Some useful information regarding 'configure' options
-can be found in cexp/INSTALL.
-
-Basic instructions:
-
- - make sure PATH contains toolchain directory
- - mkdir build
- - chdir build
- - ../configure --with-rtems-top=/afs/slac/package/rtems/<version> \
- --prefix=/afs/slac/package/rtems/<version>
- - make
- - make install
-
-This builds ssrlApps for all architectures/BSPs which are installed
-under --with-rtems-top. The list of BSPs can be explicitly defined
-using --with-rtemsbsp='bsp1 bsp2 bsp3 ...'.
-
-The default installation path for binaries, libraries and includes is
-
- <prefix>/target/ssrlApps/<cpu>-rtems/<bsp>/bin
- <prefix>/target/ssrlApps/<cpu>-rtems/<bsp>/lib
- <prefix>/target/ssrlApps/<cpu>-rtems/<bsp>/include
-
-but can be modified using the standard --exec-prefix, --libdir, --includedir
-options (see 'configure' documentation and cexp/INSTALL).
-
-Other useful options:
-
- --enable-std-rtems-installdirs
- Install directly into the RTEMS installation directories;
- by default a location *outside* of the standard location
- is used. If you don't use this option you can also fine-tune
- the installation using the usual --prefix, --exec-prefix,
- --libdir, --includedir etc. options. If you use this
- option '--prefix' & friends are effectively overridden.
-
- --with-hostbindir=<path>
- Where tools, i.e., programs that execute on the development
- platform are to be installed. Defaults to
-
- PREFIX/host/${build_alias}/bin
-
-
-***************************************************
diff --git a/Makefile.am b/Makefile.am
deleted file mode 100644
index 8fae066..0000000
--- a/Makefile.am
+++ /dev/null
@@ -1,16 +0,0 @@
-AUTOMAKE_OPTIONS=foreign
-
-ACLOCAL_AMFLAGS= -I./m4
-
-SUBDIRS=. @enable_subdirs@
-
-DIST_SUBDIRS= @all_subdirs@
-
-EXTRA_DIST=makefile.top.am makefile.top.in ssrlApps.components.in
-
-all-local:
- $(MAKE) $(AM_MAKEFLAGS) INSTALL="$(INSTALL_IF_CHANGE)" prefix="$(abs_top_builddir)/data" exec_prefix='$$(prefix)' includedir='$$(prefix)/include' install-data
- $(MAKE) $(AM_MAKEFLAGS) INSTALL="$(INSTALL_IF_CHANGE)" prefix="$(abs_top_builddir)/data" exec_prefix='$$(prefix)' includedir='$$(prefix)/include' install-exec
-
-clean-local:
- $(RM) -r data
diff --git a/bootstrap b/bootstrap
deleted file mode 100755
index d0762ad..0000000
--- a/bootstrap
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/sh
-
-localhack=
-while getopts "lf" theopt ; do
- case $theopt in
- l)
- localhack=yes
- ;;
- f)
- force=--force
- ;;
- *)
- ;;
- esac
-done
-
-if test -d cexp ; then
- (cd cexp; make src)
-fi
-
-for val in cexp/binutils* binutils* ; do
- if test -d "$val" ; then
- echo "*************** ERROR Found $val;"
- echo "You must run `basename $0` before unpacking 'binutils';"
- echo "Please move the 'binutils' directory temporarily out of the source tree,"
- echo "run `basename $0` and then move binutils back"
- exit 1
- fi
-done
-
-if test "$localhack" = "yes" ; then
-# for some strange reason it is not
-# possible to pass autoreconf an option
-# directing it to search directories
-# for '.m4' files for aclocal. The '-I/-B'
-# options don't seem to work. We hack
-# around this by setting ACLOCAL
- if test "${ACLOCAL+set}" = "set" ; then
- echo "Warning: ACLOCAL is already set; I add a -I option";
- else
- ACLOCAL=aclocal
- fi
- export ACLOCAL="$ACLOCAL -I `(cd \`dirname $0\`; pwd)`/autognu"
-fi
-autoreconf -i $force
diff --git a/bsd_eth_drivers/.cvsignore b/bsd_eth_drivers/.cvsignore
new file mode 100644
index 0000000..70845e0
--- /dev/null
+++ b/bsd_eth_drivers/.cvsignore
@@ -0,0 +1 @@
+Makefile.in
diff --git a/bsd_eth_drivers/ChangeLog b/bsd_eth_drivers/ChangeLog
index 33cb584..3870355 100644
--- a/bsd_eth_drivers/ChangeLog
+++ b/bsd_eth_drivers/ChangeLog
@@ -1,3 +1,116 @@
+2010-03-07 Till Straumann <Till.Straumann@TU-Berlin.de>
+
+ * links.am: POSIXified sed commands for sake of portability.
+
+2010-02-12 Till Straumann <Till.Straumann@TU-Berlin.de>
+
+ * libbsdport/Makefile.am: Removed obsolete (and commented)
+ stuff.
+
+2009-10-20 Till Straumann <Till.Straumann@TU-Berlin.de>
+
+ * libbsdport/libbsdport.h, if_em/e1000_osdep.h:
+ We need __BSD_VISIBLE defined. Some things in the rtems headers
+ changed with 4.10 (can't pinpoint the details). We now
+ #define __INSIDE_RTEMS_BSD_TCPIP_STACK__ before explicitly
+ including <rtems/rtems_bsdnet.h> and this seemed to fix
+ things for 4.10.
+
+2009-10-20 Till Straumann <Till.Straumann@TU-Berlin.de>
+
+ * libbsdport/libbsdport_post.h: added missing declaration
+ of m_defrag() (Chris replaced the dummy #define a while
+ ago and provided an implementation instead [thanks!] but
+ we also need a declaration).
+
+2009-10-20 Till Straumann <Till.Straumann@TU-Berlin.de>
+
+ * libbsdport/ifmedia.c: fixed 'unused variable' warning
+ by removing unused var.
+
+2009-10-20 Till Straumann <Till.Straumann@TU-Berlin.de>
+
+ * if_re/if_re.c: fixed 'unused variable' warning by
+ commenting via #ifndef __rtems__ (since code which uses
+ the 'msi_disable' var is commented the same way).
+
+2009-10-20 Till Straumann <Till.Straumann@TU-Berlin.de>
+
+ * if_pcn/if_pcn.c: fixed 'type-punned pointer' warning.
+ Use 'memcpy' to copy 2 bytes to a short. This requires
+ another #ifdef __rtems__, however.
+
+2009-10-20 Till Straumann <Till.Straumann@TU-Berlin.de>
+
+ * if_bge/if_bge.c: bge_intr must be passed as a 'handler'
+ to bus_setup_intr(). However, the irq_check_dis and irq_enable
+ methods seem to still be skeletons so this driver won't work...
+ (I have no hardware or emulation available to test).
+
+2009-10-20 Joel Sherrill <joel.sherrill@OARcorp.com>
+
+ * libbsdport/alldrv.c: Revert.
+
+2009-10-20 Joel Sherrill <joel.sherrill@OARcorp.com>
+
+ * if_bge/if_bge.c, libbsdport/alldrv.c: Add local changes (untested).
+
+2009-09-12 Chris Johns <chrisj@rtems.org>
+
+ * links.am: Fixed the links so they work with absolute paths.
+
+2009-08-15 Till Straumann <Till.Straumann@TU-Berlin.de>
+
+ * if_re/Makefile.am: added 'if_rlreg.h' to SOURCES so
+ that it is 'distributed'.
+
+2009-08-15 Till Straumann <Till.Straumann@TU-Berlin.de>
+
+	* if_em/e1000_osdep.h: added missing 'void' return
+ type of __out_le32() inline function.
+
+2009-08-06 Till Straumann <Till.Straumann@TU-Berlin.de>
+
+ * libbsdport/rtems_callout.c: fixed possible race
+ condition. callout_stop() must check again from
+ critical/protected section of code if callout is still on
+ the list/active. Otherwise, the callout-task could
+ have executed and removed the callout between
+ callout_stop() checking the p_prev pointer and
+ entering the critical section.
+
+2009-08-05 Till Straumann <Till.Straumann@TU-Berlin.de>
+
+ * if_em/Makefile.am, if_em/e1000_osdep.h, if_em/if_em.c:
+ Changed 'e1000_osdep.h' to provide stdalone I/O methods for
+ select architectures (x86 and PPC -- others fall back on
+ libbsdport/bus.h). This lets the low-level driver (everything
+ except for 'if_em.c/if_em.h') API be independent of
+ BSD networking and libbsdport which is desirable since
+ certain applications may wish to just use the low-level
+ API for implementing dedicated 'raw-ethernet' drivers
+ for BSD-independent, proprietary GigE communication.
+
+2009-08-05 Till Straumann <Till.Straumann@TU-Berlin.de>
+
+ * libbsdport/alldrv.c: reactivate weak aliases for all
+ known/supported drivers. This magic allows the user to
+ just link the desired (sub-)set of drivers.
+
+2009-08-05 Till Straumann <Till.Straumann@TU-Berlin.de>
+
+ * libbsdport/libbsdport.h: define _KERNEL only if not defined already
+ (e.g., from Makefile).
+
+2009-06-04 Joel Sherrill <joel.sherrill@oarcorp.com>
+
+ * libbsdport/alldrv.c, libbsdport/libbsdport_api.h: Add include
+ <stdio.h> for FILE *.
+
+ 2009/05/12 (TS)
+ - bugfix: bus_write_2() must map to bus_space_write_2 (not 4)
+ 2009/04/22 (TS)
+ - imported SLAC version into OAR repository; added FXP driver.
2008/03/22 (TS)
- silence more compiler warnings:
* make DMA address void* instead of caddr_t to avoid strict-aliasing violation
diff --git a/bsd_eth_drivers/Makefile.am b/bsd_eth_drivers/Makefile.am
index 347eb90..3611b3b 100644
--- a/bsd_eth_drivers/Makefile.am
+++ b/bsd_eth_drivers/Makefile.am
@@ -1,3 +1,3 @@
AUTOMAKE_OPTIONS=foreign
-SUBDIRS=libbsdport if_pcn if_le if_em
+SUBDIRS=libbsdport if_bge if_em if_le if_pcn if_re if_fxp
diff --git a/bsd_eth_drivers/if_bge/.cvsignore b/bsd_eth_drivers/if_bge/.cvsignore
new file mode 100644
index 0000000..70845e0
--- /dev/null
+++ b/bsd_eth_drivers/if_bge/.cvsignore
@@ -0,0 +1 @@
+Makefile.in
diff --git a/bsd_eth_drivers/if_bge/Makefile.am b/bsd_eth_drivers/if_bge/Makefile.am
new file mode 100644
index 0000000..1290360
--- /dev/null
+++ b/bsd_eth_drivers/if_bge/Makefile.am
@@ -0,0 +1,22 @@
+# $Id$
+AUTOMAKE_OPTIONS=foreign
+
+include $(top_srcdir)/rtems-pre.am
+
+libif_bge_a_SOURCES = if_bge.c if_bgereg.h
+
+##EXTRA_libif_bge_a_SOURCES =
+
+CPPFLAGS_82542_SUPPORT_NO = -DNO_82542_SUPPORT
+CPPFLAGS_ICH8LAN_SUPPORT_NO = -DNO_ICH8LAN_SUPPORT
+
+libif_bge_a_LIBADD =
+
+libif_bge_a_DEPENDENCIES = $(libif_bge_a_LIBADD)
+
+lib_LIBRARIES = libif_bge.a
+
+AM_CPPFLAGS += -I$(srcdir)
+AM_CPPFLAGS += -I$(srcdir)/../libbsdport -I../libbsdport -I../libbsdport/dummyheaders
+
+include $(top_srcdir)/rtems.am
diff --git a/bsd_eth_drivers/if_bge/if_bge.c b/bsd_eth_drivers/if_bge/if_bge.c
new file mode 100644
index 0000000..3a820e1
--- /dev/null
+++ b/bsd_eth_drivers/if_bge/if_bge.c
@@ -0,0 +1,4959 @@
+/*-
+ * Copyright (c) 2001 Wind River Systems
+ * Copyright (c) 1997, 1998, 1999, 2001
+ * Bill Paul <wpaul@windriver.com>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Bill Paul.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __rtems__
+#include <libbsdport.h>
+#endif
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/dev/bge/if_bge.c,v 1.198.2.10.2.2 2008/12/15 20:36:32 marius Exp $");
+
+/*
+ * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
+ *
+ * The Broadcom BCM5700 is based on technology originally developed by
+ * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
+ * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has
+ * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
+ * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
+ * frames, highly configurable RX filtering, and 16 RX and TX queues
+ * (which, along with RX filter rules, can be used for QOS applications).
+ * Other features, such as TCP segmentation, may be available as part
+ * of value-added firmware updates. Unlike the Tigon I and Tigon II,
+ * firmware images can be stored in hardware and need not be compiled
+ * into the driver.
+ *
+ * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
+ * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus.
+ *
+ * The BCM5701 is a single-chip solution incorporating both the BCM5700
+ * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
+ * does not support external SSRAM.
+ *
+ * Broadcom also produces a variation of the BCM5700 under the "Altima"
+ * brand name, which is functionally similar but lacks PCI-X support.
+ *
+ * Without external SSRAM, you can only have at most 4 TX rings,
+ * and the use of the mini RX ring is disabled. This seems to imply
+ * that these features are simply not available on the BCM5701. As a
+ * result, this driver does not implement any support for the mini RX
+ * ring.
+ */
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_device_polling.h"
+#endif
+
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <sys/systm.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <net/bpf.h>
+
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+#include "miidevs.h"
+#include <dev/mii/brgphyreg.h>
+
+#ifdef __sparc64__
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/openfirm.h>
+#include <machine/ofw_machdep.h>
+#include <machine/ver.h>
+#endif
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#ifndef __rtems__
+#include <dev/bge/if_bgereg.h>
+#else
+#include "if_bgereg.h"
+#endif
+
+#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
+#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
+
+MODULE_DEPEND(bge, pci, 1, 1, 1);
+MODULE_DEPEND(bge, ether, 1, 1, 1);
+MODULE_DEPEND(bge, miibus, 1, 1, 1);
+
+/* "device miibus" required. See GENERIC if you get errors here. */
+#include "miibus_if.h"
+
+#ifdef __rtems__
+#include <libbsdport_post.h>
+#define TUNABLE_INT(_a,_b)
+#define m_cljget(_a, _b, _c)
+#define MJUM9BYTES 0
+#define static
+#define PCIM_MSICTRL_MSI_ENABLE 0
+#define M_WRITABLE(_m) 1
+#define m_collapse(_m, _f, _f1) (_m)
+#define M_FIRSTFRAG 0
+#endif
+
+/*
+ * Various supported device vendors/types and their names. Note: the
+ * spec seems to indicate that the hardware still has Alteon's vendor
+ * ID burned into it, though it will always be overriden by the vendor
+ * ID in the EEPROM. Just to be safe, we cover all possibilities.
+ */
+static struct bge_type {
+ uint16_t bge_vid;
+ uint16_t bge_did;
+} bge_devs[] = {
+ { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
+ { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
+
+ { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
+ { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
+ { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
+
+ { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
+
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
+
+ { SK_VENDORID, SK_DEVICEID_ALTIMA },
+
+ { TC_VENDORID, TC_DEVICEID_3C996 },
+
+ { 0, 0 }
+};
+
+static const struct bge_vendor {
+ uint16_t v_id;
+ const char *v_name;
+} bge_vendors[] = {
+ { ALTEON_VENDORID, "Alteon" },
+ { ALTIMA_VENDORID, "Altima" },
+ { APPLE_VENDORID, "Apple" },
+ { BCOM_VENDORID, "Broadcom" },
+ { SK_VENDORID, "SysKonnect" },
+ { TC_VENDORID, "3Com" },
+
+ { 0, NULL }
+};
+
+static const struct bge_revision {
+ uint32_t br_chipid;
+ const char *br_name;
+} bge_revisions[] = {
+ { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
+ { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
+ { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
+ { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
+ { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
+ { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
+ { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
+ { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
+ { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
+ { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
+ { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
+ { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
+ { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
+ { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
+ { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
+ { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
+ { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
+ { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
+ { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
+ { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
+ { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
+ { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
+ { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
+ { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
+ { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
+ { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
+ { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
+ { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
+ { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
+ { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
+ { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
+ { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
+ { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
+ { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
+ { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
+ { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
+ { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
+ { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
+ { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
+ { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
+ { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
+ { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
+ { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
+ { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
+ { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
+ { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
+ { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
+ /* 5754 and 5787 share the same ASIC ID */
+ { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
+ { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
+ { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
+ { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
+ { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
+
+ { 0, NULL }
+};
+
+/*
+ * Some defaults for major revisions, so that newer steppings
+ * that we don't know about have a shot at working.
+ */
+static const struct bge_revision bge_majorrevs[] = {
+ { BGE_ASICREV_BCM5700, "unknown BCM5700" },
+ { BGE_ASICREV_BCM5701, "unknown BCM5701" },
+ { BGE_ASICREV_BCM5703, "unknown BCM5703" },
+ { BGE_ASICREV_BCM5704, "unknown BCM5704" },
+ { BGE_ASICREV_BCM5705, "unknown BCM5705" },
+ { BGE_ASICREV_BCM5750, "unknown BCM5750" },
+ { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
+ { BGE_ASICREV_BCM5752, "unknown BCM5752" },
+ { BGE_ASICREV_BCM5780, "unknown BCM5780" },
+ { BGE_ASICREV_BCM5714, "unknown BCM5714" },
+ { BGE_ASICREV_BCM5755, "unknown BCM5755" },
+ /* 5754 and 5787 share the same ASIC ID */
+ { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
+ { BGE_ASICREV_BCM5906, "unknown BCM5906" },
+
+ { 0, NULL }
+};
+
+#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
+#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
+#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
+#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
+#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
+
+const struct bge_revision * bge_lookup_rev(uint32_t);
+const struct bge_vendor * bge_lookup_vendor(uint16_t);
+
+typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
+
+static int bge_probe(device_t);
+static int bge_attach(device_t);
+static int bge_detach(device_t);
+static int bge_suspend(device_t);
+static int bge_resume(device_t);
+static void bge_release_resources(struct bge_softc *);
+static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
+static int bge_dma_alloc(device_t);
+static void bge_dma_free(struct bge_softc *);
+
+static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
+static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
+static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
+static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
+static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
+
+static void bge_txeof(struct bge_softc *);
+static void bge_rxeof(struct bge_softc *);
+
+static void bge_asf_driver_up (struct bge_softc *);
+static void bge_tick(void *);
+static void bge_stats_update(struct bge_softc *);
+static void bge_stats_update_regs(struct bge_softc *);
+static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
+
+static void bge_intr(void *);
+static void bge_start_locked(struct ifnet *);
+static void bge_start(struct ifnet *);
+#ifndef __rtems__
+static int bge_ioctl(struct ifnet *, u_long, caddr_t);
+#else
+static int bge_ioctl(struct ifnet *, ioctl_command_t, caddr_t);
+#endif
+static void bge_init_locked(struct bge_softc *);
+static void bge_init(void *);
+static void bge_stop(struct bge_softc *);
+static void bge_watchdog(struct bge_softc *);
+static void bge_shutdown(device_t);
+static int bge_ifmedia_upd_locked(struct ifnet *);
+static int bge_ifmedia_upd(struct ifnet *);
+static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
+
+static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
+static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
+
+static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
+static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
+
+static void bge_setpromisc(struct bge_softc *);
+static void bge_setmulti(struct bge_softc *);
+static void bge_setvlan(struct bge_softc *);
+
+static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
+static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
+static int bge_init_rx_ring_std(struct bge_softc *);
+static void bge_free_rx_ring_std(struct bge_softc *);
+static int bge_init_rx_ring_jumbo(struct bge_softc *);
+static void bge_free_rx_ring_jumbo(struct bge_softc *);
+static void bge_free_tx_ring(struct bge_softc *);
+static int bge_init_tx_ring(struct bge_softc *);
+
+static int bge_chipinit(struct bge_softc *);
+static int bge_blockinit(struct bge_softc *);
+
+static int bge_has_eaddr(struct bge_softc *);
+static uint32_t bge_readmem_ind(struct bge_softc *, int);
+static void bge_writemem_ind(struct bge_softc *, int, int);
+static void bge_writembx(struct bge_softc *, int, int);
+#ifdef notdef
+static uint32_t bge_readreg_ind(struct bge_softc *, int);
+#endif
+static void bge_writemem_direct(struct bge_softc *, int, int);
+static void bge_writereg_ind(struct bge_softc *, int, int);
+
+#ifndef __rtems__
+static int bge_miibus_readreg(device_t, int, int);
+static int bge_miibus_writereg(device_t, int, int, int);
+static void bge_miibus_statchg(device_t);
+#endif
+#ifdef DEVICE_POLLING
+static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
+#endif
+
+#define BGE_RESET_START 1
+#define BGE_RESET_STOP 2
+static void bge_sig_post_reset(struct bge_softc *, int);
+static void bge_sig_legacy(struct bge_softc *, int);
+static void bge_sig_pre_reset(struct bge_softc *, int);
+static int bge_reset(struct bge_softc *);
+static void bge_link_upd(struct bge_softc *);
+
+/*
+ * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
+ * leak information to untrusted users. It is also known to cause alignment
+ * traps on certain architectures.
+ */
+#ifdef BGE_REGISTER_DEBUG
+static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
+static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
+static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
+#endif
+#ifndef __rtems__
+static void bge_add_sysctls(struct bge_softc *);
+#endif
+static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
+
+#ifndef __rtems__
+static device_method_t bge_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, bge_probe),
+ DEVMETHOD(device_attach, bge_attach),
+ DEVMETHOD(device_detach, bge_detach),
+ DEVMETHOD(device_shutdown, bge_shutdown),
+ DEVMETHOD(device_suspend, bge_suspend),
+ DEVMETHOD(device_resume, bge_resume),
+
+ /* bus interface */
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_driver_added, bus_generic_driver_added),
+
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, bge_miibus_readreg),
+ DEVMETHOD(miibus_writereg, bge_miibus_writereg),
+ DEVMETHOD(miibus_statchg, bge_miibus_statchg),
+
+ { 0, 0 }
+};
+
+static driver_t bge_driver = {
+ "bge",
+ bge_methods,
+ sizeof(struct bge_softc)
+};
+
+static devclass_t bge_devclass;
+
+DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
+DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
+#else
+
+static int
+bge_irq_check_dis(device_t d)
+{
+ // struct bge_softc *sc = device_get_softc(d);
+ return 0;
+}
+
+static void
+bge_irq_en(device_t d)
+{
+ // struct bge_softc *sc = device_get_softc(d);
+ /* This can be called from IRQ context -- since all register accesses
+ * involve RAP we must take care to preserve it across this routine!
+ */
+}
+
+static device_method_t bge_methods = {
+ probe: bge_probe,
+ attach: bge_attach,
+ shutdown: bge_shutdown,
+ detach: bge_detach,
+ irq_check_dis: bge_irq_check_dis,
+ irq_en: bge_irq_en,
+};
+
+driver_t libbsdport_bge_driver = {
+ "bge",
+ &bge_methods,
+ DEV_TYPE_PCI,
+ sizeof(struct bge_softc)
+};
+
+#endif
+
+static int bge_allow_asf = 0;
+
+TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
+
+#ifndef __rtems__
+SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
+SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
+ "Allow ASF mode if available");
+#endif
+
+#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
+#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
+#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
+#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
+#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
+
+/*
+ * Return 1 if the adapter carries an EEPROM holding the station address,
+ * 0 for the sun4u on-board parts described below. On non-sparc64 builds
+ * this always returns 1.
+ */
+static int
+bge_has_eaddr(struct bge_softc *sc)
+{
+#ifdef __sparc64__
+ char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
+ device_t dev;
+ uint32_t subvendor;
+
+ dev = sc->bge_dev;
+
+ /*
+ * The on-board BGEs found in sun4u machines aren't fitted with
+ * an EEPROM which means that we have to obtain the MAC address
+ * via OFW and that some tests will always fail. We distinguish
+ * such BGEs by the subvendor ID, which also has to be obtained
+ * from OFW instead of the PCI configuration space as the latter
+ * indicates Broadcom as the subvendor of the netboot interface.
+ * For early Blade 1500 and 2500 we even have to check the OFW
+ * device path as the subvendor ID always defaults to Broadcom
+ * there.
+ */
+ if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
+ &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
+ subvendor == SUN_VENDORID)
+ return (0);
+ memset(buf, 0, sizeof(buf));
+ if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
+ if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
+ strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
+ return (0);
+ if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
+ strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
+ return (0);
+ }
+#endif
+ return (1);
+}
+
+/*
+ * Read a 32-bit word of NIC internal memory through the PCI memory
+ * window registers; the window base is restored to 0 afterwards.
+ */
+static uint32_t
+bge_readmem_ind(struct bge_softc *sc, int off)
+{
+ device_t dev;
+ uint32_t val;
+
+ dev = sc->bge_dev;
+
+ pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
+ val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
+ pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
+ return (val);
+}
+
+/*
+ * Write a 32-bit word of NIC internal memory through the PCI memory
+ * window registers; the window base is restored to 0 afterwards.
+ */
+static void
+bge_writemem_ind(struct bge_softc *sc, int off, int val)
+{
+ device_t dev;
+
+ dev = sc->bge_dev;
+
+ pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
+ pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
+ pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
+}
+
+#ifdef notdef
+/* Indirect register read via BGE_PCI_REG_BASEADDR/DATA (compiled out). */
+static uint32_t
+bge_readreg_ind(struct bge_softc *sc, int off)
+{
+ device_t dev;
+
+ dev = sc->bge_dev;
+
+ pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
+ return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
+}
+#endif
+
+/* Indirect register write via BGE_PCI_REG_BASEADDR/DATA. */
+static void
+bge_writereg_ind(struct bge_softc *sc, int off, int val)
+{
+ device_t dev;
+
+ dev = sc->bge_dev;
+
+ pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
+ pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
+}
+
+/* Direct (memory-mapped CSR) register write. */
+static void
+bge_writemem_direct(struct bge_softc *sc, int off, int val)
+{
+ CSR_WRITE_4(sc, off, val);
+}
+
+/*
+ * Mailbox write. The BCM5906 maps its mailboxes in the low-priority
+ * mailbox range, hence the offset rebase for that ASIC.
+ */
+static void
+bge_writembx(struct bge_softc *sc, int off, int val)
+{
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
+ off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
+
+ CSR_WRITE_4(sc, off, val);
+}
+
+/*
+ * Map a single buffer address.
+ * bus_dmamap_load() callback: stores the bus address of the first
+ * segment in the bge_dmamap_arg the caller passed in.
+ */
+
+static void
+bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ struct bge_dmamap_arg *ctx;
+
+ /* On load error leave the context untouched. */
+ if (error)
+ return;
+
+ ctx = arg;
+
+ /* Signal "too many segments" by zeroing bge_maxsegs. */
+ if (nseg > ctx->bge_maxsegs) {
+ ctx->bge_maxsegs = 0;
+ return;
+ }
+
+ ctx->bge_busaddr = segs->ds_addr;
+}
+
+/*
+ * Read one byte from NVRAM (BCM5906). Takes the NVRAM software
+ * arbitration lock, enables access, reads the containing 32-bit word
+ * and extracts the requested byte into *dest. Returns 0 on success,
+ * 1 if the lock or the read times out.
+ */
+static uint8_t
+bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
+{
+ uint32_t access, byte = 0;
+ int i;
+
+ /* Lock. */
+ CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
+ for (i = 0; i < 8000; i++) {
+ if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
+ break;
+ DELAY(20);
+ }
+ if (i == 8000)
+ return (1);
+
+ /* Enable access. */
+ access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
+ CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
+
+ /* Word-aligned read command for the word containing 'addr'. */
+ CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
+ CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
+ for (i = 0; i < BGE_TIMEOUT * 10; i++) {
+ DELAY(10);
+ if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
+ DELAY(10);
+ break;
+ }
+ }
+
+ if (i == BGE_TIMEOUT * 10) {
+ if_printf(sc->bge_ifp, "nvram read timed out\n");
+ return (1);
+ }
+
+ /* Get result; NVRAM data is byte-swapped relative to host order. */
+ byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
+
+ *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
+
+ /* Disable access. */
+ CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
+
+ /* Unlock. */
+ CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
+ CSR_READ_4(sc, BGE_NVRAM_SWARB);
+
+ return (0);
+}
+
+/*
+ * Read a sequence of bytes from NVRAM.
+ * Only supported on the BCM5906; returns 1 on any failure (wrong ASIC
+ * or a byte read error), 0 on success.
+ */
+static int
+bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
+{
+ int err = 0, i;
+ uint8_t byte = 0;
+
+ if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
+ return (1);
+
+ for (i = 0; i < cnt; i++) {
+ err = bge_nvram_getbyte(sc, off + i, &byte);
+ if (err)
+ break;
+ *(dest + i) = byte;
+ }
+
+ return (err ? 1 : 0);
+}
+
+/*
+ * Read a byte of data stored in the EEPROM at address 'addr.' The
+ * BCM570x supports both the traditional bitbang interface and an
+ * auto access interface for reading the EEPROM. We use the auto
+ * access method. Returns 0 on success, 1 on timeout.
+ */
+static uint8_t
+bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
+{
+ int i;
+ uint32_t byte = 0;
+
+ /*
+ * Enable use of auto EEPROM access so we can avoid
+ * having to use the bitbang method.
+ */
+ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
+
+ /* Reset the EEPROM, load the clock period. */
+ CSR_WRITE_4(sc, BGE_EE_ADDR,
+ BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
+ DELAY(20);
+
+ /* Issue the read EEPROM command. */
+ CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
+
+ /* Wait for completion */
+ for(i = 0; i < BGE_TIMEOUT * 10; i++) {
+ DELAY(10);
+ if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
+ break;
+ }
+
+ if (i == BGE_TIMEOUT * 10) {
+ device_printf(sc->bge_dev, "EEPROM read timed out\n");
+ return (1);
+ }
+
+ /* Get result: extract the addressed byte from the 32-bit word. */
+ byte = CSR_READ_4(sc, BGE_EE_DATA);
+
+ *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
+
+ return (0);
+}
+
+/*
+ * Read a sequence of bytes from the EEPROM.
+ * Returns 0 on success, 1 if any byte read failed.
+ */
+static int
+bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
+{
+ int i, error = 0;
+ uint8_t byte = 0;
+
+ for (i = 0; i < cnt; i++) {
+ error = bge_eeprom_getbyte(sc, off + i, &byte);
+ if (error)
+ break;
+ *(dest + i) = byte;
+ }
+
+ return (error ? 1 : 0);
+}
+
+#ifndef __rtems__
+/*
+ * MII bus read method: read PHY register 'reg' of PHY 'phy' through the
+ * chip's MI communication register. Returns the 16-bit register value,
+ * or 0 on timeout, read failure, or for any PHY address other than 1.
+ * Autopolling is disabled around the access and restored afterwards.
+ */
+static int
+bge_miibus_readreg(device_t dev, int phy, int reg)
+{
+ struct bge_softc *sc;
+ uint32_t val, autopoll;
+ int i;
+
+ sc = device_get_softc(dev);
+
+ /*
+ * Broadcom's own driver always assumes the internal
+ * PHY is at GMII address 1. On some chips, the PHY responds
+ * to accesses at all addresses, which could cause us to
+ * bogusly attach the PHY 32 times at probe type. Always
+ * restricting the lookup to address 1 is simpler than
+ * trying to figure out which chips revisions should be
+ * special-cased.
+ */
+ if (phy != 1)
+ return (0);
+
+ /* Reading with autopolling on may trigger PCI errors */
+ autopoll = CSR_READ_4(sc, BGE_MI_MODE);
+ if (autopoll & BGE_MIMODE_AUTOPOLL) {
+ BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
+ DELAY(40);
+ }
+
+ CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
+ BGE_MIPHY(phy) | BGE_MIREG(reg));
+
+ /* Poll for the BUSY bit to clear. */
+ for (i = 0; i < BGE_TIMEOUT; i++) {
+ DELAY(10);
+ val = CSR_READ_4(sc, BGE_MI_COMM);
+ if (!(val & BGE_MICOMM_BUSY))
+ break;
+ }
+
+ if (i == BGE_TIMEOUT) {
+ device_printf(sc->bge_dev,
+ "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
+ phy, reg, val);
+ val = 0;
+ goto done;
+ }
+
+ DELAY(5);
+ val = CSR_READ_4(sc, BGE_MI_COMM);
+
+done:
+ /* Restore autopolling if it was enabled on entry. */
+ if (autopoll & BGE_MIMODE_AUTOPOLL) {
+ BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
+ DELAY(40);
+ }
+
+ if (val & BGE_MICOMM_READFAIL)
+ return (0);
+
+ return (val & 0xFFFF);
+}
+
+/*
+ * MII bus write method: write 'val' to PHY register 'reg' of PHY 'phy'
+ * through the MI communication register. Always returns 0; a timeout
+ * is reported via device_printf only. Autopolling is disabled around
+ * the access and restored afterwards.
+ */
+static int
+bge_miibus_writereg(device_t dev, int phy, int reg, int val)
+{
+ struct bge_softc *sc;
+ uint32_t autopoll;
+ int i;
+
+ sc = device_get_softc(dev);
+
+ /* These registers must not be touched on the BCM5906. */
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
+ (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
+ return(0);
+
+ /* Reading with autopolling on may trigger PCI errors */
+ autopoll = CSR_READ_4(sc, BGE_MI_MODE);
+ if (autopoll & BGE_MIMODE_AUTOPOLL) {
+ BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
+ DELAY(40);
+ }
+
+ CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
+ BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
+
+ /* Poll for the BUSY bit to clear. */
+ for (i = 0; i < BGE_TIMEOUT; i++) {
+ DELAY(10);
+ if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
+ DELAY(5);
+ CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
+ break;
+ }
+ }
+
+ if (i == BGE_TIMEOUT) {
+ device_printf(sc->bge_dev,
+ "PHY write timed out (phy %d, reg %d, val %d)\n",
+ phy, reg, val);
+ return (0);
+ }
+
+ /* Restore autopolling if it was enabled on entry. */
+ if (autopoll & BGE_MIMODE_AUTOPOLL) {
+ BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
+ DELAY(40);
+ }
+
+ return (0);
+}
+
+/*
+ * MII bus status-change method: propagate the negotiated media (port
+ * mode GMII/MII and duplex) from the PHY into the MAC mode register.
+ */
+static void
+bge_miibus_statchg(device_t dev)
+{
+ struct bge_softc *sc;
+ struct mii_data *mii;
+ sc = device_get_softc(dev);
+ mii = device_get_softc(sc->bge_miibus);
+
+ BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
+ if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
+ BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
+ else
+ BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
+
+ if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
+ BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
+ else
+ BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
+}
+#endif
+
+/*
+ * Intialize a standard receive ring descriptor.
+ * If 'm' is NULL a fresh 2K cluster mbuf is allocated; otherwise the
+ * caller's mbuf is recycled. Returns 0 on success, ENOBUFS/ENOMEM on
+ * allocation or DMA mapping failure (a recycled mbuf is not freed on
+ * failure -- it still belongs to the caller).
+ */
+static int
+bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
+{
+ struct mbuf *m_new = NULL;
+ struct bge_rx_bd *r;
+ struct bge_dmamap_arg ctx;
+ int error;
+
+ if (m == NULL) {
+ m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m_new == NULL)
+ return (ENOBUFS);
+ m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
+ } else {
+ /* Recycle: reset length and data pointer to the cluster start. */
+ m_new = m;
+ m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
+ m_new->m_data = m_new->m_ext.ext_buf;
+ }
+
+ /* Align the payload unless the chip has the RX alignment bug. */
+ if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
+ m_adj(m_new, ETHER_ALIGN);
+ sc->bge_cdata.bge_rx_std_chain[i] = m_new;
+ r = &sc->bge_ldata.bge_rx_std_ring[i];
+ ctx.bge_maxsegs = 1;
+ ctx.sc = sc;
+ error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
+ sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
+ m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
+ if (error || ctx.bge_maxsegs == 0) {
+ if (m == NULL) {
+ sc->bge_cdata.bge_rx_std_chain[i] = NULL;
+ m_freem(m_new);
+ }
+ return (ENOMEM);
+ }
+ r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
+ r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
+ r->bge_flags = BGE_RXBDFLAG_END;
+ r->bge_len = m_new->m_len;
+ r->bge_idx = i;
+
+ bus_dmamap_sync(sc->bge_cdata.bge_mtag,
+ sc->bge_cdata.bge_rx_std_dmamap[i],
+ BUS_DMASYNC_PREREAD);
+
+ return (0);
+}
+
+/*
+ * Initialize a jumbo receive ring descriptor. This allocates
+ * a jumbo buffer from the pool managed internally by the driver.
+ * If 'm' is NULL a 9K cluster is allocated; otherwise the caller's
+ * mbuf is recycled. Returns 0 on success or an errno on failure
+ * (a recycled mbuf is not freed on failure).
+ */
+static int
+bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
+{
+ bus_dma_segment_t segs[BGE_NSEG_JUMBO];
+ struct bge_extrx_bd *r;
+ struct mbuf *m_new = NULL;
+ int nsegs;
+ int error;
+
+ if (m == NULL) {
+ MGETHDR(m_new, M_DONTWAIT, MT_DATA);
+ if (m_new == NULL)
+ return (ENOBUFS);
+
+ m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
+ if (!(m_new->m_flags & M_EXT)) {
+ m_freem(m_new);
+ return (ENOBUFS);
+ }
+ m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
+ } else {
+ /* Recycle: reset length and data pointer to the cluster start. */
+ m_new = m;
+ m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
+ m_new->m_data = m_new->m_ext.ext_buf;
+ }
+
+ if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
+ m_adj(m_new, ETHER_ALIGN);
+
+ error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
+ sc->bge_cdata.bge_rx_jumbo_dmamap[i],
+ m_new, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (error) {
+ if (m == NULL)
+ m_freem(m_new);
+ return (error);
+ }
+ sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
+
+ /*
+ * Fill in the extended RX buffer descriptor.
+ * The switch cases intentionally fall through so the fields are
+ * filled from the highest-numbered segment down to segment 0.
+ */
+ r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
+ r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
+ r->bge_idx = i;
+ r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
+ switch (nsegs) {
+ case 4:
+ r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
+ r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
+ r->bge_len3 = segs[3].ds_len;
+ /* FALLTHROUGH */
+ case 3:
+ r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
+ r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
+ r->bge_len2 = segs[2].ds_len;
+ /* FALLTHROUGH */
+ case 2:
+ r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
+ r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
+ r->bge_len1 = segs[1].ds_len;
+ /* FALLTHROUGH */
+ case 1:
+ r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
+ r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
+ r->bge_len0 = segs[0].ds_len;
+ break;
+ default:
+ panic("%s: %d segments\n", __func__, nsegs);
+ }
+
+ /*
+ * Sync using the jumbo DMA tag the map was created/loaded with
+ * (the original code wrongly used bge_mtag, the standard-ring tag).
+ */
+ bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
+ sc->bge_cdata.bge_rx_jumbo_dmamap[i],
+ BUS_DMASYNC_PREREAD);
+
+ return (0);
+}
+
+/*
+ * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
+ * that's 1MB of memory, which is a lot. For now, we fill only the first
+ * 256 ring entries and hope that our CPU is fast enough to keep up with
+ * the NIC.
+ * Returns 0 on success, ENOBUFS if any descriptor could not be filled.
+ */
+static int
+bge_init_rx_ring_std(struct bge_softc *sc)
+{
+ int i;
+
+ for (i = 0; i < BGE_SSLOTS; i++) {
+ if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
+ return (ENOBUFS);
+ }
+
+ bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
+ sc->bge_cdata.bge_rx_std_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /* Publish the producer index to the chip via the mailbox. */
+ sc->bge_std = i - 1;
+ bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
+
+ return (0);
+}
+
+/*
+ * Release every mbuf attached to the standard RX ring (syncing and
+ * unloading its DMA map first) and clear all ring descriptors.
+ */
+static void
+bge_free_rx_ring_std(struct bge_softc *sc)
+{
+ int i;
+
+ for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
+ if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
+ bus_dmamap_sync(sc->bge_cdata.bge_mtag,
+ sc->bge_cdata.bge_rx_std_dmamap[i],
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->bge_cdata.bge_mtag,
+ sc->bge_cdata.bge_rx_std_dmamap[i]);
+ m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
+ sc->bge_cdata.bge_rx_std_chain[i] = NULL;
+ }
+ bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
+ sizeof(struct bge_rx_bd));
+ }
+}
+
+/*
+ * Fill the jumbo RX ring with fresh 9K buffers, enable the extended
+ * buffer-descriptor format in the ring's RCB, and publish the producer
+ * index. Returns 0 on success, ENOBUFS on allocation failure.
+ */
+static int
+bge_init_rx_ring_jumbo(struct bge_softc *sc)
+{
+ struct bge_rcb *rcb;
+ int i;
+
+ for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
+ if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
+ return (ENOBUFS);
+ }
+
+ bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
+ sc->bge_cdata.bge_rx_jumbo_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ sc->bge_jumbo = i - 1;
+
+ rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
+ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
+ BGE_RCB_FLAG_USE_EXT_RX_BD);
+ CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
+
+ bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
+
+ return (0);
+}
+
+/*
+ * Release every mbuf attached to the jumbo RX ring (syncing and
+ * unloading its DMA map first) and clear all ring descriptors.
+ */
+static void
+bge_free_rx_ring_jumbo(struct bge_softc *sc)
+{
+ int i;
+
+ for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
+ if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
+ bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
+ sc->bge_cdata.bge_rx_jumbo_dmamap[i],
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
+ sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
+ m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
+ sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
+ }
+ bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
+ sizeof(struct bge_extrx_bd));
+ }
+}
+
+/*
+ * Release every mbuf still attached to the TX ring (syncing and
+ * unloading its DMA map first) and clear all ring descriptors.
+ * Safe to call before the ring has been allocated.
+ */
+static void
+bge_free_tx_ring(struct bge_softc *sc)
+{
+ int i;
+
+ if (sc->bge_ldata.bge_tx_ring == NULL)
+ return;
+
+ for (i = 0; i < BGE_TX_RING_CNT; i++) {
+ if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
+ bus_dmamap_sync(sc->bge_cdata.bge_mtag,
+ sc->bge_cdata.bge_tx_dmamap[i],
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->bge_cdata.bge_mtag,
+ sc->bge_cdata.bge_tx_dmamap[i]);
+ m_freem(sc->bge_cdata.bge_tx_chain[i]);
+ sc->bge_cdata.bge_tx_chain[i] = NULL;
+ }
+ bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
+ sizeof(struct bge_tx_bd));
+ }
+}
+
+/*
+ * Reset the TX bookkeeping and zero both the host-memory and NIC-memory
+ * send-ring producer mailboxes. Always returns 0.
+ */
+static int
+bge_init_tx_ring(struct bge_softc *sc)
+{
+ sc->bge_txcnt = 0;
+ sc->bge_tx_saved_considx = 0;
+
+ /* Initialize transmit producer index for host-memory send ring. */
+ sc->bge_tx_prodidx = 0;
+ bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
+
+ /* 5700 b2 errata */
+ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
+ bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
+
+ /* NIC-memory send ring not used; initialize to zero. */
+ bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
+ /* 5700 b2 errata */
+ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
+ bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
+
+ return (0);
+}
+
+/*
+ * Mirror the interface's IFF_PROMISC flag into the chip's RX mode
+ * register. Caller must hold the driver lock.
+ */
+static void
+bge_setpromisc(struct bge_softc *sc)
+{
+ struct ifnet *ifp;
+
+ BGE_LOCK_ASSERT(sc);
+
+ ifp = sc->bge_ifp;
+
+ /* Enable or disable promiscuous mode as needed. */
+ if (ifp->if_flags & IFF_PROMISC)
+ BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
+ else
+ BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
+}
+
+/*
+ * Program the 4x32-bit multicast hash filter (BGE_MAR0..3) from the
+ * interface's multicast list; all-ones for ALLMULTI/PROMISC. The hash
+ * is the low 7 bits of the little-endian CRC32 of the address. Caller
+ * must hold the driver lock.
+ */
+static void
+bge_setmulti(struct bge_softc *sc)
+{
+ struct ifnet *ifp;
+#ifndef __rtems__
+ struct ifmultiaddr *ifma;
+#endif
+ int h;
+ uint32_t hashes[4] = { 0, 0, 0, 0 };
+ int i;
+
+ BGE_LOCK_ASSERT(sc);
+
+ ifp = sc->bge_ifp;
+
+ if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
+ for (i = 0; i < 4; i++)
+ CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
+ return;
+ }
+
+ /* First, zot all the existing filters. */
+ for (i = 0; i < 4; i++)
+ CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
+
+ /* Now program new ones. */
+#ifndef __rtems__
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
+ ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
+ hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
+ }
+ IF_ADDR_UNLOCK(ifp);
+#else
+ {
+ /* UNTESTED */
+ struct ether_multi *enm;
+ struct ether_multistep step;
+ ETHER_FIRST_MULTI(step, (struct arpcom*)ifp, enm);
+ while ( enm != NULL ) {
+ /* NOTE(review): only enm_addrlo is hashed; address ranges
+ * (enm_addrlo != enm_addrhi) are not expanded -- confirm. */
+ h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x7F;
+ hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
+ ETHER_NEXT_MULTI( step, enm );
+ }
+ }
+#endif
+
+ for (i = 0; i < 4; i++)
+ CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
+}
+
+/*
+ * Mirror the interface's VLAN hardware-tagging capability into the RX
+ * mode register. On RTEMS builds the body is compiled out, so this is
+ * a no-op there. Caller must hold the driver lock.
+ */
+static void
+bge_setvlan(struct bge_softc *sc)
+{
+ struct ifnet *ifp;
+
+ BGE_LOCK_ASSERT(sc);
+
+ ifp = sc->bge_ifp;
+
+ /* Enable or disable VLAN tag stripping as needed. */
+#ifndef __rtems__
+ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+ BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
+ else
+ BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
+#endif
+}
+
+/*
+ * Signal the ASF/bootcode firmware that a reset is about to happen
+ * (new-handshake protocol): write the driver magic and a START/UNLOAD
+ * event into NIC shared memory. 'type' is BGE_RESET_START or
+ * BGE_RESET_STOP. No-op when ASF mode is off.
+ */
+static void
+bge_sig_pre_reset(struct bge_softc *sc, int type)
+{
+ /*
+ * Some chips don't like this so only do this if ASF is enabled
+ */
+ if (sc->bge_asf_mode)
+ bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
+
+ if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
+ switch (type) {
+ case BGE_RESET_START:
+ bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
+ break;
+ case BGE_RESET_STOP:
+ bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
+ break;
+ }
+ }
+}
+
+/*
+ * Signal the ASF/bootcode firmware that the reset has completed
+ * (new-handshake protocol): post the corresponding DONE event into
+ * NIC shared memory. No-op unless ASF_NEW_HANDSHAKE is active.
+ */
+static void
+bge_sig_post_reset(struct bge_softc *sc, int type)
+{
+ if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
+ switch (type) {
+ case BGE_RESET_START:
+ bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
+ /* START DONE */
+ break;
+ case BGE_RESET_STOP:
+ bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
+ break;
+ }
+ }
+}
+
+/*
+ * Legacy-handshake variant of the pre-reset firmware signalling:
+ * post a START/UNLOAD event whenever ASF mode is on at all.
+ */
+static void
+bge_sig_legacy(struct bge_softc *sc, int type)
+{
+ if (sc->bge_asf_mode) {
+ switch (type) {
+ case BGE_RESET_START:
+ bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
+ break;
+ case BGE_RESET_STOP:
+ bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
+ break;
+ }
+ }
+}
+
+/*
+ * Pause the ASF firmware: post BGE_FW_PAUSE in shared memory, ring the
+ * CPU event doorbell (bit 14), then poll up to ~1ms for the firmware
+ * to acknowledge by clearing the event bit. No-op when ASF is off.
+ */
+void bge_stop_fw(struct bge_softc *);
+void
+bge_stop_fw(struct bge_softc *sc)
+{
+ int i;
+
+ if (sc->bge_asf_mode) {
+ bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
+ CSR_WRITE_4(sc, BGE_CPU_EVENT,
+ CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
+
+ for (i = 0; i < 100; i++ ) {
+ if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
+ break;
+ DELAY(10);
+ }
+ }
+}
+
+/*
+ * Do endian, PCI and DMA initialization. Also check the on-board ROM
+ * self-test results.
+ * Returns 0 on success, ENODEV if the RX CPU self-test failed.
+ */
+static int
+bge_chipinit(struct bge_softc *sc)
+{
+ uint32_t dma_rw_ctl;
+ int i;
+
+ /* Set endianness before we access any non-PCI registers. */
+ pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
+
+ /*
+ * Check the 'ROM failed' bit on the RX CPU to see if
+ * self-tests passed. Skip this check when there's no
+ * chip containing the Ethernet address fitted, since
+ * in that case it will always fail.
+ */
+ if ((sc->bge_flags & BGE_FLAG_EADDR) &&
+ CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
+ device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
+ return (ENODEV);
+ }
+
+ /* Clear the MAC control register */
+ CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
+
+ /*
+ * Clear the MAC statistics block in the NIC's
+ * internal memory.
+ */
+ for (i = BGE_STATS_BLOCK;
+ i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
+ BGE_MEMWIN_WRITE(sc, i, 0);
+
+ for (i = BGE_STATUS_BLOCK;
+ i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
+ BGE_MEMWIN_WRITE(sc, i, 0);
+
+ /*
+ * Set up the PCI DMA control register.
+ * Watermarks and workaround bits differ by bus type (PCIe, PCI-X,
+ * conventional PCI) and by ASIC revision.
+ */
+ dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
+ BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
+ if (sc->bge_flags & BGE_FLAG_PCIE) {
+ /* Read watermark not used, 128 bytes for write. */
+ dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
+ } else if (sc->bge_flags & BGE_FLAG_PCIX) {
+ if (BGE_IS_5714_FAMILY(sc)) {
+ /* 256 bytes for read and write. */
+ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
+ BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
+ dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
+ BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
+ BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
+ } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
+ /* 1536 bytes for read, 384 bytes for write. */
+ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
+ BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
+ } else {
+ /* 384 bytes for read and write. */
+ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
+ BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
+ 0x0F;
+ }
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5704) {
+ uint32_t tmp;
+
+ /* Set ONE_DMA_AT_ONCE for hardware workaround. */
+ tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
+ if (tmp == 6 || tmp == 7)
+ dma_rw_ctl |=
+ BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
+
+ /* Set PCI-X DMA write workaround. */
+ dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
+ }
+ } else {
+ /* Conventional PCI bus: 256 bytes for read and write. */
+ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
+ BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
+
+ if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
+ sc->bge_asicrev != BGE_ASICREV_BCM5750)
+ dma_rw_ctl |= 0x0F;
+ }
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5701)
+ dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
+ BGE_PCIDMARWCTL_ASRT_ALL_BE;
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5704)
+ dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
+ pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
+
+ /*
+ * Set up general mode register.
+ */
+ CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
+ BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
+ BGE_MODECTL_TX_NO_PHDR_CSUM);
+
+ /*
+ * BCM5701 B5 have a bug causing data corruption when using
+ * 64-bit DMA reads, which can be terminated early and then
+ * completed later as 32-bit accesses, in combination with
+ * certain bridges.
+ */
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
+ sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
+ BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
+
+ /*
+ * Tell the firmware the driver is running
+ */
+ if (sc->bge_asf_mode & ASF_STACKUP)
+ BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
+
+ /*
+ * Disable memory write invalidate. Apparently it is not supported
+ * properly by these devices.
+ */
+ PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
+
+ /* Set the timer prescaler (always 66Mhz) */
+ CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
+
+ /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
+ DELAY(40); /* XXX */
+
+ /* Put PHY into ready state */
+ BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
+ CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
+ DELAY(40);
+ }
+
+ return (0);
+}
+
+static int
+bge_blockinit(struct bge_softc *sc)
+{
+ struct bge_rcb *rcb;
+ bus_size_t vrcb;
+ bge_hostaddr taddr;
+ uint32_t val;
+ int i;
+
+ /*
+ * Initialize the memory window pointer register so that
+ * we can access the first 32K of internal NIC RAM. This will
+ * allow us to set up the TX send ring RCBs and the RX return
+ * ring RCBs, plus other things which live in NIC memory.
+ */
+ CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
+
+ /* Note: the BCM5704 has a smaller mbuf space than other chips. */
+
+ if (!(BGE_IS_5705_PLUS(sc))) {
+ /* Configure mbuf memory pool */
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
+ else
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
+
+ /* Configure DMA resource pool */
+ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
+ BGE_DMA_DESCRIPTORS);
+ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
+ }
+
+ /* Configure mbuf pool watermarks */
+ if (!BGE_IS_5705_PLUS(sc)) {
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
+ } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
+ } else {
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
+ }
+
+ /* Configure DMA resource watermarks */
+ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
+ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
+
+ /* Enable buffer manager */
+ if (!(BGE_IS_5705_PLUS(sc))) {
+ CSR_WRITE_4(sc, BGE_BMAN_MODE,
+ BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
+
+ /* Poll for buffer manager start indication */
+ for (i = 0; i < BGE_TIMEOUT; i++) {
+ DELAY(10);
+ if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
+ break;
+ }
+
+ if (i == BGE_TIMEOUT) {
+ device_printf(sc->bge_dev,
+ "buffer manager failed to start\n");
+ return (ENXIO);
+ }
+ }
+
+ /* Enable flow-through queues */
+ CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
+ CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
+
+ /* Wait until queue initialization is complete */
+ for (i = 0; i < BGE_TIMEOUT; i++) {
+ DELAY(10);
+ if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
+ break;
+ }
+
+ if (i == BGE_TIMEOUT) {
+ device_printf(sc->bge_dev, "flow-through queue init failed\n");
+ return (ENXIO);
+ }
+
+ /* Initialize the standard RX ring control block */
+ rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
+ rcb->bge_hostaddr.bge_addr_lo =
+ BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
+ rcb->bge_hostaddr.bge_addr_hi =
+ BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
+ bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
+ sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
+ if (BGE_IS_5705_PLUS(sc))
+ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
+ else
+ rcb->bge_maxlen_flags =
+ BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
+ rcb->bge_nicaddr = BGE_STD_RX_RINGS;
+ CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
+ CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
+
+ CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
+ CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
+
+ /*
+ * Initialize the jumbo RX ring control block
+ * We set the 'ring disabled' bit in the flags
+ * field until we're actually ready to start
+ * using this ring (i.e. once we set the MTU
+ * high enough to require it).
+ */
+ if (BGE_IS_JUMBO_CAPABLE(sc)) {
+ rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
+
+ rcb->bge_hostaddr.bge_addr_lo =
+ BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
+ rcb->bge_hostaddr.bge_addr_hi =
+ BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
+ bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
+ sc->bge_cdata.bge_rx_jumbo_ring_map,
+ BUS_DMASYNC_PREREAD);
+ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
+ BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
+ rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
+ CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
+ rcb->bge_hostaddr.bge_addr_hi);
+ CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
+ rcb->bge_hostaddr.bge_addr_lo);
+
+ CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
+ rcb->bge_maxlen_flags);
+ CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
+
+ /* Set up dummy disabled mini ring RCB */
+ rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
+ rcb->bge_maxlen_flags =
+ BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
+ CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
+ rcb->bge_maxlen_flags);
+ }
+
+ /*
+ * Set the BD ring replentish thresholds. The recommended
+ * values are 1/8th the number of descriptors allocated to
+ * each ring.
+ * XXX The 5754 requires a lower threshold, so it might be a
+ * requirement of all 575x family chips. The Linux driver sets
+ * the lower threshold for all 5705 family chips as well, but there
+ * are reports that it might not need to be so strict.
+ *
+ * XXX Linux does some extra fiddling here for the 5906 parts as
+ * well.
+ */
+ if (BGE_IS_5705_PLUS(sc))
+ val = 8;
+ else
+ val = BGE_STD_RX_RING_CNT / 8;
+ CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
+ CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
+
+ /*
+ * Disable all unused send rings by setting the 'ring disabled'
+ * bit in the flags field of all the TX send ring control blocks.
+ * These are located in NIC memory.
+ */
+ vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
+ for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
+ RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
+ BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
+ RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
+ vrcb += sizeof(struct bge_rcb);
+ }
+
+ /* Configure TX RCB 0 (we use only the first ring) */
+ vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
+ BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
+ RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
+ RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
+ RCB_WRITE_4(sc, vrcb, bge_nicaddr,
+ BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
+ if (!(BGE_IS_5705_PLUS(sc)))
+ RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
+ BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
+
+ /* Disable all unused RX return rings */
+ vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
+ for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
+ RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
+ RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
+ RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
+ BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
+ BGE_RCB_FLAG_RING_DISABLED));
+ RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
+ bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
+ (i * (sizeof(uint64_t))), 0);
+ vrcb += sizeof(struct bge_rcb);
+ }
+
+ /* Initialize RX ring indexes */
+ bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
+ bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
+ bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
+
+ /*
+ * Set up RX return ring 0
+ * Note that the NIC address for RX return rings is 0x00000000.
+ * The return rings live entirely within the host, so the
+ * nicaddr field in the RCB isn't used.
+ */
+ vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
+ BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
+ RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
+ RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
+ RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
+ RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
+ BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
+
+ /* Set random backoff seed for TX */
+ CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
+ IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
+ IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
+ IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
+ BGE_TX_BACKOFF_SEED_MASK);
+
+ /* Set inter-packet gap */
+ CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
+
+ /*
+ * Specify which ring to use for packets that don't match
+ * any RX rules.
+ */
+ CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
+
+ /*
+ * Configure number of RX lists. One interrupt distribution
+ * list, sixteen active lists, one bad frames class.
+ */
+ CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
+
+ /* Inialize RX list placement stats mask. */
+ CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
+ CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
+
+ /* Disable host coalescing until we get it set up */
+ CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
+
+ /* Poll to make sure it's shut down. */
+ for (i = 0; i < BGE_TIMEOUT; i++) {
+ DELAY(10);
+ if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
+ break;
+ }
+
+ if (i == BGE_TIMEOUT) {
+ device_printf(sc->bge_dev,
+ "host coalescing engine failed to idle\n");
+ return (ENXIO);
+ }
+
+ /* Set up host coalescing defaults */
+ CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
+ CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
+ CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
+ CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
+ if (!(BGE_IS_5705_PLUS(sc))) {
+ CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
+ CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
+ }
+ CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
+ CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
+
+ /* Set up address of statistics block */
+ if (!(BGE_IS_5705_PLUS(sc))) {
+ CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
+ BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
+ CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
+ BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
+ CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
+ CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
+ CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
+ }
+
+ /* Set up address of status block */
+ CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
+ BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
+ CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
+ BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
+ sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
+ sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
+
+ /* Turn on host coalescing state machine */
+ CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
+
+ /* Turn on RX BD completion state machine and enable attentions */
+ CSR_WRITE_4(sc, BGE_RBDC_MODE,
+ BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
+
+ /* Turn on RX list placement state machine */
+ CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
+
+ /* Turn on RX list selector state machine. */
+ if (!(BGE_IS_5705_PLUS(sc)))
+ CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
+
+ /* Turn on DMA, clear stats */
+ CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB |
+ BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR |
+ BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB |
+ BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB |
+ ((sc->bge_flags & BGE_FLAG_TBI) ?
+ BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
+
+ /* Set misc. local control, enable interrupts on attentions */
+ CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
+
+#ifdef notdef
+ /* Assert GPIO pins for PHY reset */
+ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
+ BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
+ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
+ BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
+#endif
+
+ /* Turn on DMA completion state machine */
+ if (!(BGE_IS_5705_PLUS(sc)))
+ CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
+
+ val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
+
+ /* Enable host coalescing bug fix. */
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5787)
+ val |= 1 << 29;
+
+ /* Turn on write DMA state machine */
+ CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
+
+ /* Turn on read DMA state machine */
+ CSR_WRITE_4(sc, BGE_RDMA_MODE,
+ BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS);
+
+ /* Turn on RX data completion state machine */
+ CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
+
+ /* Turn on RX BD initiator state machine */
+ CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
+
+ /* Turn on RX data and RX BD initiator state machine */
+ CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
+
+ /* Turn on Mbuf cluster free state machine */
+ if (!(BGE_IS_5705_PLUS(sc)))
+ CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
+
+ /* Turn on send BD completion state machine */
+ CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
+
+ /* Turn on send data completion state machine */
+ CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
+
+ /* Turn on send data initiator state machine */
+ CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
+
+ /* Turn on send BD initiator state machine */
+ CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
+
+ /* Turn on send BD selector state machine */
+ CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
+
+ CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
+ CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
+ BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
+
+ /* ack/clear link change events */
+ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
+ BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
+ BGE_MACSTAT_LINK_CHANGED);
+ CSR_WRITE_4(sc, BGE_MI_STS, 0);
+
+ /* Enable PHY auto polling (for MII/GMII only) */
+ if (sc->bge_flags & BGE_FLAG_TBI) {
+ CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
+ } else {
+ BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
+ sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
+ CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
+ BGE_EVTENB_MI_INTERRUPT);
+ }
+
+ /*
+ * Clear any pending link state attention.
+ * Otherwise some link state change events may be lost until attention
+ * is cleared by bge_intr() -> bge_link_upd() sequence.
+ * It's not necessary on newer BCM chips - perhaps enabling link
+ * state change attentions implies clearing pending attention.
+ */
+ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
+ BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
+ BGE_MACSTAT_LINK_CHANGED);
+
+ /* Enable link state change attentions. */
+ BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
+
+ return (0);
+}
+
+/*
+ * Map a chip ID to its revision-table entry.  An exact chip-ID match
+ * in bge_revisions wins; otherwise fall back to matching only the
+ * ASIC (major) revision in bge_majorrevs.  Returns NULL if unknown.
+ */
+const struct bge_revision *
+bge_lookup_rev(uint32_t chipid)
+{
+	const struct bge_revision *rev;
+
+	/* First pass: exact chip-ID match. */
+	rev = bge_revisions;
+	while (rev->br_name != NULL) {
+		if (rev->br_chipid == chipid)
+			return (rev);
+		rev++;
+	}
+
+	/* Second pass: match on the ASIC revision only. */
+	rev = bge_majorrevs;
+	while (rev->br_name != NULL) {
+		if (rev->br_chipid == BGE_ASICREV(chipid))
+			return (rev);
+		rev++;
+	}
+
+	return (NULL);
+}
+
+/*
+ * Map a PCI vendor ID to its bge_vendors table entry.  An unknown
+ * vendor is a driver bug (bge_probe() only matches listed devices),
+ * so panic rather than return.
+ */
+const struct bge_vendor *
+bge_lookup_vendor(uint16_t vid)
+{
+	const struct bge_vendor *vend;
+
+	for (vend = bge_vendors; vend->v_name != NULL; vend++) {
+		if (vend->v_id == vid)
+			return (vend);
+	}
+
+	panic("%s: unknown vendor %d", __func__, vid);
+	/* NOTREACHED */
+	return (NULL);
+}
+
+/*
+ * Probe for a Broadcom chip. Check the PCI vendor and device IDs
+ * against our list and return its name if we find a match.
+ *
+ * Note that since the Broadcom controller contains VPD support, we
+ * try to get the device name string from the controller itself instead
+ * of the compiled-in string. It guarantees we'll always announce the
+ * right product name. We fall back to the compiled-in string when
+ * VPD is unavailable or corrupt.
+ *
+ * Returns 0 on a match (device description set, quirk flags latched
+ * into the softc), ENXIO otherwise.
+ */
+static int
+bge_probe(device_t dev)
+{
+	struct bge_type *t = bge_devs;
+	struct bge_softc *sc = device_get_softc(dev);
+	uint16_t vid, did;
+
+	sc->bge_dev = dev;
+	vid = pci_get_vendor(dev);
+	did = pci_get_device(dev);
+	while (t->bge_vid != 0) {
+		if ((vid == t->bge_vid) && (did == t->bge_did)) {
+			char model[64], buf[96];
+			const struct bge_revision *br;
+			const struct bge_vendor *v;
+			uint32_t id;
+
+			id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
+			    BGE_PCIMISCCTL_ASICREV;
+			br = bge_lookup_rev(id);
+			v = bge_lookup_vendor(vid);
+			{
+#if __FreeBSD_version > 700024
+				const char *pname;
+
+				/* Prefer the product name stored in VPD. */
+				if (pci_get_vpd_ident(dev, &pname) == 0)
+					snprintf(model, sizeof(model), "%s",
+					    pname);
+				else
+#endif
+				snprintf(model, sizeof(model), "%s %s",
+				    v->v_name,
+				    br != NULL ? br->br_name :
+				    "NetXtreme Ethernet Controller");
+			}
+			/* The ASIC revision lives in the top half of the ID. */
+			snprintf(buf, sizeof(buf), "%s, %sASIC rev. %#04x",
+			    model, br != NULL ? "" : "unknown ", id >> 16);
+			device_set_desc_copy(dev, buf);
+			/* Dell cards don't support the 3-LED mode. */
+			if (pci_get_subvendor(dev) == DELL_VENDORID)
+				sc->bge_flags |= BGE_FLAG_NO_3LED;
+			if (did == BCOM_DEVICEID_BCM5755M)
+				sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
+			return (0);
+		}
+		t++;
+	}
+
+	return (ENXIO);
+}
+
+/*
+ * Release every DMA resource created by bge_dma_alloc().  Each map,
+ * memory region and tag is checked before it is released, so this is
+ * safe to call on a partially-initialized softc (e.g. from an attach
+ * failure path).  Order matters: buffer maps are destroyed before
+ * their tag, and each ring is unloaded, freed, then has its tag
+ * destroyed; the parent tag goes last.
+ */
+static void
+bge_dma_free(struct bge_softc *sc)
+{
+	int i;
+
+	/* Destroy DMA maps for RX buffers. */
+	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
+		if (sc->bge_cdata.bge_rx_std_dmamap[i])
+			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
+			    sc->bge_cdata.bge_rx_std_dmamap[i]);
+	}
+
+	/* Destroy DMA maps for jumbo RX buffers. */
+	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
+		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
+			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
+			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
+	}
+
+	/* Destroy DMA maps for TX buffers. */
+	for (i = 0; i < BGE_TX_RING_CNT; i++) {
+		if (sc->bge_cdata.bge_tx_dmamap[i])
+			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
+			    sc->bge_cdata.bge_tx_dmamap[i]);
+	}
+
+	/* The mbuf tag is shared by the std RX and TX maps above. */
+	if (sc->bge_cdata.bge_mtag)
+		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
+
+
+	/* Destroy standard RX ring. */
+	if (sc->bge_cdata.bge_rx_std_ring_map)
+		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
+		    sc->bge_cdata.bge_rx_std_ring_map);
+	if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
+		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
+		    sc->bge_ldata.bge_rx_std_ring,
+		    sc->bge_cdata.bge_rx_std_ring_map);
+
+	if (sc->bge_cdata.bge_rx_std_ring_tag)
+		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
+
+	/* Destroy jumbo RX ring (only allocated on jumbo-capable chips). */
+	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
+		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
+		    sc->bge_cdata.bge_rx_jumbo_ring_map);
+
+	if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
+	    sc->bge_ldata.bge_rx_jumbo_ring)
+		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
+		    sc->bge_ldata.bge_rx_jumbo_ring,
+		    sc->bge_cdata.bge_rx_jumbo_ring_map);
+
+	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
+		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
+
+	/* Destroy RX return ring. */
+	if (sc->bge_cdata.bge_rx_return_ring_map)
+		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
+		    sc->bge_cdata.bge_rx_return_ring_map);
+
+	if (sc->bge_cdata.bge_rx_return_ring_map &&
+	    sc->bge_ldata.bge_rx_return_ring)
+		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
+		    sc->bge_ldata.bge_rx_return_ring,
+		    sc->bge_cdata.bge_rx_return_ring_map);
+
+	if (sc->bge_cdata.bge_rx_return_ring_tag)
+		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
+
+	/* Destroy TX ring. */
+	if (sc->bge_cdata.bge_tx_ring_map)
+		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
+		    sc->bge_cdata.bge_tx_ring_map);
+
+	if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
+		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
+		    sc->bge_ldata.bge_tx_ring,
+		    sc->bge_cdata.bge_tx_ring_map);
+
+	if (sc->bge_cdata.bge_tx_ring_tag)
+		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
+
+	/* Destroy status block. */
+	if (sc->bge_cdata.bge_status_map)
+		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
+		    sc->bge_cdata.bge_status_map);
+
+	if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
+		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
+		    sc->bge_ldata.bge_status_block,
+		    sc->bge_cdata.bge_status_map);
+
+	if (sc->bge_cdata.bge_status_tag)
+		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
+
+	/* Destroy statistics block. */
+	if (sc->bge_cdata.bge_stats_map)
+		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
+		    sc->bge_cdata.bge_stats_map);
+
+	if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
+		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
+		    sc->bge_ldata.bge_stats,
+		    sc->bge_cdata.bge_stats_map);
+
+	if (sc->bge_cdata.bge_stats_tag)
+		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
+
+	/* Destroy the parent tag. */
+	if (sc->bge_cdata.bge_parent_tag)
+		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
+}
+
+/*
+ * Allocate all DMA resources for the controller: the parent tag, the
+ * mbuf tag plus per-buffer maps for the std RX and TX rings, and the
+ * std RX / jumbo RX / RX return / TX rings as well as the status and
+ * statistics blocks (tag + memory + loaded map each).  The bus
+ * addresses are captured via bge_dma_map_addr() into bge_ldata.
+ *
+ * Returns 0 on success or ENOMEM on any failure.  On failure the
+ * caller is expected to clean up via bge_dma_free(), which tolerates
+ * a partially-populated softc.
+ */
+static int
+bge_dma_alloc(device_t dev)
+{
+	struct bge_dmamap_arg ctx;
+	struct bge_softc *sc;
+	int i, error;
+
+	sc = device_get_softc(dev);
+
+	/*
+	 * Allocate the parent bus DMA tag appropriate for PCI.
+	 */
+	error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
+	    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
+	    NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
+	    0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
+
+	if (error != 0) {
+		device_printf(sc->bge_dev,
+		    "could not allocate parent dma tag\n");
+		return (ENOMEM);
+	}
+
+	/*
+	 * Create tag for mbufs.
+	 */
+	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
+	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
+	    NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
+	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
+
+	if (error) {
+		device_printf(sc->bge_dev, "could not allocate dma tag\n");
+		return (ENOMEM);
+	}
+
+	/* Create DMA maps for RX buffers. */
+	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
+		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
+			    &sc->bge_cdata.bge_rx_std_dmamap[i]);
+		if (error) {
+			device_printf(sc->bge_dev,
+			    "can't create DMA map for RX\n");
+			return (ENOMEM);
+		}
+	}
+
+	/* Create DMA maps for TX buffers. */
+	for (i = 0; i < BGE_TX_RING_CNT; i++) {
+		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
+			    &sc->bge_cdata.bge_tx_dmamap[i]);
+		if (error) {
+			/* Was "for RX" - copy/paste error in the message. */
+			device_printf(sc->bge_dev,
+			    "can't create DMA map for TX\n");
+			return (ENOMEM);
+		}
+	}
+
+	/* Create tag for standard RX ring. */
+	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
+	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
+	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
+	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
+
+	if (error) {
+		device_printf(sc->bge_dev, "could not allocate dma tag\n");
+		return (ENOMEM);
+	}
+
+	/* Allocate DMA'able memory for standard RX ring. */
+	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
+	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
+	    &sc->bge_cdata.bge_rx_std_ring_map);
+	if (error)
+		return (ENOMEM);
+
+	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
+
+	/* Load the address of the standard RX ring. */
+	ctx.bge_maxsegs = 1;
+	ctx.sc = sc;
+
+	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
+	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
+	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
+
+	if (error)
+		return (ENOMEM);
+
+	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
+
+	/* Create tags for jumbo mbufs (jumbo-capable chips only). */
+	if (BGE_IS_JUMBO_CAPABLE(sc)) {
+		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
+		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
+		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
+		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
+		if (error) {
+			device_printf(sc->bge_dev,
+			    "could not allocate jumbo dma tag\n");
+			return (ENOMEM);
+		}
+
+		/* Create tag for jumbo RX ring. */
+		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
+		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
+		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
+		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
+
+		if (error) {
+			device_printf(sc->bge_dev,
+			    "could not allocate jumbo ring dma tag\n");
+			return (ENOMEM);
+		}
+
+		/* Allocate DMA'able memory for jumbo RX ring. */
+		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
+		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
+		    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
+		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
+		if (error)
+			return (ENOMEM);
+
+		/* Load the address of the jumbo RX ring. */
+		ctx.bge_maxsegs = 1;
+		ctx.sc = sc;
+
+		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
+		    sc->bge_cdata.bge_rx_jumbo_ring_map,
+		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
+		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
+
+		if (error)
+			return (ENOMEM);
+
+		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
+
+		/* Create DMA maps for jumbo RX buffers. */
+		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
+			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
+				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
+			if (error) {
+				device_printf(sc->bge_dev,
+				    "can't create DMA map for jumbo RX\n");
+				return (ENOMEM);
+			}
+		}
+
+	}
+
+	/* Create tag for RX return ring (size depends on chip family). */
+	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
+	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
+	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
+	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
+
+	if (error) {
+		device_printf(sc->bge_dev, "could not allocate dma tag\n");
+		return (ENOMEM);
+	}
+
+	/* Allocate DMA'able memory for RX return ring. */
+	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
+	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
+	    &sc->bge_cdata.bge_rx_return_ring_map);
+	if (error)
+		return (ENOMEM);
+
+	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
+	    BGE_RX_RTN_RING_SZ(sc));
+
+	/* Load the address of the RX return ring. */
+	ctx.bge_maxsegs = 1;
+	ctx.sc = sc;
+
+	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
+	    sc->bge_cdata.bge_rx_return_ring_map,
+	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
+	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
+
+	if (error)
+		return (ENOMEM);
+
+	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
+
+	/* Create tag for TX ring. */
+	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
+	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
+	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
+	    &sc->bge_cdata.bge_tx_ring_tag);
+
+	if (error) {
+		device_printf(sc->bge_dev, "could not allocate dma tag\n");
+		return (ENOMEM);
+	}
+
+	/* Allocate DMA'able memory for TX ring. */
+	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
+	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
+	    &sc->bge_cdata.bge_tx_ring_map);
+	if (error)
+		return (ENOMEM);
+
+	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
+
+	/* Load the address of the TX ring. */
+	ctx.bge_maxsegs = 1;
+	ctx.sc = sc;
+
+	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
+	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
+	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
+
+	if (error)
+		return (ENOMEM);
+
+	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
+
+	/* Create tag for status block. */
+	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
+	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
+	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
+	    NULL, NULL, &sc->bge_cdata.bge_status_tag);
+
+	if (error) {
+		device_printf(sc->bge_dev, "could not allocate dma tag\n");
+		return (ENOMEM);
+	}
+
+	/* Allocate DMA'able memory for status block. */
+	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
+	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
+	    &sc->bge_cdata.bge_status_map);
+	if (error)
+		return (ENOMEM);
+
+	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
+
+	/* Load the address of the status block. */
+	ctx.sc = sc;
+	ctx.bge_maxsegs = 1;
+
+	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
+	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
+	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
+
+	if (error)
+		return (ENOMEM);
+
+	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
+
+	/* Create tag for statistics block. */
+	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
+	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
+	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
+	    &sc->bge_cdata.bge_stats_tag);
+
+	if (error) {
+		device_printf(sc->bge_dev, "could not allocate dma tag\n");
+		return (ENOMEM);
+	}
+
+	/* Allocate DMA'able memory for statistics block. */
+	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
+	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
+	    &sc->bge_cdata.bge_stats_map);
+	if (error)
+		return (ENOMEM);
+
+	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
+
+	/* Load the address of the statistics block. */
+	ctx.sc = sc;
+	ctx.bge_maxsegs = 1;
+
+	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
+	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
+	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
+
+	if (error)
+		return (ENOMEM);
+
+	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
+
+	return (0);
+}
+
+#if __FreeBSD_version > 602105
+/*
+ * Return true if this device has more than one port, i.e. if any
+ * other PCI function exists at the same domain/bus/slot.
+ */
+static int
+bge_has_multiple_ports(struct bge_softc *sc)
+{
+	device_t dev = sc->bge_dev;
+	u_int bus, dom, fn, func, slot;
+
+	dom = pci_get_domain(dev);
+	bus = pci_get_bus(dev);
+	slot = pci_get_slot(dev);
+	func = pci_get_function(dev);
+	for (fn = 0; fn <= PCI_FUNCMAX; fn++) {
+		if (fn == func)
+			continue;	/* skip ourselves */
+		if (pci_find_dbsf(dom, bus, slot, fn) != NULL)
+			return (1);
+	}
+	return (0);
+}
+
+/*
+ * Return true if MSI can be used with this device.
+ */
+static int
+bge_can_use_msi(struct bge_softc *sc)
+{
+
+	switch (sc->bge_asicrev) {
+	case BGE_ASICREV_BCM5714:
+		/*
+		 * Apparently, MSI doesn't work when this chip is configured
+		 * in single-port mode.
+		 */
+		return (bge_has_multiple_ports(sc) ? 1 : 0);
+	case BGE_ASICREV_BCM5750:
+		/* The 5750 AX and BX steppings can't use MSI. */
+		if (sc->bge_chiprev == BGE_CHIPREV_5750_AX ||
+		    sc->bge_chiprev == BGE_CHIPREV_5750_BX)
+			return (0);
+		return (1);
+	case BGE_ASICREV_BCM5752:
+	case BGE_ASICREV_BCM5780:
+		return (1);
+	default:
+		return (0);
+	}
+}
+#endif
+
+/*
+ * Attach routine: map the device's registers, identify the chip and
+ * latch quirk flags, reset and initialize the hardware, allocate DMA
+ * resources, create and attach the ifnet, probe the PHY (or set up
+ * TBI media), and finally hook up the interrupt handler.
+ *
+ * Returns 0 on success or an errno; on failure all resources acquired
+ * so far are released.
+ */
+static int
+bge_attach(device_t dev)
+{
+	struct ifnet *ifp;
+	struct bge_softc *sc;
+	uint32_t hwcfg = 0, misccfg;
+	u_char eaddr[ETHER_ADDR_LEN];
+	int error, reg, rid;
+#ifndef __rtems__
+	int trys;
+#endif
+
+	sc = device_get_softc(dev);
+	sc->bge_dev = dev;
+
+	/*
+	 * Map control/status registers.
+	 */
+	pci_enable_busmaster(dev);
+
+	rid = BGE_PCI_BAR0;
+	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+	    RF_ACTIVE | PCI_RF_DENSE);
+
+	if (sc->bge_res == NULL) {
+		device_printf (sc->bge_dev, "couldn't map memory\n");
+		error = ENXIO;
+		goto fail;
+	}
+
+	sc->bge_btag = rman_get_bustag(sc->bge_res);
+	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
+
+	/* Save ASIC rev. */
+
+	sc->bge_chipid =
+	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
+	    BGE_PCIMISCCTL_ASICREV;
+	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
+	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
+
+	/*
+	 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
+	 * 5705 A0 and A1 chips.
+	 */
+	if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
+	    sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
+	    sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
+	    sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
+		sc->bge_flags |= BGE_FLAG_WIRESPEED;
+
+	if (bge_has_eaddr(sc))
+		sc->bge_flags |= BGE_FLAG_EADDR;
+
+	/* Save chipset family. */
+	switch (sc->bge_asicrev) {
+	case BGE_ASICREV_BCM5700:
+	case BGE_ASICREV_BCM5701:
+	case BGE_ASICREV_BCM5703:
+	case BGE_ASICREV_BCM5704:
+		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
+		break;
+	case BGE_ASICREV_BCM5714_A0:
+	case BGE_ASICREV_BCM5780:
+	case BGE_ASICREV_BCM5714:
+		sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
+		/* FALLTHRU */
+	case BGE_ASICREV_BCM5750:
+	case BGE_ASICREV_BCM5752:
+	case BGE_ASICREV_BCM5755:
+	case BGE_ASICREV_BCM5787:
+	case BGE_ASICREV_BCM5906:
+		sc->bge_flags |= BGE_FLAG_575X_PLUS;
+		/* FALLTHRU */
+	case BGE_ASICREV_BCM5705:
+		sc->bge_flags |= BGE_FLAG_5705_PLUS;
+		break;
+	}
+
+	/* Set various bug flags. */
+	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
+	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
+		sc->bge_flags |= BGE_FLAG_CRC_BUG;
+	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
+	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
+		sc->bge_flags |= BGE_FLAG_ADC_BUG;
+	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
+		sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
+	if (BGE_IS_5705_PLUS(sc) &&
+	    !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
+		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
+		    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
+			if (sc->bge_chipid != BGE_CHIPID_BCM5722_A0)
+				sc->bge_flags |= BGE_FLAG_JITTER_BUG;
+		} else if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
+			sc->bge_flags |= BGE_FLAG_BER_BUG;
+	}
+
+
+	/*
+	 * We could possibly check for BCOM_DEVICEID_BCM5788 in bge_probe()
+	 * but I do not know the DEVICEID for the 5788M.
+	 */
+	misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
+	if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
+	    misccfg == BGE_MISCCFG_BOARD_ID_5788M)
+		sc->bge_flags |= BGE_FLAG_5788;
+
+	/*
+	 * Check if this is a PCI-X or PCI Express device.
+	 */
+#if __FreeBSD_version > 602101
+	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
+		/*
+		 * Found a PCI Express capabilities register, this
+		 * must be a PCI Express device.
+		 */
+		if (reg != 0)
+			sc->bge_flags |= BGE_FLAG_PCIE;
+#else
+	if (BGE_IS_5705_PLUS(sc)) {
+		reg = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
+		if ((reg & 0xFF) == BGE_PCIE_CAPID)
+			sc->bge_flags |= BGE_FLAG_PCIE;
+#endif
+	} else {
+		/*
+		 * Check if the device is in PCI-X Mode.
+		 * (This bit is not valid on PCI Express controllers.)
+		 */
+		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
+		    BGE_PCISTATE_PCI_BUSMODE) == 0)
+			sc->bge_flags |= BGE_FLAG_PCIX;
+	}
+
+#if __FreeBSD_version > 602105
+	{
+		int msicount;
+
+		/*
+		 * Allocate the interrupt, using MSI if possible. These devices
+		 * support 8 MSI messages, but only the first one is used in
+		 * normal operation.
+		 */
+		if (bge_can_use_msi(sc)) {
+			msicount = pci_msi_count(dev);
+			if (msicount > 1)
+				msicount = 1;
+		} else
+			msicount = 0;
+		if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
+			rid = 1;
+			sc->bge_flags |= BGE_FLAG_MSI;
+		} else
+			rid = 0;
+	}
+#else
+	rid = 0;
+#endif
+
+	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+	    RF_SHAREABLE | RF_ACTIVE);
+
+	if (sc->bge_irq == NULL) {
+		device_printf(sc->bge_dev, "couldn't map interrupt\n");
+		error = ENXIO;
+		goto fail;
+	}
+
+	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
+
+	/* Try to reset the chip. */
+	if (bge_reset(sc)) {
+		device_printf(sc->bge_dev, "chip reset failed\n");
+		error = ENXIO;
+		goto fail;
+	}
+
+	sc->bge_asf_mode = 0;
+	if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
+	    == BGE_MAGIC_NUMBER)) {
+		if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
+		    & BGE_HWCFG_ASF) {
+			sc->bge_asf_mode |= ASF_ENABLE;
+			sc->bge_asf_mode |= ASF_STACKUP;
+			if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
+				sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
+			}
+		}
+	}
+
+	/* Try to reset the chip again the nice way. */
+	bge_stop_fw(sc);
+	bge_sig_pre_reset(sc, BGE_RESET_STOP);
+	if (bge_reset(sc)) {
+		device_printf(sc->bge_dev, "chip reset failed\n");
+		error = ENXIO;
+		goto fail;
+	}
+
+	bge_sig_legacy(sc, BGE_RESET_STOP);
+	bge_sig_post_reset(sc, BGE_RESET_STOP);
+
+	if (bge_chipinit(sc)) {
+		device_printf(sc->bge_dev, "chip initialization failed\n");
+		error = ENXIO;
+		goto fail;
+	}
+
+	error = bge_get_eaddr(sc, eaddr);
+	if (error) {
+		device_printf(sc->bge_dev,
+		    "failed to read station address\n");
+		error = ENXIO;
+		goto fail;
+	}
+
+	/* 5705 limits RX return ring to 512 entries. */
+	if (BGE_IS_5705_PLUS(sc))
+		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
+	else
+		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
+
+	if (bge_dma_alloc(dev)) {
+		device_printf(sc->bge_dev,
+		    "failed to allocate DMA resources\n");
+		error = ENXIO;
+		goto fail;
+	}
+
+	/* Set default tuneable values. */
+	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
+	sc->bge_rx_coal_ticks = 150;
+	sc->bge_tx_coal_ticks = 150;
+	sc->bge_rx_max_coal_bds = 10;
+	sc->bge_tx_max_coal_bds = 10;
+
+	/* Set up ifnet structure */
+	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL) {
+		device_printf(sc->bge_dev, "failed to if_alloc()\n");
+		error = ENXIO;
+		goto fail;
+	}
+	ifp->if_softc = sc;
+	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_ioctl = bge_ioctl;
+	ifp->if_start = bge_start;
+	ifp->if_init = bge_init;
+	ifp->if_mtu = ETHERMTU;
+	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
+	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
+	IFQ_SET_READY(&ifp->if_snd);
+#ifndef __rtems__
+	ifp->if_hwassist = BGE_CSUM_FEATURES;
+	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
+	    IFCAP_VLAN_MTU;
+#ifdef IFCAP_VLAN_HWCSUM
+	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
+#endif
+	ifp->if_capenable = ifp->if_capabilities;
+#ifdef DEVICE_POLLING
+	ifp->if_capabilities |= IFCAP_POLLING;
+#endif
+
+#ifdef __rtems__
+	/*
+	 * NOTE(review): this #ifdef __rtems__ section is nested inside
+	 * an enclosing #ifndef __rtems__ and can therefore never be
+	 * compiled in — confirm whether it should be hoisted outside.
+	 */
+	taskqueue_create_fast("bge_taskq", M_NOWAIT,
+		taskqueue_thread_enqueue, &taskqueue_fast);
+	taskqueue_start_threads(&taskqueue_fast, 1, PI_NET, "%s taskq",
+		device_get_nameunit(dev));
+#endif
+
+	/*
+	 * 5700 B0 chips do not support checksumming correctly due
+	 * to hardware bugs.  Clear the HWCSUM capability/enable bits
+	 * (the old code did "capenable &= IFCAP_HWCSUM", which kept
+	 * only HWCSUM and discarded the VLAN bits - exactly backwards).
+	 */
+	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
+		ifp->if_capabilities &= ~IFCAP_HWCSUM;
+		ifp->if_capenable &= ~IFCAP_HWCSUM;
+		ifp->if_hwassist = 0;
+	}
+#endif
+
+	/*
+	 * Figure out what sort of media we have by checking the
+	 * hardware config word in the first 32k of NIC internal memory,
+	 * or fall back to examining the EEPROM if necessary.
+	 * Note: on some BCM5700 cards, this value appears to be unset.
+	 * If that's the case, we have to rely on identifying the NIC
+	 * by its PCI subsystem ID, as we do below for the SysKonnect
+	 * SK-9D41.
+	 */
+	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
+		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
+	else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
+	    (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
+		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
+		    sizeof(hwcfg))) {
+			device_printf(sc->bge_dev, "failed to read EEPROM\n");
+			error = ENXIO;
+			goto fail;
+		}
+		hwcfg = ntohl(hwcfg);
+	}
+
+	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
+		sc->bge_flags |= BGE_FLAG_TBI;
+
+	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
+	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
+		sc->bge_flags |= BGE_FLAG_TBI;
+
+	if (sc->bge_flags & BGE_FLAG_TBI) {
+		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
+		    bge_ifmedia_sts);
+		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
+		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
+		    0, NULL);
+		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
+		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
+		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
+	} else {
+		/*
+		 * Do transceiver setup and tell the firmware the
+		 * driver is down so we can try to get access the
+		 * probe if ASF is running. Retry a couple of times
+		 * if we get a conflict with the ASF firmware accessing
+		 * the PHY.
+		 */
+		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
+#ifndef __rtems__
+again:
+		bge_asf_driver_up(sc);
+
+		trys = 0;
+		if (mii_phy_probe(dev, &sc->bge_miibus,
+		    bge_ifmedia_upd, bge_ifmedia_sts)) {
+			if (trys++ < 4) {
+				device_printf(sc->bge_dev, "Try again\n");
+				bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
+				    BMCR_RESET);
+				goto again;
+			}
+
+			device_printf(sc->bge_dev, "MII without any PHY!\n");
+			error = ENXIO;
+			goto fail;
+		}
+
+		/*
+		 * Now tell the firmware we are going up after probing the PHY
+		 */
+		if (sc->bge_asf_mode & ASF_STACKUP)
+			BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
+#else
+	#warning "no phy probe"
+#endif
+	}
+
+	/*
+	 * When using the BCM5701 in PCI-X mode, data corruption has
+	 * been observed in the first few bytes of some received packets.
+	 * Aligning the packet buffer in memory eliminates the corruption.
+	 * Unfortunately, this misaligns the packet payloads. On platforms
+	 * which do not support unaligned accesses, we will realign the
+	 * payloads by copying the received packets.
+	 */
+	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
+	    sc->bge_flags & BGE_FLAG_PCIX)
+		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
+
+	/*
+	 * Call MI attach routine.
+	 */
+	ether_ifattach(ifp, eaddr);
+	callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
+
+	/*
+	 * Hookup IRQ last.
+	 */
+#if defined(__rtems__)
+	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
+	   NULL, bge_intr, sc, &sc->bge_intrhand);
+#elif (__FreeBSD_version > 700030)
+	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
+	   NULL, bge_intr, sc, &sc->bge_intrhand);
+#else
+	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
+	   bge_intr, sc, &sc->bge_intrhand);
+#endif
+
+	if (error) {
+		/*
+		 * bge_detach() undoes everything done so far (including
+		 * the ether_ifattach() above); report the failure instead
+		 * of falling through and returning success for a device
+		 * that has just been torn down.
+		 */
+		bge_detach(dev);
+		device_printf(sc->bge_dev, "couldn't set up irq\n");
+		return (error);
+	}
+
+#ifndef __rtems__
+	bge_add_sysctls(sc);
+#endif
+
+	return (0);
+
+fail:
+	bge_release_resources(sc);
+
+	return (error);
+}
+
+/*
+ * Device detach entry point: stop the hardware, detach from the network
+ * stack and release every resource acquired during attach.  The ordering
+ * matters: the chip is stopped and reset under the softc lock before the
+ * stat callout is drained and the interface is detached.
+ */
+static int
+bge_detach(device_t dev)
+{
+	struct bge_softc *sc;
+	struct ifnet *ifp;
+
+	sc = device_get_softc(dev);
+	ifp = sc->bge_ifp;
+
+#ifdef DEVICE_POLLING
+	/* Leave polling mode before the ifnet goes away. */
+	if (ifp->if_capenable & IFCAP_POLLING)
+		ether_poll_deregister(ifp);
+#endif
+
+	BGE_LOCK(sc);
+	bge_stop(sc);
+	bge_reset(sc);
+	BGE_UNLOCK(sc);
+
+	/* Wait for a possibly in-flight bge_tick() to finish. */
+	callout_drain(&sc->bge_stat_ch);
+
+	ether_ifdetach(ifp);
+
+	if (sc->bge_flags & BGE_FLAG_TBI) {
+		/* TBI cards own an ifmedia instance instead of a miibus. */
+#ifndef __rtems__
+		ifmedia_removeall(&sc->bge_ifmedia);
+#endif
+		;
+	} else {
+		bus_generic_detach(dev);
+		device_delete_child(dev, sc->bge_miibus);
+	}
+
+	bge_release_resources(sc);
+
+	return (0);
+}
+
+/*
+ * Free everything allocated in bge_attach().  Each release is guarded so
+ * this is safe to call from the attach failure path, where only part of
+ * the state may have been set up.
+ */
+static void
+bge_release_resources(struct bge_softc *sc)
+{
+	device_t dev;
+
+	dev = sc->bge_dev;
+
+	if (sc->bge_intrhand != NULL)
+		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
+
+	/* MSI uses rid 1, legacy INTx uses rid 0. */
+	if (sc->bge_irq != NULL)
+		bus_release_resource(dev, SYS_RES_IRQ,
+		    sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
+
+#if __FreeBSD_version > 602105
+	if (sc->bge_flags & BGE_FLAG_MSI)
+		pci_release_msi(dev);
+#endif
+
+	if (sc->bge_res != NULL)
+		bus_release_resource(dev, SYS_RES_MEMORY,
+		    BGE_PCI_BAR0, sc->bge_res);
+
+	if (sc->bge_ifp != NULL)
+		if_free(sc->bge_ifp);
+
+	bge_dma_free(sc);
+
+	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
+		BGE_LOCK_DESTROY(sc);
+}
+
+/*
+ * Perform a full chip reset and bring the device back to a state where
+ * its registers can be accessed again.  Saves and restores the PCI
+ * config state that the reset clobbers, then waits for the on-chip
+ * firmware handshake to complete.  Returns 0 on success, 1 if the
+ * reset timed out.
+ *
+ * NOTE(review): the register write ordering below follows the Broadcom
+ * reference sequence and must not be rearranged.
+ */
+static int
+bge_reset(struct bge_softc *sc)
+{
+	device_t dev;
+	uint32_t cachesize, command, pcistate, reset, val;
+	void (*write_op)(struct bge_softc *, int, int);
+	int i;
+
+	dev = sc->bge_dev;
+
+	/*
+	 * Pick the register write method that still works while the
+	 * core is being reset (direct for PCIe, indirect otherwise).
+	 */
+	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
+	    (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
+		if (sc->bge_flags & BGE_FLAG_PCIE)
+			write_op = bge_writemem_direct;
+		else
+			write_op = bge_writemem_ind;
+	} else
+		write_op = bge_writereg_ind;
+
+	/* Save some important PCI state. */
+	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
+	command = pci_read_config(dev, BGE_PCI_CMD, 4);
+	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
+
+	pci_write_config(dev, BGE_PCI_MISC_CTL,
+	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
+	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
+
+	/* Disable fastboot on controllers that support it. */
+	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
+	    sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
+	    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
+		if (bootverbose)
+			device_printf(sc->bge_dev, "Disabling fastboot\n");
+		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
+	}
+
+	/*
+	 * Write the magic number to SRAM at offset 0xB50.
+	 * When firmware finishes its initialization it will
+	 * write ~BGE_MAGIC_NUMBER to the same location.
+	 */
+	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
+
+	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
+
+	/* XXX: Broadcom Linux driver. */
+	if (sc->bge_flags & BGE_FLAG_PCIE) {
+		if (CSR_READ_4(sc, 0x7E2C) == 0x60)	/* PCIE 1.0 */
+			CSR_WRITE_4(sc, 0x7E2C, 0x20);
+		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
+			/* Prevent PCIE link training during global reset */
+			CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
+			reset |= 1 << 29;
+		}
+	}
+
+	/*
+	 * Set GPHY Power Down Override to leave GPHY
+	 * powered up in D0 uninitialized.
+	 */
+	if (BGE_IS_5705_PLUS(sc))
+		reset |= 0x04000000;
+
+	/* Issue global reset */
+	write_op(sc, BGE_MISC_CFG, reset);
+
+	/* 5906 runs its own VCPU firmware; kick it out of reset by hand. */
+	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
+		val = CSR_READ_4(sc, BGE_VCPU_STATUS);
+		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
+		    val | BGE_VCPU_STATUS_DRV_RESET);
+		val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
+		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
+		    val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
+	}
+
+	DELAY(1000);
+
+	/* XXX: Broadcom Linux driver. */
+	if (sc->bge_flags & BGE_FLAG_PCIE) {
+		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
+			DELAY(500000); /* wait for link training to complete */
+			val = pci_read_config(dev, 0xC4, 4);
+			pci_write_config(dev, 0xC4, val | (1 << 15), 4);
+		}
+		/*
+		 * Set PCIE max payload size to 128 bytes and clear error
+		 * status.
+		 */
+		pci_write_config(dev, 0xD8, 0xF5000, 4);
+	}
+
+	/* Reset some of the PCI state that got zapped by reset. */
+	pci_write_config(dev, BGE_PCI_MISC_CTL,
+	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
+	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
+	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
+	pci_write_config(dev, BGE_PCI_CMD, command, 4);
+	write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
+
+	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
+	if (BGE_IS_5714_FAMILY(sc)) {
+		/* This chip disables MSI on reset. */
+		if (sc->bge_flags & BGE_FLAG_MSI) {
+			val = pci_read_config(dev, BGE_PCI_MSI_CTL, 2);
+			pci_write_config(dev, BGE_PCI_MSI_CTL,
+			    val | PCIM_MSICTRL_MSI_ENABLE, 2);
+			val = CSR_READ_4(sc, BGE_MSI_MODE);
+			CSR_WRITE_4(sc, BGE_MSI_MODE,
+			    val | BGE_MSIMODE_ENABLE);
+		}
+		val = CSR_READ_4(sc, BGE_MARB_MODE);
+		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
+	} else
+		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
+
+	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
+		/* 5906: wait for the VCPU firmware init-done bit instead. */
+		for (i = 0; i < BGE_TIMEOUT; i++) {
+			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
+			if (val & BGE_VCPU_STATUS_INIT_DONE)
+				break;
+			DELAY(100);
+		}
+		if (i == BGE_TIMEOUT) {
+			device_printf(sc->bge_dev, "reset timed out\n");
+			return (1);
+		}
+	} else {
+		/*
+		 * Poll until we see the 1's complement of the magic number.
+		 * This indicates that the firmware initialization is complete.
+		 * We expect this to fail if no chip containing the Ethernet
+		 * address is fitted though.
+		 */
+		for (i = 0; i < BGE_TIMEOUT; i++) {
+			DELAY(10);
+			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
+			if (val == ~BGE_MAGIC_NUMBER)
+				break;
+		}
+
+		if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
+			device_printf(sc->bge_dev, "firmware handshake timed out, "
+			    "found 0x%08x\n", val);
+	}
+
+	/*
+	 * XXX Wait for the value of the PCISTATE register to
+	 * return to its original pre-reset state. This is a
+	 * fairly good indicator of reset completion. If we don't
+	 * wait for the reset to fully complete, trying to read
+	 * from the device's non-PCI registers may yield garbage
+	 * results.
+	 */
+	for (i = 0; i < BGE_TIMEOUT; i++) {
+		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
+			break;
+		DELAY(10);
+	}
+
+	if (sc->bge_flags & BGE_FLAG_PCIE) {
+		reset = bge_readmem_ind(sc, 0x7C00);
+		bge_writemem_ind(sc, 0x7C00, reset | (1 << 25));
+	}
+
+	/* Fix up byte swapping. */
+	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
+	    BGE_MODECTL_BYTESWAP_DATA);
+
+	/* Tell the ASF firmware we are up */
+	if (sc->bge_asf_mode & ASF_STACKUP)
+		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
+
+	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
+
+	/*
+	 * The 5704 in TBI mode apparently needs some special
+	 * adjustment to insure the SERDES drive level is set
+	 * to 1.2V.
+	 */
+	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
+	    sc->bge_flags & BGE_FLAG_TBI) {
+		val = CSR_READ_4(sc, BGE_SERDES_CFG);
+		val = (val & ~0xFFF) | 0x880;
+		CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
+	}
+
+	/* XXX: Broadcom Linux driver. */
+	if (sc->bge_flags & BGE_FLAG_PCIE &&
+	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
+		val = CSR_READ_4(sc, 0x7C00);
+		CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
+	}
+	DELAY(10000);
+
+	return(0);
+}
+
+/*
+ * Frame reception handling. This is called if there's a frame
+ * on the receive return list.
+ *
+ * Note: we have to be able to handle two possibilities here:
+ * 1) the frame is from the jumbo receive ring
+ * 2) the frame is from the standard receive ring
+ *
+ * For each completed descriptor the mbuf is unloaded from its DMA map,
+ * a fresh buffer is posted in its slot, and the frame is handed to the
+ * stack (with the softc lock dropped around the up-call).
+ */
+
+static void
+bge_rxeof(struct bge_softc *sc)
+{
+	struct ifnet *ifp;
+	int stdcnt = 0, jumbocnt = 0;
+
+	BGE_LOCK_ASSERT(sc);
+
+	/* Nothing to do. */
+	if (sc->bge_rx_saved_considx ==
+	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
+		return;
+
+	ifp = sc->bge_ifp;
+
+	/* Sync the rings before looking at any descriptor contents. */
+	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
+	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
+	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
+	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
+	if (BGE_IS_JUMBO_CAPABLE(sc))
+		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
+		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);
+
+	/* Consume descriptors up to the producer index in the status block. */
+	while(sc->bge_rx_saved_considx !=
+	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
+		struct bge_rx_bd	*cur_rx;
+		uint32_t		rxidx;
+		struct mbuf		*m = NULL;
+#ifndef __rtems__
+		uint16_t		vlan_tag = 0;
+		int			have_tag = 0;
+#endif
+
+#ifdef DEVICE_POLLING
+		/* In polling mode honor the per-call packet budget. */
+		if (ifp->if_capenable & IFCAP_POLLING) {
+			if (sc->rxcycles <= 0)
+				break;
+			sc->rxcycles--;
+		}
+#endif
+
+		cur_rx =
+	    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
+
+		rxidx = cur_rx->bge_idx;
+		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
+
+#ifndef __rtems__
+		/* Remember a hardware-stripped VLAN tag for later. */
+		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
+		    cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
+			have_tag = 1;
+			vlan_tag = cur_rx->bge_vlan_tag;
+		}
+#endif
+
+		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
+			/* Frame came from the jumbo ring. */
+			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
+			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
+			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
+			    BUS_DMASYNC_POSTREAD);
+			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
+			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
+			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
+			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
+			jumbocnt++;
+			/* On error or buffer shortage, recycle the old mbuf. */
+			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
+				ifp->if_ierrors++;
+				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
+				continue;
+			}
+			if (bge_newbuf_jumbo(sc,
+			    sc->bge_jumbo, NULL) == ENOBUFS) {
+				ifp->if_ierrors++;
+				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
+				continue;
+			}
+		} else {
+			/* Frame came from the standard ring. */
+			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
+			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
+			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
+			    BUS_DMASYNC_POSTREAD);
+			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
+			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
+			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
+			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
+			stdcnt++;
+			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
+				ifp->if_ierrors++;
+				bge_newbuf_std(sc, sc->bge_std, m);
+				continue;
+			}
+			if (bge_newbuf_std(sc, sc->bge_std,
+			    NULL) == ENOBUFS) {
+				ifp->if_ierrors++;
+				bge_newbuf_std(sc, sc->bge_std, m);
+				continue;
+			}
+		}
+
+		ifp->if_ipackets++;
+#ifndef __NO_STRICT_ALIGNMENT
+		/*
+		 * For architectures with strict alignment we must make sure
+		 * the payload is aligned.
+		 */
+		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
+			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
+			    cur_rx->bge_len);
+			m->m_data += ETHER_ALIGN;
+		}
+#endif
+		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
+		m->m_pkthdr.rcvif = ifp;
+
+#ifndef __rtems__
+		/* Propagate hardware checksum results to the mbuf. */
+		if (ifp->if_capenable & IFCAP_RXCSUM) {
+			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
+				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+				if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
+					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+			}
+			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
+			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
+				m->m_pkthdr.csum_data =
+				    cur_rx->bge_tcp_udp_csum;
+				m->m_pkthdr.csum_flags |=
+				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+			}
+		}
+
+		/*
+		 * If we received a packet with a vlan tag,
+		 * attach that information to the packet.
+		 */
+		if (have_tag) {
+#if __FreeBSD_version > 700022
+			m->m_pkthdr.ether_vtag = vlan_tag;
+			m->m_flags |= M_VLANTAG;
+#else
+			VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
+			if (m == NULL)
+				continue;
+#endif
+		}
+#endif
+
+		/* Drop the lock across the up-call into the stack. */
+		BGE_UNLOCK(sc);
+#ifndef __rtems__
+		(*ifp->if_input)(ifp, m);
+#else
+		ether_input_skipping(ifp, m);
+#endif
+
+		BGE_LOCK(sc);
+	}
+
+	if (stdcnt > 0)
+		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
+		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
+
+	if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
+		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
+		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
+
+	/* Tell the chip how far we got and which buffers were replenished. */
+	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
+	if (stdcnt)
+		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
+	if (jumbocnt)
+		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
+#ifdef notyet
+	/*
+	 * This register wraps very quickly under heavy packet drops.
+	 * If you need correct statistics, you can enable this check.
+	 */
+	if (BGE_IS_5705_PLUS(sc))
+		ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
+#endif
+}
+
+/*
+ * Transmit completion handling: reclaim descriptors and free the mbufs
+ * of frames the chip reports as sent, then clear OACTIVE so the send
+ * path can queue more work.
+ */
+static void
+bge_txeof(struct bge_softc *sc)
+{
+	struct bge_tx_bd *cur_tx = NULL;
+	struct ifnet *ifp;
+
+	BGE_LOCK_ASSERT(sc);
+
+	/* Nothing to do. */
+	if (sc->bge_tx_saved_considx ==
+	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
+		return;
+
+	ifp = sc->bge_ifp;
+
+	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
+	    sc->bge_cdata.bge_tx_ring_map,
+	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+	/*
+	 * Go through our tx ring and free mbufs for those
+	 * frames that have been sent.
+	 */
+	while (sc->bge_tx_saved_considx !=
+	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
+		uint32_t		idx = 0;
+
+		idx = sc->bge_tx_saved_considx;
+		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
+		/* Only the END descriptor of a chain counts as a packet. */
+		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
+			ifp->if_opackets++;
+		/* The mbuf is stored at the chain's last descriptor slot. */
+		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
+			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
+			    sc->bge_cdata.bge_tx_dmamap[idx],
+			    BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
+			    sc->bge_cdata.bge_tx_dmamap[idx]);
+			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
+			sc->bge_cdata.bge_tx_chain[idx] = NULL;
+		}
+		sc->bge_txcnt--;
+		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
+	}
+
+	if (cur_tx != NULL)
+		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+	/* Ring drained: disarm the watchdog. */
+	if (sc->bge_txcnt == 0)
+		sc->bge_timer = 0;
+}
+
+#ifdef DEVICE_POLLING
+/*
+ * polling(4) handler: snapshot and clear the status word, note link
+ * events, then run RX/TX completion with 'count' as the RX budget.
+ */
+static void
+bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
+{
+	struct bge_softc *sc = ifp->if_softc;
+	uint32_t statusword;
+
+	BGE_LOCK(sc);
+	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+		BGE_UNLOCK(sc);
+		return;
+	}
+
+	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
+	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
+
+	/* Atomically fetch-and-clear so we never lose a status update. */
+	statusword = atomic_readandclear_32(
+	    &sc->bge_ldata.bge_status_block->bge_status);
+
+	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
+	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
+
+	/* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
+	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
+		sc->bge_link_evt++;
+
+	if (cmd == POLL_AND_CHECK_STATUS)
+		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
+		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
+		    sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
+			bge_link_upd(sc);
+
+	sc->rxcycles = count;
+	bge_rxeof(sc);
+	bge_txeof(sc);
+	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+		bge_start_locked(ifp);
+
+	BGE_UNLOCK(sc);
+}
+#endif /* DEVICE_POLLING */
+
+/*
+ * Interrupt handler: ack the interrupt, update link state if needed,
+ * then run RX/TX completion and restart transmission.  See the long
+ * comment below for why the ack is written first and interrupts are
+ * never masked here.
+ */
+static void
+bge_intr(void *xsc)
+{
+	struct bge_softc *sc;
+	struct ifnet *ifp;
+	uint32_t statusword;
+
+	sc = xsc;
+
+	BGE_LOCK(sc);
+
+	ifp = sc->bge_ifp;
+
+#ifdef DEVICE_POLLING
+	/* In polling mode all work is done from bge_poll(). */
+	if (ifp->if_capenable & IFCAP_POLLING) {
+		BGE_UNLOCK(sc);
+		return;
+	}
+#endif
+
+	/*
+	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
+	 * disable interrupts by writing nonzero like we used to, since with
+	 * our current organization this just gives complications and
+	 * pessimizations for re-enabling interrupts.  We used to have races
+	 * instead of the necessary complications.  Disabling interrupts
+	 * would just reduce the chance of a status update while we are
+	 * running (by switching to the interrupt-mode coalescence
+	 * parameters), but this chance is already very low so it is more
+	 * efficient to get another interrupt than prevent it.
+	 *
+	 * We do the ack first to ensure another interrupt if there is a
+	 * status update after the ack.  We don't check for the status
+	 * changing later because it is more efficient to get another
+	 * interrupt than prevent it, not quite as above (not checking is
+	 * a smaller optimization than not toggling the interrupt enable,
+	 * since checking doesn't involve PCI accesses and toggling require
+	 * the status check).  So toggling would probably be a pessimization
+	 * even with MSI.  It would only be needed for using a task queue.
+	 */
+	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
+
+	/*
+	 * Do the mandatory PCI flush as well as get the link status.
+	 */
+	statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
+
+	/* Make sure the descriptor ring indexes are coherent. */
+	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
+	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
+	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
+	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
+
+	/* 5700 (except B2) always checks link; others only on an event. */
+	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
+	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
+	    statusword || sc->bge_link_evt)
+		bge_link_upd(sc);
+
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+		/* Check RX return ring producer/consumer. */
+		bge_rxeof(sc);
+
+		/* Check TX ring producer/consumer. */
+		bge_txeof(sc);
+	}
+
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
+	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+		bge_start_locked(ifp);
+
+	BGE_UNLOCK(sc);
+}
+
+/*
+ * Send a periodic "driver alive" heartbeat to the ASF firmware.
+ * Called once per second from bge_tick(); the actual mailbox write
+ * happens only on every fifth call, i.e. approximately every 2s
+ * (firmware-side timeout permitting).
+ */
+static void
+bge_asf_driver_up(struct bge_softc *sc)
+{
+
+	/* Nothing to do unless ASF stack-up mode is active. */
+	if ((sc->bge_asf_mode & ASF_STACKUP) == 0)
+		return;
+
+	/* Not due yet: just count down toward the next heartbeat. */
+	if (sc->bge_asf_count != 0) {
+		sc->bge_asf_count--;
+		return;
+	}
+
+	/* Heartbeat due: reload the counter and poke the firmware. */
+	sc->bge_asf_count = 5;
+	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_DRV_ALIVE);
+	bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
+	bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
+	CSR_WRITE_4(sc, BGE_CPU_EVENT,
+	    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
+}
+
+/*
+ * Once-per-second timer (self-rearming callout): update statistics,
+ * service the PHY or TBI link-polling logic, feed the ASF heartbeat
+ * and run the transmit watchdog.
+ */
+static void
+bge_tick(void *xsc)
+{
+	struct bge_softc *sc = xsc;
+	struct mii_data *mii = NULL;
+
+	BGE_LOCK_ASSERT(sc);
+
+	/* Synchronize with possible callout reset/stop. */
+	if (callout_pending(&sc->bge_stat_ch) ||
+	    !callout_active(&sc->bge_stat_ch))
+		return;
+
+	/* 5705+ chips keep statistics in registers, not in host memory. */
+	if (BGE_IS_5705_PLUS(sc))
+		bge_stats_update_regs(sc);
+	else
+		bge_stats_update(sc);
+
+	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
+		mii = device_get_softc(sc->bge_miibus);
+		/*
+		 * Do not touch PHY if we have link up. This could break
+		 * IPMI/ASF mode or produce extra input errors
+		 * (extra errors was reported for bcm5701 & bcm5704).
+		 */
+#ifndef __rtems__
+		if (!sc->bge_link)
+			mii_tick(mii);
+#endif
+	} else {
+		/*
+		 * Since in TBI mode auto-polling can't be used we should poll
+		 * link status manually. Here we register pending link event
+		 * and trigger interrupt.
+		 */
+#ifdef DEVICE_POLLING
+		/* In polling mode we poll link state in bge_poll(). */
+		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
+#endif
+		{
+			sc->bge_link_evt++;
+			/* 5700 and 5788 use a different interrupt-force bit. */
+			if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
+			    sc->bge_flags & BGE_FLAG_5788)
+				BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
+			else
+				BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
+		}
+	}
+
+	bge_asf_driver_up(sc);
+	bge_watchdog(sc);
+
+	/* Re-arm ourselves for the next second. */
+	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
+}
+
+/*
+ * Statistics update for 5705-and-newer chips, which expose their
+ * counters through the register space rather than a host-memory
+ * statistics block.
+ */
+static void
+bge_stats_update_regs(struct bge_softc *sc)
+{
+	struct ifnet *ifp = sc->bge_ifp;
+
+	/* Total collisions, from the MAC statistics register block. */
+	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
+	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
+
+	/* Inbound frames dropped by the receive list placement engine. */
+	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
+}
+
+/*
+ * Statistics update for older chips that DMA a statistics block into
+ * the NIC memory window.  The hardware counters are cumulative, so
+ * each interface counter is charged only with the delta since the
+ * previous call, with the raw value cached in the softc.
+ */
+static void
+bge_stats_update(struct bge_softc *sc)
+{
+	struct ifnet *ifp = sc->bge_ifp;
+	bus_size_t base = BGE_MEMWIN_START + BGE_STATS_BLOCK;
+	uint32_t raw;
+
+#define	READ_STAT(sc, stats, stat) \
+	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
+
+	raw = READ_STAT(sc, base, txstats.etherStatsCollisions.bge_addr_lo);
+	ifp->if_collisions += (uint32_t)(raw - sc->bge_tx_collisions);
+	sc->bge_tx_collisions = raw;
+
+	raw = READ_STAT(sc, base, ifInDiscards.bge_addr_lo);
+	ifp->if_ierrors += (uint32_t)(raw - sc->bge_rx_discards);
+	sc->bge_rx_discards = raw;
+
+	raw = READ_STAT(sc, base, txstats.ifOutDiscards.bge_addr_lo);
+	ifp->if_oerrors += (uint32_t)(raw - sc->bge_tx_discards);
+	sc->bge_tx_discards = raw;
+
+#undef READ_STAT
+}
+
+/*
+ * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
+ * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
+ * but when such padded frames employ the bge IP/TCP checksum offload,
+ * the hardware checksum assist gives incorrect results (possibly
+ * from incorporating its own padding into the UDP/TCP checksum; who knows).
+ * If we pad such runts with zeros, the onboard checksum comes out correct.
+ *
+ * Returns 0 on success or ENOBUFS if an extra mbuf was needed but
+ * could not be allocated.  On success both m_len of the padded mbuf
+ * and the packet-header length are grown by the pad amount.
+ */
+static __inline int
+bge_cksum_pad(struct mbuf *m)
+{
+	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
+	struct mbuf *last;
+
+	/* If there's only the packet-header and we can pad there, use it. */
+	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
+	    M_TRAILINGSPACE(m) >= padlen) {
+		last = m;
+	} else {
+		/*
+		 * Walk packet chain to find last mbuf. We will either
+		 * pad there, or append a new mbuf and pad it.
+		 */
+		for (last = m; last->m_next != NULL; last = last->m_next);
+		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
+			/* Allocate new empty mbuf, pad it. Compact later. */
+			struct mbuf *n;
+
+			MGET(n, M_DONTWAIT, MT_DATA);
+			if (n == NULL)
+				return (ENOBUFS);
+			n->m_len = 0;
+			last->m_next = n;
+			last = n;
+		}
+	}
+
+	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
+	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
+	last->m_len += padlen;
+	m->m_pkthdr.len += padlen;
+
+	return (0);
+}
+
+/*
+ * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
+ * pointers to descriptors.
+ *
+ * On success *txidx is advanced past the descriptors consumed.  On
+ * failure the caller's mbuf is either left intact (map-load errors,
+ * ring nearly full) or freed with *m_head set to NULL (pad/collapse
+ * failures) — callers must check *m_head before requeueing.
+ */
+static int
+bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
+{
+	bus_dma_segment_t	segs[BGE_NSEG_NEW];
+	bus_dmamap_t		map;
+	struct bge_tx_bd	*d;
+	struct mbuf		*m = *m_head;
+	uint32_t		idx = *txidx;
+	uint16_t		csum_flags;
+	int			nsegs, i, error;
+
+	csum_flags = 0;
+#ifndef __rtems__
+	/* Translate stack checksum-offload requests into descriptor flags. */
+	if (m->m_pkthdr.csum_flags) {
+		if (m->m_pkthdr.csum_flags & CSUM_IP)
+			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
+		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
+			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
+			/* Zero-pad runts; see bge_cksum_pad() for why. */
+			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
+			    (error = bge_cksum_pad(m)) != 0) {
+				m_freem(m);
+				*m_head = NULL;
+				return (error);
+			}
+		}
+		if (m->m_flags & M_LASTFRAG)
+			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
+		else if (m->m_flags & M_FRAG)
+			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
+	}
+#endif
+
+	map = sc->bge_cdata.bge_tx_dmamap[idx];
+	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs,
+	    &nsegs, BUS_DMA_NOWAIT);
+	if (error == EFBIG) {
+		/* Too many segments: compact the chain and retry once. */
+		m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
+		if (m == NULL) {
+			m_freem(*m_head);
+			*m_head = NULL;
+			return (ENOBUFS);
+		}
+		*m_head = m;
+		error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m,
+		    segs, &nsegs, BUS_DMA_NOWAIT);
+		if (error) {
+			m_freem(m);
+			*m_head = NULL;
+			return (error);
+		}
+	} else if (error != 0)
+		return (error);
+
+	/*
+	 * Sanity check: avoid coming within 16 descriptors
+	 * of the end of the ring.
+	 */
+	if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
+		bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
+		return (ENOBUFS);
+	}
+
+	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
+
+	/* Fill one descriptor per DMA segment; idx ends on the last one. */
+	for (i = 0; ; i++) {
+		d = &sc->bge_ldata.bge_tx_ring[idx];
+		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
+		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
+		d->bge_len = segs[i].ds_len;
+		d->bge_flags = csum_flags;
+		if (i == nsegs - 1)
+			break;
+		BGE_INC(idx, BGE_TX_RING_CNT);
+	}
+
+	/* Mark the last segment as end of packet... */
+	d->bge_flags |= BGE_TXBDFLAG_END;
+
+	/* ... and put VLAN tag into first segment. */
+	d = &sc->bge_ldata.bge_tx_ring[*txidx];
+#ifndef __rtems__
+#if __FreeBSD_version > 700022
+	if (m->m_flags & M_VLANTAG) {
+		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
+		d->bge_vlan_tag = m->m_pkthdr.ether_vtag;
+	} else
+		d->bge_vlan_tag = 0;
+#else
+	{
+		struct m_tag		*mtag;
+
+		if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
+			d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
+			d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
+		} else
+			d->bge_vlan_tag = 0;
+	}
+#endif
+#endif
+
+	/*
+	 * Insure that the map for this transmission
+	 * is placed at the array index of the last descriptor
+	 * in this chain.  (Swapping maps keeps map/mbuf/descriptor
+	 * association consistent for bge_txeof.)
+	 */
+	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
+	sc->bge_cdata.bge_tx_dmamap[idx] = map;
+	sc->bge_cdata.bge_tx_chain[idx] = m;
+	sc->bge_txcnt += nsegs;
+
+	BGE_INC(idx, BGE_TX_RING_CNT);
+	*txidx = idx;
+
+	return (0);
+}
+
+/*
+ * Main transmit routine. To avoid having to do mbuf copies, we put pointers
+ * to the mbuf data regions directly in the transmit descriptors.
+ * Caller must hold the softc lock.  Dequeues frames until the ring
+ * fills (OACTIVE) or the queue drains, then rings the doorbell once.
+ */
+static void
+bge_start_locked(struct ifnet *ifp)
+{
+	struct bge_softc *sc;
+	struct mbuf *m_head = NULL;
+	uint32_t prodidx;
+	int count = 0;
+
+	sc = ifp->if_softc;
+
+	/* Don't transmit without link. */
+	if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+		return;
+
+	prodidx = sc->bge_tx_prodidx;
+
+	/* A non-NULL chain entry means the ring slot is still in use. */
+	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
+		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+		if (m_head == NULL)
+			break;
+
+		/*
+		 * XXX
+		 * The code inside the if() block is never reached since we
+		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
+		 * requests to checksum TCP/UDP in a fragmented packet.
+		 *
+		 * XXX
+		 * safety overkill.  If this is a fragmented packet chain
+		 * with delayed TCP/UDP checksums, then only encapsulate
+		 * it if we have enough descriptors to handle the entire
+		 * chain at once.
+		 * (paranoia -- may not actually be needed)
+		 */
+#ifndef __rtems__
+		if (m_head->m_flags & M_FIRSTFRAG &&
+		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
+			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
+			    m_head->m_pkthdr.csum_data + 16) {
+				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+				break;
+			}
+		}
+#endif
+
+		/*
+		 * Pack the data into the transmit ring. If we
+		 * don't have room, set the OACTIVE flag and wait
+		 * for the NIC to drain the ring.
+		 */
+		if (bge_encap(sc, &m_head, &prodidx)) {
+			/* m_head == NULL means bge_encap consumed the mbuf. */
+			if (m_head == NULL)
+				break;
+			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+			break;
+		}
+		++count;
+
+		/*
+		 * If there's a BPF listener, bounce a copy of this frame
+		 * to him.
+		 */
+#ifdef ETHER_BPF_MTAP
+		ETHER_BPF_MTAP(ifp, m_head);
+#else
+		BPF_MTAP(ifp, m_head);
+#endif
+	}
+
+	if (count == 0)
+		/* No packets were dequeued. */
+		return;
+
+	/* Transmit. */
+	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
+	/* 5700 b2 errata */
+	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
+		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
+
+	sc->bge_tx_prodidx = prodidx;
+
+	/*
+	 * Set a timeout in case the chip goes out to lunch.
+	 */
+	sc->bge_timer = 5;
+}
+
+/*
+ * if_start entry point: locking wrapper around bge_start_locked().
+ */
+static void
+bge_start(struct ifnet *ifp)
+{
+	struct bge_softc *sc = ifp->if_softc;
+
+	BGE_LOCK(sc);
+	bge_start_locked(ifp);
+	BGE_UNLOCK(sc);
+}
+
+/*
+ * Bring the interface up: stop and reset the chip, reinitialize all
+ * state machines and rings, program addresses and filters, then enable
+ * the transmitter, receiver and interrupts.  Caller holds the softc
+ * lock.  No-op if the interface is already running.
+ */
+static void
+bge_init_locked(struct bge_softc *sc)
+{
+	struct ifnet *ifp;
+	uint16_t *m;
+
+	BGE_LOCK_ASSERT(sc);
+
+	ifp = sc->bge_ifp;
+
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+		return;
+
+	/* Cancel pending I/O and flush buffers. */
+	bge_stop(sc);
+
+	/* Signal the (ASF) firmware around the reset. */
+	bge_stop_fw(sc);
+	bge_sig_pre_reset(sc, BGE_RESET_START);
+	bge_reset(sc);
+	bge_sig_legacy(sc, BGE_RESET_START);
+	bge_sig_post_reset(sc, BGE_RESET_START);
+
+	bge_chipinit(sc);
+
+	/*
+	 * Init the various state machines, ring
+	 * control blocks and firmware.
+	 */
+	if (bge_blockinit(sc)) {
+		device_printf(sc->bge_dev, "initialization failure\n");
+		return;
+	}
+
+	ifp = sc->bge_ifp;
+
+	/* Specify MTU. */
+	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
+	    ETHER_HDR_LEN + ETHER_CRC_LEN +
+#ifndef __rtems__
+	    (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0)
+#else
+	    0
+#endif
+	    );
+
+	/* Load our MAC address. */
+	m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
+	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
+	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
+
+	/* Program promiscuous mode. */
+	bge_setpromisc(sc);
+
+	/* Program multicast filter. */
+	bge_setmulti(sc);
+
+	/* Program VLAN tag stripping. */
+	bge_setvlan(sc);
+
+	/* Init RX ring. */
+	bge_init_rx_ring_std(sc);
+
+	/*
+	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
+	 * memory to insure that the chip has in fact read the first
+	 * entry of the ring.
+	 */
+	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
+		uint32_t		v, i;
+		for (i = 0; i < 10; i++) {
+			DELAY(20);
+			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
+			if (v == (MCLBYTES - ETHER_ALIGN))
+				break;
+		}
+		if (i == 10)
+			device_printf (sc->bge_dev,
+			    "5705 A0 chip failed to load RX ring\n");
+	}
+
+	/* Init jumbo RX ring. */
+	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
+		bge_init_rx_ring_jumbo(sc);
+
+	/* Init our RX return ring index. */
+	sc->bge_rx_saved_considx = 0;
+
+	/* Init our RX/TX stat counters. */
+	sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
+
+	/* Init TX ring. */
+	bge_init_tx_ring(sc);
+
+	/* Turn on transmitter. */
+	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
+
+	/* Turn on receiver. */
+	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
+
+	/* Tell firmware we're alive. */
+	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
+
+#ifdef DEVICE_POLLING
+	/* Disable interrupts if we are polling. */
+	if (ifp->if_capenable & IFCAP_POLLING) {
+		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
+		    BGE_PCIMISCCTL_MASK_PCI_INTR);
+		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
+	} else
+#endif
+
+	/* Enable host interrupts. */
+	{
+	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
+	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
+	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
+	}
+
+	bge_ifmedia_upd_locked(ifp);
+
+	ifp->if_drv_flags |= IFF_DRV_RUNNING;
+	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+	/* Start the once-per-second housekeeping timer. */
+	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
+}
+
+/*
+ * if_init entry point: locking wrapper around bge_init_locked().
+ */
+static void
+bge_init(void *xsc)
+{
+	struct bge_softc *sc;
+
+	sc = xsc;
+	BGE_LOCK(sc);
+	bge_init_locked(sc);
+	BGE_UNLOCK(sc);
+}
+
+/*
+ * Set media options: locking wrapper around bge_ifmedia_upd_locked().
+ */
+static int
+bge_ifmedia_upd(struct ifnet *ifp)
+{
+	struct bge_softc *sc;
+	int error;
+
+	sc = ifp->if_softc;
+	BGE_LOCK(sc);
+	error = bge_ifmedia_upd_locked(ifp);
+	BGE_UNLOCK(sc);
+
+	return (error);
+}
+
+/*
+ * Set media options with the softc lock held.  TBI (fiber) cards are
+ * programmed directly through MAC/SerDes registers; copper cards are
+ * handed to the MII layer.  Returns 0 on success or EINVAL for an
+ * unsupported media selection.
+ */
+static int
+bge_ifmedia_upd_locked(struct ifnet *ifp)
+{
+	struct bge_softc *sc = ifp->if_softc;
+	struct mii_data *mii;
+	struct ifmedia *ifm;
+
+	BGE_LOCK_ASSERT(sc);
+
+	ifm = &sc->bge_ifmedia;
+
+	/* If this is a 1000baseX NIC, enable the TBI port. */
+	if (sc->bge_flags & BGE_FLAG_TBI) {
+		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+			return (EINVAL);
+		switch(IFM_SUBTYPE(ifm->ifm_media)) {
+		case IFM_AUTO:
+			/*
+			 * The BCM5704 ASIC appears to have a special
+			 * mechanism for programming the autoneg
+			 * advertisement registers in TBI mode.
+			 */
+			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
+				uint32_t sgdig;
+				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
+				if (sgdig & BGE_SGDIGSTS_DONE) {
+					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
+					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
+					sgdig |= BGE_SGDIGCFG_AUTO |
+					    BGE_SGDIGCFG_PAUSE_CAP |
+					    BGE_SGDIGCFG_ASYM_PAUSE;
+					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
+					    sgdig | BGE_SGDIGCFG_SEND);
+					DELAY(5);
+					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
+				}
+			}
+			break;
+		case IFM_1000_SX:
+			/* Select half or full duplex via the MAC mode bit. */
+			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
+				BGE_CLRBIT(sc, BGE_MAC_MODE,
+				    BGE_MACMODE_HALF_DUPLEX);
+			} else {
+				BGE_SETBIT(sc, BGE_MAC_MODE,
+				    BGE_MACMODE_HALF_DUPLEX);
+			}
+			break;
+		default:
+			return (EINVAL);
+		}
+		return (0);
+	}
+
+	/* Copper: let the MII layer do the work. */
+	sc->bge_link_evt++;
+	mii = device_get_softc(sc->bge_miibus);
+#ifndef __rtems__
+	if (mii->mii_instance) {
+		struct mii_softc *miisc;
+		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
+		    miisc = LIST_NEXT(miisc, mii_list))
+			mii_phy_reset(miisc);
+	}
+	mii_mediachg(mii);
+#endif
+
+	/*
+	 * Force an interrupt so that we will call bge_link_upd
+	 * if needed and clear any pending link state attention.
+	 * Without this we are not getting any further interrupts
+	 * for link state changes and thus will not UP the link and
+	 * not be able to send in bge_start_locked. The only
+	 * way to get things working was to receive a packet and
+	 * get an RX intr.
+	 * bge_tick should help for fiber cards and we might not
+	 * need to do this here if BGE_FLAG_TBI is set but as
+	 * we poll for fiber anyway it should not harm.
+	 */
+	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
+	    sc->bge_flags & BGE_FLAG_5788)
+		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
+	else
+		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
+
+	return (0);
+}
+
+/*
+ * Report current media status.
+ *
+ * TBI (fiber) NICs are decoded from the MAC status/mode registers;
+ * copper NICs defer to the MII layer (non-RTEMS builds only).
+ */
+static void
+bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct bge_softc *sc = ifp->if_softc;
+#ifndef __rtems__
+ struct mii_data *mii;
+#endif
+
+ BGE_LOCK(sc);
+
+ if (sc->bge_flags & BGE_FLAG_TBI) {
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+ if (CSR_READ_4(sc, BGE_MAC_STS) &
+ BGE_MACSTAT_TBI_PCS_SYNCHED)
+ ifmr->ifm_status |= IFM_ACTIVE;
+ else {
+ /* No PCS sync: report no media and bail out early. */
+ ifmr->ifm_active |= IFM_NONE;
+ BGE_UNLOCK(sc);
+ return;
+ }
+ ifmr->ifm_active |= IFM_1000_SX;
+ if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
+ ifmr->ifm_active |= IFM_HDX;
+ else
+ ifmr->ifm_active |= IFM_FDX;
+ BGE_UNLOCK(sc);
+ return;
+ }
+
+#ifndef __rtems__
+ /* Copper: let the MII layer poll the PHY and copy its state. */
+ mii = device_get_softc(sc->bge_miibus);
+ mii_pollstat(mii);
+ ifmr->ifm_active = mii->mii_media_active;
+ ifmr->ifm_status = mii->mii_media_status;
+#endif
+
+ BGE_UNLOCK(sc);
+}
+
+/*
+ * Interface ioctl handler: MTU changes, interface flag changes,
+ * multicast list updates, media get/set, and (FreeBSD only)
+ * capability toggles.  Anything unrecognized is passed on to
+ * ether_ioctl().  Returns 0 on success or an errno value.
+ */
+static int
+#ifndef __rtems__
+bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+#else
+bge_ioctl(struct ifnet *ifp, ioctl_command_t command, caddr_t data)
+#endif
+{
+ struct bge_softc *sc = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *) data;
+ struct mii_data *mii;
+#ifndef __rtems__
+ int mask;
+#endif
+ int flags, error = 0;
+
+ switch (command) {
+ case SIOCSIFMTU:
+ /* Jumbo-capable chips accept up to BGE_JUMBO_MTU. */
+ if (ifr->ifr_mtu < ETHERMIN ||
+ ((BGE_IS_JUMBO_CAPABLE(sc)) &&
+ ifr->ifr_mtu > BGE_JUMBO_MTU) ||
+ ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
+ ifr->ifr_mtu > ETHERMTU))
+ error = EINVAL;
+ else if (ifp->if_mtu != ifr->ifr_mtu) {
+ /* MTU change requires a full re-init of the rings. */
+ ifp->if_mtu = ifr->ifr_mtu;
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ bge_init(sc);
+ }
+ break;
+ case SIOCSIFFLAGS:
+ BGE_LOCK(sc);
+ if (ifp->if_flags & IFF_UP) {
+ /*
+ * If only the state of the PROMISC flag changed,
+ * then just use the 'set promisc mode' command
+ * instead of reinitializing the entire NIC. Doing
+ * a full re-init means reloading the firmware and
+ * waiting for it to start up, which may take a
+ * second or two. Similarly for ALLMULTI.
+ */
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ flags = ifp->if_flags ^ sc->bge_if_flags;
+ if (flags & IFF_PROMISC)
+ bge_setpromisc(sc);
+ if (flags & IFF_ALLMULTI)
+ bge_setmulti(sc);
+ } else
+ bge_init_locked(sc);
+ } else {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ bge_stop(sc);
+ }
+ }
+ /* Remember flags so the next call can compute the delta. */
+ sc->bge_if_flags = ifp->if_flags;
+ BGE_UNLOCK(sc);
+ error = 0;
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+#ifdef __rtems__
+ /* RTEMS stack maintains the multicast list itself. */
+ if ( ETHER_SIOCMULTIFRAG(error, command, ifr, ifp) )
+ break;
+#endif
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ BGE_LOCK(sc);
+ bge_setmulti(sc);
+ BGE_UNLOCK(sc);
+ error = 0;
+ }
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ if (sc->bge_flags & BGE_FLAG_TBI) {
+ error = ifmedia_ioctl(ifp, ifr,
+ &sc->bge_ifmedia, command);
+ } else {
+ mii = device_get_softc(sc->bge_miibus);
+#ifndef __rtems__
+ error = ifmedia_ioctl(ifp, ifr,
+ &mii->mii_media, command);
+#endif
+ /*
+ * NOTE(review): under __rtems__ the non-TBI branch
+ * returns 0 without filling the request -- confirm
+ * this is the intended RTEMS behavior.
+ */
+ }
+ break;
+#ifndef __rtems__
+ case SIOCSIFCAP:
+ /* 'mask' holds the set of capability bits being toggled. */
+ mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+#ifdef DEVICE_POLLING
+ if (mask & IFCAP_POLLING) {
+ if (ifr->ifr_reqcap & IFCAP_POLLING) {
+ error = ether_poll_register(bge_poll, ifp);
+ if (error)
+ return (error);
+ BGE_LOCK(sc);
+ /* Mask the PCI interrupt while polling. */
+ BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
+ BGE_PCIMISCCTL_MASK_PCI_INTR);
+ bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
+ ifp->if_capenable |= IFCAP_POLLING;
+ BGE_UNLOCK(sc);
+ } else {
+ error = ether_poll_deregister(ifp);
+ /* Enable interrupt even in error case */
+ BGE_LOCK(sc);
+ BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
+ BGE_PCIMISCCTL_MASK_PCI_INTR);
+ bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
+ ifp->if_capenable &= ~IFCAP_POLLING;
+ BGE_UNLOCK(sc);
+ }
+ }
+#endif
+ if (mask & IFCAP_HWCSUM) {
+ ifp->if_capenable ^= IFCAP_HWCSUM;
+ if (IFCAP_HWCSUM & ifp->if_capenable &&
+ IFCAP_HWCSUM & ifp->if_capabilities)
+ ifp->if_hwassist = BGE_CSUM_FEATURES;
+ else
+ ifp->if_hwassist = 0;
+#ifdef VLAN_CAPABILITIES
+ VLAN_CAPABILITIES(ifp);
+#endif
+ }
+
+ if (mask & IFCAP_VLAN_MTU) {
+ /* VLAN MTU change needs a full re-init. */
+ ifp->if_capenable ^= IFCAP_VLAN_MTU;
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ bge_init(sc);
+ }
+
+ if (mask & IFCAP_VLAN_HWTAGGING) {
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+ BGE_LOCK(sc);
+ bge_setvlan(sc);
+ BGE_UNLOCK(sc);
+#ifdef VLAN_CAPABILITIES
+ VLAN_CAPABILITIES(ifp);
+#endif
+ }
+
+ break;
+#endif
+ default:
+ error = ether_ioctl(ifp, command, data);
+ break;
+ }
+
+ return (error);
+}
+
+/*
+ * TX watchdog, called periodically with the softc lock held.
+ * sc->bge_timer == 0 means the watchdog is disarmed; otherwise it
+ * counts down once per call and fires (resets the chip) when it
+ * reaches zero.
+ */
+static void
+bge_watchdog(struct bge_softc *sc)
+{
+ struct ifnet *ifp;
+
+ BGE_LOCK_ASSERT(sc);
+
+ /* Disarmed, or still counting down: nothing to do yet. */
+ if (sc->bge_timer == 0 || --sc->bge_timer)
+ return;
+
+ ifp = sc->bge_ifp;
+
+ if_printf(ifp, "watchdog timeout -- resetting\n");
+
+ /* Clear RUNNING so bge_init_locked() performs a full re-init. */
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ bge_init_locked(sc);
+
+ ifp->if_oerrors++;
+}
+
+/*
+ * Stop the adapter and free any mbufs allocated to the
+ * RX and TX lists.
+ *
+ * Called with the softc lock held.  Shuts down every RX/TX state
+ * machine, masks interrupts, signals and resets the firmware, frees
+ * the rings, and isolates the PHY.
+ */
+static void
+bge_stop(struct bge_softc *sc)
+{
+ struct ifnet *ifp;
+ struct mii_data *mii = NULL;
+ int itmp;
+#ifndef __rtems__
+ struct ifmedia_entry *ifm;
+ int mtmp;
+#endif
+
+ BGE_LOCK_ASSERT(sc);
+
+ ifp = sc->bge_ifp;
+
+ if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
+ mii = device_get_softc(sc->bge_miibus);
+
+ /* Stop the periodic statistics/link callout first. */
+ callout_stop(&sc->bge_stat_ch);
+
+ /*
+ * Disable all of the receiver blocks.
+ */
+ BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
+ BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
+ BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
+ if (!(BGE_IS_5705_PLUS(sc)))
+ BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
+ BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
+ BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
+ BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
+
+ /*
+ * Disable all of the transmit blocks.
+ */
+ BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
+ BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
+ BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
+ BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
+ BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
+ if (!(BGE_IS_5705_PLUS(sc)))
+ BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
+ BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
+
+ /*
+ * Shut down all of the memory managers and related
+ * state machines.
+ */
+ BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
+ BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
+ if (!(BGE_IS_5705_PLUS(sc)))
+ BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
+ CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
+ CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
+ if (!(BGE_IS_5705_PLUS(sc))) {
+ BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
+ BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
+ }
+
+ /* Disable host interrupts. */
+ BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
+ bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
+
+ /*
+ * Tell firmware we're shutting down.
+ */
+
+ bge_stop_fw(sc);
+ bge_sig_pre_reset(sc, BGE_RESET_STOP);
+ bge_reset(sc);
+ bge_sig_legacy(sc, BGE_RESET_STOP);
+ bge_sig_post_reset(sc, BGE_RESET_STOP);
+
+ /*
+ * Keep the ASF firmware running if up.
+ */
+ if (sc->bge_asf_mode & ASF_STACKUP)
+ BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
+ else
+ BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
+
+ /* Free the RX lists. */
+ bge_free_rx_ring_std(sc);
+
+ /* Free jumbo RX list. */
+ if (BGE_IS_JUMBO_CAPABLE(sc))
+ bge_free_rx_ring_jumbo(sc);
+
+ /* Free TX buffers. */
+ bge_free_tx_ring(sc);
+
+ /*
+ * Isolate/power down the PHY, but leave the media selection
+ * unchanged so that things will be put back to normal when
+ * we bring the interface back up.
+ *
+ * NOTE(review): under __rtems__ the saved 'itmp' flags are
+ * never restored because the restore is inside the
+ * !__rtems__ block -- confirm this is intentional.
+ */
+ if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
+ itmp = ifp->if_flags;
+ ifp->if_flags |= IFF_UP;
+ /*
+ * If we are called from bge_detach(), mii is already NULL.
+ */
+#ifndef __rtems__
+ if (mii != NULL) {
+ ifm = mii->mii_media.ifm_cur;
+ mtmp = ifm->ifm_media;
+ /* Temporarily select "no media" to isolate the PHY. */
+ ifm->ifm_media = IFM_ETHER | IFM_NONE;
+ mii_mediachg(mii);
+ ifm->ifm_media = mtmp;
+ }
+ ifp->if_flags = itmp;
+#endif
+ }
+
+ sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
+
+ /* Clear MAC's link state (PHY may still have link UP). */
+ if (bootverbose && sc->bge_link)
+ if_printf(sc->bge_ifp, "link DOWN\n");
+ sc->bge_link = 0;
+
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+}
+
+/*
+ * Stop all chip I/O so that the kernel's probe routines don't
+ * get confused by errant DMAs when rebooting.
+ *
+ * device_t shutdown method: stop the interface and then reset the
+ * chip so no DMA engines remain active across the reboot.
+ */
+static void
+bge_shutdown(device_t dev)
+{
+ struct bge_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ BGE_LOCK(sc);
+ bge_stop(sc);
+ bge_reset(sc);
+ BGE_UNLOCK(sc);
+}
+
+/*
+ * device_t suspend method: quiesce the adapter.  Always returns 0.
+ */
+static int
+bge_suspend(device_t dev)
+{
+ struct bge_softc *sc;
+
+ sc = device_get_softc(dev);
+ BGE_LOCK(sc);
+ bge_stop(sc);
+ BGE_UNLOCK(sc);
+
+ return (0);
+}
+
+/*
+ * device_t resume method: re-initialize the adapter if the interface
+ * was up, and restart any queued transmissions.  Always returns 0.
+ */
+static int
+bge_resume(device_t dev)
+{
+ struct bge_softc *sc;
+ struct ifnet *ifp;
+
+ sc = device_get_softc(dev);
+ BGE_LOCK(sc);
+ ifp = sc->bge_ifp;
+ if (ifp->if_flags & IFF_UP) {
+ bge_init_locked(sc);
+ /* Kick the TX path in case packets queued while suspended. */
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ bge_start_locked(ifp);
+ }
+ BGE_UNLOCK(sc);
+
+ return (0);
+}
+
+/*
+ * Update cached link state (sc->bge_link) after a link attention.
+ * Called with the softc lock held from the interrupt handler or
+ * tick callout.  Three detection strategies are used depending on
+ * the chip: MII interrupts (buggy BCM5700 revs), TBI PCS sync bits
+ * (fiber), or direct PHY polling (chips with broken status-word
+ * link bits).  Always clears the link-change attention at the end.
+ */
+static void
+bge_link_upd(struct bge_softc *sc)
+{
+#ifndef __rtems__
+ struct mii_data *mii;
+ uint32_t link, status;
+#endif
+
+ BGE_LOCK_ASSERT(sc);
+
+ /* Clear 'pending link event' flag. */
+ sc->bge_link_evt = 0;
+
+ /*
+ * Process link state changes.
+ * Grrr. The link status word in the status block does
+ * not work correctly on the BCM5700 rev AX and BX chips,
+ * according to all available information. Hence, we have
+ * to enable MII interrupts in order to properly obtain
+ * async link changes. Unfortunately, this also means that
+ * we have to read the MAC status register to detect link
+ * changes, thereby adding an additional register access to
+ * the interrupt handler.
+ *
+ * XXX: perhaps link state detection procedure used for
+ * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions.
+ */
+
+#ifndef __rtems__
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
+ sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
+ status = CSR_READ_4(sc, BGE_MAC_STS);
+ if (status & BGE_MACSTAT_MI_INTERRUPT) {
+ mii = device_get_softc(sc->bge_miibus);
+ mii_pollstat(mii);
+ if (!sc->bge_link &&
+ mii->mii_media_status & IFM_ACTIVE &&
+ IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
+ sc->bge_link++;
+ if (bootverbose)
+ if_printf(sc->bge_ifp, "link UP\n");
+ } else if (sc->bge_link &&
+ (!(mii->mii_media_status & IFM_ACTIVE) ||
+ IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
+ sc->bge_link = 0;
+ if (bootverbose)
+ if_printf(sc->bge_ifp, "link DOWN\n");
+ }
+
+ /* Clear the interrupt. */
+ CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
+ BGE_EVTENB_MI_INTERRUPT);
+ bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
+ bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
+ BRGPHY_INTRS);
+ }
+ return;
+ }
+
+ if (sc->bge_flags & BGE_FLAG_TBI) {
+ /* Fiber: link follows the PCS sync bit. */
+ status = CSR_READ_4(sc, BGE_MAC_STS);
+ if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
+ if (!sc->bge_link) {
+ sc->bge_link++;
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
+ BGE_CLRBIT(sc, BGE_MAC_MODE,
+ BGE_MACMODE_TBI_SEND_CFGS);
+ CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
+ if (bootverbose)
+ if_printf(sc->bge_ifp, "link UP\n");
+ if_link_state_change(sc->bge_ifp,
+ LINK_STATE_UP);
+ }
+ } else if (sc->bge_link) {
+ sc->bge_link = 0;
+ if (bootverbose)
+ if_printf(sc->bge_ifp, "link DOWN\n");
+ if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
+ }
+ } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
+ /*
+ * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
+ * in status word always set. Workaround this bug by reading
+ * PHY link status directly.
+ */
+ link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
+
+ if (link != sc->bge_link ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5700) {
+ mii = device_get_softc(sc->bge_miibus);
+ mii_pollstat(mii);
+ if (!sc->bge_link &&
+ mii->mii_media_status & IFM_ACTIVE &&
+ IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
+ sc->bge_link++;
+ if (bootverbose)
+ if_printf(sc->bge_ifp, "link UP\n");
+ } else if (sc->bge_link &&
+ (!(mii->mii_media_status & IFM_ACTIVE) ||
+ IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
+ sc->bge_link = 0;
+ if (bootverbose)
+ if_printf(sc->bge_ifp, "link DOWN\n");
+ }
+ }
+ } else {
+ /*
+ * Discard link events for MII/GMII controllers
+ * if MI auto-polling is disabled.
+ */
+ }
+
+#endif
+ /* Clear the attention. */
+ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
+ BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
+ BGE_MACSTAT_LINK_CHANGED);
+}
+
+/*
+ * Register a read-only sysctl proc node named 'oid' under 'parent'
+ * that reports one statistics counter; the counter is identified by
+ * its field offset within struct bge_stats and read back via
+ * bge_sysctl_stats().
+ */
+#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
+ SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
+ sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
+ desc)
+
+#ifndef __rtems__
+/*
+ * Create the per-device sysctl tree: optional debug knobs (when
+ * BGE_REGISTER_DEBUG is defined) plus, on pre-5705 chips only, the
+ * full hardware statistics hierarchy ("stats", "stats.rx",
+ * "stats.tx").  FreeBSD builds only.
+ */
+static void
+bge_add_sysctls(struct bge_softc *sc)
+{
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid_list *children, *schildren;
+ struct sysctl_oid *tree;
+
+ ctx = device_get_sysctl_ctx(sc->bge_dev);
+ children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
+
+#ifdef BGE_REGISTER_DEBUG
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
+ CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
+ "Debug Information");
+
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
+ CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
+ "Register Read");
+
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
+ CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
+ "Memory Read");
+
+#endif
+
+ /* 5705-and-later chips use a different stats mechanism. */
+ if (BGE_IS_5705_PLUS(sc))
+ return;
+
+ tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
+ NULL, "BGE Statistics");
+ /* Keep the "stats" list in 'schildren' for the rx/tx subtrees. */
+ schildren = children = SYSCTL_CHILDREN(tree);
+ BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
+ children, COSFramesDroppedDueToFilters,
+ "FramesDroppedDueToFilters");
+ BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
+ children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
+ BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
+ children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
+ BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
+ children, nicNoMoreRxBDs, "NoMoreRxBDs");
+ BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
+ children, ifInDiscards, "InputDiscards");
+ BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
+ children, ifInErrors, "InputErrors");
+ BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
+ children, nicRecvThresholdHit, "RecvThresholdHit");
+ BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
+ children, nicDmaReadQueueFull, "DmaReadQueueFull");
+ BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
+ children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
+ BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
+ children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
+ BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
+ children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
+ BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
+ children, nicRingStatusUpdate, "RingStatusUpdate");
+ BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
+ children, nicInterrupts, "Interrupts");
+ BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
+ children, nicAvoidedInterrupts, "AvoidedInterrupts");
+ BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
+ children, nicSendThresholdHit, "SendThresholdHit");
+
+ tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
+ NULL, "BGE RX Statistics");
+ children = SYSCTL_CHILDREN(tree);
+ BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
+ children, rxstats.ifHCInOctets, "Octets");
+ BGE_SYSCTL_STAT(sc, ctx, "Fragments",
+ children, rxstats.etherStatsFragments, "Fragments");
+ BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
+ children, rxstats.ifHCInUcastPkts, "UcastPkts");
+ BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
+ children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
+ BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
+ children, rxstats.dot3StatsFCSErrors, "FCSErrors");
+ BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
+ children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
+ BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
+ children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
+ BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
+ children, rxstats.xoffPauseFramesReceived,
+ "xoffPauseFramesReceived");
+ BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
+ children, rxstats.macControlFramesReceived,
+ "ControlFramesReceived");
+ BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
+ children, rxstats.xoffStateEntered, "xoffStateEntered");
+ BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
+ children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
+ BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
+ children, rxstats.etherStatsJabbers, "Jabbers");
+ BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
+ children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
+ BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
+ children, rxstats.inRangeLengthError, "inRangeLengthError");
+ BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
+ children, rxstats.outRangeLengthError, "outRangeLengthError");
+
+ tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
+ NULL, "BGE TX Statistics");
+ children = SYSCTL_CHILDREN(tree);
+ BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
+ children, txstats.ifHCOutOctets, "Octets");
+ BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
+ children, txstats.etherStatsCollisions, "Collisions");
+ BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
+ children, txstats.outXonSent, "XonSent");
+ BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
+ children, txstats.outXoffSent, "XoffSent");
+ BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
+ children, txstats.flowControlDone, "flowControlDone");
+ BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
+ children, txstats.dot3StatsInternalMacTransmitErrors,
+ "InternalMacTransmitErrors");
+ BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
+ children, txstats.dot3StatsSingleCollisionFrames,
+ "SingleCollisionFrames");
+ BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
+ children, txstats.dot3StatsMultipleCollisionFrames,
+ "MultipleCollisionFrames");
+ BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
+ children, txstats.dot3StatsDeferredTransmissions,
+ "DeferredTransmissions");
+ BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
+ children, txstats.dot3StatsExcessiveCollisions,
+ "ExcessiveCollisions");
+ BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
+ children, txstats.dot3StatsLateCollisions,
+ "LateCollisions");
+ BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
+ children, txstats.ifHCOutUcastPkts, "UcastPkts");
+ BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
+ children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
+ BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
+ children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
+ BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
+ children, txstats.dot3StatsCarrierSenseErrors,
+ "CarrierSenseErrors");
+ BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
+ children, txstats.ifOutDiscards, "Discards");
+ BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
+ children, txstats.ifOutErrors, "Errors");
+}
+#endif
+
+/*
+ * Sysctl handler backing BGE_SYSCTL_STAT: arg1 is the softc, arg2 the
+ * field offset within struct bge_stats.  Reads the low 32-bit word of
+ * the counter from the chip's statistics block through the memory
+ * window and reports it as an unsigned int.
+ */
+static int
+bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
+{
+ struct bge_softc *sc;
+ uint32_t result;
+ int offset;
+
+ sc = (struct bge_softc *)arg1;
+ offset = arg2;
+ result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
+ offsetof(bge_hostaddr, bge_addr_lo));
+ return (sysctl_handle_int(oidp, &result, 0, req));
+}
+
+#ifdef BGE_REGISTER_DEBUG
+/*
+ * Debug sysctl: writing 1 dumps the status block, a range of chip
+ * registers (0x800-0x9FF) and the detected hardware flags to the
+ * console.  Reads return -1.
+ */
+static int
+bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
+{
+ struct bge_softc *sc;
+ uint16_t *sbdata;
+ int error;
+ int result;
+ int i, j;
+
+ result = -1;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+ /* Only act on a write (newptr set) that parsed cleanly. */
+ if (error || (req->newptr == NULL))
+ return (error);
+
+ if (result == 1) {
+ sc = (struct bge_softc *)arg1;
+
+ sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
+ printf("Status Block:\n");
+ for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
+ printf("%06x:", i);
+ for (j = 0; j < 8; j++) {
+ printf(" %04x", sbdata[i]);
+ i += 4;
+ }
+ printf("\n");
+ }
+
+ printf("Registers:\n");
+ for (i = 0x800; i < 0xA00; ) {
+ printf("%06x:", i);
+ for (j = 0; j < 8; j++) {
+ printf(" %08x", CSR_READ_4(sc, i));
+ i += 4;
+ }
+ printf("\n");
+ }
+
+ printf("Hardware Flags:\n");
+ if (BGE_IS_575X_PLUS(sc))
+ printf(" - 575X Plus\n");
+ if (BGE_IS_5705_PLUS(sc))
+ printf(" - 5705 Plus\n");
+ if (BGE_IS_5714_FAMILY(sc))
+ printf(" - 5714 Family\n");
+ if (BGE_IS_5700_FAMILY(sc))
+ printf(" - 5700 Family\n");
+ if (sc->bge_flags & BGE_FLAG_JUMBO)
+ printf(" - Supports Jumbo Frames\n");
+ if (sc->bge_flags & BGE_FLAG_PCIX)
+ printf(" - PCI-X Bus\n");
+ if (sc->bge_flags & BGE_FLAG_PCIE)
+ printf(" - PCI Express Bus\n");
+ if (sc->bge_flags & BGE_FLAG_NO_3LED)
+ printf(" - No 3 LEDs\n");
+ if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
+ printf(" - RX Alignment Bug\n");
+ }
+
+ return (error);
+}
+
+/*
+ * Debug sysctl: writing a register offset below 0x8000 dumps that
+ * chip register's value to the console.
+ *
+ * NOTE(review): 'result' is uint16_t but is handed to
+ * sysctl_handle_int(), which operates on an int-sized object --
+ * confirm against the upstream FreeBSD version.
+ */
+static int
+bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
+{
+ struct bge_softc *sc;
+ int error;
+ uint16_t result;
+ uint32_t val;
+
+ result = -1;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+ /* Only act on a write (newptr set) that parsed cleanly. */
+ if (error || (req->newptr == NULL))
+ return (error);
+
+ if (result < 0x8000) {
+ sc = (struct bge_softc *)arg1;
+ val = CSR_READ_4(sc, result);
+ printf("reg 0x%06X = 0x%08X\n", result, val);
+ }
+
+ return (error);
+}
+
+/*
+ * Debug sysctl: writing an offset below 0x8000 dumps the value at
+ * that NIC-local memory address (read indirectly) to the console.
+ *
+ * NOTE(review): as in bge_sysctl_reg_read(), 'result' is uint16_t
+ * but is handed to sysctl_handle_int() -- confirm against upstream.
+ */
+static int
+bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
+{
+ struct bge_softc *sc;
+ int error;
+ uint16_t result;
+ uint32_t val;
+
+ result = -1;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+ /* Only act on a write (newptr set) that parsed cleanly. */
+ if (error || (req->newptr == NULL))
+ return (error);
+
+ if (result < 0x8000) {
+ sc = (struct bge_softc *)arg1;
+ val = bge_readmem_ind(sc, result);
+ printf("mem 0x%06X = 0x%08X\n", result, val);
+ }
+
+ return (error);
+}
+#endif
+
+/*
+ * Try to obtain the station address from platform firmware (Open
+ * Firmware on sparc64).  Returns 0 on success, 1 if unavailable or
+ * if BGE_FLAG_EADDR indicates this source should be skipped.
+ */
+static int
+bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
+{
+
+ if (sc->bge_flags & BGE_FLAG_EADDR)
+ return (1);
+
+#ifdef __sparc64__
+ OF_getetheraddr(sc->bge_dev, ether_addr);
+ return (0);
+#endif
+ return (1);
+}
+
+/*
+ * Try to read the station address that bootstrap firmware may have
+ * left in NIC-local memory at 0x0c14/0x0c18.  The upper 16 bits of
+ * the first word must contain the 0x484b ("HK") signature for the
+ * data to be considered valid.  Returns 0 on success, 1 otherwise.
+ */
+static int
+bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
+{
+ uint32_t mac_addr;
+
+ mac_addr = bge_readmem_ind(sc, 0x0c14);
+ if ((mac_addr >> 16) == 0x484b) {
+ /* First word carries bytes 0-1, second word bytes 2-5. */
+ ether_addr[0] = (uint8_t)(mac_addr >> 8);
+ ether_addr[1] = (uint8_t)mac_addr;
+ mac_addr = bge_readmem_ind(sc, 0x0c18);
+ ether_addr[2] = (uint8_t)(mac_addr >> 24);
+ ether_addr[3] = (uint8_t)(mac_addr >> 16);
+ ether_addr[4] = (uint8_t)(mac_addr >> 8);
+ ether_addr[5] = (uint8_t)mac_addr;
+ return (0);
+ }
+ return (1);
+}
+
+/*
+ * Try to read the station address from NVRAM; the BCM5906 keeps it
+ * at a different offset.  Returns bge_read_nvram()'s status
+ * (0 on success, nonzero on failure).
+ */
+static int
+bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
+{
+ int mac_offset = BGE_EE_MAC_OFFSET;
+
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
+ mac_offset = BGE_EE_MAC_OFFSET_5906;
+
+ /* Skip the 2-byte length/type prefix ahead of the address. */
+ return (bge_read_nvram(sc, (caddr_t)ether_addr, mac_offset + 2,
+ ETHER_ADDR_LEN));
+}
+
+/*
+ * Try to read the station address from the EEPROM.  The BCM5906 has
+ * no EEPROM, so report failure (1) for it; otherwise return
+ * bge_read_eeprom()'s status (0 on success).
+ */
+static int
+bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
+{
+
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
+ return (1);
+
+ /* Skip the 2-byte length/type prefix ahead of the address. */
+ return (bge_read_eeprom(sc, (caddr_t) ether_addr, BGE_EE_MAC_OFFSET + 2,
+ ETHER_ADDR_LEN));
+}
+
+/*
+ * Obtain the station address by trying each source in priority
+ * order (firmware, NIC memory, NVRAM, EEPROM) until one succeeds.
+ * Returns 0 on success or ENXIO if every source failed.
+ */
+static int
+bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
+{
+ static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
+ /* NOTE: Order is critical */
+ bge_get_eaddr_fw,
+ bge_get_eaddr_mem,
+ bge_get_eaddr_nvram,
+ bge_get_eaddr_eeprom,
+ NULL
+ };
+ const bge_eaddr_fcn_t *func;
+
+ /* Each helper returns 0 on success; stop at the first that does. */
+ for (func = bge_eaddr_funcs; *func != NULL; ++func) {
+ if ((*func)(sc, eaddr) == 0)
+ break;
+ }
+ return (*func == NULL ? ENXIO : 0);
+}
diff --git a/bsd_eth_drivers/if_bge/if_bgereg.h b/bsd_eth_drivers/if_bge/if_bgereg.h
new file mode 100644
index 0000000..bf4b73b
--- /dev/null
+++ b/bsd_eth_drivers/if_bge/if_bgereg.h
@@ -0,0 +1,2591 @@
+/*-
+ * Copyright (c) 2001 Wind River Systems
+ * Copyright (c) 1997, 1998, 1999, 2001
+ * Bill Paul <wpaul@windriver.com>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Bill Paul.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/dev/bge/if_bgereg.h,v 1.73.2.4.2.1 2008/11/25 02:59:29 kensmith Exp $
+ */
+
+/*
+ * BCM570x memory map. The internal memory layout varies somewhat
+ * depending on whether or not we have external SSRAM attached.
+ * The BCM5700 can have up to 16MB of external memory. The BCM5701
+ * is apparently not designed to use external SSRAM. The mappings
+ * up to the first 4 send rings are the same for both internal and
+ * external memory configurations. Note that mini RX ring space is
+ * only available with external SSRAM configurations, which means
+ * the mini RX ring is not supported on the BCM5701.
+ *
+ * The NIC's memory can be accessed by the host in one of 3 ways:
+ *
+ * 1) Indirect register access. The MEMWIN_BASEADDR and MEMWIN_DATA
+ * registers in PCI config space can be used to read any 32-bit
+ * address within the NIC's memory.
+ *
+ * 2) Memory window access. The MEMWIN_BASEADDR register in PCI config
+ * space can be used in conjunction with the memory window in the
+ * device register space at offset 0x8000 to read any 32K chunk
+ * of NIC memory.
+ *
+ * 3) Flat mode. If the 'flat mode' bit in the PCI state register is
+ * set, the device I/O mapping consumes 32MB of host address space,
+ * allowing all of the registers and internal NIC memory to be
+ * accessed directly. NIC memory addresses are offset by 0x01000000.
+ * Flat mode consumes so much host address space that it is not
+ * recommended.
+ */
+#define BGE_PAGE_ZERO 0x00000000
+#define BGE_PAGE_ZERO_END 0x000000FF
+#define BGE_SEND_RING_RCB 0x00000100
+#define BGE_SEND_RING_RCB_END 0x000001FF
+#define BGE_RX_RETURN_RING_RCB 0x00000200
+#define BGE_RX_RETURN_RING_RCB_END 0x000002FF
+#define BGE_STATS_BLOCK 0x00000300
+#define BGE_STATS_BLOCK_END 0x00000AFF
+#define BGE_STATUS_BLOCK 0x00000B00
+#define BGE_STATUS_BLOCK_END 0x00000B4F
+#define BGE_SOFTWARE_GENCOMM 0x00000B50
+#define BGE_SOFTWARE_GENCOMM_SIG 0x00000B54
+#define BGE_SOFTWARE_GENCOMM_NICCFG 0x00000B58
+#define BGE_SOFTWARE_GENCOMM_FW 0x00000B78
+#define BGE_SOFTWARE_GENNCOMM_FW_LEN 0x00000B7C
+#define BGE_SOFTWARE_GENNCOMM_FW_DATA 0x00000B80
+#define BGE_SOFTWARE_GENCOMM_END 0x00000FFF
+#define BGE_UNMAPPED 0x00001000
+#define BGE_UNMAPPED_END 0x00001FFF
+#define BGE_DMA_DESCRIPTORS 0x00002000
+#define BGE_DMA_DESCRIPTORS_END 0x00003FFF
+#define BGE_SEND_RING_1_TO_4 0x00004000
+#define BGE_SEND_RING_1_TO_4_END 0x00005FFF
+
+/* Firmware interface */
+#define BGE_FW_DRV_ALIVE 0x00000001
+#define BGE_FW_PAUSE 0x00000002
+
+/* Mappings for internal memory configuration */
+#define BGE_STD_RX_RINGS 0x00006000
+#define BGE_STD_RX_RINGS_END 0x00006FFF
+#define BGE_JUMBO_RX_RINGS 0x00007000
+#define BGE_JUMBO_RX_RINGS_END 0x00007FFF
+#define BGE_BUFFPOOL_1 0x00008000
+#define BGE_BUFFPOOL_1_END 0x0000FFFF
+#define BGE_BUFFPOOL_2 0x00010000 /* or expansion ROM */
+#define BGE_BUFFPOOL_2_END 0x00017FFF
+#define BGE_BUFFPOOL_3 0x00018000 /* or expansion ROM */
+#define BGE_BUFFPOOL_3_END 0x0001FFFF
+
+/* Mappings for external SSRAM configurations */
+#define BGE_SEND_RING_5_TO_6 0x00006000
+#define BGE_SEND_RING_5_TO_6_END 0x00006FFF
+#define BGE_SEND_RING_7_TO_8 0x00007000
+#define BGE_SEND_RING_7_TO_8_END 0x00007FFF
+#define BGE_SEND_RING_9_TO_16 0x00008000
+#define BGE_SEND_RING_9_TO_16_END 0x0000BFFF
+#define BGE_EXT_STD_RX_RINGS 0x0000C000
+#define BGE_EXT_STD_RX_RINGS_END 0x0000CFFF
+#define BGE_EXT_JUMBO_RX_RINGS 0x0000D000
+#define BGE_EXT_JUMBO_RX_RINGS_END 0x0000DFFF
+#define BGE_MINI_RX_RINGS 0x0000E000
+#define BGE_MINI_RX_RINGS_END 0x0000FFFF
+#define BGE_AVAIL_REGION1 0x00010000 /* or expansion ROM */
+#define BGE_AVAIL_REGION1_END 0x00017FFF
+#define BGE_AVAIL_REGION2 0x00018000 /* or expansion ROM */
+#define BGE_AVAIL_REGION2_END 0x0001FFFF
+#define BGE_EXT_SSRAM 0x00020000
+#define BGE_EXT_SSRAM_END 0x000FFFFF
+
+
+/*
+ * BCM570x register offsets. These are memory mapped registers
+ * which can be accessed with the CSR_READ_4()/CSR_WRITE_4() macros.
+ * Each register must be accessed using 32 bit operations.
+ *
+ * All registers are accessed through a 32K shared memory block.
+ * The first group of registers are actually copies of the PCI
+ * configuration space registers.
+ */
+
+/*
+ * PCI registers defined in the PCI 2.2 spec.
+ */
+#define BGE_PCI_VID 0x00
+#define BGE_PCI_DID 0x02
+#define BGE_PCI_CMD 0x04
+#define BGE_PCI_STS 0x06
+#define BGE_PCI_REV 0x08
+#define BGE_PCI_CLASS 0x09
+#define BGE_PCI_CACHESZ 0x0C
+#define BGE_PCI_LATTIMER 0x0D
+#define BGE_PCI_HDRTYPE 0x0E
+#define BGE_PCI_BIST 0x0F
+#define BGE_PCI_BAR0 0x10
+#define BGE_PCI_BAR1 0x14
+#define BGE_PCI_SUBSYS 0x2C
+#define BGE_PCI_SUBVID 0x2E
+#define BGE_PCI_ROMBASE 0x30
+#define BGE_PCI_CAPPTR 0x34
+#define BGE_PCI_INTLINE 0x3C
+#define BGE_PCI_INTPIN 0x3D
+#define BGE_PCI_MINGNT 0x3E
+#define BGE_PCI_MAXLAT 0x3F
+#define BGE_PCI_PCIXCAP 0x40
+#define BGE_PCI_NEXTPTR_PM 0x41
+#define BGE_PCI_PCIX_CMD 0x42
+#define BGE_PCI_PCIX_STS 0x44
+#define BGE_PCI_PWRMGMT_CAPID 0x48
+#define BGE_PCI_NEXTPTR_VPD 0x49
+#define BGE_PCI_PWRMGMT_CAPS 0x4A
+#define BGE_PCI_PWRMGMT_CMD 0x4C
+#define BGE_PCI_PWRMGMT_STS 0x4D
+#define BGE_PCI_PWRMGMT_DATA 0x4F
+#define BGE_PCI_VPD_CAPID 0x50
+#define BGE_PCI_NEXTPTR_MSI 0x51
+#define BGE_PCI_VPD_ADDR 0x52
+#define BGE_PCI_VPD_DATA 0x54
+#define BGE_PCI_MSI_CAPID 0x58
+#define BGE_PCI_NEXTPTR_NONE 0x59
+#define BGE_PCI_MSI_CTL 0x5A
+#define BGE_PCI_MSI_ADDR_HI 0x5C
+#define BGE_PCI_MSI_ADDR_LO 0x60
+#define BGE_PCI_MSI_DATA 0x64
+
+/* PCI MSI. ??? */
+#define BGE_PCIE_CAPID_REG 0xD0
+#define BGE_PCIE_CAPID 0x10
+
+/*
+ * PCI registers specific to the BCM570x family.
+ */
+#define BGE_PCI_MISC_CTL 0x68
+#define BGE_PCI_DMA_RW_CTL 0x6C
+#define BGE_PCI_PCISTATE 0x70
+#define BGE_PCI_CLKCTL 0x74
+#define BGE_PCI_REG_BASEADDR 0x78
+#define BGE_PCI_MEMWIN_BASEADDR 0x7C
+#define BGE_PCI_REG_DATA 0x80
+#define BGE_PCI_MEMWIN_DATA 0x84
+#define BGE_PCI_MODECTL 0x88
+#define BGE_PCI_MISC_CFG 0x8C
+#define BGE_PCI_MISC_LOCALCTL 0x90
+#define BGE_PCI_UNDI_RX_STD_PRODIDX_HI 0x98
+#define BGE_PCI_UNDI_RX_STD_PRODIDX_LO 0x9C
+#define BGE_PCI_UNDI_RX_RTN_CONSIDX_HI 0xA0
+#define BGE_PCI_UNDI_RX_RTN_CONSIDX_LO 0xA4
+#define BGE_PCI_UNDI_TX_BD_PRODIDX_HI 0xA8
+#define BGE_PCI_UNDI_TX_BD_PRODIDX_LO 0xAC
+#define BGE_PCI_ISR_MBX_HI 0xB0
+#define BGE_PCI_ISR_MBX_LO 0xB4
+
+/* PCI Misc. Host control register */
+#define BGE_PCIMISCCTL_CLEAR_INTA 0x00000001
+#define BGE_PCIMISCCTL_MASK_PCI_INTR 0x00000002
+#define BGE_PCIMISCCTL_ENDIAN_BYTESWAP 0x00000004
+#define BGE_PCIMISCCTL_ENDIAN_WORDSWAP 0x00000008
+#define BGE_PCIMISCCTL_PCISTATE_RW 0x00000010
+#define BGE_PCIMISCCTL_CLOCKCTL_RW 0x00000020
+#define BGE_PCIMISCCTL_REG_WORDSWAP 0x00000040
+#define BGE_PCIMISCCTL_INDIRECT_ACCESS 0x00000080
+#define BGE_PCIMISCCTL_ASICREV 0xFFFF0000
+
+#define BGE_HIF_SWAP_OPTIONS (BGE_PCIMISCCTL_ENDIAN_WORDSWAP)
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define BGE_DMA_SWAP_OPTIONS \
+ BGE_MODECTL_WORDSWAP_NONFRAME| \
+ BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA
+#else
+#define BGE_DMA_SWAP_OPTIONS \
+ BGE_MODECTL_WORDSWAP_NONFRAME|BGE_MODECTL_BYTESWAP_NONFRAME| \
+ BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA
+#endif
+
+#define BGE_INIT \
+ (BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_CLEAR_INTA| \
+ BGE_PCIMISCCTL_MASK_PCI_INTR|BGE_PCIMISCCTL_INDIRECT_ACCESS)
+
+#define BGE_CHIPID_TIGON_I 0x40000000
+#define BGE_CHIPID_TIGON_II 0x60000000
+#define BGE_CHIPID_BCM5700_A0 0x70000000
+#define BGE_CHIPID_BCM5700_A1 0x70010000
+#define BGE_CHIPID_BCM5700_B0 0x71000000
+#define BGE_CHIPID_BCM5700_B1 0x71010000
+#define BGE_CHIPID_BCM5700_B2 0x71020000
+#define BGE_CHIPID_BCM5700_B3 0x71030000
+#define BGE_CHIPID_BCM5700_ALTIMA 0x71040000
+#define BGE_CHIPID_BCM5700_C0 0x72000000
+#define BGE_CHIPID_BCM5701_A0 0x00000000 /* grrrr */
+#define BGE_CHIPID_BCM5701_B0 0x01000000
+#define BGE_CHIPID_BCM5701_B2 0x01020000
+#define BGE_CHIPID_BCM5701_B5 0x01050000
+#define BGE_CHIPID_BCM5703_A0 0x10000000
+#define BGE_CHIPID_BCM5703_A1 0x10010000
+#define BGE_CHIPID_BCM5703_A2 0x10020000
+#define BGE_CHIPID_BCM5703_A3 0x10030000
+#define BGE_CHIPID_BCM5703_B0 0x11000000
+#define BGE_CHIPID_BCM5704_A0 0x20000000
+#define BGE_CHIPID_BCM5704_A1 0x20010000
+#define BGE_CHIPID_BCM5704_A2 0x20020000
+#define BGE_CHIPID_BCM5704_A3 0x20030000
+#define BGE_CHIPID_BCM5704_B0 0x21000000
+#define BGE_CHIPID_BCM5705_A0 0x30000000
+#define BGE_CHIPID_BCM5705_A1 0x30010000
+#define BGE_CHIPID_BCM5705_A2 0x30020000
+#define BGE_CHIPID_BCM5705_A3 0x30030000
+#define BGE_CHIPID_BCM5750_A0 0x40000000
+#define BGE_CHIPID_BCM5750_A1 0x40010000
+#define BGE_CHIPID_BCM5750_A3 0x40030000
+#define BGE_CHIPID_BCM5750_B0 0x41000000
+#define BGE_CHIPID_BCM5750_B1 0x41010000
+#define BGE_CHIPID_BCM5750_C0 0x42000000
+#define BGE_CHIPID_BCM5750_C1 0x42010000
+#define BGE_CHIPID_BCM5750_C2 0x42020000
+#define BGE_CHIPID_BCM5714_A0 0x50000000
+#define BGE_CHIPID_BCM5752_A0 0x60000000
+#define BGE_CHIPID_BCM5752_A1 0x60010000
+#define BGE_CHIPID_BCM5752_A2 0x60020000
+#define BGE_CHIPID_BCM5714_B0 0x80000000
+#define BGE_CHIPID_BCM5714_B3 0x80030000
+#define BGE_CHIPID_BCM5715_A0 0x90000000
+#define BGE_CHIPID_BCM5715_A1 0x90010000
+#define BGE_CHIPID_BCM5715_A3 0x90030000
+#define BGE_CHIPID_BCM5755_A0 0xa0000000
+#define BGE_CHIPID_BCM5755_A1 0xa0010000
+#define BGE_CHIPID_BCM5755_A2 0xa0020000
+#define BGE_CHIPID_BCM5722_A0 0xa2000000
+#define BGE_CHIPID_BCM5754_A0 0xb0000000
+#define BGE_CHIPID_BCM5754_A1 0xb0010000
+#define BGE_CHIPID_BCM5754_A2 0xb0020000
+#define BGE_CHIPID_BCM5787_A0 0xb0000000
+#define BGE_CHIPID_BCM5787_A1 0xb0010000
+#define BGE_CHIPID_BCM5787_A2 0xb0020000
+#define BGE_CHIPID_BCM5906_A1 0xc0010000
+#define BGE_CHIPID_BCM5906_A2 0xc0020000
+
+/* shorthand one */
+#define BGE_ASICREV(x) ((x) >> 28)
+#define BGE_ASICREV_BCM5701 0x00
+#define BGE_ASICREV_BCM5703 0x01
+#define BGE_ASICREV_BCM5704 0x02
+#define BGE_ASICREV_BCM5705 0x03
+#define BGE_ASICREV_BCM5750 0x04
+#define BGE_ASICREV_BCM5714_A0 0x05
+#define BGE_ASICREV_BCM5752 0x06
+#define BGE_ASICREV_BCM5700 0x07
+#define BGE_ASICREV_BCM5780 0x08
+#define BGE_ASICREV_BCM5714 0x09
+#define BGE_ASICREV_BCM5755 0x0a
+#define BGE_ASICREV_BCM5754 0x0b
+#define BGE_ASICREV_BCM5787 0x0b
+#define BGE_ASICREV_BCM5906 0x0c
+
+/* chip revisions */
+#define BGE_CHIPREV(x) ((x) >> 24)
+#define BGE_CHIPREV_5700_AX 0x70
+#define BGE_CHIPREV_5700_BX 0x71
+#define BGE_CHIPREV_5700_CX 0x72
+#define BGE_CHIPREV_5701_AX 0x00
+#define BGE_CHIPREV_5703_AX 0x10
+#define BGE_CHIPREV_5704_AX 0x20
+#define BGE_CHIPREV_5704_BX 0x21
+#define BGE_CHIPREV_5750_AX 0x40
+#define BGE_CHIPREV_5750_BX 0x41
+
+/* PCI DMA Read/Write Control register */
+#define BGE_PCIDMARWCTL_MINDMA 0x000000FF
+#define BGE_PCIDMARWCTL_RDADRR_BNDRY 0x00000700
+#define BGE_PCIDMARWCTL_WRADDR_BNDRY 0x00003800
+#define BGE_PCIDMARWCTL_ONEDMA_ATONCE 0x0000C000
+#define BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL 0x00004000
+#define BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL 0x00008000
+#define BGE_PCIDMARWCTL_RD_WAT 0x00070000
+#define BGE_PCIDMARWCTL_WR_WAT 0x00380000
+#define BGE_PCIDMARWCTL_USE_MRM 0x00400000
+#define BGE_PCIDMARWCTL_ASRT_ALL_BE 0x00800000
+#define BGE_PCIDMARWCTL_DFLT_PCI_RD_CMD 0x0F000000
+#define BGE_PCIDMARWCTL_DFLT_PCI_WR_CMD 0xF0000000
+
+#define BGE_PCIDMARWCTL_RD_WAT_SHIFT(x) ((x) << 16)
+#define BGE_PCIDMARWCTL_WR_WAT_SHIFT(x) ((x) << 19)
+#define BGE_PCIDMARWCTL_RD_CMD_SHIFT(x) ((x) << 24)
+#define BGE_PCIDMARWCTL_WR_CMD_SHIFT(x) ((x) << 28)
+
+#define BGE_PCI_READ_BNDRY_DISABLE 0x00000000
+#define BGE_PCI_READ_BNDRY_16BYTES 0x00000100
+#define BGE_PCI_READ_BNDRY_32BYTES 0x00000200
+#define BGE_PCI_READ_BNDRY_64BYTES 0x00000300
+#define BGE_PCI_READ_BNDRY_128BYTES 0x00000400
+#define BGE_PCI_READ_BNDRY_256BYTES 0x00000500
+#define BGE_PCI_READ_BNDRY_512BYTES 0x00000600
+#define BGE_PCI_READ_BNDRY_1024BYTES 0x00000700
+
+#define BGE_PCI_WRITE_BNDRY_DISABLE 0x00000000
+#define BGE_PCI_WRITE_BNDRY_16BYTES 0x00000800
+#define BGE_PCI_WRITE_BNDRY_32BYTES 0x00001000
+#define BGE_PCI_WRITE_BNDRY_64BYTES 0x00001800
+#define BGE_PCI_WRITE_BNDRY_128BYTES 0x00002000
+#define BGE_PCI_WRITE_BNDRY_256BYTES 0x00002800
+#define BGE_PCI_WRITE_BNDRY_512BYTES 0x00003000
+#define BGE_PCI_WRITE_BNDRY_1024BYTES 0x00003800
+
+/*
+ * PCI state register -- note, this register is read only
+ * unless the PCISTATE_WR bit of the PCI Misc. Host Control
+ * register is set.
+ */
+#define BGE_PCISTATE_FORCE_RESET 0x00000001
+#define BGE_PCISTATE_INTR_STATE 0x00000002
+#define BGE_PCISTATE_PCI_BUSMODE 0x00000004 /* 1 = PCI, 0 = PCI-X */
+#define BGE_PCISTATE_PCI_BUSSPEED 0x00000008 /* 1 = 66/133, 0 = 33/66 */
+#define BGE_PCISTATE_32BIT_BUS 0x00000010 /* 1 = 32bit, 0 = 64bit */
+#define BGE_PCISTATE_WANT_EXPROM 0x00000020
+#define BGE_PCISTATE_EXPROM_RETRY 0x00000040
+#define BGE_PCISTATE_FLATVIEW_MODE 0x00000100
+#define BGE_PCISTATE_PCI_TGT_RETRY_MAX 0x00000E00
+
+/*
+ * PCI Clock Control register -- note, this register is read only
+ * unless the CLOCKCTL_RW bit of the PCI Misc. Host Control
+ * register is set.
+ */
+#define BGE_PCICLOCKCTL_DETECTED_SPEED 0x0000000F
+#define BGE_PCICLOCKCTL_M66EN 0x00000080
+#define BGE_PCICLOCKCTL_LOWPWR_CLKMODE 0x00000200
+#define BGE_PCICLOCKCTL_RXCPU_CLK_DIS 0x00000400
+#define BGE_PCICLOCKCTL_TXCPU_CLK_DIS 0x00000800
+#define BGE_PCICLOCKCTL_ALTCLK 0x00001000
+#define BGE_PCICLOCKCTL_ALTCLK_SRC 0x00002000
+#define BGE_PCICLOCKCTL_PCIPLL_DISABLE 0x00004000
+#define BGE_PCICLOCKCTL_SYSPLL_DISABLE 0x00008000
+#define BGE_PCICLOCKCTL_BIST_ENABLE 0x00010000
+
+
+#ifndef PCIM_CMD_MWIEN
+#define PCIM_CMD_MWIEN 0x0010
+#endif
+
+/*
+ * High priority mailbox registers
+ * Each mailbox is 64-bits wide, though we only use the
+ * lower 32 bits. To write a 64-bit value, write the upper 32 bits
+ * first. The NIC will load the mailbox after the lower 32 bit word
+ * has been updated.
+ */
+#define BGE_MBX_IRQ0_HI 0x0200
+#define BGE_MBX_IRQ0_LO 0x0204
+#define BGE_MBX_IRQ1_HI 0x0208
+#define BGE_MBX_IRQ1_LO 0x020C
+#define BGE_MBX_IRQ2_HI 0x0210
+#define BGE_MBX_IRQ2_LO 0x0214
+#define BGE_MBX_IRQ3_HI 0x0218
+#define BGE_MBX_IRQ3_LO 0x021C
+#define BGE_MBX_GEN0_HI			0x0220
+#define BGE_MBX_GEN0_LO			0x0224
+#define BGE_MBX_GEN1_HI			0x0228
+#define BGE_MBX_GEN1_LO			0x022C
+#define BGE_MBX_GEN2_HI			0x0230
+#define BGE_MBX_GEN2_LO			0x0234
+/*
+ * The general mailboxes are consecutive 64-bit slots, so GEN3 lives
+ * at 0x0238/0x023C.  The previous values (0x0228/0x022C) duplicated
+ * GEN1's offsets; writes intended for GEN3 would have clobbered GEN1.
+ */
+#define BGE_MBX_GEN3_HI			0x0238
+#define BGE_MBX_GEN3_LO			0x023C
+#define BGE_MBX_GEN4_HI			0x0240
+#define BGE_MBX_GEN4_LO			0x0244
+#define BGE_MBX_GEN5_HI			0x0248
+#define BGE_MBX_GEN5_LO			0x024C
+#define BGE_MBX_GEN6_HI			0x0250
+#define BGE_MBX_GEN6_LO			0x0254
+#define BGE_MBX_GEN7_HI			0x0258
+#define BGE_MBX_GEN7_LO			0x025C
+#define BGE_MBX_RELOAD_STATS_HI 0x0260
+#define BGE_MBX_RELOAD_STATS_LO 0x0264
+#define BGE_MBX_RX_STD_PROD_HI 0x0268
+#define BGE_MBX_RX_STD_PROD_LO 0x026C
+#define BGE_MBX_RX_JUMBO_PROD_HI 0x0270
+#define BGE_MBX_RX_JUMBO_PROD_LO 0x0274
+#define BGE_MBX_RX_MINI_PROD_HI 0x0278
+#define BGE_MBX_RX_MINI_PROD_LO 0x027C
+#define BGE_MBX_RX_CONS0_HI 0x0280
+#define BGE_MBX_RX_CONS0_LO 0x0284
+#define BGE_MBX_RX_CONS1_HI 0x0288
+#define BGE_MBX_RX_CONS1_LO 0x028C
+#define BGE_MBX_RX_CONS2_HI 0x0290
+#define BGE_MBX_RX_CONS2_LO 0x0294
+#define BGE_MBX_RX_CONS3_HI 0x0298
+#define BGE_MBX_RX_CONS3_LO 0x029C
+#define BGE_MBX_RX_CONS4_HI 0x02A0
+#define BGE_MBX_RX_CONS4_LO 0x02A4
+#define BGE_MBX_RX_CONS5_HI 0x02A8
+#define BGE_MBX_RX_CONS5_LO 0x02AC
+#define BGE_MBX_RX_CONS6_HI 0x02B0
+#define BGE_MBX_RX_CONS6_LO 0x02B4
+#define BGE_MBX_RX_CONS7_HI 0x02B8
+#define BGE_MBX_RX_CONS7_LO 0x02BC
+#define BGE_MBX_RX_CONS8_HI 0x02C0
+#define BGE_MBX_RX_CONS8_LO 0x02C4
+#define BGE_MBX_RX_CONS9_HI 0x02C8
+#define BGE_MBX_RX_CONS9_LO 0x02CC
+#define BGE_MBX_RX_CONS10_HI 0x02D0
+#define BGE_MBX_RX_CONS10_LO 0x02D4
+#define BGE_MBX_RX_CONS11_HI 0x02D8
+#define BGE_MBX_RX_CONS11_LO 0x02DC
+#define BGE_MBX_RX_CONS12_HI 0x02E0
+#define BGE_MBX_RX_CONS12_LO 0x02E4
+#define BGE_MBX_RX_CONS13_HI 0x02E8
+#define BGE_MBX_RX_CONS13_LO 0x02EC
+#define BGE_MBX_RX_CONS14_HI 0x02F0
+#define BGE_MBX_RX_CONS14_LO 0x02F4
+#define BGE_MBX_RX_CONS15_HI 0x02F8
+#define BGE_MBX_RX_CONS15_LO 0x02FC
+#define BGE_MBX_TX_HOST_PROD0_HI 0x0300
+#define BGE_MBX_TX_HOST_PROD0_LO 0x0304
+#define BGE_MBX_TX_HOST_PROD1_HI 0x0308
+#define BGE_MBX_TX_HOST_PROD1_LO 0x030C
+#define BGE_MBX_TX_HOST_PROD2_HI 0x0310
+#define BGE_MBX_TX_HOST_PROD2_LO 0x0314
+#define BGE_MBX_TX_HOST_PROD3_HI 0x0318
+#define BGE_MBX_TX_HOST_PROD3_LO 0x031C
+#define BGE_MBX_TX_HOST_PROD4_HI 0x0320
+#define BGE_MBX_TX_HOST_PROD4_LO 0x0324
+#define BGE_MBX_TX_HOST_PROD5_HI 0x0328
+#define BGE_MBX_TX_HOST_PROD5_LO 0x032C
+#define BGE_MBX_TX_HOST_PROD6_HI 0x0330
+#define BGE_MBX_TX_HOST_PROD6_LO 0x0334
+#define BGE_MBX_TX_HOST_PROD7_HI 0x0338
+#define BGE_MBX_TX_HOST_PROD7_LO 0x033C
+#define BGE_MBX_TX_HOST_PROD8_HI 0x0340
+#define BGE_MBX_TX_HOST_PROD8_LO 0x0344
+#define BGE_MBX_TX_HOST_PROD9_HI 0x0348
+#define BGE_MBX_TX_HOST_PROD9_LO 0x034C
+#define BGE_MBX_TX_HOST_PROD10_HI 0x0350
+#define BGE_MBX_TX_HOST_PROD10_LO 0x0354
+#define BGE_MBX_TX_HOST_PROD11_HI 0x0358
+#define BGE_MBX_TX_HOST_PROD11_LO 0x035C
+#define BGE_MBX_TX_HOST_PROD12_HI 0x0360
+#define BGE_MBX_TX_HOST_PROD12_LO 0x0364
+#define BGE_MBX_TX_HOST_PROD13_HI 0x0368
+#define BGE_MBX_TX_HOST_PROD13_LO 0x036C
+#define BGE_MBX_TX_HOST_PROD14_HI 0x0370
+#define BGE_MBX_TX_HOST_PROD14_LO 0x0374
+#define BGE_MBX_TX_HOST_PROD15_HI 0x0378
+#define BGE_MBX_TX_HOST_PROD15_LO 0x037C
+#define BGE_MBX_TX_NIC_PROD0_HI 0x0380
+#define BGE_MBX_TX_NIC_PROD0_LO 0x0384
+#define BGE_MBX_TX_NIC_PROD1_HI 0x0388
+#define BGE_MBX_TX_NIC_PROD1_LO 0x038C
+#define BGE_MBX_TX_NIC_PROD2_HI 0x0390
+#define BGE_MBX_TX_NIC_PROD2_LO 0x0394
+#define BGE_MBX_TX_NIC_PROD3_HI 0x0398
+#define BGE_MBX_TX_NIC_PROD3_LO 0x039C
+#define BGE_MBX_TX_NIC_PROD4_HI 0x03A0
+#define BGE_MBX_TX_NIC_PROD4_LO 0x03A4
+#define BGE_MBX_TX_NIC_PROD5_HI 0x03A8
+#define BGE_MBX_TX_NIC_PROD5_LO 0x03AC
+#define BGE_MBX_TX_NIC_PROD6_HI 0x03B0
+#define BGE_MBX_TX_NIC_PROD6_LO 0x03B4
+#define BGE_MBX_TX_NIC_PROD7_HI 0x03B8
+#define BGE_MBX_TX_NIC_PROD7_LO 0x03BC
+#define BGE_MBX_TX_NIC_PROD8_HI 0x03C0
+#define BGE_MBX_TX_NIC_PROD8_LO 0x03C4
+#define BGE_MBX_TX_NIC_PROD9_HI 0x03C8
+#define BGE_MBX_TX_NIC_PROD9_LO 0x03CC
+#define BGE_MBX_TX_NIC_PROD10_HI 0x03D0
+#define BGE_MBX_TX_NIC_PROD10_LO 0x03D4
+#define BGE_MBX_TX_NIC_PROD11_HI 0x03D8
+#define BGE_MBX_TX_NIC_PROD11_LO 0x03DC
+#define BGE_MBX_TX_NIC_PROD12_HI 0x03E0
+#define BGE_MBX_TX_NIC_PROD12_LO 0x03E4
+#define BGE_MBX_TX_NIC_PROD13_HI 0x03E8
+#define BGE_MBX_TX_NIC_PROD13_LO 0x03EC
+#define BGE_MBX_TX_NIC_PROD14_HI 0x03F0
+#define BGE_MBX_TX_NIC_PROD14_LO 0x03F4
+#define BGE_MBX_TX_NIC_PROD15_HI 0x03F8
+#define BGE_MBX_TX_NIC_PROD15_LO 0x03FC
+
+#define BGE_TX_RINGS_MAX 4
+#define BGE_TX_RINGS_EXTSSRAM_MAX 16
+#define BGE_RX_RINGS_MAX 16
+
+/* Ethernet MAC control registers */
+#define BGE_MAC_MODE 0x0400
+#define BGE_MAC_STS 0x0404
+#define BGE_MAC_EVT_ENB 0x0408
+#define BGE_MAC_LED_CTL 0x040C
+#define BGE_MAC_ADDR1_LO 0x0410
+#define BGE_MAC_ADDR1_HI 0x0414
+#define BGE_MAC_ADDR2_LO 0x0418
+#define BGE_MAC_ADDR2_HI 0x041C
+#define BGE_MAC_ADDR3_LO 0x0420
+#define BGE_MAC_ADDR3_HI 0x0424
+#define BGE_MAC_ADDR4_LO 0x0428
+#define BGE_MAC_ADDR4_HI 0x042C
+#define BGE_WOL_PATPTR 0x0430
+#define BGE_WOL_PATCFG 0x0434
+#define BGE_TX_RANDOM_BACKOFF 0x0438
+#define BGE_RX_MTU 0x043C
+#define BGE_GBIT_PCS_TEST 0x0440
+#define BGE_TX_TBI_AUTONEG 0x0444
+#define BGE_RX_TBI_AUTONEG 0x0448
+#define BGE_MI_COMM 0x044C
+#define BGE_MI_STS 0x0450
+#define BGE_MI_MODE 0x0454
+#define BGE_AUTOPOLL_STS 0x0458
+#define BGE_TX_MODE 0x045C
+#define BGE_TX_STS 0x0460
+#define BGE_TX_LENGTHS 0x0464
+#define BGE_RX_MODE 0x0468
+#define BGE_RX_STS 0x046C
+#define BGE_MAR0 0x0470
+#define BGE_MAR1 0x0474
+#define BGE_MAR2 0x0478
+#define BGE_MAR3 0x047C
+#define BGE_RX_BD_RULES_CTL0 0x0480
+#define BGE_RX_BD_RULES_MASKVAL0 0x0484
+#define BGE_RX_BD_RULES_CTL1 0x0488
+#define BGE_RX_BD_RULES_MASKVAL1 0x048C
+#define BGE_RX_BD_RULES_CTL2 0x0490
+#define BGE_RX_BD_RULES_MASKVAL2 0x0494
+#define BGE_RX_BD_RULES_CTL3 0x0498
+#define BGE_RX_BD_RULES_MASKVAL3 0x049C
+#define BGE_RX_BD_RULES_CTL4 0x04A0
+#define BGE_RX_BD_RULES_MASKVAL4 0x04A4
+#define BGE_RX_BD_RULES_CTL5 0x04A8
+#define BGE_RX_BD_RULES_MASKVAL5 0x04AC
+#define BGE_RX_BD_RULES_CTL6 0x04B0
+#define BGE_RX_BD_RULES_MASKVAL6 0x04B4
+#define BGE_RX_BD_RULES_CTL7 0x04B8
+#define BGE_RX_BD_RULES_MASKVAL7 0x04BC
+#define BGE_RX_BD_RULES_CTL8 0x04C0
+#define BGE_RX_BD_RULES_MASKVAL8 0x04C4
+#define BGE_RX_BD_RULES_CTL9 0x04C8
+#define BGE_RX_BD_RULES_MASKVAL9 0x04CC
+#define BGE_RX_BD_RULES_CTL10 0x04D0
+#define BGE_RX_BD_RULES_MASKVAL10 0x04D4
+#define BGE_RX_BD_RULES_CTL11 0x04D8
+#define BGE_RX_BD_RULES_MASKVAL11 0x04DC
+#define BGE_RX_BD_RULES_CTL12 0x04E0
+#define BGE_RX_BD_RULES_MASKVAL12 0x04E4
+#define BGE_RX_BD_RULES_CTL13 0x04E8
+#define BGE_RX_BD_RULES_MASKVAL13 0x04EC
+#define BGE_RX_BD_RULES_CTL14 0x04F0
+#define BGE_RX_BD_RULES_MASKVAL14 0x04F4
+#define BGE_RX_BD_RULES_CTL15 0x04F8
+#define BGE_RX_BD_RULES_MASKVAL15 0x04FC
+#define BGE_RX_RULES_CFG 0x0500
+#define BGE_SERDES_CFG 0x0590
+#define BGE_SERDES_STS 0x0594
+#define BGE_SGDIG_CFG 0x05B0
+#define BGE_SGDIG_STS 0x05B4
+#define BGE_MAC_STATS 0x0800
+
+/* Ethernet MAC Mode register */
+#define BGE_MACMODE_RESET 0x00000001
+#define BGE_MACMODE_HALF_DUPLEX 0x00000002
+#define BGE_MACMODE_PORTMODE 0x0000000C
+#define BGE_MACMODE_LOOPBACK 0x00000010
+#define BGE_MACMODE_RX_TAGGEDPKT 0x00000080
+#define BGE_MACMODE_TX_BURST_ENB 0x00000100
+#define BGE_MACMODE_MAX_DEFER 0x00000200
+#define BGE_MACMODE_LINK_POLARITY 0x00000400
+#define BGE_MACMODE_RX_STATS_ENB 0x00000800
+#define BGE_MACMODE_RX_STATS_CLEAR 0x00001000
+#define BGE_MACMODE_RX_STATS_FLUSH 0x00002000
+#define BGE_MACMODE_TX_STATS_ENB 0x00004000
+#define BGE_MACMODE_TX_STATS_CLEAR 0x00008000
+#define BGE_MACMODE_TX_STATS_FLUSH 0x00010000
+#define BGE_MACMODE_TBI_SEND_CFGS 0x00020000
+#define BGE_MACMODE_MAGIC_PKT_ENB 0x00040000
+#define BGE_MACMODE_ACPI_PWRON_ENB 0x00080000
+#define BGE_MACMODE_MIP_ENB 0x00100000
+#define BGE_MACMODE_TXDMA_ENB 0x00200000
+#define BGE_MACMODE_RXDMA_ENB 0x00400000
+#define BGE_MACMODE_FRMHDR_DMA_ENB 0x00800000
+
+#define BGE_PORTMODE_NONE 0x00000000
+#define BGE_PORTMODE_MII 0x00000004
+#define BGE_PORTMODE_GMII 0x00000008
+#define BGE_PORTMODE_TBI 0x0000000C
+
+/* MAC Status register */
+#define BGE_MACSTAT_TBI_PCS_SYNCHED 0x00000001
+#define BGE_MACSTAT_TBI_SIGNAL_DETECT 0x00000002
+#define BGE_MACSTAT_RX_CFG 0x00000004
+#define BGE_MACSTAT_CFG_CHANGED 0x00000008
+#define BGE_MACSTAT_SYNC_CHANGED 0x00000010
+#define BGE_MACSTAT_PORT_DECODE_ERROR 0x00000400
+#define BGE_MACSTAT_LINK_CHANGED 0x00001000
+#define BGE_MACSTAT_MI_COMPLETE 0x00400000
+#define BGE_MACSTAT_MI_INTERRUPT 0x00800000
+#define BGE_MACSTAT_AUTOPOLL_ERROR 0x01000000
+#define BGE_MACSTAT_ODI_ERROR 0x02000000
+#define BGE_MACSTAT_RXSTAT_OFLOW 0x04000000
+#define BGE_MACSTAT_TXSTAT_OFLOW 0x08000000
+
+/* MAC Event Enable Register */
+#define BGE_EVTENB_PORT_DECODE_ERROR 0x00000400
+#define BGE_EVTENB_LINK_CHANGED 0x00001000
+#define BGE_EVTENB_MI_COMPLETE 0x00400000
+#define BGE_EVTENB_MI_INTERRUPT 0x00800000
+#define BGE_EVTENB_AUTOPOLL_ERROR 0x01000000
+#define BGE_EVTENB_ODI_ERROR 0x02000000
+#define BGE_EVTENB_RXSTAT_OFLOW 0x04000000
+#define BGE_EVTENB_TXSTAT_OFLOW 0x08000000
+
+/* LED Control Register */
+#define BGE_LEDCTL_LINKLED_OVERRIDE 0x00000001
+#define BGE_LEDCTL_1000MBPS_LED 0x00000002
+#define BGE_LEDCTL_100MBPS_LED 0x00000004
+#define BGE_LEDCTL_10MBPS_LED 0x00000008
+#define BGE_LEDCTL_TRAFLED_OVERRIDE 0x00000010
+#define BGE_LEDCTL_TRAFLED_BLINK 0x00000020
+#define BGE_LEDCTL_TREFLED_BLINK_2 0x00000040
+#define BGE_LEDCTL_1000MBPS_STS 0x00000080
+#define BGE_LEDCTL_100MBPS_STS 0x00000100
+#define BGE_LEDCTL_10MBPS_STS 0x00000200
+#define BGE_LEDCTL_TRADLED_STS 0x00000400
+#define BGE_LEDCTL_BLINKPERIOD 0x7FF80000
+#define BGE_LEDCTL_BLINKPERIOD_OVERRIDE 0x80000000
+
+/* TX backoff seed register */
+#define BGE_TX_BACKOFF_SEED_MASK 0x3F
+
+/* Autopoll status register */
+#define BGE_AUTOPOLLSTS_ERROR 0x00000001
+
+/* Transmit MAC mode register */
+#define BGE_TXMODE_RESET 0x00000001
+#define BGE_TXMODE_ENABLE 0x00000002
+#define BGE_TXMODE_FLOWCTL_ENABLE 0x00000010
+#define BGE_TXMODE_BIGBACKOFF_ENABLE 0x00000020
+#define BGE_TXMODE_LONGPAUSE_ENABLE 0x00000040
+
+/* Transmit MAC status register */
+#define BGE_TXSTAT_RX_XOFFED 0x00000001
+#define BGE_TXSTAT_SENT_XOFF 0x00000002
+#define BGE_TXSTAT_SENT_XON 0x00000004
+#define BGE_TXSTAT_LINK_UP 0x00000008
+#define BGE_TXSTAT_ODI_UFLOW 0x00000010
+#define BGE_TXSTAT_ODI_OFLOW 0x00000020
+
+/* Transmit MAC lengths register */
+#define BGE_TXLEN_SLOTTIME 0x000000FF
+#define BGE_TXLEN_IPG 0x00000F00
+#define BGE_TXLEN_CRS 0x00003000
+
+/* Receive MAC mode register */
+#define BGE_RXMODE_RESET 0x00000001
+#define BGE_RXMODE_ENABLE 0x00000002
+#define BGE_RXMODE_FLOWCTL_ENABLE 0x00000004
+#define BGE_RXMODE_RX_GIANTS 0x00000020
+#define BGE_RXMODE_RX_RUNTS 0x00000040
+#define BGE_RXMODE_8022_LENCHECK 0x00000080
+#define BGE_RXMODE_RX_PROMISC 0x00000100
+#define BGE_RXMODE_RX_NO_CRC_CHECK 0x00000200
+#define BGE_RXMODE_RX_KEEP_VLAN_DIAG 0x00000400
+
+/* Receive MAC status register */
+#define BGE_RXSTAT_REMOTE_XOFFED 0x00000001
+#define BGE_RXSTAT_RCVD_XOFF 0x00000002
+#define BGE_RXSTAT_RCVD_XON 0x00000004
+
+/* Receive Rules Control register */
+#define BGE_RXRULECTL_OFFSET 0x000000FF
+#define BGE_RXRULECTL_CLASS 0x00001F00
+#define BGE_RXRULECTL_HDRTYPE 0x0000E000
+#define BGE_RXRULECTL_COMPARE_OP 0x00030000
+#define BGE_RXRULECTL_MAP 0x01000000
+#define BGE_RXRULECTL_DISCARD 0x02000000
+#define BGE_RXRULECTL_MASK 0x04000000
+#define BGE_RXRULECTL_ACTIVATE_PROC3 0x08000000
+#define BGE_RXRULECTL_ACTIVATE_PROC2 0x10000000
+#define BGE_RXRULECTL_ACTIVATE_PROC1 0x20000000
+#define BGE_RXRULECTL_ANDWITHNEXT 0x40000000
+
+/* Receive Rules Mask register */
+#define BGE_RXRULEMASK_VALUE 0x0000FFFF
+#define BGE_RXRULEMASK_MASKVAL 0xFFFF0000
+
+/* SERDES configuration register */
+#define BGE_SERDESCFG_RXR 0x00000007 /* phase interpolator */
+#define BGE_SERDESCFG_RXG 0x00000018 /* rx gain setting */
+#define BGE_SERDESCFG_RXEDGESEL 0x00000040 /* rising/falling egde */
+#define BGE_SERDESCFG_TX_BIAS 0x00000380 /* TXDAC bias setting */
+#define BGE_SERDESCFG_IBMAX 0x00000400 /* bias current +25% */
+#define BGE_SERDESCFG_IBMIN 0x00000800 /* bias current -25% */
+#define BGE_SERDESCFG_TXMODE 0x00001000
+#define BGE_SERDESCFG_TXEDGESEL 0x00002000 /* rising/falling edge */
+#define BGE_SERDESCFG_MODE 0x00004000 /* TXCP/TXCN disabled */
+#define BGE_SERDESCFG_PLLTEST 0x00008000 /* PLL test mode */
+#define BGE_SERDESCFG_CDET 0x00010000 /* comma detect enable */
+#define BGE_SERDESCFG_TBILOOP 0x00020000 /* local loopback */
+#define BGE_SERDESCFG_REMLOOP 0x00040000 /* remote loopback */
+#define BGE_SERDESCFG_INVPHASE 0x00080000 /* Reverse 125Mhz clock */
+#define BGE_SERDESCFG_12REGCTL 0x00300000 /* 1.2v regulator ctl */
+#define BGE_SERDESCFG_REGCTL 0x00C00000 /* regulator ctl (2.5v) */
+
+/* SERDES status register */
+#define BGE_SERDESSTS_RXSTAT 0x0000000F /* receive status bits */
+#define BGE_SERDESSTS_CDET 0x00000010 /* comma code detected */
+
+/* SGDIG config (not documented) */
+#define BGE_SGDIGCFG_PAUSE_CAP 0x00000800
+#define BGE_SGDIGCFG_ASYM_PAUSE 0x00001000
+#define BGE_SGDIGCFG_SEND 0x40000000
+#define BGE_SGDIGCFG_AUTO 0x80000000
+
+/* SGDIG status (not documented) */
+#define BGE_SGDIGSTS_PAUSE_CAP 0x00080000
+#define BGE_SGDIGSTS_ASYM_PAUSE 0x00100000
+#define BGE_SGDIGSTS_DONE 0x00000002
+
+
+/* MI communication register */
+#define BGE_MICOMM_DATA 0x0000FFFF
+#define BGE_MICOMM_REG 0x001F0000
+#define BGE_MICOMM_PHY 0x03E00000
+#define BGE_MICOMM_CMD 0x0C000000
+#define BGE_MICOMM_READFAIL 0x10000000
+#define BGE_MICOMM_BUSY 0x20000000
+
+#define BGE_MIREG(x) ((x & 0x1F) << 16)
+#define BGE_MIPHY(x) ((x & 0x1F) << 21)
+#define BGE_MICMD_WRITE 0x04000000
+#define BGE_MICMD_READ 0x08000000
+
+/* MI status register */
+#define BGE_MISTS_LINK 0x00000001
+#define BGE_MISTS_10MBPS 0x00000002
+
+#define BGE_MIMODE_SHORTPREAMBLE 0x00000002
+#define BGE_MIMODE_AUTOPOLL 0x00000010
+#define BGE_MIMODE_CLKCNT 0x001F0000
+
+
+/*
+ * Send data initiator control registers.
+ */
+#define BGE_SDI_MODE 0x0C00
+#define BGE_SDI_STATUS 0x0C04
+#define BGE_SDI_STATS_CTL 0x0C08
+#define BGE_SDI_STATS_ENABLE_MASK 0x0C0C
+#define BGE_SDI_STATS_INCREMENT_MASK 0x0C10
+#define BGE_LOCSTATS_COS0		0x0C80
+#define BGE_LOCSTATS_COS1		0x0C84
+#define BGE_LOCSTATS_COS2		0x0C88
+#define BGE_LOCSTATS_COS3		0x0C8C
+#define BGE_LOCSTATS_COS4		0x0C90
+/*
+ * The per-CoS local statistics counters are consecutive 32-bit
+ * registers (COS4 = 0x0C90, COS6 = 0x0C98), so COS5 is 0x0C94.
+ * The previous value (0x0C84) duplicated COS1's offset.
+ */
+#define BGE_LOCSTATS_COS5		0x0C94
+#define BGE_LOCSTATS_COS6		0x0C98
+#define BGE_LOCSTATS_COS7		0x0C9C
+#define BGE_LOCSTATS_COS8		0x0CA0
+#define BGE_LOCSTATS_COS9		0x0CA4
+#define BGE_LOCSTATS_COS10		0x0CA8
+#define BGE_LOCSTATS_COS11		0x0CAC
+#define BGE_LOCSTATS_COS12		0x0CB0
+#define BGE_LOCSTATS_COS13		0x0CB4
+#define BGE_LOCSTATS_COS14		0x0CB8
+#define BGE_LOCSTATS_COS15		0x0CBC
+#define BGE_LOCSTATS_DMA_RQ_FULL 0x0CC0
+#define BGE_LOCSTATS_DMA_HIPRIO_RQ_FULL 0x0CC4
+#define BGE_LOCSTATS_SDC_QUEUE_FULL 0x0CC8
+#define BGE_LOCSTATS_NIC_SENDPROD_SET 0x0CCC
+#define BGE_LOCSTATS_STATS_UPDATED 0x0CD0
+#define BGE_LOCSTATS_IRQS 0x0CD4
+#define BGE_LOCSTATS_AVOIDED_IRQS 0x0CD8
+#define BGE_LOCSTATS_TX_THRESH_HIT 0x0CDC
+
+/* Send Data Initiator mode register */
+#define BGE_SDIMODE_RESET 0x00000001
+#define BGE_SDIMODE_ENABLE 0x00000002
+#define BGE_SDIMODE_STATS_OFLOW_ATTN 0x00000004
+
+/* Send Data Initiator stats register */
+#define BGE_SDISTAT_STATS_OFLOW_ATTN 0x00000004
+
+/* Send Data Initiator stats control register */
+#define BGE_SDISTATSCTL_ENABLE 0x00000001
+#define BGE_SDISTATSCTL_FASTER 0x00000002
+#define BGE_SDISTATSCTL_CLEAR 0x00000004
+#define BGE_SDISTATSCTL_FORCEFLUSH 0x00000008
+#define BGE_SDISTATSCTL_FORCEZERO 0x00000010
+
+/*
+ * Send Data Completion Control registers
+ */
+#define BGE_SDC_MODE 0x1000
+#define BGE_SDC_STATUS 0x1004
+
+/* Send Data completion mode register */
+#define BGE_SDCMODE_RESET 0x00000001
+#define BGE_SDCMODE_ENABLE 0x00000002
+#define BGE_SDCMODE_ATTN 0x00000004
+
+/* Send Data completion status register */
+#define BGE_SDCSTAT_ATTN 0x00000004
+
+/*
+ * Send BD Ring Selector Control registers
+ */
+#define BGE_SRS_MODE 0x1400
+#define BGE_SRS_STATUS 0x1404
+#define BGE_SRS_HWDIAG 0x1408
+#define BGE_SRS_LOC_NIC_CONS0 0x1440
+#define BGE_SRS_LOC_NIC_CONS1 0x1444
+#define BGE_SRS_LOC_NIC_CONS2 0x1448
+#define BGE_SRS_LOC_NIC_CONS3 0x144C
+#define BGE_SRS_LOC_NIC_CONS4 0x1450
+#define BGE_SRS_LOC_NIC_CONS5 0x1454
+#define BGE_SRS_LOC_NIC_CONS6 0x1458
+#define BGE_SRS_LOC_NIC_CONS7 0x145C
+#define BGE_SRS_LOC_NIC_CONS8 0x1460
+#define BGE_SRS_LOC_NIC_CONS9 0x1464
+#define BGE_SRS_LOC_NIC_CONS10 0x1468
+#define BGE_SRS_LOC_NIC_CONS11 0x146C
+#define BGE_SRS_LOC_NIC_CONS12 0x1470
+#define BGE_SRS_LOC_NIC_CONS13 0x1474
+#define BGE_SRS_LOC_NIC_CONS14 0x1478
+#define BGE_SRS_LOC_NIC_CONS15 0x147C
+
+/* Send BD Ring Selector Mode register */
+#define BGE_SRSMODE_RESET 0x00000001
+#define BGE_SRSMODE_ENABLE 0x00000002
+#define BGE_SRSMODE_ATTN 0x00000004
+
+/* Send BD Ring Selector Status register */
+#define BGE_SRSSTAT_ERROR 0x00000004
+
+/* Send BD Ring Selector HW Diagnostics register */
+#define BGE_SRSHWDIAG_STATE 0x0000000F
+#define BGE_SRSHWDIAG_CURRINGNUM 0x000000F0
+#define BGE_SRSHWDIAG_STAGEDRINGNUM 0x00000F00
+#define BGE_SRSHWDIAG_RINGNUM_IN_MBX 0x0000F000
+
+/*
+ * Send BD Initiator Selector Control registers
+ */
+#define BGE_SBDI_MODE 0x1800
+#define BGE_SBDI_STATUS 0x1804
+#define BGE_SBDI_LOC_NIC_PROD0 0x1808
+#define BGE_SBDI_LOC_NIC_PROD1 0x180C
+#define BGE_SBDI_LOC_NIC_PROD2 0x1810
+#define BGE_SBDI_LOC_NIC_PROD3 0x1814
+#define BGE_SBDI_LOC_NIC_PROD4 0x1818
+#define BGE_SBDI_LOC_NIC_PROD5 0x181C
+#define BGE_SBDI_LOC_NIC_PROD6 0x1820
+#define BGE_SBDI_LOC_NIC_PROD7 0x1824
+#define BGE_SBDI_LOC_NIC_PROD8 0x1828
+#define BGE_SBDI_LOC_NIC_PROD9 0x182C
+#define BGE_SBDI_LOC_NIC_PROD10 0x1830
+#define BGE_SBDI_LOC_NIC_PROD11 0x1834
+#define BGE_SBDI_LOC_NIC_PROD12 0x1838
+#define BGE_SBDI_LOC_NIC_PROD13 0x183C
+#define BGE_SBDI_LOC_NIC_PROD14 0x1840
+#define BGE_SBDI_LOC_NIC_PROD15 0x1844
+
+/* Send BD Initiator Mode register */
+#define BGE_SBDIMODE_RESET 0x00000001
+#define BGE_SBDIMODE_ENABLE 0x00000002
+#define BGE_SBDIMODE_ATTN 0x00000004
+
+/* Send BD Initiator Status register */
+#define BGE_SBDISTAT_ERROR 0x00000004
+
+/*
+ * Send BD Completion Control registers
+ */
+#define BGE_SBDC_MODE 0x1C00
+#define BGE_SBDC_STATUS 0x1C04
+
+/* Send BD Completion Control Mode register */
+#define BGE_SBDCMODE_RESET 0x00000001
+#define BGE_SBDCMODE_ENABLE 0x00000002
+#define BGE_SBDCMODE_ATTN 0x00000004
+
+/* Send BD Completion Control Status register */
+#define BGE_SBDCSTAT_ATTN 0x00000004
+
+/*
+ * Receive List Placement Control registers
+ */
+#define BGE_RXLP_MODE 0x2000
+#define BGE_RXLP_STATUS 0x2004
+#define BGE_RXLP_SEL_LIST_LOCK 0x2008
+#define BGE_RXLP_SEL_NON_EMPTY_BITS 0x200C
+#define BGE_RXLP_CFG 0x2010
+#define BGE_RXLP_STATS_CTL 0x2014
+#define BGE_RXLP_STATS_ENABLE_MASK 0x2018
+#define BGE_RXLP_STATS_INCREMENT_MASK 0x201C
+#define BGE_RXLP_HEAD0 0x2100
+#define BGE_RXLP_TAIL0 0x2104
+#define BGE_RXLP_COUNT0 0x2108
+#define BGE_RXLP_HEAD1 0x2110
+#define BGE_RXLP_TAIL1 0x2114
+#define BGE_RXLP_COUNT1 0x2118
+#define BGE_RXLP_HEAD2 0x2120
+#define BGE_RXLP_TAIL2 0x2124
+#define BGE_RXLP_COUNT2 0x2128
+#define BGE_RXLP_HEAD3 0x2130
+#define BGE_RXLP_TAIL3 0x2134
+#define BGE_RXLP_COUNT3 0x2138
+#define BGE_RXLP_HEAD4 0x2140
+#define BGE_RXLP_TAIL4 0x2144
+#define BGE_RXLP_COUNT4 0x2148
+#define BGE_RXLP_HEAD5 0x2150
+#define BGE_RXLP_TAIL5 0x2154
+#define BGE_RXLP_COUNT5 0x2158
+#define BGE_RXLP_HEAD6 0x2160
+#define BGE_RXLP_TAIL6 0x2164
+#define BGE_RXLP_COUNT6 0x2168
+#define BGE_RXLP_HEAD7 0x2170
+#define BGE_RXLP_TAIL7 0x2174
+#define BGE_RXLP_COUNT7 0x2178
+#define BGE_RXLP_HEAD8 0x2180
+#define BGE_RXLP_TAIL8 0x2184
+#define BGE_RXLP_COUNT8 0x2188
+#define BGE_RXLP_HEAD9 0x2190
+#define BGE_RXLP_TAIL9 0x2194
+#define BGE_RXLP_COUNT9 0x2198
+#define BGE_RXLP_HEAD10 0x21A0
+#define BGE_RXLP_TAIL10 0x21A4
+#define BGE_RXLP_COUNT10 0x21A8
+#define BGE_RXLP_HEAD11 0x21B0
+#define BGE_RXLP_TAIL11 0x21B4
+#define BGE_RXLP_COUNT11 0x21B8
+#define BGE_RXLP_HEAD12 0x21C0
+#define BGE_RXLP_TAIL12 0x21C4
+#define BGE_RXLP_COUNT12 0x21C8
+#define BGE_RXLP_HEAD13 0x21D0
+#define BGE_RXLP_TAIL13 0x21D4
+#define BGE_RXLP_COUNT13 0x21D8
+#define BGE_RXLP_HEAD14 0x21E0
+#define BGE_RXLP_TAIL14 0x21E4
+#define BGE_RXLP_COUNT14 0x21E8
+#define BGE_RXLP_HEAD15 0x21F0
+#define BGE_RXLP_TAIL15 0x21F4
+#define BGE_RXLP_COUNT15 0x21F8
+#define BGE_RXLP_LOCSTAT_COS0 0x2200
+#define BGE_RXLP_LOCSTAT_COS1 0x2204
+#define BGE_RXLP_LOCSTAT_COS2 0x2208
+#define BGE_RXLP_LOCSTAT_COS3 0x220C
+#define BGE_RXLP_LOCSTAT_COS4 0x2210
+#define BGE_RXLP_LOCSTAT_COS5 0x2214
+#define BGE_RXLP_LOCSTAT_COS6 0x2218
+#define BGE_RXLP_LOCSTAT_COS7 0x221C
+#define BGE_RXLP_LOCSTAT_COS8 0x2220
+#define BGE_RXLP_LOCSTAT_COS9 0x2224
+#define BGE_RXLP_LOCSTAT_COS10 0x2228
+#define BGE_RXLP_LOCSTAT_COS11 0x222C
+#define BGE_RXLP_LOCSTAT_COS12 0x2230
+#define BGE_RXLP_LOCSTAT_COS13 0x2234
+#define BGE_RXLP_LOCSTAT_COS14 0x2238
+#define BGE_RXLP_LOCSTAT_COS15 0x223C
+#define BGE_RXLP_LOCSTAT_FILTDROP 0x2240
+#define BGE_RXLP_LOCSTAT_DMA_WRQ_FULL 0x2244
+#define BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL 0x2248
+#define BGE_RXLP_LOCSTAT_OUT_OF_BDS 0x224C
+#define BGE_RXLP_LOCSTAT_IFIN_DROPS 0x2250
+#define BGE_RXLP_LOCSTAT_IFIN_ERRORS 0x2254
+#define BGE_RXLP_LOCSTAT_RXTHRESH_HIT 0x2258
+
+
+/* Receive List Placement mode register */
+#define BGE_RXLPMODE_RESET 0x00000001
+#define BGE_RXLPMODE_ENABLE 0x00000002
+#define BGE_RXLPMODE_CLASS0_ATTN 0x00000004
+#define BGE_RXLPMODE_MAPOUTRANGE_ATTN 0x00000008
+#define BGE_RXLPMODE_STATSOFLOW_ATTN 0x00000010
+
+/* Receive List Placement Status register */
+#define BGE_RXLPSTAT_CLASS0_ATTN 0x00000004
+#define BGE_RXLPSTAT_MAPOUTRANGE_ATTN 0x00000008
+#define BGE_RXLPSTAT_STATSOFLOW_ATTN 0x00000010
+
+/*
+ * Receive Data and Receive BD Initiator Control Registers
+ */
+#define BGE_RDBDI_MODE 0x2400
+#define BGE_RDBDI_STATUS 0x2404
+#define BGE_RX_JUMBO_RCB_HADDR_HI 0x2440
+#define BGE_RX_JUMBO_RCB_HADDR_LO 0x2444
+#define BGE_RX_JUMBO_RCB_MAXLEN_FLAGS 0x2448
+#define BGE_RX_JUMBO_RCB_NICADDR 0x244C
+#define BGE_RX_STD_RCB_HADDR_HI 0x2450
+#define BGE_RX_STD_RCB_HADDR_LO 0x2454
+#define BGE_RX_STD_RCB_MAXLEN_FLAGS 0x2458
+#define BGE_RX_STD_RCB_NICADDR 0x245C
+#define BGE_RX_MINI_RCB_HADDR_HI 0x2460
+#define BGE_RX_MINI_RCB_HADDR_LO 0x2464
+#define BGE_RX_MINI_RCB_MAXLEN_FLAGS 0x2468
+#define BGE_RX_MINI_RCB_NICADDR 0x246C
+#define BGE_RDBDI_JUMBO_RX_CONS 0x2470
+#define BGE_RDBDI_STD_RX_CONS 0x2474
+#define BGE_RDBDI_MINI_RX_CONS 0x2478
+#define BGE_RDBDI_RETURN_PROD0 0x2480
+#define BGE_RDBDI_RETURN_PROD1 0x2484
+#define BGE_RDBDI_RETURN_PROD2 0x2488
+#define BGE_RDBDI_RETURN_PROD3 0x248C
+#define BGE_RDBDI_RETURN_PROD4 0x2490
+#define BGE_RDBDI_RETURN_PROD5 0x2494
+#define BGE_RDBDI_RETURN_PROD6 0x2498
+#define BGE_RDBDI_RETURN_PROD7 0x249C
+#define BGE_RDBDI_RETURN_PROD8 0x24A0
+#define BGE_RDBDI_RETURN_PROD9 0x24A4
+#define BGE_RDBDI_RETURN_PROD10 0x24A8
+#define BGE_RDBDI_RETURN_PROD11 0x24AC
+#define BGE_RDBDI_RETURN_PROD12 0x24B0
+#define BGE_RDBDI_RETURN_PROD13 0x24B4
+#define BGE_RDBDI_RETURN_PROD14 0x24B8
+#define BGE_RDBDI_RETURN_PROD15 0x24BC
+#define BGE_RDBDI_HWDIAG 0x24C0
+
+
+/* Receive Data and Receive BD Initiator Mode register */
+#define BGE_RDBDIMODE_RESET 0x00000001
+#define BGE_RDBDIMODE_ENABLE 0x00000002
+#define BGE_RDBDIMODE_JUMBO_ATTN 0x00000004
+#define BGE_RDBDIMODE_GIANT_ATTN 0x00000008
+#define BGE_RDBDIMODE_BADRINGSZ_ATTN 0x00000010
+
+/* Receive Data and Receive BD Initiator Status register */
+#define BGE_RDBDISTAT_JUMBO_ATTN 0x00000004
+#define BGE_RDBDISTAT_GIANT_ATTN 0x00000008
+#define BGE_RDBDISTAT_BADRINGSZ_ATTN 0x00000010
+
+
+/*
+ * Receive Data Completion Control registers
+ */
+#define BGE_RDC_MODE 0x2800
+
+/* Receive Data Completion Mode register */
+#define BGE_RDCMODE_RESET 0x00000001
+#define BGE_RDCMODE_ENABLE 0x00000002
+#define BGE_RDCMODE_ATTN 0x00000004
+
+/*
+ * Receive BD Initiator Control registers
+ */
+#define BGE_RBDI_MODE 0x2C00
+#define BGE_RBDI_STATUS 0x2C04
+#define BGE_RBDI_NIC_JUMBO_BD_PROD 0x2C08
+#define BGE_RBDI_NIC_STD_BD_PROD 0x2C0C
+#define BGE_RBDI_NIC_MINI_BD_PROD 0x2C10
+#define BGE_RBDI_MINI_REPL_THRESH 0x2C14
+#define BGE_RBDI_STD_REPL_THRESH 0x2C18
+#define BGE_RBDI_JUMBO_REPL_THRESH 0x2C1C
+
+/* Receive BD Initiator Mode register */
+#define BGE_RBDIMODE_RESET 0x00000001
+#define BGE_RBDIMODE_ENABLE 0x00000002
+#define BGE_RBDIMODE_ATTN 0x00000004
+
+/* Receive BD Initiator Status register */
+#define BGE_RBDISTAT_ATTN 0x00000004
+
+/*
+ * Receive BD Completion Control registers
+ */
+#define BGE_RBDC_MODE 0x3000
+#define BGE_RBDC_STATUS 0x3004
+#define BGE_RBDC_JUMBO_BD_PROD 0x3008
+#define BGE_RBDC_STD_BD_PROD 0x300C
+#define BGE_RBDC_MINI_BD_PROD 0x3010
+
+/* Receive BD completion mode register */
+#define BGE_RBDCMODE_RESET 0x00000001
+#define BGE_RBDCMODE_ENABLE 0x00000002
+#define BGE_RBDCMODE_ATTN 0x00000004
+
+/* Receive BD completion status register */
+#define BGE_RBDCSTAT_ERROR 0x00000004
+
+/*
+ * Receive List Selector Control registers
+ */
+#define BGE_RXLS_MODE 0x3400
+#define BGE_RXLS_STATUS 0x3404
+
+/* Receive List Selector Mode register */
+#define BGE_RXLSMODE_RESET 0x00000001
+#define BGE_RXLSMODE_ENABLE 0x00000002
+#define BGE_RXLSMODE_ATTN 0x00000004
+
+/* Receive List Selector Status register */
+#define BGE_RXLSSTAT_ERROR 0x00000004
+
+/*
+ * Mbuf Cluster Free registers (has nothing to do with BSD mbufs)
+ */
+#define BGE_MBCF_MODE 0x3800
+#define BGE_MBCF_STATUS 0x3804
+
+/* Mbuf Cluster Free mode register */
+#define BGE_MBCFMODE_RESET 0x00000001
+#define BGE_MBCFMODE_ENABLE 0x00000002
+#define BGE_MBCFMODE_ATTN 0x00000004
+
+/* Mbuf Cluster Free status register */
+#define BGE_MBCFSTAT_ERROR 0x00000004
+
+/*
+ * Host Coalescing Control registers
+ */
+#define BGE_HCC_MODE 0x3C00
+#define BGE_HCC_STATUS 0x3C04
+#define BGE_HCC_RX_COAL_TICKS 0x3C08
+#define BGE_HCC_TX_COAL_TICKS 0x3C0C
+#define BGE_HCC_RX_MAX_COAL_BDS 0x3C10
+#define BGE_HCC_TX_MAX_COAL_BDS 0x3C14
+#define BGE_HCC_RX_COAL_TICKS_INT 0x3C18 /* ticks during interrupt */
+#define BGE_HCC_TX_COAL_TICKS_INT 0x3C1C /* ticks during interrupt */
+#define BGE_HCC_RX_MAX_COAL_BDS_INT 0x3C20 /* BDs during interrupt */
+#define BGE_HCC_TX_MAX_COAL_BDS_INT 0x3C24 /* BDs during interrupt */
+#define BGE_HCC_STATS_TICKS 0x3C28
+#define BGE_HCC_STATS_ADDR_HI 0x3C30
+#define BGE_HCC_STATS_ADDR_LO 0x3C34
+#define BGE_HCC_STATUSBLK_ADDR_HI 0x3C38
+#define BGE_HCC_STATUSBLK_ADDR_LO 0x3C3C
+#define BGE_HCC_STATS_BASEADDR 0x3C40 /* address in NIC memory */
+#define BGE_HCC_STATUSBLK_BASEADDR 0x3C44 /* address in NIC memory */
+#define BGE_FLOW_ATTN 0x3C48
+#define BGE_HCC_JUMBO_BD_CONS 0x3C50
+#define BGE_HCC_STD_BD_CONS 0x3C54
+#define BGE_HCC_MINI_BD_CONS 0x3C58
+#define BGE_HCC_RX_RETURN_PROD0 0x3C80
+#define BGE_HCC_RX_RETURN_PROD1 0x3C84
+#define BGE_HCC_RX_RETURN_PROD2 0x3C88
+#define BGE_HCC_RX_RETURN_PROD3 0x3C8C
+#define BGE_HCC_RX_RETURN_PROD4 0x3C90
+#define BGE_HCC_RX_RETURN_PROD5 0x3C94
+#define BGE_HCC_RX_RETURN_PROD6 0x3C98
+#define BGE_HCC_RX_RETURN_PROD7 0x3C9C
+#define BGE_HCC_RX_RETURN_PROD8 0x3CA0
+#define BGE_HCC_RX_RETURN_PROD9 0x3CA4
+#define BGE_HCC_RX_RETURN_PROD10 0x3CA8
+#define BGE_HCC_RX_RETURN_PROD11 0x3CAC
+#define BGE_HCC_RX_RETURN_PROD12 0x3CB0
+#define BGE_HCC_RX_RETURN_PROD13 0x3CB4
+#define BGE_HCC_RX_RETURN_PROD14 0x3CB8
+#define BGE_HCC_RX_RETURN_PROD15 0x3CBC
+#define BGE_HCC_TX_BD_CONS0 0x3CC0
+#define BGE_HCC_TX_BD_CONS1 0x3CC4
+#define BGE_HCC_TX_BD_CONS2 0x3CC8
+#define BGE_HCC_TX_BD_CONS3 0x3CCC
+#define BGE_HCC_TX_BD_CONS4 0x3CD0
+#define BGE_HCC_TX_BD_CONS5 0x3CD4
+#define BGE_HCC_TX_BD_CONS6 0x3CD8
+#define BGE_HCC_TX_BD_CONS7 0x3CDC
+#define BGE_HCC_TX_BD_CONS8 0x3CE0
+#define BGE_HCC_TX_BD_CONS9 0x3CE4
+#define BGE_HCC_TX_BD_CONS10 0x3CE8
+#define BGE_HCC_TX_BD_CONS11 0x3CEC
+#define BGE_HCC_TX_BD_CONS12 0x3CF0
+#define BGE_HCC_TX_BD_CONS13 0x3CF4
+#define BGE_HCC_TX_BD_CONS14 0x3CF8
+#define BGE_HCC_TX_BD_CONS15 0x3CFC
+
+
+/* Host coalescing mode register */
+#define BGE_HCCMODE_RESET 0x00000001
+#define BGE_HCCMODE_ENABLE 0x00000002
+#define BGE_HCCMODE_ATTN 0x00000004
+#define BGE_HCCMODE_COAL_NOW 0x00000008
+#define BGE_HCCMODE_MSI_BITS 0x00000070
+#define BGE_HCCMODE_STATBLK_SIZE 0x00000180
+
+#define BGE_STATBLKSZ_FULL 0x00000000
+#define BGE_STATBLKSZ_64BYTE 0x00000080
+#define BGE_STATBLKSZ_32BYTE 0x00000100
+
+/* Host coalescing status register */
+#define BGE_HCCSTAT_ERROR 0x00000004
+
+/* Flow attention register */
+#define BGE_FLOWATTN_MB_LOWAT 0x00000040
+#define BGE_FLOWATTN_MEMARB 0x00000080
+#define BGE_FLOWATTN_HOSTCOAL 0x00008000
+#define BGE_FLOWATTN_DMADONE_DISCARD 0x00010000
+#define BGE_FLOWATTN_RCB_INVAL 0x00020000
+#define BGE_FLOWATTN_RXDATA_CORRUPT 0x00040000
+#define BGE_FLOWATTN_RDBDI 0x00080000
+#define BGE_FLOWATTN_RXLS 0x00100000
+#define BGE_FLOWATTN_RXLP 0x00200000
+#define BGE_FLOWATTN_RBDC 0x00400000
+#define BGE_FLOWATTN_RBDI 0x00800000
+#define BGE_FLOWATTN_SDC 0x08000000
+#define BGE_FLOWATTN_SDI 0x10000000
+#define BGE_FLOWATTN_SRS 0x20000000
+#define BGE_FLOWATTN_SBDC 0x40000000
+#define BGE_FLOWATTN_SBDI 0x80000000
+
+/*
+ * Memory arbiter registers
+ */
+#define BGE_MARB_MODE 0x4000
+#define BGE_MARB_STATUS 0x4004
+#define BGE_MARB_TRAPADDR_HI 0x4008
+#define BGE_MARB_TRAPADDR_LO 0x400C
+
+/* Memory arbiter mode register */
+#define BGE_MARBMODE_RESET 0x00000001
+#define BGE_MARBMODE_ENABLE 0x00000002
+#define BGE_MARBMODE_TX_ADDR_TRAP 0x00000004
+#define BGE_MARBMODE_RX_ADDR_TRAP 0x00000008
+#define BGE_MARBMODE_DMAW1_TRAP 0x00000010
+#define BGE_MARBMODE_DMAR1_TRAP 0x00000020
+#define BGE_MARBMODE_RXRISC_TRAP 0x00000040
+#define BGE_MARBMODE_TXRISC_TRAP 0x00000080
+#define BGE_MARBMODE_PCI_TRAP 0x00000100
+#define BGE_MARBMODE_DMAR2_TRAP 0x00000200
+#define BGE_MARBMODE_RXQ_TRAP 0x00000400
+#define BGE_MARBMODE_RXDI1_TRAP 0x00000800
+#define BGE_MARBMODE_RXDI2_TRAP 0x00001000
+#define BGE_MARBMODE_DC_GRPMEM_TRAP 0x00002000
+#define BGE_MARBMODE_HCOAL_TRAP 0x00004000
+#define BGE_MARBMODE_MBUF_TRAP 0x00008000
+#define BGE_MARBMODE_TXDI_TRAP 0x00010000
+#define BGE_MARBMODE_SDC_DMAC_TRAP 0x00020000
+#define BGE_MARBMODE_TXBD_TRAP 0x00040000
+#define BGE_MARBMODE_BUFFMAN_TRAP 0x00080000
+#define BGE_MARBMODE_DMAW2_TRAP 0x00100000
+#define BGE_MARBMODE_XTSSRAM_ROFLO_TRAP 0x00200000
+#define BGE_MARBMODE_XTSSRAM_RUFLO_TRAP 0x00400000
+#define BGE_MARBMODE_XTSSRAM_WOFLO_TRAP 0x00800000
+#define BGE_MARBMODE_XTSSRAM_WUFLO_TRAP 0x01000000
+#define BGE_MARBMODE_XTSSRAM_PERR_TRAP 0x02000000
+
+/* Memory arbiter status register */
+#define BGE_MARBSTAT_TX_ADDR_TRAP 0x00000004
+#define BGE_MARBSTAT_RX_ADDR_TRAP 0x00000008
+#define BGE_MARBSTAT_DMAW1_TRAP 0x00000010
+#define BGE_MARBSTAT_DMAR1_TRAP 0x00000020
+#define BGE_MARBSTAT_RXRISC_TRAP 0x00000040
+#define BGE_MARBSTAT_TXRISC_TRAP 0x00000080
+#define BGE_MARBSTAT_PCI_TRAP 0x00000100
+#define BGE_MARBSTAT_DMAR2_TRAP 0x00000200
+#define BGE_MARBSTAT_RXQ_TRAP 0x00000400
+#define BGE_MARBSTAT_RXDI1_TRAP 0x00000800
+#define BGE_MARBSTAT_RXDI2_TRAP 0x00001000
+#define BGE_MARBSTAT_DC_GRPMEM_TRAP 0x00002000
+#define BGE_MARBSTAT_HCOAL_TRAP 0x00004000
+#define BGE_MARBSTAT_MBUF_TRAP 0x00008000
+#define BGE_MARBSTAT_TXDI_TRAP 0x00010000
+#define BGE_MARBSTAT_SDC_DMAC_TRAP 0x00020000
+#define BGE_MARBSTAT_TXBD_TRAP 0x00040000
+#define BGE_MARBSTAT_BUFFMAN_TRAP 0x00080000
+#define BGE_MARBSTAT_DMAW2_TRAP 0x00100000
+#define BGE_MARBSTAT_XTSSRAM_ROFLO_TRAP 0x00200000
+#define BGE_MARBSTAT_XTSSRAM_RUFLO_TRAP 0x00400000
+#define BGE_MARBSTAT_XTSSRAM_WOFLO_TRAP 0x00800000
+#define BGE_MARBSTAT_XTSSRAM_WUFLO_TRAP 0x01000000
+#define BGE_MARBSTAT_XTSSRAM_PERR_TRAP 0x02000000
+
+/*
+ * Buffer manager control registers
+ */
+#define BGE_BMAN_MODE 0x4400
+#define BGE_BMAN_STATUS 0x4404
+#define BGE_BMAN_MBUFPOOL_BASEADDR 0x4408
+#define BGE_BMAN_MBUFPOOL_LEN 0x440C
+#define BGE_BMAN_MBUFPOOL_READDMA_LOWAT 0x4410
+#define BGE_BMAN_MBUFPOOL_MACRX_LOWAT 0x4414
+#define BGE_BMAN_MBUFPOOL_HIWAT 0x4418
+#define BGE_BMAN_RXCPU_MBALLOC_REQ 0x441C
+#define BGE_BMAN_RXCPU_MBALLOC_RESP 0x4420
+#define BGE_BMAN_TXCPU_MBALLOC_REQ 0x4424
+#define BGE_BMAN_TXCPU_MBALLOC_RESP 0x4428
+#define BGE_BMAN_DMA_DESCPOOL_BASEADDR 0x442C
+#define BGE_BMAN_DMA_DESCPOOL_LEN 0x4430
+#define BGE_BMAN_DMA_DESCPOOL_LOWAT 0x4434
+#define BGE_BMAN_DMA_DESCPOOL_HIWAT 0x4438
+#define BGE_BMAN_RXCPU_DMAALLOC_REQ 0x443C
+#define BGE_BMAN_RXCPU_DMAALLOC_RESP 0x4440
+#define BGE_BMAN_TXCPU_DMAALLOC_REQ 0x4444
+#define BGE_BMAN_TXCPU_DMALLLOC_RESP 0x4448
+#define BGE_BMAN_HWDIAG_1 0x444C
+#define BGE_BMAN_HWDIAG_2 0x4450
+#define BGE_BMAN_HWDIAG_3 0x4454
+
+/* Buffer manager mode register */
+#define BGE_BMANMODE_RESET 0x00000001
+#define BGE_BMANMODE_ENABLE 0x00000002
+#define BGE_BMANMODE_ATTN 0x00000004
+#define BGE_BMANMODE_TESTMODE 0x00000008
+#define BGE_BMANMODE_LOMBUF_ATTN 0x00000010
+
+/* Buffer manager status register */
+#define BGE_BMANSTAT_ERRO 0x00000004 /* NOTE(review): name appears truncated ("ERROR"?); kept as-is for source compatibility — confirm against users before renaming */
+#define BGE_BMANSTAT_LOWMBUF_ERROR 0x00000010
+
+
+/*
+ * Read DMA Control registers
+ */
+#define BGE_RDMA_MODE 0x4800
+#define BGE_RDMA_STATUS 0x4804
+
+/* Read DMA mode register */
+#define BGE_RDMAMODE_RESET 0x00000001
+#define BGE_RDMAMODE_ENABLE 0x00000002
+#define BGE_RDMAMODE_PCI_TGT_ABRT_ATTN 0x00000004
+#define BGE_RDMAMODE_PCI_MSTR_ABRT_ATTN 0x00000008
+#define BGE_RDMAMODE_PCI_PERR_ATTN 0x00000010
+#define BGE_RDMAMODE_PCI_ADDROFLOW_ATTN 0x00000020
+#define BGE_RDMAMODE_PCI_FIFOOFLOW_ATTN 0x00000040
+#define BGE_RDMAMODE_PCI_FIFOUFLOW_ATTN 0x00000080
+#define BGE_RDMAMODE_PCI_FIFOOREAD_ATTN 0x00000100
+#define BGE_RDMAMODE_LOCWRITE_TOOBIG 0x00000200
+#define BGE_RDMAMODE_ALL_ATTNS 0x000003FC
+
+/* Read DMA status register */
+#define BGE_RDMASTAT_PCI_TGT_ABRT_ATTN 0x00000004
+#define BGE_RDMASTAT_PCI_MSTR_ABRT_ATTN 0x00000008
+#define BGE_RDMASTAT_PCI_PERR_ATTN 0x00000010
+#define BGE_RDMASTAT_PCI_ADDROFLOW_ATTN 0x00000020
+#define BGE_RDMASTAT_PCI_FIFOOFLOW_ATTN 0x00000040
+#define BGE_RDMASTAT_PCI_FIFOUFLOW_ATTN 0x00000080
+#define BGE_RDMASTAT_PCI_FIFOOREAD_ATTN 0x00000100
+#define BGE_RDMASTAT_LOCWRITE_TOOBIG 0x00000200
+
+/*
+ * Write DMA control registers
+ */
+#define BGE_WDMA_MODE 0x4C00
+#define BGE_WDMA_STATUS 0x4C04
+
+/* Write DMA mode register */
+#define BGE_WDMAMODE_RESET 0x00000001
+#define BGE_WDMAMODE_ENABLE 0x00000002
+#define BGE_WDMAMODE_PCI_TGT_ABRT_ATTN 0x00000004
+#define BGE_WDMAMODE_PCI_MSTR_ABRT_ATTN 0x00000008
+#define BGE_WDMAMODE_PCI_PERR_ATTN 0x00000010
+#define BGE_WDMAMODE_PCI_ADDROFLOW_ATTN 0x00000020
+#define BGE_WDMAMODE_PCI_FIFOOFLOW_ATTN 0x00000040
+#define BGE_WDMAMODE_PCI_FIFOUFLOW_ATTN 0x00000080
+#define BGE_WDMAMODE_PCI_FIFOOREAD_ATTN 0x00000100
+#define BGE_WDMAMODE_LOCREAD_TOOBIG 0x00000200
+#define BGE_WDMAMODE_ALL_ATTNS 0x000003FC
+
+/* Write DMA status register */
+#define BGE_WDMASTAT_PCI_TGT_ABRT_ATTN 0x00000004
+#define BGE_WDMASTAT_PCI_MSTR_ABRT_ATTN 0x00000008
+#define BGE_WDMASTAT_PCI_PERR_ATTN 0x00000010
+#define BGE_WDMASTAT_PCI_ADDROFLOW_ATTN 0x00000020
+#define BGE_WDMASTAT_PCI_FIFOOFLOW_ATTN 0x00000040
+#define BGE_WDMASTAT_PCI_FIFOUFLOW_ATTN 0x00000080
+#define BGE_WDMASTAT_PCI_FIFOOREAD_ATTN 0x00000100
+#define BGE_WDMASTAT_LOCREAD_TOOBIG 0x00000200
+
+
+/*
+ * RX CPU registers
+ */
+#define BGE_RXCPU_MODE 0x5000
+#define BGE_RXCPU_STATUS 0x5004
+#define BGE_RXCPU_PC 0x501C
+
+/* RX CPU mode register */
+#define BGE_RXCPUMODE_RESET 0x00000001
+#define BGE_RXCPUMODE_SINGLESTEP 0x00000002
+#define BGE_RXCPUMODE_P0_DATAHLT_ENB 0x00000004
+#define BGE_RXCPUMODE_P0_INSTRHLT_ENB 0x00000008
+#define BGE_RXCPUMODE_WR_POSTBUF_ENB 0x00000010
+#define BGE_RXCPUMODE_DATACACHE_ENB 0x00000020
+#define BGE_RXCPUMODE_ROMFAIL 0x00000040
+#define BGE_RXCPUMODE_WATCHDOG_ENB 0x00000080
+#define BGE_RXCPUMODE_INSTRCACHE_PRF 0x00000100
+#define BGE_RXCPUMODE_INSTRCACHE_FLUSH 0x00000200
+#define BGE_RXCPUMODE_HALTCPU 0x00000400
+#define BGE_RXCPUMODE_INVDATAHLT_ENB 0x00000800
+#define BGE_RXCPUMODE_MADDRTRAPHLT_ENB 0x00001000
+#define BGE_RXCPUMODE_RADDRTRAPHLT_ENB 0x00002000
+
+/* RX CPU status register */
+#define BGE_RXCPUSTAT_HW_BREAKPOINT 0x00000001
+#define BGE_RXCPUSTAT_HLTINSTR_EXECUTED 0x00000002
+#define BGE_RXCPUSTAT_INVALID_INSTR 0x00000004
+#define BGE_RXCPUSTAT_P0_DATAREF 0x00000008
+#define BGE_RXCPUSTAT_P0_INSTRREF 0x00000010
+#define BGE_RXCPUSTAT_INVALID_DATAACC 0x00000020
+#define BGE_RXCPUSTAT_INVALID_INSTRFTCH 0x00000040
+#define BGE_RXCPUSTAT_BAD_MEMALIGN 0x00000080
+#define BGE_RXCPUSTAT_MADDR_TRAP 0x00000100
+#define BGE_RXCPUSTAT_REGADDR_TRAP 0x00000200
+#define BGE_RXCPUSTAT_DATAACC_STALL 0x00001000
+#define BGE_RXCPUSTAT_INSTRFETCH_STALL 0x00002000
+#define BGE_RXCPUSTAT_MA_WR_FIFOOFLOW 0x08000000
+#define BGE_RXCPUSTAT_MA_RD_FIFOOFLOW 0x10000000
+#define BGE_RXCPUSTAT_MA_DATAMASK_OFLOW 0x20000000
+#define BGE_RXCPUSTAT_MA_REQ_FIFOOFLOW 0x40000000
+#define BGE_RXCPUSTAT_BLOCKING_READ 0x80000000
+
+/*
+ * V? CPU registers
+ */
+#define BGE_VCPU_STATUS 0x5100
+#define BGE_VCPU_EXT_CTRL 0x6890
+
+#define BGE_VCPU_STATUS_INIT_DONE 0x04000000
+#define BGE_VCPU_STATUS_DRV_RESET 0x08000000
+
+#define BGE_VCPU_EXT_CTRL_HALT_CPU 0x00400000
+#define BGE_VCPU_EXT_CTRL_DISABLE_WOL 0x20000000
+
+/*
+ * TX CPU registers
+ */
+#define BGE_TXCPU_MODE 0x5400
+#define BGE_TXCPU_STATUS 0x5404
+#define BGE_TXCPU_PC 0x541C
+
+/* TX CPU mode register */
+#define BGE_TXCPUMODE_RESET 0x00000001
+#define BGE_TXCPUMODE_SINGLESTEP 0x00000002
+#define BGE_TXCPUMODE_P0_DATAHLT_ENB 0x00000004
+#define BGE_TXCPUMODE_P0_INSTRHLT_ENB 0x00000008
+#define BGE_TXCPUMODE_WR_POSTBUF_ENB 0x00000010
+#define BGE_TXCPUMODE_DATACACHE_ENB 0x00000020
+#define BGE_TXCPUMODE_ROMFAIL 0x00000040
+#define BGE_TXCPUMODE_WATCHDOG_ENB 0x00000080
+#define BGE_TXCPUMODE_INSTRCACHE_PRF 0x00000100
+#define BGE_TXCPUMODE_INSTRCACHE_FLUSH 0x00000200
+#define BGE_TXCPUMODE_HALTCPU 0x00000400
+#define BGE_TXCPUMODE_INVDATAHLT_ENB 0x00000800
+#define BGE_TXCPUMODE_MADDRTRAPHLT_ENB 0x00001000
+
+/* TX CPU status register */
+#define BGE_TXCPUSTAT_HW_BREAKPOINT 0x00000001
+#define BGE_TXCPUSTAT_HLTINSTR_EXECUTED 0x00000002
+#define BGE_TXCPUSTAT_INVALID_INSTR 0x00000004
+#define BGE_TXCPUSTAT_P0_DATAREF 0x00000008
+#define BGE_TXCPUSTAT_P0_INSTRREF 0x00000010
+#define BGE_TXCPUSTAT_INVALID_DATAACC 0x00000020
+#define BGE_TXCPUSTAT_INVALID_INSTRFTCH 0x00000040
+#define BGE_TXCPUSTAT_BAD_MEMALIGN 0x00000080
+#define BGE_TXCPUSTAT_MADDR_TRAP 0x00000100
+#define BGE_TXCPUSTAT_REGADDR_TRAP 0x00000200
+#define BGE_TXCPUSTAT_DATAACC_STALL 0x00001000
+#define BGE_TXCPUSTAT_INSTRFETCH_STALL 0x00002000
+#define BGE_TXCPUSTAT_MA_WR_FIFOOFLOW 0x08000000
+#define BGE_TXCPUSTAT_MA_RD_FIFOOFLOW 0x10000000
+#define BGE_TXCPUSTAT_MA_DATAMASK_OFLOW 0x20000000
+#define BGE_TXCPUSTAT_MA_REQ_FIFOOFLOW 0x40000000
+#define BGE_TXCPUSTAT_BLOCKING_READ 0x80000000
+
+
+/*
+ * Low priority mailbox registers
+ */
+#define BGE_LPMBX_IRQ0_HI 0x5800
+#define BGE_LPMBX_IRQ0_LO 0x5804
+#define BGE_LPMBX_IRQ1_HI 0x5808
+#define BGE_LPMBX_IRQ1_LO 0x580C
+#define BGE_LPMBX_IRQ2_HI 0x5810
+#define BGE_LPMBX_IRQ2_LO 0x5814
+#define BGE_LPMBX_IRQ3_HI 0x5818
+#define BGE_LPMBX_IRQ3_LO 0x581C
+/*
+ * General-purpose scratch mailboxes GEN0..GEN7: one HI/LO 64-bit pair
+ * every 8 bytes, starting at 0x5820.
+ */
+#define BGE_LPMBX_GEN0_HI 0x5820
+#define BGE_LPMBX_GEN0_LO 0x5824
+#define BGE_LPMBX_GEN1_HI 0x5828
+#define BGE_LPMBX_GEN1_LO 0x582C
+#define BGE_LPMBX_GEN2_HI 0x5830
+#define BGE_LPMBX_GEN2_LO 0x5834
+#define BGE_LPMBX_GEN3_HI 0x5838 /* fixed: was 0x5828, a duplicate of GEN1_HI */
+#define BGE_LPMBX_GEN3_LO 0x583C /* fixed: was 0x582C, a duplicate of GEN1_LO */
+#define BGE_LPMBX_GEN4_HI 0x5840
+#define BGE_LPMBX_GEN4_LO 0x5844
+#define BGE_LPMBX_GEN5_HI 0x5848
+#define BGE_LPMBX_GEN5_LO 0x584C
+#define BGE_LPMBX_GEN6_HI 0x5850
+#define BGE_LPMBX_GEN6_LO 0x5854
+#define BGE_LPMBX_GEN7_HI 0x5858
+#define BGE_LPMBX_GEN7_LO 0x585C
+#define BGE_LPMBX_RELOAD_STATS_HI 0x5860
+#define BGE_LPMBX_RELOAD_STATS_LO 0x5864
+#define BGE_LPMBX_RX_STD_PROD_HI 0x5868
+#define BGE_LPMBX_RX_STD_PROD_LO 0x586C
+#define BGE_LPMBX_RX_JUMBO_PROD_HI 0x5870
+#define BGE_LPMBX_RX_JUMBO_PROD_LO 0x5874
+#define BGE_LPMBX_RX_MINI_PROD_HI 0x5878
+#define BGE_LPMBX_RX_MINI_PROD_LO 0x587C
+#define BGE_LPMBX_RX_CONS0_HI 0x5880
+#define BGE_LPMBX_RX_CONS0_LO 0x5884
+#define BGE_LPMBX_RX_CONS1_HI 0x5888
+#define BGE_LPMBX_RX_CONS1_LO 0x588C
+#define BGE_LPMBX_RX_CONS2_HI 0x5890
+#define BGE_LPMBX_RX_CONS2_LO 0x5894
+#define BGE_LPMBX_RX_CONS3_HI 0x5898
+#define BGE_LPMBX_RX_CONS3_LO 0x589C
+#define BGE_LPMBX_RX_CONS4_HI 0x58A0
+#define BGE_LPMBX_RX_CONS4_LO 0x58A4
+#define BGE_LPMBX_RX_CONS5_HI 0x58A8
+#define BGE_LPMBX_RX_CONS5_LO 0x58AC
+#define BGE_LPMBX_RX_CONS6_HI 0x58B0
+#define BGE_LPMBX_RX_CONS6_LO 0x58B4
+#define BGE_LPMBX_RX_CONS7_HI 0x58B8
+#define BGE_LPMBX_RX_CONS7_LO 0x58BC
+#define BGE_LPMBX_RX_CONS8_HI 0x58C0
+#define BGE_LPMBX_RX_CONS8_LO 0x58C4
+#define BGE_LPMBX_RX_CONS9_HI 0x58C8
+#define BGE_LPMBX_RX_CONS9_LO 0x58CC
+#define BGE_LPMBX_RX_CONS10_HI 0x58D0
+#define BGE_LPMBX_RX_CONS10_LO 0x58D4
+#define BGE_LPMBX_RX_CONS11_HI 0x58D8
+#define BGE_LPMBX_RX_CONS11_LO 0x58DC
+#define BGE_LPMBX_RX_CONS12_HI 0x58E0
+#define BGE_LPMBX_RX_CONS12_LO 0x58E4
+#define BGE_LPMBX_RX_CONS13_HI 0x58E8
+#define BGE_LPMBX_RX_CONS13_LO 0x58EC
+#define BGE_LPMBX_RX_CONS14_HI 0x58F0
+#define BGE_LPMBX_RX_CONS14_LO 0x58F4
+#define BGE_LPMBX_RX_CONS15_HI 0x58F8
+#define BGE_LPMBX_RX_CONS15_LO 0x58FC
+#define BGE_LPMBX_TX_HOST_PROD0_HI 0x5900
+#define BGE_LPMBX_TX_HOST_PROD0_LO 0x5904
+#define BGE_LPMBX_TX_HOST_PROD1_HI 0x5908
+#define BGE_LPMBX_TX_HOST_PROD1_LO 0x590C
+#define BGE_LPMBX_TX_HOST_PROD2_HI 0x5910
+#define BGE_LPMBX_TX_HOST_PROD2_LO 0x5914
+#define BGE_LPMBX_TX_HOST_PROD3_HI 0x5918
+#define BGE_LPMBX_TX_HOST_PROD3_LO 0x591C
+#define BGE_LPMBX_TX_HOST_PROD4_HI 0x5920
+#define BGE_LPMBX_TX_HOST_PROD4_LO 0x5924
+#define BGE_LPMBX_TX_HOST_PROD5_HI 0x5928
+#define BGE_LPMBX_TX_HOST_PROD5_LO 0x592C
+#define BGE_LPMBX_TX_HOST_PROD6_HI 0x5930
+#define BGE_LPMBX_TX_HOST_PROD6_LO 0x5934
+#define BGE_LPMBX_TX_HOST_PROD7_HI 0x5938
+#define BGE_LPMBX_TX_HOST_PROD7_LO 0x593C
+#define BGE_LPMBX_TX_HOST_PROD8_HI 0x5940
+#define BGE_LPMBX_TX_HOST_PROD8_LO 0x5944
+#define BGE_LPMBX_TX_HOST_PROD9_HI 0x5948
+#define BGE_LPMBX_TX_HOST_PROD9_LO 0x594C
+#define BGE_LPMBX_TX_HOST_PROD10_HI 0x5950
+#define BGE_LPMBX_TX_HOST_PROD10_LO 0x5954
+#define BGE_LPMBX_TX_HOST_PROD11_HI 0x5958
+#define BGE_LPMBX_TX_HOST_PROD11_LO 0x595C
+#define BGE_LPMBX_TX_HOST_PROD12_HI 0x5960
+#define BGE_LPMBX_TX_HOST_PROD12_LO 0x5964
+#define BGE_LPMBX_TX_HOST_PROD13_HI 0x5968
+#define BGE_LPMBX_TX_HOST_PROD13_LO 0x596C
+#define BGE_LPMBX_TX_HOST_PROD14_HI 0x5970
+#define BGE_LPMBX_TX_HOST_PROD14_LO 0x5974
+#define BGE_LPMBX_TX_HOST_PROD15_HI 0x5978
+#define BGE_LPMBX_TX_HOST_PROD15_LO 0x597C
+#define BGE_LPMBX_TX_NIC_PROD0_HI 0x5980
+#define BGE_LPMBX_TX_NIC_PROD0_LO 0x5984
+#define BGE_LPMBX_TX_NIC_PROD1_HI 0x5988
+#define BGE_LPMBX_TX_NIC_PROD1_LO 0x598C
+#define BGE_LPMBX_TX_NIC_PROD2_HI 0x5990
+#define BGE_LPMBX_TX_NIC_PROD2_LO 0x5994
+#define BGE_LPMBX_TX_NIC_PROD3_HI 0x5998
+#define BGE_LPMBX_TX_NIC_PROD3_LO 0x599C
+#define BGE_LPMBX_TX_NIC_PROD4_HI 0x59A0
+#define BGE_LPMBX_TX_NIC_PROD4_LO 0x59A4
+#define BGE_LPMBX_TX_NIC_PROD5_HI 0x59A8
+#define BGE_LPMBX_TX_NIC_PROD5_LO 0x59AC
+#define BGE_LPMBX_TX_NIC_PROD6_HI 0x59B0
+#define BGE_LPMBX_TX_NIC_PROD6_LO 0x59B4
+#define BGE_LPMBX_TX_NIC_PROD7_HI 0x59B8
+#define BGE_LPMBX_TX_NIC_PROD7_LO 0x59BC
+#define BGE_LPMBX_TX_NIC_PROD8_HI 0x59C0
+#define BGE_LPMBX_TX_NIC_PROD8_LO 0x59C4
+#define BGE_LPMBX_TX_NIC_PROD9_HI 0x59C8
+#define BGE_LPMBX_TX_NIC_PROD9_LO 0x59CC
+#define BGE_LPMBX_TX_NIC_PROD10_HI 0x59D0
+#define BGE_LPMBX_TX_NIC_PROD10_LO 0x59D4
+#define BGE_LPMBX_TX_NIC_PROD11_HI 0x59D8
+#define BGE_LPMBX_TX_NIC_PROD11_LO 0x59DC
+#define BGE_LPMBX_TX_NIC_PROD12_HI 0x59E0
+#define BGE_LPMBX_TX_NIC_PROD12_LO 0x59E4
+#define BGE_LPMBX_TX_NIC_PROD13_HI 0x59E8
+#define BGE_LPMBX_TX_NIC_PROD13_LO 0x59EC
+#define BGE_LPMBX_TX_NIC_PROD14_HI 0x59F0
+#define BGE_LPMBX_TX_NIC_PROD14_LO 0x59F4
+#define BGE_LPMBX_TX_NIC_PROD15_HI 0x59F8
+#define BGE_LPMBX_TX_NIC_PROD15_LO 0x59FC
+
+/*
+ * Flow throw Queue reset register
+ */
+#define BGE_FTQ_RESET 0x5C00
+
+#define BGE_FTQRESET_DMAREAD 0x00000002
+#define BGE_FTQRESET_DMAHIPRIO_RD 0x00000004
+#define BGE_FTQRESET_DMADONE 0x00000010
+#define BGE_FTQRESET_SBDC 0x00000020
+#define BGE_FTQRESET_SDI 0x00000040
+#define BGE_FTQRESET_WDMA 0x00000080
+#define BGE_FTQRESET_DMAHIPRIO_WR 0x00000100
+#define BGE_FTQRESET_TYPE1_SOFTWARE 0x00000200
+#define BGE_FTQRESET_SDC 0x00000400
+#define BGE_FTQRESET_HCC 0x00000800
+#define BGE_FTQRESET_TXFIFO 0x00001000
+#define BGE_FTQRESET_MBC 0x00002000
+#define BGE_FTQRESET_RBDC 0x00004000
+#define BGE_FTQRESET_RXLP 0x00008000
+#define BGE_FTQRESET_RDBDI 0x00010000
+#define BGE_FTQRESET_RDC 0x00020000
+#define BGE_FTQRESET_TYPE2_SOFTWARE 0x00040000
+
+/*
+ * Message Signaled Interrupt registers
+ */
+#define BGE_MSI_MODE 0x6000
+#define BGE_MSI_STATUS 0x6004
+#define BGE_MSI_FIFOACCESS 0x6008
+
+/* MSI mode register */
+#define BGE_MSIMODE_RESET 0x00000001
+#define BGE_MSIMODE_ENABLE 0x00000002
+#define BGE_MSIMODE_PCI_TGT_ABRT_ATTN 0x00000004
+#define BGE_MSIMODE_PCI_MSTR_ABRT_ATTN 0x00000008
+#define BGE_MSIMODE_PCI_PERR_ATTN 0x00000010
+#define BGE_MSIMODE_MSI_FIFOUFLOW_ATTN 0x00000020
+#define BGE_MSIMODE_MSI_FIFOOFLOW_ATTN 0x00000040
+
+/* MSI status register */
+#define BGE_MSISTAT_PCI_TGT_ABRT_ATTN 0x00000004
+#define BGE_MSISTAT_PCI_MSTR_ABRT_ATTN 0x00000008
+#define BGE_MSISTAT_PCI_PERR_ATTN 0x00000010
+#define BGE_MSISTAT_MSI_FIFOUFLOW_ATTN 0x00000020
+#define BGE_MSISTAT_MSI_FIFOOFLOW_ATTN 0x00000040
+
+
+/*
+ * DMA Completion registers
+ */
+#define BGE_DMAC_MODE 0x6400
+
+/* DMA Completion mode register */
+#define BGE_DMACMODE_RESET 0x00000001
+#define BGE_DMACMODE_ENABLE 0x00000002
+
+
+/*
+ * General control registers.
+ */
+#define BGE_MODE_CTL 0x6800
+#define BGE_MISC_CFG 0x6804
+#define BGE_MISC_LOCAL_CTL 0x6808
+#define BGE_CPU_EVENT 0x6810
+#define BGE_EE_ADDR 0x6838
+#define BGE_EE_DATA 0x683C
+#define BGE_EE_CTL 0x6840
+#define BGE_MDI_CTL 0x6844
+#define BGE_EE_DELAY 0x6848
+#define BGE_FASTBOOT_PC 0x6894
+
+/*
+ * NVRAM Control registers
+ */
+#define BGE_NVRAM_CMD 0x7000
+#define BGE_NVRAM_STAT 0x7004
+#define BGE_NVRAM_WRDATA 0x7008
+#define BGE_NVRAM_ADDR 0x700c
+#define BGE_NVRAM_RDDATA 0x7010
+#define BGE_NVRAM_CFG1 0x7014
+#define BGE_NVRAM_CFG2 0x7018
+#define BGE_NVRAM_CFG3 0x701c
+#define BGE_NVRAM_SWARB 0x7020
+#define BGE_NVRAM_ACCESS 0x7024
+#define BGE_NVRAM_WRITE1 0x7028
+
+#define BGE_NVRAMCMD_RESET 0x00000001
+#define BGE_NVRAMCMD_DONE 0x00000008
+#define BGE_NVRAMCMD_START 0x00000010
+#define BGE_NVRAMCMD_WR 0x00000020 /* 1 = wr, 0 = rd */
+#define BGE_NVRAMCMD_ERASE 0x00000040
+#define BGE_NVRAMCMD_FIRST 0x00000080
+#define BGE_NVRAMCMD_LAST 0x00000100
+
+#define BGE_NVRAM_READCMD \
+ (BGE_NVRAMCMD_FIRST|BGE_NVRAMCMD_LAST| \
+ BGE_NVRAMCMD_START|BGE_NVRAMCMD_DONE)
+#define BGE_NVRAM_WRITECMD \
+ (BGE_NVRAMCMD_FIRST|BGE_NVRAMCMD_LAST| \
+ BGE_NVRAMCMD_START|BGE_NVRAMCMD_DONE|BGE_NVRAMCMD_WR)
+
+#define BGE_NVRAMSWARB_SET0 0x00000001
+#define BGE_NVRAMSWARB_SET1 0x00000002
+#define BGE_NVRAMSWARB_SET2 0x00000003
+#define BGE_NVRAMSWARB_SET3 0x00000004
+#define BGE_NVRAMSWARB_CLR0 0x00000010
+#define BGE_NVRAMSWARB_CLR1 0x00000020
+#define BGE_NVRAMSWARB_CLR2 0x00000040
+#define BGE_NVRAMSWARB_CLR3 0x00000080
+#define BGE_NVRAMSWARB_GNT0 0x00000100
+#define BGE_NVRAMSWARB_GNT1 0x00000200
+#define BGE_NVRAMSWARB_GNT2 0x00000400
+#define BGE_NVRAMSWARB_GNT3 0x00000800
+#define BGE_NVRAMSWARB_REQ0 0x00001000
+#define BGE_NVRAMSWARB_REQ1 0x00002000
+#define BGE_NVRAMSWARB_REQ2 0x00004000
+#define BGE_NVRAMSWARB_REQ3 0x00008000
+
+#define BGE_NVRAMACC_ENABLE 0x00000001
+#define BGE_NVRAMACC_WRENABLE 0x00000002
+
+/* Mode control register */
+#define BGE_MODECTL_INT_SNDCOAL_ONLY 0x00000001
+#define BGE_MODECTL_BYTESWAP_NONFRAME 0x00000002
+#define BGE_MODECTL_WORDSWAP_NONFRAME 0x00000004
+#define BGE_MODECTL_BYTESWAP_DATA 0x00000010
+#define BGE_MODECTL_WORDSWAP_DATA 0x00000020
+#define BGE_MODECTL_NO_FRAME_CRACKING 0x00000200
+#define BGE_MODECTL_NO_RX_CRC 0x00000400
+#define BGE_MODECTL_RX_BADFRAMES 0x00000800
+#define BGE_MODECTL_NO_TX_INTR 0x00002000
+#define BGE_MODECTL_NO_RX_INTR 0x00004000
+#define BGE_MODECTL_FORCE_PCI32 0x00008000
+#define BGE_MODECTL_STACKUP 0x00010000
+#define BGE_MODECTL_HOST_SEND_BDS 0x00020000
+#define BGE_MODECTL_TX_NO_PHDR_CSUM 0x00100000
+#define BGE_MODECTL_RX_NO_PHDR_CSUM 0x00800000
+#define BGE_MODECTL_TX_ATTN_INTR 0x01000000
+#define BGE_MODECTL_RX_ATTN_INTR 0x02000000
+#define BGE_MODECTL_MAC_ATTN_INTR 0x04000000
+#define BGE_MODECTL_DMA_ATTN_INTR 0x08000000
+#define BGE_MODECTL_FLOWCTL_ATTN_INTR 0x10000000
+#define BGE_MODECTL_4X_SENDRING_SZ 0x20000000
+#define BGE_MODECTL_FW_PROCESS_MCASTS 0x40000000
+
+/* Misc. config register */
+#define BGE_MISCCFG_RESET_CORE_CLOCKS 0x00000001
+#define BGE_MISCCFG_TIMER_PRESCALER 0x000000FE
+#define BGE_MISCCFG_BOARD_ID 0x0001E000
+#define BGE_MISCCFG_BOARD_ID_5788 0x00010000
+#define BGE_MISCCFG_BOARD_ID_5788M 0x00018000
+#define BGE_MISCCFG_EPHY_IDDQ 0x00200000
+
+#define BGE_32BITTIME_66MHZ (0x41 << 1)
+
+/* Misc. Local Control */
+#define BGE_MLC_INTR_STATE 0x00000001
+#define BGE_MLC_INTR_CLR 0x00000002
+#define BGE_MLC_INTR_SET 0x00000004
+#define BGE_MLC_INTR_ONATTN 0x00000008
+#define BGE_MLC_MISCIO_IN0 0x00000100
+#define BGE_MLC_MISCIO_IN1 0x00000200
+#define BGE_MLC_MISCIO_IN2 0x00000400
+#define BGE_MLC_MISCIO_OUTEN0 0x00000800
+#define BGE_MLC_MISCIO_OUTEN1 0x00001000
+#define BGE_MLC_MISCIO_OUTEN2 0x00002000
+#define BGE_MLC_MISCIO_OUT0 0x00004000
+#define BGE_MLC_MISCIO_OUT1 0x00008000
+#define BGE_MLC_MISCIO_OUT2 0x00010000
+#define BGE_MLC_EXTRAM_ENB 0x00020000
+#define BGE_MLC_SRAM_SIZE 0x001C0000
+#define BGE_MLC_BANK_SEL 0x00200000 /* 0 = 2 banks, 1 == 1 */
+#define BGE_MLC_SSRAM_TYPE 0x00400000 /* 1 = ZBT, 0 = standard */
+#define BGE_MLC_SSRAM_CYC_DESEL 0x00800000
+#define BGE_MLC_AUTO_EEPROM 0x01000000
+
+#define BGE_SSRAMSIZE_256KB 0x00000000
+#define BGE_SSRAMSIZE_512KB 0x00040000
+#define BGE_SSRAMSIZE_1MB 0x00080000
+#define BGE_SSRAMSIZE_2MB 0x000C0000
+#define BGE_SSRAMSIZE_4MB 0x00100000
+#define BGE_SSRAMSIZE_8MB 0x00140000
+#define BGE_SSRAMSIZE_16M 0x00180000
+
+/* EEPROM address register */
+#define BGE_EEADDR_ADDRESS 0x0000FFFC
+#define BGE_EEADDR_HALFCLK 0x01FF0000
+#define BGE_EEADDR_START 0x02000000
+#define BGE_EEADDR_DEVID 0x1C000000
+#define BGE_EEADDR_RESET 0x20000000
+#define BGE_EEADDR_DONE 0x40000000
+#define BGE_EEADDR_RW 0x80000000 /* 1 = rd, 0 = wr */
+
+#define BGE_EEDEVID(x) ((x & 7) << 26)
+#define BGE_EEHALFCLK(x) ((x & 0x1FF) << 16)
+#define BGE_HALFCLK_384SCL 0x60
+#define BGE_EE_READCMD \
+ (BGE_EEHALFCLK(BGE_HALFCLK_384SCL)|BGE_EEDEVID(0)| \
+ BGE_EEADDR_START|BGE_EEADDR_RW|BGE_EEADDR_DONE)
+#define BGE_EE_WRCMD \
+ (BGE_EEHALFCLK(BGE_HALFCLK_384SCL)|BGE_EEDEVID(0)| \
+ BGE_EEADDR_START|BGE_EEADDR_DONE)
+
+/* EEPROM Control register */
+#define BGE_EECTL_CLKOUT_TRISTATE 0x00000001
+#define BGE_EECTL_CLKOUT 0x00000002
+#define BGE_EECTL_CLKIN 0x00000004
+#define BGE_EECTL_DATAOUT_TRISTATE 0x00000008
+#define BGE_EECTL_DATAOUT 0x00000010
+#define BGE_EECTL_DATAIN 0x00000020
+
+/* MDI (MII/GMII) access register */
+#define BGE_MDI_DATA 0x00000001
+#define BGE_MDI_DIR 0x00000002
+#define BGE_MDI_SEL 0x00000004
+#define BGE_MDI_CLK 0x00000008
+
+#define BGE_MEMWIN_START 0x00008000
+#define BGE_MEMWIN_END 0x0000FFFF
+
+
+#define BGE_MEMWIN_READ(sc, x, val) \
+ do { \
+ pci_write_config(sc->bge_dev, BGE_PCI_MEMWIN_BASEADDR, \
+ (0xFFFF0000 & x), 4); \
+ val = CSR_READ_4(sc, BGE_MEMWIN_START + (x & 0xFFFF)); \
+ } while(0)
+
+#define BGE_MEMWIN_WRITE(sc, x, val) \
+ do { \
+ pci_write_config(sc->bge_dev, BGE_PCI_MEMWIN_BASEADDR, \
+ (0xFFFF0000 & x), 4); \
+ CSR_WRITE_4(sc, BGE_MEMWIN_START + (x & 0xFFFF), val); \
+ } while(0)
+
+/*
+ * This magic number is written to the firmware mailbox at 0xb50
+ * before a software reset is issued. After the internal firmware
+ * has completed its initialization it will write the opposite of
+ * this value, ~BGE_MAGIC_NUMBER, to the same location, allowing the
+ * driver to synchronize with the firmware.
+ */
+#define BGE_MAGIC_NUMBER 0x4B657654
+
+typedef struct {
+ uint32_t bge_addr_hi;
+ uint32_t bge_addr_lo;
+} bge_hostaddr;
+
+#define BGE_HOSTADDR(x, y) \
+ do { \
+ (x).bge_addr_lo = ((uint64_t) (y) & 0xffffffff); \
+ (x).bge_addr_hi = ((uint64_t) (y) >> 32); \
+ } while(0)
+
+#define BGE_ADDR_LO(y) \
+ ((uint64_t) (y) & 0xFFFFFFFF)
+#define BGE_ADDR_HI(y) \
+ ((uint64_t) (y) >> 32)
+
+/* Ring control block structure */
+struct bge_rcb {
+ bge_hostaddr bge_hostaddr;
+ uint32_t bge_maxlen_flags;
+ uint32_t bge_nicaddr;
+};
+
+#define RCB_WRITE_4(sc, rcb, offset, val) \
+ bus_space_write_4(sc->bge_btag, sc->bge_bhandle, \
+ rcb + offsetof(struct bge_rcb, offset), val)
+#define BGE_RCB_MAXLEN_FLAGS(maxlen, flags) ((maxlen) << 16 | (flags))
+
+#define BGE_RCB_FLAG_USE_EXT_RX_BD 0x0001
+#define BGE_RCB_FLAG_RING_DISABLED 0x0002
+
+struct bge_tx_bd {
+ bge_hostaddr bge_addr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint16_t bge_flags;
+ uint16_t bge_len;
+ uint16_t bge_vlan_tag;
+ uint16_t bge_rsvd;
+#else
+ uint16_t bge_len;
+ uint16_t bge_flags;
+ uint16_t bge_rsvd;
+ uint16_t bge_vlan_tag;
+#endif
+};
+
+#define BGE_TXBDFLAG_TCP_UDP_CSUM 0x0001
+#define BGE_TXBDFLAG_IP_CSUM 0x0002
+#define BGE_TXBDFLAG_END 0x0004
+#define BGE_TXBDFLAG_IP_FRAG 0x0008
+#define BGE_TXBDFLAG_IP_FRAG_END 0x0010
+#define BGE_TXBDFLAG_VLAN_TAG 0x0040
+#define BGE_TXBDFLAG_COAL_NOW 0x0080
+#define BGE_TXBDFLAG_CPU_PRE_DMA 0x0100
+#define BGE_TXBDFLAG_CPU_POST_DMA 0x0200
+#define BGE_TXBDFLAG_INSERT_SRC_ADDR 0x1000
+#define BGE_TXBDFLAG_CHOOSE_SRC_ADDR 0x6000
+#define BGE_TXBDFLAG_NO_CRC 0x8000
+
+#define BGE_NIC_TXRING_ADDR(ringno, size) \
+ BGE_SEND_RING_1_TO_4 + \
+ ((ringno * sizeof(struct bge_tx_bd) * size) / 4)
+
+struct bge_rx_bd {
+ bge_hostaddr bge_addr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint16_t bge_len;
+ uint16_t bge_idx;
+ uint16_t bge_flags;
+ uint16_t bge_type;
+ uint16_t bge_tcp_udp_csum;
+ uint16_t bge_ip_csum;
+ uint16_t bge_vlan_tag;
+ uint16_t bge_error_flag;
+#else
+ uint16_t bge_idx;
+ uint16_t bge_len;
+ uint16_t bge_type;
+ uint16_t bge_flags;
+ uint16_t bge_ip_csum;
+ uint16_t bge_tcp_udp_csum;
+ uint16_t bge_error_flag;
+ uint16_t bge_vlan_tag;
+#endif
+ uint32_t bge_rsvd;
+ uint32_t bge_opaque;
+};
+
+struct bge_extrx_bd {
+ bge_hostaddr bge_addr1;
+ bge_hostaddr bge_addr2;
+ bge_hostaddr bge_addr3;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint16_t bge_len2;
+ uint16_t bge_len1;
+ uint16_t bge_rsvd1;
+ uint16_t bge_len3;
+#else
+ uint16_t bge_len1;
+ uint16_t bge_len2;
+ uint16_t bge_len3;
+ uint16_t bge_rsvd1;
+#endif
+ bge_hostaddr bge_addr0;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint16_t bge_len0;
+ uint16_t bge_idx;
+ uint16_t bge_flags;
+ uint16_t bge_type;
+ uint16_t bge_tcp_udp_csum;
+ uint16_t bge_ip_csum;
+ uint16_t bge_vlan_tag;
+ uint16_t bge_error_flag;
+#else
+ uint16_t bge_idx;
+ uint16_t bge_len0;
+ uint16_t bge_type;
+ uint16_t bge_flags;
+ uint16_t bge_ip_csum;
+ uint16_t bge_tcp_udp_csum;
+ uint16_t bge_error_flag;
+ uint16_t bge_vlan_tag;
+#endif
+ uint32_t bge_rsvd0;
+ uint32_t bge_opaque;
+};
+
+#define BGE_RXBDFLAG_END 0x0004
+#define BGE_RXBDFLAG_JUMBO_RING 0x0020
+#define BGE_RXBDFLAG_VLAN_TAG 0x0040
+#define BGE_RXBDFLAG_ERROR 0x0400
+#define BGE_RXBDFLAG_MINI_RING 0x0800
+#define BGE_RXBDFLAG_IP_CSUM 0x1000
+#define BGE_RXBDFLAG_TCP_UDP_CSUM 0x2000
+#define BGE_RXBDFLAG_TCP_UDP_IS_TCP 0x4000
+
+#define BGE_RXERRFLAG_BAD_CRC 0x0001
+#define BGE_RXERRFLAG_COLL_DETECT 0x0002
+#define BGE_RXERRFLAG_LINK_LOST 0x0004
+#define BGE_RXERRFLAG_PHY_DECODE_ERR 0x0008
+#define BGE_RXERRFLAG_MAC_ABORT 0x0010
+#define BGE_RXERRFLAG_RUNT 0x0020
+#define BGE_RXERRFLAG_TRUNC_NO_RSRCS 0x0040
+#define BGE_RXERRFLAG_GIANT 0x0080
+
+struct bge_sts_idx {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint16_t bge_rx_prod_idx;
+ uint16_t bge_tx_cons_idx;
+#else
+ uint16_t bge_tx_cons_idx;
+ uint16_t bge_rx_prod_idx;
+#endif
+};
+
+struct bge_status_block {
+ uint32_t bge_status;
+ uint32_t bge_rsvd0;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint16_t bge_rx_jumbo_cons_idx;
+ uint16_t bge_rx_std_cons_idx;
+ uint16_t bge_rx_mini_cons_idx;
+ uint16_t bge_rsvd1;
+#else
+ uint16_t bge_rx_std_cons_idx;
+ uint16_t bge_rx_jumbo_cons_idx;
+ uint16_t bge_rsvd1;
+ uint16_t bge_rx_mini_cons_idx;
+#endif
+ struct bge_sts_idx bge_idx[16];
+};
+
+#define BGE_TX_CONSIDX(x, i) x->bge_idx[i].bge_tx_considx
+#define BGE_RX_PRODIDX(x, i) x->bge_idx[i].bge_rx_prodidx
+
+#define BGE_STATFLAG_UPDATED 0x00000001
+#define BGE_STATFLAG_LINKSTATE_CHANGED 0x00000002
+#define BGE_STATFLAG_ERROR 0x00000004
+
+
+/*
+ * Broadcom Vendor ID
+ * (Note: the BCM570x still defaults to the Alteon PCI vendor ID
+ * even though they're now manufactured by Broadcom)
+ */
+#define BCOM_VENDORID 0x14E4
+#define BCOM_DEVICEID_BCM5700 0x1644
+#define BCOM_DEVICEID_BCM5701 0x1645
+#define BCOM_DEVICEID_BCM5702 0x1646
+#define BCOM_DEVICEID_BCM5702X 0x16A6
+#define BCOM_DEVICEID_BCM5702_ALT 0x16C6
+#define BCOM_DEVICEID_BCM5703 0x1647
+#define BCOM_DEVICEID_BCM5703X 0x16A7
+#define BCOM_DEVICEID_BCM5703_ALT 0x16C7
+#define BCOM_DEVICEID_BCM5704C 0x1648
+#define BCOM_DEVICEID_BCM5704S 0x16A8
+#define BCOM_DEVICEID_BCM5704S_ALT 0x1649
+#define BCOM_DEVICEID_BCM5705 0x1653
+#define BCOM_DEVICEID_BCM5705K 0x1654
+#define BCOM_DEVICEID_BCM5705F 0x166E
+#define BCOM_DEVICEID_BCM5705M 0x165D
+#define BCOM_DEVICEID_BCM5705M_ALT 0x165E
+#define BCOM_DEVICEID_BCM5714C 0x1668
+#define BCOM_DEVICEID_BCM5714S 0x1669
+#define BCOM_DEVICEID_BCM5715 0x1678
+#define BCOM_DEVICEID_BCM5715S 0x1679
+#define BCOM_DEVICEID_BCM5720 0x1658
+#define BCOM_DEVICEID_BCM5721 0x1659
+#define BCOM_DEVICEID_BCM5722 0x165A
+#define BCOM_DEVICEID_BCM5750 0x1676
+#define BCOM_DEVICEID_BCM5750M 0x167C
+#define BCOM_DEVICEID_BCM5751 0x1677
+#define BCOM_DEVICEID_BCM5751F 0x167E
+#define BCOM_DEVICEID_BCM5751M 0x167D
+#define BCOM_DEVICEID_BCM5752 0x1600
+#define BCOM_DEVICEID_BCM5752M 0x1601
+#define BCOM_DEVICEID_BCM5753 0x16F7
+#define BCOM_DEVICEID_BCM5753F 0x16FE
+#define BCOM_DEVICEID_BCM5753M 0x16FD
+#define BCOM_DEVICEID_BCM5754 0x167A
+#define BCOM_DEVICEID_BCM5754M 0x1672
+#define BCOM_DEVICEID_BCM5755 0x167B
+#define BCOM_DEVICEID_BCM5755M 0x1673
+#define BCOM_DEVICEID_BCM5780 0x166A
+#define BCOM_DEVICEID_BCM5780S 0x166B
+#define BCOM_DEVICEID_BCM5781 0x16DD
+#define BCOM_DEVICEID_BCM5782 0x1696
+#define BCOM_DEVICEID_BCM5786 0x169A
+#define BCOM_DEVICEID_BCM5787 0x169B
+#define BCOM_DEVICEID_BCM5787M 0x1693
+#define BCOM_DEVICEID_BCM5788 0x169C
+#define BCOM_DEVICEID_BCM5789 0x169D
+#define BCOM_DEVICEID_BCM5901 0x170D
+#define BCOM_DEVICEID_BCM5901A2 0x170E
+#define BCOM_DEVICEID_BCM5903M 0x16FF
+#define BCOM_DEVICEID_BCM5906 0x1712
+#define BCOM_DEVICEID_BCM5906M 0x1713
+
+/*
+ * Alteon AceNIC PCI vendor/device ID.
+ */
+#define ALTEON_VENDORID 0x12AE
+#define ALTEON_DEVICEID_ACENIC 0x0001
+#define ALTEON_DEVICEID_ACENIC_COPPER 0x0002
+#define ALTEON_DEVICEID_BCM5700 0x0003
+#define ALTEON_DEVICEID_BCM5701 0x0004
+
+/*
+ * 3Com 3c996 PCI vendor/device ID.
+ */
+#define TC_VENDORID 0x10B7
+#define TC_DEVICEID_3C996 0x0003
+
+/*
+ * SysKonnect PCI vendor ID
+ */
+#define SK_VENDORID 0x1148
+#define SK_DEVICEID_ALTIMA 0x4400
+#define SK_SUBSYSID_9D21 0x4421
+#define SK_SUBSYSID_9D41 0x4441
+
+/*
+ * Altima PCI vendor/device ID.
+ */
+#define ALTIMA_VENDORID 0x173b
+#define ALTIMA_DEVICE_AC1000 0x03e8
+#define ALTIMA_DEVICE_AC1002 0x03e9
+#define ALTIMA_DEVICE_AC9100 0x03ea
+
+/*
+ * Dell PCI vendor ID
+ */
+
+#define DELL_VENDORID 0x1028
+
+/*
+ * Apple PCI vendor ID.
+ */
+#define APPLE_VENDORID 0x106b
+#define APPLE_DEVICE_BCM5701 0x1645
+
+/*
+ * Sun PCI vendor ID
+ */
+#define SUN_VENDORID 0x108e
+
+/*
+ * Offset of MAC address inside EEPROM.
+ */
+#define BGE_EE_MAC_OFFSET 0x7C
+#define BGE_EE_MAC_OFFSET_5906 0x10
+#define BGE_EE_HWCFG_OFFSET 0xC8
+
+#define BGE_HWCFG_VOLTAGE 0x00000003
+#define BGE_HWCFG_PHYLED_MODE 0x0000000C
+#define BGE_HWCFG_MEDIA 0x00000030
+#define BGE_HWCFG_ASF 0x00000080
+
+#define BGE_VOLTAGE_1POINT3 0x00000000
+#define BGE_VOLTAGE_1POINT8 0x00000001
+
+#define BGE_PHYLEDMODE_UNSPEC 0x00000000
+#define BGE_PHYLEDMODE_TRIPLELED 0x00000004
+#define BGE_PHYLEDMODE_SINGLELED 0x00000008
+
+#define BGE_MEDIA_UNSPEC 0x00000000
+#define BGE_MEDIA_COPPER 0x00000010
+#define BGE_MEDIA_FIBER 0x00000020
+
+#define BGE_TICKS_PER_SEC 1000000
+
+/*
+ * Ring size constants.
+ */
+#define BGE_EVENT_RING_CNT 256
+#define BGE_CMD_RING_CNT 64
+#define BGE_STD_RX_RING_CNT 512
+#define BGE_JUMBO_RX_RING_CNT 256
+#define BGE_MINI_RX_RING_CNT 1024
+#define BGE_RETURN_RING_CNT 1024
+
+/* 5705 has smaller return ring size */
+
+#define BGE_RETURN_RING_CNT_5705 512
+
+/*
+ * Possible TX ring sizes.
+ */
+#define BGE_TX_RING_CNT_128 128
+#define BGE_TX_RING_BASE_128 0x3800
+
+#define BGE_TX_RING_CNT_256 256
+#define BGE_TX_RING_BASE_256 0x3000
+
+#define BGE_TX_RING_CNT_512 512
+#define BGE_TX_RING_BASE_512 0x2000
+
+#define BGE_TX_RING_CNT BGE_TX_RING_CNT_512
+#define BGE_TX_RING_BASE BGE_TX_RING_BASE_512
+
+/*
+ * Tigon III statistics counters.
+ */
+/* Statistics maintained MAC Receive block. */
+struct bge_rx_mac_stats {
+ bge_hostaddr ifHCInOctets;
+ bge_hostaddr Reserved1;
+ bge_hostaddr etherStatsFragments;
+ bge_hostaddr ifHCInUcastPkts;
+ bge_hostaddr ifHCInMulticastPkts;
+ bge_hostaddr ifHCInBroadcastPkts;
+ bge_hostaddr dot3StatsFCSErrors;
+ bge_hostaddr dot3StatsAlignmentErrors;
+ bge_hostaddr xonPauseFramesReceived;
+ bge_hostaddr xoffPauseFramesReceived;
+ bge_hostaddr macControlFramesReceived;
+ bge_hostaddr xoffStateEntered;
+ bge_hostaddr dot3StatsFramesTooLong;
+ bge_hostaddr etherStatsJabbers;
+ bge_hostaddr etherStatsUndersizePkts;
+ bge_hostaddr inRangeLengthError;
+ bge_hostaddr outRangeLengthError;
+ bge_hostaddr etherStatsPkts64Octets;
+ bge_hostaddr etherStatsPkts65Octetsto127Octets;
+ bge_hostaddr etherStatsPkts128Octetsto255Octets;
+ bge_hostaddr etherStatsPkts256Octetsto511Octets;
+ bge_hostaddr etherStatsPkts512Octetsto1023Octets;
+ bge_hostaddr etherStatsPkts1024Octetsto1522Octets;
+ bge_hostaddr etherStatsPkts1523Octetsto2047Octets;
+ bge_hostaddr etherStatsPkts2048Octetsto4095Octets;
+ bge_hostaddr etherStatsPkts4096Octetsto8191Octets;
+ bge_hostaddr etherStatsPkts8192Octetsto9022Octets;
+};
+
+
+/* Statistics maintained MAC Transmit block. */
+struct bge_tx_mac_stats {
+ bge_hostaddr ifHCOutOctets;
+ bge_hostaddr Reserved2;
+ bge_hostaddr etherStatsCollisions;
+ bge_hostaddr outXonSent;
+ bge_hostaddr outXoffSent;
+ bge_hostaddr flowControlDone;
+ bge_hostaddr dot3StatsInternalMacTransmitErrors;
+ bge_hostaddr dot3StatsSingleCollisionFrames;
+ bge_hostaddr dot3StatsMultipleCollisionFrames;
+ bge_hostaddr dot3StatsDeferredTransmissions;
+ bge_hostaddr Reserved3;
+ bge_hostaddr dot3StatsExcessiveCollisions;
+ bge_hostaddr dot3StatsLateCollisions;
+ bge_hostaddr dot3Collided2Times;
+ bge_hostaddr dot3Collided3Times;
+ bge_hostaddr dot3Collided4Times;
+ bge_hostaddr dot3Collided5Times;
+ bge_hostaddr dot3Collided6Times;
+ bge_hostaddr dot3Collided7Times;
+ bge_hostaddr dot3Collided8Times;
+ bge_hostaddr dot3Collided9Times;
+ bge_hostaddr dot3Collided10Times;
+ bge_hostaddr dot3Collided11Times;
+ bge_hostaddr dot3Collided12Times;
+ bge_hostaddr dot3Collided13Times;
+ bge_hostaddr dot3Collided14Times;
+ bge_hostaddr dot3Collided15Times;
+ bge_hostaddr ifHCOutUcastPkts;
+ bge_hostaddr ifHCOutMulticastPkts;
+ bge_hostaddr ifHCOutBroadcastPkts;
+ bge_hostaddr dot3StatsCarrierSenseErrors;
+ bge_hostaddr ifOutDiscards;
+ bge_hostaddr ifOutErrors;
+};
+
+/* Stats counters access through registers */
+struct bge_mac_stats_regs {
+ uint32_t ifHCOutOctets;
+ uint32_t Reserved0;
+ uint32_t etherStatsCollisions;
+ uint32_t outXonSent;
+ uint32_t outXoffSent;
+ uint32_t Reserved1;
+ uint32_t dot3StatsInternalMacTransmitErrors;
+ uint32_t dot3StatsSingleCollisionFrames;
+ uint32_t dot3StatsMultipleCollisionFrames;
+ uint32_t dot3StatsDeferredTransmissions;
+ uint32_t Reserved2;
+ uint32_t dot3StatsExcessiveCollisions;
+ uint32_t dot3StatsLateCollisions;
+ uint32_t Reserved3[14];
+ uint32_t ifHCOutUcastPkts;
+ uint32_t ifHCOutMulticastPkts;
+ uint32_t ifHCOutBroadcastPkts;
+ uint32_t Reserved4[2];
+ uint32_t ifHCInOctets;
+ uint32_t Reserved5;
+ uint32_t etherStatsFragments;
+ uint32_t ifHCInUcastPkts;
+ uint32_t ifHCInMulticastPkts;
+ uint32_t ifHCInBroadcastPkts;
+ uint32_t dot3StatsFCSErrors;
+ uint32_t dot3StatsAlignmentErrors;
+ uint32_t xonPauseFramesReceived;
+ uint32_t xoffPauseFramesReceived;
+ uint32_t macControlFramesReceived;
+ uint32_t xoffStateEntered;
+ uint32_t dot3StatsFramesTooLong;
+ uint32_t etherStatsJabbers;
+ uint32_t etherStatsUndersizePkts;
+};
+
+struct bge_stats {
+ uint8_t Reserved0[256];
+
+ /* Statistics maintained by Receive MAC. */
+ struct bge_rx_mac_stats rxstats;
+
+ bge_hostaddr Unused1[37];
+
+ /* Statistics maintained by Transmit MAC. */
+ struct bge_tx_mac_stats txstats;
+
+ bge_hostaddr Unused2[31];
+
+ /* Statistics maintained by Receive List Placement. */
+ bge_hostaddr COSIfHCInPkts[16];
+ bge_hostaddr COSFramesDroppedDueToFilters;
+ bge_hostaddr nicDmaWriteQueueFull;
+ bge_hostaddr nicDmaWriteHighPriQueueFull;
+ bge_hostaddr nicNoMoreRxBDs;
+ bge_hostaddr ifInDiscards;
+ bge_hostaddr ifInErrors;
+ bge_hostaddr nicRecvThresholdHit;
+
+ bge_hostaddr Unused3[9];
+
+ /* Statistics maintained by Send Data Initiator. */
+ bge_hostaddr COSIfHCOutPkts[16];
+ bge_hostaddr nicDmaReadQueueFull;
+ bge_hostaddr nicDmaReadHighPriQueueFull;
+ bge_hostaddr nicSendDataCompQueueFull;
+
+ /* Statistics maintained by Host Coalescing. */
+ bge_hostaddr nicRingSetSendProdIndex;
+ bge_hostaddr nicRingStatusUpdate;
+ bge_hostaddr nicInterrupts;
+ bge_hostaddr nicAvoidedInterrupts;
+ bge_hostaddr nicSendThresholdHit;
+
+ uint8_t Reserved4[320];
+};
+
+/*
+ * Tigon general information block. This resides in host memory
+ * and contains the status counters, ring control blocks and
+ * producer pointers.
+ */
+
+struct bge_gib {
+ struct bge_stats bge_stats;
+ struct bge_rcb bge_tx_rcb[16];
+ struct bge_rcb bge_std_rx_rcb;
+ struct bge_rcb bge_jumbo_rx_rcb;
+ struct bge_rcb bge_mini_rx_rcb;
+ struct bge_rcb bge_return_rcb;
+};
+
+#define BGE_FRAMELEN 1518
+#define BGE_MAX_FRAMELEN 1536
+#define BGE_JUMBO_FRAMELEN 9018
+#define BGE_JUMBO_MTU (BGE_JUMBO_FRAMELEN-ETHER_HDR_LEN-ETHER_CRC_LEN)
+#define BGE_MIN_FRAMELEN 60
+
+/*
+ * Other utility macros.
+ */
+#define BGE_INC(x, y) (x) = (x + 1) % y
+
+/*
+ * Register access macros. The Tigon always uses memory mapped register
+ * accesses and all registers must be accessed with 32 bit operations.
+ */
+
+#define CSR_WRITE_4(sc, reg, val) \
+ bus_space_write_4(sc->bge_btag, sc->bge_bhandle, reg, val)
+
+#define CSR_READ_4(sc, reg) \
+ bus_space_read_4(sc->bge_btag, sc->bge_bhandle, reg)
+
+#define BGE_SETBIT(sc, reg, x) \
+ CSR_WRITE_4(sc, reg, (CSR_READ_4(sc, reg) | (x)))
+#define BGE_CLRBIT(sc, reg, x) \
+ CSR_WRITE_4(sc, reg, (CSR_READ_4(sc, reg) & ~(x)))
+
+#define PCI_SETBIT(dev, reg, x, s) \
+ pci_write_config(dev, reg, (pci_read_config(dev, reg, s) | (x)), s)
+#define PCI_CLRBIT(dev, reg, x, s) \
+ pci_write_config(dev, reg, (pci_read_config(dev, reg, s) & ~(x)), s)
+
+/*
+ * Memory management stuff. Note: the SSLOTS, MSLOTS and JSLOTS
+ * values are tuneable. They control the actual amount of buffers
+ * allocated for the standard, mini and jumbo receive rings.
+ */
+
+#define BGE_SSLOTS 256
+#define BGE_MSLOTS 256
+#define BGE_JSLOTS 384
+
+#define BGE_JRAWLEN (BGE_JUMBO_FRAMELEN + ETHER_ALIGN)
+#define BGE_JLEN (BGE_JRAWLEN + (sizeof(uint64_t) - \
+ (BGE_JRAWLEN % sizeof(uint64_t))))
+#define BGE_JPAGESZ PAGE_SIZE
+#define BGE_RESID (BGE_JPAGESZ - (BGE_JLEN * BGE_JSLOTS) % BGE_JPAGESZ)
+#define BGE_JMEM ((BGE_JLEN * BGE_JSLOTS) + BGE_RESID)
+
+#define BGE_NSEG_JUMBO 4
+#define BGE_NSEG_NEW 32
+
+/*
+ * Ring structures. Most of these reside in host memory and we tell
+ * the NIC where they are via the ring control blocks. The exceptions
+ * are the tx and command rings, which live in NIC memory and which
+ * we access via the shared memory window.
+ */
+
+struct bge_ring_data {
+ struct bge_rx_bd *bge_rx_std_ring;
+ bus_addr_t bge_rx_std_ring_paddr;
+ struct bge_extrx_bd *bge_rx_jumbo_ring;
+ bus_addr_t bge_rx_jumbo_ring_paddr;
+ struct bge_rx_bd *bge_rx_return_ring;
+ bus_addr_t bge_rx_return_ring_paddr;
+ struct bge_tx_bd *bge_tx_ring;
+ bus_addr_t bge_tx_ring_paddr;
+ struct bge_status_block *bge_status_block;
+ bus_addr_t bge_status_block_paddr;
+ struct bge_stats *bge_stats;
+ bus_addr_t bge_stats_paddr;
+ struct bge_gib bge_info;
+};
+
+#define BGE_STD_RX_RING_SZ \
+ (sizeof(struct bge_rx_bd) * BGE_STD_RX_RING_CNT)
+#define BGE_JUMBO_RX_RING_SZ \
+ (sizeof(struct bge_extrx_bd) * BGE_JUMBO_RX_RING_CNT)
+#define BGE_TX_RING_SZ \
+ (sizeof(struct bge_tx_bd) * BGE_TX_RING_CNT)
+#define BGE_RX_RTN_RING_SZ(x) \
+ (sizeof(struct bge_rx_bd) * x->bge_return_ring_cnt)
+
+#define BGE_STATUS_BLK_SZ sizeof (struct bge_status_block)
+
+#define BGE_STATS_SZ sizeof (struct bge_stats)
+
+/*
+ * Mbuf pointers. We need these to keep track of the virtual addresses
+ * of our mbuf chains since we can only convert from physical to virtual,
+ * not the other way around.
+ */
+struct bge_chain_data {
+ bus_dma_tag_t bge_parent_tag;
+ bus_dma_tag_t bge_rx_std_ring_tag;
+ bus_dma_tag_t bge_rx_jumbo_ring_tag;
+ bus_dma_tag_t bge_rx_return_ring_tag;
+ bus_dma_tag_t bge_tx_ring_tag;
+ bus_dma_tag_t bge_status_tag;
+ bus_dma_tag_t bge_stats_tag;
+ bus_dma_tag_t bge_mtag; /* mbuf mapping tag */
+ bus_dma_tag_t bge_mtag_jumbo; /* mbuf mapping tag */
+ bus_dmamap_t bge_tx_dmamap[BGE_TX_RING_CNT];
+ bus_dmamap_t bge_rx_std_dmamap[BGE_STD_RX_RING_CNT];
+ bus_dmamap_t bge_rx_jumbo_dmamap[BGE_JUMBO_RX_RING_CNT];
+ bus_dmamap_t bge_rx_std_ring_map;
+ bus_dmamap_t bge_rx_jumbo_ring_map;
+ bus_dmamap_t bge_tx_ring_map;
+ bus_dmamap_t bge_rx_return_ring_map;
+ bus_dmamap_t bge_status_map;
+ bus_dmamap_t bge_stats_map;
+ struct mbuf *bge_tx_chain[BGE_TX_RING_CNT];
+ struct mbuf *bge_rx_std_chain[BGE_STD_RX_RING_CNT];
+ struct mbuf *bge_rx_jumbo_chain[BGE_JUMBO_RX_RING_CNT];
+};
+
+struct bge_dmamap_arg {
+ struct bge_softc *sc;
+ bus_addr_t bge_busaddr;
+ uint16_t bge_flags;
+ int bge_idx;
+ int bge_maxsegs;
+ struct bge_tx_bd *bge_ring;
+};
+
+#define BGE_HWREV_TIGON 0x01
+#define BGE_HWREV_TIGON_II 0x02
+#define BGE_TIMEOUT 100000
+#define BGE_TXCONS_UNSET 0xFFFF /* impossible value */
+
+struct bge_bcom_hack {
+ int reg;
+ int val;
+};
+
+#define ASF_ENABLE 1
+#define ASF_NEW_HANDSHAKE 2
+#define ASF_STACKUP 4
+
+struct bge_softc {
+ struct ifnet *bge_ifp; /* interface info */
+ device_t bge_dev;
+ struct mtx bge_mtx;
+ device_t bge_miibus;
+ bus_space_handle_t bge_bhandle;
+ bus_space_tag_t bge_btag;
+ void *bge_intrhand;
+ struct resource *bge_irq;
+ struct resource *bge_res;
+ struct ifmedia bge_ifmedia; /* TBI media info */
+ uint32_t bge_flags;
+#define BGE_FLAG_TBI 0x00000001
+#define BGE_FLAG_JUMBO 0x00000002
+#define BGE_FLAG_WIRESPEED 0x00000004
+#define BGE_FLAG_EADDR 0x00000008
+#define BGE_FLAG_MSI 0x00000100
+#define BGE_FLAG_PCIX 0x00000200
+#define BGE_FLAG_PCIE 0x00000400
+#define BGE_FLAG_5700_FAMILY 0x00001000
+#define BGE_FLAG_5705_PLUS 0x00002000
+#define BGE_FLAG_5714_FAMILY 0x00004000
+#define BGE_FLAG_575X_PLUS 0x00008000
+#define BGE_FLAG_RX_ALIGNBUG 0x00100000
+#define BGE_FLAG_NO_3LED 0x00200000
+#define BGE_FLAG_ADC_BUG 0x00400000
+#define BGE_FLAG_5704_A0_BUG 0x00800000
+#define BGE_FLAG_JITTER_BUG 0x01000000
+#define BGE_FLAG_BER_BUG 0x02000000
+#define BGE_FLAG_ADJUST_TRIM 0x04000000
+#define BGE_FLAG_CRC_BUG 0x08000000
+#define BGE_FLAG_5788 0x20000000
+ uint32_t bge_chipid;
+ uint8_t bge_asicrev;
+ uint8_t bge_chiprev;
+ uint8_t bge_asf_mode;
+ uint8_t bge_asf_count;
+ struct bge_ring_data bge_ldata; /* rings */
+ struct bge_chain_data bge_cdata; /* mbufs */
+ uint16_t bge_tx_saved_considx;
+ uint16_t bge_rx_saved_considx;
+ uint16_t bge_ev_saved_considx;
+ uint16_t bge_return_ring_cnt;
+ uint16_t bge_std; /* current std ring head */
+ uint16_t bge_jumbo; /* current jumo ring head */
+ uint32_t bge_stat_ticks;
+ uint32_t bge_rx_coal_ticks;
+ uint32_t bge_tx_coal_ticks;
+ uint32_t bge_tx_prodidx;
+ uint32_t bge_rx_max_coal_bds;
+ uint32_t bge_tx_max_coal_bds;
+ uint32_t bge_tx_buf_ratio;
+ int bge_if_flags;
+ int bge_txcnt;
+ int bge_link; /* link state */
+ int bge_link_evt; /* pending link event */
+ int bge_timer;
+ struct callout bge_stat_ch;
+ uint32_t bge_rx_discards;
+ uint32_t bge_tx_discards;
+ uint32_t bge_tx_collisions;
+#ifdef DEVICE_POLLING
+ int rxcycles;
+#endif /* DEVICE_POLLING */
+};
+
+#define BGE_LOCK_INIT(_sc, _name) \
+ mtx_init(&(_sc)->bge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
+#define BGE_LOCK(_sc) mtx_lock(&(_sc)->bge_mtx)
+#define BGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->bge_mtx, MA_OWNED)
+#define BGE_UNLOCK(_sc) mtx_unlock(&(_sc)->bge_mtx)
+#define BGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->bge_mtx)
diff --git a/bsd_eth_drivers/if_em/.cvsignore b/bsd_eth_drivers/if_em/.cvsignore
new file mode 100644
index 0000000..70845e0
--- /dev/null
+++ b/bsd_eth_drivers/if_em/.cvsignore
@@ -0,0 +1 @@
+Makefile.in
diff --git a/bsd_eth_drivers/if_em/Makefile.am b/bsd_eth_drivers/if_em/Makefile.am
index 1a6a12f..a795ef8 100644
--- a/bsd_eth_drivers/if_em/Makefile.am
+++ b/bsd_eth_drivers/if_em/Makefile.am
@@ -31,6 +31,7 @@ libif_em_a_DEPENDENCIES = $(libif_em_a_LIBADD)
lib_LIBRARIES = libif_em.a
+AM_CPPFLAGS += -D_KERNEL
AM_CPPFLAGS += -I$(srcdir)
AM_CPPFLAGS += -I$(srcdir)/../libbsdport -I../libbsdport -I../libbsdport/dummyheaders
AM_CPPFLAGS += $(CPPFLAGS_82542_SUPPORT_$(ENBL_82542_SUPPORT))
diff --git a/bsd_eth_drivers/if_em/e1000_osdep.h b/bsd_eth_drivers/if_em/e1000_osdep.h
index b5aa603..96a7d84 100644
--- a/bsd_eth_drivers/if_em/e1000_osdep.h
+++ b/bsd_eth_drivers/if_em/e1000_osdep.h
@@ -37,9 +37,16 @@ POSSIBILITY OF SUCH DAMAGE.
#define _FREEBSD_OS_H_
#include <rtems.h>
-#define _KERNEL
-#include <rtems/rtems_bsdnet_internal.h>
#include <bsp.h>
+#include <rtems/pci.h>
+#include <vm/vm.h> /* for non-_KERNEL boolean_t :-( */
+
+#ifdef _KERNEL
+#ifndef __INSIDE_RTEMS_BSD_TCPIP_STACK__
+#define __INSIDE_RTEMS_BSD_TCPIP_STACK__
+#endif
+#include <rtems/rtems_bsdnet.h>
+#include <rtems/rtems_bsdnet_internal.h>
#include <sys/types.h>
#include <sys/param.h>
@@ -50,7 +57,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
-#include <rtems/pci.h>
#define ASSERT(x) if(!(x)) panic("EM: x")
@@ -69,13 +75,30 @@ POSSIBILITY OF SUCH DAMAGE.
#define DEBUGOUT3(S,A,B,C)
#define DEBUGOUT7(S,A,B,C,D,E,F,G)
+#include <devicet.h>
+
+struct e1000_osdep
+{
+ uint32_t mem_bus_space_handle;
+ uint32_t io_bus_space_handle;
+ uint32_t flash_bus_space_handle;
+ /* these are currently unused; present for freebsd compatibility only */
+ uint32_t mem_bus_space_tag;
+ uint32_t io_bus_space_tag;
+ uint32_t flash_bus_space_tag;
+ device_t dev;
+};
+
#define STATIC static
+#endif
+
#ifndef FALSE
#define FALSE 0
#endif
#ifndef TRUE
#define TRUE 1
#endif
+
#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
#define PCI_COMMAND_REGISTER PCIR_COMMAND
@@ -92,90 +115,71 @@ typedef int32_t s32;
typedef int16_t s16;
typedef int8_t s8 ;
-#include <devicet.h>
+typedef volatile uint32_t __uint32_va_t __attribute__((may_alias));
+typedef volatile uint16_t __uint16_va_t __attribute__((may_alias));
-struct e1000_osdep
-{
- uint32_t mem_bus_space_handle;
- uint32_t io_bus_space_handle;
- uint32_t flash_bus_space_handle;
- /* these are currently unused; present for freebsd compatibility only */
- uint32_t mem_bus_space_tag;
- uint32_t io_bus_space_tag;
- uint32_t flash_bus_space_tag;
- device_t dev;
-};
+#ifdef NO_82542_SUPPORT
+#define E1000_REGISTER(hw, reg) reg
+#else
+#define E1000_REGISTER(hw, reg) (((hw)->mac.type >= e1000_82543) \
+ ? reg : e1000_translate_register_82542(reg))
+#endif
-typedef volatile uint32_t __attribute__((may_alias)) *__uint32_a_p_t;
-typedef volatile uint16_t __attribute__((may_alias)) *__uint16_a_p_t;
-typedef volatile uint8_t __attribute__((may_alias)) * __uint8_a_p_t;
+#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
+/* Provide our own I/O so that the low-level driver API can
+ * be used independently from the BSD stuff.
+ * This is useful for people who want to use an e1000 adapter
+ * for special ethernet links that do not use BSD TCP/IP.
+ */
#ifdef __PPC__
-#include <libcpu/io.h>
-static inline uint8_t __in_8(uint32_t base, uint32_t offset)
-{
-__uint8_a_p_t a = (__uint8_a_p_t)(base+offset);
-uint8_t rval;
- __asm__ __volatile__(
- "sync;\n"
- "lbz%U1%X1 %0,%1;\n"
- "twi 0,%0,0;\n"
- "isync" : "=r" (rval) : "m"(*a));
- return rval;
-}
-static inline void __out_8(uint32_t base, uint32_t offset, uint8_t val)
-{
-__uint8_a_p_t a = (__uint8_a_p_t)(base+offset);
- __asm__ __volatile__(
- "stb%U0%X0 %1,%0; eieio" : "=m" (*a) : "r"(val)
- );
-}
+#include <libcpu/io.h>
-static inline uint16_t __in_le16(uint32_t base, uint32_t offset)
+static inline uint16_t __in_le16(uint8_t *base, uint32_t offset)
{
-__uint16_a_p_t a = (__uint16_a_p_t)(base+offset);
uint16_t rval;
- __asm__ __volatile__(
- "sync;\n"
- "lhbrx %0,0,%1;\n"
- "twi 0,%0,0;\n"
- "isync" : "=r" (rval) : "r"(a), "m"(*a));
- return rval;
+ __asm__ __volatile__(
+ "lhbrx %0,%2,%1; eieio\n"
+ : "=r" (rval)
+ : "r"(base), "b"(offset), "m"(*(__uint16_va_t*)(base + offset))
+ );
+ return rval;
}
-static inline void __out_le16(uint32_t base, uint32_t offset, uint16_t val)
+static inline void __out_le16(uint8_t *base, uint32_t offset, uint16_t val)
{
-__uint16_a_p_t a = (__uint16_a_p_t)(base+offset);
- __asm__ __volatile__(
- "sync; sthbrx %1,0,%2" : "=m" (*a) : "r"(val), "r"(a)
- );
+ __asm__ __volatile__(
+ "sthbrx %1,%3,%2; eieio"
+ : "=o"(*(__uint16_va_t*)(base+offset))
+ : "r"(val), "r"(base), "b"(offset)
+ );
}
-static inline uint32_t __in_le32(uint32_t base, uint32_t offset)
+static inline uint32_t __in_le32(uint8_t *base, uint32_t offset)
{
-__uint32_a_p_t a = (__uint32_a_p_t)(base+offset);
uint32_t rval;
- __asm__ __volatile__(
- "sync;\n"
- "lwbrx %0,0,%1;\n"
- "twi 0,%0,0;\n"
- "isync" : "=r" (rval) : "r"(a), "m"(*a));
- return rval;
+ __asm__ __volatile__(
+ "lwbrx %0,%2,%1; eieio\n"
+ : "=r" (rval)
+ : "r"(base), "b"(offset), "m"(*(__uint32_va_t*)(base + offset))
+ );
+ return rval;
}
-static inline void __out_le32(uint32_t base, uint32_t offset, uint32_t val)
+static inline void __out_le32(uint8_t *base, uint32_t offset, uint32_t val)
{
-__uint32_a_p_t a = (__uint32_a_p_t)(base+offset);
- __asm__ __volatile__(
- "sync; stwbrx %1,0,%2" : "=m" (*a) : "r"(val), "r"(a)
- );
+ __asm__ __volatile__(
+ "stwbrx %1,%3,%2; eieio"
+ : "=o"(*(__uint32_va_t*)(base+offset))
+ : "r"(val), "r"(base), "b"(offset)
+ );
}
#ifdef _IO_BASE
-static inline void __outport_dword(uint32_t base, uint32_t off, uint32_t val)
+static inline void __outport_dword(unsigned long base, uint32_t off, uint32_t val)
{
- __out_le32(_IO_BASE+base+off,0,val);
+ __out_le32((uint8_t*)(_IO_BASE+base), off, val);
}
#else
#error "_IO_BASE needs to be defined by BSP (bsp.h)"
@@ -183,60 +187,38 @@ static inline void __outport_dword(uint32_t base, uint32_t off, uint32_t val)
#elif defined(__i386__)
#include <libcpu/cpu.h>
-static inline uint8_t __in_8(uint32_t base, uint32_t offset)
-{
-__uint8_a_p_t a = (__uint8_a_p_t)(base+offset);
- return *a;
-}
-
-static inline void __out_8(uint32_t base, uint32_t offset, uint8_t val)
-{
-__uint8_a_p_t a = (__uint8_a_p_t)(base+offset);
- *a = val;
-}
-static inline uint16_t __in_le16(uint32_t base, uint32_t offset)
+static inline uint16_t __in_le16(uint8_t *base, uint32_t offset)
{
-__uint16_a_p_t a = (__uint16_a_p_t)(base+offset);
- return *a;
+ return *(__uint16_va_t*)(base + offset);
}
-static inline void __out_le16(uint32_t base, uint32_t offset, uint16_t val)
+static inline void __out_le16(uint8_t *base, uint32_t offset, uint16_t val)
{
-__uint16_a_p_t a = (__uint16_a_p_t)(base+offset);
- *a = val;
+ *(__uint16_va_t*)(base + offset) = val;
}
-static inline uint32_t __in_le32(uint32_t base, uint32_t offset)
+static inline uint32_t __in_le32(uint8_t *base, uint32_t offset)
{
-__uint32_a_p_t a = (__uint32_a_p_t)(base+offset);
- return *a;
+ return *(__uint32_va_t*)(base + offset);
}
-static inline void __out_le32(uint32_t base, uint32_t offset, uint32_t val)
+static inline void __out_le32(uint8_t *base, uint32_t offset, uint32_t val)
{
-__uint32_a_p_t a = (__uint32_a_p_t)(base+offset);
- *a = val;
+ *(__uint32_va_t*)(base + offset) = val;
}
-
-static inline void __outport_dword(uint32_t base, uint32_t off, uint32_t val)
+static inline void __outport_dword(unsigned long base, uint32_t off, uint32_t val)
{
i386_outport_long( (base + off), val );
}
#else
-#error "not ported to this CPU architecture yet"
+#warning "not ported to this CPU architecture yet -- using libbsdport I/O"
+#define USE_LIBBSDPORT_IO
#endif
-#ifdef NO_82542_SUPPORT
-#define E1000_REGISTER(hw, reg) reg
-#else
-#define E1000_REGISTER(hw, reg) (((hw)->mac.type >= e1000_82543) \
- ? reg : e1000_translate_register_82542(reg))
-#endif
-
-#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
+#ifdef USE_LIBBSDPORT_IO
#define USE_EXPLICIT_BUSTAGS
@@ -393,5 +375,52 @@ static inline void __outport_dword(uint32_t base, uint32_t off, uint32_t val)
((struct e1000_osdep *)(hw)->back)->flash_bus_space_handle, reg, value)
#endif /* USE_EXPLICIT_BUSTAGS */
+#else /* USE_LIBBSDPORT_IO */
+
+/* Read from an absolute offset in the adapter's memory space */
+#define E1000_READ_OFFSET(hw, offset) \
+ __in_le32((hw)->hw_addr, offset)
+
+/* Write to an absolute offset in the adapter's memory space */
+#define E1000_WRITE_OFFSET(hw, offset, value) \
+ __out_le32((hw)->hw_addr, offset, value)
+
+/* Register READ/WRITE macros */
+
+#define E1000_READ_REG(hw, reg) \
+ __in_le32((hw)->hw_addr, E1000_REGISTER(hw, reg))
+
+#define E1000_WRITE_REG(hw, reg, value) \
+ __out_le32((hw)->hw_addr, E1000_REGISTER(hw, reg), value)
+
+#define E1000_READ_REG_ARRAY(hw, reg, index) \
+ __in_le32((hw)->hw_addr, E1000_REGISTER(hw, reg) + ((index)<< 2))
+
+#define E1000_WRITE_REG_ARRAY(hw, reg, index, value) \
+ __out_le32((hw)->hw_addr, E1000_REGISTER(hw, reg) + ((index)<< 2), value)
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_WRITE_REG_IO(hw, reg, value) do { \
+ __outport_dword((hw)->io_base, 0, reg); \
+ __outport_dword((hw)->io_base, 4, value); \
+ } while (0)
+
+#define E1000_READ_FLASH_REG(hw, reg) \
+ __in_le32( (hw)->flash_address, reg )
+
+#define E1000_READ_FLASH_REG16(hw, reg) \
+ __in_le16( (hw)->flash_address, reg )
+
+#define E1000_WRITE_FLASH_REG(hw, reg, value) \
+ __out_le32( (hw)->flash_address, reg, value )
+
+#define E1000_WRITE_FLASH_REG16(hw, reg, value) \
+ __out_le16( (hw)->flash_address, reg, value )
+
+#endif /* USE_LIBBSDPORT_IO */
+
+
#endif /* _FREEBSD_OS_H_ */
diff --git a/bsd_eth_drivers/if_em/if_em.c b/bsd_eth_drivers/if_em/if_em.c
index 71d1227..05d4216 100644
--- a/bsd_eth_drivers/if_em/if_em.c
+++ b/bsd_eth_drivers/if_em/if_em.c
@@ -413,8 +413,6 @@ TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
#endif
#ifdef __rtems__
-int em_bootverbose = 0;
-#define bootverbose em_bootverbose
#undef static
#define static static
#endif
@@ -2663,7 +2661,11 @@ em_allocate_pci_resources(struct adapter *adapter)
rman_get_bustag(adapter->res_memory);
adapter->osdep.mem_bus_space_handle =
rman_get_bushandle(adapter->res_memory);
+#ifndef __rtems__
adapter->hw.hw_addr = (uint8_t*)&adapter->osdep.mem_bus_space_handle;
+#else
+ adapter->hw.hw_addr = (uint8_t*)adapter->res_memory;
+#endif
/* Only older adapters use IO mapping */
if ((adapter->hw.mac.type >= e1000_82543) && /* __rtems__ >82542 -> >= 82543 */
@@ -2691,7 +2693,12 @@ em_allocate_pci_resources(struct adapter *adapter)
"ioport\n");
return (ENXIO);
}
+#ifndef __rtems__
adapter->hw.io_base = 0;
+#else
+ adapter->hw.io_base = (unsigned long)adapter->res_ioport
+ & PCI_BASE_ADDRESS_IO_MASK;
+#endif
adapter->osdep.io_bus_space_tag =
rman_get_bustag(adapter->res_ioport);
adapter->osdep.io_bus_space_handle =
@@ -3281,9 +3288,15 @@ em_initialize_transmit_unit(struct adapter *adapter)
E1000_WRITE_REG(&adapter->hw, E1000_TDT, 0);
E1000_WRITE_REG(&adapter->hw, E1000_TDH, 0);
+#ifndef __rtems__
HW_DEBUGOUT2("Base = %x, Length = %x\n",
E1000_READ_REG(&adapter->hw, E1000_TDBAL),
E1000_READ_REG(&adapter->hw, E1000_TDLEN));
+#else
+ HW_DEBUGOUT2("Base = %x, Length = %x\n",
+ (unsigned)E1000_READ_REG(&adapter->hw, E1000_TDBAL),
+ (unsigned)E1000_READ_REG(&adapter->hw, E1000_TDLEN));
+#endif
/* Set the default values for the Tx Inter Packet Gap timer */
switch (adapter->hw.mac.type) {
diff --git a/bsd_eth_drivers/if_fxp/.cvsignore b/bsd_eth_drivers/if_fxp/.cvsignore
new file mode 100644
index 0000000..70845e0
--- /dev/null
+++ b/bsd_eth_drivers/if_fxp/.cvsignore
@@ -0,0 +1 @@
+Makefile.in
diff --git a/bsd_eth_drivers/if_fxp/Makefile.am b/bsd_eth_drivers/if_fxp/Makefile.am
new file mode 100644
index 0000000..f58c259
--- /dev/null
+++ b/bsd_eth_drivers/if_fxp/Makefile.am
@@ -0,0 +1,22 @@
+# $Id$
+AUTOMAKE_OPTIONS=foreign
+
+include $(top_srcdir)/rtems-pre.am
+
+libif_fxp_a_SOURCES = if_fxp.c
+libif_fxp_a_SOURCES += if_fxpreg.h if_fxpvar.h rcvbundl.h
+
+lib_LIBRARIES = libif_fxp.a
+
+AM_CPPFLAGS += -I$(srcdir)
+AM_CPPFLAGS += -I$(srcdir)/../libbsdport -I../libbsdport -I../libbsdport/dummyheaders
+AM_CPPFLAGS += $(CPPFLAGS_82542_SUPPORT_$(ENBL_82542_SUPPORT))
+AM_CPPFLAGS += $(CPPFLAGS_ICH8LAN_SUPPORT_$(ENBL_ICH8LAN_SUPPORT))
+
+LINKS = dev/fxp/if_fxpreg.h dev/fxp/if_fxpvar.h dev/fxp/rcvbundl.h
+
+BUILT_SOURCES =
+
+$(libif_fxp_a_OBJECTS): $(LINKS)
+
+include ../links.am
diff --git a/bsd_eth_drivers/if_fxp/if_fxp.c b/bsd_eth_drivers/if_fxp/if_fxp.c
new file mode 100644
index 0000000..8bb5235
--- /dev/null
+++ b/bsd_eth_drivers/if_fxp/if_fxp.c
@@ -0,0 +1,2909 @@
+/*-
+ * Copyright (c) 1995, David Greenman
+ * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#ifdef __rtems__
+#include <libbsdport.h>
+#endif
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/dev/fxp/if_fxp.c,v 1.266.6.1 2008/11/25 02:59:29 kensmith Exp $");
+
+/*
+ * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
+ */
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_device_polling.h"
+#endif
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+#include <sys/mbuf.h>
+ /* #include <sys/mutex.h> */
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <net/bpf.h>
+#include <sys/sockio.h>
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+
+#include <net/ethernet.h>
+#include <net/if_arp.h>
+
+
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#ifdef FXP_IP_CSUM_WAR
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <machine/in_cksum.h>
+#endif
+
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h> /* for PCIM_CMD_xxx */
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/fxp/if_fxpreg.h>
+#include <dev/fxp/if_fxpvar.h>
+#include <dev/fxp/rcvbundl.h>
+
+MODULE_DEPEND(fxp, pci, 1, 1, 1);
+MODULE_DEPEND(fxp, ether, 1, 1, 1);
+MODULE_DEPEND(fxp, miibus, 1, 1, 1);
+#include "miibus_if.h"
+
+#ifdef __rtems__
+#include <libbsdport_post.h>
+#endif
+
+/*
+ * NOTE! On the Alpha, we have an alignment constraint. The
+ * card DMAs the packet immediately following the RFA. However,
+ * the first thing in the packet is a 14-byte Ethernet header.
+ * This means that the packet is misaligned. To compensate,
+ * we actually offset the RFA 2 bytes into the cluster. This
+ * alignes the packet after the Ethernet header at a 32-bit
+ * boundary. HOWEVER! This means that the RFA is misaligned!
+ */
+#define RFA_ALIGNMENT_FUDGE 2
+
+/*
+ * Set initial transmit threshold at 64 (512 bytes). This is
+ * increased by 64 (512 bytes) at a time, to maximum of 192
+ * (1536 bytes), if an underrun occurs.
+ */
+static int tx_threshold = 64;
+
+/*
+ * The configuration byte map has several undefined fields which
+ * must be one or must be zero. Set up a template for these bits
+ * only, (assuming a 82557 chip) leaving the actual configuration
+ * to fxp_init.
+ *
+ * See struct fxp_cb_config for the bit definitions.
+ */
+static u_char fxp_cb_config_template[] = {
+ 0x0, 0x0, /* cb_status */
+ 0x0, 0x0, /* cb_command */
+ 0x0, 0x0, 0x0, 0x0, /* link_addr */
+ 0x0, /* 0 */
+ 0x0, /* 1 */
+ 0x0, /* 2 */
+ 0x0, /* 3 */
+ 0x0, /* 4 */
+ 0x0, /* 5 */
+ 0x32, /* 6 */
+ 0x0, /* 7 */
+ 0x0, /* 8 */
+ 0x0, /* 9 */
+ 0x6, /* 10 */
+ 0x0, /* 11 */
+ 0x0, /* 12 */
+ 0x0, /* 13 */
+ 0xf2, /* 14 */
+ 0x48, /* 15 */
+ 0x0, /* 16 */
+ 0x40, /* 17 */
+ 0xf0, /* 18 */
+ 0x0, /* 19 */
+ 0x3f, /* 20 */
+ 0x5 /* 21 */
+};
+
+struct fxp_ident {
+ uint16_t devid;
+ int16_t revid; /* -1 matches anything */
+ char *name;
+};
+
+/*
+ * Claim various Intel PCI device identifiers for this driver. The
+ * sub-vendor and sub-device field are extensively used to identify
+ * particular variants, but we don't currently differentiate between
+ * them.
+ */
+static struct fxp_ident fxp_ident_table[] = {
+ { 0x1029, -1, "Intel 82559 PCI/CardBus Pro/100" },
+ { 0x1030, -1, "Intel 82559 Pro/100 Ethernet" },
+ { 0x1031, -1, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
+ { 0x1032, -1, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
+ { 0x1033, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
+ { 0x1034, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
+ { 0x1035, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
+ { 0x1036, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
+ { 0x1037, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
+ { 0x1038, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
+ { 0x1039, -1, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
+ { 0x103A, -1, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
+ { 0x103B, -1, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
+ { 0x103C, -1, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
+ { 0x103D, -1, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
+ { 0x103E, -1, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
+ { 0x1050, -1, "Intel 82801BA (D865) Pro/100 VE Ethernet" },
+ { 0x1051, -1, "Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" },
+ { 0x1059, -1, "Intel 82551QM Pro/100 M Mobile Connection" },
+ { 0x1064, -1, "Intel 82562EZ (ICH6)" },
+ { 0x1065, -1, "Intel 82562ET/EZ/GT/GZ PRO/100 VE Ethernet" },
+ { 0x1068, -1, "Intel 82801FBM (ICH6-M) Pro/100 VE Ethernet" },
+ { 0x1069, -1, "Intel 82562EM/EX/GX Pro/100 Ethernet" },
+ { 0x1091, -1, "Intel 82562GX Pro/100 Ethernet" },
+ { 0x1092, -1, "Intel Pro/100 VE Network Connection" },
+ { 0x1093, -1, "Intel Pro/100 VM Network Connection" },
+ { 0x1094, -1, "Intel Pro/100 946GZ (ICH7) Network Connection" },
+ { 0x1209, -1, "Intel 82559ER Embedded 10/100 Ethernet" },
+ { 0x1229, 0x01, "Intel 82557 Pro/100 Ethernet" },
+ { 0x1229, 0x02, "Intel 82557 Pro/100 Ethernet" },
+ { 0x1229, 0x03, "Intel 82557 Pro/100 Ethernet" },
+ { 0x1229, 0x04, "Intel 82558 Pro/100 Ethernet" },
+ { 0x1229, 0x05, "Intel 82558 Pro/100 Ethernet" },
+ { 0x1229, 0x06, "Intel 82559 Pro/100 Ethernet" },
+ { 0x1229, 0x07, "Intel 82559 Pro/100 Ethernet" },
+ { 0x1229, 0x08, "Intel 82559 Pro/100 Ethernet" },
+ { 0x1229, 0x09, "Intel 82559ER Pro/100 Ethernet" },
+ { 0x1229, 0x0c, "Intel 82550 Pro/100 Ethernet" },
+ { 0x1229, 0x0d, "Intel 82550 Pro/100 Ethernet" },
+ { 0x1229, 0x0e, "Intel 82550 Pro/100 Ethernet" },
+ { 0x1229, 0x0f, "Intel 82551 Pro/100 Ethernet" },
+ { 0x1229, 0x10, "Intel 82551 Pro/100 Ethernet" },
+ { 0x1229, -1, "Intel 82557/8/9 Pro/100 Ethernet" },
+ { 0x2449, -1, "Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" },
+ { 0x27dc, -1, "Intel 82801GB (ICH7) 10/100 Ethernet" },
+ { 0, -1, NULL },
+};
+
+#ifdef FXP_IP_CSUM_WAR
+#define FXP_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
+#else
+#define FXP_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
+#endif
+
+static int fxp_probe(device_t dev);
+static int fxp_attach(device_t dev);
+static int fxp_detach(device_t dev);
+static int fxp_shutdown(device_t dev);
+#ifndef __rtems__
+static int fxp_suspend(device_t dev);
+static int fxp_resume(device_t dev);
+#endif
+
+static void fxp_intr(void *xsc);
+static void fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp,
+ uint8_t statack, int count);
+static void fxp_init(void *xsc);
+static void fxp_init_body(struct fxp_softc *sc);
+static void fxp_tick(void *xsc);
+static void fxp_start(struct ifnet *ifp);
+static void fxp_start_body(struct ifnet *ifp);
+static int fxp_encap(struct fxp_softc *sc, struct mbuf *m_head);
+static void fxp_stop(struct fxp_softc *sc);
+static void fxp_release(struct fxp_softc *sc);
+#ifndef __rtems__
+static int fxp_ioctl(struct ifnet *ifp, u_long command,
+ caddr_t data);
+#else
+static int fxp_ioctl(struct ifnet *ifp, ioctl_command_t command,
+ caddr_t data);
+#endif
+static void fxp_watchdog(struct fxp_softc *sc);
+static int fxp_add_rfabuf(struct fxp_softc *sc,
+ struct fxp_rx *rxp);
+static int fxp_mc_addrs(struct fxp_softc *sc);
+static void fxp_mc_setup(struct fxp_softc *sc);
+static uint16_t fxp_eeprom_getword(struct fxp_softc *sc, int offset,
+ int autosize);
+static void fxp_eeprom_putword(struct fxp_softc *sc, int offset,
+ uint16_t data);
+static void fxp_autosize_eeprom(struct fxp_softc *sc);
+static void fxp_read_eeprom(struct fxp_softc *sc, u_short *data,
+ int offset, int words);
+static void fxp_write_eeprom(struct fxp_softc *sc, u_short *data,
+ int offset, int words);
+#ifndef __rtems__
+static int fxp_ifmedia_upd(struct ifnet *ifp);
+static void fxp_ifmedia_sts(struct ifnet *ifp,
+ struct ifmediareq *ifmr);
+#endif
+static int fxp_serial_ifmedia_upd(struct ifnet *ifp);
+static void fxp_serial_ifmedia_sts(struct ifnet *ifp,
+ struct ifmediareq *ifmr);
+static int fxp_miibus_readreg(device_t dev, int phy, int reg);
+static void fxp_miibus_writereg(device_t dev, int phy, int reg,
+ int value);
+static void fxp_load_ucode(struct fxp_softc *sc);
+#ifndef RTEMS_SYSCTL_NOTYETSUP
+static int sysctl_int_range(SYSCTL_HANDLER_ARGS,
+ int low, int high);
+static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
+static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
+#endif
+static void fxp_scb_wait(struct fxp_softc *sc);
+static void fxp_scb_cmd(struct fxp_softc *sc, int cmd);
+static void fxp_dma_wait(struct fxp_softc *sc,
+ volatile uint16_t *status, bus_dma_tag_t dmat,
+ bus_dmamap_t map);
+
+#ifdef __rtems__
+
+static int
+fxp_irq_check_dis(device_t d)
+{
+struct fxp_softc *sc = device_get_softc(d);
+uint8_t statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
+
+ if ( statack && 0xff != statack ) {
+ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
+ return FILTER_HANDLED;
+ }
+
+ return FILTER_STRAY;
+}
+
+static void
+fxp_irq_en(device_t d)
+{
+struct fxp_softc *sc = device_get_softc(d);
+ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
+}
+
+static device_method_t fxp_methods = {
+ probe: fxp_probe,
+ attach: fxp_attach,
+ shutdown: (void (*)(device_t))fxp_shutdown,
+ detach: fxp_detach,
+ irq_check_dis: fxp_irq_check_dis,
+ irq_en: fxp_irq_en,
+};
+
+driver_t libbsdport_fxp_driver = {
+ "fxp",
+ &fxp_methods,
+ DEV_TYPE_PCI,
+ sizeof(struct fxp_softc)
+};
+
+static int mdio_r(int phy, void *uarg, unsigned reg, uint32_t *pval);
+static int mdio_w(int phy, void *uarg, unsigned reg, uint32_t data);
+
+struct rtems_mdio_info fxp_mdio = {
+ mdio_r: mdio_r,
+ mdio_w: mdio_w,
+ has_gmii: 0
+};
+#else
+static device_method_t fxp_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, fxp_probe),
+ DEVMETHOD(device_attach, fxp_attach),
+ DEVMETHOD(device_detach, fxp_detach),
+ DEVMETHOD(device_shutdown, fxp_shutdown),
+ DEVMETHOD(device_suspend, fxp_suspend),
+ DEVMETHOD(device_resume, fxp_resume),
+
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, fxp_miibus_readreg),
+ DEVMETHOD(miibus_writereg, fxp_miibus_writereg),
+
+ { 0, 0 }
+};
+
+static driver_t fxp_driver = {
+ "fxp",
+ fxp_methods,
+ sizeof(struct fxp_softc),
+};
+
+static devclass_t fxp_devclass;
+
+DRIVER_MODULE(fxp, pci, fxp_driver, fxp_devclass, 0, 0);
+DRIVER_MODULE(fxp, cardbus, fxp_driver, fxp_devclass, 0, 0);
+DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0);
+#endif
+
+static struct resource_spec fxp_res_spec_mem[] = {
+ { SYS_RES_MEMORY, FXP_PCI_MMBA, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
+ { -1, 0 }
+};
+
+static struct resource_spec fxp_res_spec_io[] = {
+ { SYS_RES_IOPORT, FXP_PCI_IOBA, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
+ { -1, 0 }
+};
+
+/*
+ * Wait for the previous command to be accepted (but not necessarily
+ * completed).
+ */
+static void
+fxp_scb_wait(struct fxp_softc *sc)
+{
+ union {
+ uint16_t w;
+ uint8_t b[2];
+ } flowctl;
+ int i = 10000;
+
+ while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
+ DELAY(2);
+ if (i == 0) {
+ flowctl.b[0] = CSR_READ_1(sc, FXP_CSR_FLOWCONTROL);
+ flowctl.b[1] = CSR_READ_1(sc, FXP_CSR_FLOWCONTROL + 1);
+ device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n",
+ CSR_READ_1(sc, FXP_CSR_SCB_COMMAND),
+ CSR_READ_1(sc, FXP_CSR_SCB_STATACK),
+ CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS), flowctl.w);
+ }
+}
+
+static void
+fxp_scb_cmd(struct fxp_softc *sc, int cmd)
+{
+
+ if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) {
+ CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
+ fxp_scb_wait(sc);
+ }
+ CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
+}
+
+static void
+fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status,
+ bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ int i = 10000;
+
+ bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
+ while (!(le16toh(*status) & FXP_CB_STATUS_C) && --i) {
+ DELAY(2);
+ bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
+ }
+ if (i == 0)
+ device_printf(sc->dev, "DMA timeout\n");
+}
+
+/*
+ * Return identification string if this device is ours.
+ */
+static int
+fxp_probe(device_t dev)
+{
+ uint16_t devid;
+ uint8_t revid;
+ struct fxp_ident *ident;
+
+ if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) {
+ devid = pci_get_device(dev);
+ revid = pci_get_revid(dev);
+ for (ident = fxp_ident_table; ident->name != NULL; ident++) {
+ if (ident->devid == devid &&
+ (ident->revid == revid || ident->revid == -1)) {
+ device_set_desc(dev, ident->name);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+ }
+ return (ENXIO);
+}
+
+static void
+fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ uint32_t *addr;
+
+ if (error)
+ return;
+
+ KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
+ addr = arg;
+ *addr = segs->ds_addr;
+}
+
+static int
+fxp_attach(device_t dev)
+{
+ struct fxp_softc *sc;
+ struct fxp_cb_tx *tcbp;
+ struct fxp_tx *txp;
+ struct fxp_rx *rxp;
+ struct ifnet *ifp;
+ uint32_t val;
+ uint16_t data, myea[ETHER_ADDR_LEN / 2];
+ u_char eaddr[ETHER_ADDR_LEN];
+ int i, prefer_iomap;
+ int error;
+
+ error = 0;
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
+ MTX_DEF);
+ callout_init_mtx(&sc->stat_ch, &sc->sc_mtx, 0);
+ ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd,
+ fxp_serial_ifmedia_sts);
+
+ ifp = sc->ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(dev, "can not if_alloc()\n");
+ error = ENOSPC;
+ goto fail;
+ }
+
+ /*
+ * Enable bus mastering.
+ */
+ pci_enable_busmaster(dev);
+ val = pci_read_config(dev, PCIR_COMMAND, 2);
+
+ /*
+ * Figure out which we should try first - memory mapping or i/o mapping?
+ * We default to memory mapping. Then we accept an override from the
+ * command line. Then we check to see which one is enabled.
+ */
+ prefer_iomap = 0;
+ resource_int_value(device_get_name(dev), device_get_unit(dev),
+ "prefer_iomap", &prefer_iomap);
+ if (prefer_iomap)
+ sc->fxp_spec = fxp_res_spec_io;
+ else
+ sc->fxp_spec = fxp_res_spec_mem;
+
+ error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res);
+ if (error) {
+ if (sc->fxp_spec == fxp_res_spec_mem)
+ sc->fxp_spec = fxp_res_spec_io;
+ else
+ sc->fxp_spec = fxp_res_spec_mem;
+ error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res);
+ }
+ if (error) {
+ device_printf(dev, "could not allocate resources\n");
+ error = ENXIO;
+ goto fail;
+ }
+
+ if (bootverbose) {
+ device_printf(dev, "using %s space register mapping\n",
+ sc->fxp_spec == fxp_res_spec_mem ? "memory" : "I/O");
+ }
+
+ /*
+ * Reset to a stable state.
+ */
+ CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
+ DELAY(10);
+
+ /*
+ * Find out how large of an SEEPROM we have.
+ */
+ fxp_autosize_eeprom(sc);
+
+ /*
+ * Find out the chip revision; lump all 82557 revs together.
+ */
+ fxp_read_eeprom(sc, &data, 5, 1);
+ if ((data >> 8) == 1)
+ sc->revision = FXP_REV_82557;
+ else
+ sc->revision = pci_get_revid(dev);
+
+ /*
+ * Determine whether we must use the 503 serial interface.
+ */
+ fxp_read_eeprom(sc, &data, 6, 1);
+ if (sc->revision == FXP_REV_82557 && (data & FXP_PHY_DEVICE_MASK) != 0
+ && (data & FXP_PHY_SERIAL_ONLY))
+ sc->flags |= FXP_FLAG_SERIAL_MEDIA;
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW,
+ &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
+ "FXP driver receive interrupt microcode bundling delay");
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW,
+ &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I",
+ "FXP driver receive interrupt microcode bundle size limit");
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "rnr", CTLFLAG_RD, &sc->rnr, 0,
+ "FXP RNR events");
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "noflow", CTLFLAG_RW, &sc->tunable_noflow, 0,
+ "FXP flow control disabled");
+
+ /*
+ * Pull in device tunables.
+ */
+ sc->tunable_int_delay = TUNABLE_INT_DELAY;
+ sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
+ sc->tunable_noflow = 1;
+ (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
+ "int_delay", &sc->tunable_int_delay);
+ (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
+ "bundle_max", &sc->tunable_bundle_max);
+ (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
+ "noflow", &sc->tunable_noflow);
+ sc->rnr = 0;
+
+ /*
+ * Enable workarounds for certain chip revision deficiencies.
+ *
+ * Systems based on the ICH2/ICH2-M chip from Intel, and possibly
+ * some systems based a normal 82559 design, have a defect where
+ * the chip can cause a PCI protocol violation if it receives
+ * a CU_RESUME command when it is entering the IDLE state. The
+ * workaround is to disable Dynamic Standby Mode, so the chip never
+ * deasserts CLKRUN#, and always remains in an active state.
+ *
+ * See Intel 82801BA/82801BAM Specification Update, Errata #30.
+ */
+ i = pci_get_device(dev);
+ if (i == 0x2449 || (i > 0x1030 && i < 0x1039) ||
+ sc->revision >= FXP_REV_82559_A0) {
+ fxp_read_eeprom(sc, &data, 10, 1);
+ if (data & 0x02) { /* STB enable */
+ uint16_t cksum;
+ int i;
+
+ device_printf(dev,
+ "Disabling dynamic standby mode in EEPROM\n");
+ data &= ~0x02;
+ fxp_write_eeprom(sc, &data, 10, 1);
+ device_printf(dev, "New EEPROM ID: 0x%x\n", data);
+ cksum = 0;
+ for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
+ fxp_read_eeprom(sc, &data, i, 1);
+ cksum += data;
+ }
+ i = (1 << sc->eeprom_size) - 1;
+ cksum = 0xBABA - cksum;
+ fxp_read_eeprom(sc, &data, i, 1);
+ fxp_write_eeprom(sc, &cksum, i, 1);
+ device_printf(dev,
+ "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n",
+ i, data, cksum);
+#if 1
+ /*
+ * If the user elects to continue, try the software
+ * workaround, as it is better than nothing.
+ */
+ sc->flags |= FXP_FLAG_CU_RESUME_BUG;
+#endif
+ }
+ }
+
+ /*
+ * If we are not a 82557 chip, we can enable extended features.
+ */
+ if (sc->revision != FXP_REV_82557) {
+ /*
+ * If MWI is enabled in the PCI configuration, and there
+ * is a valid cacheline size (8 or 16 dwords), then tell
+ * the board to turn on MWI.
+ */
+ if (val & PCIM_CMD_MWRICEN &&
+ pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0)
+ sc->flags |= FXP_FLAG_MWI_ENABLE;
+
+ /* turn on the extended TxCB feature */
+ sc->flags |= FXP_FLAG_EXT_TXCB;
+
+ /* enable reception of long frames for VLAN */
+ sc->flags |= FXP_FLAG_LONG_PKT_EN;
+ } else {
+ /* a hack to get long VLAN frames on a 82557 */
+ sc->flags |= FXP_FLAG_SAVE_BAD;
+ }
+
+ /*
+ * Enable use of extended RFDs and TCBs for 82550
+ * and later chips. Note: we need extended TXCB support
+ * too, but that's already enabled by the code above.
+ * Be careful to do this only on the right devices.
+ */
+ if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C ||
+ sc->revision == FXP_REV_82551_E || sc->revision == FXP_REV_82551_F
+ || sc->revision == FXP_REV_82551_10) {
+ sc->rfa_size = sizeof (struct fxp_rfa);
+ sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT;
+ sc->flags |= FXP_FLAG_EXT_RFA;
+ } else {
+ sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN;
+ sc->tx_cmd = FXP_CB_COMMAND_XMIT;
+ }
+
+ /*
+ * Allocate DMA tags and DMA safe memory.
+ */
+ sc->maxtxseg = FXP_NTXSEG;
+ if (sc->flags & FXP_FLAG_EXT_RFA)
+ sc->maxtxseg--;
+ error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
+ BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+ MCLBYTES * sc->maxtxseg, sc->maxtxseg, MCLBYTES, 0,
+ busdma_lock_mutex, &Giant, &sc->fxp_mtag);
+ if (error) {
+ device_printf(dev, "could not allocate dma tag\n");
+ goto fail;
+ }
+
+ error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
+ BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+ sizeof(struct fxp_stats), 1, sizeof(struct fxp_stats), 0,
+ busdma_lock_mutex, &Giant, &sc->fxp_stag);
+ if (error) {
+ device_printf(dev, "could not allocate dma tag\n");
+ goto fail;
+ }
+
+ error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fxp_smap);
+ if (error)
+ goto fail;
+ error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats,
+ sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr, 0);
+ if (error) {
+ device_printf(dev, "could not map the stats buffer\n");
+ goto fail;
+ }
+
+ error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
+ BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+ FXP_TXCB_SZ, 1, FXP_TXCB_SZ, 0,
+ busdma_lock_mutex, &Giant, &sc->cbl_tag);
+ if (error) {
+ device_printf(dev, "could not allocate dma tag\n");
+ goto fail;
+ }
+
+ error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->cbl_map);
+ if (error)
+ goto fail;
+
+ error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map,
+ sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr,
+ &sc->fxp_desc.cbl_addr, 0);
+ if (error) {
+ device_printf(dev, "could not map DMA memory\n");
+ goto fail;
+ }
+
+ error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
+ BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+ sizeof(struct fxp_cb_mcs), 1, sizeof(struct fxp_cb_mcs), 0,
+ busdma_lock_mutex, &Giant, &sc->mcs_tag);
+ if (error) {
+ device_printf(dev, "could not allocate dma tag\n");
+ goto fail;
+ }
+
+ error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp,
+ BUS_DMA_NOWAIT, &sc->mcs_map);
+ if (error)
+ goto fail;
+ error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp,
+ sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr, 0);
+ if (error) {
+ device_printf(dev, "can't map the multicast setup command\n");
+ goto fail;
+ }
+
+ /*
+ * Pre-allocate the TX DMA maps and setup the pointers to
+ * the TX command blocks.
+ */
+ txp = sc->fxp_desc.tx_list;
+ tcbp = sc->fxp_desc.cbl_list;
+ for (i = 0; i < FXP_NTXCB; i++) {
+ txp[i].tx_cb = tcbp + i;
+ error = bus_dmamap_create(sc->fxp_mtag, 0, &txp[i].tx_map);
+ if (error) {
+ device_printf(dev, "can't create DMA map for TX\n");
+ goto fail;
+ }
+ }
+ error = bus_dmamap_create(sc->fxp_mtag, 0, &sc->spare_map);
+ if (error) {
+ device_printf(dev, "can't create spare DMA map\n");
+ goto fail;
+ }
+
+ /*
+ * Pre-allocate our receive buffers.
+ */
+ sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
+ for (i = 0; i < FXP_NRFABUFS; i++) {
+ rxp = &sc->fxp_desc.rx_list[i];
+ error = bus_dmamap_create(sc->fxp_mtag, 0, &rxp->rx_map);
+ if (error) {
+ device_printf(dev, "can't create DMA map for RX\n");
+ goto fail;
+ }
+ if (fxp_add_rfabuf(sc, rxp) != 0) {
+ error = ENOMEM;
+ goto fail;
+ }
+ }
+
+ /*
+ * Read MAC address.
+ */
+ fxp_read_eeprom(sc, myea, 0, 3);
+ eaddr[0] = myea[0] & 0xff;
+ eaddr[1] = myea[0] >> 8;
+ eaddr[2] = myea[1] & 0xff;
+ eaddr[3] = myea[1] >> 8;
+ eaddr[4] = myea[2] & 0xff;
+ eaddr[5] = myea[2] >> 8;
+ if (bootverbose) {
+ device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n",
+ pci_get_vendor(dev), pci_get_device(dev),
+ pci_get_subvendor(dev), pci_get_subdevice(dev),
+ pci_get_revid(dev));
+ fxp_read_eeprom(sc, &data, 10, 1);
+ device_printf(dev, "Dynamic Standby mode is %s\n",
+ data & 0x02 ? "enabled" : "disabled");
+ }
+
+ /*
+ * If this is only a 10Mbps device, then there is no MII, and
+ * the PHY will use a serial interface instead.
+ *
+ * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
+ * doesn't have a programming interface of any sort. The
+ * media is sensed automatically based on how the link partner
+ * is configured. This is, in essence, manual configuration.
+ */
+ if (sc->flags & FXP_FLAG_SERIAL_MEDIA) {
+ ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
+ ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
+#ifdef __rtems__
+ sc->phyidx = -1;
+#endif
+ } else {
+#ifndef __rtems__
+ if (mii_phy_probe(dev, &sc->miibus, fxp_ifmedia_upd,
+ fxp_ifmedia_sts)) {
+ device_printf(dev, "MII without any PHY!\n");
+ error = ENXIO;
+ goto fail;
+ }
+#else
+ sc->phyidx = -2;
+ sc->phyidx = rtems_mii_phy_probe(&fxp_mdio, sc);
+ if ( sc->phyidx < 0 ) {
+ device_printf(dev, "MII without any PHY!\n");
+ error = ENXIO;
+ goto fail;
+ }
+#endif
+ }
+
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ ifp->if_init = fxp_init;
+ ifp->if_softc = sc;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = fxp_ioctl;
+ ifp->if_start = fxp_start;
+
+#ifndef __rtems__
+ ifp->if_capabilities = ifp->if_capenable = 0;
+
+ /* Enable checksum offload for 82550 or better chips */
+ if (sc->flags & FXP_FLAG_EXT_RFA) {
+ ifp->if_hwassist = FXP_CSUM_FEATURES;
+ ifp->if_capabilities |= IFCAP_HWCSUM;
+ ifp->if_capenable |= IFCAP_HWCSUM;
+ }
+
+#ifdef DEVICE_POLLING
+ /* Inform the world we support polling. */
+ ifp->if_capabilities |= IFCAP_POLLING;
+#endif
+#endif
+
+ /*
+ * Attach the interface.
+ */
+ ether_ifattach(ifp, eaddr);
+
+#ifndef __rtems__
+ /*
+ * Tell the upper layer(s) we support long frames.
+ * Must appear after the call to ether_ifattach() because
+ * ether_ifattach() sets ifi_hdrlen to the default value.
+ */
+ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+ ifp->if_capabilities |= IFCAP_VLAN_MTU;
+ ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */
+#endif
+
+ /*
+ * Let the system queue as many packets as we have available
+ * TX descriptors.
+ */
+ IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1);
+ ifp->if_snd.ifq_drv_maxlen = FXP_NTXCB - 1;
+ IFQ_SET_READY(&ifp->if_snd);
+
+ /*
+ * Hook our interrupt after all initialization is complete.
+ */
+ error = bus_setup_intr(dev, sc->fxp_res[1], INTR_TYPE_NET | INTR_MPSAFE,
+ NULL, fxp_intr, sc, &sc->ih);
+ if (error) {
+ device_printf(dev, "could not setup irq\n");
+ ether_ifdetach(sc->ifp);
+ goto fail;
+ }
+
+fail:
+ if (error)
+ fxp_release(sc);
+ return (error);
+}
+
+/*
+ * Release all resources. The softc lock should not be held and the
+ * interrupt should already be torn down.
+ */
+static void
+fxp_release(struct fxp_softc *sc)
+{
+ struct fxp_rx *rxp;
+ struct fxp_tx *txp;
+ int i;
+
+ FXP_LOCK_ASSERT(sc, MA_NOTOWNED);
+ KASSERT(sc->ih == NULL,
+ ("fxp_release() called with intr handle still active"));
+ /* Detach and delete the miibus child (if attached) first. */
+ if (sc->miibus)
+ device_delete_child(sc->dev, sc->miibus);
+ bus_generic_detach(sc->dev);
+ ifmedia_removeall(&sc->sc_media);
+ /*
+ * Unload and free the fixed-size DMA regions: the TxCB list,
+ * the statistics dump buffer and the multicast setup command.
+ */
+ if (sc->fxp_desc.cbl_list) {
+ bus_dmamap_unload(sc->cbl_tag, sc->cbl_map);
+ bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list,
+ sc->cbl_map);
+ }
+ if (sc->fxp_stats) {
+ bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap);
+ bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap);
+ }
+ if (sc->mcsp) {
+ bus_dmamap_unload(sc->mcs_tag, sc->mcs_map);
+ bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map);
+ }
+ bus_release_resources(sc->dev, sc->fxp_spec, sc->fxp_res);
+ /*
+ * A valid mbuf tag implies the per-buffer RX/TX maps were
+ * created: sync, unload and free any mbufs still attached to
+ * a descriptor slot before destroying the maps and the tag.
+ */
+ if (sc->fxp_mtag) {
+ for (i = 0; i < FXP_NRFABUFS; i++) {
+ rxp = &sc->fxp_desc.rx_list[i];
+ if (rxp->rx_mbuf != NULL) {
+ bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
+ m_freem(rxp->rx_mbuf);
+ }
+ bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
+ }
+ bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);
+ for (i = 0; i < FXP_NTXCB; i++) {
+ txp = &sc->fxp_desc.tx_list[i];
+ if (txp->tx_mbuf != NULL) {
+ bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
+ m_freem(txp->tx_mbuf);
+ }
+ bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
+ }
+ bus_dma_tag_destroy(sc->fxp_mtag);
+ }
+ if (sc->fxp_stag)
+ bus_dma_tag_destroy(sc->fxp_stag);
+ if (sc->cbl_tag)
+ bus_dma_tag_destroy(sc->cbl_tag);
+ if (sc->mcs_tag)
+ bus_dma_tag_destroy(sc->mcs_tag);
+ if (sc->ifp)
+ if_free(sc->ifp);
+
+ mtx_destroy(&sc->sc_mtx);
+}
+
+/*
+ * Detach interface.
+ */
+static int
+fxp_detach(device_t dev)
+{
+ struct fxp_softc *sc = device_get_softc(dev);
+
+#ifndef __rtems__
+#ifdef DEVICE_POLLING
+ if (sc->ifp->if_capenable & IFCAP_POLLING)
+ ether_poll_deregister(sc->ifp);
+#endif
+#endif
+
+ FXP_LOCK(sc);
+ sc->suspended = 1; /* Do same thing as we do for suspend */
+ /*
+ * Stop DMA and drop transmit queue, but disable interrupts first.
+ */
+ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
+ fxp_stop(sc);
+ FXP_UNLOCK(sc);
+ /* Wait for a concurrently running fxp_tick() to finish. */
+ callout_drain(&sc->stat_ch);
+
+ /*
+ * Close down routes etc.
+ */
+ ether_ifdetach(sc->ifp);
+
+ /*
+ * Unhook interrupt before dropping lock. This is to prevent
+ * races with fxp_intr().
+ */
+ bus_teardown_intr(sc->dev, sc->fxp_res[1], sc->ih);
+ sc->ih = NULL; /* fxp_release() asserts this is NULL */
+
+ /* Release our allocated resources. */
+ fxp_release(sc);
+ return (0);
+}
+
+/*
+ * Device shutdown routine. Called at system shutdown after sync. The
+ * main purpose of this routine is to shut off receiver DMA so that
+ * kernel memory doesn't get clobbered during warmboot.
+ */
+static int
+fxp_shutdown(device_t dev)
+{
+ struct fxp_softc *sc = device_get_softc(dev);
+
+ /*
+ * Make sure that DMA is disabled prior to reboot. Not doing
+ * so could allow DMA to corrupt kernel memory during the
+ * reboot before the driver initializes.
+ */
+ FXP_LOCK(sc);
+ fxp_stop(sc);
+ FXP_UNLOCK(sc);
+ return (0);
+}
+
+#ifndef __rtems__
+/*
+ * Device suspend routine. Stop the interface and save some PCI
+ * settings in case the BIOS doesn't restore them properly on
+ * resume.
+ */
+static int
+fxp_suspend(device_t dev)
+{
+ struct fxp_softc *sc = device_get_softc(dev);
+
+ FXP_LOCK(sc);
+
+ fxp_stop(sc);
+
+ /* fxp_intr() checks this flag and ignores interrupts while set. */
+ sc->suspended = 1;
+
+ FXP_UNLOCK(sc);
+ return (0);
+}
+
+/*
+ * Device resume routine. re-enable busmastering, and restart the interface if
+ * appropriate.
+ */
+static int
+fxp_resume(device_t dev)
+{
+ struct fxp_softc *sc = device_get_softc(dev);
+ struct ifnet *ifp = sc->ifp;
+
+ FXP_LOCK(sc);
+
+ /* Selective reset leaves PCI configuration intact. */
+ CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
+ DELAY(10);
+
+ /* reinitialize interface if necessary */
+ if (ifp->if_flags & IFF_UP)
+ fxp_init_body(sc);
+
+ sc->suspended = 0;
+
+ FXP_UNLOCK(sc);
+ return (0);
+}
+#endif
+
+/*
+ * Clock 'length' bits of 'data' into the serial EEPROM, most
+ * significant bit first. Each bit is presented on EEDI and latched
+ * by pulsing the serial clock (EESK) while chip select (EECS) is
+ * held asserted by the caller.
+ */
+static void
+fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
+{
+ uint16_t reg;
+ int x;
+
+ /*
+ * Shift in data.
+ */
+ for (x = 1 << (length - 1); x; x >>= 1) {
+ if (data & x)
+ reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
+ else
+ reg = FXP_EEPROM_EECS;
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
+ DELAY(1);
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
+ DELAY(1);
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
+ DELAY(1);
+ }
+}
+
+/*
+ * Read from the serial EEPROM. Basically, you manually shift in
+ * the read opcode (one bit at a time) and then shift in the address,
+ * and then you shift out the data (all of this one bit at a time).
+ * The word size is 16 bits, so you have to provide the address for
+ * every 16 bits of data.
+ *
+ * When 'autosize' is non-zero the routine additionally detects the
+ * EEPROM's address width (see fxp_autosize_eeprom()).
+ */
+static uint16_t
+fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
+{
+ uint16_t reg, data;
+ int x;
+
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
+ /*
+ * Shift in read opcode.
+ */
+ fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
+ /*
+ * Shift in address.
+ * NOTE: 'data' is reused here as a count of address bits shifted
+ * so far; during autosize the EEPROM drives EEDO low after the
+ * last address bit, which reveals the true address width.
+ */
+ data = 0;
+ for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
+ if (offset & x)
+ reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
+ else
+ reg = FXP_EEPROM_EECS;
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
+ DELAY(1);
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
+ DELAY(1);
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
+ DELAY(1);
+ reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
+ data++;
+ if (autosize && reg == 0) {
+ sc->eeprom_size = data;
+ break;
+ }
+ }
+ /*
+ * Shift out data.
+ */
+ data = 0;
+ reg = FXP_EEPROM_EECS;
+ for (x = 1 << 15; x; x >>= 1) {
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
+ DELAY(1);
+ if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
+ data |= x;
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
+ DELAY(1);
+ }
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
+ DELAY(1);
+
+ return (data);
+}
+
+/*
+ * Write one 16-bit word to the serial EEPROM at 'offset'. The
+ * sequence is: erase/write enable (EWEN), the write opcode with
+ * address and data, a poll of EEDO for write completion, then
+ * erase/write disable (EWDS).
+ */
+static void
+fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data)
+{
+ int i;
+
+ /*
+ * Erase/write enable.
+ */
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
+ fxp_eeprom_shiftin(sc, 0x4, 3);
+ fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
+ DELAY(1);
+ /*
+ * Shift in write opcode, address, data.
+ */
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
+ fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
+ fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
+ fxp_eeprom_shiftin(sc, data, 16);
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
+ DELAY(1);
+ /*
+ * Wait for EEPROM to finish up.
+ * Bounded poll (about 50ms worst case) so a dead part cannot
+ * hang the driver.
+ */
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
+ DELAY(1);
+ for (i = 0; i < 1000; i++) {
+ if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
+ break;
+ DELAY(50);
+ }
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
+ DELAY(1);
+ /*
+ * Erase/write disable.
+ */
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
+ fxp_eeprom_shiftin(sc, 0x4, 3);
+ fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
+ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
+ DELAY(1);
+}
+
+/*
+ * From NetBSD:
+ *
+ * Figure out EEPROM size.
+ *
+ * 559's can have either 64-word or 256-word EEPROMs, the 558
+ * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
+ * talks about the existance of 16 to 256 word EEPROMs.
+ *
+ * The only known sizes are 64 and 256, where the 256 version is used
+ * by CardBus cards to store CIS information.
+ *
+ * The address is shifted in msb-to-lsb, and after the last
+ * address-bit the EEPROM is supposed to output a `dummy zero' bit,
+ * after which follows the actual data. We try to detect this zero, by
+ * probing the data-out bit in the EEPROM control register just after
+ * having shifted in a bit. If the bit is zero, we assume we've
+ * shifted enough address bits. The data-out should be tri-state,
+ * before this, which should translate to a logical one.
+ */
+static void
+fxp_autosize_eeprom(struct fxp_softc *sc)
+{
+
+ /* guess maximum size of 256 words (8 address bits == 256 words) */
+ sc->eeprom_size = 8;
+
+ /* autosize: fxp_eeprom_getword() shrinks eeprom_size to fit */
+ (void) fxp_eeprom_getword(sc, 0, 1);
+}
+
+/*
+ * Read 'words' consecutive 16-bit words from the EEPROM starting at
+ * word 'offset' into 'data'.
+ */
+static void
+fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
+{
+ int i;
+
+ for (i = 0; i < words; i++)
+ data[i] = fxp_eeprom_getword(sc, offset + i, 0);
+}
+
+/*
+ * Write 'words' consecutive 16-bit words from 'data' to the EEPROM
+ * starting at word 'offset'.
+ */
+static void
+fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
+{
+ int i;
+
+ for (i = 0; i < words; i++)
+ fxp_eeprom_putword(sc, offset + i, data[i]);
+}
+
+/*
+ * Grab the softc lock and call the real fxp_start_body() routine.
+ * This is the if_start entry point installed in fxp_attach().
+ */
+static void
+fxp_start(struct ifnet *ifp)
+{
+ struct fxp_softc *sc = ifp->if_softc;
+
+ FXP_LOCK(sc);
+ fxp_start_body(ifp);
+ FXP_UNLOCK(sc);
+}
+
+/*
+ * Start packet transmission on the interface.
+ * This routine must be called with the softc lock held, and is an
+ * internal entry point only.
+ */
+static void
+fxp_start_body(struct ifnet *ifp)
+{
+ struct fxp_softc *sc = ifp->if_softc;
+ struct mbuf *mb_head;
+ int error, txqueued;
+
+ FXP_LOCK_ASSERT(sc, MA_OWNED);
+
+ /*
+ * See if we need to suspend xmit until the multicast filter
+ * has been reprogrammed (which can only be done at the head
+ * of the command chain).
+ */
+ if (sc->need_mcsetup)
+ return;
+
+ /*
+ * We're finished if there is nothing more to add to the list or if
+ * we're all filled up with buffers to transmit.
+ * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
+ * a NOP command when needed.
+ */
+ txqueued = 0;
+ while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
+ sc->tx_queued < FXP_NTXCB - 1) {
+
+ /*
+ * Grab a packet to transmit.
+ */
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);
+ if (mb_head == NULL)
+ break;
+
+ error = fxp_encap(sc, mb_head);
+ if (error)
+ break;
+ /* remember that at least one frame was queued */
+ txqueued = 1;
+ }
+ /* Push the newly filled TxCBs out to chip-visible memory. */
+ bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
+
+ /*
+ * We're finished. If we added to the list, issue a RESUME to get DMA
+ * going again if suspended.
+ */
+ if (txqueued) {
+ fxp_scb_wait(sc);
+ fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
+ }
+}
+
+/*
+ * Encapsulate one mbuf chain into the next free TxCB: set up any
+ * checksum-offload control bits, defragment the chain if it has more
+ * segments than the hardware supports, DMA-map it, and fill in the
+ * TBD array.  Returns 0 on success or -1 on failure, in which case
+ * the mbuf chain has been freed.  Must be called with the softc lock
+ * held; the caller (fxp_start_body()) issues the CU RESUME.
+ */
+static int
+fxp_encap(struct fxp_softc *sc, struct mbuf *m_head)
+{
+ struct ifnet *ifp;
+ struct mbuf *m;
+ struct fxp_tx *txp;
+ struct fxp_cb_tx *cbp;
+ bus_dma_segment_t segs[FXP_NTXSEG];
+ int chainlen, error, i, nseg;
+
+ FXP_LOCK_ASSERT(sc, MA_OWNED);
+ ifp = sc->ifp;
+
+ /*
+ * Get pointer to next available tx desc.
+ */
+ txp = sc->fxp_desc.tx_last->tx_next;
+
+ /*
+ * A note in Appendix B of the Intel 8255x 10/100 Mbps
+ * Ethernet Controller Family Open Source Software
+ * Developer Manual says:
+ * Using software parsing is only allowed with legal
+ * TCP/IP or UDP/IP packets.
+ * ...
+ * For all other datagrams, hardware parsing must
+ * be used.
+ * Software parsing appears to truncate ICMP and
+ * fragmented UDP packets that contain one to three
+ * bytes in the second (and final) mbuf of the packet.
+ */
+ if (sc->flags & FXP_FLAG_EXT_RFA)
+ txp->tx_cb->ipcb_ip_activation_high =
+ FXP_IPCB_HARDWAREPARSING_ENABLE;
+
+#ifndef __rtems__
+ /*
+ * Deal with TCP/IP checksum offload. Note that
+ * in order for TCP checksum offload to work,
+ * the pseudo header checksum must have already
+ * been computed and stored in the checksum field
+ * in the TCP header. The stack should have
+ * already done this for us.
+ */
+ if (m_head->m_pkthdr.csum_flags) {
+ if (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
+ txp->tx_cb->ipcb_ip_schedule =
+ FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
+ if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
+ txp->tx_cb->ipcb_ip_schedule |=
+ FXP_IPCB_TCP_PACKET;
+ }
+
+#ifdef FXP_IP_CSUM_WAR
+ /*
+ * XXX The 82550 chip appears to have trouble
+ * dealing with IP header checksums in very small
+ * datagrams, namely fragments from 1 to 3 bytes
+ * in size. For example, say you want to transmit
+ * a UDP packet of 1473 bytes. The packet will be
+ * fragmented over two IP datagrams, the latter
+ * containing only one byte of data. The 82550 will
+ * botch the header checksum on the 1-byte fragment.
+ * As long as the datagram contains 4 or more bytes
+ * of data, you're ok.
+ *
+ * The following code attempts to work around this
+ * problem: if the datagram is less than 38 bytes
+ * in size (14 bytes ether header, 20 bytes IP header,
+ * plus 4 bytes of data), we punt and compute the IP
+ * header checksum by hand. This workaround doesn't
+ * work very well, however, since it can be fooled
+ * by things like VLAN tags and IP options that make
+ * the header sizes/offsets vary.
+ */
+
+ if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
+ if (m_head->m_pkthdr.len < 38) {
+ struct ip *ip;
+ m_head->m_data += ETHER_HDR_LEN;
+ /*
+ * Bug fix: this block previously referenced
+ * 'mb_head', which does not exist in this
+ * function (the parameter is 'm_head'), so it
+ * failed to compile whenever FXP_IP_CSUM_WAR
+ * was defined.
+ */
+ ip = mtod(m_head, struct ip *);
+ ip->ip_sum = in_cksum(m_head, ip->ip_hl << 2);
+ m_head->m_data -= ETHER_HDR_LEN;
+ } else {
+ txp->tx_cb->ipcb_ip_activation_high =
+ FXP_IPCB_HARDWAREPARSING_ENABLE;
+ txp->tx_cb->ipcb_ip_schedule |=
+ FXP_IPCB_IP_CHECKSUM_ENABLE;
+ }
+ }
+#endif
+ }
+#endif
+
+ /*
+ * Count mbufs in the chain; if it exceeds the number of DMA
+ * segments the hardware can take, defragment it first.
+ */
+ chainlen = 0;
+ for (m = m_head; m != NULL && chainlen <= sc->maxtxseg; m = m->m_next)
+ chainlen++;
+ if (chainlen > sc->maxtxseg) {
+ struct mbuf *mn;
+
+ /*
+ * We ran out of segments. We have to recopy this
+ * mbuf chain first. Bail out if we can't get the
+ * new buffers.
+ */
+ mn = m_defrag(m_head, M_DONTWAIT);
+ if (mn == NULL) {
+ m_freem(m_head);
+ return (-1);
+ } else {
+ m_head = mn;
+ }
+ }
+
+ /*
+ * Go through each of the mbufs in the chain and initialize
+ * the transmit buffer descriptors with the physical address
+ * and size of the mbuf.
+ */
+ error = bus_dmamap_load_mbuf_sg(sc->fxp_mtag, txp->tx_map,
+ m_head, segs, &nseg, 0);
+ if (error) {
+ device_printf(sc->dev, "can't map mbuf (error %d)\n", error);
+ m_freem(m_head);
+ return (-1);
+ }
+
+ KASSERT(nseg <= sc->maxtxseg, ("too many DMA segments"));
+
+ cbp = txp->tx_cb;
+ for (i = 0; i < nseg; i++) {
+ KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
+ /*
+ * If this is an 82550/82551, then we're using extended
+ * TxCBs _and_ we're using checksum offload. This means
+ * that the TxCB is really an IPCB. One major difference
+ * between the two is that with plain extended TxCBs,
+ * the bottom half of the TxCB contains two entries from
+ * the TBD array, whereas IPCBs contain just one entry:
+ * one entry (8 bytes) has been sacrificed for the TCP/IP
+ * checksum offload control bits. So to make things work
+ * right, we have to start filling in the TBD array
+ * starting from a different place depending on whether
+ * the chip is an 82550/82551 or not.
+ */
+ if (sc->flags & FXP_FLAG_EXT_RFA) {
+ cbp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr);
+ cbp->tbd[i + 1].tb_size = htole32(segs[i].ds_len);
+ } else {
+ cbp->tbd[i].tb_addr = htole32(segs[i].ds_addr);
+ cbp->tbd[i].tb_size = htole32(segs[i].ds_len);
+ }
+ }
+ cbp->tbd_number = nseg;
+
+ bus_dmamap_sync(sc->fxp_mtag, txp->tx_map, BUS_DMASYNC_PREWRITE);
+ txp->tx_mbuf = m_head;
+ txp->tx_cb->cb_status = 0;
+ txp->tx_cb->byte_count = 0;
+ if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
+ txp->tx_cb->cb_command =
+ htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
+ FXP_CB_COMMAND_S);
+ } else {
+ /* Request a completion interrupt at the threshold. */
+ txp->tx_cb->cb_command =
+ htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
+ FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
+ /*
+ * Set a 5 second timer just in case we don't hear
+ * from the card again.
+ */
+ sc->watchdog_timer = 5;
+ }
+ txp->tx_cb->tx_threshold = tx_threshold;
+
+ /*
+ * Advance the end of list forward.
+ */
+
+#ifdef __alpha__
+ /*
+ * On platforms which can't access memory in 16-bit
+ * granularities, we must prevent the card from DMA'ing
+ * up the status while we update the command field.
+ * This could cause us to overwrite the completion status.
+ * XXX This is probably bogus and we're _not_ looking
+ * for atomicity here.
+ */
+ atomic_clear_16(&sc->fxp_desc.tx_last->tx_cb->cb_command,
+ htole16(FXP_CB_COMMAND_S));
+#else
+ sc->fxp_desc.tx_last->tx_cb->cb_command &= htole16(~FXP_CB_COMMAND_S);
+#endif /*__alpha__*/
+ sc->fxp_desc.tx_last = txp;
+
+ /*
+ * Advance the beginning of the list forward if there are
+ * no other packets queued (when nothing is queued, tx_first
+ * sits on the last TxCB that was sent out).
+ */
+ if (sc->tx_queued == 0)
+ sc->fxp_desc.tx_first = txp;
+
+ sc->tx_queued++;
+
+ /*
+ * Pass packet to bpf if there is a listener.
+ */
+ BPF_MTAP(ifp, m_head);
+ return (0);
+}
+
+#ifdef DEVICE_POLLING
+static poll_handler_t fxp_poll;
+
+/*
+ * Polled-mode entry point: run up to 'count' RX completions through
+ * fxp_intr_body() without relying on the interrupt line.
+ */
+static void
+fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
+{
+ struct fxp_softc *sc = ifp->if_softc;
+ uint8_t statack;
+
+ FXP_LOCK(sc);
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ FXP_UNLOCK(sc);
+ return;
+ }
+
+ statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
+ FXP_SCB_STATACK_FR;
+ if (cmd == POLL_AND_CHECK_STATUS) {
+ uint8_t tmp;
+
+ tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
+ /* 0xff means the card is likely gone; 0 means nothing pending */
+ if (tmp == 0xff || tmp == 0) {
+ FXP_UNLOCK(sc);
+ return; /* nothing to do */
+ }
+ tmp &= ~statack;
+ /* ack what we can */
+ if (tmp != 0)
+ CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
+ statack |= tmp;
+ }
+ fxp_intr_body(sc, ifp, statack, count);
+ FXP_UNLOCK(sc);
+}
+#endif /* DEVICE_POLLING */
+
+/*
+ * Process interface interrupts.
+ */
+static void
+fxp_intr(void *xsc)
+{
+ struct fxp_softc *sc = xsc;
+ struct ifnet *ifp = sc->ifp;
+ uint8_t statack;
+
+ FXP_LOCK(sc);
+ /* fxp_suspend()/fxp_detach() set this; ignore stray interrupts. */
+ if (sc->suspended) {
+ FXP_UNLOCK(sc);
+ return;
+ }
+
+#ifndef __rtems__
+#ifdef DEVICE_POLLING
+ if (ifp->if_capenable & IFCAP_POLLING) {
+ FXP_UNLOCK(sc);
+ return;
+ }
+#endif
+#endif
+ /* Loop until no status bits remain asserted. */
+ while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
+ /*
+ * It should not be possible to have all bits set; the
+ * FXP_SCB_INTR_SWI bit always returns 0 on a read. If
+ * all bits are set, this may indicate that the card has
+ * been physically ejected, so ignore it.
+ */
+ if (statack == 0xff) {
+ FXP_UNLOCK(sc);
+ return;
+ }
+
+ /*
+ * First ACK all the interrupts in this pass.
+ */
+ CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
+ fxp_intr_body(sc, ifp, statack, -1);
+ }
+ FXP_UNLOCK(sc);
+}
+
+/*
+ * Reclaim completed transmit descriptors: walk from tx_first while
+ * the chip has set the C (complete) bit in cb_status, unloading and
+ * freeing the associated mbufs.
+ */
+static void
+fxp_txeof(struct fxp_softc *sc)
+{
+ struct fxp_tx *txp;
+
+ bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD);
+ for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
+ (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0;
+ txp = txp->tx_next) {
+ if (txp->tx_mbuf != NULL) {
+ bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
+ m_freem(txp->tx_mbuf);
+ txp->tx_mbuf = NULL;
+ /* clear this to reset csum offload bits */
+ txp->tx_cb->tbd[0].tb_addr = 0;
+ }
+ sc->tx_queued--;
+ }
+ sc->fxp_desc.tx_first = txp;
+ bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
+}
+
+/*
+ * Common interrupt/poll work loop: reclaim finished TX buffers and
+ * process received frames.  'count' limits the number of RX frames
+ * handled per call when polling; -1 means no limit (interrupt path).
+ * Called with the softc lock held.
+ */
+static void
+fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, uint8_t statack,
+ int count)
+{
+ struct mbuf *m;
+ struct fxp_rx *rxp;
+ struct fxp_rfa *rfa;
+ int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
+ int fxp_rc = 0;
+
+ FXP_LOCK_ASSERT(sc, MA_OWNED);
+ if (rnr)
+ sc->rnr++;
+#ifdef DEVICE_POLLING
+ /* Pick up a deferred RNR condition if `count' ran out last time. */
+ if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
+ sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
+ rnr = 1;
+ }
+#endif
+
+ /*
+ * Free any finished transmit mbuf chains.
+ *
+ * Handle the CNA event like a CXTNO event. It used to
+ * be that this event (control unit not ready) was not
+ * encountered, but it is now with the SMPng modifications.
+ * The exact sequence of events that occur when the interface
+ * is brought up are different now, and if this event
+ * goes unhandled, the configuration/rxfilter setup sequence
+ * can stall for several seconds. The result is that no
+ * packets go out onto the wire for about 5 to 10 seconds
+ * after the interface is ifconfig'ed for the first time.
+ */
+ if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) {
+ fxp_txeof(sc);
+
+ sc->watchdog_timer = 0;
+ if (sc->tx_queued == 0) {
+ if (sc->need_mcsetup)
+ fxp_mc_setup(sc);
+ }
+ /*
+ * Try to start more packets transmitting.
+ */
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ fxp_start_body(ifp);
+ }
+
+ /*
+ * Just return if nothing happened on the receive side.
+ */
+ if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
+ return;
+
+ /*
+ * Process receiver interrupts. If a no-resource (RNR)
+ * condition exists, get whatever packets we can and
+ * re-start the receiver.
+ *
+ * When using polling, we do not process the list to completion,
+ * so when we get an RNR interrupt we must defer the restart
+ * until we hit the last buffer with the C bit set.
+ * If we run out of cycles and rfa_headm has the C bit set,
+ * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
+ * that the info will be used in the subsequent polling cycle.
+ */
+ for (;;) {
+ rxp = sc->fxp_desc.rx_head;
+ m = rxp->rx_mbuf;
+ rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
+ RFA_ALIGNMENT_FUDGE);
+ bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
+ BUS_DMASYNC_POSTREAD);
+
+#ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
+ if (count >= 0 && count-- == 0) {
+ if (rnr) {
+ /* Defer RNR processing until the next time. */
+ sc->flags |= FXP_FLAG_DEFERRED_RNR;
+ rnr = 0;
+ }
+ break;
+ }
+#endif /* DEVICE_POLLING */
+
+ /* Stop when the chip has not completed this RFA yet. */
+ if ((le16toh(rfa->rfa_status) & FXP_RFA_STATUS_C) == 0)
+ break;
+
+ /*
+ * Advance head forward.
+ */
+ sc->fxp_desc.rx_head = rxp->rx_next;
+
+ /*
+ * Add a new buffer to the receive chain.
+ * If this fails, the old buffer is recycled
+ * instead.
+ */
+ fxp_rc = fxp_add_rfabuf(sc, rxp);
+ if (fxp_rc == 0) {
+ int total_len;
+
+ /*
+ * Fetch packet length (the top 2 bits of
+ * actual_size are flags set by the controller
+ * upon completion), and drop the packet in case
+ * of bogus length or CRC errors.
+ */
+ total_len = le16toh(rfa->actual_size) & 0x3fff;
+ if (total_len < sizeof(struct ether_header) ||
+ total_len > MCLBYTES - RFA_ALIGNMENT_FUDGE -
+ sc->rfa_size ||
+ le16toh(rfa->rfa_status) & FXP_RFA_STATUS_CRC) {
+ m_freem(m);
+ continue;
+ }
+
+#ifndef __rtems__
+ /* Do IP checksum checking. */
+ if (le16toh(rfa->rfa_status) & FXP_RFA_STATUS_PARSE) {
+ if (rfa->rfax_csum_sts &
+ FXP_RFDX_CS_IP_CSUM_BIT_VALID)
+ m->m_pkthdr.csum_flags |=
+ CSUM_IP_CHECKED;
+ if (rfa->rfax_csum_sts &
+ FXP_RFDX_CS_IP_CSUM_VALID)
+ m->m_pkthdr.csum_flags |=
+ CSUM_IP_VALID;
+ if ((rfa->rfax_csum_sts &
+ FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
+ (rfa->rfax_csum_sts &
+ FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
+ m->m_pkthdr.csum_flags |=
+ CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xffff;
+ }
+ }
+#endif
+
+ m->m_pkthdr.len = m->m_len = total_len;
+ m->m_pkthdr.rcvif = ifp;
+
+ /*
+ * Drop locks before calling if_input() since it
+ * may re-enter fxp_start() in the netisr case.
+ * This would result in a lock reversal. Better
+ * performance might be obtained by chaining all
+ * packets received, dropping the lock, and then
+ * calling if_input() on each one.
+ */
+ FXP_UNLOCK(sc);
+#ifndef __rtems__
+ (*ifp->if_input)(ifp, m);
+#else
+ ether_input_skipping(ifp, m);
+#endif
+ FXP_LOCK(sc);
+ } else if (fxp_rc == ENOBUFS) {
+ rnr = 0;
+ break;
+ }
+ }
+ /* Restart the receiver at the current head if it ran dry. */
+ if (rnr) {
+ fxp_scb_wait(sc);
+ CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
+ sc->fxp_desc.rx_head->rx_addr);
+ fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
+ }
+}
+
+/*
+ * Update packet in/out/collision statistics. The i82557 doesn't
+ * allow you to access these counters without doing a fairly
+ * expensive DMA to get _all_ of the statistics it maintains, so
+ * we do this operation here only once per second. The statistics
+ * counters in the kernel are updated from the previous dump-stats
+ * DMA and then a new dump-stats DMA is started. The on-chip
+ * counters are zeroed when the DMA completes. If we can't start
+ * the DMA immediately, we don't wait - we just prepare to read
+ * them again next time.
+ */
+static void
+fxp_tick(void *xsc)
+{
+ struct fxp_softc *sc = xsc;
+ struct ifnet *ifp = sc->ifp;
+ struct fxp_stats *sp = sc->fxp_stats;
+
+ FXP_LOCK_ASSERT(sc, MA_OWNED);
+ bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_POSTREAD);
+ ifp->if_opackets += le32toh(sp->tx_good);
+ ifp->if_collisions += le32toh(sp->tx_total_collisions);
+ if (sp->rx_good) {
+ ifp->if_ipackets += le32toh(sp->rx_good);
+ sc->rx_idle_secs = 0;
+ } else {
+ /*
+ * Receiver's been idle for another second.
+ */
+ sc->rx_idle_secs++;
+ }
+ ifp->if_ierrors +=
+ le32toh(sp->rx_crc_errors) +
+ le32toh(sp->rx_alignment_errors) +
+ le32toh(sp->rx_rnr_errors) +
+ le32toh(sp->rx_overrun_errors);
+ /*
+ * If any transmit underruns occurred, bump up the transmit
+ * threshold by another 512 bytes (64 * 8).
+ */
+ if (sp->tx_underruns) {
+ ifp->if_oerrors += le32toh(sp->tx_underruns);
+ if (tx_threshold < 192)
+ tx_threshold += 64;
+ }
+
+ /*
+ * Release any xmit buffers that have completed DMA. This isn't
+ * strictly necessary to do here, but it's advantageous for mbufs
+ * with external storage to be released in a timely manner rather
+ * than being deferred for a potentially long time. This limits
+ * the delay to a maximum of one second.
+ */
+ fxp_txeof(sc);
+
+ /*
+ * If we haven't received any packets in FXP_MAC_RX_IDLE seconds,
+ * then assume the receiver has locked up and attempt to clear
+ * the condition by reprogramming the multicast filter. This is
+ * a work-around for a bug in the 82557 where the receiver locks
+ * up if it gets certain types of garbage in the synchronization
+ * bits prior to the packet header. This bug is supposed to only
+ * occur in 10Mbps mode, but has been seen to occur in 100Mbps
+ * mode as well (perhaps due to a 10/100 speed transition).
+ */
+ if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
+ sc->rx_idle_secs = 0;
+ fxp_mc_setup(sc);
+ }
+ /*
+ * If there is no pending command, start another stats
+ * dump. Otherwise punt for now.
+ */
+ if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
+ /*
+ * Start another stats dump.
+ */
+ bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
+ BUS_DMASYNC_PREREAD);
+ fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
+ } else {
+ /*
+ * A previous command is still waiting to be accepted.
+ * Just zero our copy of the stats and wait for the
+ * next timer event to update them.
+ */
+ sp->tx_good = 0;
+ sp->tx_underruns = 0;
+ sp->tx_total_collisions = 0;
+
+ sp->rx_good = 0;
+ sp->rx_crc_errors = 0;
+ sp->rx_alignment_errors = 0;
+ sp->rx_rnr_errors = 0;
+ sp->rx_overrun_errors = 0;
+ }
+#ifndef __rtems__
+ if (sc->miibus != NULL)
+ mii_tick(device_get_softc(sc->miibus));
+#endif
+
+ /*
+ * Check that chip hasn't hung.
+ */
+ fxp_watchdog(sc);
+
+ /*
+ * Schedule another timeout one second from now.
+ */
+ callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
+}
+
+/*
+ * Stop the interface. Cancels the statistics updater and resets
+ * the interface.
+ */
+static void
+fxp_stop(struct fxp_softc *sc)
+{
+ struct ifnet *ifp = sc->ifp;
+ struct fxp_tx *txp;
+ int i;
+
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->watchdog_timer = 0;
+
+ /*
+ * Cancel stats updater.
+ */
+ callout_stop(&sc->stat_ch);
+
+ /*
+ * Issue software reset, which also unloads the microcode.
+ */
+ sc->flags &= ~FXP_FLAG_UCODE;
+ CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
+ DELAY(50);
+
+ /*
+ * Release any xmit buffers.
+ */
+ txp = sc->fxp_desc.tx_list;
+ if (txp != NULL) {
+ for (i = 0; i < FXP_NTXCB; i++) {
+ if (txp[i].tx_mbuf != NULL) {
+ bus_dmamap_sync(sc->fxp_mtag, txp[i].tx_map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->fxp_mtag, txp[i].tx_map);
+ m_freem(txp[i].tx_mbuf);
+ txp[i].tx_mbuf = NULL;
+ /* clear this to reset csum offload bits */
+ txp[i].tx_cb->tbd[0].tb_addr = 0;
+ }
+ }
+ }
+ /* Make the cleared TxCB state visible before the next start. */
+ bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
+ sc->tx_queued = 0;
+}
+
+/*
+ * Watchdog/transmission transmit timeout handler. Called when a
+ * transmission is started on the interface, but no interrupt is
+ * received before the timeout. This usually indicates that the
+ * card has wedged for some reason.
+ *
+ * watchdog_timer is armed (to 5) in fxp_encap() and decremented here
+ * once per fxp_tick() call; zero means the watchdog is disarmed.
+ */
+static void
+fxp_watchdog(struct fxp_softc *sc)
+{
+
+ FXP_LOCK_ASSERT(sc, MA_OWNED);
+
+ if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
+ return;
+
+ device_printf(sc->dev, "device timeout\n");
+ sc->ifp->if_oerrors++;
+
+ /* Reinitialize the chip to recover. */
+ fxp_init_body(sc);
+}
+
+/*
+ * Acquire locks and then call the real initialization function. This
+ * is necessary because ether_ioctl() calls if_init() and this would
+ * result in mutex recursion if the mutex was held.
+ * This is the if_init entry point installed in fxp_attach().
+ */
+static void
+fxp_init(void *xsc)
+{
+ struct fxp_softc *sc = xsc;
+
+ FXP_LOCK(sc);
+ fxp_init_body(sc);
+ FXP_UNLOCK(sc);
+}
+
+/*
+ * Perform device initialization. This routine must be called with the
+ * softc lock held.
+ */
+static void
+fxp_init_body(struct fxp_softc *sc)
+{
+ struct ifnet *ifp = sc->ifp;
+ struct fxp_cb_config *cbp;
+ struct fxp_cb_ias *cb_ias;
+ struct fxp_cb_tx *tcbp;
+ struct fxp_tx *txp;
+ struct fxp_cb_mcs *mcsp;
+ int i, prm;
+
+ FXP_LOCK_ASSERT(sc, MA_OWNED);
+ /*
+ * Cancel any pending I/O
+ */
+ fxp_stop(sc);
+
+ prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
+
+ /*
+ * Initialize base of CBL and RFA memory. Loading with zero
+ * sets it up for regular linear addressing.
+ */
+ CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
+ fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);
+
+ fxp_scb_wait(sc);
+ fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);
+
+ /*
+ * Initialize base of dump-stats buffer.
+ */
+ fxp_scb_wait(sc);
+ bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD);
+ CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr);
+ fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);
+
+ /*
+ * Attempt to load microcode if requested.
+ */
+ if (ifp->if_flags & IFF_LINK0 && (sc->flags & FXP_FLAG_UCODE) == 0)
+ fxp_load_ucode(sc);
+
+ /*
+ * Initialize the multicast address list.
+ */
+ if (fxp_mc_addrs(sc)) {
+ mcsp = sc->mcsp;
+ mcsp->cb_status = 0;
+ mcsp->cb_command =
+ htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
+ mcsp->link_addr = 0xffffffff;
+ /*
+ * Start the multicast setup command.
+ */
+ fxp_scb_wait(sc);
+ bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
+ CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
+ fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
+ /* ...and wait for it to complete. */
+ fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map);
+ bus_dmamap_sync(sc->mcs_tag, sc->mcs_map,
+ BUS_DMASYNC_POSTWRITE);
+ }
+
+ /*
+ * We temporarily use memory that contains the TxCB list to
+ * construct the config CB. The TxCB list memory is rebuilt
+ * later.
+ */
+ cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list;
+
+ /*
+ * This bcopy is kind of disgusting, but there are a bunch of must be
+ * zero and must be one bits in this structure and this is the easiest
+ * way to initialize them all to proper values.
+ */
+ bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template));
+
+ cbp->cb_status = 0;
+ cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG |
+ FXP_CB_COMMAND_EL);
+ cbp->link_addr = 0xffffffff; /* (no) next command */
+ cbp->byte_count = sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22;
+ cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */
+ cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */
+ cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */
+ cbp->mwi_enable = sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0;
+ cbp->type_enable = 0; /* actually reserved */
+ cbp->read_align_en = sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0;
+ cbp->end_wr_on_cl = sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0;
+ cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */
+ cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */
+ cbp->dma_mbce = 0; /* (disable) dma max counters */
+ cbp->late_scb = 0; /* (don't) defer SCB update */
+ cbp->direct_dma_dis = 1; /* disable direct rcv dma mode */
+ cbp->tno_int_or_tco_en =0; /* (disable) tx not okay interrupt */
+ cbp->ci_int = 1; /* interrupt on CU idle */
+ cbp->ext_txcb_dis = sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1;
+ cbp->ext_stats_dis = 1; /* disable extended counters */
+ cbp->keep_overrun_rx = 0; /* don't pass overrun frames to host */
+ cbp->save_bf = sc->flags & FXP_FLAG_SAVE_BAD ? 1 : prm;
+ cbp->disc_short_rx = !prm; /* discard short packets */
+ cbp->underrun_retry = 1; /* retry mode (once) on DMA underrun */
+ cbp->two_frames = 0; /* do not limit FIFO to 2 frames */
+ cbp->dyn_tbd = 0; /* (no) dynamic TBD mode */
+ cbp->ext_rfa = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
+ cbp->mediatype = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1;
+ cbp->csma_dis = 0; /* (don't) disable link */
+ cbp->tcp_udp_cksum = 0; /* (don't) enable checksum */
+ cbp->vlan_tco = 0; /* (don't) enable vlan wakeup */
+ cbp->link_wake_en = 0; /* (don't) assert PME# on link change */
+ cbp->arp_wake_en = 0; /* (don't) assert PME# on arp */
+ cbp->mc_wake_en = 0; /* (don't) enable PME# on mcmatch */
+ cbp->nsai = 1; /* (don't) disable source addr insert */
+ cbp->preamble_length = 2; /* (7 byte) preamble */
+ cbp->loopback = 0; /* (don't) loopback */
+ cbp->linear_priority = 0; /* (normal CSMA/CD operation) */
+ cbp->linear_pri_mode = 0; /* (wait after xmit only) */
+ cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */
+ cbp->promiscuous = prm; /* promiscuous mode */
+ cbp->bcast_disable = 0; /* (don't) disable broadcasts */
+ cbp->wait_after_win = 0; /* (don't) enable modified backoff alg*/
+ cbp->ignore_ul = 0; /* consider U/L bit in IA matching */
+ cbp->crc16_en = 0; /* (don't) enable crc-16 algorithm */
+ cbp->crscdt = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0;
+
+ cbp->stripping = !prm; /* truncate rx packet to byte count */
+ cbp->padding = 1; /* (do) pad short tx packets */
+ cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */
+ cbp->long_rx_en = sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0;
+ cbp->ia_wake_en = 0; /* (don't) wake up on address match */
+ cbp->magic_pkt_dis = 0; /* (don't) disable magic packet */
+ /* must set wake_en in PMCSR also */
+ cbp->force_fdx = 0; /* (don't) force full duplex */
+ cbp->fdx_pin_en = 1; /* (enable) FDX# pin */
+ cbp->multi_ia = 0; /* (don't) accept multiple IAs */
+ cbp->mc_all = sc->flags & FXP_FLAG_ALL_MCAST ? 1 : 0;
+ cbp->gamla_rx = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
+
+ if (sc->tunable_noflow || sc->revision == FXP_REV_82557) {
+ /*
+ * The 82557 has no hardware flow control, the values
+ * below are the defaults for the chip.
+ */
+ cbp->fc_delay_lsb = 0;
+ cbp->fc_delay_msb = 0x40;
+ cbp->pri_fc_thresh = 3;
+ cbp->tx_fc_dis = 0;
+ cbp->rx_fc_restop = 0;
+ cbp->rx_fc_restart = 0;
+ cbp->fc_filter = 0;
+ cbp->pri_fc_loc = 1;
+ } else {
+ cbp->fc_delay_lsb = 0x1f;
+ cbp->fc_delay_msb = 0x01;
+ cbp->pri_fc_thresh = 3;
+ cbp->tx_fc_dis = 0; /* enable transmit FC */
+ cbp->rx_fc_restop = 1; /* enable FC restop frames */
+ cbp->rx_fc_restart = 1; /* enable FC restart frames */
+ cbp->fc_filter = !prm; /* drop FC frames to host */
+ cbp->pri_fc_loc = 1; /* FC pri location (byte31) */
+ }
+
+ /*
+ * Start the config command/DMA.
+ */
+ fxp_scb_wait(sc);
+ bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
+ CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
+ fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
+ /* ...and wait for it to complete. */
+ fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
+ bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
+
+ /*
+ * Now initialize the station address. Temporarily use the TxCB
+ * memory area like we did above for the config CB.
+ */
+ cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list;
+ cb_ias->cb_status = 0;
+ cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
+ cb_ias->link_addr = 0xffffffff;
+ bcopy(IF_LLADDR(sc->ifp), cb_ias->macaddr, ETHER_ADDR_LEN);
+
+ /*
+ * Start the IAS (Individual Address Setup) command/DMA.
+ */
+ fxp_scb_wait(sc);
+ bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
+ fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
+ /* ...and wait for it to complete. */
+ fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map);
+ bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
+
+ /*
+ * Initialize transmit control block (TxCB) list.
+ */
+ txp = sc->fxp_desc.tx_list;
+ tcbp = sc->fxp_desc.cbl_list;
+ bzero(tcbp, FXP_TXCB_SZ);
+ for (i = 0; i < FXP_NTXCB; i++) {
+ txp[i].tx_mbuf = NULL;
+ tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK);
+ tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
+ tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr +
+ (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx)));
+ if (sc->flags & FXP_FLAG_EXT_TXCB)
+ tcbp[i].tbd_array_addr =
+ htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2]));
+ else
+ tcbp[i].tbd_array_addr =
+ htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0]));
+ txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK];
+ }
+ /*
+ * Set the suspend flag on the first TxCB and start the control
+ * unit. It will execute the NOP and then suspend.
+ */
+ tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
+ bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
+ sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
+ sc->tx_queued = 1;
+
+ fxp_scb_wait(sc);
+ fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
+
+ /*
+ * Initialize receiver buffer area - RFA.
+ */
+ fxp_scb_wait(sc);
+ CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr);
+ fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
+
+ /*
+ * Set current media.
+ */
+ if (sc->miibus != NULL)
+ mii_mediachg(device_get_softc(sc->miibus));
+
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+ /*
+ * Enable interrupts.
+ */
+#ifndef __rtems__
+#ifdef DEVICE_POLLING
+ /*
+ * ... but only do that if we are not polling. And because (presumably)
+ * the default is interrupts on, we need to disable them explicitly!
+ */
+ if (ifp->if_capenable & IFCAP_POLLING )
+ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
+ else
+#endif /* DEVICE_POLLING */
+#endif
+ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
+
+ /*
+ * Start stats updater.
+ */
+ callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
+}
+
+static int
+fxp_serial_ifmedia_upd(struct ifnet *ifp)
+{
+
+ return (0);
+}
+
+static void
+fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+
+ ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
+}
+
+#ifndef __rtems__
+/*
+ * Change media according to request.
+ */
+static int
+fxp_ifmedia_upd(struct ifnet *ifp)
+{
+ struct fxp_softc *sc = ifp->if_softc;
+ struct mii_data *mii;
+
+ mii = device_get_softc(sc->miibus);
+ FXP_LOCK(sc);
+ if (mii->mii_instance) {
+ struct mii_softc *miisc;
+ LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
+ mii_phy_reset(miisc);
+ }
+ mii_mediachg(mii);
+ FXP_UNLOCK(sc);
+ return (0);
+}
+
+/*
+ * Notify the world which media we're using.
+ */
+static void
+fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct fxp_softc *sc = ifp->if_softc;
+ struct mii_data *mii;
+
+ mii = device_get_softc(sc->miibus);
+ FXP_LOCK(sc);
+ mii_pollstat(mii);
+ ifmr->ifm_active = mii->mii_media_active;
+ ifmr->ifm_status = mii->mii_media_status;
+
+ if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_10_T &&
+ sc->flags & FXP_FLAG_CU_RESUME_BUG)
+ sc->cu_resume_bug = 1;
+ else
+ sc->cu_resume_bug = 0;
+ FXP_UNLOCK(sc);
+}
+#endif
+
+/*
+ * Add a buffer to the end of the RFA buffer list.
+ * Return 0 if successful, 1 for failure. A failure results in
+ * adding the 'oldm' (if non-NULL) on to the end of the list -
+ * tossing out its old contents and recycling it.
+ * The RFA struct is stuck at the beginning of mbuf cluster and the
+ * data pointer is fixed up to point just past it.
+ */
+static int
+fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
+{
+ struct mbuf *m;
+ struct fxp_rfa *rfa, *p_rfa;
+ struct fxp_rx *p_rx;
+ bus_dmamap_t tmp_map;
+ int error;
+
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL)
+ return (ENOBUFS);
+
+ /*
+ * Move the data pointer up so that the incoming data packet
+ * will be 32-bit aligned.
+ */
+ m->m_data += RFA_ALIGNMENT_FUDGE;
+
+ /*
+ * Get a pointer to the base of the mbuf cluster and move
+ * data start past it.
+ */
+ rfa = mtod(m, struct fxp_rfa *);
+ m->m_data += sc->rfa_size;
+ rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);
+
+ rfa->rfa_status = 0;
+ rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
+ rfa->actual_size = 0;
+
+ /*
+ * Initialize the rest of the RFA. Note that since the RFA
+ * is misaligned, we cannot store values directly. We're thus
+ * using the le32enc() function which handles endianness and
+ * is also alignment-safe.
+ */
+ le32enc(&rfa->link_addr, 0xffffffff);
+ le32enc(&rfa->rbd_addr, 0xffffffff);
+
+ /* Map the RFA into DMA memory. */
+ error = bus_dmamap_load(sc->fxp_mtag, sc->spare_map, rfa,
+ MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr,
+ &rxp->rx_addr, 0);
+ if (error) {
+ m_freem(m);
+ return (error);
+ }
+
+ bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
+ tmp_map = sc->spare_map;
+ sc->spare_map = rxp->rx_map;
+ rxp->rx_map = tmp_map;
+ rxp->rx_mbuf = m;
+
+ bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /*
+ * If there are other buffers already on the list, attach this
+ * one to the end by fixing up the tail to point to this one.
+ */
+ if (sc->fxp_desc.rx_head != NULL) {
+ p_rx = sc->fxp_desc.rx_tail;
+ p_rfa = (struct fxp_rfa *)
+ (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE);
+ p_rx->rx_next = rxp;
+ le32enc(&p_rfa->link_addr, rxp->rx_addr);
+ p_rfa->rfa_control = 0;
+ bus_dmamap_sync(sc->fxp_mtag, p_rx->rx_map,
+ BUS_DMASYNC_PREWRITE);
+ } else {
+ rxp->rx_next = NULL;
+ sc->fxp_desc.rx_head = rxp;
+ }
+ sc->fxp_desc.rx_tail = rxp;
+ return (0);
+}
+
+static int
+fxp_miibus_readreg(device_t dev, int phy, int reg)
+{
+ struct fxp_softc *sc = device_get_softc(dev);
+ int count = 10000;
+ int value;
+
+ CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
+ (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
+
+ while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
+ && count--)
+ DELAY(10);
+
+ if (count <= 0)
+ device_printf(dev, "fxp_miibus_readreg: timed out\n");
+
+ return (value & 0xffff);
+}
+
+static void
+fxp_miibus_writereg(device_t dev, int phy, int reg, int value)
+{
+ struct fxp_softc *sc = device_get_softc(dev);
+ int count = 10000;
+
+ CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
+ (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
+ (value & 0xffff));
+
+ while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
+ count--)
+ DELAY(10);
+
+ if (count <= 0)
+ device_printf(dev, "fxp_miibus_writereg: timed out\n");
+}
+
+#ifdef __rtems__
+static int
+mdio_r(int phy, void *uarg, unsigned reg, uint32_t *pval)
+{
+ struct fxp_softc *sc = uarg;
+
+ /* Hack to support early probing */
+ if ( -2 != sc->phyidx ) {
+
+ /* using phy's other than the default not supported */
+ if ( 0 != phy || sc->phyidx < 0 ) {
+ return EINVAL;
+ }
+ phy = sc->phyidx;
+ }
+
+ *pval = fxp_miibus_readreg(sc->dev, phy, reg);
+
+ return 0;
+}
+
+static int
+mdio_w(int phy, void *uarg, unsigned reg, uint32_t value)
+{
+ struct fxp_softc *sc = uarg;
+
+ /* using phy's other than the default not supported */
+ if ( 0 != phy || sc->phyidx < 0 ) {
+ return EINVAL;
+ }
+
+ fxp_miibus_writereg(sc->dev, sc->phyidx, reg, value);
+
+ return 0;
+}
+#endif
+
+static int
+#ifndef __rtems__
+fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+#else
+fxp_ioctl(struct ifnet *ifp, ioctl_command_t command, caddr_t data)
+#endif
+{
+ struct fxp_softc *sc = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *)data;
+#ifndef __rtems__
+ struct mii_data *mii;
+ int flag, mask, error = 0;
+#else
+ int error = 0;
+#endif
+
+ switch (command) {
+ case SIOCSIFFLAGS:
+ FXP_LOCK(sc);
+ if (ifp->if_flags & IFF_ALLMULTI)
+ sc->flags |= FXP_FLAG_ALL_MCAST;
+ else
+ sc->flags &= ~FXP_FLAG_ALL_MCAST;
+
+ /*
+ * If interface is marked up and not running, then start it.
+ * If it is marked down and running, stop it.
+ * XXX If it's up then re-initialize it. This is so flags
+ * such as IFF_PROMISC are handled.
+ */
+ if (ifp->if_flags & IFF_UP) {
+ fxp_init_body(sc);
+ } else {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ fxp_stop(sc);
+ }
+ FXP_UNLOCK(sc);
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+#ifdef __rtems__
+ if ( ETHER_SIOCMULTIFRAG(error, command, ifr, ifp) )
+ break;
+#endif
+ FXP_LOCK(sc);
+ if (ifp->if_flags & IFF_ALLMULTI)
+ sc->flags |= FXP_FLAG_ALL_MCAST;
+ else
+ sc->flags &= ~FXP_FLAG_ALL_MCAST;
+ /*
+ * Multicast list has changed; set the hardware filter
+ * accordingly.
+ */
+ if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0)
+ fxp_mc_setup(sc);
+ /*
+ * fxp_mc_setup() can set FXP_FLAG_ALL_MCAST, so check it
+ * again rather than else {}.
+ */
+ if (sc->flags & FXP_FLAG_ALL_MCAST)
+ fxp_init_body(sc);
+ FXP_UNLOCK(sc);
+ error = 0;
+ break;
+
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+#ifndef __rtems__
+ if (sc->miibus != NULL) {
+ mii = device_get_softc(sc->miibus);
+ error = ifmedia_ioctl(ifp, ifr,
+ &mii->mii_media, command);
+ } else {
+ error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
+ }
+#else
+ error = rtems_mii_ioctl(&fxp_mdio, sc, command, &ifr->ifr_media);
+#endif
+ break;
+
+#ifndef __rtems__
+ case SIOCSIFCAP:
+ mask = ifp->if_capenable ^ ifr->ifr_reqcap;
+#ifdef DEVICE_POLLING
+ if (mask & IFCAP_POLLING) {
+ if (ifr->ifr_reqcap & IFCAP_POLLING) {
+ error = ether_poll_register(fxp_poll, ifp);
+ if (error)
+ return(error);
+ FXP_LOCK(sc);
+ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL,
+ FXP_SCB_INTR_DISABLE);
+ ifp->if_capenable |= IFCAP_POLLING;
+ FXP_UNLOCK(sc);
+ } else {
+ error = ether_poll_deregister(ifp);
+ /* Enable interrupts in any case */
+ FXP_LOCK(sc);
+ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
+ ifp->if_capenable &= ~IFCAP_POLLING;
+ FXP_UNLOCK(sc);
+ }
+ }
+#endif
+ if (mask & IFCAP_VLAN_MTU) {
+ FXP_LOCK(sc);
+ ifp->if_capenable ^= IFCAP_VLAN_MTU;
+ if (sc->revision != FXP_REV_82557)
+ flag = FXP_FLAG_LONG_PKT_EN;
+ else /* a hack to get long frames on the old chip */
+ flag = FXP_FLAG_SAVE_BAD;
+ sc->flags ^= flag;
+ if (ifp->if_flags & IFF_UP)
+ fxp_init_body(sc);
+ FXP_UNLOCK(sc);
+ }
+ break;
+#endif
+
+#ifdef __rtems__
+ case SIO_RTEMS_SHOW_STATS:
+ printf("Good packets sent %lu\n", ifp->if_opackets);
+ printf("Output errors %lu\n", ifp->if_oerrors);
+ printf("Good packets recvd %lu\n", ifp->if_ipackets);
+ printf("Input errors %lu\n", ifp->if_ierrors);
+ printf("Collisions %lu\n", ifp->if_collisions);
+ break;
+#endif
+
+ default:
+ error = ether_ioctl(ifp, command, data);
+ }
+ return (error);
+}
+
+/*
+ * Fill in the multicast address list and return number of entries.
+ */
+static int
+fxp_mc_addrs(struct fxp_softc *sc)
+{
+ struct fxp_cb_mcs *mcsp = sc->mcsp;
+ struct ifnet *ifp = sc->ifp;
+#ifndef __rtems__
+ struct ifmultiaddr *ifma;
+#endif
+ int nmcasts;
+
+ nmcasts = 0;
+ if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) {
+#ifndef __rtems__
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ if (nmcasts >= MAXMCADDR) {
+ sc->flags |= FXP_FLAG_ALL_MCAST;
+ nmcasts = 0;
+ break;
+ }
+ bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
+ &sc->mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);
+ nmcasts++;
+ }
+ IF_ADDR_UNLOCK(ifp);
+#else
+ {
+ /* UNTESTED */
+ struct ether_multi *enm;
+ struct ether_multistep step;
+ ETHER_FIRST_MULTI(step, (struct arpcom*)ifp, enm);
+ while ( enm != NULL ) {
+ if (nmcasts >= MAXMCADDR) {
+ sc->flags |= FXP_FLAG_ALL_MCAST;
+ nmcasts = 0;
+ break;
+ }
+ bcopy(enm->enm_addrlo,
+ &sc->mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);
+ nmcasts++;
+ ETHER_NEXT_MULTI( step, enm );
+ }
+ }
+#endif
+ }
+ mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);
+ return (nmcasts);
+}
+
+/*
+ * Program the multicast filter.
+ *
+ * We have an artificial restriction that the multicast setup command
+ * must be the first command in the chain, so we take steps to ensure
+ * this. By requiring this, it allows us to keep up the performance of
+ * the pre-initialized command ring (esp. link pointers) by not actually
+ * inserting the mcsetup command in the ring - i.e. its link pointer
+ * points to the TxCB ring, but the mcsetup descriptor itself is not part
+ * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
+ * lead into the regular TxCB ring when it completes.
+ *
+ * This function must be called at splimp.
+ */
+static void
+fxp_mc_setup(struct fxp_softc *sc)
+{
+ struct fxp_cb_mcs *mcsp = sc->mcsp;
+ struct fxp_tx *txp;
+ int count;
+
+ FXP_LOCK_ASSERT(sc, MA_OWNED);
+ /*
+ * If there are queued commands, we must wait until they are all
+ * completed. If we are already waiting, then add a NOP command
+ * with interrupt option so that we're notified when all commands
+ * have been completed - fxp_start() ensures that no additional
+ * TX commands will be added when need_mcsetup is true.
+ */
+ if (sc->tx_queued) {
+ /*
+ * need_mcsetup will be true if we are already waiting for the
+ * NOP command to be completed (see below). In this case, bail.
+ */
+ if (sc->need_mcsetup)
+ return;
+ sc->need_mcsetup = 1;
+
+ /*
+ * Add a NOP command with interrupt so that we are notified
+ * when all TX commands have been processed.
+ */
+ txp = sc->fxp_desc.tx_last->tx_next;
+ txp->tx_mbuf = NULL;
+ txp->tx_cb->cb_status = 0;
+ txp->tx_cb->cb_command = htole16(FXP_CB_COMMAND_NOP |
+ FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
+ /*
+ * Advance the end of list forward.
+ */
+ sc->fxp_desc.tx_last->tx_cb->cb_command &=
+ htole16(~FXP_CB_COMMAND_S);
+ bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
+ sc->fxp_desc.tx_last = txp;
+ sc->tx_queued++;
+ /*
+ * Issue a resume in case the CU has just suspended.
+ */
+ fxp_scb_wait(sc);
+ fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
+ /*
+ * Set a 5 second timer just in case we don't hear from the
+ * card again.
+ */
+ sc->watchdog_timer = 5;
+
+ return;
+ }
+ sc->need_mcsetup = 0;
+
+ /*
+ * Initialize multicast setup descriptor.
+ */
+ mcsp->cb_status = 0;
+ mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS |
+ FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
+ mcsp->link_addr = htole32(sc->fxp_desc.cbl_addr);
+ txp = &sc->fxp_desc.mcs_tx;
+ txp->tx_mbuf = NULL;
+ txp->tx_cb = (struct fxp_cb_tx *)sc->mcsp;
+ txp->tx_next = sc->fxp_desc.tx_list;
+ (void) fxp_mc_addrs(sc);
+ sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
+ sc->tx_queued = 1;
+
+ /*
+ * Wait until command unit is not active. This should never
+ * be the case when nothing is queued, but make sure anyway.
+ */
+ count = 100;
+ while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
+ FXP_SCB_CUS_ACTIVE && --count)
+ DELAY(10);
+ if (count == 0) {
+ device_printf(sc->dev, "command queue timeout\n");
+ return;
+ }
+
+ /*
+ * Start the multicast setup command.
+ */
+ fxp_scb_wait(sc);
+ bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
+ CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
+ fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
+
+ sc->watchdog_timer = 2;
+ return;
+}
+
+static uint32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
+static uint32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
+static uint32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;
+static uint32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE;
+static uint32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE;
+static uint32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE;
+static uint32_t fxp_ucode_d102e[] = D102_E_RCVBUNDLE_UCODE;
+
+#define UCODE(x) x, sizeof(x)/sizeof(uint32_t)
+
+struct ucode {
+ uint32_t revision;
+ uint32_t *ucode;
+ int length;
+ u_short int_delay_offset;
+ u_short bundle_max_offset;
+} ucode_table[] = {
+ { FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 },
+ { FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 },
+ { FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma),
+ D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD },
+ { FXP_REV_82559S_A, UCODE(fxp_ucode_d101s),
+ D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD },
+ { FXP_REV_82550, UCODE(fxp_ucode_d102),
+ D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD },
+ { FXP_REV_82550_C, UCODE(fxp_ucode_d102c),
+ D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD },
+ { FXP_REV_82551_F, UCODE(fxp_ucode_d102e),
+ D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD },
+ { 0, NULL, 0, 0, 0 }
+};
+
+static void
+fxp_load_ucode(struct fxp_softc *sc)
+{
+ struct ucode *uc;
+ struct fxp_cb_ucode *cbp;
+ int i;
+
+ for (uc = ucode_table; uc->ucode != NULL; uc++)
+ if (sc->revision == uc->revision)
+ break;
+ if (uc->ucode == NULL)
+ return;
+ cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list;
+ cbp->cb_status = 0;
+ cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL);
+ cbp->link_addr = 0xffffffff; /* (no) next command */
+ for (i = 0; i < uc->length; i++)
+ cbp->ucode[i] = htole32(uc->ucode[i]);
+ if (uc->int_delay_offset)
+ *(uint16_t *)&cbp->ucode[uc->int_delay_offset] =
+ htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2);
+ if (uc->bundle_max_offset)
+ *(uint16_t *)&cbp->ucode[uc->bundle_max_offset] =
+ htole16(sc->tunable_bundle_max);
+ /*
+ * Download the ucode to the chip.
+ */
+ fxp_scb_wait(sc);
+ bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
+ CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
+ fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
+ /* ...and wait for it to complete. */
+ fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
+ bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
+ device_printf(sc->dev,
+ "Microcode loaded, int_delay: %d usec bundle_max: %d\n",
+ sc->tunable_int_delay,
+ uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max);
+ sc->flags |= FXP_FLAG_UCODE;
+}
+
+#ifndef RTEMS_SYSCTL_NOTYETSUP
+static int
+sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
+{
+ int error, value;
+
+ value = *(int *)arg1;
+ error = sysctl_handle_int(oidp, &value, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ if (value < low || value > high)
+ return (EINVAL);
+ *(int *)arg1 = value;
+ return (0);
+}
+
+/*
+ * Interrupt delay is expressed in microseconds, a multiplier is used
+ * to convert this to the appropriate clock ticks before using.
+ */
+static int
+sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS)
+{
+ return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000));
+}
+
+static int
+sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS)
+{
+ return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff));
+}
+#endif
diff --git a/bsd_eth_drivers/if_fxp/if_fxpreg.h b/bsd_eth_drivers/if_fxp/if_fxpreg.h
new file mode 100644
index 0000000..111572f
--- /dev/null
+++ b/bsd_eth_drivers/if_fxp/if_fxpreg.h
@@ -0,0 +1,473 @@
+/*-
+ * Copyright (c) 1995, David Greenman
+ * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/dev/fxp/if_fxpreg.h,v 1.39.18.1 2008/11/25 02:59:29 kensmith Exp $
+ */
+
+#define FXP_VENDORID_INTEL 0x8086
+
+#define FXP_PCI_MMBA 0x10
+#define FXP_PCI_IOBA 0x14
+
+/*
+ * Control/status registers.
+ */
+#define FXP_CSR_SCB_RUSCUS 0 /* scb_rus/scb_cus (1 byte) */
+#define FXP_CSR_SCB_STATACK 1 /* scb_statack (1 byte) */
+#define FXP_CSR_SCB_COMMAND 2 /* scb_command (1 byte) */
+#define FXP_CSR_SCB_INTRCNTL 3 /* scb_intrcntl (1 byte) */
+#define FXP_CSR_SCB_GENERAL 4 /* scb_general (4 bytes) */
+#define FXP_CSR_PORT 8 /* port (4 bytes) */
+#define FXP_CSR_FLASHCONTROL 12 /* flash control (2 bytes) */
+#define FXP_CSR_EEPROMCONTROL 14 /* eeprom control (2 bytes) */
+#define FXP_CSR_MDICONTROL 16 /* mdi control (4 bytes) */
+#define FXP_CSR_FLOWCONTROL 0x19 /* flow control (2 bytes) */
+#define FXP_CSR_GENCONTROL 0x1C /* general control (1 byte) */
+
+/*
+ * FOR REFERENCE ONLY, the old definition of FXP_CSR_SCB_RUSCUS:
+ *
+ * volatile uint8_t :2,
+ * scb_rus:4,
+ * scb_cus:2;
+ */
+
+#define FXP_PORT_SOFTWARE_RESET 0
+#define FXP_PORT_SELFTEST 1
+#define FXP_PORT_SELECTIVE_RESET 2
+#define FXP_PORT_DUMP 3
+
+#define FXP_SCB_RUS_IDLE 0
+#define FXP_SCB_RUS_SUSPENDED 1
+#define FXP_SCB_RUS_NORESOURCES 2
+#define FXP_SCB_RUS_READY 4
+#define FXP_SCB_RUS_SUSP_NORBDS 9
+#define FXP_SCB_RUS_NORES_NORBDS 10
+#define FXP_SCB_RUS_READY_NORBDS 12
+
+#define FXP_SCB_CUS_IDLE 0
+#define FXP_SCB_CUS_SUSPENDED 1
+#define FXP_SCB_CUS_ACTIVE 2
+
+#define FXP_SCB_INTR_DISABLE 0x01 /* Disable all interrupts */
+#define FXP_SCB_INTR_SWI 0x02 /* Generate SWI */
+#define FXP_SCB_INTMASK_FCP 0x04
+#define FXP_SCB_INTMASK_ER 0x08
+#define FXP_SCB_INTMASK_RNR 0x10
+#define FXP_SCB_INTMASK_CNA 0x20
+#define FXP_SCB_INTMASK_FR 0x40
+#define FXP_SCB_INTMASK_CXTNO 0x80
+
+#define FXP_SCB_STATACK_FCP 0x01 /* Flow Control Pause */
+#define FXP_SCB_STATACK_ER 0x02 /* Early Receive */
+#define FXP_SCB_STATACK_SWI 0x04
+#define FXP_SCB_STATACK_MDI 0x08
+#define FXP_SCB_STATACK_RNR 0x10
+#define FXP_SCB_STATACK_CNA 0x20
+#define FXP_SCB_STATACK_FR 0x40
+#define FXP_SCB_STATACK_CXTNO 0x80
+
+#define FXP_SCB_COMMAND_CU_NOP 0x00
+#define FXP_SCB_COMMAND_CU_START 0x10
+#define FXP_SCB_COMMAND_CU_RESUME 0x20
+#define FXP_SCB_COMMAND_CU_DUMP_ADR 0x40
+#define FXP_SCB_COMMAND_CU_DUMP 0x50
+#define FXP_SCB_COMMAND_CU_BASE 0x60
+#define FXP_SCB_COMMAND_CU_DUMPRESET 0x70
+
+#define FXP_SCB_COMMAND_RU_NOP 0
+#define FXP_SCB_COMMAND_RU_START 1
+#define FXP_SCB_COMMAND_RU_RESUME 2
+#define FXP_SCB_COMMAND_RU_ABORT 4
+#define FXP_SCB_COMMAND_RU_LOADHDS 5
+#define FXP_SCB_COMMAND_RU_BASE 6
+#define FXP_SCB_COMMAND_RU_RBDRESUME 7
+
+/*
+ * Command block definitions
+ */
+struct fxp_cb_nop {
+ uint16_t cb_status;
+ uint16_t cb_command;
+ uint32_t link_addr;
+};
+struct fxp_cb_ias {
+ uint16_t cb_status;
+ uint16_t cb_command;
+ uint32_t link_addr;
+ uint8_t macaddr[6];
+};
+
+/* I hate bit-fields :-( */
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define __FXP_BITFIELD2(a, b) a, b
+#define __FXP_BITFIELD3(a, b, c) a, b, c
+#define __FXP_BITFIELD4(a, b, c, d) a, b, c, d
+#define __FXP_BITFIELD5(a, b, c, d, e) a, b, c, d, e
+#define __FXP_BITFIELD6(a, b, c, d, e, f) a, b, c, d, e, f
+#define __FXP_BITFIELD7(a, b, c, d, e, f, g) a, b, c, d, e, f, g
+#define __FXP_BITFIELD8(a, b, c, d, e, f, g, h) a, b, c, d, e, f, g, h
+#else
+#define __FXP_BITFIELD2(a, b) b, a
+#define __FXP_BITFIELD3(a, b, c) c, b, a
+#define __FXP_BITFIELD4(a, b, c, d) d, c, b, a
+#define __FXP_BITFIELD5(a, b, c, d, e) e, d, c, b, a
+#define __FXP_BITFIELD6(a, b, c, d, e, f) f, e, d, c, b, a
+#define __FXP_BITFIELD7(a, b, c, d, e, f, g) g, f, e, d, c, b, a
+#define __FXP_BITFIELD8(a, b, c, d, e, f, g, h) h, g, f, e, d, c, b, a
+#endif
+
+struct fxp_cb_config {
+ uint16_t cb_status;
+ uint16_t cb_command;
+ uint32_t link_addr;
+
+ /* Bytes 0 - 21 -- common to all i8255x */
+ u_int __FXP_BITFIELD2(byte_count:6, :2);
+ u_int __FXP_BITFIELD3(rx_fifo_limit:4, tx_fifo_limit:3, :1);
+ uint8_t adaptive_ifs;
+ u_int __FXP_BITFIELD5(mwi_enable:1, /* 8,9 */
+ type_enable:1, /* 8,9 */
+ read_align_en:1, /* 8,9 */
+ end_wr_on_cl:1, /* 8,9 */
+ :4);
+ u_int __FXP_BITFIELD2(rx_dma_bytecount:7, :1);
+ u_int __FXP_BITFIELD2(tx_dma_bytecount:7, dma_mbce:1);
+ u_int __FXP_BITFIELD8(late_scb:1, /* 7 */
+ direct_dma_dis:1, /* 8,9 */
+ tno_int_or_tco_en:1, /* 7,9 */
+ ci_int:1,
+ ext_txcb_dis:1, /* 8,9 */
+ ext_stats_dis:1, /* 8,9 */
+ keep_overrun_rx:1,
+ save_bf:1);
+ u_int __FXP_BITFIELD6(disc_short_rx:1,
+ underrun_retry:2,
+ :2,
+ ext_rfa:1, /* 550 */
+ two_frames:1, /* 8,9 */
+ dyn_tbd:1); /* 8,9 */
+ u_int __FXP_BITFIELD3(mediatype:1, /* 7 */
+ :6,
+ csma_dis:1); /* 8,9 */
+ u_int __FXP_BITFIELD6(tcp_udp_cksum:1, /* 9 */
+ :3,
+ vlan_tco:1, /* 8,9 */
+ link_wake_en:1, /* 8,9 */
+ arp_wake_en:1, /* 8 */
+ mc_wake_en:1); /* 8 */
+ u_int __FXP_BITFIELD4(:3,
+ nsai:1,
+ preamble_length:2,
+ loopback:2);
+ u_int __FXP_BITFIELD2(linear_priority:3, /* 7 */
+ :5);
+ u_int __FXP_BITFIELD3(linear_pri_mode:1, /* 7 */
+ :3,
+ interfrm_spacing:4);
+ u_int :8;
+ u_int :8;
+ u_int __FXP_BITFIELD8(promiscuous:1,
+ bcast_disable:1,
+ wait_after_win:1, /* 8,9 */
+ :1,
+ ignore_ul:1, /* 8,9 */
+ crc16_en:1, /* 9 */
+ :1,
+ crscdt:1);
+ u_int fc_delay_lsb:8; /* 8,9 */
+ u_int fc_delay_msb:8; /* 8,9 */
+ u_int __FXP_BITFIELD6(stripping:1,
+ padding:1,
+ rcv_crc_xfer:1,
+ long_rx_en:1, /* 8,9 */
+ pri_fc_thresh:3, /* 8,9 */
+ :1);
+ u_int __FXP_BITFIELD8(ia_wake_en:1, /* 8 */
+ magic_pkt_dis:1, /* 8,9,!9ER */
+ tx_fc_dis:1, /* 8,9 */
+ rx_fc_restop:1, /* 8,9 */
+ rx_fc_restart:1, /* 8,9 */
+ fc_filter:1, /* 8,9 */
+ force_fdx:1,
+ fdx_pin_en:1);
+ u_int __FXP_BITFIELD4(:5,
+ pri_fc_loc:1, /* 8,9 */
+ multi_ia:1,
+ :1);
+ u_int __FXP_BITFIELD3(:3, mc_all:1, :4);
+
+ /* Bytes 22 - 31 -- i82550 only */
+ u_int __FXP_BITFIELD3(gamla_rx:1,
+ vlan_drop_en:1,
+ :6);
+ uint8_t pad[9];
+};
+
+#define MAXMCADDR 80
+struct fxp_cb_mcs {
+ uint16_t cb_status;
+ uint16_t cb_command;
+ uint32_t link_addr;
+ uint16_t mc_cnt;
+ uint8_t mc_addr[MAXMCADDR][6];
+};
+
+#define MAXUCODESIZE 192
+struct fxp_cb_ucode {
+ uint16_t cb_status;
+ uint16_t cb_command;
+ uint32_t link_addr;
+ uint32_t ucode[MAXUCODESIZE];
+};
+
+/*
+ * Number of DMA segments in a TxCB.
+ */
+#define FXP_NTXSEG 32
+
+struct fxp_tbd {
+ uint32_t tb_addr;
+ uint32_t tb_size;
+};
+
+struct fxp_ipcb {
+ /*
+ * The following fields are valid only when
+ * using the IPCB command block for TX checksum offload
+ * (and TCP large send, VLANs, and (I think) IPsec). To use
+ * them, you must enable extended TxCBs (available only
+ * on the 82559 and later) and use the IPCBXMIT command.
+ * Note that Intel defines the IPCB to be 32 bytes long,
+ * the last 8 bytes of which comprise the first entry
+ * in the TBD array (see note below). This means we only
+ * have to define 8 extra bytes here.
+ */
+ uint16_t ipcb_schedule_low;
+ uint8_t ipcb_ip_schedule;
+ uint8_t ipcb_ip_activation_high;
+ uint16_t ipcb_vlan_id;
+ uint8_t ipcb_ip_header_offset;
+ uint8_t ipcb_tcp_header_offset;
+};
+
+struct fxp_cb_tx {
+ uint16_t cb_status;
+ uint16_t cb_command;
+ uint32_t link_addr;
+ uint32_t tbd_array_addr;
+ uint16_t byte_count;
+ uint8_t tx_threshold;
+ uint8_t tbd_number;
+
+ /*
+ * The following structure isn't actually part of the TxCB,
+ * unless the extended TxCB feature is being used. In this
+ * case, the first two elements of the structure below are
+ * fetched along with the TxCB.
+ */
+ union {
+ struct fxp_ipcb ipcb;
+ struct fxp_tbd tbd[FXP_NTXSEG];
+ } tx_cb_u;
+};
+
+#define tbd tx_cb_u.tbd
+#define ipcb_schedule_low tx_cb_u.ipcb.ipcb_schedule_low
+#define ipcb_ip_schedule tx_cb_u.ipcb.ipcb_ip_schedule
+#define ipcb_ip_activation_high tx_cb_u.ipcb.ipcb_ip_activation_high
+#define ipcb_vlan_id tx_cb_u.ipcb.ipcb_vlan_id
+#define ipcb_ip_header_offset tx_cb_u.ipcb.ipcb_ip_header_offset
+#define ipcb_tcp_header_offset tx_cb_u.ipcb.ipcb_tcp_header_offset
+
+/*
+ * IPCB field definitions
+ */
+#define FXP_IPCB_IP_CHECKSUM_ENABLE 0x10
+#define FXP_IPCB_TCPUDP_CHECKSUM_ENABLE 0x20
+#define FXP_IPCB_TCP_PACKET 0x40
+#define FXP_IPCB_LARGESEND_ENABLE 0x80
+#define FXP_IPCB_HARDWAREPARSING_ENABLE 0x01
+#define FXP_IPCB_INSERTVLAN_ENABLE 0x02
+
+/*
+ * Control Block (CB) definitions
+ */
+
+/* status */
+#define FXP_CB_STATUS_OK 0x2000
+#define FXP_CB_STATUS_C 0x8000
+/* commands */
+#define FXP_CB_COMMAND_NOP 0x0
+#define FXP_CB_COMMAND_IAS 0x1
+#define FXP_CB_COMMAND_CONFIG 0x2
+#define FXP_CB_COMMAND_MCAS 0x3
+#define FXP_CB_COMMAND_XMIT 0x4
+#define FXP_CB_COMMAND_UCODE 0x5
+#define FXP_CB_COMMAND_DUMP 0x6
+#define FXP_CB_COMMAND_DIAG 0x7
+#define FXP_CB_COMMAND_LOADFILT 0x8
+#define FXP_CB_COMMAND_IPCBXMIT 0x9
+
+/* command flags */
+#define FXP_CB_COMMAND_SF 0x0008 /* simple/flexible mode */
+#define FXP_CB_COMMAND_I 0x2000 /* generate interrupt on completion */
+#define FXP_CB_COMMAND_S 0x4000 /* suspend on completion */
+#define FXP_CB_COMMAND_EL 0x8000 /* end of list */
+
+/*
+ * RFA definitions
+ */
+
+struct fxp_rfa {
+ uint16_t rfa_status;
+ uint16_t rfa_control;
+ uint32_t link_addr;
+ uint32_t rbd_addr;
+ uint16_t actual_size;
+ uint16_t size;
+
+ /*
+ * The following fields are only available when using
+ * extended receive mode on an 82550/82551 chipset.
+ */
+ uint16_t rfax_vlan_id;
+ uint8_t rfax_rx_parser_sts;
+ uint8_t rfax_rsvd0;
+ uint16_t rfax_security_sts;
+ uint8_t rfax_csum_sts;
+ uint8_t rfax_zerocopy_sts;
+ uint8_t rfax_pad[8];
+} __packed;
+#define FXP_RFAX_LEN 16
+
+#define FXP_RFA_STATUS_RCOL 0x0001 /* receive collision */
+#define FXP_RFA_STATUS_IAMATCH 0x0002 /* 0 = matches station address */
+#define FXP_RFA_STATUS_NOAMATCH 0x0004 /* 1 = doesn't match anything */
+#define FXP_RFA_STATUS_PARSE 0x0008 /* pkt parse ok (82550/1 only) */
+#define FXP_RFA_STATUS_S4 0x0010 /* receive error from PHY */
+#define FXP_RFA_STATUS_TL 0x0020 /* type/length */
+#define FXP_RFA_STATUS_FTS 0x0080 /* frame too short */
+#define FXP_RFA_STATUS_OVERRUN 0x0100 /* DMA overrun */
+#define FXP_RFA_STATUS_RNR 0x0200 /* no resources */
+#define FXP_RFA_STATUS_ALIGN 0x0400 /* alignment error */
+#define FXP_RFA_STATUS_CRC 0x0800 /* CRC error */
+#define FXP_RFA_STATUS_OK 0x2000 /* packet received okay */
+#define FXP_RFA_STATUS_C 0x8000 /* packet reception complete */
+#define FXP_RFA_CONTROL_SF 0x08 /* simple/flexible memory mode */
+#define FXP_RFA_CONTROL_H 0x10 /* header RFD */
+#define FXP_RFA_CONTROL_S 0x4000 /* suspend after reception */
+#define FXP_RFA_CONTROL_EL 0x8000 /* end of list */
+
+/* Bits in the 'csum_sts' byte */
+#define FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID 0x10
+#define FXP_RFDX_CS_TCPUDP_CSUM_VALID 0x20
+#define FXP_RFDX_CS_IP_CSUM_BIT_VALID 0x01
+#define FXP_RFDX_CS_IP_CSUM_VALID 0x02
+
+/* Bits in the 'packet parser' byte */
+#define FXP_RFDX_P_PARSE_BIT 0x08
+#define FXP_RFDX_P_CSUM_PROTOCOL_MASK 0x03
+#define FXP_RFDX_P_TCP_PACKET 0x00
+#define FXP_RFDX_P_UDP_PACKET 0x01
+#define FXP_RFDX_P_IP_PACKET 0x03
+
+/*
+ * Statistics dump area definitions
+ */
+struct fxp_stats {
+ uint32_t tx_good;
+ uint32_t tx_maxcols;
+ uint32_t tx_latecols;
+ uint32_t tx_underruns;
+ uint32_t tx_lostcrs;
+ uint32_t tx_deffered;
+ uint32_t tx_single_collisions;
+ uint32_t tx_multiple_collisions;
+ uint32_t tx_total_collisions;
+ uint32_t rx_good;
+ uint32_t rx_crc_errors;
+ uint32_t rx_alignment_errors;
+ uint32_t rx_rnr_errors;
+ uint32_t rx_overrun_errors;
+ uint32_t rx_cdt_errors;
+ uint32_t rx_shortframes;
+ uint32_t completion_status;
+};
+#define FXP_STATS_DUMP_COMPLETE 0xa005
+#define FXP_STATS_DR_COMPLETE 0xa007
+
+/*
+ * Serial EEPROM control register bits
+ */
+#define FXP_EEPROM_EESK 0x01 /* shift clock */
+#define FXP_EEPROM_EECS 0x02 /* chip select */
+#define FXP_EEPROM_EEDI 0x04 /* data in */
+#define FXP_EEPROM_EEDO 0x08 /* data out */
+
+/*
+ * Serial EEPROM opcodes, including start bit
+ */
+#define FXP_EEPROM_OPC_ERASE 0x4
+#define FXP_EEPROM_OPC_WRITE 0x5
+#define FXP_EEPROM_OPC_READ 0x6
+
+/*
+ * Management Data Interface opcodes
+ */
+#define FXP_MDI_WRITE 0x1
+#define FXP_MDI_READ 0x2
+
+/*
+ * PHY device types
+ */
+#define FXP_PHY_DEVICE_MASK 0x3f00
+#define FXP_PHY_SERIAL_ONLY 0x8000
+#define FXP_PHY_NONE 0
+#define FXP_PHY_82553A 1
+#define FXP_PHY_82553C 2
+#define FXP_PHY_82503 3
+#define FXP_PHY_DP83840 4
+#define FXP_PHY_80C240 5
+#define FXP_PHY_80C24 6
+#define FXP_PHY_82555 7
+#define FXP_PHY_DP83840A 10
+#define FXP_PHY_82555B 11
+
+/*
+ * Chip revision values.
+ */
+#define FXP_REV_82557 1 /* catchall 82557 chip type */
+#define FXP_REV_82558_A4 4 /* 82558 A4 stepping */
+#define FXP_REV_82558_B0 5 /* 82558 B0 stepping */
+#define FXP_REV_82559_A0 8 /* 82559 A0 stepping */
+#define FXP_REV_82559S_A 9 /* 82559S A stepping */
+#define FXP_REV_82550 12
+#define FXP_REV_82550_C 13 /* 82550 C stepping */
+#define FXP_REV_82551_E 14 /* 82551 */
+#define FXP_REV_82551_F 15 /* 82551 */
+#define FXP_REV_82551_10 16 /* 82551 */
diff --git a/bsd_eth_drivers/if_fxp/if_fxpvar.h b/bsd_eth_drivers/if_fxp/if_fxpvar.h
new file mode 100644
index 0000000..edb9f28
--- /dev/null
+++ b/bsd_eth_drivers/if_fxp/if_fxpvar.h
@@ -0,0 +1,206 @@
+/*-
+ * Copyright (c) 1995, David Greenman
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/dev/fxp/if_fxpvar.h,v 1.40.6.1 2008/11/25 02:59:29 kensmith Exp $
+ */
+
+/*
+ * Misc. definitions for the Intel EtherExpress Pro/100B PCI Fast
+ * Ethernet driver
+ */
+
+/*
+ * Number of transmit control blocks. This determines the number
+ * of transmit buffers that can be chained in the CB list.
+ * This must be a power of two.
+ */
+#define FXP_NTXCB 128
+
+/*
+ * Size of the TxCB list.
+ */
+#define FXP_TXCB_SZ (FXP_NTXCB * sizeof(struct fxp_cb_tx))
+
+/*
+ * Macro to obtain the DMA address of a virtual address in the
+ * TxCB list based on the base DMA address of the TxCB list.
+ */
+#define FXP_TXCB_DMA_ADDR(sc, addr) \
+ (sc->fxp_desc.cbl_addr + (uintptr_t)addr - \
+ (uintptr_t)sc->fxp_desc.cbl_list)
+
+/*
+ * Number of completed TX commands at which point an interrupt
+ * will be generated to garbage collect the attached buffers.
+ * Must be at least one less than FXP_NTXCB, and should be
+ * enough less so that the transmitter doesn't become idle
+ * during the buffer rundown (which would reduce performance).
+ */
+#define FXP_CXINT_THRESH 120
+
+/*
+ * TxCB list index mask. This is used to do list wrap-around.
+ */
+#define FXP_TXCB_MASK (FXP_NTXCB - 1)
+
+/*
+ * Number of receive frame area buffers. These are large so choose
+ * wisely.
+ */
+#ifdef DEVICE_POLLING
+#define FXP_NRFABUFS 192
+#else
+#define FXP_NRFABUFS 64
+#endif
+
+/*
+ * Maximum number of seconds that the receiver can be idle before we
+ * assume it's dead and attempt to reset it by reprogramming the
+ * multicast filter. This is part of a work-around for a bug in the
+ * NIC. See fxp_stats_update().
+ */
+#define FXP_MAX_RX_IDLE 15
+
+/*
+ * Default maximum time, in microseconds, that an interrupt may be delayed
+ * in an attempt to coalesce interrupts. This is only effective if the Intel
+ * microcode is loaded, and may be changed via either loader tunables or
+ * sysctl. See also the CPUSAVER_DWORD entry in rcvbundl.h.
+ */
+#define TUNABLE_INT_DELAY 1000
+
+/*
+ * Default number of packets that will be bundled, before an interrupt is
+ * generated. This is only effective if the Intel microcode is loaded, and
+ * may be changed via either loader tunables or sysctl. This may not be
+ * present in all microcode revisions, see also the CPUSAVER_BUNDLE_MAX_DWORD
+ * entry in rcvbundl.h.
+ */
+#define TUNABLE_BUNDLE_MAX 6
+
+#define FXP_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
+#define FXP_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
+#define FXP_LOCK_ASSERT(_sc, _what) mtx_assert(&(_sc)->sc_mtx, (_what))
+
+/*
+ * Structures to handle TX and RX descriptors.
+ */
+struct fxp_rx {
+ struct fxp_rx *rx_next;
+ struct mbuf *rx_mbuf;
+ bus_dmamap_t rx_map;
+ uint32_t rx_addr;
+};
+
+struct fxp_tx {
+ struct fxp_tx *tx_next;
+ struct fxp_cb_tx *tx_cb;
+ struct mbuf *tx_mbuf;
+ bus_dmamap_t tx_map;
+};
+
+struct fxp_desc_list {
+ struct fxp_rx rx_list[FXP_NRFABUFS];
+ struct fxp_tx tx_list[FXP_NTXCB];
+ struct fxp_tx mcs_tx;
+ struct fxp_rx *rx_head;
+ struct fxp_rx *rx_tail;
+ struct fxp_tx *tx_first;
+ struct fxp_tx *tx_last;
+ struct fxp_rfa *rfa_list;
+ struct fxp_cb_tx *cbl_list;
+ uint32_t cbl_addr;
+ bus_dma_tag_t rx_tag;
+};
+
+/*
+ * NOTE: Elements are ordered for optimal cacheline behavior, and NOT
+ * for functional grouping.
+ */
+struct fxp_softc {
+ struct ifnet *ifp; /* per-interface network data */
+ struct resource *fxp_res[2]; /* I/O and IRQ resources */
+ struct resource_spec *fxp_spec; /* the resource spec we used */
+ void *ih; /* interrupt handler cookie */
+ struct mtx sc_mtx;
+ bus_dma_tag_t fxp_mtag; /* bus DMA tag for mbufs */
+ bus_dma_tag_t fxp_stag; /* bus DMA tag for stats */
+ bus_dmamap_t fxp_smap; /* bus DMA map for stats */
+ bus_dma_tag_t cbl_tag; /* DMA tag for the TxCB list */
+ bus_dmamap_t cbl_map; /* DMA map for the TxCB list */
+ bus_dma_tag_t mcs_tag; /* DMA tag for the multicast setup */
+ bus_dmamap_t mcs_map; /* DMA map for the multicast setup */
+ bus_dmamap_t spare_map; /* spare DMA map */
+ struct fxp_desc_list fxp_desc; /* descriptors management struct */
+ int maxtxseg; /* maximum # of TX segments */
+ int tx_queued; /* # of active TxCB's */
+ int need_mcsetup; /* multicast filter needs programming */
+ struct fxp_stats *fxp_stats; /* Pointer to interface stats */
+ uint32_t stats_addr; /* DMA address of the stats structure */
+ int rx_idle_secs; /* # of seconds RX has been idle */
+ struct callout stat_ch; /* stat callout */
+ int watchdog_timer; /* seconds until chip reset */
+ struct fxp_cb_mcs *mcsp; /* Pointer to mcast setup descriptor */
+ uint32_t mcs_addr; /* DMA address of the multicast cmd */
+ struct ifmedia sc_media; /* media information */
+ device_t miibus;
+#ifdef __rtems__
+ int phyidx;
+#endif
+ device_t dev;
+ int tunable_int_delay; /* interrupt delay value for ucode */
+ int tunable_bundle_max; /* max # frames per interrupt (ucode) */
+ int tunable_noflow; /* flow control disabled */
+ int rnr; /* RNR events */
+ int eeprom_size; /* size of serial EEPROM */
+ int suspended; /* 0 = normal 1 = suspended or dead */
+ int cu_resume_bug;
+ int revision;
+ int flags;
+ uint8_t rfa_size;
+ uint32_t tx_cmd;
+};
+
+#define FXP_FLAG_MWI_ENABLE 0x0001 /* MWI enable */
+#define FXP_FLAG_READ_ALIGN 0x0002 /* align read access with cacheline */
+#define FXP_FLAG_WRITE_ALIGN 0x0004 /* end write on cacheline */
+#define FXP_FLAG_EXT_TXCB 0x0008 /* enable use of extended TXCB */
+#define FXP_FLAG_SERIAL_MEDIA 0x0010 /* 10Mbps serial interface */
+#define FXP_FLAG_LONG_PKT_EN 0x0020 /* enable long packet reception */
+#define FXP_FLAG_ALL_MCAST 0x0040 /* accept all multicast frames */
+#define FXP_FLAG_CU_RESUME_BUG 0x0080 /* requires workaround for CU_RESUME */
+#define FXP_FLAG_UCODE 0x0100 /* ucode is loaded */
+#define FXP_FLAG_DEFERRED_RNR 0x0200 /* DEVICE_POLLING deferred RNR */
+#define FXP_FLAG_EXT_RFA 0x0400 /* extended RFDs for csum offload */
+#define FXP_FLAG_SAVE_BAD 0x0800 /* save bad pkts: bad size, CRC, etc */
+
+/* Macros to ease CSR access. */
+#define CSR_READ_1(sc, reg) bus_read_1(sc->fxp_res[0], reg)
+#define CSR_READ_2(sc, reg) bus_read_2(sc->fxp_res[0], reg)
+#define CSR_READ_4(sc, reg) bus_read_4(sc->fxp_res[0], reg)
+#define CSR_WRITE_1(sc, reg, val) bus_write_1(sc->fxp_res[0], reg, val)
+#define CSR_WRITE_2(sc, reg, val) bus_write_2(sc->fxp_res[0], reg, val)
+#define CSR_WRITE_4(sc, reg, val) bus_write_4(sc->fxp_res[0], reg, val)
diff --git a/bsd_eth_drivers/if_fxp/rcvbundl.h b/bsd_eth_drivers/if_fxp/rcvbundl.h
new file mode 100644
index 0000000..5647456
--- /dev/null
+++ b/bsd_eth_drivers/if_fxp/rcvbundl.h
@@ -0,0 +1,1257 @@
+/*-
+Copyright (c) 1999-2001, Intel Corporation
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/*
+ * $FreeBSD: src/sys/dev/fxp/rcvbundl.h,v 1.3.18.1 2008/11/25 02:59:29 kensmith Exp $
+ */
+/*
+rcvbundl.h
+
+Author: Patrick J Luhmann (PJL)
+Date: 05/30/2000
+Version: 3.28
+
+This file contains the loadable micro code arrays to implement receive bundling on the
+D101 A-step, D101 B-step, D101M (B-step only), D101S, D102 B-step,
+D102 B-step with TCO work around, D102 C-step and D102 E-step.
+
+Each controller has its own specific micro code array. The array for one controller
+is totally incompatible with any other controller, and if used will most likely
+cause the controller to lock up and stop responding to the driver. Each micro
+code array has its own parameter offsets (described below), and they each have
+their own version number (which should not be confused with the version of the
+rcvbundl.h file given above).
+
+*/
+
+
+
+/*************************************************************************
+* CPUSaver parameters
+*
+* All CPUSaver parameters are 16-bit literals that are part of a
+* "move immediate value" instruction. By changing the value of
+* the literal in the instruction before the code is loaded, the
+* driver can change the algorithm.
+*
+* CPUSAVER_DWORD - This is the location of the instruction that loads
+* the dead-man timer with its initial value. By writing a 16-bit
+* value to the low word of this instruction, the driver can change
+* the timer value. The current default is either x600 or x800;
+* experiments show that the value probably should stay within the
+* range of x200 - x1000.
+*
+* CPUSAVER_BUNDLE_MAX_DWORD - This is the location of the instruction
+* that sets the maximum number of frames that will be bundled. In
+* some situations, such as the TCP windowing algorithm, it may be
+* better to limit the growth of the bundle size than let it go as
+* high as it can, because that could cause too much added latency.
+* The default is six, because this is the number of packets in the
+* default TCP window size. A value of 1 would make CPUSaver indicate
+* an interrupt for every frame received. If you do not want to put
+* a limit on the bundle size, set this value to xFFFF.
+*
+* CPUSAVER_MIN_SIZE_DWORD - This is the location of the instruction
+* that contains a bit-mask describing the minimum size frame that
+* will be bundled. The default masks the lower 7 bits, which means
+* that any frame less than 128 bytes in length will not be bundled,
+* but will instead immediately generate an interrupt. This does
+* not affect the current bundle in any way. Any frame that is 128
+* bytes or larger will be bundled normally. This feature is meant
+* to provide immediate indication of ACK frames in a TCP environment.
+* Customers were seeing poor performance when a machine with CPUSaver
+* enabled was sending but not receiving. The delay introduced when
+* the ACKs were received was enough to reduce total throughput, because
+* the sender would sit idle until the ACK was finally seen.
+*
+* The current default is 0xFF80, which masks out the lower 7 bits.
+* This means that any frame which is x7F (127) bytes or smaller
+* will cause an immediate interrupt. Because this value must be a
+* bit mask, there are only a few valid values that can be used. To
+* turn this feature off, the driver can write the value xFFFF to the
+* lower word of this instruction (in the same way that the other
+* parameters are used). Likewise, a value of 0xF800 (2047) would
+* cause an interrupt to be generated for every frame, because all
+* standard Ethernet frames are <= 2047 bytes in length.
+*************************************************************************/
+
+
+
+/********************************************************/
+/* CPUSaver micro code for the D101A */
+/********************************************************/
+
+/* Version 2.0 */
+
+/* This value is the same for both A and B step of 558. */
+#define D101_CPUSAVER_DWORD 72
+
+
+#define D101_A_RCVBUNDLE_UCODE \
+{\
+0x03B301BB, \
+0x0046FFFF, \
+0xFFFFFFFF, \
+0x051DFFFF, \
+0xFFFFFFFF, \
+0xFFFFFFFF, \
+0x000C0001, \
+0x00101212, \
+0x000C0008, \
+0x003801BC, \
+0x00000000, \
+0x00124818, \
+0x000C1000, \
+0x00220809, \
+0x00010200, \
+0x00124818, \
+0x000CFFFC, \
+0x003803B5, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x0010009C, \
+0x0024B81D, \
+0x00130836, \
+0x000C0001, \
+0x0026081C, \
+0x0020C81B, \
+0x00130824, \
+0x00222819, \
+0x00101213, \
+0x00041000, \
+0x003A03B3, \
+0x00010200, \
+0x00101B13, \
+0x00238081, \
+0x00213049, \
+0x0038003B, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x0010009C, \
+0x0024B83E, \
+0x00130826, \
+0x000C0001, \
+0x0026083B, \
+0x00010200, \
+0x00134824, \
+0x000C0001, \
+0x00101213, \
+0x00041000, \
+0x0038051E, \
+0x00101313, \
+0x00010400, \
+0x00380521, \
+0x00050600, \
+0x00100824, \
+0x00101310, \
+0x00041000, \
+0x00080600, \
+0x00101B10, \
+0x0038051E, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+}
+
+
+/********************************************************/
+/* CPUSaver micro code for the D101B */
+/********************************************************/
+
+/* Version 2.0 */
+
+#define D101_B0_RCVBUNDLE_UCODE \
+{\
+0x03B401BC, \
+0x0047FFFF, \
+0xFFFFFFFF, \
+0x051EFFFF, \
+0xFFFFFFFF, \
+0xFFFFFFFF, \
+0x000C0001, \
+0x00101B92, \
+0x000C0008, \
+0x003801BD, \
+0x00000000, \
+0x00124818, \
+0x000C1000, \
+0x00220809, \
+0x00010200, \
+0x00124818, \
+0x000CFFFC, \
+0x003803B6, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x0010009C, \
+0x0024B81D, \
+0x0013082F, \
+0x000C0001, \
+0x0026081C, \
+0x0020C81B, \
+0x00130837, \
+0x00222819, \
+0x00101B93, \
+0x00041000, \
+0x003A03B4, \
+0x00010200, \
+0x00101793, \
+0x00238082, \
+0x0021304A, \
+0x0038003C, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x0010009C, \
+0x0024B83E, \
+0x00130826, \
+0x000C0001, \
+0x0026083B, \
+0x00010200, \
+0x00134837, \
+0x000C0001, \
+0x00101B93, \
+0x00041000, \
+0x0038051F, \
+0x00101313, \
+0x00010400, \
+0x00380522, \
+0x00050600, \
+0x00100837, \
+0x00101310, \
+0x00041000, \
+0x00080600, \
+0x00101790, \
+0x0038051F, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+}
+
+
+/********************************************************/
+/* CPUSaver micro code for the D101M (B-step only) */
+/********************************************************/
+
+/* Version 2.10 */
+
+/* Parameter values for the D101M B-step */
+#define D101M_CPUSAVER_DWORD 78
+#define D101M_CPUSAVER_BUNDLE_MAX_DWORD 65
+#define D101M_CPUSAVER_MIN_SIZE_DWORD 126
+
+
+#define D101M_B_RCVBUNDLE_UCODE \
+{\
+0x00550215, \
+0xFFFF0437, \
+0xFFFFFFFF, \
+0x06A70789, \
+0xFFFFFFFF, \
+0x0558FFFF, \
+0x000C0001, \
+0x00101312, \
+0x000C0008, \
+0x00380216, \
+0x0010009C, \
+0x00204056, \
+0x002380CC, \
+0x00380056, \
+0x0010009C, \
+0x00244C0B, \
+0x00000800, \
+0x00124818, \
+0x00380438, \
+0x00000000, \
+0x00140000, \
+0x00380555, \
+0x00308000, \
+0x00100662, \
+0x00100561, \
+0x000E0408, \
+0x00134861, \
+0x000C0002, \
+0x00103093, \
+0x00308000, \
+0x00100624, \
+0x00100561, \
+0x000E0408, \
+0x00100861, \
+0x000C007E, \
+0x00222C21, \
+0x000C0002, \
+0x00103093, \
+0x00380C7A, \
+0x00080000, \
+0x00103090, \
+0x00380C7A, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x0010009C, \
+0x00244C2D, \
+0x00010004, \
+0x00041000, \
+0x003A0437, \
+0x00044010, \
+0x0038078A, \
+0x00000000, \
+0x00100099, \
+0x00206C7A, \
+0x0010009C, \
+0x00244C48, \
+0x00130824, \
+0x000C0001, \
+0x00101213, \
+0x00260C75, \
+0x00041000, \
+0x00010004, \
+0x00130826, \
+0x000C0006, \
+0x002206A8, \
+0x0013C926, \
+0x00101313, \
+0x003806A8, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00080600, \
+0x00101B10, \
+0x00050004, \
+0x00100826, \
+0x00101210, \
+0x00380C34, \
+0x00000000, \
+0x00000000, \
+0x0021155B, \
+0x00100099, \
+0x00206559, \
+0x0010009C, \
+0x00244559, \
+0x00130836, \
+0x000C0000, \
+0x00220C62, \
+0x000C0001, \
+0x00101B13, \
+0x00229C0E, \
+0x00210C0E, \
+0x00226C0E, \
+0x00216C0E, \
+0x0022FC0E, \
+0x00215C0E, \
+0x00214C0E, \
+0x00380555, \
+0x00010004, \
+0x00041000, \
+0x00278C67, \
+0x00040800, \
+0x00018100, \
+0x003A0437, \
+0x00130826, \
+0x000C0001, \
+0x00220559, \
+0x00101313, \
+0x00380559, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00130831, \
+0x0010090B, \
+0x00124813, \
+0x000CFF80, \
+0x002606AB, \
+0x00041000, \
+0x003806A8, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+}
+
+
+/********************************************************/
+/* CPUSaver micro code for the D101S */
+/********************************************************/
+
+/* Version 1.20 */
+
+/* Parameter values for the D101S */
+#define D101S_CPUSAVER_DWORD 78
+#define D101S_CPUSAVER_BUNDLE_MAX_DWORD 67
+#define D101S_CPUSAVER_MIN_SIZE_DWORD 129
+
+
+#define D101S_RCVBUNDLE_UCODE \
+{\
+0x00550242, \
+0xFFFF047E, \
+0xFFFFFFFF, \
+0x06FF0818, \
+0xFFFFFFFF, \
+0x05A6FFFF, \
+0x000C0001, \
+0x00101312, \
+0x000C0008, \
+0x00380243, \
+0x0010009C, \
+0x00204056, \
+0x002380D0, \
+0x00380056, \
+0x0010009C, \
+0x00244F8B, \
+0x00000800, \
+0x00124818, \
+0x0038047F, \
+0x00000000, \
+0x00140000, \
+0x003805A3, \
+0x00308000, \
+0x00100610, \
+0x00100561, \
+0x000E0408, \
+0x00134861, \
+0x000C0002, \
+0x00103093, \
+0x00308000, \
+0x00100624, \
+0x00100561, \
+0x000E0408, \
+0x00100861, \
+0x000C007E, \
+0x00222FA1, \
+0x000C0002, \
+0x00103093, \
+0x00380F90, \
+0x00080000, \
+0x00103090, \
+0x00380F90, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x0010009C, \
+0x00244FAD, \
+0x00010004, \
+0x00041000, \
+0x003A047E, \
+0x00044010, \
+0x00380819, \
+0x00000000, \
+0x00100099, \
+0x00206FFD, \
+0x0010009A, \
+0x0020AFFD, \
+0x0010009C, \
+0x00244FC8, \
+0x00130824, \
+0x000C0001, \
+0x00101213, \
+0x00260FF8, \
+0x00041000, \
+0x00010004, \
+0x00130826, \
+0x000C0006, \
+0x00220700, \
+0x0013C926, \
+0x00101313, \
+0x00380700, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00080600, \
+0x00101B10, \
+0x00050004, \
+0x00100826, \
+0x00101210, \
+0x00380FB6, \
+0x00000000, \
+0x00000000, \
+0x002115A9, \
+0x00100099, \
+0x002065A7, \
+0x0010009A, \
+0x0020A5A7, \
+0x0010009C, \
+0x002445A7, \
+0x00130836, \
+0x000C0000, \
+0x00220FE4, \
+0x000C0001, \
+0x00101B13, \
+0x00229F8E, \
+0x00210F8E, \
+0x00226F8E, \
+0x00216F8E, \
+0x0022FF8E, \
+0x00215F8E, \
+0x00214F8E, \
+0x003805A3, \
+0x00010004, \
+0x00041000, \
+0x00278FE9, \
+0x00040800, \
+0x00018100, \
+0x003A047E, \
+0x00130826, \
+0x000C0001, \
+0x002205A7, \
+0x00101313, \
+0x003805A7, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00130831, \
+0x0010090B, \
+0x00124813, \
+0x000CFF80, \
+0x00260703, \
+0x00041000, \
+0x00380700, \
+0x00000000, \
+}
+
+
+/********************************************************/
+/* CPUSaver micro code for the D102 B-step */
+/********************************************************/
+
+/* Version 2.0 */
+
+/*
+ This version of CPUSaver differs from all the others in that it
+ combines the CPUSaver algorithm with
+ fixes for bugs in the B-step hardware (specifically, bugs
+ with Inline Receive).
+ Thus, when CPUSaver is disabled, this micro code image will
+ still need to be loaded. Before this happens, the hit addresses
+ for the CPUSaver algorithm must be set to 0x1FFFF.
+ (NOTE: the original list of the CPUSaver hit addresses was
+ truncated in the source this comment was taken from.)
+*/
+
+/* Parameter values for the D102 B-step */
+#define D102_B_CPUSAVER_DWORD 91
+#define D102_B_CPUSAVER_BUNDLE_MAX_DWORD 115
+#define D102_B_CPUSAVER_MIN_SIZE_DWORD 70
+
+
+#define D102_B_RCVBUNDLE_UCODE \
+{\
+0x006F0276, \
+0x02BF0E93, \
+0x1FFF0ED9, \
+0x0D2508FA, \
+0x04D21FFF, \
+0x0EA10892, \
+0x00300001, \
+0x0140D871, \
+0x00300008, \
+0x00E00277, \
+0x01406C57, \
+0x00816073, \
+0x008700FA, \
+0x00E00070, \
+0x00E00E94, \
+0x00200004, \
+0x01410000, \
+0x014B6F6F, \
+0x0030FFFF, \
+0x01486F72, \
+0x00E81F9B, \
+0x00E00EA3, \
+0x003C0040, \
+0x00380920, \
+0x00C02000, \
+0x0150ED38, \
+0x0150EE39, \
+0x0150EF3A, \
+0x003C0040, \
+0x01506F0D, \
+0x01600E72, \
+0x00380AE0, \
+0x00E002C0, \
+0x00300001, \
+0x014C0000, \
+0x008404DC, \
+0x014C6F72, \
+0x00E01F9D, \
+0x01406C51, \
+0x0080DFC2, \
+0x01406C52, \
+0x00815FC2, \
+0x01406C57, \
+0x00917FD5, \
+0x00E01FE6, \
+0x00000000, \
+0x01406C57, \
+0x00919FAD, \
+0x00038800, \
+0x00300000, \
+0x00E81FF2, \
+0x014D6FC4, \
+0x00E008FB, \
+0x00000000, \
+0x00822D30, \
+0x01406C51, \
+0x0080CD26, \
+0x01406C52, \
+0x00814D26, \
+0x01406C57, \
+0x00916D26, \
+0x014C6FD7, \
+0x00300000, \
+0x00841FDB, \
+0x00300001, \
+0x0140D772, \
+0x00E012B3, \
+0x014C6F91, \
+0x0150710B, \
+0x01496F72, \
+0x0030FF80, \
+0x00940EDD, \
+0x00102000, \
+0x00E00EDA, \
+0x01406C57, \
+0x00917FFD, \
+0x00001000, \
+0x00E01FFD, \
+0x00138800, \
+0x00300001, \
+0x00E81FF2, \
+0x00202500, \
+0x00E81F9B, \
+0x01600EC5, \
+0x00E00893, \
+0x00000000, \
+0x01406CD5, \
+0x0091EEA3, \
+0x00904EA3, \
+0x00901F89, \
+0x00E00EA3, \
+0x00200600, \
+0x0140D76F, \
+0x00138400, \
+0x01406FD8, \
+0x0140D96F, \
+0x00E01FE6, \
+0x00038400, \
+0x00102000, \
+0x00971FE0, \
+0x00101000, \
+0x00050200, \
+0x00E804D2, \
+0x014C6FD8, \
+0x00300001, \
+0x00840D26, \
+0x0140D872, \
+0x00E00D26, \
+0x014C6FD9, \
+0x00300001, \
+0x0140D972, \
+0x00941FBD, \
+0x00102000, \
+0x00038400, \
+0x014C6FD8, \
+0x00300006, \
+0x00840EDA, \
+0x014F71D8, \
+0x0140D872, \
+0x00E00EDA, \
+0x00340020, \
+0x014C6FED, \
+0x01603472, \
+0x016035EE, \
+0x016036EF, \
+0x00300004, \
+0x01611C71, \
+0x00300014, \
+0x00200A00, \
+0x00E810B9, \
+0x00600000, \
+0x01496F50, \
+0x00E004D3, \
+0x00000000, \
+}
+
+
+
+
+/********************************************************/
+/* TCO micro code for the D102 B-step */
+/********************************************************/
+
+/* Version 2.0 */
+
+/*
+ This version is a fix for the TCO bug. It can be loaded instead of
+ the CPUSaver version by modifying the registry key "LoadTcoUCodeInsteadOfCpuSaver"
+
+*/
+
+
+#define D102_B_TCO_UCODE \
+{\
+0x1FFF0ED3, \
+0x02BF0E93, \
+0x1FFF1FFF, \
+0x1FFF08FA, \
+0x1FFF1FFF, \
+0x0EA10892, \
+0x00906ED8, \
+0x01406C55, \
+0x00E00ED4, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00E00E94, \
+0x00200004, \
+0x01410000, \
+0x014B6F6F, \
+0x0030FFFF, \
+0x01486F72, \
+0x00E81F9B, \
+0x00E00EA3, \
+0x003C0040, \
+0x00380920, \
+0x00C02000, \
+0x0150ED38, \
+0x0150EE39, \
+0x0150EF3A, \
+0x003C0040, \
+0x01506F0D, \
+0x01600E72, \
+0x00380AE0, \
+0x00E002C0, \
+0x00300001, \
+0x014C0000, \
+0x008404DC, \
+0x014C6F72, \
+0x00E01F9D, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x01406C57, \
+0x00919FAD, \
+0x00038800, \
+0x00300000, \
+0x00E81FD5, \
+0x014D6FC4, \
+0x00E008FB, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00138800, \
+0x00300001, \
+0x00E81FD5, \
+0x00202500, \
+0x00E81F9B, \
+0x01600EC5, \
+0x00E00893, \
+0x00000000, \
+0x01406CD5, \
+0x0091EEA3, \
+0x00904EA3, \
+0x00901F89, \
+0x00E00EA3, \
+0x00340020, \
+0x014C6FED, \
+0x01603472, \
+0x016035EE, \
+0x016036EF, \
+0x00300004, \
+0x01611C71, \
+0x00300014, \
+0x00200A00, \
+0x00E810B9, \
+0x00600000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+}
+
+
+
+/********************************************************/
+/* Micro code for the D102 C-step */
+/********************************************************/
+
+/* Parameter values for the D102 C-step */
+#define D102_C_CPUSAVER_DWORD 46
+#define D102_C_CPUSAVER_BUNDLE_MAX_DWORD 54
+#define D102_C_CPUSAVER_MIN_SIZE_DWORD 133 /* not implemented */
+
+
+
+
+
+#if 0
+// This uCode includes the CPU Saver and the TCO workaround
+// for IP fragments.
+#endif
+#define D102_C_RCVBUNDLE_UCODE \
+{ \
+0x00700279, \
+0x0E6104E2, \
+0x02BF0CAE, \
+0x1519150C, \
+0x1FFF0E5B, \
+0x1FFF1FFF, \
+0x00E014D8, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00E014DC, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00E014F4, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00E014E0, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00E014E7, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00141000, \
+0x015D6F0D, \
+0x00E002C0, \
+0x00000000, \
+0x00200600, \
+0x00E0150D, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00300006, \
+0x00E0151A, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00906E65, \
+0x00800E60, \
+0x00E00E5D, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+}
+
+/********************************************************/
+/* Micro code for the D102 E-step */
+/********************************************************/
+
+/* Parameter values for the D102 E-step */
+#define D102_E_CPUSAVER_DWORD 42
+#define D102_E_CPUSAVER_BUNDLE_MAX_DWORD 54
+#define D102_E_CPUSAVER_MIN_SIZE_DWORD 46
+
+#define D102_E_RCVBUNDLE_UCODE \
+{\
+0x007D028F, \
+0x0E4204F9, \
+0x14ED0C85, \
+0x14FA14E9, \
+0x0EF70E36, \
+0x1FFF1FFF, \
+0x00E014B9, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00E014BD, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00E014D5, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00E014C1, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00E014C8, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00200600, \
+0x00E014EE, \
+0x00000000, \
+0x00000000, \
+0x0030FF80, \
+0x00940E46, \
+0x00038200, \
+0x00102000, \
+0x00E00E43, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00300006, \
+0x00E014FB, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00906E41, \
+0x00800E3C, \
+0x00E00E39, \
+0x00000000, \
+0x00906EFD, \
+0x00900EFD, \
+0x00E00EF8, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+0x00000000, \
+}
diff --git a/bsd_eth_drivers/if_le/.cvsignore b/bsd_eth_drivers/if_le/.cvsignore
new file mode 100644
index 0000000..70845e0
--- /dev/null
+++ b/bsd_eth_drivers/if_le/.cvsignore
@@ -0,0 +1 @@
+Makefile.in
diff --git a/bsd_eth_drivers/if_pcn/.cvsignore b/bsd_eth_drivers/if_pcn/.cvsignore
new file mode 100644
index 0000000..70845e0
--- /dev/null
+++ b/bsd_eth_drivers/if_pcn/.cvsignore
@@ -0,0 +1 @@
+Makefile.in
diff --git a/bsd_eth_drivers/if_pcn/if_pcn.c b/bsd_eth_drivers/if_pcn/if_pcn.c
index 7dde188..b116265 100644
--- a/bsd_eth_drivers/if_pcn/if_pcn.c
+++ b/bsd_eth_drivers/if_pcn/if_pcn.c
@@ -1509,7 +1509,6 @@ pcn_init_locked(sc)
#ifndef __rtems__
mii = device_get_softc(sc->pcn_miibus);
ife = mii->mii_media.ifm_cur;
-#endif
/* Set MAC address */
{ unsigned tmp;
@@ -1521,6 +1520,22 @@ pcn_init_locked(sc)
tmp = htole16(((u_int16_t *)IF_LLADDR(sc->pcn_ifp))[2]);
pcn_csr_write(sc, PCN_CSR_PAR2, tmp);
}
+#else
+ /* Set MAC address */
+ { unsigned tmp;
+ u_int16_t s;
+	/* fix endianness; LLADDR gets swapped on a BE machine */
+ memcpy(&s, IF_LLADDR(sc->pcn_ifp) + 0, sizeof(s));
+ tmp = htole16(s);
+ pcn_csr_write(sc, PCN_CSR_PAR0, tmp);
+ memcpy(&s, IF_LLADDR(sc->pcn_ifp) + 2, sizeof(s));
+ tmp = htole16(s);
+ pcn_csr_write(sc, PCN_CSR_PAR1, tmp);
+ memcpy(&s, IF_LLADDR(sc->pcn_ifp) + 4, sizeof(s));
+ tmp = htole16(s);
+ pcn_csr_write(sc, PCN_CSR_PAR2, tmp);
+ }
+#endif
/* Init circular RX list. */
if (pcn_list_rx_init(sc) == ENOBUFS) {
diff --git a/bsd_eth_drivers/if_re/.cvsignore b/bsd_eth_drivers/if_re/.cvsignore
new file mode 100644
index 0000000..70845e0
--- /dev/null
+++ b/bsd_eth_drivers/if_re/.cvsignore
@@ -0,0 +1 @@
+Makefile.in
diff --git a/bsd_eth_drivers/if_re/Makefile.am b/bsd_eth_drivers/if_re/Makefile.am
index 5c33e09..e7e2efd 100644
--- a/bsd_eth_drivers/if_re/Makefile.am
+++ b/bsd_eth_drivers/if_re/Makefile.am
@@ -3,7 +3,7 @@ AUTOMAKE_OPTIONS=foreign
include $(top_srcdir)/rtems-pre.am
-libif_re_a_SOURCES = if_re.c
+libif_re_a_SOURCES = if_re.c if_rl.c if_rlreg.h
##EXTRA_libif_re_a_SOURCES =
diff --git a/bsd_eth_drivers/if_re/if_re.c b/bsd_eth_drivers/if_re/if_re.c
index e39a52a..e7a9f49 100644
--- a/bsd_eth_drivers/if_re/if_re.c
+++ b/bsd_eth_drivers/if_re/if_re.c
@@ -32,10 +32,11 @@
#ifdef __rtems__
#include <libbsdport.h>
+#define M_ASSERTPKTHDR(_h)
#endif
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/dev/re/if_re.c,v 1.46.2.39.2.1 2008/10/02 02:57:24 kensmith Exp $");
+__FBSDID("$FreeBSD: src/sys/dev/re/if_re.c,v 1.161 2009/08/24 18:58:13 yongari Exp $");
/*
* RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
@@ -150,6 +151,17 @@ __FBSDID("$FreeBSD: src/sys/dev/re/if_re.c,v 1.46.2.39.2.1 2008/10/02 02:57:24 k
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
+/*
+ * Default to using PIO access for this driver.
+ */
+#define RE_USEIOSPACE
+
+#ifndef __rtems__
+#include <pci/if_rlreg.h>
+#else
+#include "if_rlreg.h"
+#endif
+
MODULE_DEPEND(re, pci, 1, 1, 1);
MODULE_DEPEND(re, ether, 1, 1, 1);
MODULE_DEPEND(re, miibus, 1, 1, 1);
@@ -159,18 +171,16 @@ MODULE_DEPEND(re, miibus, 1, 1, 1);
#ifdef __rtems__
#include <libbsdport_post.h>
+#define TUNABLE_INT(_t, _v)
#endif
-/*
- * Default to using PIO access for this driver.
- */
-#define RE_USEIOSPACE
-
+/* Tunables. */
#ifndef __rtems__
-#include <pci/if_rlreg.h>
-#else
-#include "if_rlreg.h"
+static int msi_disable = 0;
+TUNABLE_INT("hw.re.msi_disable", &msi_disable);
#endif
+static int prefer_iomap = 0;
+TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);
#define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
@@ -178,39 +188,25 @@ MODULE_DEPEND(re, miibus, 1, 1, 1);
* Various supported device vendors/types and their names.
*/
static struct rl_type re_devs[] = {
- { DLINK_VENDORID, DLINK_DEVICEID_528T, RL_HWREV_8169S,
- "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
- { DLINK_VENDORID, DLINK_DEVICEID_528T, RL_HWREV_8169_8110SB,
- "D-Link DGE-528(T) Rev.B1 Gigabit Ethernet Adapter" },
- { RT_VENDORID, RT_DEVICEID_8139, RL_HWREV_8139CPLUS,
- "RealTek 8139C+ 10/100BaseTX" },
- { RT_VENDORID, RT_DEVICEID_8101E, RL_HWREV_8101E,
- "RealTek 8101E PCIe 10/100baseTX" },
- { RT_VENDORID, RT_DEVICEID_8168, RL_HWREV_8168_SPIN1,
- "RealTek 8168/8111B PCIe Gigabit Ethernet" },
- { RT_VENDORID, RT_DEVICEID_8168, RL_HWREV_8168_SPIN2,
- "RealTek 8168/8111B PCIe Gigabit Ethernet" },
- { RT_VENDORID, RT_DEVICEID_8168, RL_HWREV_8168_SPIN3,
- "RealTek 8168/8111B PCIe Gigabit Ethernet" },
- { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169,
- "RealTek 8169 Gigabit Ethernet" },
- { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169S,
- "RealTek 8169S Single-chip Gigabit Ethernet" },
- { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169_8110SB,
- "RealTek 8169SB/8110SB Single-chip Gigabit Ethernet" },
- { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169_8110SC,
- "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
- { RT_VENDORID, RT_DEVICEID_8169SC, RL_HWREV_8169_8110SC,
- "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
- { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8110S,
- "RealTek 8110S Single-chip Gigabit Ethernet" },
- { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, RL_HWREV_8169S,
- "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
- { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, RL_HWREV_8169S,
- "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
- { USR_VENDORID, USR_DEVICEID_997902, RL_HWREV_8169S,
- "US Robotics 997902 (RTL8169S) Gigabit Ethernet" },
- { 0, 0, 0, NULL }
+ { DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
+ "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
+ { RT_VENDORID, RT_DEVICEID_8139, 0,
+ "RealTek 8139C+ 10/100BaseTX" },
+ { RT_VENDORID, RT_DEVICEID_8101E, 0,
+ "RealTek 8101E/8102E/8102EL PCIe 10/100baseTX" },
+ { RT_VENDORID, RT_DEVICEID_8168, 0,
+ "RealTek 8168/8168B/8168C/8168CP/8168D/8168DP/"
+ "8111B/8111C/8111CP/8111DP PCIe Gigabit Ethernet" },
+ { RT_VENDORID, RT_DEVICEID_8169, 0,
+ "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
+ { RT_VENDORID, RT_DEVICEID_8169SC, 0,
+ "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
+ { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
+ "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
+ { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
+ "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
+ { USR_VENDORID, USR_DEVICEID_997902, 0,
+ "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
};
static struct rl_hwrev re_hwrevs[] = {
@@ -226,14 +222,24 @@ static struct rl_hwrev re_hwrevs[] = {
{ RL_HWREV_8169, RL_8169, "8169"},
{ RL_HWREV_8169S, RL_8169, "8169S"},
{ RL_HWREV_8110S, RL_8169, "8110S"},
- { RL_HWREV_8169_8110SB, RL_8169, "8169SB"},
- { RL_HWREV_8169_8110SC, RL_8169, "8169SC"},
+ { RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB"},
+ { RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC"},
+ { RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL"},
+ { RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC"},
{ RL_HWREV_8100, RL_8139, "8100"},
{ RL_HWREV_8101, RL_8139, "8101"},
{ RL_HWREV_8100E, RL_8169, "8100E"},
{ RL_HWREV_8101E, RL_8169, "8101E"},
+ { RL_HWREV_8102E, RL_8169, "8102E"},
+ { RL_HWREV_8102EL, RL_8169, "8102EL"},
+ { RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL"},
{ RL_HWREV_8168_SPIN2, RL_8169, "8168"},
{ RL_HWREV_8168_SPIN3, RL_8169, "8168"},
+ { RL_HWREV_8168C, RL_8169, "8168C/8111C"},
+ { RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C"},
+ { RL_HWREV_8168CP, RL_8169, "8168CP/8111CP"},
+ { RL_HWREV_8168D, RL_8169, "8168D/8111D"},
+ { RL_HWREV_8168DP, RL_8169, "8168DP/8111DP"},
{ 0, 0, NULL }
};
@@ -241,26 +247,26 @@ static int re_probe (device_t);
static int re_attach (device_t);
static int re_detach (device_t);
-static int re_encap (struct rl_softc *, struct mbuf **, int *);
+static int re_encap (struct rl_softc *, struct mbuf **);
static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int);
-static void re_dma_map_desc (void *, bus_dma_segment_t *, int,
- bus_size_t, int);
static int re_allocmem (device_t, struct rl_softc *);
-static int re_newbuf (struct rl_softc *, int, struct mbuf *);
+static __inline void re_discard_rxbuf
+ (struct rl_softc *, int);
+static int re_newbuf (struct rl_softc *, int);
static int re_rx_list_init (struct rl_softc *);
static int re_tx_list_init (struct rl_softc *);
#ifdef RE_FIXUP_RX
static __inline void re_fixup_rx
(struct mbuf *);
#endif
-static int re_rxeof (struct rl_softc *);
+static int re_rxeof (struct rl_softc *, int *);
static void re_txeof (struct rl_softc *);
#ifdef DEVICE_POLLING
-static void re_poll (struct ifnet *, enum poll_cmd, int);
-static void re_poll_locked (struct ifnet *, enum poll_cmd, int);
+static int re_poll (struct ifnet *, enum poll_cmd, int);
+static int re_poll_locked (struct ifnet *, enum poll_cmd, int);
#endif
-static void re_intr (void *);
+static int re_intr (void *);
static void re_tick (void *);
static void re_tx_task (void *, int);
static void re_int_task (void *, int);
@@ -278,7 +284,11 @@ static void re_watchdog (struct rl_softc *);
static int re_suspend (device_t);
static int re_resume (device_t);
#endif
-static void re_shutdown (device_t);
+#ifndef __rtems__
+static int re_shutdown (device_t);
+#else
+static void re_shutdown (device_t);
+#endif
#ifndef __rtems__
static int re_ifmedia_upd (struct ifnet *);
static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *);
@@ -287,19 +297,21 @@ static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *);
static void re_eeprom_putbyte (struct rl_softc *, int);
static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *);
static void re_read_eeprom (struct rl_softc *, caddr_t, int, int);
-#ifndef __rtems__
static int re_gmii_readreg (device_t, int, int);
-#endif
static int re_gmii_writereg (device_t, int, int, int);
-#ifndef __rtems__
static int re_miibus_readreg (device_t, int, int);
static int re_miibus_writereg (device_t, int, int, int);
+#ifndef __rtems__
static void re_miibus_statchg (device_t);
#endif
-static void re_setmulti (struct rl_softc *);
+static void re_set_rxmode (struct rl_softc *);
static void re_reset (struct rl_softc *);
+#ifndef __rtems__
+static void re_setwol (struct rl_softc *);
+static void re_clrwol (struct rl_softc *);
+#endif
#ifdef RE_DIAG
static int re_diag (struct rl_softc *);
@@ -344,36 +356,16 @@ static driver_t re_driver = {
static devclass_t re_devclass;
DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
-DRIVER_MODULE(re, cardbus, re_driver, re_devclass, 0, 0);
DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);
#else
-static int
-re_irq_check_dis(device_t d)
-{
- // struct re_softc *sc = device_get_softc(d);
- printk( "check_dis\n" );
- return 0;
-}
-
-static void
-re_irq_en(device_t d)
-{
- // struct re_softc *sc = device_get_softc(d);
- /* This can be called from IRQ context -- since all register accesses
- * involve RAP we must take care to preserve it across this routine!
- */
- printk( "irq_en\n" );
-}
-
-
static device_method_t re_methods = {
probe: re_probe,
attach: re_attach,
shutdown: re_shutdown,
detach: re_detach,
- irq_check_dis: re_irq_check_dis,
- irq_en: re_irq_en,
+ irq_check_dis: 0,
+ irq_en: 0,
};
driver_t libbsdport_re_driver = {
@@ -383,6 +375,45 @@ driver_t libbsdport_re_driver = {
sizeof(struct rl_softc)
};
+static int
+mdio_r(int phy, void *uarg, unsigned reg, uint32_t *pval)
+{
+struct rl_softc *sc = uarg;
+
+ if ( phy != 0 )
+ return EINVAL;
+
+ *pval = (uint32_t) re_miibus_readreg(sc->rl_dev, phy, reg);
+
+ return 0;
+}
+
+static int
+mdio_w(int phy, void *uarg, unsigned reg, uint32_t val)
+{
+struct rl_softc *sc = uarg;
+
+ if ( phy != 0 )
+ return EINVAL;
+
+ re_miibus_writereg(sc->rl_dev, phy, reg, val);
+
+ return 0;
+}
+
+struct rtems_mdio_info re_mdio_100 = {
+ mdio_r : mdio_r,
+ mdio_w : mdio_w,
+ has_gmii : 0
+};
+
+struct rtems_mdio_info re_mdio_1000 = {
+ mdio_r : mdio_r,
+ mdio_w : mdio_w,
+ has_gmii : 1
+};
+
+#define RE_MDIO(sc) ((sc)->rl_type == RL_8169 ? &re_mdio_1000 : & re_mdio_100)
#endif
@@ -398,11 +429,9 @@ driver_t libbsdport_re_driver = {
* Send a read command and address to the EEPROM, check for ACK.
*/
static void
-re_eeprom_putbyte(sc, addr)
- struct rl_softc *sc;
- int addr;
+re_eeprom_putbyte(struct rl_softc *sc, int addr)
{
- register int d, i;
+ int d, i;
d = addr | (RL_9346_READ << sc->rl_eewidth);
@@ -422,20 +451,15 @@ re_eeprom_putbyte(sc, addr)
EE_CLR(RL_EE_CLK);
DELAY(100);
}
-
- return;
}
/*
* Read a word of data stored in the EEPROM at address 'addr.'
*/
static void
-re_eeprom_getword(sc, addr, dest)
- struct rl_softc *sc;
- int addr;
- u_int16_t *dest;
+re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
{
- register int i;
+ int i;
u_int16_t word = 0;
/*
@@ -456,19 +480,13 @@ re_eeprom_getword(sc, addr, dest)
}
*dest = word;
-
- return;
}
/*
* Read a sequence of words from the EEPROM.
*/
static void
-re_read_eeprom(sc, dest, off, cnt)
- struct rl_softc *sc;
- caddr_t dest;
- int off;
- int cnt;
+re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
{
int i;
u_int16_t word = 0, *ptr;
@@ -486,15 +504,10 @@ re_read_eeprom(sc, dest, off, cnt)
}
CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
-
- return;
}
-#ifndef __rtems__
static int
-re_gmii_readreg(dev, phy, reg)
- device_t dev;
- int phy, reg;
+re_gmii_readreg(device_t dev, int phy, int reg)
{
struct rl_softc *sc;
u_int32_t rval;
@@ -515,26 +528,23 @@ re_gmii_readreg(dev, phy, reg)
CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
DELAY(1000);
- for (i = 0; i < RL_TIMEOUT; i++) {
+ for (i = 0; i < RL_PHY_TIMEOUT; i++) {
rval = CSR_READ_4(sc, RL_PHYAR);
if (rval & RL_PHYAR_BUSY)
break;
DELAY(100);
}
- if (i == RL_TIMEOUT) {
+ if (i == RL_PHY_TIMEOUT) {
device_printf(sc->rl_dev, "PHY read failed\n");
return (0);
}
return (rval & RL_PHYAR_PHYDATA);
}
-#endif
static int
-re_gmii_writereg(dev, phy, reg, data)
- device_t dev;
- int phy, reg, data;
+re_gmii_writereg(device_t dev, int phy, int reg, int data)
{
struct rl_softc *sc;
u_int32_t rval;
@@ -546,14 +556,14 @@ re_gmii_writereg(dev, phy, reg, data)
(data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
DELAY(1000);
- for (i = 0; i < RL_TIMEOUT; i++) {
+ for (i = 0; i < RL_PHY_TIMEOUT; i++) {
rval = CSR_READ_4(sc, RL_PHYAR);
if (!(rval & RL_PHYAR_BUSY))
break;
DELAY(100);
}
- if (i == RL_TIMEOUT) {
+ if (i == RL_PHY_TIMEOUT) {
device_printf(sc->rl_dev, "PHY write failed\n");
return (0);
}
@@ -561,11 +571,8 @@ re_gmii_writereg(dev, phy, reg, data)
return (0);
}
-#ifndef __rtems__
static int
-re_miibus_readreg(dev, phy, reg)
- device_t dev;
- int phy, reg;
+re_miibus_readreg(device_t dev, int phy, int reg)
{
struct rl_softc *sc;
u_int16_t rval = 0;
@@ -621,13 +628,9 @@ re_miibus_readreg(dev, phy, reg)
}
return (rval);
}
-#endif
-#ifndef __rtems__
static int
-re_miibus_writereg(dev, phy, reg, data)
- device_t dev;
- int phy, reg, data;
+re_miibus_writereg(device_t dev, int phy, int reg, int data)
{
struct rl_softc *sc;
u_int16_t re8139_reg = 0;
@@ -675,42 +678,67 @@ re_miibus_writereg(dev, phy, reg, data)
CSR_WRITE_2(sc, re8139_reg, data);
return (0);
}
-#endif
#ifndef __rtems__
static void
-re_miibus_statchg(dev)
- device_t dev;
+re_miibus_statchg(device_t dev)
{
+ struct rl_softc *sc;
+ struct ifnet *ifp;
+ struct mii_data *mii;
+
+ sc = device_get_softc(dev);
+ mii = device_get_softc(sc->rl_miibus);
+ ifp = sc->rl_ifp;
+ if (mii == NULL || ifp == NULL ||
+ (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+ sc->rl_flags &= ~RL_FLAG_LINK;
+ if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
+ (IFM_ACTIVE | IFM_AVALID)) {
+ switch (IFM_SUBTYPE(mii->mii_media_active)) {
+ case IFM_10_T:
+ case IFM_100_TX:
+ sc->rl_flags |= RL_FLAG_LINK;
+ break;
+ case IFM_1000_T:
+ if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
+ break;
+ sc->rl_flags |= RL_FLAG_LINK;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * RealTek controllers does not provide any interface to
+ * Tx/Rx MACs for resolved speed, duplex and flow-control
+ * parameters.
+ */
}
#endif
/*
- * Program the 64-bit multicast hash filter.
+ * Set the RX configuration and 64-bit multicast hash filter.
*/
static void
-re_setmulti(sc)
- struct rl_softc *sc;
+re_set_rxmode(struct rl_softc *sc)
{
struct ifnet *ifp;
#ifndef __rtems__
- int h = 0;
struct ifmultiaddr *ifma;
#endif
- u_int32_t hashes[2] = { 0, 0 };
- u_int32_t rxfilt;
- int mcnt = 0;
- u_int32_t hwrev;
+ uint32_t hashes[2] = { 0, 0 };
+ uint32_t h, rxfilt;
RL_LOCK_ASSERT(sc);
ifp = sc->rl_ifp;
+ rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;
- rxfilt = CSR_READ_4(sc, RL_RXCFG);
- rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_MULTI);
- if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
+ if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
if (ifp->if_flags & IFF_PROMISC)
rxfilt |= RL_RXCFG_RX_ALLPHYS;
/*
@@ -719,19 +747,12 @@ re_setmulti(sc)
* promiscuous mode.
*/
rxfilt |= RL_RXCFG_RX_MULTI;
- CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
- CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF);
- CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF);
- return;
+ hashes[0] = hashes[1] = 0xffffffff;
+ goto done;
}
- /* first, zot all the existing hash bits */
- CSR_WRITE_4(sc, RL_MAR0, 0);
- CSR_WRITE_4(sc, RL_MAR4, 0);
-
- /* now program new ones */
#ifndef __rtems__
- IF_ADDR_LOCK(ifp);
+ if_maddr_rlock(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
@@ -741,48 +762,50 @@ re_setmulti(sc)
hashes[0] |= (1 << h);
else
hashes[1] |= (1 << (h - 32));
- mcnt++;
}
- IF_ADDR_UNLOCK(ifp);
+ if_maddr_runlock(ifp);
+#else
+ {
+ /* UNTESTED */
+ struct ether_multi *enm;
+ struct ether_multistep step;
+ ETHER_FIRST_MULTI(step, (struct arpcom*)ifp, enm);
+ while ( enm != NULL ) {
+ h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
+ if (h < 32)
+ hashes[0] |= (1 << h);
+ else
+ hashes[1] |= (1 << (h - 32));
+ }
+ }
#endif
-
- if (mcnt)
+
+ if (hashes[0] != 0 || hashes[1] != 0) {
+ /*
+ * For some unfathomable reason, RealTek decided to
+ * reverse the order of the multicast hash registers
+ * in the PCI Express parts. This means we have to
+ * write the hash pattern in reverse order for those
+ * devices.
+ */
+ if ((sc->rl_flags & RL_FLAG_PCIE) != 0) {
+ h = bswap32(hashes[0]);
+ hashes[0] = bswap32(hashes[1]);
+ hashes[1] = h;
+ }
rxfilt |= RL_RXCFG_RX_MULTI;
- else
- rxfilt &= ~RL_RXCFG_RX_MULTI;
+ }
+done:
+ CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
+ CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
-
- /*
- * For some unfathomable reason, RealTek decided to reverse
- * the order of the multicast hash registers in the PCI Express
- * parts. This means we have to write the hash pattern in reverse
- * order for those devices.
- */
-
- hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
-
- switch (hwrev) {
- case RL_HWREV_8100E:
- case RL_HWREV_8101E:
- case RL_HWREV_8168_SPIN1:
- case RL_HWREV_8168_SPIN2:
- case RL_HWREV_8168_SPIN3:
- CSR_WRITE_4(sc, RL_MAR0, bswap32(hashes[1]));
- CSR_WRITE_4(sc, RL_MAR4, bswap32(hashes[0]));
- break;
- default:
- CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
- CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
- break;
- }
}
static void
-re_reset(sc)
- struct rl_softc *sc;
+re_reset(struct rl_softc *sc)
{
- register int i;
+ int i;
RL_LOCK_ASSERT(sc);
@@ -796,7 +819,10 @@ re_reset(sc)
if (i == RL_TIMEOUT)
device_printf(sc->rl_dev, "reset never completed!\n");
- CSR_WRITE_1(sc, 0x82, 1);
+ if ((sc->rl_flags & RL_FLAG_MACRESET) != 0)
+ CSR_WRITE_1(sc, 0x82, 1);
+ if (sc->rl_hwrev == RL_HWREV_8169S)
+ re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0);
}
#ifdef RE_DIAG
@@ -822,8 +848,7 @@ re_reset(sc)
*/
static int
-re_diag(sc)
- struct rl_softc *sc;
+re_diag(struct rl_softc *sc)
{
struct ifnet *ifp = sc->rl_ifp;
struct mbuf *m0;
@@ -853,9 +878,8 @@ re_diag(sc)
ifp->if_flags |= IFF_PROMISC;
sc->rl_testmode = 1;
- re_reset(sc);
re_init_locked(sc);
- sc->rl_link = 1;
+ sc->rl_flags |= RL_FLAG_LINK;
if (sc->rl_type == RL_8169)
phyaddr = 1;
else
@@ -921,14 +945,14 @@ re_diag(sc)
bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
sc->rl_ldata.rl_rx_list_map,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_sync(sc->rl_ldata.rl_mtag,
- sc->rl_ldata.rl_rx_dmamap[0],
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(sc->rl_ldata.rl_mtag,
- sc->rl_ldata.rl_rx_dmamap[0]);
-
- m0 = sc->rl_ldata.rl_rx_mbuf[0];
- sc->rl_ldata.rl_rx_mbuf[0] = NULL;
+ bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
+ sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
+ sc->rl_ldata.rl_rx_desc[0].rx_dmamap);
+
+ m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
+ sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
eh = mtod(m0, struct ether_header *);
cur_rx = &sc->rl_ldata.rl_rx_list[0];
@@ -966,7 +990,7 @@ done:
/* Turn interface off, release resources */
sc->rl_testmode = 0;
- sc->rl_link = 0;
+ sc->rl_flags &= ~RL_FLAG_LINK;
ifp->if_flags &= ~IFF_PROMISC;
re_stop(sc);
if (m0 != NULL)
@@ -984,137 +1008,45 @@ done:
* IDs against our list and return a device name if we find a match.
*/
static int
-re_probe(dev)
- device_t dev;
+re_probe(device_t dev)
{
struct rl_type *t;
- struct rl_softc *sc;
- int rid;
- u_int32_t hwrev;
-
- t = re_devs;
- sc = device_get_softc(dev);
-
- while (t->rl_name != NULL) {
- if ((pci_get_vendor(dev) == t->rl_vid) &&
- (pci_get_device(dev) == t->rl_did)) {
+ uint16_t devid, vendor;
+ uint16_t revid, sdevid;
+ int i;
+
+ vendor = pci_get_vendor(dev);
+ devid = pci_get_device(dev);
+ revid = pci_get_revid(dev);
+ sdevid = pci_get_subdevice(dev);
+
+ if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) {
+ if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) {
/*
* Only attach to rev. 3 of the Linksys EG1032 adapter.
- * Rev. 2 i supported by sk(4).
+ * Rev. 2 is supported by sk(4).
*/
- if ((t->rl_vid == LINKSYS_VENDORID) &&
- (t->rl_did == LINKSYS_DEVICEID_EG1032) &&
- (pci_get_subdevice(dev) !=
- LINKSYS_SUBDEVICE_EG1032_REV3)) {
- t++;
- continue;
- }
-
- /*
- * Temporarily map the I/O space
- * so we can read the chip ID register.
- */
- rid = RL_RID;
- sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid,
- RF_ACTIVE);
- if (sc->rl_res == NULL) {
- device_printf(dev,
- "couldn't map ports/memory\n");
- return (ENXIO);
- }
- sc->rl_btag = rman_get_bustag(sc->rl_res);
- sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
- hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
- bus_release_resource(dev, RL_RES,
- RL_RID, sc->rl_res);
- if (t->rl_basetype == hwrev) {
- device_set_desc(dev, t->rl_name);
- return (BUS_PROBE_DEFAULT);
- }
+ return (ENXIO);
}
- t++;
}
- return (ENXIO);
-}
-
-/*
- * This routine takes the segment list provided as the result of
- * a bus_dma_map_load() operation and assigns the addresses/lengths
- * to RealTek DMA descriptors. This can be called either by the RX
- * code or the TX code. In the RX case, we'll probably wind up mapping
- * at most one segment. For the TX case, there could be any number of
- * segments since TX packets may span multiple mbufs. In either case,
- * if the number of segments is larger than the rl_maxsegs limit
- * specified by the caller, we abort the mapping operation. Sadly,
- * whoever designed the buffer mapping API did not provide a way to
- * return an error from here, so we have to fake it a bit.
- */
-
-static void
-re_dma_map_desc(arg, segs, nseg, mapsize, error)
- void *arg;
- bus_dma_segment_t *segs;
- int nseg;
- bus_size_t mapsize;
- int error;
-{
- struct rl_dmaload_arg *ctx;
- struct rl_desc *d = NULL;
- int i = 0, idx;
- u_int32_t cmdstat;
- int totlen = 0;
-
- if (error)
- return;
-
- ctx = arg;
-
- /* Signal error to caller if there's too many segments */
- if (nseg > ctx->rl_maxsegs) {
- ctx->rl_maxsegs = 0;
- return;
+ if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
+ if (revid != 0x20) {
+ /* 8139, let rl(4) take care of this device. */
+ printf ("need rl driver: %0x %0x %0x\n", vendor, devid, revid);
+ return (ENXIO);
+ }
}
- /*
- * Map the segment array into descriptors. Note that we set the
- * start-of-frame and end-of-frame markers for either TX or RX, but
- * they really only have meaning in the TX case. (In the RX case,
- * it's the chip that tells us where packets begin and end.)
- * We also keep track of the end of the ring and set the
- * end-of-ring bits as needed, and we set the ownership bits
- * in all except the very first descriptor. (The caller will
- * set this descriptor later when it start transmission or
- * reception.)
- */
- idx = ctx->rl_idx;
- for (;;) {
- d = &ctx->rl_ring[idx];
- if (le32toh(d->rl_cmdstat) & RL_RDESC_STAT_OWN) {
- ctx->rl_maxsegs = 0;
- return;
+ t = re_devs;
+ for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) {
+ if (vendor == t->rl_vid && devid == t->rl_did) {
+ device_set_desc(dev, t->rl_name);
+ return (BUS_PROBE_DEFAULT);
}
- cmdstat = segs[i].ds_len;
- totlen += segs[i].ds_len;
- d->rl_vlanctl = 0;
- d->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
- d->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
- if (i == 0)
- cmdstat |= RL_TDESC_CMD_SOF;
- else
- cmdstat |= RL_TDESC_CMD_OWN;
- if (idx == (RL_RX_DESC_CNT - 1))
- cmdstat |= RL_TDESC_CMD_EOR;
- d->rl_cmdstat = htole32(cmdstat | ctx->rl_flags);
- i++;
- if (i == nseg)
- break;
- RL_DESC_INC(idx);
}
- d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
- ctx->rl_maxsegs = nseg;
- ctx->rl_idx = idx;
+ return (ENXIO);
}
/*
@@ -1122,11 +1054,7 @@ re_dma_map_desc(arg, segs, nseg, mapsize, error)
*/
static void
-re_dma_map_addr(arg, segs, nseg, error)
- void *arg;
- bus_dma_segment_t *segs;
- int nseg;
- int error;
+re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
bus_addr_t *addr;
@@ -1139,25 +1067,54 @@ re_dma_map_addr(arg, segs, nseg, error)
}
static int
-re_allocmem(dev, sc)
- device_t dev;
- struct rl_softc *sc;
+re_allocmem(device_t dev, struct rl_softc *sc)
{
+ bus_size_t rx_list_size, tx_list_size;
int error;
- int nseg;
int i;
+ rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
+ tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);
+
+ /*
+ * Allocate the parent bus DMA tag appropriate for PCI.
+ * In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD
+ * register should be set. However some RealTek chips are known
+ * to be buggy on DAC handling, therefore disable DAC by limiting
+ * DMA address space to 32bit. PCIe variants of RealTek chips
+ * may not have the limitation but I took safer path.
+ */
+ error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
+ BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+ BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
+ NULL, NULL, &sc->rl_parent_tag);
+ if (error) {
+ device_printf(dev, "could not allocate parent DMA tag\n");
+ return (error);
+ }
+
+ /*
+ * Allocate map for TX mbufs.
+ */
+ error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
+ BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
+ NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
+ NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
+ if (error) {
+ device_printf(dev, "could not allocate TX DMA tag\n");
+ return (error);
+ }
+
/*
* Allocate map for RX mbufs.
*/
- nseg = 32;
- error = bus_dma_tag_create(sc->rl_parent_tag, ETHER_ALIGN, 0,
- BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
- NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
- NULL, NULL, &sc->rl_ldata.rl_mtag);
+
+ error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
+ BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+ MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
if (error) {
- device_printf(dev, "could not allocate dma tag\n");
- return (ENOMEM);
+ device_printf(dev, "could not allocate RX DMA tag\n");
+ return (error);
}
/*
@@ -1165,36 +1122,44 @@ re_allocmem(dev, sc)
*/
error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
- NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, 0,
+ NULL, tx_list_size, 1, tx_list_size, 0,
NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
if (error) {
- device_printf(dev, "could not allocate dma tag\n");
- return (ENOMEM);
+ device_printf(dev, "could not allocate TX DMA ring tag\n");
+ return (error);
}
/* Allocate DMA'able memory for the TX ring */
error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
- (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
+ (void **)&sc->rl_ldata.rl_tx_list,
+ BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
&sc->rl_ldata.rl_tx_list_map);
- if (error)
- return (ENOMEM);
+ if (error) {
+ device_printf(dev, "could not allocate TX DMA ring\n");
+ return (error);
+ }
/* Load the map for the TX ring. */
+ sc->rl_ldata.rl_tx_list_addr = 0;
error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
- sc->rl_ldata.rl_tx_list_map, (caddr_t) sc->rl_ldata.rl_tx_list,
- RL_TX_LIST_SZ, re_dma_map_addr,
+ sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
+ tx_list_size, re_dma_map_addr,
&sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
+ if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
+ device_printf(dev, "could not load TX DMA ring\n");
+ return (ENOMEM);
+ }
/* Create DMA maps for TX buffers */
- for (i = 0; i < RL_TX_DESC_CNT; i++) {
- error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0,
- &sc->rl_ldata.rl_tx_dmamap[i]);
+ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
+ error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
+ &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
if (error) {
- device_printf(dev, "can't create DMA map for TX\n");
- return (ENOMEM);
+ device_printf(dev, "could not create DMA map for TX\n");
+ return (error);
}
}
@@ -1203,36 +1168,50 @@ re_allocmem(dev, sc)
*/
error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
- NULL, RL_RX_LIST_SZ, 1, RL_RX_LIST_SZ, 0,
+ NULL, rx_list_size, 1, rx_list_size, 0,
NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
if (error) {
- device_printf(dev, "could not allocate dma tag\n");
- return (ENOMEM);
+ device_printf(dev, "could not create RX DMA ring tag\n");
+ return (error);
}
/* Allocate DMA'able memory for the RX ring */
error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
- (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
+ (void **)&sc->rl_ldata.rl_rx_list,
+ BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
&sc->rl_ldata.rl_rx_list_map);
- if (error)
- return (ENOMEM);
+ if (error) {
+ device_printf(dev, "could not allocate RX DMA ring\n");
+ return (error);
+ }
/* Load the map for the RX ring. */
+ sc->rl_ldata.rl_rx_list_addr = 0;
error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
- sc->rl_ldata.rl_rx_list_map, (caddr_t) sc->rl_ldata.rl_rx_list,
- RL_RX_LIST_SZ, re_dma_map_addr,
+ sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
+ rx_list_size, re_dma_map_addr,
&sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
+ if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
+ device_printf(dev, "could not load RX DMA ring\n");
+ return (ENOMEM);
+ }
/* Create DMA maps for RX buffers */
- for (i = 0; i < RL_RX_DESC_CNT; i++) {
- error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0,
- &sc->rl_ldata.rl_rx_dmamap[i]);
+ error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
+ &sc->rl_ldata.rl_rx_sparemap);
+ if (error) {
+ device_printf(dev, "could not create spare DMA map for RX\n");
+ return (error);
+ }
+ for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
+ error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
+ &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
if (error) {
- device_printf(dev, "can't create DMA map for RX\n");
- return (ENOMEM);
+ device_printf(dev, "could not create DMA map for RX\n");
+ return (error);
}
}
@@ -1244,8 +1223,7 @@ re_allocmem(dev, sc)
* setup and ethernet/BPF attach.
*/
static int
-re_attach(dev)
- device_t dev;
+re_attach(device_t dev)
{
u_char eaddr[ETHER_ADDR_LEN];
u_int16_t as[ETHER_ADDR_LEN / 2];
@@ -1253,8 +1231,14 @@ re_attach(dev)
struct ifnet *ifp;
struct rl_hwrev *hw_rev;
int hwrev;
- u_int16_t re_did = 0;
+ u_int16_t devid, re_did = 0;
int error = 0, rid, i;
+#ifndef __rtems__
+ int msic, reg;
+#else
+ int msic;
+#endif
+ uint8_t cfg;
sc = device_get_softc(dev);
sc->rl_dev = dev;
@@ -1268,10 +1252,32 @@ re_attach(dev)
*/
pci_enable_busmaster(dev);
- rid = RL_RID;
- sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid,
- RF_ACTIVE);
-
+ devid = pci_get_device(dev);
+ /*
+ * Prefer memory space register mapping over IO space.
+ * Because RTL8169SC does not seem to work when memory mapping
+ * is used always activate io mapping.
+ */
+ if (devid == RT_DEVICEID_8169SC)
+ prefer_iomap = 1;
+ if (prefer_iomap == 0) {
+ sc->rl_res_id = PCIR_BAR(1);
+ sc->rl_res_type = SYS_RES_MEMORY;
+ /* RTL8168/8101E seems to use different BARs. */
+ if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
+ sc->rl_res_id = PCIR_BAR(2);
+ } else {
+ sc->rl_res_id = PCIR_BAR(0);
+ sc->rl_res_type = SYS_RES_IOPORT;
+ }
+ sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
+ &sc->rl_res_id, RF_ACTIVE);
+ if (sc->rl_res == NULL && prefer_iomap == 0) {
+ sc->rl_res_id = PCIR_BAR(0);
+ sc->rl_res_type = SYS_RES_IOPORT;
+ sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
+ &sc->rl_res_id, RF_ACTIVE);
+ }
if (sc->rl_res == NULL) {
device_printf(dev, "couldn't map ports/memory\n");
error = ENXIO;
@@ -1281,15 +1287,66 @@ re_attach(dev)
sc->rl_btag = rman_get_bustag(sc->rl_res);
sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
+ msic = 0;
+#ifndef __rtems__
+ if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
+ sc->rl_flags |= RL_FLAG_PCIE;
+ msic = pci_msi_count(dev);
+ if (bootverbose)
+ device_printf(dev, "MSI count : %d\n", msic);
+ }
+ if (msic > 0 && msi_disable == 0) {
+ msic = 1;
+ if (pci_alloc_msi(dev, &msic) == 0) {
+ if (msic == RL_MSI_MESSAGES) {
+ device_printf(dev, "Using %d MSI messages\n",
+ msic);
+ sc->rl_flags |= RL_FLAG_MSI;
+ /* Explicitly set MSI enable bit. */
+ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
+ cfg = CSR_READ_1(sc, RL_CFG2);
+ cfg |= RL_CFG2_MSI;
+ CSR_WRITE_1(sc, RL_CFG2, cfg);
+ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
+ } else
+ pci_release_msi(dev);
+ }
+ }
+#endif
+
/* Allocate interrupt */
- rid = 0;
- sc->rl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
+ if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
+ rid = 0;
+ sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_SHAREABLE | RF_ACTIVE);
+ if (sc->rl_irq[0] == NULL) {
+ device_printf(dev, "couldn't allocate IRQ resources\n");
+ error = ENXIO;
+ goto fail;
+ }
+ } else {
+ for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
+ sc->rl_irq[i] = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_ACTIVE);
+ if (sc->rl_irq[i] == NULL) {
+ device_printf(dev,
+				    "couldn't allocate IRQ resources for "
+ "message %d\n", rid);
+ error = ENXIO;
+ goto fail;
+ }
+ }
+ }
- if (sc->rl_irq == NULL) {
- device_printf(dev, "couldn't map interrupt\n");
- error = ENXIO;
- goto fail;
+ if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
+ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
+ cfg = CSR_READ_1(sc, RL_CFG2);
+ if ((cfg & RL_CFG2_MSI) != 0) {
+ device_printf(dev, "turning off MSI enable bit.\n");
+ cfg &= ~RL_CFG2_MSI;
+ CSR_WRITE_1(sc, RL_CFG2, cfg);
+ }
+ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
}
/* Reset the adapter. */
@@ -1298,51 +1355,144 @@ re_attach(dev)
RL_UNLOCK(sc);
hw_rev = re_hwrevs;
- hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
+ hwrev = CSR_READ_4(sc, RL_TXCFG);
+ switch (hwrev & 0x70000000) {
+ case 0x00000000:
+ case 0x10000000:
+ device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
+ hwrev &= (RL_TXCFG_HWREV | 0x80000000);
+ break;
+ default:
+ device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
+ hwrev &= RL_TXCFG_HWREV;
+ break;
+ }
+ device_printf(dev, "MAC rev. 0x%08x\n", hwrev & 0x00700000);
while (hw_rev->rl_desc != NULL) {
if (hw_rev->rl_rev == hwrev) {
sc->rl_type = hw_rev->rl_type;
+ sc->rl_hwrev = hw_rev->rl_rev;
break;
}
hw_rev++;
}
+ if (hw_rev->rl_desc == NULL) {
+ device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
+ error = ENXIO;
+ goto fail;
+ }
- sc->rl_eewidth = RL_9356_ADDR_LEN;
- re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
- if (re_did != 0x8129)
- sc->rl_eewidth = RL_9346_ADDR_LEN;
+ switch (hw_rev->rl_rev) {
+ case RL_HWREV_8139CPLUS:
+ sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_FASTETHER |
+ RL_FLAG_AUTOPAD;
+ break;
+ case RL_HWREV_8100E:
+ case RL_HWREV_8101E:
+ sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_PHYWAKE |
+ RL_FLAG_FASTETHER;
+ break;
+ case RL_HWREV_8102E:
+ case RL_HWREV_8102EL:
+ case RL_HWREV_8102EL_SPIN1:
+ sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_PHYWAKE |
+ RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
+ RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
+ break;
+ case RL_HWREV_8168_SPIN1:
+ case RL_HWREV_8168_SPIN2:
+ sc->rl_flags |= RL_FLAG_WOLRXENB;
+ /* FALLTHROUGH */
+ case RL_HWREV_8168_SPIN3:
+ sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
+ break;
+ case RL_HWREV_8168C_SPIN2:
+ sc->rl_flags |= RL_FLAG_MACSLEEP;
+ /* FALLTHROUGH */
+ case RL_HWREV_8168C:
+ if ((hwrev & 0x00700000) == 0x00200000)
+ sc->rl_flags |= RL_FLAG_MACSLEEP;
+ /* FALLTHROUGH */
+ case RL_HWREV_8168CP:
+ case RL_HWREV_8168D:
+ case RL_HWREV_8168DP:
+ sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
+ RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
+ RL_FLAG_AUTOPAD;
+ /*
+ * These controllers support jumbo frame but it seems
+ * that enabling it requires touching additional magic
+ * registers. Depending on MAC revisions some
+ * controllers need to disable checksum offload. So
+ * disable jumbo frame until I have better idea what
+ * it really requires to make it support.
+ * RTL8168C/CP : supports up to 6KB jumbo frame.
+ * RTL8111C/CP : supports up to 9KB jumbo frame.
+ */
+ sc->rl_flags |= RL_FLAG_NOJUMBO;
+ break;
+ case RL_HWREV_8169_8110SB:
+ case RL_HWREV_8169_8110SBL:
+ case RL_HWREV_8169_8110SC:
+ case RL_HWREV_8169_8110SCE:
+ sc->rl_flags |= RL_FLAG_PHYWAKE;
+ /* FALLTHROUGH */
+ case RL_HWREV_8169:
+ case RL_HWREV_8169S:
+ case RL_HWREV_8110S:
+ sc->rl_flags |= RL_FLAG_MACRESET;
+ break;
+ default:
+ break;
+ }
- /*
- * Get station address from the EEPROM.
- */
- re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
- for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
- as[i] = le16toh(as[i]);
- bcopy(as, eaddr, sizeof(eaddr));
+ /* Enable PME. */
+ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
+ cfg = CSR_READ_1(sc, RL_CFG1);
+ cfg |= RL_CFG1_PME;
+ CSR_WRITE_1(sc, RL_CFG1, cfg);
+ cfg = CSR_READ_1(sc, RL_CFG5);
+ cfg &= RL_CFG5_PME_STS;
+ CSR_WRITE_1(sc, RL_CFG5, cfg);
+ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
+
+ if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
+ /*
+ * XXX Should have a better way to extract station
+ * address from EEPROM.
+ */
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
+ } else {
+ sc->rl_eewidth = RL_9356_ADDR_LEN;
+ re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
+ if (re_did != 0x8129)
+ sc->rl_eewidth = RL_9346_ADDR_LEN;
+
+ /*
+ * Get station address from the EEPROM.
+ */
+ re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
+ for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
+ as[i] = le16toh(as[i]);
+ bcopy(as, eaddr, sizeof(eaddr));
+ }
if (sc->rl_type == RL_8169) {
- /* Set RX length mask */
+ /* Set RX length mask and number of descriptors. */
sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
sc->rl_txstart = RL_GTXSTART;
+ sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
+ sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
} else {
- /* Set RX length mask */
+ /* Set RX length mask and number of descriptors. */
sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
sc->rl_txstart = RL_TXSTART;
+ sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
+ sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
}
- /*
- * Allocate the parent bus DMA tag appropriate for PCI.
- */
-#define RL_NSEG_NEW 32
- error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
- BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
- MAXBSIZE, RL_NSEG_NEW, BUS_SPACE_MAXSIZE_32BIT, 0,
- NULL, NULL, &sc->rl_parent_tag);
- if (error)
- goto fail;
-
error = re_allocmem(dev, sc);
-
if (error)
goto fail;
@@ -1353,6 +1503,22 @@ re_attach(dev)
goto fail;
}
+ /* Take controller out of deep sleep mode. */
+ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
+ if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
+ CSR_WRITE_1(sc, RL_GPIO,
+ CSR_READ_1(sc, RL_GPIO) | 0x01);
+ else
+ CSR_WRITE_1(sc, RL_GPIO,
+ CSR_READ_1(sc, RL_GPIO) & ~0x01);
+ }
+
+ /* Take PHY out of power down mode. */
+ if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
+ re_gmii_writereg(dev, 1, 0x1f, 0);
+ re_gmii_writereg(dev, 1, 0x0e, 0);
+ }
+
/* Do MII setup */
#ifndef __rtems__
if (mii_phy_probe(dev, &sc->rl_miibus,
@@ -1363,29 +1529,6 @@ re_attach(dev)
}
#endif
- /* Take PHY out of power down mode. */
- if (sc->rl_type == RL_8169) {
- uint32_t rev;
-
- rev = CSR_READ_4(sc, RL_TXCFG);
- /* HWVERID 0, 1 and 2 : bit26-30, bit23 */
- rev &= 0x7c800000;
- if (rev != 0) {
- /* RTL8169S single chip */
- switch (rev) {
- case RL_HWREV_8169_8110SB:
- case RL_HWREV_8169_8110SC:
- case RL_HWREV_8168_SPIN2:
- case RL_HWREV_8168_SPIN3:
- re_gmii_writereg(dev, 1, 0x1f, 0);
- re_gmii_writereg(dev, 1, 0x0e, 0);
- break;
- default:
- break;
- }
- }
- }
-
ifp->if_softc = sc;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
@@ -1404,6 +1547,25 @@ re_attach(dev)
TASK_INIT(&sc->rl_txtask, 1, re_tx_task, ifp);
TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
+#ifdef __rtems__
+ taskqueue_create_fast("re_taskq", M_NOWAIT,
+ taskqueue_thread_enqueue, &taskqueue_fast);
+ taskqueue_start_threads(&taskqueue_fast, 1, PI_NET, "%s taskq",
+ device_get_nameunit(dev));
+#endif
+
+ /*
+ * XXX
+ * Still have no idea how to make TSO work on 8168C, 8168CP,
+ * 8111C and 8111CP.
+ */
+#ifndef __rtems__
+ if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
+ ifp->if_hwassist |= CSUM_TSO;
+ ifp->if_capabilities |= IFCAP_TSO4;
+ }
+#endif
+
/*
* Call MI attach routine.
*/
@@ -1412,11 +1574,19 @@ re_attach(dev)
#ifndef __rtems__
/* VLAN capability setup */
ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
-#ifdef IFCAP_VLAN_HWCSUM
if (ifp->if_capabilities & IFCAP_HWCSUM)
ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
-#endif
+ /* Enable WOL if PM is supported. */
+ if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &reg) == 0)
+ ifp->if_capabilities |= IFCAP_WOL;
ifp->if_capenable = ifp->if_capabilities;
+ /*
+ * Don't enable TSO by default. Under certain
+	 * circumstances the controller generated corrupted
+ * packets in TSO size.
+ */
+ ifp->if_hwassist &= ~CSUM_TSO;
+ ifp->if_capenable &= ~IFCAP_TSO4;
#ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING;
#endif
@@ -1427,7 +1597,7 @@ re_attach(dev)
*/
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
#endif
-
+
#ifdef RE_DIAG
/*
* Perform hardware diagnostic on the original RTL8169.
@@ -1447,8 +1617,19 @@ re_attach(dev)
#endif
/* Hook interrupt last to avoid having to lock softc */
- error = bus_setup_intr(dev, sc->rl_irq, INTR_TYPE_NET | INTR_MPSAFE |
- INTR_FAST, NULL, re_intr, sc, &sc->rl_intrhand);
+ if ((sc->rl_flags & RL_FLAG_MSI) == 0)
+ error = bus_setup_intr(dev, sc->rl_irq[0],
+ INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
+ &sc->rl_intrhand[0]);
+ else {
+ for (i = 0; i < RL_MSI_MESSAGES; i++) {
+ error = bus_setup_intr(dev, sc->rl_irq[i],
+ INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
+ &sc->rl_intrhand[i]);
+ if (error != 0)
+ break;
+ }
+ }
if (error) {
device_printf(dev, "couldn't set up irq\n");
ether_ifdetach(ifp);
@@ -1470,23 +1651,22 @@ fail:
* allocated.
*/
static int
-re_detach(dev)
- device_t dev;
+re_detach(device_t dev)
{
struct rl_softc *sc;
struct ifnet *ifp;
- int i;
+ int i, rid;
sc = device_get_softc(dev);
ifp = sc->rl_ifp;
KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));
-#ifdef DEVICE_POLLING
- if (ifp->if_capenable & IFCAP_POLLING)
- ether_poll_deregister(ifp);
-#endif
/* These should only be active if attach succeeded */
if (device_is_attached(dev)) {
+#ifdef DEVICE_POLLING
+ if (ifp->if_capenable & IFCAP_POLLING)
+ ether_poll_deregister(ifp);
+#endif
RL_LOCK(sc);
#if 0
sc->suspended = 1;
@@ -1520,14 +1700,34 @@ re_detach(dev)
* stopped here.
*/
- if (sc->rl_intrhand)
- bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand);
+ for (i = 0; i < RL_MSI_MESSAGES; i++) {
+ if (sc->rl_intrhand[i] != NULL) {
+ bus_teardown_intr(dev, sc->rl_irq[i],
+ sc->rl_intrhand[i]);
+ sc->rl_intrhand[i] = NULL;
+ }
+ }
if (ifp != NULL)
if_free(ifp);
- if (sc->rl_irq)
- bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq);
+ if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
+ if (sc->rl_irq[0] != NULL) {
+ bus_release_resource(dev, SYS_RES_IRQ, 0,
+ sc->rl_irq[0]);
+ sc->rl_irq[0] = NULL;
+ }
+ } else {
+ for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
+ if (sc->rl_irq[i] != NULL) {
+ bus_release_resource(dev, SYS_RES_IRQ, rid,
+ sc->rl_irq[i]);
+ sc->rl_irq[i] = NULL;
+ }
+ }
+ pci_release_msi(dev);
+ }
if (sc->rl_res)
- bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res);
+ bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
+ sc->rl_res);
/* Unload and free the RX DMA ring memory and map */
@@ -1553,14 +1753,20 @@ re_detach(dev)
/* Destroy all the RX and TX buffer maps */
- if (sc->rl_ldata.rl_mtag) {
- for (i = 0; i < RL_TX_DESC_CNT; i++)
- bus_dmamap_destroy(sc->rl_ldata.rl_mtag,
- sc->rl_ldata.rl_tx_dmamap[i]);
- for (i = 0; i < RL_RX_DESC_CNT; i++)
- bus_dmamap_destroy(sc->rl_ldata.rl_mtag,
- sc->rl_ldata.rl_rx_dmamap[i]);
- bus_dma_tag_destroy(sc->rl_ldata.rl_mtag);
+ if (sc->rl_ldata.rl_tx_mtag) {
+ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
+ bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
+ sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
+ bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
+ }
+ if (sc->rl_ldata.rl_rx_mtag) {
+ for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++)
+ bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
+ sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
+ if (sc->rl_ldata.rl_rx_sparemap)
+ bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
+ sc->rl_ldata.rl_rx_sparemap);
+ bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
}
/* Unload and free the stats buffer and map */
@@ -1582,23 +1788,36 @@ re_detach(dev)
return (0);
}
+static __inline void
+re_discard_rxbuf(struct rl_softc *sc, int idx)
+{
+ struct rl_desc *desc;
+ struct rl_rxdesc *rxd;
+ uint32_t cmdstat;
+
+ rxd = &sc->rl_ldata.rl_rx_desc[idx];
+ desc = &sc->rl_ldata.rl_rx_list[idx];
+ desc->rl_vlanctl = 0;
+ cmdstat = rxd->rx_size;
+ if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
+ cmdstat |= RL_RDESC_CMD_EOR;
+ desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
+}
+
static int
-re_newbuf(sc, idx, m)
- struct rl_softc *sc;
- int idx;
- struct mbuf *m;
+re_newbuf(struct rl_softc *sc, int idx)
{
- struct rl_dmaload_arg arg;
- struct mbuf *n = NULL;
- int error;
+ struct mbuf *m;
+ struct rl_rxdesc *rxd;
+ bus_dma_segment_t segs[1];
+ bus_dmamap_t map;
+ struct rl_desc *desc;
+ uint32_t cmdstat;
+ int error, nsegs;
- if (m == NULL) {
- n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
- if (n == NULL)
- return (ENOBUFS);
- m = n;
- } else
- m->m_data = m->m_ext.ext_buf;
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL)
+ return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
#ifdef RE_FIXUP_RX
@@ -1614,37 +1833,44 @@ re_newbuf(sc, idx, m)
*/
m_adj(m, RE_ETHER_ALIGN);
#endif
- arg.rl_idx = idx;
- arg.rl_maxsegs = 1;
- arg.rl_flags = 0;
- arg.rl_ring = sc->rl_ldata.rl_rx_list;
-
- error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag,
- sc->rl_ldata.rl_rx_dmamap[idx], m, re_dma_map_desc,
- &arg, BUS_DMA_NOWAIT);
- if (error || arg.rl_maxsegs != 1) {
- if (n != NULL)
- m_freem(n);
- if (arg.rl_maxsegs == 0)
- bus_dmamap_unload(sc->rl_ldata.rl_mtag,
- sc->rl_ldata.rl_rx_dmamap[idx]);
- return (ENOMEM);
+ error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
+ sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ m_freem(m);
+ return (ENOBUFS);
}
+ KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs));
- sc->rl_ldata.rl_rx_list[idx].rl_cmdstat |= htole32(RL_RDESC_CMD_OWN);
- sc->rl_ldata.rl_rx_mbuf[idx] = m;
+ rxd = &sc->rl_ldata.rl_rx_desc[idx];
+ if (rxd->rx_m != NULL) {
+ bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap);
+ }
- bus_dmamap_sync(sc->rl_ldata.rl_mtag,
- sc->rl_ldata.rl_rx_dmamap[idx],
+ rxd->rx_m = m;
+ map = rxd->rx_dmamap;
+ rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
+ rxd->rx_size = segs[0].ds_len;
+ sc->rl_ldata.rl_rx_sparemap = map;
+ bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
BUS_DMASYNC_PREREAD);
+ desc = &sc->rl_ldata.rl_rx_list[idx];
+ desc->rl_vlanctl = 0;
+ desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
+ desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
+ cmdstat = segs[0].ds_len;
+ if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
+ cmdstat |= RL_RDESC_CMD_EOR;
+ desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
+
return (0);
}
#ifdef RE_FIXUP_RX
static __inline void
-re_fixup_rx(m)
- struct mbuf *m;
+re_fixup_rx(struct mbuf *m)
{
int i;
uint16_t *src, *dst;
@@ -1656,44 +1882,47 @@ re_fixup_rx(m)
*dst++ = *src++;
m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
-
- return;
}
#endif
static int
-re_tx_list_init(sc)
- struct rl_softc *sc;
+re_tx_list_init(struct rl_softc *sc)
{
+ struct rl_desc *desc;
+ int i;
RL_LOCK_ASSERT(sc);
- bzero ((char *)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ);
- bzero ((char *)&sc->rl_ldata.rl_tx_mbuf,
- (RL_TX_DESC_CNT * sizeof(struct mbuf *)));
+ bzero(sc->rl_ldata.rl_tx_list,
+ sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
+ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
+ sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
+ /* Set EOR. */
+ desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
+ desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);
bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
- sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE);
+ sc->rl_ldata.rl_tx_list_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
sc->rl_ldata.rl_tx_prodidx = 0;
sc->rl_ldata.rl_tx_considx = 0;
- sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT;
+ sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
return (0);
}
static int
-re_rx_list_init(sc)
- struct rl_softc *sc;
+re_rx_list_init(struct rl_softc *sc)
{
- int i;
-
- bzero ((char *)sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ);
- bzero ((char *)&sc->rl_ldata.rl_rx_mbuf,
- (RL_RX_DESC_CNT * sizeof(struct mbuf *)));
+ int error, i;
- for (i = 0; i < RL_RX_DESC_CNT; i++) {
- if (re_newbuf(sc, i, NULL) == ENOBUFS)
- return (ENOBUFS);
+ bzero(sc->rl_ldata.rl_rx_list,
+ sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
+ for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
+ sc->rl_ldata.rl_rx_desc[i].rx_m = NULL;
+ if ((error = re_newbuf(sc, i)) != 0)
+ return (error);
}
/* Flush the RX descriptors */
@@ -1714,43 +1943,48 @@ re_rx_list_init(sc)
* across multiple 2K mbuf cluster buffers.
*/
static int
-re_rxeof(sc)
- struct rl_softc *sc;
+re_rxeof(struct rl_softc *sc, int *rx_npktsp)
{
struct mbuf *m;
struct ifnet *ifp;
int i, total_len;
struct rl_desc *cur_rx;
u_int32_t rxstat, rxvlan;
- int maxpkt = 16;
+ int maxpkt = 16, rx_npkts = 0;
RL_LOCK_ASSERT(sc);
ifp = sc->rl_ifp;
- i = sc->rl_ldata.rl_rx_prodidx;
/* Invalidate the descriptor memory */
bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
sc->rl_ldata.rl_rx_list_map,
- BUS_DMASYNC_POSTREAD);
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- while (!RL_OWN(&sc->rl_ldata.rl_rx_list[i]) && maxpkt) {
+ for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
+ i = RL_RX_DESC_NXT(sc, i)) {
cur_rx = &sc->rl_ldata.rl_rx_list[i];
- m = sc->rl_ldata.rl_rx_mbuf[i];
- total_len = RL_RXBYTES(cur_rx);
rxstat = le32toh(cur_rx->rl_cmdstat);
+ if ((rxstat & RL_RDESC_STAT_OWN) != 0)
+ break;
+ total_len = rxstat & sc->rl_rxlenmask;
rxvlan = le32toh(cur_rx->rl_vlanctl);
-
- /* Invalidate the RX mbuf and unload its map */
-
- bus_dmamap_sync(sc->rl_ldata.rl_mtag,
- sc->rl_ldata.rl_rx_dmamap[i],
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(sc->rl_ldata.rl_mtag,
- sc->rl_ldata.rl_rx_dmamap[i]);
+ m = sc->rl_ldata.rl_rx_desc[i].rx_m;
if (!(rxstat & RL_RDESC_STAT_EOF)) {
+ if (re_newbuf(sc, i) != 0) {
+ /*
+ * If this is part of a multi-fragment packet,
+ * discard all the pieces.
+ */
+ if (sc->rl_head != NULL) {
+ m_freem(sc->rl_head);
+ sc->rl_head = sc->rl_tail = NULL;
+ }
+ re_discard_rxbuf(sc, i);
+ continue;
+ }
m->m_len = RE_RX_DESC_BUFLEN;
if (sc->rl_head == NULL)
sc->rl_head = sc->rl_tail = m;
@@ -1759,8 +1993,6 @@ re_rxeof(sc)
sc->rl_tail->m_next = m;
sc->rl_tail = m;
}
- re_newbuf(sc, i, NULL);
- RL_DESC_INC(i);
continue;
}
@@ -1798,8 +2030,7 @@ re_rxeof(sc)
m_freem(sc->rl_head);
sc->rl_head = sc->rl_tail = NULL;
}
- re_newbuf(sc, i, m);
- RL_DESC_INC(i);
+ re_discard_rxbuf(sc, i);
continue;
}
@@ -1808,19 +2039,16 @@ re_rxeof(sc)
* reload the current one.
*/
- if (re_newbuf(sc, i, NULL)) {
- ifp->if_ierrors++;
+ if (re_newbuf(sc, i) != 0) {
+ ifp->if_iqdrops++;
if (sc->rl_head != NULL) {
m_freem(sc->rl_head);
sc->rl_head = sc->rl_tail = NULL;
}
- re_newbuf(sc, i, m);
- RL_DESC_INC(i);
+ re_discard_rxbuf(sc, i);
continue;
}
- RL_DESC_INC(i);
-
if (sc->rl_head != NULL) {
m->m_len = total_len % RE_RX_DESC_BUFLEN;
if (m->m_len == 0)
@@ -1857,31 +2085,53 @@ re_rxeof(sc)
#ifndef __rtems__
if (ifp->if_capenable & IFCAP_RXCSUM) {
-
- /* Check IP header checksum */
- if (rxstat & RL_RDESC_STAT_PROTOID)
- m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
- if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
- m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
-
- /* Check TCP/UDP checksum */
- if ((RL_TCPPKT(rxstat) &&
- !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
- (RL_UDPPKT(rxstat) &&
- !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
- m->m_pkthdr.csum_flags |=
- CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
- m->m_pkthdr.csum_data = 0xffff;
+ if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
+ /* Check IP header checksum */
+ if (rxstat & RL_RDESC_STAT_PROTOID)
+ m->m_pkthdr.csum_flags |=
+ CSUM_IP_CHECKED;
+ if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
+ m->m_pkthdr.csum_flags |=
+ CSUM_IP_VALID;
+
+ /* Check TCP/UDP checksum */
+ if ((RL_TCPPKT(rxstat) &&
+ !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
+ (RL_UDPPKT(rxstat) &&
+ !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
+ m->m_pkthdr.csum_flags |=
+ CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xffff;
+ }
+ } else {
+ /*
+ * RTL8168C/RTL816CP/RTL8111C/RTL8111CP
+ */
+ if ((rxstat & RL_RDESC_STAT_PROTOID) &&
+ (rxvlan & RL_RDESC_IPV4))
+ m->m_pkthdr.csum_flags |=
+ CSUM_IP_CHECKED;
+ if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) &&
+ (rxvlan & RL_RDESC_IPV4))
+ m->m_pkthdr.csum_flags |=
+ CSUM_IP_VALID;
+ if (((rxstat & RL_RDESC_STAT_TCP) &&
+ !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
+ ((rxstat & RL_RDESC_STAT_UDP) &&
+ !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
+ m->m_pkthdr.csum_flags |=
+ CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xffff;
+ }
}
}
#endif
maxpkt--;
#ifndef __rtems__
if (rxvlan & RL_RDESC_VLANCTL_TAG) {
- VLAN_INPUT_TAG_NEW(ifp, m,
- ntohs((rxvlan & RL_RDESC_VLANCTL_DATA)));
- if (m == NULL)
- continue;
+ m->m_pkthdr.ether_vtag =
+ bswap16((rxvlan & RL_RDESC_VLANCTL_DATA));
+ m->m_flags |= M_VLANTAG;
}
#endif
RL_UNLOCK(sc);
@@ -1891,6 +2141,7 @@ re_rxeof(sc)
ether_input_skipping(ifp, m);
#endif
RL_LOCK(sc);
+ rx_npkts++;
}
/* Flush the RX DMA ring */
@@ -1901,6 +2152,8 @@ re_rxeof(sc)
sc->rl_ldata.rl_rx_prodidx = i;
+ if (rx_npktsp != NULL)
+ *rx_npktsp = rx_npkts;
if (maxpkt)
return(EAGAIN);
@@ -1908,28 +2161,28 @@ re_rxeof(sc)
}
static void
-re_txeof(sc)
- struct rl_softc *sc;
+re_txeof(struct rl_softc *sc)
{
struct ifnet *ifp;
+ struct rl_txdesc *txd;
u_int32_t txstat;
- int idx;
+ int cons;
- ifp = sc->rl_ifp;
- idx = sc->rl_ldata.rl_tx_considx;
+ cons = sc->rl_ldata.rl_tx_considx;
+ if (cons == sc->rl_ldata.rl_tx_prodidx)
+ return;
+ ifp = sc->rl_ifp;
/* Invalidate the TX descriptor list */
bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
sc->rl_ldata.rl_tx_list_map,
- BUS_DMASYNC_POSTREAD);
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- while (sc->rl_ldata.rl_tx_free < RL_TX_DESC_CNT) {
- txstat = le32toh(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat);
- if (txstat & RL_TDESC_CMD_OWN)
+ for (; cons != sc->rl_ldata.rl_tx_prodidx;
+ cons = RL_TX_DESC_NXT(sc, cons)) {
+ txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
+ if (txstat & RL_TDESC_STAT_OWN)
break;
-
- sc->rl_ldata.rl_tx_list[idx].rl_bufaddr_lo = 0;
-
/*
* We only stash mbufs in the last descriptor
* in a fragment chain, which also happens to
@@ -1937,10 +2190,15 @@ re_txeof(sc)
* are valid.
*/
if (txstat & RL_TDESC_CMD_EOF) {
- m_freem(sc->rl_ldata.rl_tx_mbuf[idx]);
- sc->rl_ldata.rl_tx_mbuf[idx] = NULL;
- bus_dmamap_unload(sc->rl_ldata.rl_mtag,
- sc->rl_ldata.rl_tx_dmamap[idx]);
+ txd = &sc->rl_ldata.rl_tx_desc[cons];
+ bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
+ txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
+ txd->tx_dmamap);
+ KASSERT(txd->tx_m != NULL,
+ ("%s: freeing NULL mbufs!", __func__));
+ m_freem(txd->tx_m);
+ txd->tx_m = NULL;
if (txstat & (RL_TDESC_STAT_EXCESSCOL|
RL_TDESC_STAT_COLCNT))
ifp->if_collisions++;
@@ -1950,26 +2208,13 @@ re_txeof(sc)
ifp->if_opackets++;
}
sc->rl_ldata.rl_tx_free++;
- RL_DESC_INC(idx);
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
- sc->rl_ldata.rl_tx_considx = idx;
+ sc->rl_ldata.rl_tx_considx = cons;
/* No changes made to the TX ring, so no flush needed */
- if (sc->rl_ldata.rl_tx_free > RL_TX_DESC_THLD)
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
- if (sc->rl_ldata.rl_tx_free < RL_TX_DESC_CNT) {
- /*
- * Some chips will ignore a second TX request issued
- * while an existing transmission is in progress. If
- * the transmitter goes idle but there are still
- * packets waiting to be sent, we need to restart the
- * channel here to flush them out. This only seems to
- * be required with the PCIe devices.
- */
- CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
-
+ if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
#ifdef RE_TX_MODERATION
/*
* If not all descriptors have been reaped yet, reload
@@ -1984,61 +2229,84 @@ re_txeof(sc)
}
static void
-re_tick(xsc)
- void *xsc;
+re_tick(void *xsc)
{
struct rl_softc *sc;
struct mii_data *mii;
- struct ifnet *ifp;
sc = xsc;
- ifp = sc->rl_ifp;
RL_LOCK_ASSERT(sc);
- re_watchdog(sc);
-
mii = device_get_softc(sc->rl_miibus);
#ifndef __rtems__
mii_tick(mii);
- if (sc->rl_link) {
- if (!(mii->mii_media_status & IFM_ACTIVE))
- sc->rl_link = 0;
- } else {
- if (mii->mii_media_status & IFM_ACTIVE &&
- IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
- sc->rl_link = 1;
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- taskqueue_enqueue_fast(taskqueue_fast,
- &sc->rl_txtask);
- }
+ if ((sc->rl_flags & RL_FLAG_LINK) == 0)
+ re_miibus_statchg(sc->rl_dev);
+#else
+ {
+ struct ifnet *ifp;
+ int med, err;
+
+ ifp = sc->rl_ifp;
+
+ med = IFM_MAKEWORD(0,0,0,0);
+
+ if ( (err = rtems_mii_ioctl( RE_MDIO(sc), sc, SIOCGIFMEDIA, &med)) ) {
+ device_printf(sc->rl_dev, "WARNING: mii ioctl failed; unable to determine link status -- fake ON\n");
+ med = IFM_LINK_OK;
+ }
+
+ /* link just died */
+ if ( (sc->rl_flags & RL_FLAG_LINK) & !(IFM_LINK_OK & med) ) {
+ sc->rl_flags &= ~RL_FLAG_LINK;
}
+ /* link just came up, restart */
+ if ( ((sc->rl_flags & RL_FLAG_LINK) == 0) && (IFM_LINK_OK & med) ) {
+ sc->rl_flags |= RL_FLAG_LINK;
+ if ( ifp->if_snd.ifq_head != NULL ) {
+ taskqueue_enqueue_fast(taskqueue_fast,
+ &sc->rl_txtask);
+ }
+ }
+ }
#endif
+ /*
+ * Reclaim transmitted frames here. Technically it is not
+ * necessary to do here but it ensures periodic reclamation
+ * regardless of Tx completion interrupt which seems to be
+ * lost on PCIe based controllers under certain situations.
+ */
+ re_txeof(sc);
+ re_watchdog(sc);
callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
}
#ifdef DEVICE_POLLING
-static void
+static int
re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
struct rl_softc *sc = ifp->if_softc;
+ int rx_npkts = 0;
RL_LOCK(sc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- re_poll_locked(ifp, cmd, count);
+ rx_npkts = re_poll_locked(ifp, cmd, count);
RL_UNLOCK(sc);
+ return (rx_npkts);
}
-static void
+static int
re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
struct rl_softc *sc = ifp->if_softc;
+ int rx_npkts;
RL_LOCK_ASSERT(sc);
sc->rxcycles = count;
- re_rxeof(sc);
+ re_rxeof(sc, &rx_npkts);
re_txeof(sc);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
@@ -2049,46 +2317,44 @@ re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
status = CSR_READ_2(sc, RL_ISR);
if (status == 0xffff)
- return;
+ return (rx_npkts);
if (status)
CSR_WRITE_2(sc, RL_ISR, status);
+ if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
+ (sc->rl_flags & RL_FLAG_PCIE))
+ CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
/*
* XXX check behaviour on receiver stalls.
*/
- if (status & RL_ISR_SYSTEM_ERR) {
- re_reset(sc);
+ if (status & RL_ISR_SYSTEM_ERR)
re_init_locked(sc);
- }
}
+ return (rx_npkts);
}
#endif /* DEVICE_POLLING */
-static void
-re_intr(arg)
- void *arg;
+static int
+re_intr(void *arg)
{
struct rl_softc *sc;
uint16_t status;
sc = arg;
-printk( "re_intr " );
status = CSR_READ_2(sc, RL_ISR);
if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
- return;
+ return (FILTER_STRAY);
CSR_WRITE_2(sc, RL_IMR, 0);
taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
- return;
+ return (FILTER_HANDLED);
}
static void
-re_int_task(arg, npending)
- void *arg;
- int npending;
+re_int_task(void *arg, int npending)
{
struct rl_softc *sc;
struct ifnet *ifp;
@@ -2098,52 +2364,54 @@ re_int_task(arg, npending)
sc = arg;
ifp = sc->rl_ifp;
- NET_LOCK_GIANT();
RL_LOCK(sc);
status = CSR_READ_2(sc, RL_ISR);
CSR_WRITE_2(sc, RL_ISR, status);
- if (sc->suspended || !(ifp->if_flags & IFF_UP)) {
+ if (sc->suspended ||
+ (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
RL_UNLOCK(sc);
- NET_UNLOCK_GIANT();
return;
}
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING) {
RL_UNLOCK(sc);
- NET_UNLOCK_GIANT();
return;
}
#endif
if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
- rval = re_rxeof(sc);
+ rval = re_rxeof(sc, NULL);
+ /*
+ * Some chips will ignore a second TX request issued
+ * while an existing transmission is in progress. If
+ * the transmitter goes idle but there are still
+ * packets waiting to be sent, we need to restart the
+ * channel here to flush them out. This only seems to
+ * be required with the PCIe devices.
+ */
+ if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
+ (sc->rl_flags & RL_FLAG_PCIE))
+ CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
+ if (status & (
#ifdef RE_TX_MODERATION
- if (status & (RL_ISR_TIMEOUT_EXPIRED|
+ RL_ISR_TIMEOUT_EXPIRED|
#else
- if (status & (RL_ISR_TX_OK|
+ RL_ISR_TX_OK|
#endif
RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
re_txeof(sc);
- if (status & RL_ISR_SYSTEM_ERR) {
- re_reset(sc);
+ if (status & RL_ISR_SYSTEM_ERR)
re_init_locked(sc);
- }
-
- if (status & RL_ISR_LINKCHG) {
- callout_stop(&sc->rl_stat_callout);
- re_tick(sc);
- }
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask);
RL_UNLOCK(sc);
- NET_UNLOCK_GIANT();
if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
@@ -2151,54 +2419,25 @@ re_int_task(arg, npending)
}
CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
-
- return;
}
static int
-re_encap(sc, m_head, idx)
- struct rl_softc *sc;
- struct mbuf **m_head;
- int *idx;
+re_encap(struct rl_softc *sc, struct mbuf **m_head)
{
- struct mbuf *m_new = NULL;
- struct rl_dmaload_arg arg;
+ struct rl_txdesc *txd, *txd_last;
+ bus_dma_segment_t segs[RL_NTXSEGS];
bus_dmamap_t map;
- int error;
+ struct mbuf *m_new;
+ struct rl_desc *desc;
+ int nsegs, prod;
+ int i, error, ei, si;
#ifndef __rtems__
- struct m_tag *mtag;
+ int padlen;
#endif
-
+ uint32_t cmdstat, csum_flags, vlanctl;
+
RL_LOCK_ASSERT(sc);
-
- if (sc->rl_ldata.rl_tx_free <= RL_TX_DESC_THLD)
- return (EFBIG);
-
- /*
- * Set up checksum offload. Note: checksum offload bits must
- * appear in all descriptors of a multi-descriptor transmit
- * attempt. This is according to testing done with an 8169
- * chip. This is a requirement.
- */
-
- arg.rl_flags = 0;
-
-#ifndef __rtems__
- if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
- arg.rl_flags |= RL_TDESC_CMD_IPCSUM;
- if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
- arg.rl_flags |= RL_TDESC_CMD_TCPCSUM;
- if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
- arg.rl_flags |= RL_TDESC_CMD_UDPCSUM;
-#endif
-
- arg.rl_idx = *idx;
- arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
- if (arg.rl_maxsegs > RL_TX_DESC_THLD)
- arg.rl_maxsegs -= RL_TX_DESC_THLD;
- arg.rl_ring = sc->rl_ldata.rl_tx_list;
-
- map = sc->rl_ldata.rl_tx_dmamap[*idx];
+ M_ASSERTPKTHDR((*m_head));
/*
* With some of the RealTek chips, using the checksum offload
@@ -2207,151 +2446,215 @@ re_encap(sc, m_head, idx)
* need to send a really small IP fragment that's less than 60
* bytes in size, and IP header checksumming is enabled, the
* resulting ethernet frame that appears on the wire will
- * have garbled payload. To work around this, if TX checksum
+ * have garbled payload. To work around this, if TX IP checksum
* offload is enabled, we always manually pad short frames out
- * to the minimum ethernet frame size. We do this by pretending
- * the mbuf chain has too many fragments so the coalescing code
- * below can assemble the packet into a single buffer that's
- * padded out to the mininum frame size.
- *
- * Note: this appears unnecessary for TCP, and doing it for TCP
- * with PCIe adapters seems to result in bad checksums.
+ * to the minimum ethernet frame size.
*/
-
- if (arg.rl_flags && !(arg.rl_flags & RL_TDESC_CMD_TCPCSUM) &&
- (*m_head)->m_pkthdr.len < RL_MIN_FRAMELEN)
- error = EFBIG;
- else
- error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
- *m_head, re_dma_map_desc, &arg, BUS_DMA_NOWAIT);
-
- if (error && error != EFBIG) {
- device_printf(sc->rl_dev, "can't map mbuf (error %d)\n", error);
- return (ENOBUFS);
- }
-
- /* Too many segments to map, coalesce into a single mbuf */
-
- if (error || arg.rl_maxsegs == 0) {
- if (arg.rl_maxsegs == 0)
- bus_dmamap_unload(sc->rl_ldata.rl_mtag, map);
- m_new = m_defrag(*m_head, M_DONTWAIT);
- if (m_new == NULL) {
+#ifndef __rtems__
+ if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
+ (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
+ ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) {
+ padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
+ if (M_WRITABLE(*m_head) == 0) {
+ /* Get a writable copy. */
+ m_new = m_dup(*m_head, M_DONTWAIT);
m_freem(*m_head);
- *m_head = NULL;
- return (ENOBUFS);
+ if (m_new == NULL) {
+ *m_head = NULL;
+ return (ENOBUFS);
+ }
+ *m_head = m_new;
}
- *m_head = m_new;
+ if ((*m_head)->m_next != NULL ||
+ M_TRAILINGSPACE(*m_head) < padlen) {
+ m_new = m_defrag(*m_head, M_DONTWAIT);
+ if (m_new == NULL) {
+ m_freem(*m_head);
+ *m_head = NULL;
+ return (ENOBUFS);
+ }
+ } else
+ m_new = *m_head;
/*
* Manually pad short frames, and zero the pad space
* to avoid leaking data.
*/
- if (m_new->m_pkthdr.len < RL_MIN_FRAMELEN) {
- bzero(mtod(m_new, char *) + m_new->m_pkthdr.len,
- RL_MIN_FRAMELEN - m_new->m_pkthdr.len);
- m_new->m_pkthdr.len += RL_MIN_FRAMELEN -
- m_new->m_pkthdr.len;
- m_new->m_len = m_new->m_pkthdr.len;
+ bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
+ m_new->m_pkthdr.len += padlen;
+ m_new->m_len = m_new->m_pkthdr.len;
+ *m_head = m_new;
+ }
+#endif
+
+ prod = sc->rl_ldata.rl_tx_prodidx;
+ txd = &sc->rl_ldata.rl_tx_desc[prod];
+ error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
+ *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (error == EFBIG) {
+#ifndef __rtems__
+ m_new = m_collapse(*m_head, M_DONTWAIT, RL_NTXSEGS);
+#else
+ m_new = m_defrag(*m_head, M_DONTWAIT);
+#endif
+ if (m_new == NULL) {
+ m_freem(*m_head);
+ *m_head = NULL;
+ return (ENOBUFS);
}
-
- /* Note that we'll run over RL_TX_DESC_THLD here. */
- arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
- error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
- *m_head, re_dma_map_desc, &arg, BUS_DMA_NOWAIT);
- if (error || arg.rl_maxsegs == 0) {
- device_printf(sc->rl_dev,
- "can't map defragmented mbuf (error %d)\n", error);
- m_freem(m_new);
+ *m_head = m_new;
+ error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
+ txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ m_freem(*m_head);
*m_head = NULL;
- if (arg.rl_maxsegs == 0)
- bus_dmamap_unload(sc->rl_ldata.rl_mtag, map);
- return (EFBIG);
+ return (error);
}
+ } else if (error != 0)
+ return (error);
+ if (nsegs == 0) {
+ m_freem(*m_head);
+ *m_head = NULL;
+ return (EIO);
}
+ /* Check for number of available descriptors. */
+ if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
+ bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
+ return (ENOBUFS);
+ }
+
+ bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
+ BUS_DMASYNC_PREWRITE);
+
/*
- * Insure that the map for this transmission
- * is placed at the array index of the last descriptor
- * in this chain. (Swap last and first dmamaps.)
+ * Set up checksum offload. Note: checksum offload bits must
+ * appear in all descriptors of a multi-descriptor transmit
+ * attempt. This is according to testing done with an 8169
+ * chip. This is a requirement.
*/
- sc->rl_ldata.rl_tx_dmamap[*idx] =
- sc->rl_ldata.rl_tx_dmamap[arg.rl_idx];
- sc->rl_ldata.rl_tx_dmamap[arg.rl_idx] = map;
-
- sc->rl_ldata.rl_tx_mbuf[arg.rl_idx] = *m_head;
- sc->rl_ldata.rl_tx_free -= arg.rl_maxsegs;
+ vlanctl = 0;
+ csum_flags = 0;
+#ifndef __rtems__
+ if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0)
+ csum_flags = RL_TDESC_CMD_LGSEND |
+ ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
+ RL_TDESC_CMD_MSSVAL_SHIFT);
+ else {
+ /*
+ * Unconditionally enable IP checksum if TCP or UDP
+ * checksum is required. Otherwise, TCP/UDP checksum
+ * does't make effects.
+ */
+ if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
+ if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
+ csum_flags |= RL_TDESC_CMD_IPCSUM;
+ if (((*m_head)->m_pkthdr.csum_flags &
+ CSUM_TCP) != 0)
+ csum_flags |= RL_TDESC_CMD_TCPCSUM;
+ if (((*m_head)->m_pkthdr.csum_flags &
+ CSUM_UDP) != 0)
+ csum_flags |= RL_TDESC_CMD_UDPCSUM;
+ } else {
+ vlanctl |= RL_TDESC_CMD_IPCSUMV2;
+ if (((*m_head)->m_pkthdr.csum_flags &
+ CSUM_TCP) != 0)
+ vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
+ if (((*m_head)->m_pkthdr.csum_flags &
+ CSUM_UDP) != 0)
+ vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
+ }
+ }
+ }
+#endif
/*
* Set up hardware VLAN tagging. Note: vlan tag info must
- * appear in the first descriptor of a multi-descriptor
+ * appear in all descriptors of a multi-descriptor
* transmission attempt.
*/
-
#ifndef __rtems__
- mtag = VLAN_OUTPUT_TAG(sc->rl_ifp, *m_head);
- if (mtag != NULL)
- sc->rl_ldata.rl_tx_list[*idx].rl_vlanctl =
- htole32(htons(VLAN_TAG_VALUE(mtag)) | RL_TDESC_VLANCTL_TAG);
+ if ((*m_head)->m_flags & M_VLANTAG)
+ vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) |
+ RL_TDESC_VLANCTL_TAG;
#endif
+
+ si = prod;
+ for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
+ desc = &sc->rl_ldata.rl_tx_list[prod];
+ desc->rl_vlanctl = htole32(vlanctl);
+ desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
+ desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
+ cmdstat = segs[i].ds_len;
+ if (i != 0)
+ cmdstat |= RL_TDESC_CMD_OWN;
+ if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
+ cmdstat |= RL_TDESC_CMD_EOR;
+ desc->rl_cmdstat = htole32(cmdstat | csum_flags);
+ sc->rl_ldata.rl_tx_free--;
+ }
+ /* Update producer index. */
+ sc->rl_ldata.rl_tx_prodidx = prod;
- /* Transfer ownership of packet to the chip. */
+ /* Set EOF on the last descriptor. */
+ ei = RL_TX_DESC_PRV(sc, prod);
+ desc = &sc->rl_ldata.rl_tx_list[ei];
+ desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
- sc->rl_ldata.rl_tx_list[arg.rl_idx].rl_cmdstat |=
- htole32(RL_TDESC_CMD_OWN);
- if (*idx != arg.rl_idx)
- sc->rl_ldata.rl_tx_list[*idx].rl_cmdstat |=
- htole32(RL_TDESC_CMD_OWN);
+ desc = &sc->rl_ldata.rl_tx_list[si];
+ /* Set SOF and transfer ownership of packet to the chip. */
+ desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);
- RL_DESC_INC(arg.rl_idx);
- *idx = arg.rl_idx;
+ /*
+ * Insure that the map for this transmission
+ * is placed at the array index of the last descriptor
+ * in this chain. (Swap last and first dmamaps.)
+ */
+ txd_last = &sc->rl_ldata.rl_tx_desc[ei];
+ map = txd->tx_dmamap;
+ txd->tx_dmamap = txd_last->tx_dmamap;
+ txd_last->tx_dmamap = map;
+ txd_last->tx_m = *m_head;
return (0);
}
static void
-re_tx_task(arg, npending)
- void *arg;
- int npending;
+re_tx_task(void *arg, int npending)
{
struct ifnet *ifp;
ifp = arg;
- NET_LOCK_GIANT();
re_start(ifp);
- NET_UNLOCK_GIANT();
-
- return;
}
/*
* Main transmit routine for C+ and gigE NICs.
*/
static void
-re_start(ifp)
- struct ifnet *ifp;
+re_start(struct ifnet *ifp)
{
struct rl_softc *sc;
- struct mbuf *m_head = NULL;
- int idx, queued = 0;
+ struct mbuf *m_head;
+ int queued;
sc = ifp->if_softc;
RL_LOCK(sc);
- if (!sc->rl_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) {
RL_UNLOCK(sc);
return;
}
- idx = sc->rl_ldata.rl_tx_prodidx;
-
- while (sc->rl_ldata.rl_tx_mbuf[idx] == NULL) {
+ for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
+ sc->rl_ldata.rl_tx_free > 1;) {
IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
if (m_head == NULL)
break;
- if (re_encap(sc, &m_head, &idx)) {
+ if (re_encap(sc, &m_head) != 0) {
if (m_head == NULL)
break;
IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
@@ -2370,7 +2673,7 @@ re_start(ifp)
if (queued == 0) {
#ifdef RE_TX_MODERATION
- if (sc->rl_ldata.rl_tx_free != RL_TX_DESC_CNT)
+ if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt)
CSR_WRITE_4(sc, RL_TIMERCNT, 1);
#endif
RL_UNLOCK(sc);
@@ -2383,8 +2686,6 @@ re_start(ifp)
sc->rl_ldata.rl_tx_list_map,
BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
- sc->rl_ldata.rl_tx_prodidx = idx;
-
CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
#ifdef RE_TX_MODERATION
@@ -2405,13 +2706,10 @@ re_start(ifp)
sc->rl_watchdog_timer = 5;
RL_UNLOCK(sc);
-
- return;
}
static void
-re_init(xsc)
- void *xsc;
+re_init(void *xsc)
{
struct rl_softc *sc = xsc;
@@ -2421,17 +2719,21 @@ re_init(xsc)
}
static void
-re_init_locked(sc)
- struct rl_softc *sc;
+re_init_locked(struct rl_softc *sc)
{
struct ifnet *ifp = sc->rl_ifp;
struct mii_data *mii;
- u_int32_t rxcfg = 0;
+ uint32_t reg;
+ uint16_t cfg;
+#ifndef __rtems__
union {
uint32_t align_dummy;
u_char eaddr[ETHER_ADDR_LEN];
} eaddr;
-
+#else
+ uint32_t eaddr[2];
+#endif
+
RL_LOCK_ASSERT(sc);
mii = device_get_softc(sc->rl_miibus);
@@ -2441,27 +2743,70 @@ re_init_locked(sc)
*/
re_stop(sc);
+ /* Put controller into known state. */
+ re_reset(sc);
+
/*
* Enable C+ RX and TX mode, as well as VLAN stripping and
* RX checksum offload. We must configure the C+ register
* before all others.
*/
- CSR_WRITE_2(sc, RL_CPLUS_CMD, RL_CPLUSCMD_RXENB|
- RL_CPLUSCMD_TXENB|RL_CPLUSCMD_PCI_MRW|
- RL_CPLUSCMD_VLANSTRIP|RL_CPLUSCMD_RXCSUM_ENB);
-
+ cfg = RL_CPLUSCMD_PCI_MRW;
+#ifndef __rtems__
+ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+ cfg |= RL_CPLUSCMD_RXCSUM_ENB;
+ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
+ cfg |= RL_CPLUSCMD_VLANSTRIP;
+ if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) {
+ cfg |= RL_CPLUSCMD_MACSTAT_DIS;
+ /* XXX magic. */
+ cfg |= 0x0001;
+ } else
+#endif
+ cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB;
+ CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
+ if (sc->rl_hwrev == RL_HWREV_8169_8110SC ||
+ sc->rl_hwrev == RL_HWREV_8169_8110SCE) {
+ reg = 0x000fff00;
+ if ((CSR_READ_1(sc, RL_CFG2) & RL_CFG2_PCI66MHZ) != 0)
+ reg |= 0x000000ff;
+ if (sc->rl_hwrev == RL_HWREV_8169_8110SCE)
+ reg |= 0x00f00000;
+ CSR_WRITE_4(sc, 0x7c, reg);
+ /* Disable interrupt mitigation. */
+ CSR_WRITE_2(sc, 0xe2, 0);
+ }
+ /*
+ * Disable TSO if interface MTU size is greater than MSS
+ * allowed in controller.
+ */
+#ifndef __rtems__
+ if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) {
+ ifp->if_capenable &= ~IFCAP_TSO4;
+ ifp->if_hwassist &= ~CSUM_TSO;
+ }
+#endif
+
/*
* Init our MAC address. Even though the chipset
* documentation doesn't mention it, we need to enter "Config
* register write enable" mode to modify the ID registers.
*/
/* Copy MAC address on stack to align. */
+#ifndef __rtems__
bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN);
CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
CSR_WRITE_4(sc, RL_IDR0,
htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
CSR_WRITE_4(sc, RL_IDR4,
htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
+#else
+ bzero(eaddr, sizeof(eaddr));
+ bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
+ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
+ CSR_WRITE_4(sc, RL_IDR0, htole32(eaddr[0]));
+ CSR_WRITE_4(sc, RL_IDR4, htole32(eaddr[1]));
+#endif
CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
/*
@@ -2490,7 +2835,7 @@ re_init_locked(sc)
CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
/*
- * Set the initial TX and RX configuration.
+ * Set the initial TX configuration.
*/
if (sc->rl_testmode) {
if (sc->rl_type == RL_8169)
@@ -2504,32 +2849,10 @@ re_init_locked(sc)
CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
- CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);
-
- /* Set the individual bit to receive frames for this host only. */
- rxcfg = CSR_READ_4(sc, RL_RXCFG);
- rxcfg |= RL_RXCFG_RX_INDIV;
-
- /* If we want promiscuous mode, set the allframes bit. */
- if (ifp->if_flags & IFF_PROMISC)
- rxcfg |= RL_RXCFG_RX_ALLPHYS;
- else
- rxcfg &= ~RL_RXCFG_RX_ALLPHYS;
- CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
-
/*
- * Set capture broadcast bit to capture broadcast frames.
+ * Set the initial RX configuration.
*/
- if (ifp->if_flags & IFF_BROADCAST)
- rxcfg |= RL_RXCFG_RX_BROAD;
- else
- rxcfg &= ~RL_RXCFG_RX_BROAD;
- CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
-
- /*
- * Program the multicast filter, if necessary.
- */
- re_setmulti(sc);
+ re_set_rxmode(sc);
#ifdef DEVICE_POLLING
/*
@@ -2590,7 +2913,7 @@ re_init_locked(sc)
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- sc->rl_link = 0;
+ sc->rl_flags &= ~RL_FLAG_LINK;
sc->rl_watchdog_timer = 0;
callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
}
@@ -2600,19 +2923,19 @@ re_init_locked(sc)
*/
#ifndef __rtems__
static int
-re_ifmedia_upd(ifp)
- struct ifnet *ifp;
+re_ifmedia_upd(struct ifnet *ifp)
{
struct rl_softc *sc;
struct mii_data *mii;
+ int error;
sc = ifp->if_softc;
mii = device_get_softc(sc->rl_miibus);
RL_LOCK(sc);
- mii_mediachg(mii);
+ error = mii_mediachg(mii);
RL_UNLOCK(sc);
- return (0);
+ return (error);
}
#endif
@@ -2621,9 +2944,7 @@ re_ifmedia_upd(ifp)
*/
#ifndef __rtems__
static void
-re_ifmedia_sts(ifp, ifmr)
- struct ifnet *ifp;
- struct ifmediareq *ifmr;
+re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct rl_softc *sc;
struct mii_data *mii;
@@ -2653,10 +2974,25 @@ re_ioctl(struct ifnet *ifp, ioctl_command_t command, caddr_t data)
switch (command) {
case SIOCSIFMTU:
- RL_LOCK(sc);
- if (ifr->ifr_mtu > RL_JUMBO_MTU)
+ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > RL_JUMBO_MTU) {
+ error = EINVAL;
+ break;
+ }
+ if ((sc->rl_flags & RL_FLAG_NOJUMBO) != 0 &&
+ ifr->ifr_mtu > RL_MAX_FRAMELEN) {
error = EINVAL;
- ifp->if_mtu = ifr->ifr_mtu;
+ break;
+ }
+ RL_LOCK(sc);
+ if (ifp->if_mtu != ifr->ifr_mtu)
+ ifp->if_mtu = ifr->ifr_mtu;
+#ifndef __rtems__
+ if (ifp->if_mtu > RL_TSO_MTU &&
+ (ifp->if_capenable & IFCAP_TSO4) != 0) {
+ ifp->if_capenable &= ~IFCAP_TSO4;
+ ifp->if_hwassist &= ~CSUM_TSO;
+ }
+#endif
RL_UNLOCK(sc);
break;
case SIOCSIFFLAGS:
@@ -2664,8 +3000,8 @@ re_ioctl(struct ifnet *ifp, ioctl_command_t command, caddr_t data)
if ((ifp->if_flags & IFF_UP) != 0) {
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
if (((ifp->if_flags ^ sc->rl_if_flags)
- & IFF_PROMISC) != 0)
- re_setmulti(sc);
+ & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
+ re_set_rxmode(sc);
} else
re_init_locked(sc);
} else {
@@ -2678,7 +3014,7 @@ re_ioctl(struct ifnet *ifp, ioctl_command_t command, caddr_t data)
case SIOCADDMULTI:
case SIOCDELMULTI:
RL_LOCK(sc);
- re_setmulti(sc);
+ re_set_rxmode(sc);
RL_UNLOCK(sc);
break;
case SIOCGIFMEDIA:
@@ -2686,6 +3022,8 @@ re_ioctl(struct ifnet *ifp, ioctl_command_t command, caddr_t data)
mii = device_get_softc(sc->rl_miibus);
#ifndef __rtems__
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
+#else
+ error = rtems_mii_ioctl( RE_MDIO(sc), sc, command, &ifr->ifr_media);
#endif
break;
#ifndef __rtems__
@@ -2719,23 +3057,43 @@ re_ioctl(struct ifnet *ifp, ioctl_command_t command, caddr_t data)
if (mask & IFCAP_HWCSUM) {
ifp->if_capenable ^= IFCAP_HWCSUM;
if (ifp->if_capenable & IFCAP_TXCSUM)
- ifp->if_hwassist = RE_CSUM_FEATURES;
+ ifp->if_hwassist |= RE_CSUM_FEATURES;
else
- ifp->if_hwassist = 0;
+ ifp->if_hwassist &= ~RE_CSUM_FEATURES;
reinit = 1;
}
if (mask & IFCAP_VLAN_HWTAGGING) {
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
reinit = 1;
}
+ if (mask & IFCAP_TSO4) {
+ ifp->if_capenable ^= IFCAP_TSO4;
+ if ((IFCAP_TSO4 & ifp->if_capenable) &&
+ (IFCAP_TSO4 & ifp->if_capabilities))
+ ifp->if_hwassist |= CSUM_TSO;
+ else
+ ifp->if_hwassist &= ~CSUM_TSO;
+ if (ifp->if_mtu > RL_TSO_MTU &&
+ (ifp->if_capenable & IFCAP_TSO4) != 0) {
+ ifp->if_capenable &= ~IFCAP_TSO4;
+ ifp->if_hwassist &= ~CSUM_TSO;
+ }
+ }
+ if ((mask & IFCAP_WOL) != 0 &&
+ (ifp->if_capabilities & IFCAP_WOL) != 0) {
+ if ((mask & IFCAP_WOL_UCAST) != 0)
+ ifp->if_capenable ^= IFCAP_WOL_UCAST;
+ if ((mask & IFCAP_WOL_MCAST) != 0)
+ ifp->if_capenable ^= IFCAP_WOL_MCAST;
+ if ((mask & IFCAP_WOL_MAGIC) != 0)
+ ifp->if_capenable ^= IFCAP_WOL_MAGIC;
+ }
if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING)
re_init(sc);
-#ifdef VLAN_CAPABILITIES
VLAN_CAPABILITIES(ifp);
-#endif
}
-#endif
break;
+#endif
default:
error = ether_ioctl(ifp, command, data);
break;
@@ -2745,21 +3103,32 @@ re_ioctl(struct ifnet *ifp, ioctl_command_t command, caddr_t data)
}
static void
-re_watchdog(sc)
- struct rl_softc *sc;
+re_watchdog(struct rl_softc *sc)
{
+ struct ifnet *ifp;
RL_LOCK_ASSERT(sc);
if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
return;
- device_printf(sc->rl_dev, "watchdog timeout\n");
- sc->rl_ifp->if_oerrors++;
-
+ ifp = sc->rl_ifp;
re_txeof(sc);
- re_rxeof(sc);
+ if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) {
+ if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
+ "-- recovering\n");
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask);
+ return;
+ }
+
+ if_printf(ifp, "watchdog timeout\n");
+ ifp->if_oerrors++;
+
+ re_rxeof(sc, NULL);
re_init_locked(sc);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask);
}
/*
@@ -2767,11 +3136,12 @@ re_watchdog(sc)
* RX and TX lists.
*/
static void
-re_stop(sc)
- struct rl_softc *sc;
+re_stop(struct rl_softc *sc)
{
- register int i;
+ int i;
struct ifnet *ifp;
+ struct rl_txdesc *txd;
+ struct rl_rxdesc *rxd;
RL_LOCK_ASSERT(sc);
@@ -2781,7 +3151,12 @@ re_stop(sc)
callout_stop(&sc->rl_stat_callout);
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
- CSR_WRITE_1(sc, RL_COMMAND, 0x00);
+ if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0)
+ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
+ RL_CMD_RX_ENB);
+ else
+ CSR_WRITE_1(sc, RL_COMMAND, 0x00);
+ DELAY(1000);
CSR_WRITE_2(sc, RL_IMR, 0x0000);
CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
@@ -2792,23 +3167,29 @@ re_stop(sc)
/* Free the TX list buffers. */
- for (i = 0; i < RL_TX_DESC_CNT; i++) {
- if (sc->rl_ldata.rl_tx_mbuf[i] != NULL) {
- bus_dmamap_unload(sc->rl_ldata.rl_mtag,
- sc->rl_ldata.rl_tx_dmamap[i]);
- m_freem(sc->rl_ldata.rl_tx_mbuf[i]);
- sc->rl_ldata.rl_tx_mbuf[i] = NULL;
+ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
+ txd = &sc->rl_ldata.rl_tx_desc[i];
+ if (txd->tx_m != NULL) {
+ bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
+ txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
+ txd->tx_dmamap);
+ m_freem(txd->tx_m);
+ txd->tx_m = NULL;
}
}
/* Free the RX list buffers. */
- for (i = 0; i < RL_RX_DESC_CNT; i++) {
- if (sc->rl_ldata.rl_rx_mbuf[i] != NULL) {
- bus_dmamap_unload(sc->rl_ldata.rl_mtag,
- sc->rl_ldata.rl_rx_dmamap[i]);
- m_freem(sc->rl_ldata.rl_rx_mbuf[i]);
- sc->rl_ldata.rl_rx_mbuf[i] = NULL;
+ for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
+ rxd = &sc->rl_ldata.rl_rx_desc[i];
+ if (rxd->rx_m != NULL) {
+			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
+ rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
+ rxd->rx_dmamap);
+ m_freem(rxd->rx_m);
+ rxd->rx_m = NULL;
}
}
}
@@ -2820,8 +3201,7 @@ re_stop(sc)
*/
#ifndef __rtems__
static int
-re_suspend(dev)
- device_t dev;
+re_suspend(device_t dev)
{
struct rl_softc *sc;
@@ -2829,6 +3209,7 @@ re_suspend(dev)
RL_LOCK(sc);
re_stop(sc);
+ re_setwol(sc);
sc->suspended = 1;
RL_UNLOCK(sc);
@@ -2843,8 +3224,7 @@ re_suspend(dev)
*/
#ifndef __rtems__
static int
-re_resume(dev)
- device_t dev;
+re_resume(device_t dev)
{
struct rl_softc *sc;
struct ifnet *ifp;
@@ -2854,11 +3234,22 @@ re_resume(dev)
RL_LOCK(sc);
ifp = sc->rl_ifp;
+ /* Take controller out of sleep mode. */
+ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
+ if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
+ CSR_WRITE_1(sc, RL_GPIO,
+ CSR_READ_1(sc, RL_GPIO) | 0x01);
+ }
/* reinitialize interface if necessary */
if (ifp->if_flags & IFF_UP)
re_init_locked(sc);
+ /*
+ * Clear WOL matching such that normal Rx filtering
+ * wouldn't interfere with WOL patterns.
+ */
+ re_clrwol(sc);
sc->suspended = 0;
RL_UNLOCK(sc);
@@ -2870,9 +3261,12 @@ re_resume(dev)
* Stop all chip I/O so that the kernel's probe routines don't
* get confused by errant DMAs when rebooting.
*/
+#ifndef __rtems__
+static int
+#else
static void
-re_shutdown(dev)
- device_t dev;
+#endif
+re_shutdown(device_t dev)
{
struct rl_softc *sc;
@@ -2886,5 +3280,108 @@ re_shutdown(dev)
* cases.
*/
sc->rl_ifp->if_flags &= ~IFF_UP;
+#ifndef __rtems__
+ re_setwol(sc);
+#endif
RL_UNLOCK(sc);
+
+#ifndef __rtems__
+ return (0);
+#endif
+}
+
+#ifndef __rtems__
+static void
+re_setwol(struct rl_softc *sc)
+{
+ struct ifnet *ifp;
+ int pmc;
+ uint16_t pmstat;
+ uint8_t v;
+
+ RL_LOCK_ASSERT(sc);
+
+ if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
+ return;
+
+ ifp = sc->rl_ifp;
+ /* Put controller into sleep mode. */
+ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
+ if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
+ CSR_WRITE_1(sc, RL_GPIO,
+ CSR_READ_1(sc, RL_GPIO) & ~0x01);
+ }
+ if ((ifp->if_capenable & IFCAP_WOL) != 0 &&
+ (sc->rl_flags & RL_FLAG_WOLRXENB) != 0)
+ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB);
+ /* Enable config register write. */
+ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
+
+ /* Enable PME. */
+ v = CSR_READ_1(sc, RL_CFG1);
+ v &= ~RL_CFG1_PME;
+ if ((ifp->if_capenable & IFCAP_WOL) != 0)
+ v |= RL_CFG1_PME;
+ CSR_WRITE_1(sc, RL_CFG1, v);
+
+ v = CSR_READ_1(sc, RL_CFG3);
+ v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
+ if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
+ v |= RL_CFG3_WOL_MAGIC;
+ CSR_WRITE_1(sc, RL_CFG3, v);
+
+ /* Config register write done. */
+ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
+
+ v = CSR_READ_1(sc, RL_CFG5);
+ v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
+ v &= ~RL_CFG5_WOL_LANWAKE;
+ if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
+ v |= RL_CFG5_WOL_UCAST;
+ if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
+ v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
+ if ((ifp->if_capenable & IFCAP_WOL) != 0)
+ v |= RL_CFG5_WOL_LANWAKE;
+ CSR_WRITE_1(sc, RL_CFG5, v);
+
+ /*
+ * It seems that hardware resets its link speed to 100Mbps in
+ * power down mode so switching to 100Mbps in driver is not
+ * needed.
+ */
+
+ /* Request PME if WOL is requested. */
+ pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
+ pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
+ if ((ifp->if_capenable & IFCAP_WOL) != 0)
+ pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
+ pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
+}
+
+static void
+re_clrwol(struct rl_softc *sc)
+{
+ int pmc;
+ uint8_t v;
+
+ RL_LOCK_ASSERT(sc);
+
+ if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
+ return;
+
+ /* Enable config register write. */
+ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
+
+ v = CSR_READ_1(sc, RL_CFG3);
+ v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
+ CSR_WRITE_1(sc, RL_CFG3, v);
+
+ /* Config register write done. */
+ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
+
+ v = CSR_READ_1(sc, RL_CFG5);
+ v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
+ v &= ~RL_CFG5_WOL_LANWAKE;
+ CSR_WRITE_1(sc, RL_CFG5, v);
}
+#endif
diff --git a/bsd_eth_drivers/if_re/if_rl.c b/bsd_eth_drivers/if_re/if_rl.c
new file mode 100644
index 0000000..93ce2f7
--- /dev/null
+++ b/bsd_eth_drivers/if_re/if_rl.c
@@ -0,0 +1,2351 @@
+/*-
+ * Copyright (c) 1997, 1998
+ * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Bill Paul.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __rtems__
+#include <libbsdport.h>
+#endif
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/pci/if_rl.c,v 1.189 2009/06/26 11:45:06 rwatson Exp $");
+
+/*
+ * RealTek 8129/8139 PCI NIC driver
+ *
+ * Supports several extremely cheap PCI 10/100 adapters based on
+ * the RealTek chipset. Datasheets can be obtained from
+ * www.realtek.com.tw.
+ *
+ * Written by Bill Paul <wpaul@ctr.columbia.edu>
+ * Electrical Engineering Department
+ * Columbia University, New York City
+ */
+/*
+ * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
+ * probably the worst PCI ethernet controller ever made, with the possible
+ * exception of the FEAST chip made by SMC. The 8139 supports bus-master
+ * DMA, but it has a terrible interface that nullifies any performance
+ * gains that bus-master DMA usually offers.
+ *
+ * For transmission, the chip offers a series of four TX descriptor
+ * registers. Each transmit frame must be in a contiguous buffer, aligned
+ * on a longword (32-bit) boundary. This means we almost always have to
+ * do mbuf copies in order to transmit a frame, except in the unlikely
+ * case where a) the packet fits into a single mbuf, and b) the packet
+ * is 32-bit aligned within the mbuf's data area. The presence of only
+ * four descriptor registers means that we can never have more than four
+ * packets queued for transmission at any one time.
+ *
+ * Reception is not much better. The driver has to allocate a single large
+ * buffer area (up to 64K in size) into which the chip will DMA received
+ * frames. Because we don't know where within this region received packets
+ * will begin or end, we have no choice but to copy data from the buffer
+ * area into mbufs in order to pass the packets up to the higher protocol
+ * levels.
+ *
+ * It's impossible given this rotten design to really achieve decent
+ * performance at 100Mbps, unless you happen to have a 400Mhz PII or
+ * some equally overmuscled CPU to drive it.
+ *
+ * On the bright side, the 8139 does have a built-in PHY, although
+ * rather than using an MDIO serial interface like most other NICs, the
+ * PHY registers are directly accessible through the 8139's register
+ * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
+ * filter.
+ *
+ * The 8129 chip is an older version of the 8139 that uses an external PHY
+ * chip. The 8129 has a serial MDIO interface for accessing the MII where
+ * the 8139 lets you directly access the on-board PHY registers. We need
+ * to select which interface to use depending on the chip type.
+ */
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_device_polling.h"
+#endif
+
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <sys/systm.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+
+#include <net/bpf.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+MODULE_DEPEND(rl, pci, 1, 1, 1);
+MODULE_DEPEND(rl, ether, 1, 1, 1);
+MODULE_DEPEND(rl, miibus, 1, 1, 1);
+
+/* "device miibus" required. See GENERIC if you get errors here. */
+#include "miibus_if.h"
+
+#ifdef __rtems__
+#include <netinet/in.h>
+#include <netinet/in_var.h>
+#include <libbsdport_post.h>
+struct mii_data
+{
+ int mii_media_status;
+ int mii_media_active;
+};
+#endif
+
+/*
+ * Default to using PIO access for this driver. On SMP systems,
+ * there appear to be problems with memory mapped mode: it looks like
+ * doing too many memory mapped access back to back in rapid succession
+ * can hang the bus. I'm inclined to blame this on crummy design/construction
+ * on the part of RealTek. Memory mapped mode does appear to work on
+ * uniprocessor systems though.
+ */
+#define RL_USEIOSPACE
+
+#ifndef __rtems__
+#include <pci/if_rlreg.h>
+#else
+#include "if_rlreg.h"
+#endif
+
+/*
+ * Various supported device vendors/types and their names.
+ */
+static struct rl_type rl_devs[] = {
+ { RT_VENDORID, RT_DEVICEID_8129, RL_8129,
+ "RealTek 8129 10/100BaseTX" },
+ { RT_VENDORID, RT_DEVICEID_8139, RL_8139,
+ "RealTek 8139 10/100BaseTX" },
+ { RT_VENDORID, RT_DEVICEID_8139D, RL_8139,
+ "RealTek 8139 10/100BaseTX" },
+ { RT_VENDORID, RT_DEVICEID_8138, RL_8139,
+ "RealTek 8139 10/100BaseTX CardBus" },
+ { RT_VENDORID, RT_DEVICEID_8100, RL_8139,
+ "RealTek 8100 10/100BaseTX" },
+ { ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
+ "Accton MPX 5030/5038 10/100BaseTX" },
+ { DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
+ "Delta Electronics 8139 10/100BaseTX" },
+ { ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
+ "Addtron Technology 8139 10/100BaseTX" },
+ { DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
+ "D-Link DFE-530TX+ 10/100BaseTX" },
+ { DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
+ "D-Link DFE-690TXD 10/100BaseTX" },
+ { NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
+ "Nortel Networks 10/100BaseTX" },
+ { COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
+ "Corega FEther CB-TXD" },
+ { COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
+ "Corega FEtherII CB-TXD" },
+ { PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
+ "Peppercon AG ROL-F" },
+ { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139,
+ "Planex FNW-3603-TX" },
+ { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
+ "Planex FNW-3800-TX" },
+ { CP_VENDORID, RT_DEVICEID_8139, RL_8139,
+ "Compaq HNE-300" },
+ { LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
+ "LevelOne FPC-0106TX" },
+ { EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
+ "Edimax EP-4103DL CardBus" }
+};
+
+static int rl_attach(device_t);
+static int rl_detach(device_t);
+static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int);
+static int rl_dma_alloc(struct rl_softc *);
+static void rl_dma_free(struct rl_softc *);
+static void rl_eeprom_putbyte(struct rl_softc *, int);
+static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *);
+static int rl_encap(struct rl_softc *, struct mbuf **);
+static int rl_list_tx_init(struct rl_softc *);
+static int rl_list_rx_init(struct rl_softc *);
+#ifndef __rtems__
+static int rl_ifmedia_upd(struct ifnet *);
+static void rl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
+static int rl_ioctl(struct ifnet *, u_long, caddr_t);
+static void rl_intr(void *);
+#else
+static int rl_ioctl(struct ifnet *, ioctl_command_t, caddr_t);
+static int rl_intr(void *);
+#endif
+static void rl_init(void *);
+static void rl_init_locked(struct rl_softc *sc);
+static void rl_mii_send(struct rl_softc *, uint32_t, int);
+static void rl_mii_sync(struct rl_softc *);
+static int rl_mii_readreg(struct rl_softc *, struct rl_mii_frame *);
+static int rl_mii_writereg(struct rl_softc *, struct rl_mii_frame *);
+static int rl_miibus_readreg(device_t, int, int);
+static void rl_miibus_statchg(device_t);
+static int rl_miibus_writereg(device_t, int, int, int);
+#ifdef DEVICE_POLLING
+static int rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
+static int rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
+#endif
+static int rl_probe(device_t);
+static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int);
+static void rl_reset(struct rl_softc *);
+#ifndef __rtems__
+static int rl_resume(device_t);
+#endif
+static int rl_rxeof(struct rl_softc *);
+static void rl_setmulti(struct rl_softc *);
+#ifndef __rtems__
+static int rl_shutdown(device_t);
+#else
+static void rl_shutdown(device_t);
+#endif
+static void rl_start(struct ifnet *);
+static void rl_start_locked(struct ifnet *);
+static void rl_stop(struct rl_softc *);
+#ifndef __rtems__
+static int rl_suspend(device_t);
+#endif
+static void rl_tick(void *);
+static void rl_txeof(struct rl_softc *);
+static void rl_watchdog(struct rl_softc *);
+
+#ifdef RL_USEIOSPACE
+#define RL_RES SYS_RES_IOPORT
+#define RL_RID RL_PCI_LOIO
+#else
+#define RL_RES SYS_RES_MEMORY
+#define RL_RID RL_PCI_LOMEM
+#endif
+
+#ifndef __rtems__
+static device_method_t rl_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rl_probe),
+ DEVMETHOD(device_attach, rl_attach),
+ DEVMETHOD(device_detach, rl_detach),
+ DEVMETHOD(device_suspend, rl_suspend),
+ DEVMETHOD(device_resume, rl_resume),
+ DEVMETHOD(device_shutdown, rl_shutdown),
+
+ /* bus interface */
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_driver_added, bus_generic_driver_added),
+
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, rl_miibus_readreg),
+ DEVMETHOD(miibus_writereg, rl_miibus_writereg),
+ DEVMETHOD(miibus_statchg, rl_miibus_statchg),
+
+ { 0, 0 }
+};
+
+static driver_t rl_driver = {
+ "rl",
+ rl_methods,
+ sizeof(struct rl_softc)
+};
+
+static devclass_t rl_devclass;
+
+DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0);
+DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0);
+DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0);
+#else
+static device_method_t rl_methods = {
+ probe: rl_probe,
+ attach: rl_attach,
+ shutdown: rl_shutdown,
+ detach: rl_detach,
+ irq_check_dis: 0,
+ irq_en: 0,
+};
+
+driver_t libbsdport_rl_driver = {
+ "rl",
+ &rl_methods,
+ DEV_TYPE_PCI,
+ sizeof(struct rl_softc)
+};
+
+static int mii_phy_probe_alloc (device_t* dev)
+{
+ struct mii_data* md;
+ int l;
+ l = sizeof(struct device) + 4 + DEVICE_SOFTC_ALIGNMENT;
+ *dev = malloc(l, M_DEVBUF, M_WAIT);
+ if (*dev == NULL)
+ return 1;
+ md = malloc(sizeof(struct mii_data), M_DEVBUF, M_WAIT);
+ if (md == NULL)
+ {
+    free (*dev, M_DEVBUF);
+ return 1;
+ }
+ memset (*dev, 0, l);
+ memset (md, 0, sizeof (struct mii_data));
+ memcpy (device_get_softc(*dev), &md, sizeof (md));
+ return 0;
+}
+
+#define mii_phy_probe(dev, miibus, ifmedia_upd, ifmedia_sts) mii_phy_probe_alloc(miibus)
+
+static int
+mdio_r(int phy, void *uarg, unsigned reg, uint32_t *pval)
+{
+struct rl_softc *sc = uarg;
+
+ if ( phy != 0 )
+ return EINVAL;
+
+ *pval = (uint32_t) rl_miibus_readreg(sc->rl_dev, phy, reg);
+
+ return 0;
+}
+
+static int
+mdio_w(int phy, void *uarg, unsigned reg, uint32_t val)
+{
+struct rl_softc *sc = uarg;
+
+ if ( phy != 0 )
+ return EINVAL;
+
+ rl_miibus_writereg(sc->rl_dev, phy, reg, val);
+
+ return 0;
+}
+
+struct rtems_mdio_info rl_mdio = {
+ mdio_r : mdio_r,
+ mdio_w : mdio_w,
+ has_gmii : 0
+};
+
+#define RE_MDIO(sc) (&rl_mdio)
+#endif
+
+#define EE_SET(x) \
+ CSR_WRITE_1(sc, RL_EECMD, \
+ CSR_READ_1(sc, RL_EECMD) | x)
+
+#define EE_CLR(x) \
+ CSR_WRITE_1(sc, RL_EECMD, \
+ CSR_READ_1(sc, RL_EECMD) & ~x)
+
+/*
+ * Send a read command and address to the EEPROM, check for ACK.
+ */
+static void
+rl_eeprom_putbyte(struct rl_softc *sc, int addr)
+{
+ register int d, i;
+
+ d = addr | sc->rl_eecmd_read;
+
+ /*
+ * Feed in each bit and strobe the clock.
+ */
+ for (i = 0x400; i; i >>= 1) {
+ if (d & i) {
+ EE_SET(RL_EE_DATAIN);
+ } else {
+ EE_CLR(RL_EE_DATAIN);
+ }
+ DELAY(100);
+ EE_SET(RL_EE_CLK);
+ DELAY(150);
+ EE_CLR(RL_EE_CLK);
+ DELAY(100);
+ }
+}
+
+/*
+ * Read a word of data stored in the EEPROM at address 'addr.'
+ */
+static void
+rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest)
+{
+ register int i;
+ uint16_t word = 0;
+
+ /* Enter EEPROM access mode. */
+ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
+
+ /*
+ * Send address of word we want to read.
+ */
+ rl_eeprom_putbyte(sc, addr);
+
+ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
+
+ /*
+ * Start reading bits from EEPROM.
+ */
+ for (i = 0x8000; i; i >>= 1) {
+ EE_SET(RL_EE_CLK);
+ DELAY(100);
+ if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
+ word |= i;
+ EE_CLR(RL_EE_CLK);
+ DELAY(100);
+ }
+
+ /* Turn off EEPROM access mode. */
+ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
+
+ *dest = word;
+}
+
+/*
+ * Read a sequence of words from the EEPROM.
+ */
+static void
+rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap)
+{
+ int i;
+ uint16_t word = 0, *ptr;
+
+ for (i = 0; i < cnt; i++) {
+ rl_eeprom_getword(sc, off + i, &word);
+ ptr = (uint16_t *)(dest + (i * 2));
+ if (swap)
+ *ptr = ntohs(word);
+ else
+ *ptr = word;
+ }
+}
+
+/*
+ * MII access routines are provided for the 8129, which
+ * doesn't have a built-in PHY. For the 8139, we fake things
+ * up by diverting rl_phy_readreg()/rl_phy_writereg() to the
+ * direct access PHY registers.
+ */
+#define MII_SET(x) \
+ CSR_WRITE_1(sc, RL_MII, \
+ CSR_READ_1(sc, RL_MII) | (x))
+
+#define MII_CLR(x) \
+ CSR_WRITE_1(sc, RL_MII, \
+ CSR_READ_1(sc, RL_MII) & ~(x))
+
+/*
+ * Sync the PHYs by setting data bit and strobing the clock 32 times.
+ */
+static void
+rl_mii_sync(struct rl_softc *sc)
+{
+ register int i;
+
+ MII_SET(RL_MII_DIR|RL_MII_DATAOUT);
+
+ for (i = 0; i < 32; i++) {
+ MII_SET(RL_MII_CLK);
+ DELAY(1);
+ MII_CLR(RL_MII_CLK);
+ DELAY(1);
+ }
+}
+
+/*
+ * Clock a series of bits through the MII.
+ */
+static void
+rl_mii_send(struct rl_softc *sc, uint32_t bits, int cnt)
+{
+ int i;
+
+ MII_CLR(RL_MII_CLK);
+
+ for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
+ if (bits & i) {
+ MII_SET(RL_MII_DATAOUT);
+ } else {
+ MII_CLR(RL_MII_DATAOUT);
+ }
+ DELAY(1);
+ MII_CLR(RL_MII_CLK);
+ DELAY(1);
+ MII_SET(RL_MII_CLK);
+ }
+}
+
+/*
+ * Read an PHY register through the MII.
+ */
+static int
+rl_mii_readreg(struct rl_softc *sc, struct rl_mii_frame *frame)
+{
+ int i, ack;
+
+ /* Set up frame for RX. */
+ frame->mii_stdelim = RL_MII_STARTDELIM;
+ frame->mii_opcode = RL_MII_READOP;
+ frame->mii_turnaround = 0;
+ frame->mii_data = 0;
+
+ CSR_WRITE_2(sc, RL_MII, 0);
+
+ /* Turn on data xmit. */
+ MII_SET(RL_MII_DIR);
+
+ rl_mii_sync(sc);
+
+ /* Send command/address info. */
+ rl_mii_send(sc, frame->mii_stdelim, 2);
+ rl_mii_send(sc, frame->mii_opcode, 2);
+ rl_mii_send(sc, frame->mii_phyaddr, 5);
+ rl_mii_send(sc, frame->mii_regaddr, 5);
+
+ /* Idle bit */
+ MII_CLR((RL_MII_CLK|RL_MII_DATAOUT));
+ DELAY(1);
+ MII_SET(RL_MII_CLK);
+ DELAY(1);
+
+ /* Turn off xmit. */
+ MII_CLR(RL_MII_DIR);
+
+ /* Check for ack */
+ MII_CLR(RL_MII_CLK);
+ DELAY(1);
+ ack = CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN;
+ MII_SET(RL_MII_CLK);
+ DELAY(1);
+
+ /*
+ * Now try reading data bits. If the ack failed, we still
+ * need to clock through 16 cycles to keep the PHY(s) in sync.
+ */
+ if (ack) {
+ for(i = 0; i < 16; i++) {
+ MII_CLR(RL_MII_CLK);
+ DELAY(1);
+ MII_SET(RL_MII_CLK);
+ DELAY(1);
+ }
+ goto fail;
+ }
+
+ for (i = 0x8000; i; i >>= 1) {
+ MII_CLR(RL_MII_CLK);
+ DELAY(1);
+ if (!ack) {
+ if (CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN)
+ frame->mii_data |= i;
+ DELAY(1);
+ }
+ MII_SET(RL_MII_CLK);
+ DELAY(1);
+ }
+
+fail:
+ MII_CLR(RL_MII_CLK);
+ DELAY(1);
+ MII_SET(RL_MII_CLK);
+ DELAY(1);
+
+ return (ack ? 1 : 0);
+}
+
+/*
+ * Write to a PHY register through the MII.
+ */
+static int
+rl_mii_writereg(struct rl_softc *sc, struct rl_mii_frame *frame)
+{
+
+ /* Set up frame for TX. */
+ frame->mii_stdelim = RL_MII_STARTDELIM;
+ frame->mii_opcode = RL_MII_WRITEOP;
+ frame->mii_turnaround = RL_MII_TURNAROUND;
+
+ /* Turn on data output. */
+ MII_SET(RL_MII_DIR);
+
+ rl_mii_sync(sc);
+
+ rl_mii_send(sc, frame->mii_stdelim, 2);
+ rl_mii_send(sc, frame->mii_opcode, 2);
+ rl_mii_send(sc, frame->mii_phyaddr, 5);
+ rl_mii_send(sc, frame->mii_regaddr, 5);
+ rl_mii_send(sc, frame->mii_turnaround, 2);
+ rl_mii_send(sc, frame->mii_data, 16);
+
+ /* Idle bit. */
+ MII_SET(RL_MII_CLK);
+ DELAY(1);
+ MII_CLR(RL_MII_CLK);
+ DELAY(1);
+
+ /* Turn off xmit. */
+ MII_CLR(RL_MII_DIR);
+
+ return (0);
+}
+
+static int
+rl_miibus_readreg(device_t dev, int phy, int reg)
+{
+ struct rl_softc *sc;
+ struct rl_mii_frame frame;
+ uint16_t rval = 0;
+ uint16_t rl8139_reg = 0;
+
+ sc = device_get_softc(dev);
+
+ if (sc->rl_type == RL_8139) {
+ /* Pretend the internal PHY is only at address 0 */
+ if (phy) {
+ return (0);
+ }
+ switch (reg) {
+ case MII_BMCR:
+ rl8139_reg = RL_BMCR;
+ break;
+ case MII_BMSR:
+ rl8139_reg = RL_BMSR;
+ break;
+ case MII_ANAR:
+ rl8139_reg = RL_ANAR;
+ break;
+ case MII_ANER:
+ rl8139_reg = RL_ANER;
+ break;
+ case MII_ANLPAR:
+ rl8139_reg = RL_LPAR;
+ break;
+ case MII_PHYIDR1:
+ case MII_PHYIDR2:
+ return (0);
+ /*
+ * Allow the rlphy driver to read the media status
+ * register. If we have a link partner which does not
+ * support NWAY, this is the register which will tell
+ * us the results of parallel detection.
+ */
+ case RL_MEDIASTAT:
+ rval = CSR_READ_1(sc, RL_MEDIASTAT);
+ return (rval);
+ default:
+ device_printf(sc->rl_dev, "bad phy register\n");
+ return (0);
+ }
+ rval = CSR_READ_2(sc, rl8139_reg);
+ return (rval);
+ }
+
+ bzero((char *)&frame, sizeof(frame));
+ frame.mii_phyaddr = phy;
+ frame.mii_regaddr = reg;
+ rl_mii_readreg(sc, &frame);
+
+ return (frame.mii_data);
+}
+
+static int
+rl_miibus_writereg(device_t dev, int phy, int reg, int data)
+{
+ struct rl_softc *sc;
+ struct rl_mii_frame frame;
+ uint16_t rl8139_reg = 0;
+
+ sc = device_get_softc(dev);
+
+ if (sc->rl_type == RL_8139) {
+ /* Pretend the internal PHY is only at address 0 */
+ if (phy) {
+ return (0);
+ }
+ switch (reg) {
+ case MII_BMCR:
+ rl8139_reg = RL_BMCR;
+ break;
+ case MII_BMSR:
+ rl8139_reg = RL_BMSR;
+ break;
+ case MII_ANAR:
+ rl8139_reg = RL_ANAR;
+ break;
+ case MII_ANER:
+ rl8139_reg = RL_ANER;
+ break;
+ case MII_ANLPAR:
+ rl8139_reg = RL_LPAR;
+ break;
+ case MII_PHYIDR1:
+ case MII_PHYIDR2:
+ return (0);
+ break;
+ default:
+ device_printf(sc->rl_dev, "bad phy register\n");
+ return (0);
+ }
+ CSR_WRITE_2(sc, rl8139_reg, data);
+ return (0);
+ }
+
+ bzero((char *)&frame, sizeof(frame));
+ frame.mii_phyaddr = phy;
+ frame.mii_regaddr = reg;
+ frame.mii_data = data;
+ rl_mii_writereg(sc, &frame);
+
+ return (0);
+}
+
+#ifdef __rtems__
+static void
+rl_mii_tick(device_t dev)
+{
+ struct rl_softc *sc;
+ struct mii_data *mii;
+ int err;
+ sc = device_get_softc(dev);
+ mii = device_get_softc(sc->rl_miibus);
+ mii->mii_media_status = IFM_MAKEWORD(0,0,0,0);
+ mii->mii_media_active = IFM_100_TX;
+ if ( (err = rtems_mii_ioctl( RE_MDIO(sc), sc, SIOCGIFMEDIA, &mii->mii_media_status)) ) {
+ device_printf(sc->rl_dev, "WARNING: mii ioctl failed; unable to determine link status -- fake ON\n");
+ mii->mii_media_status = IFM_LINK_OK;
+ }
+}
+#endif
+
+static void
+rl_miibus_statchg(device_t dev)
+{
+ struct rl_softc *sc;
+ struct ifnet *ifp;
+ struct mii_data *mii;
+
+ sc = device_get_softc(dev);
+ mii = device_get_softc(sc->rl_miibus);
+ ifp = sc->rl_ifp;
+ if (mii == NULL || ifp == NULL ||
+ (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ sc->rl_flags &= ~RL_FLAG_LINK;
+
+ if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
+ (IFM_ACTIVE | IFM_AVALID)) {
+ switch (IFM_SUBTYPE(mii->mii_media_active)) {
+ case IFM_10_T:
+ case IFM_100_TX:
+ sc->rl_flags |= RL_FLAG_LINK;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * RealTek controllers do not provide any interface to
+ * Tx/Rx MACs for resolved speed, duplex and flow-control
+ * parameters.
+ */
+}
+
+/*
+ * Program the 64-bit multicast hash filter.
+ */
+static void
+rl_setmulti(struct rl_softc *sc)
+{
+ struct ifnet *ifp = sc->rl_ifp;
+ int h = 0;
+ uint32_t hashes[2] = { 0, 0 };
+#ifndef __rtems__
+ struct ifmultiaddr *ifma;
+#endif
+ uint32_t rxfilt;
+ int mcnt = 0;
+
+ RL_LOCK_ASSERT(sc);
+
+ rxfilt = CSR_READ_4(sc, RL_RXCFG);
+
+ if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
+ rxfilt |= RL_RXCFG_RX_MULTI;
+ CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
+ CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF);
+ CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF);
+ return;
+ }
+
+ /* first, zot all the existing hash bits */
+ CSR_WRITE_4(sc, RL_MAR0, 0);
+ CSR_WRITE_4(sc, RL_MAR4, 0);
+
+#ifndef __rtems__
+ /* now program new ones */
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
+ ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
+ if (h < 32)
+ hashes[0] |= (1 << h);
+ else
+ hashes[1] |= (1 << (h - 32));
+ mcnt++;
+ }
+ if_maddr_runlock(ifp);
+#else
+ {
+ struct in_ifaddr *ia;
+ IFP_TO_IA(ifp, ia);
+ if (ia) {
+ struct in_multi *inm;
+ struct in_multistep step;
+ step.i_ia = ia;
+ step.i_inm = NULL;
+ IN_NEXT_MULTI(step, inm);
+ while (inm) {
+ unsigned char ethaddr[ETHER_ADDR_LEN];
+ unsigned char* ipaddr = (unsigned char *) &inm->inm_addr.s_addr;
+ ETHER_MAP_IP_MULTICAST (ipaddr, ethaddr);
+ h = ether_crc32_be(ethaddr, ETHER_ADDR_LEN) >> 26;
+ if (h < 32)
+ hashes[0] |= (1 << h);
+ else
+ hashes[1] |= (1 << (h - 32));
+ mcnt++;
+ IN_NEXT_MULTI(step, inm);
+ }
+ }
+ }
+#endif
+
+ if (mcnt)
+ rxfilt |= RL_RXCFG_RX_MULTI;
+ else
+ rxfilt &= ~RL_RXCFG_RX_MULTI;
+
+ CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
+ CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
+ CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
+}
+
+static void
+rl_reset(struct rl_softc *sc)
+{
+ register int i;
+
+ RL_LOCK_ASSERT(sc);
+
+ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
+
+ for (i = 0; i < RL_TIMEOUT; i++) {
+ DELAY(10);
+ if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
+ break;
+ }
+ if (i == RL_TIMEOUT)
+ device_printf(sc->rl_dev, "reset never completed!\n");
+}
+
+/*
+ * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
+ * IDs against our list and return a device name if we find a match.
+ */
+static int
+rl_probe(device_t dev)
+{
+ struct rl_type *t;
+ uint16_t devid, revid, vendor;
+ int i;
+
+ vendor = pci_get_vendor(dev);
+ devid = pci_get_device(dev);
+ revid = pci_get_revid(dev);
+
+ if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
+ if (revid == 0x20) {
+ /* 8139C+, let re(4) take care of this device. */
+ return (ENXIO);
+ }
+ }
+ t = rl_devs;
+ for (i = 0; i < sizeof(rl_devs) / sizeof(rl_devs[0]); i++, t++) {
+ if (vendor == t->rl_vid && devid == t->rl_did) {
+ device_set_desc(dev, t->rl_name);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+
+ return (ENXIO);
+}
+
+struct rl_dmamap_arg {
+ bus_addr_t rl_busaddr;
+};
+
+static void
+rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ struct rl_dmamap_arg *ctx;
+
+ if (error != 0)
+ return;
+
+ KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
+
+ ctx = (struct rl_dmamap_arg *)arg;
+ ctx->rl_busaddr = segs[0].ds_addr;
+}
+
+/*
+ * Attach the interface. Allocate softc structures, do ifmedia
+ * setup and ethernet/BPF attach.
+ *
+ * Ordering matters here: the mutex and callout are created first so
+ * rl_reset() and the error path (rl_detach()) can run at any point;
+ * the interrupt is hooked last so no ISR can see a half-built softc.
+ * Returns 0 on success or a bus/ENOSPC error code on failure.
+ */
+static int
+rl_attach(device_t dev)
+{
+ uint8_t eaddr[ETHER_ADDR_LEN];
+ uint16_t as[3];
+ struct ifnet *ifp;
+ struct rl_softc *sc;
+ struct rl_type *t;
+#ifndef __rtems__
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid_list *children;
+#endif
+ int error = 0, i, rid;
+ int unit;
+ uint16_t rl_did = 0;
+ char tn[32];
+
+ sc = device_get_softc(dev);
+ unit = device_get_unit(dev);
+ sc->rl_dev = dev;
+
+ /*
+ * Optional "twister" cable-tuning support, off by default;
+ * on FreeBSD it is controlled via a tunable/sysctl.
+ */
+ sc->rl_twister_enable = 0;
+ snprintf(tn, sizeof(tn), "dev.rl.%d.twister_enable", unit);
+#ifndef __rtems__
+ TUNABLE_INT_FETCH(tn, &sc->rl_twister_enable);
+ ctx = device_get_sysctl_ctx(sc->rl_dev);
+ children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "twister_enable", CTLFLAG_RD,
+ &sc->rl_twister_enable, 0, "");
+#endif
+
+ /* Mutex and callout first: needed by rl_reset() and rl_detach(). */
+ mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
+ MTX_DEF);
+ callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
+
+ pci_enable_busmaster(dev);
+
+ /* Map control/status registers. */
+ rid = RL_RID;
+ sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid, RF_ACTIVE);
+
+ if (sc->rl_res == NULL) {
+ device_printf(dev, "couldn't map ports/memory\n");
+ error = ENXIO;
+ goto fail;
+ }
+
+#ifdef notdef
+ /*
+ * Detect the Realtek 8139B. For some reason, this chip is very
+ * unstable when left to autoselect the media
+ * The best workaround is to set the device to the required
+ * media type or to set it to the 10 Meg speed.
+ */
+ if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF)
+ device_printf(dev,
+"Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n");
+#endif
+
+ sc->rl_btag = rman_get_bustag(sc->rl_res);
+ sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
+
+ /* Allocate interrupt */
+ rid = 0;
+ sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_SHAREABLE | RF_ACTIVE);
+
+ if (sc->rl_irq[0] == NULL) {
+ device_printf(dev, "couldn't map interrupt\n");
+ error = ENXIO;
+ goto fail;
+ }
+
+ /*
+ * Reset the adapter. Only take the lock here as it's needed in
+ * order to call rl_reset().
+ */
+ RL_LOCK(sc);
+ rl_reset(sc);
+ RL_UNLOCK(sc);
+
+ /*
+ * Word 0 of the EEPROM identifies the chip: an 8129 uses 6-bit
+ * EEPROM addressing, everything else uses 8-bit addressing.
+ */
+ sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
+ rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0);
+ if (rl_did != 0x8129)
+ sc->rl_eecmd_read = RL_EECMD_READ_8BIT;
+
+ /*
+ * Get station address from the EEPROM.
+ */
+ rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0);
+ for (i = 0; i < 3; i++) {
+ /* EEPROM words are little-endian: low byte first. */
+ eaddr[(i * 2) + 0] = as[i] & 0xff;
+ eaddr[(i * 2) + 1] = as[i] >> 8;
+ }
+
+ /*
+ * Now read the exact device type from the EEPROM to find
+ * out if it's an 8129 or 8139.
+ */
+ rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0);
+
+ t = rl_devs;
+ sc->rl_type = 0;
+ while(t->rl_name != NULL) {
+ if (rl_did == t->rl_did) {
+ sc->rl_type = t->rl_basetype;
+ break;
+ }
+ t++;
+ }
+
+ if (sc->rl_type == 0) {
+ device_printf(dev, "unknown device ID: %x assuming 8139\n",
+ rl_did);
+ sc->rl_type = RL_8139;
+ /*
+ * Read RL_IDR register to get ethernet address as accessing
+ * EEPROM may not extract correct address.
+ */
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
+ }
+
+ if ((error = rl_dma_alloc(sc)) != 0)
+ goto fail;
+
+ ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(dev, "can not if_alloc()\n");
+ error = ENOSPC;
+ goto fail;
+ }
+
+ /* Do MII setup */
+ if (mii_phy_probe(dev, &sc->rl_miibus,
+ rl_ifmedia_upd, rl_ifmedia_sts)) {
+ device_printf(dev, "MII without any phy!\n");
+ error = ENXIO;
+ goto fail;
+ }
+
+ /* Fill in the ifnet: entry points, capabilities and send queue. */
+ ifp->if_softc = sc;
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = rl_ioctl;
+ ifp->if_start = rl_start;
+ ifp->if_init = rl_init;
+#ifndef __rtems__
+ ifp->if_capabilities = IFCAP_VLAN_MTU;
+ ifp->if_capenable = ifp->if_capabilities;
+#endif
+#ifdef DEVICE_POLLING
+ ifp->if_capabilities |= IFCAP_POLLING;
+#endif
+ IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
+ ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
+ IFQ_SET_READY(&ifp->if_snd);
+
+#ifdef __rtems__
+ /*
+ * NOTE(review): the return values of the two taskqueue calls are
+ * ignored -- confirm the RTEMS libbsdport shims cannot fail here.
+ */
+ taskqueue_create_fast("rl_taskq", M_NOWAIT,
+ taskqueue_thread_enqueue, &taskqueue_fast);
+ taskqueue_start_threads(&taskqueue_fast, 1, PI_NET, "%s taskq",
+ device_get_nameunit(dev));
+#endif
+
+ /*
+ * Call MI attach routine.
+ */
+ ether_ifattach(ifp, eaddr);
+
+ /* Hook interrupt last to avoid having to lock softc */
+#ifndef __rtems__
+ error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE,
+ NULL, rl_intr, sc, &sc->rl_intrhand[0]);
+#else
+ error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE,
+ rl_intr, NULL, sc, &sc->rl_intrhand[0]);
+#endif
+ if (error) {
+ device_printf(sc->rl_dev, "couldn't set up irq\n");
+ ether_ifdetach(ifp);
+ }
+
+fail:
+ /* rl_detach() copes with partially-initialized state. */
+ if (error)
+ rl_detach(dev);
+
+ return (error);
+}
+
+/*
+ * Shutdown hardware and free up resources. This can be called any
+ * time after the mutex has been initialized. It is called in both
+ * the error case in attach and the normal detach case so it needs
+ * to be careful about only freeing resources that have actually been
+ * allocated.
+ */
+static int
+rl_detach(device_t dev)
+{
+ struct rl_softc *sc;
+ struct ifnet *ifp;
+
+ sc = device_get_softc(dev);
+ ifp = sc->rl_ifp;
+
+ KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));
+
+#ifdef DEVICE_POLLING
+ if (ifp->if_capenable & IFCAP_POLLING)
+ ether_poll_deregister(ifp);
+#endif
+ /* These should only be active if attach succeeded */
+ if (device_is_attached(dev)) {
+ /* Quiesce the chip and the tick callout before ifdetach. */
+ RL_LOCK(sc);
+ rl_stop(sc);
+ RL_UNLOCK(sc);
+ callout_drain(&sc->rl_stat_callout);
+ ether_ifdetach(ifp);
+ }
+#if 0
+ sc->suspended = 1;
+#endif
+ if (sc->rl_miibus)
+ {
+#ifndef __rtems__
+ device_delete_child(dev, sc->rl_miibus);
+#else
+ /*
+ * NOTE(review): the RTEMS port frees the miibus softc directly
+ * and passes M_NOWAIT where a malloc type is normally expected;
+ * confirm against the libbsdport free() signature.
+ */
+ free (sc->rl_miibus, M_NOWAIT);
+#endif
+ }
+ bus_generic_detach(dev);
+
+ /* Tear down in reverse order of allocation; each is NULL-checked. */
+ if (sc->rl_intrhand[0])
+ bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
+ if (sc->rl_irq[0])
+ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]);
+ if (sc->rl_res)
+ bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res);
+
+ if (ifp)
+ if_free(ifp);
+
+ rl_dma_free(sc);
+
+ /* Mutex goes last; everything above may assert on it. */
+ mtx_destroy(&sc->rl_mtx);
+
+ return (0);
+}
+
+/*
+ * Create the bus_dma tags, allocate and load the Rx memory block and
+ * create one Tx DMA map per transmit slot. Returns 0 or a bus_dma
+ * error code; on failure the caller's error path ends up invoking
+ * rl_dma_free(), which tolerates partial allocation.
+ */
+static int
+rl_dma_alloc(struct rl_softc *sc)
+{
+ struct rl_dmamap_arg ctx;
+ int error, i;
+
+ /*
+ * Allocate the parent bus DMA tag appropriate for PCI.
+ */
+ error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev), /* parent */
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->rl_parent_tag);
+ if (error) {
+ device_printf(sc->rl_dev,
+ "failed to create parent DMA tag.\n");
+ goto fail;
+ }
+ /* Create DMA tag for Rx memory block (single contiguous segment). */
+ error = bus_dma_tag_create(sc->rl_parent_tag, /* parent */
+ RL_RX_8139_BUF_ALIGN, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1, /* maxsize,nsegments */
+ RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->rl_cdata.rl_rx_tag);
+ if (error) {
+ device_printf(sc->rl_dev,
+ "failed to create Rx memory block DMA tag.\n");
+ goto fail;
+ }
+ /* Create DMA tag for Tx buffer (one mbuf cluster per slot). */
+ error = bus_dma_tag_create(sc->rl_parent_tag, /* parent */
+ RL_TX_8139_BUF_ALIGN, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MCLBYTES, 1, /* maxsize, nsegments */
+ MCLBYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->rl_cdata.rl_tx_tag);
+ if (error) {
+ device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n");
+ goto fail;
+ }
+
+ /*
+ * Allocate DMA'able memory and load DMA map for Rx memory block.
+ */
+ error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag,
+ (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK |
+ BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap);
+ if (error != 0) {
+ device_printf(sc->rl_dev,
+ "failed to allocate Rx DMA memory block.\n");
+ goto fail;
+ }
+ /* Pre-zero so rl_dmamap_cb() failure is detectable below. */
+ ctx.rl_busaddr = 0;
+ error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag,
+ sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf,
+ RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx,
+ BUS_DMA_NOWAIT);
+ if (error != 0 || ctx.rl_busaddr == 0) {
+ device_printf(sc->rl_dev,
+ "could not load Rx DMA memory block.\n");
+ goto fail;
+ }
+ sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr;
+
+ /* Create DMA maps for Tx buffers. */
+ for (i = 0; i < RL_TX_LIST_CNT; i++) {
+ sc->rl_cdata.rl_tx_chain[i] = NULL;
+ sc->rl_cdata.rl_tx_dmamap[i] = NULL;
+ error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0,
+ &sc->rl_cdata.rl_tx_dmamap[i]);
+ if (error != 0) {
+ device_printf(sc->rl_dev,
+ "could not create Tx dmamap.\n");
+ goto fail;
+ }
+ }
+
+ /* Leave a few bytes before the start of the RX ring buffer. */
+ /* rl_rx_buf_ptr keeps the real allocation base for free/bzero. */
+ sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
+ sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE;
+
+fail:
+ return (error);
+}
+
+/*
+ * Tear down all DMA resources created by rl_dma_alloc(). Safe to call
+ * with partially-allocated state: every resource is NULL-checked
+ * before release and its pointer cleared afterwards so a second call
+ * is harmless.
+ */
+static void
+rl_dma_free(struct rl_softc *sc)
+{
+ int i;
+
+ /* Rx memory block: unload the map, free the memory, kill the tag. */
+ if (sc->rl_cdata.rl_rx_tag != NULL) {
+ if (sc->rl_cdata.rl_rx_dmamap != NULL)
+ bus_dmamap_unload(sc->rl_cdata.rl_rx_tag,
+ sc->rl_cdata.rl_rx_dmamap);
+ if (sc->rl_cdata.rl_rx_dmamap != NULL &&
+ sc->rl_cdata.rl_rx_buf_ptr != NULL)
+ bus_dmamem_free(sc->rl_cdata.rl_rx_tag,
+ sc->rl_cdata.rl_rx_buf_ptr,
+ sc->rl_cdata.rl_rx_dmamap);
+ sc->rl_cdata.rl_rx_buf_ptr = NULL;
+ sc->rl_cdata.rl_rx_buf = NULL;
+ sc->rl_cdata.rl_rx_dmamap = NULL;
+ bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag);
+ /*
+ * Clear the Rx tag we just destroyed. The original code
+ * cleared rl_tx_tag here instead, which left rl_rx_tag
+ * dangling and made the Tx cleanup below a no-op, leaking
+ * the Tx maps and the Tx tag.
+ */
+ sc->rl_cdata.rl_rx_tag = NULL;
+ }
+
+ /* Tx buffers: destroy the per-slot maps, then the tag. */
+ if (sc->rl_cdata.rl_tx_tag != NULL) {
+ for (i = 0; i < RL_TX_LIST_CNT; i++) {
+ if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) {
+ bus_dmamap_destroy(
+ sc->rl_cdata.rl_tx_tag,
+ sc->rl_cdata.rl_tx_dmamap[i]);
+ sc->rl_cdata.rl_tx_dmamap[i] = NULL;
+ }
+ }
+ bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag);
+ sc->rl_cdata.rl_tx_tag = NULL;
+ }
+
+ /* Parent tag last, after all child tags are gone. */
+ if (sc->rl_parent_tag != NULL) {
+ bus_dma_tag_destroy(sc->rl_parent_tag);
+ sc->rl_parent_tag = NULL;
+ }
+}
+
+/*
+ * Initialize the transmit descriptors.
+ * Clears every Tx slot's mbuf pointer and TXADDR register and resets
+ * the producer/consumer indices. Caller must hold the softc lock.
+ */
+static int
+rl_list_tx_init(struct rl_softc *sc)
+{
+ struct rl_chain_data *cd;
+ int i;
+
+ RL_LOCK_ASSERT(sc);
+
+ cd = &sc->rl_cdata;
+ for (i = 0; i < RL_TX_LIST_CNT; i++) {
+ cd->rl_tx_chain[i] = NULL;
+ CSR_WRITE_4(sc,
+ RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000);
+ }
+
+ sc->rl_cdata.cur_tx = 0;
+ sc->rl_cdata.last_tx = 0;
+
+ return (0);
+}
+
+/*
+ * Initialize the Rx memory block: zero the whole area (including the
+ * guard bytes) and sync it for device access. Caller must hold the
+ * softc lock.
+ */
+static int
+rl_list_rx_init(struct rl_softc *sc)
+{
+
+ RL_LOCK_ASSERT(sc);
+
+ bzero(sc->rl_cdata.rl_rx_buf_ptr,
+ RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ);
+ /*
+ * Sync with the tag the Rx map was created from (rl_rx_tag,
+ * see rl_dma_alloc()); the original code mistakenly passed
+ * rl_tx_tag here.
+ */
+ bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ return (0);
+}
+
+/*
+ * A frame has been uploaded: pass the resulting mbuf chain up to
+ * the higher level protocols.
+ *
+ * You know there's something wrong with a PCI bus-master chip design
+ * when you have to use m_devget().
+ *
+ * The receive operation is badly documented in the datasheet, so I'll
+ * attempt to document it here. The driver provides a buffer area and
+ * places its base address in the RX buffer start address register.
+ * The chip then begins copying frames into the RX buffer. Each frame
+ * is preceded by a 32-bit RX status word which specifies the length
+ * of the frame and certain other status bits. Each frame (starting with
+ * the status word) is also 32-bit aligned. The frame length is in the
+ * first 16 bits of the status word; the lower 15 bits correspond with
+ * the 'rx status register' mentioned in the datasheet.
+ *
+ * Note: to make the Alpha happy, the frame payload needs to be aligned
+ * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
+ * as the offset argument to m_devget().
+ *
+ * Returns the number of packets handed up (used by the polling code).
+ * Caller must hold the softc lock; it is dropped around if_input.
+ */
+static int
+rl_rxeof(struct rl_softc *sc)
+{
+ struct mbuf *m;
+ struct ifnet *ifp = sc->rl_ifp;
+ uint8_t *rxbufpos;
+ int total_len = 0;
+ int wrap = 0;
+ int rx_npkts = 0;
+ uint32_t rxstat;
+ uint16_t cur_rx;
+ uint16_t limit;
+ uint16_t max_bytes, rx_bytes = 0;
+
+ RL_LOCK_ASSERT(sc);
+
+ bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ /*
+ * The value kept in RL_CURRXADDR lags the true read offset by
+ * 16 bytes (note the matching -16 on the write-back below).
+ */
+ cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;
+
+ /* Do not try to read past this point. */
+ limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;
+
+ if (limit < cur_rx)
+ max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
+ else
+ max_bytes = limit - cur_rx;
+
+ /* Keep pulling frames until the chip says the Rx buffer is empty. */
+ while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
+#ifdef DEVICE_POLLING
+ if (ifp->if_capenable & IFCAP_POLLING) {
+ if (sc->rxcycles <= 0)
+ break;
+ sc->rxcycles--;
+ }
+#endif
+ rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
+ rxstat = le32toh(*(uint32_t *)rxbufpos);
+
+ /*
+ * Here's a totally undocumented fact for you. When the
+ * RealTek chip is in the process of copying a packet into
+ * RAM for you, the length will be 0xfff0. If you spot a
+ * packet header with this value, you need to stop. The
+ * datasheet makes absolutely no mention of this and
+ * RealTek should be shot for this.
+ */
+ total_len = rxstat >> 16;
+ if (total_len == RL_RXSTAT_UNFINISHED)
+ break;
+
+ /* Any Rx error wedges the ring; reinit the chip to recover. */
+ if (!(rxstat & RL_RXSTAT_RXOK) ||
+ total_len < ETHER_MIN_LEN ||
+ total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
+ ifp->if_ierrors++;
+ rl_init_locked(sc);
+ return (rx_npkts);
+ }
+
+ /* No errors; receive the packet. */
+ rx_bytes += total_len + 4;
+
+ /*
+ * XXX The RealTek chip includes the CRC with every
+ * received frame, and there's no way to turn this
+ * behavior off (at least, I can't find anything in
+ * the manual that explains how to do it) so we have
+ * to trim off the CRC manually.
+ */
+ total_len -= ETHER_CRC_LEN;
+
+ /*
+ * Avoid trying to read more bytes than we know
+ * the chip has prepared for us.
+ */
+ if (rx_bytes > max_bytes)
+ break;
+
+ /* Payload starts past the 32-bit status word, modulo the ring. */
+ rxbufpos = sc->rl_cdata.rl_rx_buf +
+ ((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN);
+ if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
+ rxbufpos = sc->rl_cdata.rl_rx_buf;
+
+ /* Bytes left before the end of the ring buffer. */
+ wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;
+ if (total_len > wrap) {
+ /* Frame wraps: copy the tail from the ring start. */
+ m = m_devget((char*)rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
+ NULL);
+ if (m != NULL)
+ m_copyback(m, wrap, total_len - wrap,
+ (caddr_t)sc->rl_cdata.rl_rx_buf);
+ cur_rx = (total_len - wrap + ETHER_CRC_LEN);
+ } else {
+ m = m_devget((char*)rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
+ NULL);
+ cur_rx += total_len + 4 + ETHER_CRC_LEN;
+ }
+
+ /* Round up to 32-bit boundary. */
+ cur_rx = (cur_rx + 3) & ~3;
+ CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);
+
+ if (m == NULL) {
+ ifp->if_iqdrops++;
+ continue;
+ }
+
+ /* Drop the lock while the stack consumes the mbuf. */
+ ifp->if_ipackets++;
+ RL_UNLOCK(sc);
+#ifndef __rtems__
+ (*ifp->if_input)(ifp, m);
+#else
+ ether_input_skipping(ifp, m);
+#endif
+ RL_LOCK(sc);
+ rx_npkts++;
+ }
+
+ /* No need to sync Rx memory block as we didn't modify it. */
+ return (rx_npkts);
+}
+
+/*
+ * A frame was downloaded to the chip. It's safe for us to clean up
+ * the list buffers.
+ * Walks completed Tx slots from last_tx toward cur_tx, unloading and
+ * freeing each sent mbuf. Caller must hold the softc lock.
+ */
+static void
+rl_txeof(struct rl_softc *sc)
+{
+ struct ifnet *ifp = sc->rl_ifp;
+ uint32_t txstat;
+
+ RL_LOCK_ASSERT(sc);
+
+ /*
+ * Go through our tx list and free mbufs for those
+ * frames that have been uploaded.
+ */
+ do {
+ if (RL_LAST_TXMBUF(sc) == NULL)
+ break;
+ txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
+ /* Slot still in flight: none of the completion bits set. */
+ if (!(txstat & (RL_TXSTAT_TX_OK|
+ RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
+ break;
+
+ /* Collision count lives in the top byte of the status word. */
+ ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24;
+
+ bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc),
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc));
+ m_freem(RL_LAST_TXMBUF(sc));
+ RL_LAST_TXMBUF(sc) = NULL;
+ /*
+ * If there was a transmit underrun, bump the TX threshold.
+ * Make sure not to overflow the 63 * 32byte we can address
+ * with the 6 available bit.
+ */
+ if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
+ (sc->rl_txthresh < 2016))
+ sc->rl_txthresh += 32;
+ if (txstat & RL_TXSTAT_TX_OK)
+ ifp->if_opackets++;
+ else {
+ int oldthresh;
+ ifp->if_oerrors++;
+ if ((txstat & RL_TXSTAT_TXABRT) ||
+ (txstat & RL_TXSTAT_OUTOFWIN))
+ CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
+ oldthresh = sc->rl_txthresh;
+ /* error recovery */
+ rl_init_locked(sc);
+ /* restore original threshold */
+ sc->rl_txthresh = oldthresh;
+ return;
+ }
+ RL_INC(sc->rl_cdata.last_tx);
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ } while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);
+
+ /* All slots drained: cancel the transmit watchdog. */
+ if (RL_LAST_TXMBUF(sc) == NULL)
+ sc->rl_watchdog_timer = 0;
+}
+
+/*
+ * Advance the twister cable-tuning state machine by one step. Driven
+ * from rl_tick() every hz/10 ticks until the state reaches DONE.
+ */
+static void
+rl_twister_update(struct rl_softc *sc)
+{
+ uint16_t linktest;
+ /*
+ * Table provided by RealTek (Kinston <shangh@realtek.com.tw>) for
+ * Linux driver. Values undocumented otherwise.
+ */
+ static const uint32_t param[4][4] = {
+ {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
+ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+ {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
+ };
+
+ /*
+ * Tune the so-called twister registers of the RTL8139. These
+ * are used to compensate for impedance mismatches. The
+ * method for tuning these registers is undocumented and the
+ * following procedure is collected from public sources.
+ */
+ switch (sc->rl_twister)
+ {
+ case CHK_LINK:
+ /*
+ * If we have a sufficient link, then we can proceed in
+ * the state machine to the next stage. If not, then
+ * disable further tuning after writing sane defaults.
+ */
+ if (CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_LINK_OK) {
+ CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_OFF_CMD);
+ sc->rl_twister = FIND_ROW;
+ } else {
+ CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_CMD);
+ CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
+ CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
+ CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
+ sc->rl_twister = DONE;
+ }
+ break;
+ case FIND_ROW:
+ /*
+ * Read how long it took to see the echo to find the tuning
+ * row to use.
+ */
+ linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
+ if (linktest == RL_CSCFG_ROW3)
+ sc->rl_twist_row = 3;
+ else if (linktest == RL_CSCFG_ROW2)
+ sc->rl_twist_row = 2;
+ else if (linktest == RL_CSCFG_ROW1)
+ sc->rl_twist_row = 1;
+ else
+ sc->rl_twist_row = 0;
+ sc->rl_twist_col = 0;
+ sc->rl_twister = SET_PARAM;
+ break;
+ case SET_PARAM:
+ /* Write one table entry per tick, four per row. */
+ if (sc->rl_twist_col == 0)
+ CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
+ CSR_WRITE_4(sc, RL_PARA7C,
+ param[sc->rl_twist_row][sc->rl_twist_col]);
+ if (++sc->rl_twist_col == 4) {
+ if (sc->rl_twist_row == 3)
+ sc->rl_twister = RECHK_LONG;
+ else
+ sc->rl_twister = DONE;
+ }
+ break;
+ case RECHK_LONG:
+ /*
+ * For long cables, we have to double check to make sure we
+ * don't mistune.
+ */
+ linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
+ if (linktest == RL_CSCFG_ROW3)
+ sc->rl_twister = DONE;
+ else {
+ CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_RETUNE);
+ sc->rl_twister = RETUNE;
+ }
+ break;
+ case RETUNE:
+ /* Retune for a shorter cable (try column 2) */
+ CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
+ CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
+ CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
+ CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
+ sc->rl_twist_row--;
+ sc->rl_twist_col = 0;
+ sc->rl_twister = SET_PARAM;
+ break;
+
+ case DONE:
+ break;
+ }
+
+}
+
+/*
+ * Periodic timer, self-rescheduling via rl_stat_callout. Runs the MII
+ * tick, re-checks link state, drives the twister state machine and
+ * the transmit watchdog. Runs with the softc lock held (the callout
+ * was initialized against rl_mtx in rl_attach()).
+ */
+static void
+rl_tick(void *xsc)
+{
+ struct rl_softc *sc = xsc;
+ struct mii_data *mii;
+ int ticks;
+
+ RL_LOCK_ASSERT(sc);
+ /*
+ * If we're doing the twister cable calibration, then we need to defer
+ * watchdog timeouts. This is a no-op in normal operations, but
+ * can falsely trigger when the cable calibration takes a while and
+ * there was traffic ready to go when rl was started.
+ *
+ * We don't defer mii_tick since that updates the mii status, which
+ * helps the twister process, at least according to similar patches
+ * for the Linux driver I found online while doing the fixes. Worst
+ * case is a few extra mii reads during calibration.
+ */
+ mii = device_get_softc(sc->rl_miibus);
+#ifndef __rtems__
+ mii_tick(mii);
+#else
+ rl_mii_tick(sc->rl_dev);
+#endif
+ /* Refresh link state if we last saw the link down. */
+ if ((sc->rl_flags & RL_FLAG_LINK) == 0)
+ rl_miibus_statchg(sc->rl_dev);
+ if (sc->rl_twister_enable) {
+ if (sc->rl_twister == DONE)
+ rl_watchdog(sc);
+ else
+ rl_twister_update(sc);
+ if (sc->rl_twister == DONE)
+ ticks = hz;
+ else
+ ticks = hz / 10;
+ } else {
+ rl_watchdog(sc);
+ ticks = hz;
+ }
+
+ /* Re-arm: 10 Hz while tuning, 1 Hz otherwise. */
+ callout_reset(&sc->rl_stat_callout, ticks, rl_tick, sc);
+}
+
+#ifdef DEVICE_POLLING
+/*
+ * polling(4) entry point: take the softc lock and service the chip
+ * if the interface is running. Returns the number of packets received.
+ */
+static int
+rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
+{
+ struct rl_softc *sc = ifp->if_softc;
+ int rx_npkts = 0;
+
+ RL_LOCK(sc);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ rx_npkts = rl_poll_locked(ifp, cmd, count);
+ RL_UNLOCK(sc);
+ return (rx_npkts);
+}
+
+/*
+ * Locked body of rl_poll(): bound Rx work by 'count', clean the Tx
+ * ring, restart transmission, and on POLL_AND_CHECK_STATUS ack the
+ * ISR and recover from system errors. Caller holds the softc lock.
+ */
+static int
+rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
+{
+ struct rl_softc *sc = ifp->if_softc;
+ int rx_npkts;
+
+ RL_LOCK_ASSERT(sc);
+
+ /* rxcycles caps how many frames rl_rxeof() will process. */
+ sc->rxcycles = count;
+ rx_npkts = rl_rxeof(sc);
+ rl_txeof(sc);
+
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ rl_start_locked(ifp);
+
+ if (cmd == POLL_AND_CHECK_STATUS) {
+ uint16_t status;
+
+ /* We should also check the status register. */
+ status = CSR_READ_2(sc, RL_ISR);
+ /* 0xffff means the card has gone away. */
+ if (status == 0xffff)
+ return (rx_npkts);
+ if (status != 0)
+ CSR_WRITE_2(sc, RL_ISR, status);
+
+ /* XXX We should check behaviour on receiver stalls. */
+
+ if (status & RL_ISR_SYSTEM_ERR)
+ rl_init_locked(sc);
+ }
+ return (rx_npkts);
+}
+#endif /* DEVICE_POLLING */
+
+/*
+ * Interrupt handler. Services Rx/Tx completion and error events and
+ * kicks the transmit path if frames are queued. On RTEMS this is an
+ * interrupt filter and must return FILTER_STRAY/FILTER_HANDLED.
+ */
+#ifndef __rtems__
+static void
+#else
+static int
+#endif
+rl_intr(void *arg)
+{
+ struct rl_softc *sc = arg;
+ struct ifnet *ifp = sc->rl_ifp;
+ uint16_t status;
+
+ RL_LOCK(sc);
+
+ if (sc->suspended)
+ goto done_locked;
+
+#ifdef DEVICE_POLLING
+ if (ifp->if_capenable & IFCAP_POLLING)
+ goto done_locked;
+#endif
+
+ for (;;) {
+ status = CSR_READ_2(sc, RL_ISR);
+ /* If the card has gone away, the read returns 0xffff. */
+ if (status == 0xffff) {
+#ifdef __rtems__
+ /*
+ * Release the softc lock before bailing out. The
+ * original code returned here with RL_LOCK still
+ * held, leaking the mutex and wedging every later
+ * entry into the driver.
+ */
+ RL_UNLOCK(sc);
+ return FILTER_STRAY;
+#else
+ break;
+#endif
+ }
+ /* Ack all pending events before dispatching on them. */
+ if (status != 0)
+ CSR_WRITE_2(sc, RL_ISR, status);
+ if ((status & RL_INTRS) == 0)
+ break;
+ if (status & RL_ISR_RX_OK)
+ rl_rxeof(sc);
+ if (status & RL_ISR_RX_ERR)
+ rl_rxeof(sc);
+ if ((status & RL_ISR_TX_OK) || (status & RL_ISR_TX_ERR))
+ rl_txeof(sc);
+ if (status & RL_ISR_SYSTEM_ERR)
+ rl_init_locked(sc);
+ }
+
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ rl_start_locked(ifp);
+
+done_locked:
+ RL_UNLOCK(sc);
+
+#ifdef __rtems__
+ return FILTER_HANDLED;
+#endif
+}
+
+/*
+ * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
+ * pointers to the fragment pointers.
+ * The chain is coalesced into a single longword-aligned buffer (the
+ * hardware supports only one fragment), short frames are zero-padded
+ * to RL_MIN_FRAMELEN, and the buffer's DMA address is written to the
+ * current TXADDR register. On failure *m_head may be freed and NULLed.
+ */
+static int
+rl_encap(struct rl_softc *sc, struct mbuf **m_head)
+{
+ struct mbuf *m;
+ bus_dma_segment_t txsegs[1];
+ int error, nsegs, padlen;
+
+ RL_LOCK_ASSERT(sc);
+
+ m = *m_head;
+ padlen = 0;
+ /*
+ * Hardware doesn't auto-pad, so we have to make sure
+ * pad short frames out to the minimum frame length.
+ */
+ if (m->m_pkthdr.len < RL_MIN_FRAMELEN)
+ padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len;
+ /*
+ * The RealTek is brain damaged and wants longword-aligned
+ * TX buffers, plus we can only have one fragment buffer
+ * per packet. We have to copy pretty much all the time.
+ */
+ if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 ||
+ (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) {
+ m = m_defrag(*m_head, M_DONTWAIT);
+ if (m == NULL) {
+ m_freem(*m_head);
+ *m_head = NULL;
+ return (ENOMEM);
+ }
+ }
+ *m_head = m;
+
+ if (padlen > 0) {
+ /*
+ * Make security-conscious people happy: zero out the
+ * bytes in the pad area, since we don't know what
+ * this mbuf cluster buffer's previous user might
+ * have left in it.
+ */
+ bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
+ m->m_pkthdr.len += padlen;
+ m->m_len = m->m_pkthdr.len;
+ }
+
+ /* Load into the current Tx slot's map; must be one segment. */
+ error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag,
+ RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0);
+ if (error != 0)
+ return (error);
+ if (nsegs == 0) {
+ m_freem(*m_head);
+ *m_head = NULL;
+ return (EIO);
+ }
+
+ /* Hand the buffer address to the chip; rl_start_locked() fires it. */
+ RL_CUR_TXMBUF(sc) = m;
+ bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc),
+ BUS_DMASYNC_PREWRITE);
+ CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr));
+
+ return (0);
+}
+
+/*
+ * Main transmit routine.
+ * if_start entry point: just wraps rl_start_locked() in the softc lock.
+ */
+static void
+rl_start(struct ifnet *ifp)
+{
+ struct rl_softc *sc = ifp->if_softc;
+
+ RL_LOCK(sc);
+ rl_start_locked(ifp);
+ RL_UNLOCK(sc);
+}
+
+/*
+ * Locked transmit path: dequeue frames and hand them to rl_encap()
+ * until the (4-slot) Tx ring is full or the queue is empty. Requires
+ * a running interface and link; sets IFF_DRV_OACTIVE when the ring
+ * fills and arms the watchdog for each queued frame.
+ */
+static void
+rl_start_locked(struct ifnet *ifp)
+{
+ struct rl_softc *sc = ifp->if_softc;
+ struct mbuf *m_head = NULL;
+
+ RL_LOCK_ASSERT(sc);
+
+#ifdef __rtems__
+ /* Poll the PHY here so a newly-established link is noticed. */
+ if ((sc->rl_flags & RL_FLAG_LINK) == 0) {
+ rl_mii_tick(sc->rl_dev);
+ rl_miibus_statchg(sc->rl_dev);
+ }
+#endif
+
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
+ return;
+
+ /* A NULL current-slot mbuf means the slot is free. */
+ while (RL_CUR_TXMBUF(sc) == NULL) {
+
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+
+ if (m_head == NULL)
+ break;
+
+ if (rl_encap(sc, &m_head)) {
+ if (m_head == NULL)
+ break;
+ /* Put it back and stall until the ring drains. */
+ IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ break;
+ }
+
+ /* Pass a copy of this mbuf chain to the bpf subsystem. */
+ BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));
+
+ /* Transmit the frame. */
+ CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
+ RL_TXTHRESH(sc->rl_txthresh) |
+ RL_CUR_TXMBUF(sc)->m_pkthdr.len);
+
+ RL_INC(sc->rl_cdata.cur_tx);
+
+ /* Set a timeout in case the chip goes out to lunch. */
+ sc->rl_watchdog_timer = 5;
+ }
+
+ /*
+ * We broke out of the loop because all our TX slots are
+ * full. Mark the NIC as busy until it drains some of the
+ * packets from the queue.
+ */
+ if (RL_CUR_TXMBUF(sc) != NULL)
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+}
+
+/*
+ * if_init entry point: wraps rl_init_locked() in the softc lock.
+ */
+static void
+rl_init(void *xsc)
+{
+ struct rl_softc *sc = xsc;
+
+ RL_LOCK(sc);
+ rl_init_locked(sc);
+ RL_UNLOCK(sc);
+}
+
+/*
+ * (Re)initialize the chip: stop and reset it, program the station
+ * address, Rx buffer, Tx/Rx configuration, multicast filter and
+ * interrupt mask, then start the receiver/transmitter and the
+ * periodic tick. Also used for error recovery (see rl_txeof()/
+ * rl_rxeof()). Caller must hold the softc lock.
+ */
+static void
+rl_init_locked(struct rl_softc *sc)
+{
+ struct ifnet *ifp = sc->rl_ifp;
+ struct mii_data *mii;
+ uint32_t rxcfg = 0;
+ uint32_t eaddr[2];
+
+ RL_LOCK_ASSERT(sc);
+
+ mii = device_get_softc(sc->rl_miibus);
+
+ /*
+ * Cancel pending I/O and free all RX/TX buffers.
+ */
+ rl_stop(sc);
+
+ rl_reset(sc);
+ if (sc->rl_twister_enable) {
+ /*
+ * Reset twister register tuning state. The twister
+ * registers and their tuning are undocumented, but
+ * are necessary to cope with bad links. rl_twister =
+ * DONE here will disable this entirely.
+ */
+ sc->rl_twister = CHK_LINK;
+ }
+
+ /*
+ * Init our MAC address. Even though the chipset
+ * documentation doesn't mention it, we need to enter "Config
+ * register write enable" mode to modify the ID registers.
+ */
+ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
+ bzero(eaddr, sizeof(eaddr));
+ bcopy(IF_LLADDR(sc->rl_ifp), eaddr, ETHER_ADDR_LEN);
+ CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]);
+ CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]);
+ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
+
+ /* Init the RX memory block pointer register. */
+ CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr +
+ RL_RX_8139_BUF_RESERVE);
+ /* Init TX descriptors. */
+ rl_list_tx_init(sc);
+ /* Init Rx memory block. */
+ rl_list_rx_init(sc);
+
+ /*
+ * Enable transmit and receive.
+ */
+ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
+
+ /*
+ * Set the initial TX and RX configuration.
+ */
+ CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
+ CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);
+
+ /* Set the individual bit to receive frames for this host only. */
+ rxcfg = CSR_READ_4(sc, RL_RXCFG);
+ rxcfg |= RL_RXCFG_RX_INDIV;
+
+ /* If we want promiscuous mode, set the allframes bit. */
+ if (ifp->if_flags & IFF_PROMISC) {
+ rxcfg |= RL_RXCFG_RX_ALLPHYS;
+ CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
+ } else {
+ rxcfg &= ~RL_RXCFG_RX_ALLPHYS;
+ CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
+ }
+
+ /* Set capture broadcast bit to capture broadcast frames. */
+ if (ifp->if_flags & IFF_BROADCAST) {
+ rxcfg |= RL_RXCFG_RX_BROAD;
+ CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
+ } else {
+ rxcfg &= ~RL_RXCFG_RX_BROAD;
+ CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
+ }
+
+ /* Program the multicast filter, if necessary. */
+ rl_setmulti(sc);
+
+#ifdef DEVICE_POLLING
+ /* Disable interrupts if we are polling. */
+ if (ifp->if_capenable & IFCAP_POLLING)
+ CSR_WRITE_2(sc, RL_IMR, 0);
+ else
+#endif
+ /* Enable interrupts. */
+ CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
+
+ /* Set initial TX threshold */
+ sc->rl_txthresh = RL_TX_THRESH_INIT;
+
+ /* Start RX/TX process. */
+ CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
+
+ /* Enable receiver and transmitter. */
+ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
+
+ /* Link state is re-learned via the tick/statchg path. */
+ sc->rl_flags &= ~RL_FLAG_LINK;
+ mii_mediachg(mii);
+
+ CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);
+
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+ callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc);
+}
+
+/*
+ * Set media options.
+ * ifmedia callback (FreeBSD only): re-run MII media selection under
+ * the softc lock. Always returns 0.
+ */
+#ifndef __rtems__
+static int
+rl_ifmedia_upd(struct ifnet *ifp)
+{
+ struct rl_softc *sc = ifp->if_softc;
+ struct mii_data *mii;
+
+ mii = device_get_softc(sc->rl_miibus);
+
+ RL_LOCK(sc);
+ mii_mediachg(mii);
+ RL_UNLOCK(sc);
+
+ return (0);
+}
+#endif
+
+/*
+ * Report current media status.
+ * ifmedia callback (FreeBSD only): poll the PHY under the lock, then
+ * copy the active/status words into the caller's ifmediareq.
+ */
+#ifndef __rtems__
+static void
+rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct rl_softc *sc = ifp->if_softc;
+ struct mii_data *mii;
+
+ mii = device_get_softc(sc->rl_miibus);
+
+ RL_LOCK(sc);
+ mii_pollstat(mii);
+ RL_UNLOCK(sc);
+ ifmr->ifm_active = mii->mii_media_active;
+ ifmr->ifm_status = mii->mii_media_status;
+}
+#endif
+
+/*
+ * ioctl entry point. Handles up/down, multicast filter reloads, media
+ * gets/sets and (FreeBSD only) polling capability changes; everything
+ * else is passed to ether_ioctl(). Returns 0 or an errno value.
+ */
+static int
+#ifndef __rtems__
+rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+#else
+rl_ioctl(struct ifnet *ifp, ioctl_command_t command, caddr_t data)
+#endif
+{
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct mii_data *mii;
+ struct rl_softc *sc = ifp->if_softc;
+ int error = 0;
+
+ switch (command) {
+ case SIOCSIFFLAGS:
+ /* Bring the interface up or down to match IFF_UP. */
+ RL_LOCK(sc);
+ if (ifp->if_flags & IFF_UP) {
+ rl_init_locked(sc);
+ } else {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ rl_stop(sc);
+ }
+ RL_UNLOCK(sc);
+ error = 0;
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+#ifdef __rtems__
+ if ( ETHER_SIOCMULTIFRAG(error, command, ifr, ifp) )
+ break;
+#endif
+ /* Reprogram the hardware multicast filter. */
+ RL_LOCK(sc);
+ rl_setmulti(sc);
+ RL_UNLOCK(sc);
+ error = 0;
+ break;
+ case SIOCGIFMEDIA:
+ case SIOCSIFMEDIA:
+ mii = device_get_softc(sc->rl_miibus);
+#ifndef __rtems__
+ error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
+#else
+ error = rtems_mii_ioctl( RE_MDIO(sc), sc, command, &ifr->ifr_media);
+#endif
+ break;
+#ifndef __rtems__
+ case SIOCSIFCAP:
+#ifdef DEVICE_POLLING
+ /* Toggle polling(4): register/deregister and flip the IMR. */
+ if (ifr->ifr_reqcap & IFCAP_POLLING &&
+ !(ifp->if_capenable & IFCAP_POLLING)) {
+ error = ether_poll_register(rl_poll, ifp);
+ if (error)
+ return(error);
+ RL_LOCK(sc);
+ /* Disable interrupts */
+ CSR_WRITE_2(sc, RL_IMR, 0x0000);
+ ifp->if_capenable |= IFCAP_POLLING;
+ RL_UNLOCK(sc);
+ return (error);
+
+ }
+ if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
+ ifp->if_capenable & IFCAP_POLLING) {
+ error = ether_poll_deregister(ifp);
+ /* Enable interrupts. */
+ RL_LOCK(sc);
+ CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
+ ifp->if_capenable &= ~IFCAP_POLLING;
+ RL_UNLOCK(sc);
+ return (error);
+ }
+#endif /* DEVICE_POLLING */
+ break;
+#endif
+ default:
+ error = ether_ioctl(ifp, command, data);
+ break;
+ }
+
+ return (error);
+}
+
+/*
+ * Transmit watchdog, driven once a second from rl_tick(). The timer
+ * is armed (to 5) by rl_start_locked() and cleared by rl_txeof();
+ * when it counts down to zero here, the chip is assumed wedged and
+ * is drained and reinitialized. Caller must hold the softc lock.
+ */
+static void
+rl_watchdog(struct rl_softc *sc)
+{
+
+ RL_LOCK_ASSERT(sc);
+
+ /* Not armed, or armed but not yet expired: nothing to do. */
+ if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer >0)
+ return;
+
+ device_printf(sc->rl_dev, "watchdog timeout\n");
+ sc->rl_ifp->if_oerrors++;
+
+ rl_txeof(sc);
+ rl_rxeof(sc);
+ rl_init_locked(sc);
+}
+
+/*
+ * Stop the adapter and free any mbufs allocated to the
+ * RX and TX lists. Caller must hold the softc lock.
+ */
+static void
+rl_stop(struct rl_softc *sc)
+{
+ int i;
+ struct ifnet *ifp = sc->rl_ifp;
+
+ RL_LOCK_ASSERT(sc);
+
+ sc->rl_watchdog_timer = 0;
+ callout_stop(&sc->rl_stat_callout);
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->rl_flags &= ~RL_FLAG_LINK;
+
+ /* Disable Rx/Tx and mask all interrupts. */
+ CSR_WRITE_1(sc, RL_COMMAND, 0x00);
+ CSR_WRITE_2(sc, RL_IMR, 0x0000);
+ /* Wait for the receiver/transmitter enable bits to clear. */
+ for (i = 0; i < RL_TIMEOUT; i++) {
+ DELAY(10);
+ if ((CSR_READ_1(sc, RL_COMMAND) &
+ (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0)
+ break;
+ }
+ if (i == RL_TIMEOUT)
+ device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n");
+
+ /*
+ * Free the TX list buffers. (The original code tested
+ * rl_tx_chain[i] != NULL twice in a row; the redundant
+ * inner check has been folded into the single test below.)
+ */
+ for (i = 0; i < RL_TX_LIST_CNT; i++) {
+ if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
+ bus_dmamap_sync(sc->rl_cdata.rl_tx_tag,
+ sc->rl_cdata.rl_tx_dmamap[i],
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
+ sc->rl_cdata.rl_tx_dmamap[i]);
+ m_freem(sc->rl_cdata.rl_tx_chain[i]);
+ sc->rl_cdata.rl_tx_chain[i] = NULL;
+ CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)),
+ 0x0000000);
+ }
+ }
+}
+
+/*
+ * Device suspend routine. Stop the interface and save some PCI
+ * settings in case the BIOS doesn't restore them properly on
+ * resume.
+ * NOTE(review): this version only stops the chip and sets the
+ * 'suspended' flag; no PCI state is actually saved here -- confirm
+ * whether that matters on the targeted platforms.
+ */
+#ifndef __rtems__
+static int
+rl_suspend(device_t dev)
+{
+ struct rl_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ RL_LOCK(sc);
+ rl_stop(sc);
+ /* rl_intr() checks this flag and ignores interrupts while set. */
+ sc->suspended = 1;
+ RL_UNLOCK(sc);
+
+ return (0);
+}
+#endif
+
+/*
+ * Device resume routine. Restore some PCI settings in case the BIOS
+ * doesn't, re-enable busmastering, and restart the interface if
+ * appropriate.
+ * NOTE(review): this version only reinitializes the chip if the
+ * interface was up and clears 'suspended'; no PCI state is restored
+ * here -- confirm against the platform's power-management needs.
+ */
+#ifndef __rtems__
+static int
+rl_resume(device_t dev)
+{
+ struct rl_softc *sc;
+ struct ifnet *ifp;
+
+ sc = device_get_softc(dev);
+ ifp = sc->rl_ifp;
+
+ RL_LOCK(sc);
+
+ /* reinitialize interface if necessary */
+ if (ifp->if_flags & IFF_UP)
+ rl_init_locked(sc);
+
+ sc->suspended = 0;
+
+ RL_UNLOCK(sc);
+
+ return (0);
+}
+#endif
+
+/*
+ * Stop all chip I/O so that the kernel's probe routines don't
+ * get confused by errant DMAs when rebooting.
+ * (Returns int on FreeBSD, void on RTEMS, per the respective
+ * device-method signatures.)
+ */
+#ifndef __rtems__
+static int
+#else
+static void
+#endif
+rl_shutdown(device_t dev)
+{
+ struct rl_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ RL_LOCK(sc);
+ rl_stop(sc);
+ RL_UNLOCK(sc);
+
+#ifndef __rtems__
+ return (0);
+#endif
+}
diff --git a/bsd_eth_drivers/if_re/if_rlreg.h b/bsd_eth_drivers/if_re/if_rlreg.h
index 5d9db5d..8c41a72 100644
--- a/bsd_eth_drivers/if_re/if_rlreg.h
+++ b/bsd_eth_drivers/if_re/if_rlreg.h
@@ -29,7 +29,7 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
- * $FreeBSD: src/sys/pci/if_rlreg.h,v 1.51.2.14.2.1 2008/10/02 02:57:24 kensmith Exp $
+ * $FreeBSD: src/sys/pci/if_rlreg.h,v 1.98 2009/08/24 18:58:13 yongari Exp $
*/
/*
@@ -76,7 +76,11 @@
#define RL_EECMD 0x0050 /* EEPROM command register */
#define RL_CFG0 0x0051 /* config register #0 */
#define RL_CFG1 0x0052 /* config register #1 */
- /* 0053-0057 reserved */
+#define RL_CFG2 0x0053 /* config register #2 */
+#define RL_CFG3 0x0054 /* config register #3 */
+#define RL_CFG4 0x0055 /* config register #4 */
+#define RL_CFG5 0x0056 /* config register #5 */
+ /* 0057 reserved */
#define RL_MEDIASTAT 0x0058 /* media status register (8139) */
/* 0059-005A reserved */
#define RL_MII 0x005A /* 8129 chip only */
@@ -127,8 +131,10 @@
#define RL_TBI_ANAR 0x0068
#define RL_TBI_LPAR 0x006A
#define RL_GMEDIASTAT 0x006C /* 8 bits */
+#define RL_MACDBG 0x006D /* 8 bits, 8168C SPIN2 only */
+#define RL_GPIO 0x006E /* 8 bits, 8168C SPIN2 only */
#define RL_MAXRXPKTLEN 0x00DA /* 16 bits, chip multiplies by 8 */
-#define RL_GTXSTART 0x0038 /* 16 bits */
+#define RL_GTXSTART 0x0038 /* 8 bits */
/*
* TX config register bits
@@ -148,15 +154,23 @@
/* Known revision codes. */
#define RL_HWREV_8169 0x00000000
-#define RL_HWREV_8110S 0x00800000
-#define RL_HWREV_8169S 0x04000000
+#define RL_HWREV_8169S 0x00800000
+#define RL_HWREV_8110S 0x04000000
#define RL_HWREV_8169_8110SB 0x10000000
#define RL_HWREV_8169_8110SC 0x18000000
+#define RL_HWREV_8102EL 0x24800000
+#define RL_HWREV_8102EL_SPIN1 0x24c00000
+#define RL_HWREV_8168D 0x28000000
+#define RL_HWREV_8168DP 0x28800000
#define RL_HWREV_8168_SPIN1 0x30000000
#define RL_HWREV_8100E 0x30800000
#define RL_HWREV_8101E 0x34000000
+#define RL_HWREV_8102E 0x34800000
#define RL_HWREV_8168_SPIN2 0x38000000
#define RL_HWREV_8168_SPIN3 0x38400000
+#define RL_HWREV_8168C 0x3C000000
+#define RL_HWREV_8168C_SPIN2 0x3C400000
+#define RL_HWREV_8168CP 0x3C800000
#define RL_HWREV_8139 0x60000000
#define RL_HWREV_8139A 0x70000000
#define RL_HWREV_8139AG 0x70800000
@@ -167,6 +181,8 @@
#define RL_HWREV_8139CPLUS 0x74800000
#define RL_HWREV_8101 0x74c00000
#define RL_HWREV_8100 0x78800000
+#define RL_HWREV_8169_8110SBL 0x7CC00000
+#define RL_HWREV_8169_8110SCE 0x98000000
#define RL_TXDMA_16BYTES 0x00000000
#define RL_TXDMA_32BYTES 0x00000100
@@ -297,8 +313,30 @@
#define RL_CMD_TX_ENB 0x0004
#define RL_CMD_RX_ENB 0x0008
#define RL_CMD_RESET 0x0010
+#define RL_CMD_STOPREQ 0x0080
/*
+ * Twister register values. These are completely undocumented and derived
+ * from public sources.
+ */
+#define RL_CSCFG_LINK_OK 0x0400
+#define RL_CSCFG_CHANGE 0x0800
+#define RL_CSCFG_STATUS 0xf000
+#define RL_CSCFG_ROW3 0x7000
+#define RL_CSCFG_ROW2 0x3000
+#define RL_CSCFG_ROW1 0x1000
+#define RL_CSCFG_LINK_DOWN_OFF_CMD 0x03c0
+#define RL_CSCFG_LINK_DOWN_CMD 0xf3c0
+
+#define RL_NWAYTST_RESET 0
+#define RL_NWAYTST_CBL_TEST 0x20
+
+#define RL_PARA78 0x78
+#define RL_PARA78_DEF 0x78fa8388
+#define RL_PARA7C 0x7C
+#define RL_PARA7C_DEF 0xcb38de43
+#define RL_PARA7C_RETUNE 0xfb38de03
+/*
* EEPROM control register
*/
#define RL_EE_DATAOUT 0x01 /* Data out */
@@ -359,16 +397,51 @@
* Config 1 register
*/
#define RL_CFG1_PWRDWN 0x01
+#define RL_CFG1_PME 0x01
#define RL_CFG1_SLEEP 0x02
+#define RL_CFG1_VPDEN 0x02
#define RL_CFG1_IOMAP 0x04
#define RL_CFG1_MEMMAP 0x08
#define RL_CFG1_RSVD 0x10
+#define RL_CFG1_LWACT 0x10
#define RL_CFG1_DRVLOAD 0x20
#define RL_CFG1_LED0 0x40
#define RL_CFG1_FULLDUPLEX 0x40 /* 8129 only */
#define RL_CFG1_LED1 0x80
/*
+ * Config 2 register
+ */
+#define RL_CFG2_PCI33MHZ 0x00
+#define RL_CFG2_PCI66MHZ 0x01
+#define RL_CFG2_PCI64BIT 0x08
+#define RL_CFG2_AUXPWR 0x10
+#define RL_CFG2_MSI 0x20
+
+/*
+ * Config 3 register
+ */
+#define RL_CFG3_GRANTSEL 0x80
+#define RL_CFG3_WOL_MAGIC 0x20
+#define RL_CFG3_WOL_LINK 0x10
+#define RL_CFG3_FAST_B2B 0x01
+
+/*
+ * Config 4 register
+ */
+#define RL_CFG4_LWPTN 0x04
+#define RL_CFG4_LWPME 0x10
+
+/*
+ * Config 5 register
+ */
+#define RL_CFG5_WOL_BCAST 0x40
+#define RL_CFG5_WOL_MCAST 0x20
+#define RL_CFG5_WOL_UCAST 0x10
+#define RL_CFG5_WOL_LANWAKE 0x02
+#define RL_CFG5_PME_STS 0x01
+
+/*
* 8139C+ register definitions
*/
@@ -403,6 +476,15 @@
#define RL_CPLUSCMD_PCI_DAC 0x0010 /* PCI dual-address cycle only */
#define RL_CPLUSCMD_RXCSUM_ENB 0x0020 /* enable RX checksum offload */
#define RL_CPLUSCMD_VLANSTRIP 0x0040 /* enable VLAN tag stripping */
+#define RL_CPLUSCMD_MACSTAT_DIS 0x0080 /* 8168B/C/CP */
+#define RL_CPLUSCMD_ASF 0x0100 /* 8168C/CP */
+#define RL_CPLUSCMD_DBG_SEL 0x0200 /* 8168C/CP */
+#define RL_CPLUSCMD_FORCE_TXFC 0x0400 /* 8168C/CP */
+#define RL_CPLUSCMD_FORCE_RXFC 0x0800 /* 8168C/CP */
+#define RL_CPLUSCMD_FORCE_HDPX 0x1000 /* 8168C/CP */
+#define RL_CPLUSCMD_NORMAL_MODE 0x2000 /* 8168C/CP */
+#define RL_CPLUSCMD_DBG_ENB 0x4000 /* 8168C/CP */
+#define RL_CPLUSCMD_BIST_ENB 0x8000 /* 8168C/CP */
/* C+ early transmit threshold */
@@ -447,6 +529,11 @@
#define RL_RXBUFLEN (1 << ((RL_RX_BUF_SZ >> 11) + 13))
#define RL_TX_LIST_CNT 4
#define RL_MIN_FRAMELEN 60
+#define RL_TX_8139_BUF_ALIGN 4
+#define RL_RX_8139_BUF_ALIGN 8
+#define RL_RX_8139_BUF_RESERVE sizeof(int64_t)
+#define RL_RX_8139_BUF_GUARD_SZ \
+ (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN + RL_RX_8139_BUF_RESERVE)
#define RL_TXTHRESH(x) ((x) << 11)
#define RL_TX_THRESH_INIT 96
#define RL_RX_FIFOTHRESH RL_RXFIFO_NOTHRESH
@@ -456,16 +543,29 @@
#define RL_RXCFG_CONFIG (RL_RX_FIFOTHRESH|RL_RX_MAXDMA|RL_RX_BUF_SZ)
#define RL_TXCFG_CONFIG (RL_TXCFG_IFG|RL_TX_MAXDMA)
+#ifndef __rtems__
#define RL_ETHER_ALIGN 2
+#else
+#define RL_ETHER_ALIGN 0
+#endif
+
+/*
+ * re(4) hardware ip4csum-tx could be mangled with 28 bytes or less IP packets.
+ */
+#define RL_IP4CSUMTX_MINLEN 28
+#define RL_IP4CSUMTX_PADLEN (ETHER_HDR_LEN + RL_IP4CSUMTX_MINLEN)
struct rl_chain_data {
uint16_t cur_rx;
uint8_t *rl_rx_buf;
uint8_t *rl_rx_buf_ptr;
- bus_dmamap_t rl_rx_dmamap;
struct mbuf *rl_tx_chain[RL_TX_LIST_CNT];
bus_dmamap_t rl_tx_dmamap[RL_TX_LIST_CNT];
+ bus_dma_tag_t rl_tx_tag;
+ bus_dma_tag_t rl_rx_tag;
+ bus_dmamap_t rl_rx_dmamap;
+ bus_addr_t rl_rx_buf_paddr;
uint8_t last_tx;
uint8_t cur_tx;
};
@@ -544,6 +644,7 @@ struct rl_desc {
#define RL_TDESC_CMD_UDPCSUM 0x00020000 /* UDP checksum enable */
#define RL_TDESC_CMD_IPCSUM 0x00040000 /* IP header checksum enable */
#define RL_TDESC_CMD_MSSVAL 0x07FF0000 /* Large send MSS value */
+#define RL_TDESC_CMD_MSSVAL_SHIFT 16 /* Large send MSS value shift */
#define RL_TDESC_CMD_LGSEND 0x08000000 /* TCP large send enb */
#define RL_TDESC_CMD_EOF 0x10000000 /* end of frame marker */
#define RL_TDESC_CMD_SOF 0x20000000 /* start of frame marker */
@@ -552,6 +653,10 @@ struct rl_desc {
#define RL_TDESC_VLANCTL_TAG 0x00020000 /* Insert VLAN tag */
#define RL_TDESC_VLANCTL_DATA 0x0000FFFF /* TAG data */
+/* RTL8168C/RTL8168CP/RTL8111C/RTL8111CP */
+#define RL_TDESC_CMD_UDPCSUMV2 0x80000000
+#define RL_TDESC_CMD_TCPCSUMV2 0x40000000
+#define RL_TDESC_CMD_IPCSUMV2 0x20000000
/*
* Error bits are valid only on the last descriptor of a frame
@@ -589,6 +694,8 @@ struct rl_desc {
#define RL_RDESC_STAT_RUNT 0x00080000 /* runt packet received */
#define RL_RDESC_STAT_CRCERR 0x00040000 /* CRC error */
#define RL_RDESC_STAT_PROTOID 0x00030000 /* Protocol type */
+#define RL_RDESC_STAT_UDP 0x00020000 /* UDP, 8168C/CP, 8111C/CP */
+#define RL_RDESC_STAT_TCP 0x00010000 /* TCP, 8168C/CP, 8111C/CP */
#define RL_RDESC_STAT_IPSUMBAD 0x00008000 /* IP header checksum bad */
#define RL_RDESC_STAT_UDPSUMBAD 0x00004000 /* UDP checksum bad */
#define RL_RDESC_STAT_TCPSUMBAD 0x00002000 /* TCP checksum bad */
@@ -600,6 +707,9 @@ struct rl_desc {
#define RL_RDESC_VLANCTL_TAG 0x00010000 /* VLAN tag available
(rl_vlandata valid)*/
#define RL_RDESC_VLANCTL_DATA 0x0000FFFF /* TAG data */
+/* RTL8168C/RTL8168CP/RTL8111C/RTL8111CP */
+#define RL_RDESC_IPV6 0x80000000
+#define RL_RDESC_IPV4 0x40000000
#define RL_PROTOID_NONIP 0x00000000
#define RL_PROTOID_TCPIP 0x00010000
@@ -635,24 +745,35 @@ struct rl_stats {
/*
* Rx/Tx descriptor parameters (8139C+ and 8169 only)
*
- * Tx/Rx count must be equal. Shared code like re_dma_map_desc assumes this.
- * Buffers must be a multiple of 8 bytes. Currently limit to 64 descriptors
- * due to the 8139C+. We need to put the number of descriptors in the ring
- * structure and use that value instead.
+ * 8139C+
+ * Number of descriptors supported : up to 64
+ * Descriptor alignment : 256 bytes
+ * Tx buffer : At least 4 bytes in length.
+ * Rx buffer : At least 8 bytes in length and 8 bytes alignment required.
+ *
+ * 8169
+ * Number of descriptors supported : up to 1024
+ * Descriptor alignment : 256 bytes
+ * Tx buffer : At least 4 bytes in length.
+ * Rx buffer : At least 8 bytes in length and 8 bytes alignment required.
*/
-#if !defined(__i386__) && !defined(__amd64__)
+#ifndef __NO_STRICT_ALIGNMENT
#define RE_FIXUP_RX 1
#endif
-#define RL_TX_DESC_CNT 64
-#define RL_TX_DESC_THLD 4
-#define RL_RX_DESC_CNT RL_TX_DESC_CNT
+#define RL_8169_TX_DESC_CNT 256
+#define RL_8169_RX_DESC_CNT 256
+#define RL_8139_TX_DESC_CNT 64
+#define RL_8139_RX_DESC_CNT 64
+#define RL_TX_DESC_CNT RL_8169_TX_DESC_CNT
+#define RL_RX_DESC_CNT RL_8169_RX_DESC_CNT
+#define RL_NTXSEGS 32
-#define RL_RX_LIST_SZ (RL_RX_DESC_CNT * sizeof(struct rl_desc))
-#define RL_TX_LIST_SZ (RL_TX_DESC_CNT * sizeof(struct rl_desc))
#define RL_RING_ALIGN 256
#define RL_IFQ_MAXLEN 512
-#define RL_DESC_INC(x) (x = (x + 1) % RL_TX_DESC_CNT)
+#define RL_TX_DESC_NXT(sc,x) ((x + 1) & ((sc)->rl_ldata.rl_tx_desc_cnt - 1))
+#define RL_TX_DESC_PRV(sc,x) ((x - 1) & ((sc)->rl_ldata.rl_tx_desc_cnt - 1))
+#define RL_RX_DESC_NXT(sc,x) ((x + 1) & ((sc)->rl_ldata.rl_rx_desc_cnt - 1))
#define RL_OWN(x) (le32toh((x)->rl_cmdstat) & RL_RDESC_STAT_OWN)
#define RL_RXBYTES(x) (le32toh((x)->rl_cmdstat) & sc->rl_rxlenmask)
#define RL_PKTSZ(x) ((x)/* >> 3*/)
@@ -664,32 +785,48 @@ struct rl_stats {
#define RE_RX_DESC_BUFLEN MCLBYTES
#endif
+#define RL_MSI_MESSAGES 1
+
#define RL_ADDR_LO(y) ((uint64_t) (y) & 0xFFFFFFFF)
#define RL_ADDR_HI(y) ((uint64_t) (y) >> 32)
+/*
+ * The number of bits reserved for MSS in RealTek controllers is
+ * 11bits. This limits the maximum interface MTU size in TSO case
+ * as upper stack should not generate TCP segments with MSS greater
+ * than the limit.
+ */
+#define RL_TSO_MTU (2047 - ETHER_HDR_LEN - ETHER_CRC_LEN)
+
/* see comment in dev/re/if_re.c */
#define RL_JUMBO_FRAMELEN 7440
#define RL_JUMBO_MTU (RL_JUMBO_FRAMELEN-ETHER_HDR_LEN-ETHER_CRC_LEN)
+#define RL_MAX_FRAMELEN \
+ (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN)
-struct rl_softc;
+struct rl_txdesc {
+ struct mbuf *tx_m;
+ bus_dmamap_t tx_dmamap;
+};
-struct rl_dmaload_arg {
- int rl_idx;
- int rl_maxsegs;
- uint32_t rl_flags;
- struct rl_desc *rl_ring;
+struct rl_rxdesc {
+ struct mbuf *rx_m;
+ bus_dmamap_t rx_dmamap;
+ bus_size_t rx_size;
};
struct rl_list_data {
- struct mbuf *rl_tx_mbuf[RL_TX_DESC_CNT];
- struct mbuf *rl_rx_mbuf[RL_RX_DESC_CNT];
+ struct rl_txdesc rl_tx_desc[RL_TX_DESC_CNT];
+ struct rl_rxdesc rl_rx_desc[RL_RX_DESC_CNT];
+ int rl_tx_desc_cnt;
+ int rl_rx_desc_cnt;
int rl_tx_prodidx;
int rl_rx_prodidx;
int rl_tx_considx;
int rl_tx_free;
- bus_dmamap_t rl_tx_dmamap[RL_TX_DESC_CNT];
- bus_dmamap_t rl_rx_dmamap[RL_RX_DESC_CNT];
- bus_dma_tag_t rl_mtag; /* mbuf mapping tag */
+ bus_dma_tag_t rl_tx_mtag; /* mbuf TX mapping tag */
+ bus_dma_tag_t rl_rx_mtag; /* mbuf RX mapping tag */
+ bus_dmamap_t rl_rx_sparemap;
bus_dma_tag_t rl_stag; /* stats mapping tag */
bus_dmamap_t rl_smap; /* stats map */
struct rl_stats *rl_stats;
@@ -704,17 +841,20 @@ struct rl_list_data {
bus_addr_t rl_tx_list_addr;
};
+enum rl_twist { DONE, CHK_LINK, FIND_ROW, SET_PARAM, RECHK_LONG, RETUNE };
+
struct rl_softc {
struct ifnet *rl_ifp; /* interface info */
bus_space_handle_t rl_bhandle; /* bus space handle */
bus_space_tag_t rl_btag; /* bus space tag */
device_t rl_dev;
struct resource *rl_res;
- struct resource *rl_irq;
- void *rl_intrhand;
+ int rl_res_id;
+ int rl_res_type;
+ struct resource *rl_irq[RL_MSI_MESSAGES];
+ void *rl_intrhand[RL_MSI_MESSAGES];
device_t rl_miibus;
bus_dma_tag_t rl_parent_tag;
- bus_dma_tag_t rl_tag;
uint8_t rl_type;
int rl_eecmd_read;
int rl_eewidth;
@@ -731,6 +871,10 @@ struct rl_softc {
uint32_t rl_rxlenmask;
int rl_testmode;
int rl_if_flags;
+ int rl_twister_enable;
+ enum rl_twist rl_twister;
+ int rl_twist_row;
+ int rl_twist_col;
int suspended; /* 0 = normal 1 = suspended */
#ifdef DEVICE_POLLING
int rxcycles;
@@ -739,9 +883,22 @@ struct rl_softc {
struct task rl_txtask;
struct task rl_inttask;
- struct mtx rl_intlock;
int rl_txstart;
- int rl_link;
+ uint32_t rl_flags;
+#define RL_FLAG_MSI 0x0001
+#define RL_FLAG_AUTOPAD 0x0002
+#define RL_FLAG_PHYWAKE 0x0008
+#define RL_FLAG_NOJUMBO 0x0010
+#define RL_FLAG_PAR 0x0020
+#define RL_FLAG_DESCV2 0x0040
+#define RL_FLAG_MACSTAT 0x0080
+#define RL_FLAG_FASTETHER 0x0100
+#define RL_FLAG_CMDSTOP 0x0200
+#define RL_FLAG_MACRESET 0x0400
+#define RL_FLAG_WOLRXENB 0x1000
+#define RL_FLAG_MACSLEEP 0x2000
+#define RL_FLAG_PCIE 0x4000
+#define RL_FLAG_LINK 0x8000
};
#define RL_LOCK(_sc) mtx_lock(&(_sc)->rl_mtx)
@@ -786,6 +943,7 @@ struct rl_softc {
CSR_WRITE_4(sc, offset, CSR_READ_4(sc, offset) & ~(val))
#define RL_TIMEOUT 1000
+#define RL_PHY_TIMEOUT 2000
/*
* General constants that are fun to know.
diff --git a/bsd_eth_drivers/libbsdport/.cvsignore b/bsd_eth_drivers/libbsdport/.cvsignore
new file mode 100644
index 0000000..70845e0
--- /dev/null
+++ b/bsd_eth_drivers/libbsdport/.cvsignore
@@ -0,0 +1 @@
+Makefile.in
diff --git a/bsd_eth_drivers/libbsdport/Makefile.am b/bsd_eth_drivers/libbsdport/Makefile.am
index e6e31cd..06c70c8 100644
--- a/bsd_eth_drivers/libbsdport/Makefile.am
+++ b/bsd_eth_drivers/libbsdport/Makefile.am
@@ -5,7 +5,7 @@ include $(top_srcdir)/rtems-pre.am
libbsdport_a_SOURCES = rtems_callout.c rtems_taskqueue.c rtems_udelay.c
libbsdport_a_SOURCES += ifstuff.c devicet.c alldrv.c contigmalloc.c
-libbsdport_a_SOURCES += sysbus.c malloc.c ifmedia.c
+libbsdport_a_SOURCES += sysbus.c malloc.c ifmedia.c misc.c miistuff.c
libbsdport_a_SOURCES += rtems_verscheck.h bus.h callout.h devicet.h
libbsdport_a_SOURCES += libbsdport.h libbsdport_post.h mutex.h
@@ -38,6 +38,9 @@ DUMMYHEADERS+=dummyheaders/netinet/ip6.h
DUMMYHEADERS+=dummyheaders/vm/pmap.h
DUMMYHEADERS+=dummyheaders/miibus_if.h
+DUMMYHEADERS+=dummyheaders/miidevs.h
+DUMMYHEADERS+=dummyheaders/dev/mii/brgphyreg.h
+
BUILT_SOURCES=
include ../links.am
@@ -48,25 +51,3 @@ include ../links.am
# after OBJECTS are made :-(
$(libbsdport_a_OBJECTS): $(DUMMYHEADERS) $(LINKS)
-if FALSE
-CLOBBER_ADDITIONS =
-CLOBBER_ADDITIONS += $(srcdir)/dummyheaders
-CLOBBER_ADDITIONS += $(addprefix $(srcdir)/,$(sort $(foreach n,$(LINKS),$(firstword $(subst /, ,$(n))))))
-
-dummyheaders/%:
- @if [ ! -d $(srcdir)/`dirname $@` ] ; then mkdir -p $(srcdir)/`dirname $@`; fi
- @touch $(srcdir)/$@
-
-# for each name listed in LINKS, create parent directories (if needed)
-# and a symlink to file in .
-# E.g., LINKS=a/b/c.h
-# creates a/b/c.h -> ../../c.h
-$(LINKS):
- @if [ ! -d $(srcdir)/$(dir $@) ] ; then mkdir -p $(srcdir)/$(dir $@); fi
- @ln -s `echo $@ | sed -e 's%[^/]\+[/]\+%../%g'` $(srcdir)/$@
-
-
-distclean-local:
- $(RM) -r $(CLOBBER_ADDITIONS)
-endif
-
diff --git a/bsd_eth_drivers/libbsdport/alldrv.c b/bsd_eth_drivers/libbsdport/alldrv.c
index f81b95a..c68d6d1 100644
--- a/bsd_eth_drivers/libbsdport/alldrv.c
+++ b/bsd_eth_drivers/libbsdport/alldrv.c
@@ -1,14 +1,42 @@
#include <stdio.h>
#include "libbsdport_api.h"
+#include "devicet.h"
driver_t *libbsdport_netdriver_table_all[] = {
&libbsdport_em_driver,
&libbsdport_pcn_driver,
&libbsdport_le_pci_driver,
+ &libbsdport_fxp_driver,
+ &libbsdport_bge_driver,
+ &libbsdport_re_driver,
+ &libbsdport_rl_driver,
0
};
-/* weak alias defaults to a table that includes all currently supported drivers */
+driver_t libbsdport_null_driver = {0};
+
+extern driver_t libbsdport_em_driver
+ __attribute__((weak,alias("libbsdport_null_driver")));
+extern driver_t libbsdport_pcn_driver
+ __attribute__((weak,alias("libbsdport_null_driver")));
+extern driver_t libbsdport_le_pci_driver
+ __attribute__((weak,alias("libbsdport_null_driver")));
+extern driver_t libbsdport_fxp_driver
+ __attribute__((weak,alias("libbsdport_null_driver")));
+extern driver_t libbsdport_bge_driver
+ __attribute__((weak,alias("libbsdport_null_driver")));
+extern driver_t libbsdport_re_driver
+ __attribute__((weak,alias("libbsdport_null_driver")));
+extern driver_t libbsdport_rl_driver
+ __attribute__((weak,alias("libbsdport_null_driver")));
+
+
+/* weak alias defaults to a table that includes
+ * all currently supported drivers.
+ *
+ * However, the individual entries are weak aliases
+ * themselves so that you don't have to link all drivers...
+ */
extern driver_t *libbsdport_netdriver_table
[
sizeof(libbsdport_netdriver_table_all)/sizeof(libbsdport_netdriver_table_all[0])
diff --git a/bsd_eth_drivers/libbsdport/bus.h b/bsd_eth_drivers/libbsdport/bus.h
index 19cb24f..e4a9d29 100644
--- a/bsd_eth_drivers/libbsdport/bus.h
+++ b/bsd_eth_drivers/libbsdport/bus.h
@@ -17,6 +17,12 @@ typedef enum {
struct resource;
+struct resource_spec {
+ int type;
+ int rid;
+ int flags;
+};
+
typedef bus_addr_t bus_space_handle_t;
/* The 'bus_space_xxx()' inlines can be helped if the
@@ -59,7 +65,6 @@ bus_space_write_##nwidth(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
*(volatile type __attribute__((may_alias)) *)(h+o) = v; \
}\
}
-
BUS_SPACE_DECL(u_int32_t, long, 4)
BUS_SPACE_DECL(u_int16_t, word, 2)
BUS_SPACE_DECL(u_int8_t, byte, 1)
@@ -115,6 +120,8 @@ BUS_SPACE_DECL(u_int8_t, byte, 1, 8)
#error "Missing definitions of bus_space_XXX() for this CPU architecture"
#endif
+#define bus_space_write_stream_4(_t, _h, _o, _v) \
+ bus_space_write_4(_t, _h, _o, htole32(_v))
#undef BUS_SPACE_DECL
@@ -140,10 +147,17 @@ BUS_SPACE_DECL(u_int8_t, byte, 1, 8)
/* flags (1<<31) means unsupported */
#define RF_ACTIVE (1<<1)
#define RF_SHAREABLE (1<<2)
+#define RF_OPTIONAL (1<<3)
struct resource *
bus_alloc_resource_any(device_t dev, int type, int *prid, unsigned flags);
+int
+bus_alloc_resources(device_t dev, struct resource_spec *rs, struct resource **res);
+
+void
+bus_release_resources(device_t dev, const struct resource_spec *rs, struct resource **res);
+
#define FILTER_STRAY 1
#define FILTER_HANDLED 0
@@ -157,6 +171,29 @@ bus_setup_intr(device_t dev, struct resource *r, int flags, driver_filter_t filt
#define INTR_MPSAFE 0
#define INTR_TYPE_NET 0
+/*
+ * INTR_FAST handlers are already more like 'filters',
+ * i.e., they disable interrupts and schedule work
+ * on a task queue.
+ *
+ * During porting the fast handler has to be slightly
+ * rewritten (must return an int value, FILTER_HANDLED
+ * if a valid IRQ was detected and work has been scheduled
+ * and FILTER_STRAY if this device didn't interrupt).
+ *
+ * You need to then remove INTR_FAST from the flags,
+ * pass the converted handler as the 'filter' argument
+ * and a NULL handler argument to bus_setup_intr().
+ *
+ */
+extern int __INTR_FAST() __attribute__((
+ error("\n\n==> you need to convert bus_setup_intr(INTR_FAST) to new API;\n"
+ " consult <sys/bus.h>\n\n")
+));
+
+/* Barf at compile time if they try to use INTR_FAST */
+#define INTR_FAST (__INTR_FAST())
+
int
bus_teardown_intr(device_t dev, struct resource *r, void *cookiep);
@@ -177,6 +214,37 @@ rman_get_bushandle(struct resource *r);
bus_space_tag_t
rman_get_bustag(struct resource *r);
+/* Newer API (releng 7_1) */
+static inline u_int8_t bus_read_1(struct resource *r, bus_size_t o)
+{
+ return bus_space_read_1(rman_get_bustag(r), rman_get_bushandle(r), o);
+}
+
+static inline u_int16_t bus_read_2(struct resource *r, bus_size_t o)
+{
+ return bus_space_read_2(rman_get_bustag(r), rman_get_bushandle(r), o);
+}
+
+static inline u_int32_t bus_read_4(struct resource *r, bus_size_t o)
+{
+ return bus_space_read_4(rman_get_bustag(r), rman_get_bushandle(r), o);
+}
+
+static inline void bus_write_1(struct resource *r, bus_size_t o, u_int8_t v)
+{
+ bus_space_write_1(rman_get_bustag(r), rman_get_bushandle(r), o, v);
+}
+
+static inline void bus_write_2(struct resource *r, bus_size_t o, u_int16_t v)
+{
+ bus_space_write_2(rman_get_bustag(r), rman_get_bushandle(r), o, v);
+}
+
+static inline void bus_write_4(struct resource *r, bus_size_t o, u_int32_t v)
+{
+ bus_space_write_4(rman_get_bustag(r), rman_get_bushandle(r), o, v);
+}
+
#ifndef BUS_DMA_NOWAIT
/* ignored anyways */
#define BUS_DMA_NOWAIT 0
@@ -192,6 +260,20 @@ rman_get_bustag(struct resource *r);
#define BUS_DMA_COHERENT 0
#endif
+#ifndef BUS_DMA_ZERO