Diffstat (limited to 'freebsd/sys/dev/bge/if_bge.c')
-rw-r--r--  freebsd/sys/dev/bge/if_bge.c | 470
1 file changed, 235 insertions(+), 235 deletions(-)
diff --git a/freebsd/sys/dev/bge/if_bge.c b/freebsd/sys/dev/bge/if_bge.c
index 5456f3d5..aadf5a99 100644
--- a/freebsd/sys/dev/bge/if_bge.c
+++ b/freebsd/sys/dev/bge/if_bge.c
@@ -85,6 +85,7 @@ __FBSDID("$FreeBSD$");
#include <sys/taskqueue.h>
#include <net/if.h>
+#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
@@ -172,6 +173,7 @@ static const struct bge_type {
{ BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
{ BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
{ BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5717C },
{ BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
{ BCOM_VENDORID, BCOM_DEVICEID_BCM5719 },
{ BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
@@ -312,6 +314,7 @@ static const struct bge_revision {
{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
{ BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
{ BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
+ { BGE_CHIPID_BCM5717_C0, "BCM5717 C0" },
{ BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
{ BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
@@ -423,18 +426,19 @@ static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
static void bge_intr(void *);
static int bge_msi_intr(void *);
static void bge_intr_task(void *, int);
-static void bge_start_locked(struct ifnet *);
-static void bge_start(struct ifnet *);
-static int bge_ioctl(struct ifnet *, u_long, caddr_t);
+static void bge_start_locked(if_t);
+static void bge_start(if_t);
+static int bge_ioctl(if_t, u_long, caddr_t);
static void bge_init_locked(struct bge_softc *);
static void bge_init(void *);
static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
static void bge_stop(struct bge_softc *);
static void bge_watchdog(struct bge_softc *);
static int bge_shutdown(device_t);
-static int bge_ifmedia_upd_locked(struct ifnet *);
-static int bge_ifmedia_upd(struct ifnet *);
-static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
+static int bge_ifmedia_upd_locked(if_t);
+static int bge_ifmedia_upd(if_t);
+static void bge_ifmedia_sts(if_t, struct ifmediareq *);
+static uint64_t bge_get_counter(if_t, ift_counter);
static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
@@ -475,7 +479,7 @@ static int bge_miibus_readreg(device_t, int, int);
static int bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(device_t);
#ifdef DEVICE_POLLING
-static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
+static int bge_poll(if_t ifp, enum poll_cmd cmd, int count);
#endif
#define BGE_RESET_SHUTDOWN 0
@@ -543,10 +547,8 @@ DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
static int bge_allow_asf = 1;
-TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
-
static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
-SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
+SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RDTUN, &bge_allow_asf, 0,
"Allow ASF mode if available");
#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
@@ -1247,7 +1249,7 @@ bge_miibus_statchg(device_t dev)
uint32_t mac_mode, rx_mode, tx_mode;
sc = device_get_softc(dev);
- if ((sc->bge_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ if ((if_getdrvflags(sc->bge_ifp) & IFF_DRV_RUNNING) == 0)
return;
mii = device_get_softc(sc->bge_miibus);
@@ -1323,7 +1325,7 @@ bge_newbuf_std(struct bge_softc *sc, int i)
int error, nsegs;
if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
- (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
+ (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
if (m == NULL)
@@ -1385,8 +1387,7 @@ bge_newbuf_jumbo(struct bge_softc *sc, int i)
if (m == NULL)
return (ENOBUFS);
- m_cljget(m, M_NOWAIT, MJUM9BYTES);
- if (!(m->m_flags & M_EXT)) {
+ if (m_cljget(m, M_NOWAIT, MJUM9BYTES) == NULL) {
m_freem(m);
return (ENOBUFS);
}
@@ -1601,14 +1602,14 @@ bge_init_tx_ring(struct bge_softc *sc)
static void
bge_setpromisc(struct bge_softc *sc)
{
- struct ifnet *ifp;
+ if_t ifp;
BGE_LOCK_ASSERT(sc);
ifp = sc->bge_ifp;
/* Enable or disable promiscuous mode as needed. */
- if (ifp->if_flags & IFF_PROMISC)
+ if (if_getflags(ifp) & IFF_PROMISC)
BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
else
BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
@@ -1617,18 +1618,30 @@ bge_setpromisc(struct bge_softc *sc)
static void
bge_setmulti(struct bge_softc *sc)
{
- struct ifnet *ifp;
- struct ifmultiaddr *ifma;
+ if_t ifp;
+ int mc_count = 0;
uint32_t hashes[4] = { 0, 0, 0, 0 };
- int h, i;
+ int h, i, mcnt;
+ unsigned char *mta;
BGE_LOCK_ASSERT(sc);
ifp = sc->bge_ifp;
- if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
+ mc_count = if_multiaddr_count(ifp, -1);
+ mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN *
+ mc_count, M_DEVBUF, M_NOWAIT);
+
+ if(mta == NULL) {
+ device_printf(sc->bge_dev,
+ "Failed to allocated temp mcast list\n");
+ return;
+ }
+
+ if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
for (i = 0; i < 4; i++)
CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
+ free(mta, M_DEVBUF);
return;
}
@@ -1636,32 +1649,30 @@ bge_setmulti(struct bge_softc *sc)
for (i = 0; i < 4; i++)
CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
- /* Now program new ones. */
- if_maddr_rlock(ifp);
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
- ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
+ if_multiaddr_array(ifp, mta, &mcnt, mc_count);
+ for(i = 0; i < mcnt; i++) {
+ h = ether_crc32_le(mta + (i * ETHER_ADDR_LEN),
+ ETHER_ADDR_LEN) & 0x7F;
hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
}
- if_maddr_runlock(ifp);
for (i = 0; i < 4; i++)
CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
+
+ free(mta, M_DEVBUF);
}
static void
bge_setvlan(struct bge_softc *sc)
{
- struct ifnet *ifp;
+ if_t ifp;
BGE_LOCK_ASSERT(sc);
ifp = sc->bge_ifp;
/* Enable or disable VLAN tag stripping as needed. */
- if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+ if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
else
BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
@@ -1995,7 +2006,7 @@ bge_blockinit(struct bge_softc *sc)
/* Configure mbuf pool watermarks */
if (BGE_IS_5717_PLUS(sc)) {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
- if (sc->bge_ifp->if_mtu > ETHERMTU) {
+ if (if_getmtu(sc->bge_ifp) > ETHERMTU) {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
} else {
@@ -2691,6 +2702,10 @@ bge_chipid(device_t dev)
* registers.
*/
switch (pci_get_device(dev)) {
+ case BCOM_DEVICEID_BCM5717C:
+ /* 5717 C0 seems to belong to 5720 line. */
+ id = BGE_CHIPID_BCM5720_A0;
+ break;
case BCOM_DEVICEID_BCM5717:
case BCOM_DEVICEID_BCM5718:
case BCOM_DEVICEID_BCM5719:
@@ -2816,10 +2831,10 @@ bge_dma_free(struct bge_softc *sc)
bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
/* Destroy standard RX ring. */
- if (sc->bge_cdata.bge_rx_std_ring_map)
+ if (sc->bge_ldata.bge_rx_std_ring_paddr)
bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
sc->bge_cdata.bge_rx_std_ring_map);
- if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
+ if (sc->bge_ldata.bge_rx_std_ring)
bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
sc->bge_ldata.bge_rx_std_ring,
sc->bge_cdata.bge_rx_std_ring_map);
@@ -2828,12 +2843,11 @@ bge_dma_free(struct bge_softc *sc)
bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
/* Destroy jumbo RX ring. */
- if (sc->bge_cdata.bge_rx_jumbo_ring_map)
+ if (sc->bge_ldata.bge_rx_jumbo_ring_paddr)
bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
sc->bge_cdata.bge_rx_jumbo_ring_map);
- if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
- sc->bge_ldata.bge_rx_jumbo_ring)
+ if (sc->bge_ldata.bge_rx_jumbo_ring)
bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
sc->bge_ldata.bge_rx_jumbo_ring,
sc->bge_cdata.bge_rx_jumbo_ring_map);
@@ -2842,12 +2856,11 @@ bge_dma_free(struct bge_softc *sc)
bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
/* Destroy RX return ring. */
- if (sc->bge_cdata.bge_rx_return_ring_map)
+ if (sc->bge_ldata.bge_rx_return_ring_paddr)
bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
sc->bge_cdata.bge_rx_return_ring_map);
- if (sc->bge_cdata.bge_rx_return_ring_map &&
- sc->bge_ldata.bge_rx_return_ring)
+ if (sc->bge_ldata.bge_rx_return_ring)
bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
sc->bge_ldata.bge_rx_return_ring,
sc->bge_cdata.bge_rx_return_ring_map);
@@ -2856,11 +2869,11 @@ bge_dma_free(struct bge_softc *sc)
bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
/* Destroy TX ring. */
- if (sc->bge_cdata.bge_tx_ring_map)
+ if (sc->bge_ldata.bge_tx_ring_paddr)
bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
sc->bge_cdata.bge_tx_ring_map);
- if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
+ if (sc->bge_ldata.bge_tx_ring)
bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
sc->bge_ldata.bge_tx_ring,
sc->bge_cdata.bge_tx_ring_map);
@@ -2869,11 +2882,11 @@ bge_dma_free(struct bge_softc *sc)
bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
/* Destroy status block. */
- if (sc->bge_cdata.bge_status_map)
+ if (sc->bge_ldata.bge_status_block_paddr)
bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
sc->bge_cdata.bge_status_map);
- if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
+ if (sc->bge_ldata.bge_status_block)
bus_dmamem_free(sc->bge_cdata.bge_status_tag,
sc->bge_ldata.bge_status_block,
sc->bge_cdata.bge_status_map);
@@ -2882,11 +2895,11 @@ bge_dma_free(struct bge_softc *sc)
bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
/* Destroy statistics block. */
- if (sc->bge_cdata.bge_stats_map)
+ if (sc->bge_ldata.bge_stats_paddr)
bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
sc->bge_cdata.bge_stats_map);
- if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
+ if (sc->bge_ldata.bge_stats)
bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
sc->bge_ldata.bge_stats,
sc->bge_cdata.bge_stats_map);
@@ -2908,14 +2921,10 @@ bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
bus_addr_t *paddr, const char *msg)
{
struct bge_dmamap_arg ctx;
- bus_addr_t lowaddr;
- bus_size_t ring_end;
int error;
- lowaddr = BUS_SPACE_MAXADDR;
-again:
error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
- alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
+ alignment, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
if (error != 0) {
device_printf(sc->bge_dev,
@@ -2940,25 +2949,6 @@ again:
return (ENOMEM);
}
*paddr = ctx.bge_busaddr;
- ring_end = *paddr + maxsize;
- if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
- BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
- /*
- * 4GB boundary crossed. Limit maximum allowable DMA
- * address space to 32bit and try again.
- */
- bus_dmamap_unload(*tag, *map);
- bus_dmamem_free(*tag, *ring, *map);
- bus_dma_tag_destroy(*tag);
- if (bootverbose)
- device_printf(sc->bge_dev, "4GB boundary crossed, "
- "limit DMA address space to 32bit for %s\n", msg);
- *ring = NULL;
- *tag = NULL;
- *map = NULL;
- lowaddr = BUS_SPACE_MAXADDR_32BIT;
- goto again;
- }
return (0);
}
@@ -2966,7 +2956,7 @@ static int
bge_dma_alloc(struct bge_softc *sc)
{
bus_addr_t lowaddr;
- bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
+ bus_size_t rxmaxsegsz, sbsz, txsegsz, txmaxsegsz;
int i, error;
lowaddr = BUS_SPACE_MAXADDR;
@@ -3053,9 +3043,7 @@ bge_dma_alloc(struct bge_softc *sc)
}
/* Create parent tag for buffers. */
- boundary = 0;
if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
- boundary = BGE_DMA_BNDRY;
/*
* XXX
* watchdog timeout issue was observed on BCM5704 which
@@ -3066,10 +3054,10 @@ bge_dma_alloc(struct bge_softc *sc)
if (sc->bge_pcixcap != 0)
lowaddr = BUS_SPACE_MAXADDR_32BIT;
}
- error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
- 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
- NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
- 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
+ error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 1, 0, lowaddr,
+ BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
+ BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
+ &sc->bge_cdata.bge_buffer_tag);
if (error != 0) {
device_printf(sc->bge_dev,
"could not allocate buffer dma tag\n");
@@ -3322,7 +3310,7 @@ bge_devinfo(struct bge_softc *sc)
static int
bge_attach(device_t dev)
{
- struct ifnet *ifp;
+ if_t ifp;
struct bge_softc *sc;
uint32_t hwcfg = 0, misccfg, pcistate;
u_char eaddr[ETHER_ADDR_LEN];
@@ -3751,28 +3739,28 @@ bge_attach(device_t dev)
error = ENXIO;
goto fail;
}
- ifp->if_softc = sc;
+ if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_ioctl = bge_ioctl;
- ifp->if_start = bge_start;
- ifp->if_init = bge_init;
- ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
- IFQ_SET_READY(&ifp->if_snd);
- ifp->if_hwassist = sc->bge_csum_features;
- ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
- IFCAP_VLAN_MTU;
+ if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
+ if_setioctlfn(ifp, bge_ioctl);
+ if_setstartfn(ifp, bge_start);
+ if_setinitfn(ifp, bge_init);
+ if_setgetcounterfn(ifp, bge_get_counter);
+ if_setsendqlen(ifp, BGE_TX_RING_CNT - 1);
+ if_setsendqready(ifp);
+ if_sethwassist(ifp, sc->bge_csum_features);
+ if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
+ IFCAP_VLAN_MTU);
if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
- ifp->if_hwassist |= CSUM_TSO;
- ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
+ if_sethwassistbits(ifp, CSUM_TSO, 0);
+ if_setcapabilitiesbit(ifp, IFCAP_TSO4 | IFCAP_VLAN_HWTSO, 0);
}
#ifdef IFCAP_VLAN_HWCSUM
- ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
+ if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
#endif
- ifp->if_capenable = ifp->if_capabilities;
+ if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
- ifp->if_capabilities |= IFCAP_POLLING;
+ if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
/*
@@ -3780,9 +3768,9 @@ bge_attach(device_t dev)
* to hardware bugs.
*/
if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
- ifp->if_capabilities &= ~IFCAP_HWCSUM;
- ifp->if_capenable &= ~IFCAP_HWCSUM;
- ifp->if_hwassist = 0;
+ if_setcapabilitiesbit(ifp, 0, IFCAP_HWCSUM);
+ if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
+ if_sethwassist(ifp, 0);
}
/*
@@ -3878,9 +3866,10 @@ bge_attach(device_t dev)
again:
bge_asf_driver_up(sc);
- error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
- bge_ifmedia_sts, capmask, sc->bge_phy_addr, MII_OFFSET_ANY,
- MIIF_DOPAUSE);
+ error = mii_attach(dev, &sc->bge_miibus, ifp,
+ (ifm_change_cb_t)bge_ifmedia_upd,
+ (ifm_stat_cb_t)bge_ifmedia_sts, capmask, sc->bge_phy_addr,
+ MII_OFFSET_ANY, MIIF_DOPAUSE);
if (error != 0) {
if (trys++ < 4) {
device_printf(sc->bge_dev, "Try again\n");
@@ -3917,7 +3906,7 @@ again:
ether_ifattach(ifp, eaddr);
/* Tell upper layer we support long frames. */
- ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
/*
* Hookup IRQ last.
@@ -3964,13 +3953,13 @@ static int
bge_detach(device_t dev)
{
struct bge_softc *sc;
- struct ifnet *ifp;
+ if_t ifp;
sc = device_get_softc(dev);
ifp = sc->bge_ifp;
#ifdef DEVICE_POLLING
- if (ifp->if_capenable & IFCAP_POLLING)
+ if (if_getcapenable(ifp) & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
@@ -4326,7 +4315,7 @@ bge_rxreuse_jumbo(struct bge_softc *sc, int i)
static int
bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
{
- struct ifnet *ifp;
+ if_t ifp;
int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
uint16_t rx_cons;
@@ -4343,8 +4332,8 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
if (BGE_IS_JUMBO_CAPABLE(sc) &&
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
- (MCLBYTES - ETHER_ALIGN))
+ if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
+ ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))
bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
@@ -4356,7 +4345,7 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
int have_tag = 0;
#ifdef DEVICE_POLLING
- if (ifp->if_capenable & IFCAP_POLLING) {
+ if (if_getcapenable(ifp) & IFCAP_POLLING) {
if (sc->rxcycles <= 0)
break;
sc->rxcycles--;
@@ -4368,7 +4357,7 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
rxidx = cur_rx->bge_idx;
BGE_INC(rx_cons, sc->bge_return_ring_cnt);
- if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
+ if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING &&
cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
have_tag = 1;
vlan_tag = cur_rx->bge_vlan_tag;
@@ -4383,7 +4372,7 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
}
if (bge_newbuf_jumbo(sc, rxidx) != 0) {
bge_rxreuse_jumbo(sc, rxidx);
- ifp->if_iqdrops++;
+ if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
continue;
}
BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
@@ -4396,13 +4385,13 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
}
if (bge_newbuf_std(sc, rxidx) != 0) {
bge_rxreuse_std(sc, rxidx);
- ifp->if_iqdrops++;
+ if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
continue;
}
BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
}
- ifp->if_ipackets++;
+ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
#ifndef __NO_STRICT_ALIGNMENT
/*
* For architectures with strict alignment we must make sure
@@ -4417,7 +4406,7 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
m->m_pkthdr.rcvif = ifp;
- if (ifp->if_capenable & IFCAP_RXCSUM)
+ if (if_getcapenable(ifp) & IFCAP_RXCSUM)
bge_rxcsum(sc, cur_rx, m);
/*
@@ -4431,13 +4420,13 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
if (holdlck != 0) {
BGE_UNLOCK(sc);
- (*ifp->if_input)(ifp, m);
+ if_input(ifp, m);
BGE_LOCK(sc);
} else
- (*ifp->if_input)(ifp, m);
+ if_input(ifp, m);
rx_npkts++;
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
return (rx_npkts);
}
@@ -4465,7 +4454,7 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
* If you need correct statistics, you can enable this check.
*/
if (BGE_IS_5705_PLUS(sc))
- ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
+ if_incierrors(ifp, CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS));
#endif
return (rx_npkts);
}
@@ -4509,7 +4498,7 @@ static void
bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
{
struct bge_tx_bd *cur_tx;
- struct ifnet *ifp;
+ if_t ifp;
BGE_LOCK_ASSERT(sc);
@@ -4531,7 +4520,7 @@ bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
idx = sc->bge_tx_saved_considx;
cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
- ifp->if_opackets++;
+ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
sc->bge_cdata.bge_tx_dmamap[idx],
@@ -4545,22 +4534,22 @@ bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
}
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
if (sc->bge_txcnt == 0)
sc->bge_timer = 0;
}
#ifdef DEVICE_POLLING
static int
-bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
+bge_poll(if_t ifp, enum poll_cmd cmd, int count)
{
- struct bge_softc *sc = ifp->if_softc;
+ struct bge_softc *sc = if_getsoftc(ifp);
uint16_t rx_prod, tx_cons;
uint32_t statusword;
int rx_npkts = 0;
BGE_LOCK(sc);
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
BGE_UNLOCK(sc);
return (rx_npkts);
}
@@ -4592,12 +4581,12 @@ bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
sc->rxcycles = count;
rx_npkts = bge_rxeof(sc, rx_prod, 1);
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
BGE_UNLOCK(sc);
return (rx_npkts);
}
bge_txeof(sc, tx_cons);
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ if (!if_sendq_empty(ifp))
bge_start_locked(ifp);
BGE_UNLOCK(sc);
@@ -4623,7 +4612,7 @@ static void
bge_intr_task(void *arg, int pending)
{
struct bge_softc *sc;
- struct ifnet *ifp;
+ if_t ifp;
uint32_t status, status_tag;
uint16_t rx_prod, tx_cons;
@@ -4631,7 +4620,7 @@ bge_intr_task(void *arg, int pending)
ifp = sc->bge_ifp;
BGE_LOCK(sc);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
BGE_UNLOCK(sc);
return;
}
@@ -4660,17 +4649,17 @@ bge_intr_task(void *arg, int pending)
/* Let controller work. */
bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
sc->bge_rx_saved_considx != rx_prod) {
/* Check RX return ring producer/consumer. */
BGE_UNLOCK(sc);
bge_rxeof(sc, rx_prod, 0);
BGE_LOCK(sc);
}
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
/* Check TX ring producer/consumer. */
bge_txeof(sc, tx_cons);
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ if (!if_sendq_empty(ifp))
bge_start_locked(ifp);
}
BGE_UNLOCK(sc);
@@ -4680,7 +4669,7 @@ static void
bge_intr(void *xsc)
{
struct bge_softc *sc;
- struct ifnet *ifp;
+ if_t ifp;
uint32_t statusword;
uint16_t rx_prod, tx_cons;
@@ -4691,7 +4680,7 @@ bge_intr(void *xsc)
ifp = sc->bge_ifp;
#ifdef DEVICE_POLLING
- if (ifp->if_capenable & IFCAP_POLLING) {
+ if (if_getcapenable(ifp) & IFCAP_POLLING) {
BGE_UNLOCK(sc);
return;
}
@@ -4740,18 +4729,18 @@ bge_intr(void *xsc)
statusword || sc->bge_link_evt)
bge_link_upd(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
/* Check RX return ring producer/consumer. */
bge_rxeof(sc, rx_prod, 1);
}
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
/* Check TX ring producer/consumer. */
bge_txeof(sc, tx_cons);
}
- if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
- !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
+ !if_sendq_empty(ifp))
bge_start_locked(ifp);
BGE_UNLOCK(sc);
@@ -4815,7 +4804,7 @@ bge_tick(void *xsc)
*/
#ifdef DEVICE_POLLING
/* In polling mode we poll link state in bge_poll(). */
- if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
+ if (!(if_getcapenable(sc->bge_ifp) & IFCAP_POLLING))
#endif
{
sc->bge_link_evt++;
@@ -4836,7 +4825,7 @@ bge_tick(void *xsc)
static void
bge_stats_update_regs(struct bge_softc *sc)
{
- struct ifnet *ifp;
+ if_t ifp;
struct bge_mac_stats *stats;
uint32_t val;
@@ -4936,10 +4925,6 @@ bge_stats_update_regs(struct bge_softc *sc)
stats->RecvThresholdHit +=
CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
- ifp->if_collisions = (u_long)stats->etherStatsCollisions;
- ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
- stats->InputErrors);
-
if (sc->bge_flags & BGE_FLAG_RDMA_BUG) {
/*
* If controller transmitted more than BGE_NUM_RDMA_CHANNELS
@@ -5004,7 +4989,7 @@ bge_stats_clear_regs(struct bge_softc *sc)
static void
bge_stats_update(struct bge_softc *sc)
{
- struct ifnet *ifp;
+ if_t ifp;
bus_size_t stats;
uint32_t cnt; /* current register value */
@@ -5016,21 +5001,21 @@ bge_stats_update(struct bge_softc *sc)
CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
- ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
+ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, cnt - sc->bge_tx_collisions);
sc->bge_tx_collisions = cnt;
cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
- ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_nobds);
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_nobds);
sc->bge_rx_nobds = cnt;
cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
- ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_inerrs);
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_inerrs);
sc->bge_rx_inerrs = cnt;
cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
- ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_discards);
sc->bge_rx_discards = cnt;
cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
- ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, cnt - sc->bge_tx_discards);
sc->bge_tx_discards = cnt;
#undef READ_STAT
@@ -5357,29 +5342,29 @@ bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
* to the mbuf data regions directly in the transmit descriptors.
*/
static void
-bge_start_locked(struct ifnet *ifp)
+bge_start_locked(if_t ifp)
{
struct bge_softc *sc;
struct mbuf *m_head;
uint32_t prodidx;
int count;
- sc = ifp->if_softc;
+ sc = if_getsoftc(ifp);
BGE_LOCK_ASSERT(sc);
if (!sc->bge_link ||
- (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING)
return;
prodidx = sc->bge_tx_prodidx;
- for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
+ for (count = 0; !if_sendq_empty(ifp);) {
if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
break;
}
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+ m_head = if_dequeue(ifp);
if (m_head == NULL)
break;
@@ -5391,8 +5376,8 @@ bge_start_locked(struct ifnet *ifp)
if (bge_encap(sc, &m_head, &prodidx)) {
if (m_head == NULL)
break;
- IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ if_sendq_prepend(ifp, m_head);
+ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
break;
}
++count;
@@ -5401,11 +5386,7 @@ bge_start_locked(struct ifnet *ifp)
* If there's a BPF listener, bounce a copy of this frame
* to him.
*/
-#ifdef ETHER_BPF_MTAP
- ETHER_BPF_MTAP(ifp, m_head);
-#else
- BPF_MTAP(ifp, m_head);
-#endif
+ if_bpfmtap(ifp, m_head);
}
if (count > 0) {
@@ -5431,11 +5412,11 @@ bge_start_locked(struct ifnet *ifp)
* to the mbuf data regions directly in the transmit descriptors.
*/
static void
-bge_start(struct ifnet *ifp)
+bge_start(if_t ifp)
{
struct bge_softc *sc;
- sc = ifp->if_softc;
+ sc = if_getsoftc(ifp);
BGE_LOCK(sc);
bge_start_locked(ifp);
BGE_UNLOCK(sc);
@@ -5444,7 +5425,7 @@ bge_start(struct ifnet *ifp)
static void
bge_init_locked(struct bge_softc *sc)
{
- struct ifnet *ifp;
+ if_t ifp;
uint16_t *m;
uint32_t mode;
@@ -5452,7 +5433,7 @@ bge_init_locked(struct bge_softc *sc)
ifp = sc->bge_ifp;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
return;
/* Cancel pending I/O and flush buffers. */
@@ -5478,9 +5459,9 @@ bge_init_locked(struct bge_softc *sc)
ifp = sc->bge_ifp;
/* Specify MTU. */
- CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
+ CSR_WRITE_4(sc, BGE_RX_MTU, if_getmtu(ifp) +
ETHER_HDR_LEN + ETHER_CRC_LEN +
- (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
+ (if_getcapenable(ifp) & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
/* Load our MAC address. */
m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
@@ -5501,10 +5482,10 @@ bge_init_locked(struct bge_softc *sc)
sc->bge_csum_features &= ~CSUM_UDP;
else
sc->bge_csum_features |= CSUM_UDP;
- if (ifp->if_capabilities & IFCAP_TXCSUM &&
- ifp->if_capenable & IFCAP_TXCSUM) {
- ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
- ifp->if_hwassist |= sc->bge_csum_features;
+ if (if_getcapabilities(ifp) & IFCAP_TXCSUM &&
+ if_getcapenable(ifp) & IFCAP_TXCSUM) {
+ if_sethwassistbits(ifp, 0, (BGE_CSUM_FEATURES | CSUM_UDP));
+ if_sethwassistbits(ifp, sc->bge_csum_features, 0);
}
/* Init RX ring. */
@@ -5534,8 +5515,8 @@ bge_init_locked(struct bge_softc *sc)
/* Init jumbo RX ring. */
if (BGE_IS_JUMBO_CAPABLE(sc) &&
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
- (MCLBYTES - ETHER_ALIGN)) {
+ if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
+ ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) {
if (bge_init_rx_ring_jumbo(sc) != 0) {
device_printf(sc->bge_dev,
"no memory for jumbo Rx buffers.\n");
@@ -5571,6 +5552,8 @@ bge_init_locked(struct bge_softc *sc)
mode = CSR_READ_4(sc, BGE_RX_MODE);
if (BGE_IS_5755_PLUS(sc))
mode |= BGE_RXMODE_IPV6_ENABLE;
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5762)
+ mode |= BGE_RXMODE_IPV4_FRAG_FIX;
CSR_WRITE_4(sc,BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
DELAY(10);
@@ -5594,7 +5577,7 @@ bge_init_locked(struct bge_softc *sc)
#ifdef DEVICE_POLLING
/* Disable interrupts if we are polling. */
- if (ifp->if_capenable & IFCAP_POLLING) {
+ if (if_getcapenable(ifp) & IFCAP_POLLING) {
BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
BGE_PCIMISCCTL_MASK_PCI_INTR);
bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
@@ -5608,8 +5591,8 @@ bge_init_locked(struct bge_softc *sc)
bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
}
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
+ if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
bge_ifmedia_upd_locked(ifp);
@@ -5630,9 +5613,9 @@ bge_init(void *xsc)
* Set media options.
*/
static int
-bge_ifmedia_upd(struct ifnet *ifp)
+bge_ifmedia_upd(if_t ifp)
{
- struct bge_softc *sc = ifp->if_softc;
+ struct bge_softc *sc = if_getsoftc(ifp);
int res;
BGE_LOCK(sc);
@@ -5643,9 +5626,9 @@ bge_ifmedia_upd(struct ifnet *ifp)
}
static int
-bge_ifmedia_upd_locked(struct ifnet *ifp)
+bge_ifmedia_upd_locked(if_t ifp)
{
- struct bge_softc *sc = ifp->if_softc;
+ struct bge_softc *sc = if_getsoftc(ifp);
struct mii_data *mii;
struct mii_softc *miisc;
struct ifmedia *ifm;
@@ -5728,14 +5711,14 @@ bge_ifmedia_upd_locked(struct ifnet *ifp)
* Report current media status.
*/
static void
-bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+bge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
- struct bge_softc *sc = ifp->if_softc;
+ struct bge_softc *sc = if_getsoftc(ifp);
struct mii_data *mii;
BGE_LOCK(sc);
- if ((ifp->if_flags & IFF_UP) == 0) {
+ if ((if_getflags(ifp) & IFF_UP) == 0) {
BGE_UNLOCK(sc);
return;
}
@@ -5768,9 +5751,9 @@ bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
}
static int
-bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+bge_ioctl(if_t ifp, u_long command, caddr_t data)
{
- struct bge_softc *sc = ifp->if_softc;
+ struct bge_softc *sc = if_getsoftc(ifp);
struct ifreq *ifr = (struct ifreq *) data;
struct mii_data *mii;
int flags, mask, error = 0;
@@ -5789,10 +5772,10 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
break;
}
BGE_LOCK(sc);
- if (ifp->if_mtu != ifr->ifr_mtu) {
- ifp->if_mtu = ifr->ifr_mtu;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ if (if_getmtu(ifp) != ifr->ifr_mtu) {
+ if_setmtu(ifp, ifr->ifr_mtu);
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
+ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
bge_init_locked(sc);
}
}
@@ -5800,7 +5783,7 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
break;
case SIOCSIFFLAGS:
BGE_LOCK(sc);
- if (ifp->if_flags & IFF_UP) {
+ if (if_getflags(ifp) & IFF_UP) {
/*
* If only the state of the PROMISC flag changed,
* then just use the 'set promisc mode' command
@@ -5809,8 +5792,8 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
* waiting for it to start up, which may take a
* second or two. Similarly for ALLMULTI.
*/
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- flags = ifp->if_flags ^ sc->bge_if_flags;
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
+ flags = if_getflags(ifp) ^ sc->bge_if_flags;
if (flags & IFF_PROMISC)
bge_setpromisc(sc);
if (flags & IFF_ALLMULTI)
@@ -5818,17 +5801,17 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
} else
bge_init_locked(sc);
} else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
bge_stop(sc);
}
}
- sc->bge_if_flags = ifp->if_flags;
+ sc->bge_if_flags = if_getflags(ifp);
BGE_UNLOCK(sc);
error = 0;
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
BGE_LOCK(sc);
bge_setmulti(sc);
BGE_UNLOCK(sc);
@@ -5847,7 +5830,7 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
}
break;
case SIOCSIFCAP:
- mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+ mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
if (mask & IFCAP_POLLING) {
if (ifr->ifr_reqcap & IFCAP_POLLING) {
@@ -5858,7 +5841,7 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
BGE_PCIMISCCTL_MASK_PCI_INTR);
bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
- ifp->if_capenable |= IFCAP_POLLING;
+ if_setcapenablebit(ifp, IFCAP_POLLING, 0);
BGE_UNLOCK(sc);
} else {
error = ether_poll_deregister(ifp);
@@ -5867,53 +5850,55 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
BGE_PCIMISCCTL_MASK_PCI_INTR);
bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
- ifp->if_capenable &= ~IFCAP_POLLING;
+ if_setcapenablebit(ifp, 0, IFCAP_POLLING);
BGE_UNLOCK(sc);
}
}
#endif
if ((mask & IFCAP_TXCSUM) != 0 &&
- (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
- ifp->if_capenable ^= IFCAP_TXCSUM;
- if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
- ifp->if_hwassist |= sc->bge_csum_features;
+ (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
+ if_togglecapenable(ifp, IFCAP_TXCSUM);
+ if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
+ if_sethwassistbits(ifp,
+ sc->bge_csum_features, 0);
else
- ifp->if_hwassist &= ~sc->bge_csum_features;
+ if_sethwassistbits(ifp, 0,
+ sc->bge_csum_features);
}
if ((mask & IFCAP_RXCSUM) != 0 &&
- (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
- ifp->if_capenable ^= IFCAP_RXCSUM;
+ (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0)
+ if_togglecapenable(ifp, IFCAP_RXCSUM);
if ((mask & IFCAP_TSO4) != 0 &&
- (ifp->if_capabilities & IFCAP_TSO4) != 0) {
- ifp->if_capenable ^= IFCAP_TSO4;
- if ((ifp->if_capenable & IFCAP_TSO4) != 0)
- ifp->if_hwassist |= CSUM_TSO;
+ (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
+ if_togglecapenable(ifp, IFCAP_TSO4);
+ if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0)
+ if_sethwassistbits(ifp, CSUM_TSO, 0);
else
- ifp->if_hwassist &= ~CSUM_TSO;
+ if_sethwassistbits(ifp, 0, CSUM_TSO);
}
if (mask & IFCAP_VLAN_MTU) {
- ifp->if_capenable ^= IFCAP_VLAN_MTU;
- ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ if_togglecapenable(ifp, IFCAP_VLAN_MTU);
+ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
bge_init(sc);
}
if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
- (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
- ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+ (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
+ if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
- (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
- ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
- if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
- ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
+ (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
+ if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
+ if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
+ if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
BGE_LOCK(sc);
bge_setvlan(sc);
BGE_UNLOCK(sc);
}
#ifdef VLAN_CAPABILITIES
- VLAN_CAPABILITIES(ifp);
+ if_vlancap(ifp);
#endif
break;
default:
@@ -5927,7 +5912,7 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
static void
bge_watchdog(struct bge_softc *sc)
{
- struct ifnet *ifp;
+ if_t ifp;
uint32_t status;
BGE_LOCK_ASSERT(sc);
@@ -5966,10 +5951,10 @@ bge_watchdog(struct bge_softc *sc)
if_printf(ifp, "watchdog timeout -- resetting\n");
- ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
bge_init_locked(sc);
- ifp->if_oerrors++;
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
static void
@@ -5993,7 +5978,7 @@ bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
static void
bge_stop(struct bge_softc *sc)
{
- struct ifnet *ifp;
+ if_t ifp;
BGE_LOCK_ASSERT(sc);
@@ -6083,7 +6068,7 @@ bge_stop(struct bge_softc *sc)
if_printf(sc->bge_ifp, "link DOWN\n");
sc->bge_link = 0;
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
}
/*
@@ -6120,14 +6105,14 @@ static int
bge_resume(device_t dev)
{
struct bge_softc *sc;
- struct ifnet *ifp;
+ if_t ifp;
sc = device_get_softc(dev);
BGE_LOCK(sc);
ifp = sc->bge_ifp;
- if (ifp->if_flags & IFF_UP) {
+ if (if_getflags(ifp) & IFF_UP) {
bge_init_locked(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
bge_start_locked(ifp);
}
BGE_UNLOCK(sc);
@@ -6261,7 +6246,6 @@ bge_add_sysctls(struct bge_softc *sc)
{
struct sysctl_ctx_list *ctx;
struct sysctl_oid_list *children;
- char tn[32];
int unit;
ctx = device_get_sysctl_ctx(sc->bge_dev);
@@ -6300,18 +6284,14 @@ bge_add_sysctls(struct bge_softc *sc)
* consumes a lot of CPU cycles, so leave it off by default.
*/
sc->bge_forced_collapse = 0;
- snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
- TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
- CTLFLAG_RW, &sc->bge_forced_collapse, 0,
+ CTLFLAG_RWTUN, &sc->bge_forced_collapse, 0,
"Number of fragmented TX buffers of a frame allowed before "
"forced collapsing");
sc->bge_msi = 1;
- snprintf(tn, sizeof(tn), "dev.bge.%d.msi", unit);
- TUNABLE_INT_FETCH(tn, &sc->bge_msi);
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi",
- CTLFLAG_RD, &sc->bge_msi, 0, "Enable MSI");
+ CTLFLAG_RDTUN, &sc->bge_msi, 0, "Enable MSI");
/*
* It seems all Broadcom controllers have a bug that can generate UDP
@@ -6324,10 +6304,8 @@ bge_add_sysctls(struct bge_softc *sc)
* dev.bge.0.forced_udpcsum.
*/
sc->bge_forced_udpcsum = 0;
- snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
- TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
- CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
+ CTLFLAG_RWTUN, &sc->bge_forced_udpcsum, 0,
"Enable UDP checksum offloading even if controller can "
"generate UDP checksum value 0");
@@ -6796,3 +6774,25 @@ bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
}
return (*func == NULL ? ENXIO : 0);
}
+
+static uint64_t
+bge_get_counter(if_t ifp, ift_counter cnt)
+{
+ struct bge_softc *sc;
+ struct bge_mac_stats *stats;
+
+ sc = if_getsoftc(ifp);
+ if (!BGE_IS_5705_PLUS(sc))
+ return (if_get_counter_default(ifp, cnt));
+ stats = &sc->bge_mac_stats;
+
+ switch (cnt) {
+ case IFCOUNTER_IERRORS:
+ return (stats->NoMoreRxBDs + stats->InputDiscards +
+ stats->InputErrors);
+ case IFCOUNTER_COLLISIONS:
+ return (stats->etherStatsCollisions);
+ default:
+ return (if_get_counter_default(ifp, cnt));
+ }
+}
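
For readers unfamiliar with the conversion this diff performs, the sketch below illustrates the accessor pattern the new code relies on: the driver keeps its interface behind the opaque if_t handle and touches interface state only through the if_*() KPI from <net/if_var.h> (if_getdrvflags(), if_setdrvflagbits(), if_inc_counter(), and friends) instead of dereferencing struct ifnet fields directly. This is a minimal illustration using only KPI calls that appear in the diff above; the foo_softc structure and foo_watchdog() function are hypothetical and are not part of bge(4).

/*
 * Minimal sketch of the if_t accessor pattern used throughout this diff.
 * foo_softc and foo_watchdog() are made-up names for illustration; the
 * if_*() calls are the FreeBSD ifnet KPI that the converted bge(4) code uses.
 */
#include <sys/param.h>
#include <net/if.h>
#include <net/if_var.h>

struct foo_softc {
	if_t	foo_ifp;	/* opaque interface handle, like sc->bge_ifp above */
};

static void
foo_watchdog(struct foo_softc *sc)
{
	if_t ifp = sc->foo_ifp;

	/* Old style: if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	/* Old style: ifp->if_drv_flags &= ~IFF_DRV_RUNNING; */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);

	/* Old style: ifp->if_oerrors++; */
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}

The same pattern explains the new bge_get_counter() at the end of the diff: rather than the driver writing into ifp->if_collisions and ifp->if_ierrors from its tick handler, it registers a callback with if_setgetcounterfn() and the stack pulls IFCOUNTER_COLLISIONS and IFCOUNTER_IERRORS on demand, falling back to if_get_counter_default() for everything else.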