summary | refs | log | tree | commit | diff | stats
path: root/freebsd/sys/dev/bce
diff options
context:
space:
mode:
author    Sebastian Huber <sebastian.huber@embedded-brains.de> 2013-11-06 16:20:21 +0100
committer Sebastian Huber <sebastian.huber@embedded-brains.de> 2013-11-11 10:08:08 +0100
commit    66659ff1ad6831b0ea7425fa6ecd8a8687523658 (patch)
tree      48e22b475fa8854128e0861a33fed6f78c8094b5 /freebsd/sys/dev/bce
parent    Define __GLOBL1() and __GLOBL() (diff)
download  rtems-libbsd-66659ff1ad6831b0ea7425fa6ecd8a8687523658.tar.bz2
Update to FreeBSD 9.2
Diffstat (limited to 'freebsd/sys/dev/bce')
-rw-r--r--  freebsd/sys/dev/bce/if_bce.c    406
-rw-r--r--  freebsd/sys/dev/bce/if_bcefw.h  131
-rw-r--r--  freebsd/sys/dev/bce/if_bcereg.h  23
3 files changed, 255 insertions, 305 deletions
diff --git a/freebsd/sys/dev/bce/if_bce.c b/freebsd/sys/dev/bce/if_bce.c
index 21ef5dc7..dfae5dcc 100644
--- a/freebsd/sys/dev/bce/if_bce.c
+++ b/freebsd/sys/dev/bce/if_bce.c
@@ -377,7 +377,8 @@ static void bce_release_resources (struct bce_softc *);
/****************************************************************************/
static void bce_fw_cap_init (struct bce_softc *);
static int bce_fw_sync (struct bce_softc *, u32);
-static void bce_load_rv2p_fw (struct bce_softc *, u32 *, u32, u32);
+static void bce_load_rv2p_fw (struct bce_softc *, const u32 *, u32,
+ u32);
static void bce_load_cpu_fw (struct bce_softc *,
struct cpu_reg *, struct fw_info *);
static void bce_start_cpu (struct bce_softc *, struct cpu_reg *);
@@ -400,14 +401,12 @@ static int bce_blockinit (struct bce_softc *);
static int bce_init_tx_chain (struct bce_softc *);
static void bce_free_tx_chain (struct bce_softc *);
-static int bce_get_rx_buf (struct bce_softc *,
- struct mbuf *, u16 *, u16 *, u32 *);
+static int bce_get_rx_buf (struct bce_softc *, u16, u16, u32 *);
static int bce_init_rx_chain (struct bce_softc *);
static void bce_fill_rx_chain (struct bce_softc *);
static void bce_free_rx_chain (struct bce_softc *);
-static int bce_get_pg_buf (struct bce_softc *,
- struct mbuf *, u16 *, u16 *);
+static int bce_get_pg_buf (struct bce_softc *, u16, u16);
static int bce_init_pg_chain (struct bce_softc *);
static void bce_fill_pg_chain (struct bce_softc *);
static void bce_free_pg_chain (struct bce_softc *);
@@ -490,7 +489,7 @@ DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);
/****************************************************************************/
/* Tunable device values */
/****************************************************************************/
-SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
/* Allowable values are TRUE or FALSE */
static int bce_verbose = TRUE;
@@ -501,14 +500,14 @@ SYSCTL_INT(_hw_bce, OID_AUTO, verbose, CTLFLAG_RDTUN, &bce_verbose, 0,
/* Allowable values are TRUE or FALSE */
static int bce_tso_enable = TRUE;
TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
-SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
+SYSCTL_INT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
"TSO Enable/Disable");
/* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
/* ToDo: Add MSI-X support. */
static int bce_msi_enable = 1;
TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
-SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
+SYSCTL_INT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
"MSI-X|MSI|INTx selector");
/* Allowable values are 1, 2, 4, 8. */
@@ -808,13 +807,13 @@ bce_probe_pci_caps(device_t dev, struct bce_softc *sc)
DBENTER(BCE_VERBOSE_LOAD);
/* Check if PCI-X capability is enabled. */
- if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
+ if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0) {
if (reg != 0)
sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
}
/* Check if PCIe capability is enabled. */
- if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
+ if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
if (reg != 0) {
u16 link_status = pci_read_config(dev, reg + 0x12, 2);
DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = "
@@ -827,13 +826,13 @@ bce_probe_pci_caps(device_t dev, struct bce_softc *sc)
}
/* Check if MSI capability is enabled. */
- if (pci_find_extcap(dev, PCIY_MSI, &reg) == 0) {
+ if (pci_find_cap(dev, PCIY_MSI, &reg) == 0) {
if (reg != 0)
sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG;
}
/* Check if MSI-X capability is enabled. */
- if (pci_find_extcap(dev, PCIY_MSIX, &reg) == 0) {
+ if (pci_find_cap(dev, PCIY_MSIX, &reg) == 0) {
if (reg != 0)
sc->bce_cap_flags |= BCE_MSIX_CAPABLE_FLAG;
}
@@ -1021,7 +1020,6 @@ bce_set_tunables(struct bce_softc *sc)
sc->bce_tx_ticks = DEFAULT_TX_TICKS;
sc->bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP;
}
-
}
@@ -1334,23 +1332,6 @@ bce_attach(device_t dev)
/* Fetch the permanent Ethernet MAC address. */
bce_get_mac_addr(sc);
- /*
- * Trip points control how many BDs
- * should be ready before generating an
- * interrupt while ticks control how long
- * a BD can sit in the chain before
- * generating an interrupt. Set the default
- * values for the RX and TX chains.
- */
-
- /* Not used for L2. */
- sc->bce_comp_prod_trip_int = 0;
- sc->bce_comp_prod_trip = 0;
- sc->bce_com_ticks_int = 0;
- sc->bce_com_ticks = 0;
- sc->bce_cmd_ticks_int = 0;
- sc->bce_cmd_ticks = 0;
-
/* Update statistics once every second. */
sc->bce_stats_ticks = 1000000 & 0xffff00;
@@ -1465,7 +1446,7 @@ bce_attach(device_t dev)
/* MII child bus by attaching the PHY. */
rc = mii_attach(dev, &sc->bce_miibus, ifp, bce_ifmedia_upd,
bce_ifmedia_sts, BMSR_DEFCAPMASK, sc->bce_phy_addr,
- MII_OFFSET_ANY, MIIF_DOPAUSE | MIIF_FORCEPAUSE);
+ MII_OFFSET_ANY, MIIF_DOPAUSE);
if (rc != 0) {
BCE_PRINTF("%s(%d): attaching PHYs failed\n", __FILE__,
__LINE__);
@@ -1937,7 +1918,6 @@ bce_miibus_read_reg(device_t dev, int phy, int reg)
DB_PRINT_PHY_REG(reg, val);
return (val & 0xffff);
-
}
@@ -2099,10 +2079,12 @@ bce_miibus_statchg(device_t dev)
DBPRINT(sc, BCE_INFO_PHY,
"%s(): Enabling RX flow control.\n", __FUNCTION__);
BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
+ sc->bce_flags |= BCE_USING_RX_FLOW_CONTROL;
} else {
DBPRINT(sc, BCE_INFO_PHY,
"%s(): Disabling RX flow control.\n", __FUNCTION__);
BCE_CLRBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
+ sc->bce_flags &= ~BCE_USING_RX_FLOW_CONTROL;
}
if ((IFM_OPTIONS(media_active) & IFM_ETH_TXPAUSE) != 0) {
@@ -3039,7 +3021,6 @@ bce_get_rx_buffer_sizes(struct bce_softc *sc, int mtu)
roundup2((MSIZE - MHLEN), 16) - (MSIZE - MHLEN);
sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
sc->rx_bd_mbuf_align_pad;
- sc->pg_bd_mbuf_alloc_size = MCLBYTES;
} else {
if ((mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
ETHER_CRC_LEN) > MCLBYTES) {
@@ -3069,7 +3050,6 @@ bce_get_rx_buffer_sizes(struct bce_softc *sc, int mtu)
sc->rx_bd_mbuf_align_pad);
DBEXIT(BCE_VERBOSE_LOAD);
-
}
/****************************************************************************/
@@ -3486,8 +3466,6 @@ bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
} else {
*busaddr = segs->ds_addr;
}
-
- return;
}
@@ -3568,7 +3546,7 @@ bce_dma_alloc(device_t dev)
sc->status_block, BCE_STATUS_BLK_SZ, bce_dma_map_addr,
&sc->status_block_paddr, BUS_DMA_NOWAIT);
- if (error) {
+ if (error || sc->status_block_paddr == 0) {
BCE_PRINTF("%s(%d): Could not map status block "
"DMA memory!\n", __FILE__, __LINE__);
rc = ENOMEM;
@@ -3605,7 +3583,7 @@ bce_dma_alloc(device_t dev)
sc->stats_block, BCE_STATS_BLK_SZ, bce_dma_map_addr,
&sc->stats_block_paddr, BUS_DMA_NOWAIT);
- if(error) {
+ if (error || sc->stats_block_paddr == 0) {
BCE_PRINTF("%s(%d): Could not map statistics block "
"DMA memory!\n", __FILE__, __LINE__);
rc = ENOMEM;
@@ -3657,7 +3635,7 @@ bce_dma_alloc(device_t dev)
sc->ctx_block[i], BCM_PAGE_SIZE, bce_dma_map_addr,
&sc->ctx_paddr[i], BUS_DMA_NOWAIT);
- if (error) {
+ if (error || sc->ctx_paddr[i] == 0) {
BCE_PRINTF("%s(%d): Could not map CTX "
"DMA memory!\n", __FILE__, __LINE__);
rc = ENOMEM;
@@ -3702,7 +3680,7 @@ bce_dma_alloc(device_t dev)
BCE_TX_CHAIN_PAGE_SZ, bce_dma_map_addr,
&sc->tx_bd_chain_paddr[i], BUS_DMA_NOWAIT);
- if (error) {
+ if (error || sc->tx_bd_chain_paddr[i] == 0) {
BCE_PRINTF("%s(%d): Could not map TX descriptor "
"chain DMA memory!\n", __FILE__, __LINE__);
rc = ENOMEM;
@@ -3779,7 +3757,7 @@ bce_dma_alloc(device_t dev)
BCE_RX_CHAIN_PAGE_SZ, bce_dma_map_addr,
&sc->rx_bd_chain_paddr[i], BUS_DMA_NOWAIT);
- if (error) {
+ if (error || sc->rx_bd_chain_paddr[i] == 0) {
BCE_PRINTF("%s(%d): Could not map RX descriptor "
"chain DMA memory!\n", __FILE__, __LINE__);
rc = ENOMEM;
@@ -3795,21 +3773,17 @@ bce_dma_alloc(device_t dev)
* Create a DMA tag for RX mbufs.
*/
if (bce_hdr_split == TRUE)
- max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
+ max_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
MCLBYTES : sc->rx_bd_mbuf_alloc_size);
else
- max_size = max_seg_size = MJUM9BYTES;
- max_segments = 1;
+ max_size = MJUM9BYTES;
DBPRINT(sc, BCE_INFO_LOAD, "%s(): Creating rx_mbuf_tag "
- "(max size = 0x%jX max segments = %d, max segment "
- "size = 0x%jX)\n", __FUNCTION__, (uintmax_t) max_size,
- max_segments, (uintmax_t) max_seg_size);
+ "(max size = 0x%jX)\n", __FUNCTION__, (uintmax_t)max_size);
if (bus_dma_tag_create(sc->parent_tag, BCE_RX_BUF_ALIGN,
BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
- max_size, max_segments, max_seg_size, 0, NULL, NULL,
- &sc->rx_mbuf_tag)) {
+ max_size, 1, max_size, 0, NULL, NULL, &sc->rx_mbuf_tag)) {
BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n",
__FILE__, __LINE__);
rc = ENOMEM;
@@ -3860,7 +3834,7 @@ bce_dma_alloc(device_t dev)
BCE_PG_CHAIN_PAGE_SZ, bce_dma_map_addr,
&sc->pg_bd_chain_paddr[i], BUS_DMA_NOWAIT);
- if (error) {
+ if (error || sc->pg_bd_chain_paddr[i] == 0) {
BCE_PRINTF("%s(%d): Could not map page descriptor "
"chain DMA memory!\n", __FILE__, __LINE__);
rc = ENOMEM;
@@ -3875,12 +3849,9 @@ bce_dma_alloc(device_t dev)
/*
* Create a DMA tag for page mbufs.
*/
- max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ?
- MCLBYTES : sc->pg_bd_mbuf_alloc_size);
-
if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
- sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
- max_size, 1, max_seg_size, 0, NULL, NULL, &sc->pg_mbuf_tag)) {
+ sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
+ 1, MCLBYTES, 0, NULL, NULL, &sc->pg_mbuf_tag)) {
BCE_PRINTF("%s(%d): Could not allocate page mbuf "
"DMA tag!\n", __FILE__, __LINE__);
rc = ENOMEM;
@@ -4030,7 +4001,7 @@ bce_fw_sync_exit:
/* Nothing. */
/****************************************************************************/
static void
-bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
+bce_load_rv2p_fw(struct bce_softc *sc, const u32 *rv2p_code,
u32 rv2p_code_len, u32 rv2p_proc)
{
int i;
@@ -5246,24 +5217,28 @@ bce_blockinit(struct bce_softc *sc)
REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
BCE_ADDR_HI(sc->stats_block_paddr));
- /* Program various host coalescing parameters. */
+ /*
+ * Program various host coalescing parameters.
+ * Trip points control how many BDs should be ready before generating
+ * an interrupt while ticks control how long a BD can sit in the chain
+ * before generating an interrupt.
+ */
REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
- (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
+ (sc->bce_tx_quick_cons_trip_int << 16) |
+ sc->bce_tx_quick_cons_trip);
REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
- (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
- REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
- (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
+ (sc->bce_rx_quick_cons_trip_int << 16) |
+ sc->bce_rx_quick_cons_trip);
REG_WR(sc, BCE_HC_TX_TICKS,
(sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
REG_WR(sc, BCE_HC_RX_TICKS,
(sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
- REG_WR(sc, BCE_HC_COM_TICKS,
- (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
- REG_WR(sc, BCE_HC_CMD_TICKS,
- (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
- REG_WR(sc, BCE_HC_STATS_TICKS,
- (sc->bce_stats_ticks & 0xffff00));
+ REG_WR(sc, BCE_HC_STATS_TICKS, sc->bce_stats_ticks & 0xffff00);
REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
+ /* Not used for L2. */
+ REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 0);
+ REG_WR(sc, BCE_HC_COM_TICKS, 0);
+ REG_WR(sc, BCE_HC_CMD_TICKS, 0);
/* Configure the Host Coalescing block. */
val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
@@ -5378,29 +5353,27 @@ bce_blockinit_exit:
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
-bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
- u16 *chain_prod, u32 *prod_bseq)
+bce_get_rx_buf(struct bce_softc *sc, u16 prod, u16 chain_prod, u32 *prod_bseq)
{
- bus_dmamap_t map;
- bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
+ bus_dma_segment_t segs[1];
struct mbuf *m_new = NULL;
struct rx_bd *rxbd;
int nsegs, error, rc = 0;
#ifdef BCE_DEBUG
- u16 debug_chain_prod = *chain_prod;
+ u16 debug_chain_prod = chain_prod;
#endif
DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
/* Make sure the inputs are valid. */
- DBRUNIF((*chain_prod > MAX_RX_BD_ALLOC),
+ DBRUNIF((chain_prod > MAX_RX_BD_ALLOC),
BCE_PRINTF("%s(%d): RX producer out of range: "
"0x%04X > 0x%04X\n", __FILE__, __LINE__,
- *chain_prod, (u16) MAX_RX_BD_ALLOC));
+ chain_prod, (u16)MAX_RX_BD_ALLOC));
DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
"chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__,
- *prod, *chain_prod, *prod_bseq);
+ prod, chain_prod, *prod_bseq);
/* Update some debug statistic counters */
DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
@@ -5408,35 +5381,28 @@ bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
DBRUNIF((sc->free_rx_bd == sc->max_rx_bd),
sc->rx_empty_count++);
- /* Check whether this is a new mbuf allocation. */
- if (m == NULL) {
-
- /* Simulate an mbuf allocation failure. */
- DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
- sc->mbuf_alloc_failed_count++;
- sc->mbuf_alloc_failed_sim_count++;
- rc = ENOBUFS;
- goto bce_get_rx_buf_exit);
+ /* Simulate an mbuf allocation failure. */
+ DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
+ sc->mbuf_alloc_failed_count++;
+ sc->mbuf_alloc_failed_sim_count++;
+ rc = ENOBUFS;
+ goto bce_get_rx_buf_exit);
- /* This is a new mbuf allocation. */
- if (bce_hdr_split == TRUE)
- MGETHDR(m_new, M_DONTWAIT, MT_DATA);
- else
- m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
- sc->rx_bd_mbuf_alloc_size);
-
- if (m_new == NULL) {
- sc->mbuf_alloc_failed_count++;
- rc = ENOBUFS;
- goto bce_get_rx_buf_exit;
- }
+ /* This is a new mbuf allocation. */
+ if (bce_hdr_split == TRUE)
+ MGETHDR(m_new, M_NOWAIT, MT_DATA);
+ else
+ m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
+ sc->rx_bd_mbuf_alloc_size);
- DBRUN(sc->debug_rx_mbuf_alloc++);
- } else {
- /* Reuse an existing mbuf. */
- m_new = m;
+ if (m_new == NULL) {
+ sc->mbuf_alloc_failed_count++;
+ rc = ENOBUFS;
+ goto bce_get_rx_buf_exit;
}
+ DBRUN(sc->debug_rx_mbuf_alloc++);
+
/* Make sure we have a valid packet header. */
M_ASSERTPKTHDR(m_new);
@@ -5447,9 +5413,8 @@ bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
/* ToDo: Consider calling m_fragment() to test error handling. */
/* Map the mbuf cluster into device memory. */
- map = sc->rx_mbuf_map[*chain_prod];
- error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
- segs, &nsegs, BUS_DMA_NOWAIT);
+ error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag,
+ sc->rx_mbuf_map[chain_prod], m_new, segs, &nsegs, BUS_DMA_NOWAIT);
/* Handle any mapping errors. */
if (error) {
@@ -5470,7 +5435,7 @@ bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
__FUNCTION__, nsegs));
/* Setup the rx_bd for the segment. */
- rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
+ rxbd = &sc->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr));
rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr));
@@ -5479,15 +5444,15 @@ bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
*prod_bseq += segs[0].ds_len;
/* Save the mbuf and update our counter. */
- sc->rx_mbuf_ptr[*chain_prod] = m_new;
+ sc->rx_mbuf_ptr[chain_prod] = m_new;
sc->free_rx_bd -= nsegs;
DBRUNMSG(BCE_INSANE_RECV,
bce_dump_rx_mbuf_chain(sc, debug_chain_prod, nsegs));
DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
- "chain_prod = 0x%04X, prod_bseq = 0x%08X\n",
- __FUNCTION__, *prod, *chain_prod, *prod_bseq);
+ "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, prod,
+ chain_prod, *prod_bseq);
bce_get_rx_buf_exit:
DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
@@ -5503,68 +5468,56 @@ bce_get_rx_buf_exit:
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
-bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
- u16 *prod_idx)
+bce_get_pg_buf(struct bce_softc *sc, u16 prod, u16 prod_idx)
{
- bus_dmamap_t map;
- bus_addr_t busaddr;
+ bus_dma_segment_t segs[1];
struct mbuf *m_new = NULL;
struct rx_bd *pgbd;
- int error, rc = 0;
+ int error, nsegs, rc = 0;
#ifdef BCE_DEBUG
- u16 debug_prod_idx = *prod_idx;
+ u16 debug_prod_idx = prod_idx;
#endif
DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
/* Make sure the inputs are valid. */
- DBRUNIF((*prod_idx > MAX_PG_BD_ALLOC),
+ DBRUNIF((prod_idx > MAX_PG_BD_ALLOC),
BCE_PRINTF("%s(%d): page producer out of range: "
"0x%04X > 0x%04X\n", __FILE__, __LINE__,
- *prod_idx, (u16) MAX_PG_BD_ALLOC));
+ prod_idx, (u16)MAX_PG_BD_ALLOC));
DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
- "chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
+ "chain_prod = 0x%04X\n", __FUNCTION__, prod, prod_idx);
/* Update counters if we've hit a new low or run out of pages. */
DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
sc->pg_low_watermark = sc->free_pg_bd);
DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);
- /* Check whether this is a new mbuf allocation. */
- if (m == NULL) {
-
- /* Simulate an mbuf allocation failure. */
- DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
- sc->mbuf_alloc_failed_count++;
- sc->mbuf_alloc_failed_sim_count++;
- rc = ENOBUFS;
- goto bce_get_pg_buf_exit);
-
- /* This is a new mbuf allocation. */
- m_new = m_getcl(M_DONTWAIT, MT_DATA, 0);
- if (m_new == NULL) {
- sc->mbuf_alloc_failed_count++;
- rc = ENOBUFS;
- goto bce_get_pg_buf_exit;
- }
-
- DBRUN(sc->debug_pg_mbuf_alloc++);
- } else {
- /* Reuse an existing mbuf. */
- m_new = m;
- m_new->m_data = m_new->m_ext.ext_buf;
+ /* Simulate an mbuf allocation failure. */
+ DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
+ sc->mbuf_alloc_failed_count++;
+ sc->mbuf_alloc_failed_sim_count++;
+ rc = ENOBUFS;
+ goto bce_get_pg_buf_exit);
+
+ /* This is a new mbuf allocation. */
+ m_new = m_getcl(M_NOWAIT, MT_DATA, 0);
+ if (m_new == NULL) {
+ sc->mbuf_alloc_failed_count++;
+ rc = ENOBUFS;
+ goto bce_get_pg_buf_exit;
}
- m_new->m_len = sc->pg_bd_mbuf_alloc_size;
+ DBRUN(sc->debug_pg_mbuf_alloc++);
+
+ m_new->m_len = MCLBYTES;
/* ToDo: Consider calling m_fragment() to test error handling. */
/* Map the mbuf cluster into device memory. */
- map = sc->pg_mbuf_map[*prod_idx];
- error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *),
- sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr,
- &busaddr, BUS_DMA_NOWAIT);
+ error = bus_dmamap_load_mbuf_sg(sc->pg_mbuf_tag,
+ sc->pg_mbuf_map[prod_idx], m_new, segs, &nsegs, BUS_DMA_NOWAIT);
/* Handle any mapping errors. */
if (error) {
@@ -5578,28 +5531,32 @@ bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
goto bce_get_pg_buf_exit;
}
+ /* All mbufs must map to a single segment. */
+ KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
+ __FUNCTION__, nsegs));
+
/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */
/*
* The page chain uses the same rx_bd data structure
* as the receive chain but doesn't require a byte sequence (bseq).
*/
- pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)];
+ pgbd = &sc->pg_bd_chain[PG_PAGE(prod_idx)][PG_IDX(prod_idx)];
- pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(busaddr));
- pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(busaddr));
- pgbd->rx_bd_len = htole32(sc->pg_bd_mbuf_alloc_size);
+ pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr));
+ pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr));
+ pgbd->rx_bd_len = htole32(MCLBYTES);
pgbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
/* Save the mbuf and update our counter. */
- sc->pg_mbuf_ptr[*prod_idx] = m_new;
+ sc->pg_mbuf_ptr[prod_idx] = m_new;
sc->free_pg_bd--;
DBRUNMSG(BCE_INSANE_RECV,
bce_dump_pg_mbuf_chain(sc, debug_prod_idx, 1));
DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
- "prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
+ "prod_idx = 0x%04X\n", __FUNCTION__, prod, prod_idx);
bce_get_pg_buf_exit:
DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
@@ -5921,7 +5878,7 @@ bce_fill_rx_chain(struct bce_softc *sc)
/* Keep filling the RX chain until it's full. */
while (sc->free_rx_bd > 0) {
prod_idx = RX_CHAIN_IDX(prod);
- if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) {
+ if (bce_get_rx_buf(sc, prod, prod_idx, &prod_bseq)) {
/* Bail out if we can't add an mbuf to the chain. */
break;
}
@@ -5935,13 +5892,11 @@ bce_fill_rx_chain(struct bce_softc *sc)
/* We should never end up pointing to a next page pointer. */
DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
- __FUNCTION__, sc->rx_prod));
+ __FUNCTION__, rx_prod));
/* Write the mailbox and tell the chip about the waiting rx_bd's. */
- REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) +
- BCE_L2MQ_RX_HOST_BDIDX, sc->rx_prod);
- REG_WR(sc, MB_GET_CID_ADDR(RX_CID) +
- BCE_L2MQ_RX_HOST_BSEQ, sc->rx_prod_bseq);
+ REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX, prod);
+ REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ, prod_bseq);
DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
BCE_VERBOSE_CTX);
@@ -5976,10 +5931,9 @@ bce_free_rx_chain(struct bce_softc *sc)
/* Clear each RX chain page. */
for (i = 0; i < sc->rx_pages; i++)
- if (sc->rx_bd_chain[i] != NULL) {
+ if (sc->rx_bd_chain[i] != NULL)
bzero((char *)sc->rx_bd_chain[i],
BCE_RX_CHAIN_PAGE_SZ);
- }
sc->free_rx_bd = sc->max_rx_bd;
@@ -6043,7 +5997,7 @@ bce_init_pg_chain(struct bce_softc *sc)
CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0);
/* Configure the rx_bd and page chain mbuf cluster size. */
- val = (sc->rx_bd_mbuf_data_len << 16) | sc->pg_bd_mbuf_alloc_size;
+ val = (sc->rx_bd_mbuf_data_len << 16) | MCLBYTES;
CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val);
/* Configure the context reserved for jumbo support. */
@@ -6093,7 +6047,7 @@ bce_fill_pg_chain(struct bce_softc *sc)
/* Keep filling the page chain until it's full. */
while (sc->free_pg_bd > 0) {
prod_idx = PG_CHAIN_IDX(prod);
- if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) {
+ if (bce_get_pg_buf(sc, prod, prod_idx)) {
/* Bail out if we can't add an mbuf to the chain. */
break;
}
@@ -6105,14 +6059,14 @@ bce_fill_pg_chain(struct bce_softc *sc)
DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
- __FUNCTION__, sc->pg_prod));
+ __FUNCTION__, pg_prod));
/*
* Write the mailbox and tell the chip about
* the new rx_bd's in the page chain.
*/
- REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) +
- BCE_L2MQ_RX_HOST_PG_BDIDX, sc->pg_prod);
+ REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_PG_BDIDX,
+ prod);
DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
BCE_VERBOSE_CTX);
@@ -6345,7 +6299,7 @@ bce_ifmedia_upd_locked(struct ifnet *ifp)
/* Make sure the MII bus has been enumerated. */
if (mii) {
LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
- mii_phy_reset(miisc);
+ PHY_RESET(miisc);
error = mii_mediachg(mii);
}
}
@@ -6627,14 +6581,6 @@ bce_rx_intr(struct bce_softc *sc)
DBRUN(sc->debug_rx_mbuf_alloc--);
sc->free_rx_bd++;
- if(m0 == NULL) {
- DBPRINT(sc, BCE_EXTREME_RECV,
- "%s(): Oops! Empty mbuf pointer "
- "found in sc->rx_mbuf_ptr[0x%04X]!\n",
- __FUNCTION__, sw_rx_cons_idx);
- goto bce_rx_int_next_rx;
- }
-
/*
* Frames received on the NetXteme II are prepended
* with an l2_fhdr structure which provides status
@@ -6793,7 +6739,7 @@ bce_rx_intr(struct bce_softc *sc)
m_freem(m0);
m0 = NULL;
- goto bce_rx_int_next_rx;
+ goto bce_rx_intr_next_rx;
}
/* Send the packet to the appropriate interface. */
@@ -6804,7 +6750,6 @@ bce_rx_intr(struct bce_softc *sc)
/* Validate the checksum if offload enabled. */
if (ifp->if_capenable & IFCAP_RXCSUM) {
-
/* Check for an IP datagram. */
if (!(status & L2_FHDR_STATUS_SPLIT) &&
(status & L2_FHDR_STATUS_IP_DATAGRAM)) {
@@ -6834,7 +6779,8 @@ bce_rx_intr(struct bce_softc *sc)
}
/* Attach the VLAN tag. */
- if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
+ if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
+ !(sc->rx_mode & BCE_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
DBRUN(sc->vlan_tagged_frames_rcvd++);
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
DBRUN(sc->vlan_tagged_frames_stripped++);
@@ -6873,7 +6819,7 @@ bce_rx_intr(struct bce_softc *sc)
/* Increment received packet statistics. */
ifp->if_ipackets++;
-bce_rx_int_next_rx:
+bce_rx_intr_next_rx:
sw_rx_cons = NEXT_RX_BD(sw_rx_cons);
/* If we have a packet, pass it up the stack */
@@ -7165,10 +7111,9 @@ bce_init_locked(struct bce_softc *sc)
ether_mtu = ifp->if_mtu;
else {
if (bce_hdr_split == TRUE) {
- if (ifp->if_mtu <= (sc->rx_bd_mbuf_data_len +
- sc->pg_bd_mbuf_alloc_size))
- ether_mtu = sc->rx_bd_mbuf_data_len +
- sc->pg_bd_mbuf_alloc_size;
+ if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len + MCLBYTES)
+ ether_mtu = sc->rx_bd_mbuf_data_len +
+ MCLBYTES;
else
ether_mtu = ifp->if_mtu;
} else {
@@ -7196,9 +7141,6 @@ bce_init_locked(struct bce_softc *sc)
bce_set_rx_mode(sc);
if (bce_hdr_split == TRUE) {
- DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_mbuf_alloc_size = %d\n",
- __FUNCTION__, sc->pg_bd_mbuf_alloc_size);
-
/* Init page buffer descriptor chain. */
bce_init_pg_chain(sc);
}
@@ -7303,7 +7245,7 @@ bce_tso_setup(struct bce_softc *sc, struct mbuf **m_head, u16 *flags)
/* Controller may modify mbuf chains. */
if (M_WRITABLE(*m_head) == 0) {
- m = m_dup(*m_head, M_DONTWAIT);
+ m = m_dup(*m_head, M_NOWAIT);
m_freem(*m_head);
if (m == NULL) {
sc->mbuf_alloc_failed_count++;
@@ -7469,7 +7411,7 @@ bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
sc->mbuf_frag_count++;
/* Try to defrag the mbuf. */
- m0 = m_collapse(*m_head, M_DONTWAIT, BCE_MAX_SEGMENTS);
+ m0 = m_collapse(*m_head, M_NOWAIT, BCE_MAX_SEGMENTS);
if (m0 == NULL) {
/* Defrag was unsuccessful */
m_freem(*m_head);
@@ -7692,7 +7634,6 @@ bce_start_locked(struct ifnet *ifp)
bce_start_locked_exit:
DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
- return;
}
@@ -7891,18 +7832,42 @@ bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
static void
bce_watchdog(struct bce_softc *sc)
{
+ uint32_t status;
+
DBENTER(BCE_EXTREME_SEND);
BCE_LOCK_ASSERT(sc);
+ status = 0;
/* If the watchdog timer hasn't expired then just exit. */
if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
goto bce_watchdog_exit;
+ status = REG_RD(sc, BCE_EMAC_RX_STATUS);
/* If pause frames are active then don't reset the hardware. */
- /* ToDo: Should we reset the timer here? */
- if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
- goto bce_watchdog_exit;
+ if ((sc->bce_flags & BCE_USING_RX_FLOW_CONTROL) != 0) {
+ if ((status & BCE_EMAC_RX_STATUS_FFED) != 0) {
+ /*
+ * If link partner has us in XOFF state then wait for
+ * the condition to clear.
+ */
+ sc->watchdog_timer = BCE_TX_TIMEOUT;
+ goto bce_watchdog_exit;
+ } else if ((status & BCE_EMAC_RX_STATUS_FF_RECEIVED) != 0 &&
+ (status & BCE_EMAC_RX_STATUS_N_RECEIVED) != 0) {
+ /*
+ * If we're not currently XOFF'ed but have recently
+ * been XOFF'd/XON'd then assume that's delaying TX
+ * this time around.
+ */
+ sc->watchdog_timer = BCE_TX_TIMEOUT;
+ goto bce_watchdog_exit;
+ }
+ /*
+ * Any other condition is unexpected and the controller
+ * should be reset.
+ */
+ }
BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
__FILE__, __LINE__);
@@ -7926,6 +7891,7 @@ bce_watchdog(struct bce_softc *sc)
sc->bce_ifp->if_oerrors++;
bce_watchdog_exit:
+ REG_WR(sc, BCE_EMAC_RX_STATUS, status);
DBEXIT(BCE_EXTREME_SEND);
}
@@ -7939,7 +7905,7 @@ bce_watchdog_exit:
/* interrupt causes (PHY, TX, RX). */
/* */
/* Returns: */
-/* 0 for success, positive value for failure. */
+/* Nothing. */
/****************************************************************************/
static void
bce_intr(void *xsc)
@@ -7961,16 +7927,16 @@ bce_intr(void *xsc)
DBRUN(sc->interrupts_generated++);
/* Synchnorize before we read from interface's status block */
- bus_dmamap_sync(sc->status_tag, sc->status_map,
- BUS_DMASYNC_POSTREAD);
+ bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD);
/*
- * If the hardware status block index
- * matches the last value read by the
- * driver and we haven't asserted our
- * interrupt then there's nothing to do.
+ * If the hardware status block index matches the last value read
+ * by the driver and we haven't asserted our interrupt then there's
+ * nothing to do. This may only happen in case of INTx due to the
+ * interrupt arriving at the CPU before the status block is updated.
*/
- if ((sc->status_block->status_idx == sc->last_status_idx) &&
+ if ((sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) == 0 &&
+ sc->status_block->status_idx == sc->last_status_idx &&
(REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
BCE_PCICFG_MISC_STATUS_INTA_VALUE)) {
DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n",
@@ -8058,11 +8024,9 @@ bce_intr(void *xsc)
if ((hw_rx_cons == sc->hw_rx_cons) &&
(hw_tx_cons == sc->hw_tx_cons))
break;
-
}
- bus_dmamap_sync(sc->status_tag, sc->status_map,
- BUS_DMASYNC_PREREAD);
+ bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_PREREAD);
/* Re-enable interrupts. */
bce_enable_intr(sc, 0);
@@ -8128,8 +8092,9 @@ bce_set_rx_mode(struct bce_softc *sc)
/* Enable all multicast addresses. */
for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
- REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
- }
+ REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
+ 0xffffffff);
+ }
sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
} else {
/* Accept one or more multicast(s). */
@@ -8186,6 +8151,8 @@ bce_stats_update(struct bce_softc *sc)
ifp = sc->bce_ifp;
+ bus_dmamap_sync(sc->stats_tag, sc->stats_map, BUS_DMASYNC_POSTREAD);
+
stats = (struct statistics_block *) sc->stats_block;
/*
@@ -8493,11 +8460,7 @@ bce_tick(void *xsc)
/* Update the statistics from the hardware statistics block. */
bce_stats_update(sc);
- /*
- * ToDo: This is a safety measure. Need to re-evaluate
- * high level processing logic and eliminate this code.
- */
- /* Top off the receive and page chains. */
+ /* Ensure page and RX chains get refilled in low-memory situations. */
if (bce_hdr_split == TRUE)
bce_fill_pg_chain(sc);
bce_fill_rx_chain(sc);
@@ -8546,7 +8509,6 @@ bce_tick(void *xsc)
bce_tick_exit:
DBEXIT(BCE_EXTREME_MISC);
- return;
}
static void
@@ -8718,6 +8680,8 @@ bce_sysctl_stats_clear(SYSCTL_HANDLER_ARGS)
stats = (struct statistics_block *) sc->stats_block;
bzero(stats, sizeof(struct statistics_block));
+ bus_dmamap_sync(sc->stats_tag, sc->stats_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Clear the internal H/W statistics counters. */
REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
@@ -9162,7 +9126,7 @@ bce_add_sysctls(struct bce_softc *sc)
0, "Number of simulated l2_fhdr errors");
#endif
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
"l2fhdr_error_count",
CTLFLAG_RD, &sc->l2fhdr_error_count,
0, "Number of l2_fhdr errors");
@@ -9173,18 +9137,18 @@ bce_add_sysctls(struct bce_softc *sc)
CTLFLAG_RW, &mbuf_alloc_failed_sim_control,
0, "Debug control to force mbuf allocation failures");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
"mbuf_alloc_failed_sim_count",
CTLFLAG_RD, &sc->mbuf_alloc_failed_sim_count,
0, "Number of simulated mbuf cluster allocation failures");
#endif
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
"mbuf_alloc_failed_count",
CTLFLAG_RD, &sc->mbuf_alloc_failed_count,
0, "Number of mbuf allocation failures");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
"mbuf_frag_count",
CTLFLAG_RD, &sc->mbuf_frag_count,
0, "Number of fragmented mbufs");
@@ -9196,19 +9160,19 @@ bce_add_sysctls(struct bce_softc *sc)
0, "Debug control to force DMA mapping failures");
/* ToDo: Figure out how to update this value in bce_dma_map_addr(). */
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
"dma_map_addr_failed_sim_count",
CTLFLAG_RD, &sc->dma_map_addr_failed_sim_count,
0, "Number of simulated DMA mapping failures");
#endif
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
"dma_map_addr_rx_failed_count",
CTLFLAG_RD, &sc->dma_map_addr_rx_failed_count,
0, "Number of RX DMA mapping failures");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
"dma_map_addr_tx_failed_count",
CTLFLAG_RD, &sc->dma_map_addr_tx_failed_count,
0, "Number of TX DMA mapping failures");
@@ -9219,13 +9183,13 @@ bce_add_sysctls(struct bce_softc *sc)
CTLFLAG_RW, &unexpected_attention_sim_control,
0, "Debug control to simulate unexpected attentions");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
"unexpected_attention_sim_count",
CTLFLAG_RW, &sc->unexpected_attention_sim_count,
0, "Number of simulated unexpected attentions");
#endif
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
"unexpected_attention_count",
CTLFLAG_RW, &sc->unexpected_attention_count,
0, "Number of unexpected attentions");
@@ -9863,7 +9827,7 @@ bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
"\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
"\22M_PROMISC\23M_NOFREE",
mp->m_pkthdr.csum_flags,
- "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
+ "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP"
"\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
"\12CSUM_IP_VALID\13CSUM_DATA_VALID"
"\14CSUM_PSEUDO_HDR");
@@ -10703,6 +10667,8 @@ bce_dump_status_block(struct bce_softc *sc)
{
struct status_block *sblk;
+ bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD);
+
sblk = sc->status_block;
BCE_PRINTF(
@@ -10765,6 +10731,8 @@ bce_dump_stats_block(struct bce_softc *sc)
{
struct statistics_block *sblk;
+ bus_dmamap_sync(sc->stats_tag, sc->stats_map, BUS_DMASYNC_POSTREAD);
+
sblk = sc->stats_block;
BCE_PRINTF(
@@ -11629,7 +11597,5 @@ bce_breakpoint(struct bce_softc *sc)
/* Call the debugger. */
breakpoint();
-
- return;
}
#endif
diff --git a/freebsd/sys/dev/bce/if_bcefw.h b/freebsd/sys/dev/bce/if_bcefw.h
index 0b30bb83..8d97b31c 100644
--- a/freebsd/sys/dev/bce/if_bcefw.h
+++ b/freebsd/sys/dev/bce/if_bcefw.h
@@ -57,7 +57,7 @@ u32 bce_COM_b06FwSbssAddr = 0x08004aa0;
int bce_COM_b06FwSbssLen = 0x38;
u32 bce_COM_b06FwSDataAddr = 0x00000000;
int bce_COM_b06FwSDataLen = 0x0;
-u32 bce_COM_b06FwText[(0x4a68/4) + 1] = {
+const u32 bce_COM_b06FwText[(0x4a68/4) + 1] = {
0xa000046, 0x0, 0x0,
0xd, 0x636f6d36, 0x2e302e31, 0x35000000,
0x6000f02, 0x0, 0x3, 0xc8,
@@ -1249,14 +1249,14 @@ u32 bce_COM_b06FwText[(0x4a68/4) + 1] = {
0x440fffe, 0x24020002, 0xaf5101c0, 0xa34201c4,
0x3c021000, 0xaf4201f8, 0x8fbf0018, 0x8fb10014,
0x8fb00010, 0x3e00008, 0x27bd0020, 0x0 };
-u32 bce_COM_b06FwData[(0x0/4) + 1] = { 0x0 };
-u32 bce_COM_b06FwRodata[(0x14/4) + 1] = {
+const u32 bce_COM_b06FwData[(0x0/4) + 1] = { 0x0 };
+const u32 bce_COM_b06FwRodata[(0x14/4) + 1] = {
0x8000acc,
0x8000b14, 0x8000b98, 0x8000be4, 0x8000c20,
0x0 };
-u32 bce_COM_b06FwBss[(0xc4/4) + 1] = { 0x0 };
-u32 bce_COM_b06FwSbss[(0x38/4) + 1] = { 0x0 };
-u32 bce_COM_b06FwSdata[(0x0/4) + 1] = { 0x0 };
+const u32 bce_COM_b06FwBss[(0xc4/4) + 1] = { 0x0 };
+const u32 bce_COM_b06FwSbss[(0x38/4) + 1] = { 0x0 };
+const u32 bce_COM_b06FwSdata[(0x0/4) + 1] = { 0x0 };
int bce_RXP_b06FwReleaseMajor = 0x6;
@@ -1275,7 +1275,7 @@ u32 bce_RXP_b06FwSbssAddr = 0x08007320;
int bce_RXP_b06FwSbssLen = 0x4c;
u32 bce_RXP_b06FwSDataAddr = 0x00000000;
int bce_RXP_b06FwSDataLen = 0x0;
-u32 bce_RXP_b06FwText[(0x72d0/4) + 1] = {
+const u32 bce_RXP_b06FwText[(0x72d0/4) + 1] = {
0xa000c84, 0x0, 0x0,
0xd, 0x72787036, 0x2e302e31, 0x35000000,
0x6000f03, 0x0, 0x1, 0x0,
@@ -3114,15 +3114,15 @@ u32 bce_RXP_b06FwText[(0x72d0/4) + 1] = {
0x8fbf0020, 0x8fb3001c, 0x8fb20018, 0x8fb10014,
0x8fb00010, 0x3c021000, 0x27bd0028, 0x3e00008,
0xaf4201b8, 0x0 };
-u32 bce_RXP_b06FwData[(0x0/4) + 1] = { 0x0 };
-u32 bce_RXP_b06FwRodata[(0x24/4) + 1] = {
+const u32 bce_RXP_b06FwData[(0x0/4) + 1] = { 0x0 };
+const u32 bce_RXP_b06FwRodata[(0x24/4) + 1] = {
0x8003430,
0x8003430, 0x80033a8, 0x80033e0, 0x8003414,
0x8003438, 0x8003438, 0x8003438, 0x8003318,
0x0 };
-u32 bce_RXP_b06FwBss[(0x440/4) + 1] = { 0x0 };
-u32 bce_RXP_b06FwSbss[(0x4c/4) + 1] = { 0x0 };
-u32 bce_RXP_b06FwSdata[(0x0/4) + 1] = { 0x0 };
+const u32 bce_RXP_b06FwBss[(0x440/4) + 1] = { 0x0 };
+const u32 bce_RXP_b06FwSbss[(0x4c/4) + 1] = { 0x0 };
+const u32 bce_RXP_b06FwSdata[(0x0/4) + 1] = { 0x0 };
int bce_TPAT_b06FwReleaseMajor = 0x6;
@@ -3141,7 +3141,7 @@ u32 bce_TPAT_b06FwSbssAddr = 0x08001c00;
int bce_TPAT_b06FwSbssLen = 0x44;
u32 bce_TPAT_b06FwSDataAddr = 0x00000000;
int bce_TPAT_b06FwSDataLen = 0x0;
-u32 bce_TPAT_b06FwText[(0x17d4/4) + 1] = {
+const u32 bce_TPAT_b06FwText[(0x17d4/4) + 1] = {
0xa000124, 0x0, 0x0,
0xd, 0x74706136, 0x2e302e31, 0x35000000,
0x6000f01, 0x0, 0x0, 0x0,
@@ -3524,11 +3524,11 @@ u32 bce_TPAT_b06FwText[(0x17d4/4) + 1] = {
0x14a0fffb, 0x42042, 0xc35021, 0x8fbf0010,
0xa4c02, 0x312200ff, 0x27bd0018, 0xaf8a002c,
0x3e00008, 0xaf890030, 0x0 };
-u32 bce_TPAT_b06FwData[(0x0/4) + 1] = { 0x0 };
-u32 bce_TPAT_b06FwRodata[(0x0/4) + 1] = { 0x0 };
-u32 bce_TPAT_b06FwBss[(0x450/4) + 1] = { 0x0 };
-u32 bce_TPAT_b06FwSbss[(0x44/4) + 1] = { 0x0 };
-u32 bce_TPAT_b06FwSdata[(0x0/4) + 1] = { 0x0 };
+const u32 bce_TPAT_b06FwData[(0x0/4) + 1] = { 0x0 };
+const u32 bce_TPAT_b06FwRodata[(0x0/4) + 1] = { 0x0 };
+const u32 bce_TPAT_b06FwBss[(0x450/4) + 1] = { 0x0 };
+const u32 bce_TPAT_b06FwSbss[(0x44/4) + 1] = { 0x0 };
+const u32 bce_TPAT_b06FwSdata[(0x0/4) + 1] = { 0x0 };
int bce_TXP_b06FwReleaseMajor = 0x6;
@@ -3547,7 +3547,7 @@ u32 bce_TXP_b06FwSbssAddr = 0x08003c20;
int bce_TXP_b06FwSbssLen = 0x68;
u32 bce_TXP_b06FwSDataAddr = 0x00000000;
int bce_TXP_b06FwSDataLen = 0x0;
-u32 bce_TXP_b06FwText[(0x3bfc/4) + 1] = {
+const u32 bce_TXP_b06FwText[(0x3bfc/4) + 1] = {
0xa00002a, 0x0, 0x0,
0xd, 0x74787036, 0x2e302e31, 0x35000000,
0x6000f00, 0x0, 0x136, 0xea60,
@@ -4509,11 +4509,11 @@ u32 bce_TXP_b06FwText[(0x3bfc/4) + 1] = {
0x3c010800, 0xac243d58, 0x3c010800, 0xac233d68,
0x3c010800, 0xac223d60, 0x3e00008, 0x0,
0x0 };
-u32 bce_TXP_b06FwData[(0x0/4) + 1] = { 0x0 };
-u32 bce_TXP_b06FwRodata[(0x0/4) + 1] = { 0x0 };
-u32 bce_TXP_b06FwBss[(0x14c/4) + 1] = { 0x0 };
-u32 bce_TXP_b06FwSbss[(0x68/4) + 1] = { 0x0 };
-u32 bce_TXP_b06FwSdata[(0x0/4) + 1] = { 0x0 };
+const u32 bce_TXP_b06FwData[(0x0/4) + 1] = { 0x0 };
+const u32 bce_TXP_b06FwRodata[(0x0/4) + 1] = { 0x0 };
+const u32 bce_TXP_b06FwBss[(0x14c/4) + 1] = { 0x0 };
+const u32 bce_TXP_b06FwSbss[(0x68/4) + 1] = { 0x0 };
+const u32 bce_TXP_b06FwSdata[(0x0/4) + 1] = { 0x0 };
int bce_CP_b06FwReleaseMajor = 0x6;
@@ -4532,7 +4532,7 @@ u32 bce_CP_b06FwSbssAddr = 0x08005884;
int bce_CP_b06FwSbssLen = 0xf1;
u32 bce_CP_b06FwSDataAddr = 0x00000000;
int bce_CP_b06FwSDataLen = 0x0;
-u32 bce_CP_b06FwText[(0x5688/4) + 1] = {
+const u32 bce_CP_b06FwText[(0x5688/4) + 1] = {
0xa000028, 0x0, 0x0,
0xd, 0x6370362e, 0x302e3135, 0x0,
0x6000f04, 0x0, 0x0, 0x0,
@@ -5918,7 +5918,7 @@ u32 bce_CP_b06FwText[(0x5688/4) + 1] = {
0x27bd0030, 0x8f83001c, 0x8c620004, 0x10400003,
0x0, 0x3e00008, 0x0, 0x8c640010,
0x8c650008, 0xa001527, 0x8c66000c, 0x0 };
-u32 bce_CP_b06FwData[(0x84/4) + 1] = {
+const u32 bce_CP_b06FwData[(0x84/4) + 1] = {
0x0, 0x1b, 0xf,
0xa, 0x8, 0x6, 0x5,
0x5, 0x4, 0x4, 0x3,
@@ -5928,7 +5928,7 @@ u32 bce_CP_b06FwData[(0x84/4) + 1] = {
0x2, 0x2, 0x2, 0x2,
0x2, 0x2, 0x2, 0x1,
0x1, 0x1, 0x0 };
-u32 bce_CP_b06FwRodata[(0x158/4) + 1] = {
+const u32 bce_CP_b06FwRodata[(0x158/4) + 1] = {
0x8000f24, 0x8000d6c, 0x8000fb8,
0x8001060, 0x8000f4c, 0x8000f8c, 0x8001194,
0x8000d88, 0x80011b8, 0x8000dd8, 0x8001554,
@@ -5951,12 +5951,12 @@ u32 bce_CP_b06FwRodata[(0x158/4) + 1] = {
0x8002e1c, 0x8002de4, 0x8002df0, 0x8002dfc,
0x8002e08, 0x80052e8, 0x80052a8, 0x8005274,
0x8005248, 0x8005224, 0x80051e0, 0x0 };
-u32 bce_CP_b06FwBss[(0x5d8/4) + 1] = { 0x0 };
-u32 bce_CP_b06FwSbss[(0xf1/4) + 1] = { 0x0 };
-u32 bce_CP_b06FwSdata[(0x0/4) + 1] = { 0x0 };
+const u32 bce_CP_b06FwBss[(0x5d8/4) + 1] = { 0x0 };
+const u32 bce_CP_b06FwSbss[(0xf1/4) + 1] = { 0x0 };
+const u32 bce_CP_b06FwSdata[(0x0/4) + 1] = { 0x0 };
-u32 bce_rv2p_proc1[] = {
+const u32 bce_rv2p_proc1[] = {
0x00000010, 0xb1800006,
0x0000001f, 0x0106000f,
0x00000008, 0x0500ffff,
@@ -6681,7 +6681,7 @@ u32 bce_TXP_b09FwSbssAddr = 0x08003d88;
int bce_TXP_b09FwSbssLen = 0x64;
u32 bce_TXP_b09FwSDataAddr = 0x00000000;
int bce_TXP_b09FwSDataLen = 0x0;
-u32 bce_TXP_b09FwText[(0x3d28/4) + 1] = {
+const u32 bce_TXP_b09FwText[(0x3d28/4) + 1] = {
0xa00002a, 0x0, 0x0,
0xd, 0x74787036, 0x2e302e31, 0x37000000,
0x6001100, 0x0, 0x136, 0xea60,
@@ -7661,15 +7661,15 @@ u32 bce_TXP_b09FwText[(0x3d28/4) + 1] = {
0xac263fcc, 0x3c010800, 0xac253fc4, 0x3c010800,
0xac243fc0, 0x3c010800, 0xac233fd0, 0x3c010800,
0xac223fc8, 0x3e00008, 0x0, 0x0 };
-u32 bce_TXP_b09FwData[(0x0/4) + 1] = { 0x0 };
-u32 bce_TXP_b09FwRodata[(0x30/4) + 1] = {
+const u32 bce_TXP_b09FwData[(0x0/4) + 1] = { 0x0 };
+const u32 bce_TXP_b09FwRodata[(0x30/4) + 1] = {
0x80000940, 0x80000900, 0x80080100,
0x80080080, 0x80080000, 0x800e0000, 0x80080080,
0x80080000, 0x80000a80, 0x80000a00, 0x80000980,
0x80000900, 0x0 };
-u32 bce_TXP_b09FwBss[(0x24c/4) + 1] = { 0x0 };
-u32 bce_TXP_b09FwSbss[(0x64/4) + 1] = { 0x0 };
-u32 bce_TXP_b09FwSdata[(0x0/4) + 1] = { 0x0 };
+const u32 bce_TXP_b09FwBss[(0x24c/4) + 1] = { 0x0 };
+const u32 bce_TXP_b09FwSbss[(0x64/4) + 1] = { 0x0 };
+const u32 bce_TXP_b09FwSdata[(0x0/4) + 1] = { 0x0 };
int bce_TPAT_b09FwReleaseMajor = 0x6;
@@ -7688,7 +7688,7 @@ u32 bce_TPAT_b09FwSbssAddr = 0x08001720;
int bce_TPAT_b09FwSbssLen = 0x3c;
u32 bce_TPAT_b09FwSDataAddr = 0x00000000;
int bce_TPAT_b09FwSDataLen = 0x0;
-u32 bce_TPAT_b09FwText[(0x12fc/4) + 1] = {
+const u32 bce_TPAT_b09FwText[(0x12fc/4) + 1] = {
0xa000124, 0x0, 0x0,
0xd, 0x74706136, 0x2e302e31, 0x37000000,
0x6001101, 0x0, 0x0, 0x0,
@@ -7994,12 +7994,12 @@ u32 bce_TPAT_b09FwText[(0x12fc/4) + 1] = {
0x0, 0x0, 0x2402ffff, 0x2463ffff,
0x1462fffa, 0x24840004, 0x3e00008, 0x0,
0x0 };
-u32 bce_TPAT_b09FwData[(0x0/4) + 1] = { 0x0 };
-u32 bce_TPAT_b09FwRodata[(0x4/4) + 1] = {
+const u32 bce_TPAT_b09FwData[(0x0/4) + 1] = { 0x0 };
+const u32 bce_TPAT_b09FwRodata[(0x4/4) + 1] = {
0x1, 0x0 };
-u32 bce_TPAT_b09FwBss[(0x12b4/4) + 1] = { 0x0 };
-u32 bce_TPAT_b09FwSbss[(0x3c/4) + 1] = { 0x0 };
-u32 bce_TPAT_b09FwSdata[(0x0/4) + 1] = { 0x0 };
+const u32 bce_TPAT_b09FwBss[(0x12b4/4) + 1] = { 0x0 };
+const u32 bce_TPAT_b09FwSbss[(0x3c/4) + 1] = { 0x0 };
+const u32 bce_TPAT_b09FwSdata[(0x0/4) + 1] = { 0x0 };
int bce_COM_b09FwReleaseMajor = 0x6;
@@ -8018,7 +8018,7 @@ u32 bce_COM_b09FwSbssAddr = 0x08005608;
int bce_COM_b09FwSbssLen = 0x30;
u32 bce_COM_b09FwSDataAddr = 0x00000000;
int bce_COM_b09FwSDataLen = 0x0;
-u32 bce_COM_b09FwText[(0x5594/4) + 1] = {
+const u32 bce_COM_b09FwText[(0x5594/4) + 1] = {
0xa000046, 0x0, 0x0,
0xd, 0x636f6d36, 0x2e302e31, 0x37000000,
0x6001102, 0x0, 0x3, 0xc8,
@@ -9389,15 +9389,15 @@ u32 bce_COM_b09FwText[(0x5594/4) + 1] = {
0x40f809, 0x0, 0xa001560, 0x0,
0xd, 0x3c1c0800, 0x279c5608, 0x8fbf0010,
0x3e00008, 0x27bd0018, 0x0 };
-u32 bce_COM_b09FwData[(0x0/4) + 1] = { 0x0 };
-u32 bce_COM_b09FwRodata[(0x38/4) + 1] = {
+const u32 bce_COM_b09FwData[(0x0/4) + 1] = { 0x0 };
+const u32 bce_COM_b09FwRodata[(0x38/4) + 1] = {
0x80080240, 0x80080100, 0x80080080,
0x80080000, 0xc80, 0x3200, 0x8000e98,
0x8000ef4, 0x8000f88, 0x8001028, 0x8001074,
0x80080100, 0x80080080, 0x80080000, 0x0 };
-u32 bce_COM_b09FwBss[(0x11c/4) + 1] = { 0x0 };
-u32 bce_COM_b09FwSbss[(0x30/4) + 1] = { 0x0 };
-u32 bce_COM_b09FwSdata[(0x0/4) + 1] = { 0x0 };
+const u32 bce_COM_b09FwBss[(0x11c/4) + 1] = { 0x0 };
+const u32 bce_COM_b09FwSbss[(0x30/4) + 1] = { 0x0 };
+const u32 bce_COM_b09FwSdata[(0x0/4) + 1] = { 0x0 };
int bce_RXP_b09FwReleaseMajor = 0x6;
@@ -9416,7 +9416,7 @@ u32 bce_RXP_b09FwSbssAddr = 0x08009400;
int bce_RXP_b09FwSbssLen = 0x78;
u32 bce_RXP_b09FwSDataAddr = 0x00000000;
int bce_RXP_b09FwSDataLen = 0x0;
-u32 bce_RXP_b09FwText[(0x9090/4) + 1] = {
+const u32 bce_RXP_b09FwText[(0x9090/4) + 1] = {
0xa000c84, 0x0, 0x0,
0xd, 0x72787036, 0x2e302e31, 0x37000000,
0x6001103, 0x0, 0x1, 0x0,
@@ -11786,9 +11786,9 @@ u32 bce_RXP_b09FwRodata[(0x33c/4) + 1] = {
0x8007fc0, 0x8007fc0, 0x8007fc0, 0x8007fc0,
0x8007fe8, 0x8008b6c, 0x8008cc8, 0x8008ca8,
0x8008710, 0x8008b84, 0x0 };
-u32 bce_RXP_b09FwBss[(0x1bc/4) + 1] = { 0x0 };
-u32 bce_RXP_b09FwSbss[(0x78/4) + 1] = { 0x0 };
-u32 bce_RXP_b09FwSdata[(0x0/4) + 1] = { 0x0 };
+const u32 bce_RXP_b09FwBss[(0x1bc/4) + 1] = { 0x0 };
+const u32 bce_RXP_b09FwSbss[(0x78/4) + 1] = { 0x0 };
+const u32 bce_RXP_b09FwSdata[(0x0/4) + 1] = { 0x0 };
int bce_CP_b09FwReleaseMajor = 0x6;
@@ -11807,7 +11807,7 @@ u32 bce_CP_b09FwSbssAddr = 0x080059b0;
int bce_CP_b09FwSbssLen = 0xa8;
u32 bce_CP_b09FwSDataAddr = 0x00000000;
int bce_CP_b09FwSDataLen = 0x0;
-u32 bce_CP_b09FwText[(0x5744/4) + 1] = {
+const u32 bce_CP_b09FwText[(0x5744/4) + 1] = {
0xa000028, 0x0, 0x0,
0xd, 0x6370362e, 0x302e3137, 0x0,
0x6001104, 0x0, 0x0, 0x0,
@@ -13205,7 +13205,7 @@ u32 bce_CP_b09FwText[(0x5744/4) + 1] = {
0xa00156a, 0x8fbf001c, 0xe0010d1, 0x0,
0x5040ff9e, 0x8fbf001c, 0x9259007d, 0x3330003f,
0xa0015c6, 0x36020040, 0x0 };
-u32 bce_CP_b09FwData[(0x84/4) + 1] = {
+const u32 bce_CP_b09FwData[(0x84/4) + 1] = {
0x0, 0x1b, 0xf,
0xa, 0x8, 0x6, 0x5,
0x5, 0x4, 0x4, 0x3,
@@ -13215,7 +13215,7 @@ u32 bce_CP_b09FwData[(0x84/4) + 1] = {
0x2, 0x2, 0x2, 0x2,
0x2, 0x2, 0x2, 0x1,
0x1, 0x1, 0x0 };
-u32 bce_CP_b09FwRodata[(0x1c0/4) + 1] = {
+const u32 bce_CP_b09FwRodata[(0x1c0/4) + 1] = {
0x80080100,
0x80080080, 0x80080000, 0xc00, 0x3080,
0x80011d0, 0x800127c, 0x8001294, 0x80012a8,
@@ -13245,12 +13245,12 @@ u32 bce_CP_b09FwRodata[(0x1c0/4) + 1] = {
0x80080080, 0x80080000, 0x80080080, 0x8004c64,
0x8004c9c, 0x8004be4, 0x8004c64, 0x8004c64,
0x80049b8, 0x8004c64, 0x8005050, 0x0 };
-u32 bce_CP_b09FwBss[(0x19c/4) + 1] = { 0x0 };
-u32 bce_CP_b09FwSbss[(0xa8/4) + 1] = { 0x0 };
-u32 bce_CP_b09FwSdata[(0x0/4) + 1] = { 0x0 };
+const u32 bce_CP_b09FwBss[(0x19c/4) + 1] = { 0x0 };
+const u32 bce_CP_b09FwSbss[(0xa8/4) + 1] = { 0x0 };
+const u32 bce_CP_b09FwSdata[(0x0/4) + 1] = { 0x0 };
-u32 bce_xi_rv2p_proc1[] = {
+const u32 bce_xi_rv2p_proc1[] = {
0x00000010, 0xb1800006,
0x0000001f, 0x05060011,
0x00000008, 0x0500ffff,
@@ -13541,7 +13541,7 @@ u32 bce_xi_rv2p_proc1[] = {
};
-u32 bce_xi_rv2p_proc2[] = {
+const u32 bce_xi_rv2p_proc2[] = {
0x00000010, 0xb1800004,
0x0000001f, 0x05060011,
0x00000008, 0x050000ff,
@@ -14008,9 +14008,9 @@ u32 bce_xi_rv2p_proc2[] = {
0x00000010, 0x001f0000,
0x00000018, 0x8000fe35,
};
-
-u32 bce_xi90_rv2p_proc1[] = {
+
+const u32 bce_xi90_rv2p_proc1[] = {
0x00000010, 0xb1800006,
0x0000001f, 0x03060011,
0x00000008, 0x0500ffff,
@@ -14316,7 +14316,7 @@ u32 bce_xi90_rv2p_proc1[] = {
};
-u32 bce_xi90_rv2p_proc2[] = {
+const u32 bce_xi90_rv2p_proc2[] = {
0x00000010, 0xb1800004,
0x0000001f, 0x03060011,
0x00000008, 0x050000ff,
@@ -14849,6 +14849,3 @@ u32 bce_xi90_rv2p_proc2[] = {
bce_rv2p_proc2[BCE_RV2P_PROC2_MAX_BD_PAGE_LOC] = \
(bce_rv2p_proc2[BCE_RV2P_PROC2_MAX_BD_PAGE_LOC] & ~0xFFFF) | (value); \
}
-
-
-
diff --git a/freebsd/sys/dev/bce/if_bcereg.h b/freebsd/sys/dev/bce/if_bcereg.h
index b043df5b..450180bd 100644
--- a/freebsd/sys/dev/bce/if_bcereg.h
+++ b/freebsd/sys/dev/bce/if_bcereg.h
@@ -32,10 +32,6 @@
#ifndef _BCEREG_H_DEFINED
#define _BCEREG_H_DEFINED
-#ifdef HAVE_KERNEL_OPTION_HEADERS
-#include <rtems/bsd/local/opt_device_polling.h>
-#endif
-
#include <rtems/bsd/sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
@@ -6337,13 +6333,13 @@ struct fw_info {
u32 bss_addr;
u32 bss_len;
u32 bss_index;
- u32 *bss;
+ const u32 *bss;
/* Read-only section. */
u32 rodata_addr;
u32 rodata_len;
u32 rodata_index;
- u32 *rodata;
+ const u32 *rodata;
};
#define RV2P_PROC1 0
@@ -6422,6 +6418,8 @@ struct fw_info {
struct bce_softc
{
+ struct mtx bce_mtx;
+
/* Interface info */
struct ifnet *bce_ifp;
@@ -6449,8 +6447,6 @@ struct bce_softc
/* IRQ Resource Handle */
struct resource *bce_res_irq;
- struct mtx bce_mtx;
-
/* Interrupt handler. */
void *bce_intrhand;
@@ -6470,6 +6466,7 @@ struct bce_softc
#define BCE_USING_MSIX_FLAG 0x00000100
#define BCE_PCIE_FLAG 0x00000200
#define BCE_USING_TX_FLOW_CONTROL 0x00000400
+#define BCE_USING_RX_FLOW_CONTROL 0x00000800
/* Controller capability flags. */
u32 bce_cap_flags;
@@ -6564,14 +6561,6 @@ struct bce_softc
u16 bce_rx_ticks;
u32 bce_stats_ticks;
- /* ToDo: Can these be removed? */
- u16 bce_comp_prod_trip_int;
- u16 bce_comp_prod_trip;
- u16 bce_com_ticks_int;
- u16 bce_com_ticks;
- u16 bce_cmd_ticks_int;
- u16 bce_cmd_ticks;
-
/* The address of the integrated PHY on the MII bus. */
int bce_phy_addr;
@@ -6604,11 +6593,9 @@ struct bce_softc
int watchdog_timer;
/* Frame size and mbuf allocation size for RX frames. */
- u32 max_frame_size;
int rx_bd_mbuf_alloc_size;
int rx_bd_mbuf_data_len;
int rx_bd_mbuf_align_pad;
- int pg_bd_mbuf_alloc_size;
/* Receive mode settings (i.e promiscuous, multicast, etc.). */
u32 rx_mode;