Diffstat (limited to 'freebsd/sys/dev/e1000/e1000_ich8lan.c')
-rw-r--r--  freebsd/sys/dev/e1000/e1000_ich8lan.c | 806
1 file changed, 606 insertions, 200 deletions
diff --git a/freebsd/sys/dev/e1000/e1000_ich8lan.c b/freebsd/sys/dev/e1000/e1000_ich8lan.c
index e843527e..b5c75f26 100644
--- a/freebsd/sys/dev/e1000/e1000_ich8lan.c
+++ b/freebsd/sys/dev/e1000/e1000_ich8lan.c
@@ -2,7 +2,7 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -61,6 +61,14 @@
* 82578DC Gigabit Network Connection
* 82579LM Gigabit Network Connection
* 82579V Gigabit Network Connection
+ * Ethernet Connection I217-LM
+ * Ethernet Connection I217-V
+ * Ethernet Connection I218-V
+ * Ethernet Connection I218-LM
+ * Ethernet Connection (2) I218-LM
+ * Ethernet Connection (2) I218-V
+ * Ethernet Connection (3) I218-LM
+ * Ethernet Connection (3) I218-V
*/
#include "e1000_api.h"
@@ -71,8 +79,9 @@ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
u8 *mc_addr_list,
u32 mc_addr_count);
@@ -183,8 +192,9 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
u16 phy_reg = 0;
u32 phy_id = 0;
- s32 ret_val;
+ s32 ret_val = 0;
u16 retry_count;
+ u32 mac_reg = 0;
for (retry_count = 0; retry_count < 2; retry_count++) {
ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
@@ -203,23 +213,84 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
if (hw->phy.id) {
if (hw->phy.id == phy_id)
- return TRUE;
+ goto out;
} else if (phy_id) {
hw->phy.id = phy_id;
hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
- return TRUE;
+ goto out;
}
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
- hw->phy.ops.release(hw);
- ret_val = e1000_set_mdio_slow_mode_hv(hw);
- if (!ret_val)
- ret_val = e1000_get_phy_id(hw);
- hw->phy.ops.acquire(hw);
+ if (hw->mac.type < e1000_pch_lpt) {
+ hw->phy.ops.release(hw);
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (!ret_val)
+ ret_val = e1000_get_phy_id(hw);
+ hw->phy.ops.acquire(hw);
+ }
+
+ if (ret_val)
+ return FALSE;
+out:
+ if (hw->mac.type == e1000_pch_lpt) {
+ /* Unforce SMBus mode in PHY */
+ hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Unforce SMBus mode in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+ }
+
+ return TRUE;
+}
+
+/**
+ * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
+ * @hw: pointer to the HW structure
+ *
+ * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
+ * used to reset the PHY to a quiescent state when necessary.
+ **/
+static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
+{
+ u32 mac_reg;
+
+ DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
+
+ /* Set Phy Config Counter to 50msec */
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
+ mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+ mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
+
+ /* Toggle LANPHYPC Value bit */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL);
+ mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
+ E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(10);
+ mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+ E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
+ E1000_WRITE_FLUSH(hw);
+
+ if (hw->mac.type < e1000_pch_lpt) {
+ msec_delay(50);
+ } else {
+ u16 count = 20;
+
+ do {
+ msec_delay(5);
+ } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
+ E1000_CTRL_EXT_LPCD) && count--);
- return !ret_val;
+ msec_delay(30);
+ }
}
/**
@@ -233,7 +304,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
s32 ret_val;
- u16 phy_reg;
DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
@@ -242,6 +312,12 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
+ /* It is not possible to be certain of the current state of ULP
+ * so forcibly disable it.
+ */
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
+ e1000_disable_ulp_lpt_lp(hw, TRUE);
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
DEBUGOUT("Failed to initialize PHY flow\n");
@@ -264,24 +340,16 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+ /* Wait 50 milliseconds for MAC to finish any retries
+ * that it might be trying to perform from previous
+ * attempts to acknowledge any phy read requests.
+ */
+ msec_delay(50);
+
/* fall-through */
case e1000_pch2lan:
- if (e1000_phy_is_accessible_pchlan(hw)) {
- if (hw->mac.type == e1000_pch_lpt) {
- /* Unforce SMBus mode in PHY */
- hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL,
- &phy_reg);
- phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
- hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL,
- phy_reg);
-
- /* Unforce SMBus mode in MAC */
- mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
- mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
- }
+ if (e1000_phy_is_accessible_pchlan(hw))
break;
- }
/* fall-through */
case e1000_pchlan:
@@ -291,44 +359,27 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
if (hw->phy.ops.check_reset_block(hw)) {
DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
+ ret_val = -E1000_ERR_PHY;
break;
}
- DEBUGOUT("Toggling LANPHYPC\n");
-
- /* Set Phy Config Counter to 50msec */
- mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
- mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
- mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
- E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
+ /* Toggle LANPHYPC Value bit */
+ e1000_toggle_lanphypc_pch_lpt(hw);
+ if (hw->mac.type >= e1000_pch_lpt) {
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
- if (hw->mac.type == e1000_pch_lpt) {
/* Toggling LANPHYPC brings the PHY out of SMBus mode
- * So ensure that the MAC is also out of SMBus mode
+ * so ensure that the MAC is also out of SMBus mode
*/
mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
- }
- /* Toggle LANPHYPC Value bit */
- mac_reg = E1000_READ_REG(hw, E1000_CTRL);
- mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
- mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
- E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
- E1000_WRITE_FLUSH(hw);
- usec_delay(10);
- mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
- E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
- E1000_WRITE_FLUSH(hw);
- if (hw->mac.type < e1000_pch_lpt) {
- msec_delay(50);
- } else {
- u16 count = 20;
- do {
- msec_delay(5);
- } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
- E1000_CTRL_EXT_LPCD) && count--);
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ ret_val = -E1000_ERR_PHY;
}
break;
default:
@@ -336,13 +387,33 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
}
hw->phy.ops.release(hw);
+ if (!ret_val) {
- /* Reset the PHY before any access to it. Doing so, ensures
- * that the PHY is in a known good state before we read/write
- * PHY registers. The generic reset is sufficient here,
- * because we haven't determined the PHY type yet.
- */
- ret_val = e1000_phy_hw_reset_generic(hw);
+ /* Check to see if able to reset PHY. Print error if not */
+ if (hw->phy.ops.check_reset_block(hw)) {
+ ERROR_REPORT("Reset blocked by ME\n");
+ goto out;
+ }
+
+ /* Reset the PHY before any access to it. Doing so, ensures
+ * that the PHY is in a known good state before we read/write
+ * PHY registers. The generic reset is sufficient here,
+ * because we haven't determined the PHY type yet.
+ */
+ ret_val = e1000_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ /* On a successful reset, possibly need to wait for the PHY
+ * to quiesce to an accessible state before returning control
+ * to the calling function. If the PHY does not quiesce, then
+ * return E1000E_BLK_PHY_RESET, as this is the condition that
+ * the PHY is in.
+ */
+ ret_val = hw->phy.ops.check_reset_block(hw);
+ if (ret_val)
+ ERROR_REPORT("ME blocked access to PHY after reset\n");
+ }
out:
/* Ungate automatic PHY configuration on non-managed 82579 */
@@ -552,13 +623,12 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_init_nvm_params_ich8lan");
/* Can't read flash registers if the register set isn't mapped. */
+ nvm->type = e1000_nvm_flash_sw;
if (!hw->flash_address) {
DEBUGOUT("ERROR: Flash registers not mapped\n");
return -E1000_ERR_CONFIG;
}
- nvm->type = e1000_nvm_flash_sw;
-
gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
/* sector_X_addr is a "sector"-aligned address (4096 bytes)
@@ -574,8 +644,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
/* find total size of the NVM, then cut in half since the total
* size represents two separate NVM banks.
*/
- nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
- << FLASH_SECTOR_ADDR_SHIFT;
+ nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
+ << FLASH_SECTOR_ADDR_SHIFT);
nvm->flash_bank_size /= 2;
/* Adjust to word count */
nvm->flash_bank_size /= sizeof(u16);
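For illustration, the bank-size arithmetic above works out as follows; the sector numbers are made up, and 4096-byte flash sectors (FLASH_SECTOR_ADDR_SHIFT == 12) are assumed:

    /* Hypothetical GFPREG contents: sectors 1 through 5. */
    u32 sector_base_addr = 0x001;
    u32 sector_end_addr = 0x005;
    u32 bank_size;

    bank_size = (sector_end_addr - sector_base_addr)
        << FLASH_SECTOR_ADDR_SHIFT; /* 4 sectors = 16384 bytes total */
    bank_size /= 2;                 /* 8192 bytes per NVM bank */
    bank_size /= sizeof(u16);       /* 4096 words per NVM bank */
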
@@ -768,7 +838,7 @@ s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
*
* Assumes the SW/FW/HW Semaphore is already acquired.
**/
-static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
+s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
DEBUGFUNC("e1000_read_emi_reg_locked");
@@ -782,18 +852,35 @@ static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
* Enable/disable EEE based on setting in dev_spec structure, the duplex of
* the link and the EEE capabilities of the link partner. The LPI Control
* register bits will remain set only if/when link is up.
+ *
+ * EEE LPI must not be asserted earlier than one second after link is up.
+ * On 82579, EEE LPI should not be enabled until such time otherwise there
+ * can be link issues with some switches. Other devices can have EEE LPI
+ * enabled immediately upon link up since they have a timer in hardware which
+ * prevents LPI from being asserted too early.
**/
-static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
s32 ret_val;
- u16 lpi_ctrl;
+ u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
DEBUGFUNC("e1000_set_eee_pchlan");
- if ((hw->phy.type != e1000_phy_82579) &&
- (hw->phy.type != e1000_phy_i217))
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ lpa = I82579_EEE_LP_ABILITY;
+ pcs_status = I82579_EEE_PCS_STATUS;
+ adv_addr = I82579_EEE_ADVERTISEMENT;
+ break;
+ case e1000_phy_i217:
+ lpa = I217_EEE_LP_ABILITY;
+ pcs_status = I217_EEE_PCS_STATUS;
+ adv_addr = I217_EEE_ADVERTISEMENT;
+ break;
+ default:
return E1000_SUCCESS;
+ }
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
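One way a caller could honor the 82579 rule described above, deferring EEE LPI until the link has been up for a second; the link-tracking variables here are hypothetical and not part of this patch, while ticks and hz are the usual FreeBSD kernel globals:

    /* Hypothetical watchdog-context check in the driver. */
    if (hw->phy.type != e1000_phy_82579 ||
        (link_is_up && (ticks - link_up_ticks) >= hz))
        (void)e1000_set_eee_pchlan(hw); /* safe to enable LPI now */
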
@@ -808,34 +895,24 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
/* Enable EEE if not disabled by user */
if (!dev_spec->eee_disable) {
- u16 lpa, pcs_status, data;
-
/* Save off link partner's EEE ability */
- switch (hw->phy.type) {
- case e1000_phy_82579:
- lpa = I82579_EEE_LP_ABILITY;
- pcs_status = I82579_EEE_PCS_STATUS;
- break;
- case e1000_phy_i217:
- lpa = I217_EEE_LP_ABILITY;
- pcs_status = I217_EEE_PCS_STATUS;
- break;
- default:
- ret_val = -E1000_ERR_PHY;
- goto release;
- }
ret_val = e1000_read_emi_reg_locked(hw, lpa,
&dev_spec->eee_lp_ability);
if (ret_val)
goto release;
+ /* Read EEE advertisement */
+ ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
+ if (ret_val)
+ goto release;
+
/* Enable EEE only for speeds in which the link partner is
- * EEE capable.
+ * EEE capable and for which we advertise EEE.
*/
- if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
+ if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
- if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
+ if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
if (data & NWAY_LPAR_100TX_FD_CAPS)
lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
@@ -847,13 +924,24 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
dev_spec->eee_lp_ability &=
~I82579_EEE_100_SUPPORTED;
}
+ }
- /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
- ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
+ if (hw->phy.type == e1000_phy_82579) {
+ ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+ &data);
if (ret_val)
goto release;
+
+ data &= ~I82579_LPI_100_PLL_SHUT;
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+ data);
}
+ /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
+ if (ret_val)
+ goto release;
+
ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
hw->phy.ops.release(hw);
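The three-way AND introduced above enables LPI for a speed only when both ends advertise EEE at that speed. A worked example with made-up register contents:

    /* adv = local advertisement, lpa = link partner ability. */
    u16 adv = I82579_EEE_1000_SUPPORTED;        /* we: 1000 only */
    u16 lpa = I82579_EEE_1000_SUPPORTED |
              I82579_EEE_100_SUPPORTED;         /* partner: both */

    if (adv & lpa & I82579_EEE_1000_SUPPORTED)  /* true */
        lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
    if (adv & lpa & I82579_EEE_100_SUPPORTED)   /* false: not advertised */
        lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
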
@@ -869,30 +957,31 @@ release:
* When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
* preventing further DMA write requests. Workaround the issue by disabling
* the de-assertion of the clock request when in 1Gbps mode.
+ * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
+ * speeds in order to avoid Tx hangs.
**/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
+ u32 status = E1000_READ_REG(hw, E1000_STATUS);
s32 ret_val = E1000_SUCCESS;
+ u16 reg;
- if (link && (E1000_READ_REG(hw, E1000_STATUS) &
- E1000_STATUS_SPEED_1000)) {
- u16 kmrn_reg;
-
+ if (link && (status & E1000_STATUS_SPEED_1000)) {
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
ret_val =
e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
- &kmrn_reg);
+ &reg);
if (ret_val)
goto release;
ret_val =
e1000_write_kmrn_reg_locked(hw,
E1000_KMRNCTRLSTA_K1_CONFIG,
- kmrn_reg &
+ reg &
~E1000_KMRNCTRLSTA_K1_ENABLE);
if (ret_val)
goto release;
@@ -905,13 +994,45 @@ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
ret_val =
e1000_write_kmrn_reg_locked(hw,
E1000_KMRNCTRLSTA_K1_CONFIG,
- kmrn_reg);
+ reg);
release:
hw->phy.ops.release(hw);
} else {
/* clear FEXTNVM6 bit 8 on link down or 10/100 */
- E1000_WRITE_REG(hw, E1000_FEXTNVM6,
- fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
+
+ if (!link || ((status & E1000_STATUS_SPEED_100) &&
+ (status & E1000_STATUS_FD)))
+ goto update_fextnvm6;
+
+ ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear link status transmit timeout */
+ reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
+
+ if (status & E1000_STATUS_SPEED_100) {
+ /* Set inband Tx timeout to 5x10us for 100Half */
+ reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+ /* Do not extend the K1 entry latency for 100Half */
+ fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+ } else {
+ /* Set inband Tx timeout to 50x10us for 10Full/Half */
+ reg |= 50 <<
+ I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+ /* Extend the K1 entry latency for 10 Mbps */
+ fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+ }
+
+ ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
+ if (ret_val)
+ return ret_val;
+
+update_fextnvm6:
+ E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
}
return ret_val;
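As the in-line comments above state, the inband link-status timeout field counts in units of 10 microseconds, so the two values this workaround programs decode as follows (a restatement of the code, not new behavior):

    /* 100Half: 5  * 10us =  50us link-status Tx timeout */
    /* 10 Mbps: 50 * 10us = 500us link-status Tx timeout */
    reg = (reg & ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK) |
          (5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT);
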
@@ -1020,7 +1141,6 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
lat_ns /= 1000000000;
obff_hwm = (s32)(rxa - lat_ns);
}
-
if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
return -E1000_ERR_CONFIG;
@@ -1081,6 +1201,256 @@ static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
}
/**
+ * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+ * @to_sx: boolean indicating a system power state transition to Sx
+ *
+ * When link is down, configure ULP mode to significantly reduce the power
+ * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
+ * ME firmware to start the ULP configuration. If not on an ME enabled
+ * system, configure the ULP mode by software.
+ */
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
+{
+ u32 mac_reg;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_reg;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
+ (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
+ return 0;
+
+ if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ /* Request ME configure ULP mode in the PHY */
+ mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+ mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
+ E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+
+ goto out;
+ }
+
+ if (!to_sx) {
+ int i = 0;
+
+ /* Poll up to 5 seconds for Cable Disconnected indication */
+ while (!(E1000_READ_REG(hw, E1000_FEXT) &
+ E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
+ /* Bail if link is re-acquired */
+ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
+ return -E1000_ERR_PHY;
+
+ if (i++ == 100)
+ break;
+
+ msec_delay(50);
+ }
+ DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
+ (E1000_READ_REG(hw, E1000_FEXT) &
+ E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
+ i * 50);
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /* Force SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Force SMBus mode in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+ /* Set Inband ULP Exit, Reset to SMBus mode and
+ * Disable SMBus Release on PERST# in PHY
+ */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
+ I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+ if (to_sx) {
+ if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
+ phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
+
+ phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
+ } else {
+ phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
+ }
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Set Disable SMBus Release on PERST# in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
+ mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
+
+ /* Commit ULP changes in PHY by starting auto ULP configuration */
+ phy_reg |= I218_ULP_CONFIG1_START;
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+release:
+ hw->phy.ops.release(hw);
+out:
+ if (ret_val) {
+ DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
+ } else
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
+
+ return ret_val;
+}
+
+/**
+ * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+ * @force: boolean indicating whether or not to force disabling ULP
+ *
+ * Un-configure ULP mode when link is up, the system is transitioned from
+ * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled
+ * system, poll for an indication from ME that ULP has been un-configured.
+ * If not on an ME enabled system, un-configure the ULP mode by software.
+ *
+ * During nominal operation, this function is called when link is acquired
+ * to disable ULP mode (force=FALSE); otherwise, for example when unloading
+ * the driver or during Sx->S0 transitions, this is called with force=TRUE
+ * to forcibly disable ULP.
+ */
+s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 mac_reg;
+ u16 phy_reg;
+ int i = 0;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
+ (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
+ return 0;
+
+ if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (force) {
+ /* Request ME un-configure ULP mode in the PHY */
+ mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+ mac_reg &= ~E1000_H2ME_ULP;
+ mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
+ E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+ }
+
+ /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
+ while (E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_FWSM_ULP_CFG_DONE) {
+ if (i++ == 10) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ msec_delay(10);
+ }
+ DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+
+ if (force) {
+ mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+ mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
+ E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+ } else {
+ /* Clear H2ME.ULP after ME ULP configuration */
+ mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+ mac_reg &= ~E1000_H2ME_ULP;
+ E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+ }
+
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ if (force)
+ /* Toggle LANPHYPC Value bit */
+ e1000_toggle_lanphypc_pch_lpt(hw);
+
+ /* Unforce SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val) {
+ /* The MAC might be in PCIe mode, so temporarily force to
+ * SMBus mode in order to access the PHY.
+ */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+ msec_delay(50);
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ }
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Unforce SMBus mode in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+ /* When ULP mode was previously entered, K1 was disabled by the
+ * hardware. Re-Enable K1 in the PHY when exiting ULP.
+ */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= HV_PM_CTRL_K1_ENABLE;
+ e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
+
+ /* Clear ULP enabled configuration */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg &= ~(I218_ULP_CONFIG1_IND |
+ I218_ULP_CONFIG1_STICKY_ULP |
+ I218_ULP_CONFIG1_RESET_TO_SMBUS |
+ I218_ULP_CONFIG1_WOL_HOST |
+ I218_ULP_CONFIG1_INBAND_EXIT |
+ I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Commit ULP changes by starting auto ULP configuration */
+ phy_reg |= I218_ULP_CONFIG1_START;
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Clear Disable SMBus Release on PERST# in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
+ mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
+
+release:
+ hw->phy.ops.release(hw);
+ if (force) {
+ hw->phy.ops.reset(hw);
+ msec_delay(50);
+ }
+out:
+ if (ret_val) {
+ DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
+ } else
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
+
+ return ret_val;
+}
+
+/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
*
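A minimal sketch of how a driver's power paths might pair the two new ULP helpers, following the contract in their comments; the em_* wrapper functions are hypothetical and not part of this patch:

    /* Hypothetical driver glue. */
    static void em_suspend(struct e1000_hw *hw)
    {
        (void)e1000_enable_ulp_lpt_lp(hw, TRUE);  /* going to Sx */
    }

    static void em_resume(struct e1000_hw *hw)
    {
        /* Sx -> S0: ULP state is unknown, so force it off. */
        (void)e1000_disable_ulp_lpt_lp(hw, TRUE);
    }

    static void em_link_up(struct e1000_hw *hw)
    {
        /* Nominal link-up: a non-forced disable suffices. */
        (void)e1000_disable_ulp_lpt_lp(hw, FALSE);
    }
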
@@ -1105,13 +1475,13 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (!mac->get_link_status)
return E1000_SUCCESS;
- /* First we want to see if the MII Status Register reports
- * link. If so, then we want to get the current speed/duplex
- * of the PHY.
- */
- ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
- if (ret_val)
- return ret_val;
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
if (hw->mac.type == e1000_pchlan) {
ret_val = e1000_k1_gig_workaround_hv(hw, link);
@@ -1119,14 +1489,17 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
}
- /* When connected at 10Mbps half-duplex, 82579 parts are excessively
+ /* When connected at 10Mbps half-duplex, some parts are excessively
* aggressive resulting in many collisions. To avoid this, increase
* the IPG and reduce Rx latency in the PHY.
*/
- if ((hw->mac.type == e1000_pch2lan) && link) {
+ if (((hw->mac.type == e1000_pch2lan) ||
+ (hw->mac.type == e1000_pch_lpt)) && link) {
u32 reg;
reg = E1000_READ_REG(hw, E1000_STATUS);
if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+ u16 emi_addr;
+
reg = E1000_READ_REG(hw, E1000_TIPG);
reg &= ~E1000_TIPG_IPGT_MASK;
reg |= 0xFF;
@@ -1137,7 +1510,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- ret_val = e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
+ if (hw->mac.type == e1000_pch2lan)
+ emi_addr = I82579_RX_CONFIG;
+ else
+ emi_addr = I217_RX_CONFIG;
+ ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
hw->phy.ops.release(hw);
@@ -1148,15 +1525,17 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
/* Work-around I218 hang issue */
if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
- (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
ret_val = e1000_k1_workaround_lpt_lp(hw, link);
if (ret_val)
return ret_val;
}
-
if (hw->mac.type == e1000_pch_lpt) {
- /* Set platform power management values for Latency Tolerance
- * Reporting (LTR) and Optimized Buffer Flush/Fill (OBFF).
+ /* Set platform power management values for
+ * Latency Tolerance Reporting (LTR) and
+ * Optimized Buffer Flush/Fill (OBFF)
*/
ret_val = e1000_platform_pm_pch_lpt(hw, link);
if (ret_val)
@@ -1208,9 +1587,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
e1000_check_downshift_generic(hw);
/* Enable/Disable EEE after link up */
- ret_val = e1000_set_eee_pchlan(hw);
- if (ret_val)
- return ret_val;
+ if (hw->phy.type > e1000_phy_82579) {
+ ret_val = e1000_set_eee_pchlan(hw);
+ if (ret_val)
+ return ret_val;
+ }
/* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
@@ -1434,7 +1815,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
* contain the MAC address but RAR[1-6] are reserved for manageability (ME).
* Use SHRA[0-3] in place of those reserved for ME.
**/
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
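Given the register layout described above, the index handling maps out as below; the rar_entry_count of 5 for pch2lan, and calling through the generic rar_set op, are assumptions of this sketch:

    /* index 0      -> RAL/RAH(0)            station address
     * index 1..4   -> SHRAL/SHRAH(index-1)  may be locked by ME
     * index >= 5   -> -E1000_ERR_CONFIG
     */
    if (hw->mac.ops.rar_set(hw, addr, 1) != E1000_SUCCESS)
        DEBUGOUT("SHRA[0] locked by ME; address not written\n");
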
@@ -1458,10 +1839,13 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
E1000_WRITE_FLUSH(hw);
- return;
+ return E1000_SUCCESS;
}
- if (index < hw->mac.rar_entry_count) {
+ /* RAR[1-6] are owned by manageability. Skip those and program the
+ * next address into the SHRA register array.
+ */
+ if (index < (u32) (hw->mac.rar_entry_count)) {
s32 ret_val;
ret_val = e1000_acquire_swflag_ich8lan(hw);
@@ -1478,7 +1862,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
/* verify the register updates */
if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
(E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
- return;
+ return E1000_SUCCESS;
DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
(index - 1), E1000_READ_REG(hw, E1000_FWSM));
@@ -1486,6 +1870,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
out:
DEBUGOUT1("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
}
/**
@@ -1499,7 +1884,7 @@ out:
* contain the MAC address. SHRA[0-10] are the shared receive address
* registers that are shared between the Host and manageability engine (ME).
**/
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
u32 wlock_mac;
@@ -1523,7 +1908,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
E1000_WRITE_FLUSH(hw);
- return;
+ return E1000_SUCCESS;
}
/* The manageability engine (ME) can lock certain SHRAR registers that
@@ -1558,12 +1943,13 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
/* verify the register updates */
if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
(E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
- return;
+ return E1000_SUCCESS;
}
}
out:
DEBUGOUT1("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
}
/**
@@ -1621,13 +2007,21 @@ release:
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
{
u32 fwsm;
+ bool blocked = FALSE;
+ int i = 0;
DEBUGFUNC("e1000_check_reset_block_ich8lan");
- fwsm = E1000_READ_REG(hw, E1000_FWSM);
-
- return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
- : E1000_BLK_PHY_RESET;
+ do {
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
+ blocked = TRUE;
+ msec_delay(10);
+ continue;
+ }
+ blocked = FALSE;
+ } while (blocked && (i++ < 10));
+ return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
}
/**
@@ -1827,9 +2221,9 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
if (ret_val)
goto release;
- status_reg &= BM_CS_STATUS_LINK_UP |
- BM_CS_STATUS_RESOLVED |
- BM_CS_STATUS_SPEED_MASK;
+ status_reg &= (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK);
if (status_reg == (BM_CS_STATUS_LINK_UP |
BM_CS_STATUS_RESOLVED |
@@ -1843,9 +2237,9 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
if (ret_val)
goto release;
- status_reg &= HV_M_STATUS_LINK_UP |
- HV_M_STATUS_AUTONEG_COMPLETE |
- HV_M_STATUS_SPEED_MASK;
+ status_reg &= (HV_M_STATUS_LINK_UP |
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_MASK);
if (status_reg == (HV_M_STATUS_LINK_UP |
HV_M_STATUS_AUTONEG_COMPLETE |
@@ -2127,8 +2521,8 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto release;
- /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
- for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
+ for (i = 0; i < (hw->mac.rar_entry_count); i++) {
mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
(u16)(mac_reg & 0xFFFF));
@@ -2193,10 +2587,10 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
if (enable) {
- /* Write Rx addresses (rar_entry_count for RAL/H, +4 for
+ /* Write Rx addresses (rar_entry_count for RAL/H, and
* SHRAL/H) and initial CRC values to the MAC
*/
- for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
u8 mac_addr[ETH_ADDR_LEN] = {0};
u32 addr_high, addr_low;
@@ -2265,7 +2659,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
data &= ~(0x3FF << 2);
- data |= (0x1A << 2);
+ data |= (E1000_TX_PTR_GAP << 2);
ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
if (ret_val)
return ret_val;
@@ -2379,55 +2773,47 @@ release:
* e1000_k1_gig_workaround_lv - K1 Si workaround
* @hw: pointer to the HW structure
*
- * Workaround to set the K1 beacon duration for 82579 parts
+ * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
+ * Disable K1 for 1000 and 100 speeds.
**/
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u16 status_reg = 0;
- u32 mac_reg;
- u16 phy_reg;
DEBUGFUNC("e1000_k1_workaround_lv");
if (hw->mac.type != e1000_pch2lan)
return E1000_SUCCESS;
- /* Set K1 beacon duration based on 1Gbps speed or otherwise */
+ /* Set K1 beacon duration based on 10Mbps speed */
ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
if (ret_val)
return ret_val;
if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
== (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
- mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
- mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
-
- ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
- if (ret_val)
- return ret_val;
-
- if (status_reg & HV_M_STATUS_SPEED_1000) {
+ if (status_reg &
+ (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
u16 pm_phy_reg;
- mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
- phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
- /* LV 1G Packet drop issue wa */
+ /* LV 1G/100 Packet drop issue wa */
ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
&pm_phy_reg);
if (ret_val)
return ret_val;
- pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
+ pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
pm_phy_reg);
if (ret_val)
return ret_val;
} else {
+ u32 mac_reg;
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
- phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
}
- E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
- ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
}
return ret_val;
@@ -2964,7 +3350,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
/* Clear FCERR and DAEL in hw status by writing 1 */
hsfsts.hsf_status.flcerr = 1;
hsfsts.hsf_status.dael = 1;
-
E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
/* Either we should have a hardware SPI cycle in progress
@@ -3031,6 +3416,7 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcgo = 1;
+
E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
/* wait till FDONE bit is set to 1 */
@@ -3085,6 +3471,7 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
u16 word = 0;
ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+
if (ret_val)
return ret_val;
@@ -3114,11 +3501,10 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
DEBUGFUNC("e1000_read_flash_data_ich8lan");
- if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
return -E1000_ERR_NVM;
-
- flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
- hw->nvm.flash_base_addr;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
do {
usec_delay(1);
@@ -3126,13 +3512,12 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ret_val = e1000_flash_cycle_init_ich8lan(hw);
if (ret_val != E1000_SUCCESS)
break;
-
hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
hsflctl.hsf_ctrl.fldbcount = size - 1;
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
-
E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
ret_val = e1000_flash_cycle_ich8lan(hw,
@@ -3171,6 +3556,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
return ret_val;
}
+
/**
* e1000_write_nvm_ich8lan - Write word(s) to the NVM
* @hw: pointer to the HW structure
@@ -3224,7 +3610,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
s32 ret_val;
- u16 data;
+ u16 data = 0;
DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
@@ -3260,12 +3646,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto release;
}
-
for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
- /* Determine whether to write the value stored
- * in the other NVM bank or a modified value stored
- * in the shadow RAM
- */
if (dev_spec->shadow_ram[i].modified) {
data = dev_spec->shadow_ram[i].value;
} else {
@@ -3275,7 +3656,6 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val)
break;
}
-
/* If the word is 0x13, then make sure the signature bits
* (15:14) are 11b until the commit has completed.
* This will allow us to write 10b which indicates the
@@ -3290,6 +3670,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
act_offset = (i + new_bank_offset) << 1;
usec_delay(100);
+
/* Write the bytes to the new bank. */
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
act_offset,
@@ -3303,7 +3684,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
(u8)(data >> 8));
if (ret_val)
break;
- }
+ }
/* Don't bother writing the segment valid bits if sector
* programming failed.
@@ -3324,8 +3705,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
goto release;
data &= 0xBFFF;
- ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
- act_offset * 2 + 1,
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
(u8)(data >> 8));
if (ret_val)
goto release;
@@ -3336,7 +3716,9 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
* to 1's. We can write 1's to 0's without an erase
*/
act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+
ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
+
if (ret_val)
goto release;
@@ -3435,12 +3817,11 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
DEBUGFUNC("e1000_write_ich8_data");
- if (size < 1 || size > 2 || data > size * 0xff ||
- offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
return -E1000_ERR_NVM;
- flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
- hw->nvm.flash_base_addr;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
do {
usec_delay(1);
@@ -3448,8 +3829,8 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ret_val = e1000_flash_cycle_init_ich8lan(hw);
if (ret_val != E1000_SUCCESS)
break;
-
hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
hsflctl.hsf_ctrl.fldbcount = size - 1;
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
@@ -3467,8 +3848,9 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
/* check if FCERR is set to 1 , if set to 1, clear it
* and try the whole sequence a few more times else done
*/
- ret_val = e1000_flash_cycle_ich8lan(hw,
- ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
if (ret_val == E1000_SUCCESS)
break;
@@ -3490,6 +3872,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
return ret_val;
}
+
/**
* e1000_write_flash_byte_ich8lan - Write a single byte to NVM
* @hw: pointer to the HW structure
@@ -3508,6 +3891,8 @@ static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
}
+
+
/**
* e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
* @hw: pointer to the HW structure
@@ -3604,8 +3989,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
flash_linear_addr = hw->nvm.flash_base_addr;
flash_linear_addr += (bank) ? flash_bank_size : 0;
- for (j = 0; j < iteration ; j++) {
+ for (j = 0; j < iteration; j++) {
do {
+ u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
+
/* Steps */
ret_val = e1000_flash_cycle_init_ich8lan(hw);
if (ret_val)
@@ -3614,8 +4001,9 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
/* Write a value 11 (block Erase) in Flash
* Cycle field in hw flash control
*/
- hsflctl.regval = E1000_READ_FLASH_REG16(hw,
- ICH_FLASH_HSFCTL);
+ hsflctl.regval =
+ E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
hsflctl.regval);
@@ -3628,8 +4016,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
flash_linear_addr);
- ret_val = e1000_flash_cycle_ich8lan(hw,
- ICH_FLASH_ERASE_COMMAND_TIMEOUT);
+ ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
if (ret_val == E1000_SUCCESS)
break;
@@ -3949,16 +4336,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
/* Set the transmit descriptor write-back policy for both queues */
txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
- txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB;
- txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
- E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB);
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
- txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB;
- txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
- E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB);
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
/* ICH8 has opposite polarity of no_snoop bits.
@@ -4043,6 +4430,7 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
*/
reg = E1000_READ_REG(hw, E1000_RFCTL);
reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
+
/* Disable IPv6 extension header parsing because some malformed
* IPv6 headers can hang the Rx.
*/
@@ -4481,7 +4869,9 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
u16 phy_reg, device_id = hw->device_id;
if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
- (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+ (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
+ (device_id == E1000_DEV_ID_PCH_I218_V3)) {
u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
E1000_WRITE_REG(hw, E1000_FEXTNVM6,
@@ -4504,14 +4894,25 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
/* Disable LPLU if both link partners support 100BaseT
* EEE and 100Full is advertised on both ends of the
- * link.
+ * link, and enable Auto Enable LPI since there will
+ * be no driver to enable LPI while in Sx.
*/
if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
(dev_spec->eee_lp_ability &
I82579_EEE_100_SUPPORTED) &&
- (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
+ (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
E1000_PHY_CTRL_NOND0A_LPLU);
+
+ /* Set Auto Enable LPI after link up */
+ hw->phy.ops.read_reg_locked(hw,
+ I217_LPI_GPIO_CTRL,
+ &phy_reg);
+ phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+ hw->phy.ops.write_reg_locked(hw,
+ I217_LPI_GPIO_CTRL,
+ phy_reg);
+ }
}
/* For i217 Intel Rapid Start Technology support,
@@ -4522,7 +4923,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
* The SMBus release must also be disabled on LCD reset.
*/
if (!(E1000_READ_REG(hw, E1000_FWSM) &
- E1000_ICH_FWSM_FW_VALID)) {
+ E1000_ICH_FWSM_FW_VALID)) {
/* Enable proxy to reset only on power good. */
hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
&phy_reg);
@@ -4615,6 +5016,11 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
return;
}
+ /* Clear Auto Enable LPI after link up */
+ hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
+ phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+ hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
+
if (!(E1000_READ_REG(hw, E1000_FWSM) &
E1000_ICH_FWSM_FW_VALID)) {
/* Restore clear on SMB if no manageability engine