#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
struct link_params *params,
u8 dev_addr, u16 addr, u8 byte_cnt,
u8 *o_buf, u8);
#define MDIO_ACCESS_TIMEOUT 1000
#define WC_LANE_MAX 4
#define I2C_SWITCH_WIDTH 2
#define I2C_BSC0 0
#define I2C_BSC1 1
#define I2C_WA_RETRY_CNT 3
#define I2C_WA_PWR_ITER (I2C_WA_RETRY_CNT - 1)
#define MCPR_IMC_COMMAND_READ_OP 1
#define MCPR_IMC_COMMAND_WRITE_OP 2
#define LED_BLINK_RATE_VAL_E3 354
#define LED_BLINK_RATE_VAL_E1X_E2 480
#define NIG_LATCH_BC_ENABLE_MI_INT 0
#define NIG_STATUS_EMAC0_MI_INT \
NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT
#define NIG_STATUS_XGXS0_LINK10G \
NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
#define NIG_STATUS_XGXS0_LINK_STATUS \
NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
#define NIG_STATUS_SERDES0_LINK_STATUS \
NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
#define NIG_MASK_MI_INT \
NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
#define NIG_MASK_XGXS0_LINK10G \
NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
#define NIG_MASK_XGXS0_LINK_STATUS \
NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
#define NIG_MASK_SERDES0_LINK_STATUS \
NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS
#define MDIO_AN_CL73_OR_37_COMPLETE \
(MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
#define XGXS_RESET_BITS \
(MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \
MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \
MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \
MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
#define SERDES_RESET_BITS \
(MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \
MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \
MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
#define AUTONEG_PARALLEL \
SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
#define AUTONEG_SGMII_FIBER_AUTODET \
SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
#define GP_STATUS_SPEED_MASK \
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
#define GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
#define GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
#define GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
#define GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
#define GP_STATUS_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
#define GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
#define GP_STATUS_10G_HIG \
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
#define GP_STATUS_10G_CX4 \
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
#define GP_STATUS_10G_KX4 \
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
#define GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR
#define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
#define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
#define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
#define GP_STATUS_20G_KR2 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2
#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
#define LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
#define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
#define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
#define LINK_UPDATE_MASK \
(LINK_STATUS_SPEED_AND_DUPLEX_MASK | \
LINK_STATUS_LINK_UP | \
LINK_STATUS_PHYSICAL_LINK_FLAG | \
LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \
LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \
LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \
LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \
LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \
LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
#define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0
#define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
#define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
#define SFP_EEPROM_10G_COMP_CODE_ADDR 0x3
#define SFP_EEPROM_10G_COMP_CODE_SR_MASK (1<<4)
#define SFP_EEPROM_10G_COMP_CODE_LR_MASK (1<<5)
#define SFP_EEPROM_10G_COMP_CODE_LRM_MASK (1<<6)
#define SFP_EEPROM_1G_COMP_CODE_ADDR 0x6
#define SFP_EEPROM_1G_COMP_CODE_SX (1<<0)
#define SFP_EEPROM_1G_COMP_CODE_LX (1<<1)
#define SFP_EEPROM_1G_COMP_CODE_CX (1<<2)
#define SFP_EEPROM_1G_COMP_CODE_BASE_T (1<<3)
#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
#define SFP_EEPROM_OPTIONS_ADDR 0x40
#define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
#define SFP_EEPROM_OPTIONS_SIZE 2
#define EDC_MODE_LINEAR 0x0022
#define EDC_MODE_LIMITING 0x0044
#define EDC_MODE_PASSIVE_DAC 0x0055
#define EDC_MODE_ACTIVE_DAC 0x0066
#define DCBX_INVALID_COS (0xFF)
#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
#define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000)
#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360)
#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720)
#define ETS_E3B0_PBF_MIN_W_VAL (10000)
#define MAX_PACKET_SIZE (9700)
#define MAX_KR_LINK_RETRY 4
#define DEFAULT_TX_DRV_BRDCT 2
#define DEFAULT_TX_DRV_IFIR 0
#define DEFAULT_TX_DRV_POST2 3
#define DEFAULT_TX_DRV_IPRE_DRIVER 6
#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
bnx2x_cl45_write(_bp, _phy, \
(_phy)->def_md_devad, \
(_bank + (_addr & 0xf)), \
_val)
#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
bnx2x_cl45_read(_bp, _phy, \
(_phy)->def_md_devad, \
(_bank + (_addr & 0xf)), \
_val)
static int bnx2x_check_half_open_conn(struct link_params *params,
struct link_vars *vars, u8 notify);
static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
struct link_params *params);
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
val |= bits;
REG_WR(bp, reg, val);
return val;
}
static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
val &= ~bits;
REG_WR(bp, reg, val);
return val;
}
static int bnx2x_check_lfa(struct link_params *params)
{
u32 link_status, cfg_idx, lfa_mask, cfg_size;
u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config;
u32 saved_val, req_val, eee_status;
struct bnx2x *bp = params->bp;
additional_config =
REG_RD(bp, params->lfa_base +
offsetof(struct shmem_lfa, additional_config));
if (additional_config & NO_LFA_DUE_TO_DCC_MASK) {
DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n");
REG_WR(bp, params->lfa_base +
offsetof(struct shmem_lfa, additional_config),
additional_config & ~NO_LFA_DUE_TO_DCC_MASK);
return LFA_DCC_LFA_DISABLED;
}
link_status = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
port_mb[params->port].link_status));
if (!(link_status & LINK_STATUS_LINK_UP))
return LFA_LINK_DOWN;
if (params->feature_config_flags & FEATURE_CONFIG_BOOT_FROM_SAN)
return 0;
if (params->loopback_mode)
return LFA_LOOPBACK_ENABLED;
if (!params->lfa_base)
return LFA_MFW_IS_TOO_OLD;
if (params->num_phys == 3) {
cfg_size = 2;
lfa_mask = 0xffffffff;
} else {
cfg_size = 1;
lfa_mask = 0xffff;
}
saved_val = REG_RD(bp, params->lfa_base +
offsetof(struct shmem_lfa, req_duplex));
req_val = params->req_duplex[0] | (params->req_duplex[1] << 16);
if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. %x\n",
(saved_val & lfa_mask), (req_val & lfa_mask));
return LFA_DUPLEX_MISMATCH;
}
saved_val = REG_RD(bp, params->lfa_base +
offsetof(struct shmem_lfa, req_flow_ctrl));
req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16);
if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n",
(saved_val & lfa_mask), (req_val & lfa_mask));
return LFA_FLOW_CTRL_MISMATCH;
}
saved_val = REG_RD(bp, params->lfa_base +
offsetof(struct shmem_lfa, req_line_speed));
req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16);
if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n",
(saved_val & lfa_mask), (req_val & lfa_mask));
return LFA_LINK_SPEED_MISMATCH;
}
for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) {
cur_speed_cap_mask = REG_RD(bp, params->lfa_base +
offsetof(struct shmem_lfa,
speed_cap_mask[cfg_idx]));
if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) {
DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n",
cur_speed_cap_mask,
params->speed_cap_mask[cfg_idx]);
return LFA_SPEED_CAP_MISMATCH;
}
}
cur_req_fc_auto_adv =
REG_RD(bp, params->lfa_base +
offsetof(struct shmem_lfa, additional_config)) &
REQ_FC_AUTO_ADV_MASK;
if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) {
DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. %x\n",
cur_req_fc_auto_adv, params->req_fc_auto_adv);
return LFA_FLOW_CTRL_MISMATCH;
}
eee_status = REG_RD(bp, params->shmem2_base +
offsetof(struct shmem2_region,
eee_status[params->port]));
if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^
(params->eee_mode & EEE_MODE_ENABLE_LPI)) ||
((eee_status & SHMEM_EEE_REQUESTED_BIT) ^
(params->eee_mode & EEE_MODE_ADV_LPI))) {
DP(NETIF_MSG_LINK, "EEE mismatch %x vs. %x\n", params->eee_mode,
eee_status);
return LFA_EEE_MISMATCH;
}
return 0;
}
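/* EPIO (extended GPIO) pin helpers: pins are read and driven through the MCP
 * GP_OENABLE/GP_INPUTS/GP_OUTPUTS registers. bnx2x_get_epio() turns the pin
 * into an input and samples it; bnx2x_set_epio() drives it as an output.
 */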
static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en)
{
u32 epio_mask, gp_oenable;
*en = 0;
if (epio_pin > 31) {
DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin);
return;
}
epio_mask = 1 << epio_pin;
gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask);
*en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin;
}
static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en)
{
u32 epio_mask, gp_output, gp_oenable;
if (epio_pin > 31) {
DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin);
return;
}
DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en);
epio_mask = 1 << epio_pin;
gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS);
if (en)
gp_output |= epio_mask;
else
gp_output &= ~epio_mask;
REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output);
gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask);
}
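/* Drive or sample a pin given its shared-config pin encoding: values at or
 * above PIN_CFG_EPIO0 map to EPIO pins, lower values map to per-port GPIOs
 * (four GPIOs per port).
 */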
static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val)
{
if (pin_cfg == PIN_CFG_NA)
return;
if (pin_cfg >= PIN_CFG_EPIO0) {
bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
} else {
u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port);
}
}
static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val)
{
if (pin_cfg == PIN_CFG_NA)
return -EINVAL;
if (pin_cfg >= PIN_CFG_EPIO0) {
bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
} else {
u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
*val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
}
return 0;
}
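/* Restore the E2/E3A0 NIG and PBF arbitration registers to their
 * ETS-disabled defaults: strict-priority arbitration in the NIG, ETS
 * disabled in the PBF and equal COS0/COS1 weights.
 */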
static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
{
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
}
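/* Minimal credit weight for the E3B0 NIG WFQ arbiter; a larger value is
 * needed when the port runs at 20G. The credit upper bound derived from it
 * below is floored at the maximum packet size.
 */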
static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
{
u32 min_w_val = 0;
if (vars->link_up) {
if (vars->line_speed == SPEED_20000)
min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
else
min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
} else
min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
return min_w_val;
}
static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val)
{
const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val),
MAX_PACKET_SIZE);
return credit_upper_bound;
}
static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
const struct link_params *params,
const u32 min_w_val)
{
struct bnx2x *bp = params->bp;
const u8 port = params->port;
const u32 credit_upper_bound =
bnx2x_ets_get_credit_upper_bound(min_w_val);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
if (!port) {
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
credit_upper_bound);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
credit_upper_bound);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
credit_upper_bound);
}
}
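/* Bring the per-port E3B0 NIG arbiter back to its ETS-disabled defaults:
 * default priority-to-client and client-to-credit-register mappings, all
 * clients strict, no WFQ clients, zero credit weights and default credit
 * upper bounds.
 */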
static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
const struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
const u8 port = params->port;
const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
if (port) {
REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
} else {
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
}
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
if (port) {
REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
} else {
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
0x43210876);
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
}
if (port)
REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
else
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
if (!port) {
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
}
bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
}
static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
const struct link_params *params,
const u32 min_w_val)
{
struct bnx2x *bp = params->bp;
const u32 credit_upper_bound =
bnx2x_ets_get_credit_upper_bound(min_w_val);
const u8 port = params->port;
u32 base_upper_bound = 0;
u8 max_cos = 0;
u8 i = 0;
if (!port) {
base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
} else {
base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
}
for (i = 0; i < max_cos; i++)
REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound);
}
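/* Bring the per-port E3B0 PBF arbiter back to its ETS-disabled defaults:
 * default priority and credit mappings, no strict or WFQ clients, zero COS
 * weights and default credit upper bounds.
 */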
static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
{
struct bnx2x *bp = params->bp;
const u8 port = params->port;
const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
u8 i = 0;
u32 base_weight = 0;
u8 max_cos = 0;
if (port)
REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, 0x688);
else
REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, 0x2C688);
if (port)
REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
else
REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 0x100);
REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0);
REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0);
if (!port) {
base_weight = PBF_REG_COS0_WEIGHT_P0;
max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
} else {
base_weight = PBF_REG_COS0_WEIGHT_P1;
max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
}
for (i = 0; i < max_cos; i++)
REG_WR(bp, base_weight + (0x4 * i), 0);
bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
}
static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
const struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
if (!CHIP_IS_E3B0(bp)) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
return -EINVAL;
}
bnx2x_ets_e3b0_nig_disabled(params, vars);
bnx2x_ets_e3b0_pbf_disabled(params);
return 0;
}
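/* Disable ETS on the current port, dispatching to the E2/E3A0 or E3B0
 * variant according to the chip revision.
 */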
int bnx2x_ets_disabled(struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
int bnx2x_status = 0;
if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp)))
bnx2x_ets_e2e3a0_disabled(params);
else if (CHIP_IS_E3B0(bp))
bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
else {
DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
return -EINVAL;
}
return bnx2x_status;
}
static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
const struct bnx2x_ets_params *ets_params,
const u8 cos_sp_bitmap,
const u8 cos_bw_bitmap)
{
struct bnx2x *bp = params->bp;
const u8 port = params->port;
const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, pbf_cli_sp_bitmap);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
nig_cli_subject2wfq_bitmap);
REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
pbf_cli_subject2wfq_bitmap);
return 0;
}
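/* Program the NIG and PBF credit-weight registers of a single COS entry.
 * The weight is scaled so that the configured BW percentages of all COS
 * entries split min_w_val proportionally; a BW of 0 is treated as 1 so the
 * arbiter never starves the queue completely.
 */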
static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
const u8 cos_entry,
const u32 min_w_val_nig,
const u32 min_w_val_pbf,
const u16 total_bw,
const u8 bw,
const u8 port)
{
u32 nig_reg_adress_crd_weight = 0;
u32 pbf_reg_adress_crd_weight = 0;
const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
switch (cos_entry) {
case 0:
nig_reg_adress_crd_weight =
(port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
pbf_reg_adress_crd_weight = (port) ?
PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
break;
case 1:
nig_reg_adress_crd_weight = (port) ?
NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
pbf_reg_adress_crd_weight = (port) ?
PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
break;
case 2:
nig_reg_adress_crd_weight = (port) ?
NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
pbf_reg_adress_crd_weight = (port) ?
PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
break;
case 3:
if (port)
return -EINVAL;
nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
pbf_reg_adress_crd_weight = PBF_REG_COS3_WEIGHT_P0;
break;
case 4:
if (port)
return -EINVAL;
nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
break;
case 5:
if (port)
return -EINVAL;
nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
break;
}
REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf);
return 0;
}
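/* Sum the BW percentages of all bandwidth-type COS entries. Entries
 * configured with 0% are bumped to 1%; a zero total with BW entries present
 * is rejected, while a total different from 100 only triggers a warning.
 */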
static int bnx2x_ets_e3b0_get_total_bw(
const struct link_params *params,
struct bnx2x_ets_params *ets_params,
u16 *total_bw)
{
struct bnx2x *bp = params->bp;
u8 cos_idx = 0;
u8 is_bw_cos_exist = 0;
*total_bw = 0;
for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
is_bw_cos_exist = 1;
if (!ets_params->cos[cos_idx].params.bw_params.bw) {
DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
"was set to 0\n");
ets_params->cos[cos_idx].params.bw_params.bw
= 1;
}
*total_bw +=
ets_params->cos[cos_idx].params.bw_params.bw;
}
}
if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
if (*total_bw == 0) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
return -EINVAL;
}
DP(NETIF_MSG_LINK,
"bnx2x_ets_E3B0_config total BW should be 100\n");
}
return 0;
}
static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
{
u8 pri = 0;
for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
sp_pri_to_cos[pri] = DCBX_INVALID_COS;
}
static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
u8 *sp_pri_to_cos, const u8 pri,
const u8 cos_entry)
{
struct bnx2x *bp = params->bp;
const u8 port = params->port;
const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
DCBX_E3B0_MAX_NUM_COS_PORT0;
if (pri >= max_num_of_cos) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
"parameter Illegal strict priority\n");
return -EINVAL;
}
if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
"parameter There can't be two COS's with "
"the same strict pri\n");
return -EINVAL;
}
sp_pri_to_cos[pri] = cos_entry;
return 0;
}
static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
const u8 pri_set,
const u8 pri_offset,
const u8 entry_size)
{
u64 pri_cli_nig = 0;
pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size *
(pri_set + pri_offset));
return pri_cli_nig;
}
static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
{
const u8 nig_cos_offset = 3;
const u8 nig_pri_offset = 3;
return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
nig_pri_offset, 4);
}
static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
{
const u8 pbf_cos_offset = 0;
const u8 pbf_pri_offset = 0;
return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
pbf_pri_offset, 3);
}
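/* Build and program the NIG/PBF priority-to-client registers for strict
 * priority: COS entries that were given an explicit strict priority are
 * placed first, in priority order, and any remaining COS entries are
 * appended after them.
 */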
static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
u8 *sp_pri_to_cos)
{
struct bnx2x *bp = params->bp;
u8 i = 0;
const u8 port = params->port;
u64 pri_cli_nig = 0x210;
u32 pri_cli_pbf = 0x0;
u8 pri_set = 0;
u8 pri_bitmask = 0;
const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
DCBX_E3B0_MAX_NUM_COS_PORT0;
u8 cos_bit_to_set = (1 << max_num_of_cos) - 1;
for (i = 0; i < max_num_of_cos; i++) {
if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_sp_set_pri_cli_reg "
"invalid cos entry\n");
return -EINVAL;
}
pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
sp_pri_to_cos[i], pri_set);
pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
sp_pri_to_cos[i], pri_set);
pri_bitmask = 1 << sp_pri_to_cos[i];
if (!(pri_bitmask & cos_bit_to_set)) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_sp_set_pri_cli_reg: two COS entries can't have the same strict priority\n");
return -EINVAL;
}
cos_bit_to_set &= ~pri_bitmask;
pri_set++;
}
}
for (i = 0; i < max_num_of_cos; i++) {
pri_bitmask = 1 << i;
if (pri_bitmask & cos_bit_to_set) {
pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
i, pri_set);
pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
i, pri_set);
cos_bit_to_set &= ~pri_bitmask;
pri_set++;
}
}
if (pri_set != max_num_of_cos) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all "
"entries were set\n");
return -EINVAL;
}
if (port) {
REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
(u32)pri_cli_nig);
REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, pri_cli_pbf);
} else {
const u32 pri_cli_nig_lsb = (u32) (pri_cli_nig);
const u32 pri_cli_nig_msb = (u32) ((pri_cli_nig >> 32) & 0xF);
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
pri_cli_nig_lsb);
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
pri_cli_nig_msb);
REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, pri_cli_pbf);
}
return 0;
}
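/* Configure E3B0 ETS from ets_params: validate the number of COS entries,
 * compute the total BW, program the credit upper bounds, set up each COS
 * entry either as a WFQ (bandwidth) client or as a strict-priority client,
 * and finally program the priority mapping and strict/WFQ client bitmaps in
 * the NIG and PBF.
 */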
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
struct bnx2x_ets_params *ets_params)
{
struct bnx2x *bp = params->bp;
int bnx2x_status = 0;
const u8 port = params->port;
u16 total_bw = 0;
const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars);
const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
u8 cos_bw_bitmap = 0;
u8 cos_sp_bitmap = 0;
u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0};
const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
DCBX_E3B0_MAX_NUM_COS_PORT0;
u8 cos_entry = 0;
if (!CHIP_IS_E3B0(bp)) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
return -EINVAL;
}
if (ets_params->num_of_cos > max_num_of_cos) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_E3B0_config the number of COS isn't supported\n");
return -EINVAL;
}
bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
&total_bw);
if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_E3B0_config get_total_bw failed\n");
return -EINVAL;
}
bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
cos_bw_bitmap |= (1 << cos_entry);
bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
bp, cos_entry, min_w_val_nig, min_w_val_pbf,
total_bw,
ets_params->cos[cos_entry].params.bw_params.bw,
port);
} else if (bnx2x_cos_state_strict ==
ets_params->cos[cos_entry].state){
cos_sp_bitmap |= (1 << cos_entry);
bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set(
params,
sp_pri_to_cos,
ets_params->cos[cos_entry].params.sp_params.pri,
cos_entry);
} else {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_config cos state not valid\n");
return -EINVAL;
}
if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_config set cos bw failed\n");
return bnx2x_status;
}
}
bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
sp_pri_to_cos);
if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
return bnx2x_status;
}
bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params,
cos_sp_bitmap,
cos_bw_bitmap);
if (bnx2x_status) {
DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
return bnx2x_status;
}
return 0;
}
static void bnx2x_ets_bw_limit_common(const struct link_params *params)
{
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
}
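/* Split the credit weight between COS0 and COS1 according to the requested
 * bandwidth ratio; the configuration is skipped when any of the BW values
 * is zero.
 */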
void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
const u32 cos1_bw)
{
struct bnx2x *bp = params->bp;
const u32 total_bw = cos0_bw + cos1_bw;
u32 cos0_credit_weight = 0;
u32 cos1_credit_weight = 0;
DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
if ((!total_bw) ||
(!cos0_bw) ||
(!cos1_bw)) {
DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
return;
}
cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
total_bw;
cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
total_bw;
bnx2x_ets_bw_limit_common(params);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
}
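/* Configure strict-priority ETS with the given COS as the high-priority
 * queue; WFQ is disabled in the PBF.
 */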
int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
{
struct bnx2x *bp = params->bp;
u32 val = 0;
DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
val = (!strict_cos) ? 0x2318 : 0x22E0;
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
return 0;
}
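/* Program the XMAC pause/PFC control registers: plain RX/TX pause when PFC
 * is disabled, or PFC with a forced-XON pulse when it is enabled, and
 * rewrite the station address used for generated pause frames.
 */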
static void bnx2x_update_pfc_xmac(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
struct bnx2x *bp = params->bp;
u32 xmac_base;
u32 pause_val, pfc0_val, pfc1_val;
xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
pause_val = 0x18000;
pfc0_val = 0xFFFF8000;
pfc1_val = 0x2;
if (!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED)) {
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
} else {
pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
XMAC_PFC_CTRL_HI_REG_TX_PFC_EN |
XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
pfc1_val &= ~XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
}
REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO,
((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
(params->mac_addr[5])));
REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI,
((params->mac_addr[0] << 8) |
(params->mac_addr[1])));
udelay(30);
}
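/* Set the EMAC MDIO clock-count divider (a different value is used on
 * Warpcore-based chips) and select clause-45 framing, unless the current
 * MDIO mode already matches.
 */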
static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id,
u32 emac_base)
{
u32 new_mode, cur_mode;
u32 clc_cnt;
cur_mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
if (USES_WARPCORE(bp))
clc_cnt = 74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
else
clc_cnt = 49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
if (((cur_mode & EMAC_MDIO_MODE_CLOCK_CNT) == clc_cnt) &&
(cur_mode & (EMAC_MDIO_MODE_CLAUSE_45)))
return;
new_mode = cur_mode &
~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
new_mode |= clc_cnt;
new_mode |= (EMAC_MDIO_MODE_CLAUSE_45);
DP(NETIF_MSG_LINK, "Changing emac_mode from 0x%x to 0x%x\n",
cur_mode, new_mode);
REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode);
udelay(40);
}
static void bnx2x_set_mdio_emac_per_phy(struct bnx2x *bp,
struct link_params *params)
{
u8 phy_index;
for (phy_index = INT_PHY; phy_index < params->num_phys;
phy_index++)
bnx2x_set_mdio_clk(bp, params->chip_id,
params->phy[phy_index].mdio_ctrl);
}
static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
{
u32 port4mode_ovwr_val;
port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
if (port4mode_ovwr_val & (1<<0)) {
return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
}
return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
}
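/* Pulse the EMAC hard-core reset, wait for the self-clearing reset bit in
 * the mode register, set up the MDIO clock for all PHYs and program the
 * MAC-address match registers.
 */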
static void bnx2x_emac_init(struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
u32 val;
u16 timeout;
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
udelay(5);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
timeout = 200;
do {
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
if (!timeout) {
DP(NETIF_MSG_LINK, "EMAC timeout!\n");
return;
}
timeout--;
} while (val & EMAC_MODE_RESET);
bnx2x_set_mdio_emac_per_phy(bp, params);
val = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
val = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
params->mac_addr[5]);
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
}
static void bnx2x_set_xumac_nig(struct link_params *params,
u16 tx_pause_en,
u8 enable)
{
struct bnx2x *bp = params->bp;
REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN,
enable);
REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN,
enable);
REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN :
NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
}
static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en)
{
u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
u32 val;
struct bnx2x *bp = params->bp;
if (!(REG_RD(bp, MISC_REG_RESET_REG_2) &
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
return;
val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
if (en)
val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA |
UMAC_COMMAND_CONFIG_REG_RX_ENA);
else
val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA |
UMAC_COMMAND_CONFIG_REG_RX_ENA);
REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
}
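/* Pulse the UMAC reset and configure it for the negotiated speed (10M up to
 * 2.5G), duplex and flow control, optionally with EEE and local loopback,
 * then open the NIG path towards it.
 */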
static void bnx2x_umac_enable(struct link_params *params,
struct link_vars *vars, u8 lb)
{
u32 val;
u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
struct bnx2x *bp = params->bp;
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
DP(NETIF_MSG_LINK, "enabling UMAC\n");
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN |
UMAC_COMMAND_CONFIG_REG_PAD_EN |
UMAC_COMMAND_CONFIG_REG_SW_RESET |
UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK;
switch (vars->line_speed) {
case SPEED_10:
val |= (0<<2);
break;
case SPEED_100:
val |= (1<<2);
break;
case SPEED_1000:
val |= (2<<2);
break;
case SPEED_2500:
val |= (3<<2);
break;
default:
DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n",
vars->line_speed);
break;
}
if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE;
if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
if (vars->duplex == DUPLEX_HALF)
val |= UMAC_COMMAND_CONFIG_REG_HD_ENA;
REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
udelay(50);
if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
DP(NETIF_MSG_LINK, "configured UMAC for EEE\n");
REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL,
UMAC_UMAC_EEE_CTRL_REG_EEE_EN);
REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11);
} else {
REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0);
}
REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
(params->mac_addr[5])));
REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1,
((params->mac_addr[0] << 8) |
(params->mac_addr[1])));
val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN;
val |= UMAC_COMMAND_CONFIG_REG_TX_ENA |
UMAC_COMMAND_CONFIG_REG_RX_ENA;
REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
udelay(50);
val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;
if (lb)
val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
bnx2x_set_xumac_nig(params,
((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
vars->mac_type = MAC_TYPE_UMAC;
}
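/* Reset the XMAC core and program its port mode: 2x10G per path when the
 * 57840 runs in 4-port mode, otherwise one 10G port or two 20G ports per
 * path depending on max_speed. The init is skipped when 4-port mode is used
 * and the XMAC is already out of reset.
 */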
static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
{
struct bnx2x *bp = params->bp;
u32 is_port4mode = bnx2x_is_4_port_mode(bp);
if (((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) ||
(CHIP_NUM(bp) == CHIP_NUM_57840_2_20) ||
(CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) &&
is_port4mode &&
(REG_RD(bp, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC)) {
DP(NETIF_MSG_LINK,
"XMAC already out of reset in 4-port mode\n");
return;
}
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
MISC_REGISTERS_RESET_REG_2_XMAC);
usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
MISC_REGISTERS_RESET_REG_2_XMAC);
if (is_port4mode) {
DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
} else {
REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
if (max_speed == SPEED_10000) {
DP(NETIF_MSG_LINK,
"Init XMAC to 10G x 1 port per path\n");
REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
} else {
DP(NETIF_MSG_LINK,
"Init XMAC to 20G x 2 ports per path\n");
REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1);
}
}
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
}
static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en)
{
u8 port = params->port;
struct bnx2x *bp = params->bp;
u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
u32 val;
if (REG_RD(bp, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC) {
pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI);
REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
(pfc_ctrl & ~(1<<1)));
REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
(pfc_ctrl | (1<<1)));
DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
val = REG_RD(bp, xmac_base + XMAC_REG_CTRL);
if (en)
val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
else
val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
}
}
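/* Enable the XMAC: init the core, optionally mask local/remote fault
 * detection, set the maximum RX frame size, apply the pause/PFC and EEE
 * configuration, then enable TX/RX (with XLGMII alignment for 20G-KR2 and
 * optional local loopback).
 */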
static int bnx2x_xmac_enable(struct link_params *params,
struct link_vars *vars, u8 lb)
{
u32 val, xmac_base;
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "enabling XMAC\n");
xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
bnx2x_xmac_init(params, vars->line_speed);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
if (!(params->phy[INT_PHY].flags & FLAGS_TX_ERROR_CHECK)) {
REG_WR(bp, xmac_base + XMAC_REG_RX_LSS_CTRL,
(XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE |
XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE));
REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
}
REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800);
bnx2x_update_pfc_xmac(params, vars, 0);
if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n");
REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
} else {
REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0);
}
val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
if ((vars->line_speed == SPEED_20000) &&
(params->phy[INT_PHY].supported &
SUPPORTED_20000baseKR2_Full))
val |= XMAC_CTRL_REG_XLGMII_ALIGN_ENB;
if (lb)
val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
bnx2x_set_xumac_nig(params,
((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
vars->mac_type = MAC_TYPE_XMAC;
return 0;
}
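/* Enable the EMAC data path: hold the BMAC in reset, route the NIG to the
 * EMAC, reset the EMAC RX/TX state machines, apply the flow-control/PFC
 * configuration, enable promiscuous mode and jumbo frames, and open the
 * EMAC enable bits in the NIG while closing the BMAC ones.
 */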
static int bnx2x_emac_enable(struct link_params *params,
struct link_vars *vars, u8 lb)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
u32 val;
DP(NETIF_MSG_LINK, "enabling EMAC\n");
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
if (vars->phy_flags & PHY_XGXS_FLAG) {
u32 ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
DP(NETIF_MSG_LINK, "XGXS\n");
REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
} else {
DP(NETIF_MSG_LINK, "SerDes\n");
REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
}
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
EMAC_RX_MODE_RESET);
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_RESET);
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
EMAC_RX_MODE_FLOW_EN);
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
(EMAC_TX_MODE_EXT_PAUSE_EN |
EMAC_TX_MODE_FLOW_EN));
if (!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED)) {
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
bnx2x_bits_en(bp, emac_base +
EMAC_REG_EMAC_RX_MODE,
EMAC_RX_MODE_FLOW_EN);
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
bnx2x_bits_en(bp, emac_base +
EMAC_REG_EMAC_TX_MODE,
(EMAC_TX_MODE_EXT_PAUSE_EN |
EMAC_TX_MODE_FLOW_EN));
} else
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_FLOW_EN);
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
DP(NETIF_MSG_LINK, "PFC is enabled\n");
EMAC_WR(bp, EMAC_REG_RX_PFC_MODE,
EMAC_REG_RX_PFC_MODE_RX_EN |
EMAC_REG_RX_PFC_MODE_TX_EN |
EMAC_REG_RX_PFC_MODE_PRIORITIES);
EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM,
((0x0101 <<
EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
(0x00ff <<
EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
}
EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
if (lb)
val |= 0x810;
else
val &= ~0x810;
EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
(EMAC_RX_MTU_SIZE_JUMBO_ENA |
(ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD)));
REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
val = 0;
if ((params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) ||
(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val = 1;
REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
vars->mac_type = MAC_TYPE_EMAC;
return 0;
}
static void bnx2x_update_pfc_bmac1(struct link_params *params,
struct link_vars *vars)
{
u32 wb_data[2];
struct bnx2x *bp = params->bp;
u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM;
u32 val = 0x14;
if ((!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED)) &&
(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
val |= (1<<5);
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
val = 0xc0;
if (!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) &&
(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val |= 0x800000;
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
}
static void bnx2x_update_pfc_bmac2(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
u32 wb_data[2];
struct bnx2x *bp = params->bp;
u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM;
u32 val = 0x14;
if ((!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED)) &&
(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
val |= (1<<5);
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
udelay(30);
val = 0xc0;
if (!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) &&
(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val |= 0x800000;
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
DP(NETIF_MSG_LINK, "PFC is enabled\n");
wb_data[0] = 0x0;
wb_data[0] |= (1<<0);
wb_data[0] |= (1<<1);
wb_data[0] |= (1<<2);
wb_data[0] |= (1<<3);
wb_data[0] |= (1<<5);
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
wb_data, 2);
wb_data[0] &= ~(1<<2);
} else {
DP(NETIF_MSG_LINK, "PFC is disabled\n");
wb_data[0] = 0x8;
wb_data[1] = 0;
}
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
val = 0x8000;
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
val |= (1<<16);
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
wb_data, 2);
val = 0x3;
if (is_lb) {
val |= 0x4;
DP(NETIF_MSG_LINK, "enable bmac loopback\n");
}
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
val |= ((1<<6)|(1<<5));
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
}
static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
u8 cos_entry,
u32 priority_mask, u8 port)
{
u32 nig_reg_rx_priority_mask_add = 0;
switch (cos_entry) {
case 0:
nig_reg_rx_priority_mask_add = (port) ?
NIG_REG_P1_RX_COS0_PRIORITY_MASK :
NIG_REG_P0_RX_COS0_PRIORITY_MASK;
break;
case 1:
nig_reg_rx_priority_mask_add = (port) ?
NIG_REG_P1_RX_COS1_PRIORITY_MASK :
NIG_REG_P0_RX_COS1_PRIORITY_MASK;
break;
case 2:
nig_reg_rx_priority_mask_add = (port) ?
NIG_REG_P1_RX_COS2_PRIORITY_MASK :
NIG_REG_P0_RX_COS2_PRIORITY_MASK;
break;
case 3:
if (port)
return -EINVAL;
nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
break;
case 4:
if (port)
return -EINVAL;
nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
break;
case 5:
if (port)
return -EINVAL;
nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
break;
}
REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask);
return 0;
}
static void bnx2x_update_mng(struct link_params *params, u32 link_status)
{
struct bnx2x *bp = params->bp;
REG_WR(bp, params->shmem_base +
offsetof(struct shmem_region,
port_mb[params->port].link_status), link_status);
}
static void bnx2x_update_link_attr(struct link_params *params, u32 link_attr)
{
struct bnx2x *bp = params->bp;
if (SHMEM2_HAS(bp, link_attr_sync))
REG_WR(bp, params->shmem2_base +
offsetof(struct shmem2_region,
link_attr_sync[params->port]), link_attr);
}
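/* Program the NIG side of flow control. When PFC is enabled, plain pause
 * and LLFC are turned off and per-priority pause handling is enabled
 * instead; otherwise pause/LLFC come from nig_params (defaulting to plain
 * pause). The per-COS RX priority masks and the high/low priority classes
 * are programmed from nig_params when provided.
 */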
static void bnx2x_update_pfc_nig(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *nig_params)
{
u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
u32 llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0;
u32 pkt_priority_to_cos = 0;
struct bnx2x *bp = params->bp;
u8 port = params->port;
int set_pfc = params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED;
DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
NIG_REG_LLH0_XCM_MASK);
if (set_pfc) {
pause_enable = 0;
llfc_out_en = 0;
llfc_enable = 0;
if (CHIP_IS_E3(bp))
ppp_enable = 0;
else
ppp_enable = 1;
xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
xcm_out_en = 0;
hwpfc_enable = 1;
} else {
if (nig_params) {
llfc_out_en = nig_params->llfc_out_en;
llfc_enable = nig_params->llfc_enable;
pause_enable = nig_params->pause_enable;
} else
pause_enable = 1;
xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
xcm_out_en = 1;
}
if (CHIP_IS_E3(bp))
REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN :
NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
NIG_REG_LLFC_ENABLE_0, llfc_enable);
REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
NIG_REG_PAUSE_ENABLE_0, pause_enable);
REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
NIG_REG_PPP_ENABLE_0, ppp_enable);
REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
NIG_REG_LLH0_XCM_MASK, xcm_mask);
REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
NIG_REG_XCM0_OUT_EN, xcm_out_en);
REG_WR(bp, port ? NIG_REG_P1_HWPFC_ENABLE :
NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable);
if (nig_params) {
u8 i = 0;
pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
bnx2x_pfc_nig_rx_priority_mask(bp, i,
nig_params->rx_cos_priority_mask[i], port);
REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
nig_params->llfc_high_priority_classes);
REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
nig_params->llfc_low_priority_classes);
}
REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
NIG_REG_P0_PKT_PRIORITY_TO_COS,
pkt_priority_to_cos);
}
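/* Re-apply the flow control / PFC configuration at runtime: update the PFC
 * flag in link_status for the MCP, reprogram the NIG, and if the link is up
 * reprogram whichever MAC block is currently active (XMAC on E3, otherwise
 * EMAC or BMAC1/BMAC2).
 */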
int bnx2x_update_pfc(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *pfc_params)
{
u32 val;
struct bnx2x *bp = params->bp;
u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
vars->link_status |= LINK_STATUS_PFC_ENABLED;
else
vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
bnx2x_update_mng(params, vars->link_status);
bnx2x_update_pfc_nig(params, vars, pfc_params);
if (!vars->link_up)
return 0;
DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
if (CHIP_IS_E3(bp)) {
if (vars->mac_type == MAC_TYPE_XMAC)
bnx2x_update_pfc_xmac(params, vars, 0);
} else {
val = REG_RD(bp, MISC_REG_RESET_REG_2);
if ((val &
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
== 0) {
DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
bnx2x_emac_enable(params, vars, 0);
return 0;
}
if (CHIP_IS_E2(bp))
bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
else
bnx2x_update_pfc_bmac1(params, vars);
val = 0;
if ((params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) ||
(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val = 1;
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
}
return 0;
}
static int bnx2x_bmac1_enable(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM;
u32 wb_data[2];
u32 val;
DP(NETIF_MSG_LINK, "Enabling BigMAC1\n");
wb_data[0] = 0x3c;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
wb_data, 2);
wb_data[0] = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
params->mac_addr[5]);
wb_data[1] = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
val = 0x3;
if (is_lb) {
val |= 0x4;
DP(NETIF_MSG_LINK, "enable bmac loopback\n");
}
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
bnx2x_update_pfc_bmac1(params, vars);
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
wb_data, 2);
return 0;
}
static int bnx2x_bmac2_enable(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM;
u32 wb_data[2];
DP(NETIF_MSG_LINK, "Enabling BigMAC2\n");
wb_data[0] = 0;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
udelay(30);
wb_data[0] = 0x3c;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
wb_data, 2);
udelay(30);
wb_data[0] = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
params->mac_addr[5]);
wb_data[1] = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
wb_data, 2);
udelay(30);
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
wb_data, 2);
udelay(30);
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
udelay(30);
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
udelay(30);
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD - 2;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
udelay(30);
bnx2x_update_pfc_bmac2(params, vars, is_lb);
return 0;
}
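/* Take the BigMAC out of reset (optionally pulsing its reset first),
 * configure it through the chip-specific BMAC1/BMAC2 helper and switch the
 * NIG data path from the EMAC to the BMAC.
 */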
static int bnx2x_bmac_enable(struct link_params *params,
struct link_vars *vars,
u8 is_lb, u8 reset_bmac)
{
int rc = 0;
u8 port = params->port;
struct bnx2x *bp = params->bp;
u32 val;
if (reset_bmac) {
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
usleep_range(1000, 2000);
}
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
if (CHIP_IS_E2(bp))
rc = bnx2x_bmac2_enable(params, vars, is_lb);
else
rc = bnx2x_bmac1_enable(params, vars, is_lb);
REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
val = 0;
if ((params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) ||
(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val = 1;
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
vars->mac_type = MAC_TYPE_BMAC;
return rc;
}
static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en)
{
u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM;
u32 wb_data[2];
u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
if (CHIP_IS_E2(bp))
bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL;
else
bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL;
if (REG_RD(bp, MISC_REG_RESET_REG_2) &
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
nig_bmac_enable) {
REG_RD_DMAE(bp, bmac_addr, wb_data, 2);
if (en)
wb_data[0] |= BMAC_CONTROL_RX_ENABLE;
else
wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
REG_WR_DMAE(bp, bmac_addr, wb_data, 2);
usleep_range(1000, 2000);
}
}
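/* Update the PBF credits for a new line speed: wait for the in-flight
 * credit to drain back to the init value, then either enable PBF pause
 * (for 2.5G and below, or when RX flow control is on) or program the
 * arbitration threshold and the 10G initial credit, and re-init the port's
 * PBF.
 */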
static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
u32 line_speed)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
u32 init_crd, crd;
u32 count = 1000;
REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
while ((init_crd != crd) && count) {
usleep_range(5000, 10000);
crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
count--;
}
crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
if (init_crd != crd) {
DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n",
init_crd, crd);
return -EINVAL;
}
if (flow_ctrl & BNX2X_FLOW_CTRL_RX ||
line_speed == SPEED_10 ||
line_speed == SPEED_100 ||
line_speed == SPEED_1000 ||
line_speed == SPEED_2500) {
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
init_crd = 778;
} else {
u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
ETH_OVERHEAD)/16;
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
switch (line_speed) {
case SPEED_10000:
init_crd = thresh + 553 - 22;
break;
default:
DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
line_speed);
return -EINVAL;
}
}
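/* Program the new init credit, pulse the PBF init and re-enable new task processing */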
REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
line_speed, init_crd);
REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
usleep_range(5000, 10000);
REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
return 0;
}
static u32 bnx2x_get_emac_base(struct bnx2x *bp,
u32 mdc_mdio_access, u8 port)
{
u32 emac_base = 0;
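/* Select which EMAC block drives the MDC/MDIO pins, accounting for port swap */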
switch (mdc_mdio_access) {
case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE:
break;
case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0:
if (REG_RD(bp, NIG_REG_PORT_SWAP))
emac_base = GRCBASE_EMAC1;
else
emac_base = GRCBASE_EMAC0;
break;
case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1:
if (REG_RD(bp, NIG_REG_PORT_SWAP))
emac_base = GRCBASE_EMAC0;
else
emac_base = GRCBASE_EMAC1;
break;
case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH:
emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
break;
case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED:
emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
break;
default:
break;
}
return emac_base;
}
static int bnx2x_cl22_write(struct bnx2x *bp,
struct bnx2x_phy *phy,
u16 reg, u16 val)
{
u32 tmp, mode;
u8 i;
int rc = 0;
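/* Temporarily drop to Clause 22 mode for this access; the original mode is restored at the end */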
mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
mode & ~EMAC_MDIO_MODE_CLAUSE_45);
tmp = ((phy->addr << 21) | (reg << 16) | val |
EMAC_MDIO_COMM_COMMAND_WRITE_22 |
EMAC_MDIO_COMM_START_BUSY);
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
for (i = 0; i < 50; i++) {
udelay(10);
tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
}
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "write phy register failed\n");
rc = -EFAULT;
}
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
return rc;
}
static int bnx2x_cl22_read(struct bnx2x *bp,
struct bnx2x_phy *phy,
u16 reg, u16 *ret_val)
{
u32 val, mode;
u16 i;
int rc = 0;
mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
mode & ~EMAC_MDIO_MODE_CLAUSE_45);
val = ((phy->addr << 21) | (reg << 16) |
EMAC_MDIO_COMM_COMMAND_READ_22 |
EMAC_MDIO_COMM_START_BUSY);
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
for (i = 0; i < 50; i++) {
udelay(10);
val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
*ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
udelay(5);
break;
}
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "read phy register failed\n");
*ret_val = 0;
rc = -EFAULT;
}
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
return rc;
}
static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
u8 devad, u16 reg, u16 *ret_val)
{
u32 val;
u16 i;
int rc = 0;
u32 chip_id;
if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
}
if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
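/* Address phase: latch the register address in the PHY */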
val = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
for (i = 0; i < 50; i++) {
udelay(10);
val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
}
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "read phy register failed\n");
netdev_err(bp->dev, "MDC/MDIO access timeout\n");
*ret_val = 0;
rc = -EFAULT;
} else {
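/* Data phase: issue the read command and poll for completion */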
val = ((phy->addr << 21) | (devad << 16) |
EMAC_MDIO_COMM_COMMAND_READ_45 |
EMAC_MDIO_COMM_START_BUSY);
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
for (i = 0; i < 50; i++) {
udelay(10);
val = REG_RD(bp, phy->mdio_ctrl +
EMAC_REG_EMAC_MDIO_COMM);
if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
*ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
break;
}
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "read phy register failed\n");
netdev_err(bp->dev, "MDC/MDIO access timeout\n");
*ret_val = 0;
rc = -EFAULT;
}
}
if (phy->flags & FLAGS_MDC_MDIO_WA) {
phy->flags ^= FLAGS_DUMMY_READ;
if (phy->flags & FLAGS_DUMMY_READ) {
u16 temp_val;
bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
}
}
if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
return rc;
}
static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
u8 devad, u16 reg, u16 val)
{
u32 tmp;
u8 i;
int rc = 0;
u32 chip_id;
if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
}
if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
tmp = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
for (i = 0; i < 50; i++) {
udelay(10);
tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
}
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "write phy register failed\n");
netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
} else {
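/* Data phase: issue the write command and poll for completion */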
tmp = ((phy->addr << 21) | (devad << 16) | val |
EMAC_MDIO_COMM_COMMAND_WRITE_45 |
EMAC_MDIO_COMM_START_BUSY);
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
for (i = 0; i < 50; i++) {
udelay(10);
tmp = REG_RD(bp, phy->mdio_ctrl +
EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
}
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "write phy register failed\n");
netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
}
}
if (phy->flags & FLAGS_MDC_MDIO_WA) {
phy->flags ^= FLAGS_DUMMY_READ;
if (phy->flags & FLAGS_DUMMY_READ) {
u16 temp_val;
bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
}
}
if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
return rc;
}
static u8 bnx2x_eee_has_cap(struct link_params *params)
{
struct bnx2x *bp = params->bp;
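/* eee_status is only valid when the shmem2 region is large enough to contain it */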
if (REG_RD(bp, params->shmem2_base) <=
offsetof(struct shmem2_region, eee_status[params->port]))
return 0;
return 1;
}
static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
{
switch (nvram_mode) {
case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
*idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
break;
case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
*idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
break;
case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
*idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
break;
default:
*idle_timer = 0;
break;
}
return 0;
}
static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
{
switch (idle_timer) {
case EEE_MODE_NVRAM_BALANCED_TIME:
*nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
break;
case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
*nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
break;
case EEE_MODE_NVRAM_LATENCY_TIME:
*nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
break;
default:
*nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
break;
}
return 0;
}
static u32 bnx2x_eee_calc_timer(struct link_params *params)
{
u32 eee_mode, eee_idle;
struct bnx2x *bp = params->bp;
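/* Prefer the idle timer encoded in eee_mode when the NVRAM override is set; otherwise derive it from the NVRAM power mode */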
if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
} else {
if (bnx2x_eee_nvram_to_time(params->eee_mode &
EEE_MODE_NVRAM_MASK,
&eee_idle))
return 0;
}
} else {
eee_mode = ((REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].
eee_power_mode)) &
PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
return 0;
}
return eee_idle;
}
static int bnx2x_eee_set_timers(struct link_params *params,
struct link_vars *vars)
{
u32 eee_idle = 0, eee_mode;
struct bnx2x *bp = params->bp;
eee_idle = bnx2x_eee_calc_timer(params);
if (eee_idle) {
REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
eee_idle);
} else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
(params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
(params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
return -EINVAL;
}
vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
eee_idle >>= 4;
vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
SHMEM_EEE_TIME_OUTPUT_BIT;
} else {
if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
return -EINVAL;
vars->eee_status |= eee_mode;
}
return 0;
}
static int bnx2x_eee_initial_config(struct link_params *params,
struct link_vars *vars, u8 mode)
{
vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
if (params->eee_mode & EEE_MODE_ENABLE_LPI)
vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
else
vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
if (params->eee_mode & EEE_MODE_ADV_LPI)
vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
else
vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
return bnx2x_eee_set_timers(params, vars);
}
static int bnx2x_eee_disable(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0);
vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
return 0;
}
static int bnx2x_eee_advertise(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars, u8 modes)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
if (modes & SHMEM_EEE_10G_ADV) {
DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
val |= 0x8;
}
if (modes & SHMEM_EEE_1G_ADV) {
DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n");
val |= 0x4;
}
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val);
vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT);
return 0;
}
static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
{
struct bnx2x *bp = params->bp;
if (bnx2x_eee_has_cap(params))
REG_WR(bp, params->shmem2_base +
offsetof(struct shmem2_region,
eee_status[params->port]), eee_status);
}
static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 adv = 0, lp = 0;
u32 lp_adv = 0;
u8 neg = 0;
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv);
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp);
if (lp & 0x2) {
lp_adv |= SHMEM_EEE_100M_ADV;
if (adv & 0x2) {
if (vars->line_speed == SPEED_100)
neg = 1;
DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n");
}
}
if (lp & 0x14) {
lp_adv |= SHMEM_EEE_1G_ADV;
if (adv & 0x14) {
if (vars->line_speed == SPEED_1000)
neg = 1;
DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n");
}
}
if (lp & 0x68) {
lp_adv |= SHMEM_EEE_10G_ADV;
if (adv & 0x68) {
if (vars->line_speed == SPEED_10000)
neg = 1;
DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n");
}
}
vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT);
if (neg) {
DP(NETIF_MSG_LINK, "EEE is active\n");
vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
}
}
static void bnx2x_bsc_module_sel(struct link_params *params)
{
int idx;
u32 board_cfg, sfp_ctrl;
u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH];
struct bnx2x *bp = params->bp;
u8 port = params->port;
board_cfg = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
dev_info.shared_hw_config.board));
i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK;
i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >>
SHARED_HW_CFG_E3_I2C_MUX1_SHIFT;
sfp_ctrl = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
dev_info.port_hw_config[port].e3_cmn_pin_cfg));
i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0;
i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0;
DP(NETIF_MSG_LINK, "Setting BSC switch\n");
for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++)
bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]);
}
static int bnx2x_bsc_read(struct link_params *params,
struct bnx2x *bp,
u8 sl_devid,
u16 sl_addr,
u8 lc_addr,
u8 xfer_cnt,
u32 *data_array)
{
u64 t0, delta;
u32 val, i;
int rc = 0;
if (xfer_cnt > 16) {
DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
xfer_cnt);
return -EINVAL;
}
bnx2x_bsc_module_sel(params);
xfer_cnt = 16 - lc_addr;
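/* Enable the IMC block */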
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
val |= MCPR_IMC_COMMAND_ENABLE;
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
val = (sl_devid << 16) | sl_addr;
REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
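/* Zero-byte write to set the transfer address, then poll until the controller is ready */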
val = (MCPR_IMC_COMMAND_ENABLE) |
(MCPR_IMC_COMMAND_WRITE_OP <<
MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
(lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
t0 = ktime_get_ns();
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
delta = ktime_get_ns() - t0;
if (delta > 10 * NSEC_PER_MSEC) {
DP(NETIF_MSG_LINK, "wr 0 byte timed out after %Lu ns\n",
delta);
rc = -EFAULT;
break;
}
usleep_range(10, 20);
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
}
if (rc == -EFAULT)
return rc;
val = (MCPR_IMC_COMMAND_ENABLE) |
(MCPR_IMC_COMMAND_READ_OP <<
MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
(lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) |
(xfer_cnt);
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
t0 = ktime_get_ns();
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
delta = ktime_get_ns() - t0;
if (delta > 10 * NSEC_PER_MSEC) {
DP(NETIF_MSG_LINK, "rd op timed out after %Lu ns\n",
delta);
rc = -EFAULT;
break;
}
usleep_range(10, 20);
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
}
if (rc == -EFAULT)
return rc;
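/* Copy out the returned data, byte-swapping on big-endian hosts */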
for (i = (lc_addr >> 2); i < 4; i++) {
data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4));
#ifdef __BIG_ENDIAN
data_array[i] = ((data_array[i] & 0x000000ff) << 24) |
((data_array[i] & 0x0000ff00) << 8) |
((data_array[i] & 0x00ff0000) >> 8) |
((data_array[i] & 0xff000000) >> 24);
#endif
}
return rc;
}
static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy,
u8 devad, u16 reg, u16 or_val)
{
u16 val;
bnx2x_cl45_read(bp, phy, devad, reg, &val);
bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
}
static void bnx2x_cl45_read_and_write(struct bnx2x *bp,
struct bnx2x_phy *phy,
u8 devad, u16 reg, u16 and_val)
{
u16 val;
bnx2x_cl45_read(bp, phy, devad, reg, &val);
bnx2x_cl45_write(bp, phy, devad, reg, val & and_val);
}
int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 *ret_val)
{
u8 phy_index;
for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
if (params->phy[phy_index].addr == phy_addr) {
return bnx2x_cl45_read(params->bp,
&params->phy[phy_index], devad,
reg, ret_val);
}
}
return -EINVAL;
}
int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 val)
{
u8 phy_index;
for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
if (params->phy[phy_index].addr == phy_addr) {
return bnx2x_cl45_write(params->bp,
&params->phy[phy_index], devad,
reg, val);
}
}
return -EINVAL;
}
static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
struct link_params *params)
{
u8 lane = 0;
struct bnx2x *bp = params->bp;
u32 path_swap, path_swap_ovr;
u8 path, port;
path = BP_PATH(bp);
port = params->port;
if (bnx2x_is_4_port_mode(bp)) {
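/* In four-port mode, apply the path and port swap (or their overrides) before computing the lane */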
u32 port_swap, port_swap_ovr;
path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
if (path_swap_ovr & 0x1)
path_swap = (path_swap_ovr & 0x2);
else
path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP);
if (path_swap)
path = path ^ 1;
port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
if (port_swap_ovr & 0x1)
port_swap = (port_swap_ovr & 0x2);
else
port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP);
if (port_swap)
port = port ^ 1;
lane = (port<<1) + path;
} else {
path_swap_ovr =
REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
if (path_swap_ovr & 0x1) {
path_swap = (path_swap_ovr & 0x2);
} else {
path_swap =
REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP);
}
if (path_swap)
path = path ^ 1;
lane = path << 1 ;
}
return lane;
}
static void bnx2x_set_aer_mmd(struct link_params *params,
struct bnx2x_phy *phy)
{
u32 ser_lane;
u16 offset, aer_val;
struct bnx2x *bp = params->bp;
ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
(phy->addr + ser_lane) : 0;
if (USES_WARPCORE(bp)) {
aer_val = bnx2x_get_warpcore_lane(phy, params);
if (phy->flags & FLAGS_WC_DUAL_MODE)
aer_val = (aer_val >> 1) | 0x200;
} else if (CHIP_IS_E2(bp))
aer_val = 0x3800 + offset - 1;
else
aer_val = 0x3800 + offset;
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, aer_val);
}
static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
{
u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1);
REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
udelay(500);
REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
udelay(500);
REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0);
}
static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
{
u32 val;
DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n");
val = SERDES_RESET_BITS << (port*16);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
bnx2x_set_serdes_access(bp, port);
REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
DEFAULT_PHY_DEV_ADDR);
}
static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy,
struct link_params *params,
u32 action)
{
struct bnx2x *bp = params->bp;
switch (action) {
case PHY_INIT:
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0);
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
phy->def_md_devad);
break;
}
}
static void bnx2x_xgxs_deassert(struct link_params *params)
{
struct bnx2x *bp = params->bp;
u8 port;
u32 val;
DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n");
port = params->port;
val = XGXS_RESET_BITS << (port*16);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
bnx2x_xgxs_specific_func(&params->phy[INT_PHY], params,
PHY_INIT);
}
static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
struct link_params *params, u16 *ieee_fc)
{
struct bnx2x *bp = params->bp;
*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
switch (phy->req_flow_ctrl) {
case BNX2X_FLOW_CTRL_AUTO:
switch (params->req_fc_auto_adv) {
case BNX2X_FLOW_CTRL_BOTH:
case BNX2X_FLOW_CTRL_RX:
*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
break;
case BNX2X_FLOW_CTRL_TX:
*ieee_fc |=
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
default:
break;
}
break;
case BNX2X_FLOW_CTRL_TX:
*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
case BNX2X_FLOW_CTRL_RX:
case BNX2X_FLOW_CTRL_BOTH:
*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
break;
case BNX2X_FLOW_CTRL_NONE:
default:
*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
break;
}
DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
}
static void set_phy_vars(struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 actual_phy_idx, phy_index, link_cfg_idx;
u8 phy_config_swapped = params->multi_phy_config &
PORT_HW_CFG_PHY_SWAPPED_ENABLED;
for (phy_index = INT_PHY; phy_index < params->num_phys;
phy_index++) {
link_cfg_idx = LINK_CONFIG_IDX(phy_index);
actual_phy_idx = phy_index;
if (phy_config_swapped) {
if (phy_index == EXT_PHY1)
actual_phy_idx = EXT_PHY2;
else if (phy_index == EXT_PHY2)
actual_phy_idx = EXT_PHY1;
}
params->phy[actual_phy_idx].req_flow_ctrl =
params->req_flow_ctrl[link_cfg_idx];
params->phy[actual_phy_idx].req_line_speed =
params->req_line_speed[link_cfg_idx];
params->phy[actual_phy_idx].speed_cap_mask =
params->speed_cap_mask[link_cfg_idx];
params->phy[actual_phy_idx].req_duplex =
params->req_duplex[link_cfg_idx];
if (params->req_line_speed[link_cfg_idx] ==
SPEED_AUTO_NEG)
vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
" speed_cap_mask %x\n",
params->phy[actual_phy_idx].req_flow_ctrl,
params->phy[actual_phy_idx].req_line_speed,
params->phy[actual_phy_idx].speed_cap_mask);
}
}
static void bnx2x_ext_phy_set_pause(struct link_params *params,
struct bnx2x_phy *phy,
struct link_vars *vars)
{
u16 val;
struct bnx2x *bp = params->bp;
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
if ((vars->ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
}
if ((vars->ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
}
DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val);
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
}
static void bnx2x_pause_resolve(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars,
u32 pause_result)
{
struct bnx2x *bp = params->bp;
switch (pause_result) {
case 0xb:
DP(NETIF_MSG_LINK, "Flow Control: TX only\n");
vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
break;
case 0xe:
DP(NETIF_MSG_LINK, "Flow Control: RX only\n");
vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
break;
case 0x5:
case 0x7:
case 0xd:
case 0xf:
if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
DP(NETIF_MSG_LINK, "Flow Control: RX & TX\n");
vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
} else {
DP(NETIF_MSG_LINK, "Flow Control: RX only\n");
vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
}
break;
default:
DP(NETIF_MSG_LINK, "Flow Control: None\n");
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
break;
}
if (pause_result & (1<<0))
vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
if (pause_result & (1<<1))
vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
}
static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
u16 ld_pause;
u16 lp_pause;
u16 pause_result;
struct bnx2x *bp = params->bp;
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
bnx2x_cl22_read(bp, phy, 0x4, &ld_pause);
bnx2x_cl22_read(bp, phy, 0x5, &lp_pause);
} else if (CHIP_IS_E3(bp) &&
SINGLE_MEDIA_DIRECT(params)) {
u8 lane = bnx2x_get_warpcore_lane(phy, params);
u16 gp_status, gp_mask;
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4,
&gp_status);
gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL |
MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) <<
lane;
if ((gp_status & gp_mask) == gp_mask) {
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
MDIO_AN_REG_ADV_PAUSE, &ld_pause);
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
} else {
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
MDIO_AN_REG_CL37_FC_LD, &ld_pause);
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
MDIO_AN_REG_CL37_FC_LP, &lp_pause);
ld_pause = ((ld_pause &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
<< 3);
lp_pause = ((lp_pause &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
<< 3);
}
} else {
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_ADV_PAUSE, &ld_pause);
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
}
pause_result = (ld_pause &
MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
pause_result |= (lp_pause &
MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", pause_result);
bnx2x_pause_resolve(phy, params, vars, pause_result);
}
static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
u8 ret = 0;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
if (phy->req_line_speed == SPEED_AUTO_NEG)
bnx2x_ext_phy_update_adv_fc(phy, params, vars);
vars->flow_ctrl = phy->req_flow_ctrl;
} else if (phy->req_line_speed != SPEED_AUTO_NEG)
vars->flow_ctrl = params->req_fc_auto_adv;
else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
ret = 1;
bnx2x_ext_phy_update_adv_fc(phy, params, vars);
}
return ret;
}
#define WC_TX_DRIVER(post2, idriver, ipre, ifir) \
((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
(idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
(ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET) | \
(ifir << MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET))
#define WC_TX_FIR(post, main, pre) \
((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
(main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \
(pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET))
static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 i;
static struct bnx2x_reg_set reg_set[] = {
{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0xa157},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xcbe2},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0x7537},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0xa157},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xcbe2},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0x7537},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000a},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6400},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0620},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0157},
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x6464},
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x3150},
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x3150},
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157},
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620}
};
DP(NETIF_MSG_LINK, "Enabling 20G-KR2\n");
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6));
for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
bnx2x_update_link_attr(params, params->link_attr_sync);
}
static void bnx2x_disable_kr2(struct link_params *params,
struct link_vars *vars,
struct bnx2x_phy *phy)
{
struct bnx2x *bp = params->bp;
int i;
static struct bnx2x_reg_set reg_set[] = {
{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
};
DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
bnx2x_update_link_attr(params, params->link_attr_sync);
vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
}
static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c);
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
}
static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 lane = bnx2x_get_warpcore_lane(phy, params);
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, lane);
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
bnx2x_set_aer_mmd(params, phy);
}
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
u16 lane, i, cl72_ctrl, an_adv = 0, val;
u32 wc_lane_config;
struct bnx2x *bp = params->bp;
static struct bnx2x_reg_set reg_set[] = {
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
{MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
{MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
{MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0},
{MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0},
};
DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
cl72_ctrl &= 0x08ff;
cl72_ctrl |= 0x3800;
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
if (((vars->line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
(vars->line_speed == SPEED_1000)) {
u16 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
an_adv |= (1<<5);
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
DP(NETIF_MSG_LINK, "Advertize 1G\n");
}
if (((vars->line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
(vars->line_speed == SPEED_10000)) {
an_adv |= (1<<7);
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0);
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
bnx2x_set_aer_mmd(params, phy);
DP(NETIF_MSG_LINK, "Advertize 10G\n");
}
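/* Set the TX driver coefficients for this lane (and the paired lane in dual mode) */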
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
WC_TX_DRIVER(0x02, 0x06, 0x09, 0));
if (phy->flags & FLAGS_WC_DUAL_MODE)
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
WC_TX_DRIVER(0x02, 0x06, 0x09, 0));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
0x03f0);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL,
0x03f0);
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv);
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
(MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
if (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_hw_config[params->port].default_cfg)) &
PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL,
1);
DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
}
bnx2x_ext_phy_set_pause(params, phy, vars);
vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
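/* Enable 20G-KR2 when 20G is requested or within the speed capability mask; otherwise make sure KR2 is disabled */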
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
(phy->req_line_speed == SPEED_20000)) {
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, lane);
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX1_PCI_CTRL + (0x10*lane),
(1<<11));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXS_X2_CONTROL3, 0x7);
bnx2x_set_aer_mmd(params, phy);
bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
} else {
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
wc_lane_config = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
shared_hw_config.wc_lane_config));
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val);
val |= 1 << 11;
if (wc_lane_config &
(SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane))
val |= 3 << 2;
else
val &= ~(3 << 2);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4),
val);
bnx2x_disable_kr2(params, vars, phy);
}
bnx2x_warpcore_restart_AN_KR(phy, params);
}
static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 val16, i, lane;
static struct bnx2x_reg_set reg_set[] = {
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
0x3f00},
{MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
{MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
{MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}
};
for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
lane = bnx2x_get_warpcore_lane(phy, params);
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
val16 &= ~(0x0011 << lane);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
val16 |= (0x0303 << (lane << 1));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
bnx2x_set_aer_mmd(params, phy);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX66_CONTROL, 0x9);
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, 0xF9);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
}
static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
struct link_params *params,
u8 is_xfi)
{
struct bnx2x *bp = params->bp;
u16 misc1_val, tap_val, tx_driver_val, lane, val;
u32 cfg_tap_val, tx_drv_brdct, tx_equal;
u32 ifir_val, ipost2_val, ipre_driver_val;
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1);
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_FX100_CTRL1, 0xFFFA);
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_FX100_CTRL3, 0x0080);
bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL4_MISC3, 0xFF7F);
bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
0xFFEE);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
((val | 0x0006) & 0xFFFE));
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val);
misc1_val &= ~(0x1f);
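/* XFI uses fixed TX FIR taps; for SFI the taps and driver settings may be overridden from NVRAM */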
if (is_xfi) {
misc1_val |= 0x5;
tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03, 0);
} else {
cfg_tap_val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_hw_config[params->port].
sfi_tap_values));
tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
misc1_val |= 0x9;
if (tx_equal)
tap_val = (u16)tx_equal;
else
tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
ifir_val = DEFAULT_TX_DRV_IFIR;
ipost2_val = DEFAULT_TX_DRV_POST2;
ipre_driver_val = DEFAULT_TX_DRV_IPRE_DRIVER;
tx_drv_brdct = DEFAULT_TX_DRV_BRDCT;
if (cfg_tap_val & (PORT_HW_CFG_TX_DRV_IFIR_MASK |
PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK |
PORT_HW_CFG_TX_DRV_POST2_MASK)) {
ifir_val = (cfg_tap_val &
PORT_HW_CFG_TX_DRV_IFIR_MASK) >>
PORT_HW_CFG_TX_DRV_IFIR_SHIFT;
ipre_driver_val = (cfg_tap_val &
PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK)
>> PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT;
ipost2_val = (cfg_tap_val &
PORT_HW_CFG_TX_DRV_POST2_MASK) >>
PORT_HW_CFG_TX_DRV_POST2_SHIFT;
}
if (cfg_tap_val & PORT_HW_CFG_TX_DRV_BROADCAST_MASK) {
tx_drv_brdct = (cfg_tap_val &
PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
}
tx_driver_val = WC_TX_DRIVER(ipost2_val, tx_drv_brdct,
ipre_driver_val, ifir_val);
}
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX_FIR_TAP,
tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
tx_driver_val);
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd);
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
bnx2x_warpcore_set_lpi_passthrough(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
0xFFFE);
bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x7FFF);
}
static void bnx2x_warpcore_set_20G_force_KR2(struct bnx2x_phy *phy,
struct link_params *params)
{
u16 val;
struct bnx2x *bp = params->bp;
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0);
bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~(1<<13));
bnx2x_set_aer_mmd(params, phy);
bnx2x_cl45_read_and_write(bp, phy, MDIO_PMA_DEVAD,
MDIO_WC_REG_PMD_KR_CONTROL, ~(1<<1));
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_AN_REG_CTRL, 0);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL73_USERB0_CTRL, &val);
val &= ~(1<<5);
val |= (1<<6);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL73_USERB0_CTRL, val);
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x1f);
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL4_MISC3, (1<<7));
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &val);
val &= ~(3<<14);
val |= (1<<15);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, val);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0x835A);
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0);
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, (1<<13));
bnx2x_set_aer_mmd(params, phy);
}
static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
struct bnx2x_phy *phy,
u16 lane)
{
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_SCW0, 0xE070);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_SCW1, 0xC0D0);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_SCW2, 0xA0B0);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_SCW3, 0x8090);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL4_MISC3, 0x8088);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX_FIR_TAP,
(WC_TX_FIR(0x12, 0x2d, 0x00) |
MDIO_WC_REG_TX_FIR_TAP_ENABLE));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
WC_TX_DRIVER(0x02, 0x02, 0x02, 0));
}
static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
struct link_params *params,
u8 fiber_mode,
u8 always_autoneg)
{
struct bnx2x *bp = params->bp;
u16 val16, digctrl_kx1, digctrl_kx2;
bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, ~(3<<13));
bnx2x_warpcore_set_lpi_passthrough(phy, params);
if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
0x1000);
DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
} else {
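/* Force the SGMII speed and duplex from the requested settings */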
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
val16 &= 0xcebf;
switch (phy->req_line_speed) {
case SPEED_10:
break;
case SPEED_100:
val16 |= 0x2000;
break;
case SPEED_1000:
val16 |= 0x0040;
break;
default:
DP(NETIF_MSG_LINK,
"Speed not supported: 0x%x\n", phy->req_line_speed);
return;
}
if (phy->req_duplex == DUPLEX_FULL)
val16 |= 0x0100;
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16);
DP(NETIF_MSG_LINK, "set SGMII force speed %d\n",
phy->req_line_speed);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
DP(NETIF_MSG_LINK, " (readback) %x\n", val16);
}
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1);
if (fiber_mode)
digctrl_kx1 = 1;
else
digctrl_kx1 &= 0xff4a;
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
digctrl_kx1);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
(digctrl_kx2 & ~(1<<2)));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
(digctrl_kx2 | (1<<2)));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
(digctrl_kx1 | 0x10));
}
static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
struct bnx2x_phy *phy,
u8 reset)
{
u16 val;
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC6, &val);
if (reset)
val |= 0xC000;
else
val &= 0x3FFF;
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC6, val);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC6, &val);
}
static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
struct link_params *params,
u16 lane)
{
struct bnx2x *bp = params->bp;
u16 i;
static struct bnx2x_reg_set wc_regs[] = {
{MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0},
{MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a},
{MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800},
{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008},
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
0x0195},
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
0x0007},
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
0x0002},
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000},
{MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000},
{MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040},
{MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140}
};
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, (3<<13));
for (i = 0; i < ARRAY_SIZE(wc_regs); i++)
bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
wc_regs[i].val);
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
}
static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
u32 chip_id,
u32 shmem_base, u8 port,
u8 *gpio_num, u8 *gpio_port)
{
u32 cfg_pin;
*gpio_num = 0;
*gpio_port = 0;
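/* On E3 the module-absent pin comes from NVRAM; older chips use GPIO3 on the given port */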
if (CHIP_IS_E3(bp)) {
cfg_pin = (REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
dev_info.port_hw_config[port].e3_sfp_ctrl)) &
PORT_HW_CFG_E3_MOD_ABS_MASK) >>
PORT_HW_CFG_E3_MOD_ABS_SHIFT;
if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
(cfg_pin > PIN_CFG_GPIO3_P1)) {
DP(NETIF_MSG_LINK,
"No cfg pin %x for module detect indication\n",
cfg_pin);
return -EINVAL;
}
*gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3;
*gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2;
} else {
*gpio_num = MISC_REGISTERS_GPIO_3;
*gpio_port = port;
}
return 0;
}
static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
u8 gpio_num, gpio_port;
u32 gpio_val;
if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id,
params->shmem_base, params->port,
&gpio_num, &gpio_port) != 0)
return 0;
gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
if (gpio_val == 0)
return 1;
else
return 0;
}
static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy,
struct link_params *params)
{
u16 gp2_status_reg0, lane;
struct bnx2x *bp = params->bp;
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_0,
&gp2_status_reg0);
return (gp2_status_reg0 >> (8+lane)) & 0x1;
}
static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u32 serdes_net_if;
u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
if (!vars->turn_to_run_wc_rt)
return;
if (vars->rx_tx_asic_rst) {
u16 lane = bnx2x_get_warpcore_lane(phy, params);
serdes_net_if = (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_hw_config[params->port].default_cfg)) &
PORT_HW_CFG_NET_SERDES_IF_MASK);
switch (serdes_net_if) {
case PORT_HW_CFG_NET_SERDES_IF_KR:
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1,
&gp_status1);
lnkup = (gp_status1 >> (8+lane)) & 0x1;
lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
if (lnkup_kr || lnkup) {
vars->rx_tx_asic_rst = 0;
} else {
bnx2x_warpcore_reset_lane(bp, phy, 1);
bnx2x_warpcore_reset_lane(bp, phy, 0);
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
vars->rx_tx_asic_rst--;
DP(NETIF_MSG_LINK, "0x%x retry left\n",
vars->rx_tx_asic_rst);
}
break;
default:
break;
}
}
}
static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy,
struct link_params *params)
{
u16 lane = bnx2x_get_warpcore_lane(phy, params);
struct bnx2x *bp = params->bp;
bnx2x_warpcore_clear_regs(phy, params, lane);
if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] ==
SPEED_10000) &&
(phy->media_type != ETH_PHY_SFP_1G_FIBER)) {
DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
bnx2x_warpcore_set_10G_XFI(phy, params, 0);
} else {
DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0);
}
}
static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
struct bnx2x_phy *phy,
u8 tx_en)
{
struct bnx2x *bp = params->bp;
u32 cfg_pin;
u8 port = params->port;
cfg_pin = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
dev_info.port_hw_config[port].e3_sfp_ctrl)) &
PORT_HW_CFG_E3_TX_LASER_MASK;
DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
}
static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u32 serdes_net_if;
u8 fiber_mode;
u16 lane = bnx2x_get_warpcore_lane(phy, params);
serdes_net_if = (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_hw_config[params->port].default_cfg)) &
PORT_HW_CFG_NET_SERDES_IF_MASK);
DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, "
"serdes_net_if = 0x%x\n",
vars->line_speed, serdes_net_if);
bnx2x_set_aer_mmd(params, phy);
bnx2x_warpcore_reset_lane(bp, phy, 1);
vars->phy_flags |= PHY_XGXS_FLAG;
if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
(phy->req_line_speed &&
((phy->req_line_speed == SPEED_100) ||
(phy->req_line_speed == SPEED_10)))) {
vars->phy_flags |= PHY_SGMII_FLAG;
DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
bnx2x_warpcore_clear_regs(phy, params, lane);
bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1);
} else {
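/* Not SGMII: configure the Warpcore according to the SerDes net interface type */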
switch (serdes_net_if) {
case PORT_HW_CFG_NET_SERDES_IF_KR:
if (params->loopback_mode != LOOPBACK_EXT)
bnx2x_warpcore_enable_AN_KR(phy, params, vars);
else {
DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
bnx2x_warpcore_set_10G_KR(phy, params, vars);
}
break;
case PORT_HW_CFG_NET_SERDES_IF_XFI:
bnx2x_warpcore_clear_regs(phy, params, lane);
if (vars->line_speed == SPEED_10000) {
DP(NETIF_MSG_LINK, "Setting 10G XFI\n");
bnx2x_warpcore_set_10G_XFI(phy, params, 1);
} else {
if (SINGLE_MEDIA_DIRECT(params)) {
DP(NETIF_MSG_LINK, "1G Fiber\n");
fiber_mode = 1;
} else {
DP(NETIF_MSG_LINK, "10/100/1G SGMII\n");
fiber_mode = 0;
}
bnx2x_warpcore_set_sgmii_speed(phy,
params,
fiber_mode,
0);
}
break;
case PORT_HW_CFG_NET_SERDES_IF_SFI:
if ((params->loopback_mode == LOOPBACK_NONE) ||
(params->loopback_mode == LOOPBACK_EXT)) {
if (bnx2x_is_sfp_module_plugged(phy, params))
bnx2x_sfp_module_detection(phy, params);
else
bnx2x_sfp_e3_set_transmitter(params,
phy, 1);
}
bnx2x_warpcore_config_sfi(phy, params);
break;
case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
if (vars->line_speed != SPEED_20000) {
DP(NETIF_MSG_LINK, "Speed not supported yet\n");
return;
}
DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n");
bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane);
bnx2x_sfp_module_detection(phy, params);
break;
case PORT_HW_CFG_NET_SERDES_IF_KR2:
if (!params->loopback_mode) {
bnx2x_warpcore_enable_AN_KR(phy, params, vars);
} else {
DP(NETIF_MSG_LINK, "Setting KR 20G-Force\n");
bnx2x_warpcore_set_20G_force_KR2(phy, params);
}
break;
default:
DP(NETIF_MSG_LINK,
"Unsupported Serdes Net Interface 0x%x\n",
serdes_net_if);
return;
}
}
bnx2x_warpcore_reset_lane(bp, phy, 0);
DP(NETIF_MSG_LINK, "Exit config init\n");
}
static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 val16, lane;
bnx2x_sfp_e3_set_transmitter(params, phy, 0);
bnx2x_set_mdio_emac_per_phy(bp, params);
bnx2x_set_aer_mmd(params, phy);
bnx2x_warpcore_reset_lane(bp, phy, 1);
bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0xBFFF);
bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0xfffe);
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0);
bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
~0x10);
bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL2, 0xff00);
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
val16 |= (0x11 << lane);
if (phy->flags & FLAGS_WC_DUAL_MODE)
val16 |= (0x22 << lane);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
val16 &= ~(0x0303 << (lane << 1));
val16 |= (0x0101 << (lane << 1));
if (phy->flags & FLAGS_WC_DUAL_MODE) {
val16 &= ~(0x0c0c << (lane << 1));
val16 |= (0x0404 << (lane << 1));
}
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
bnx2x_set_aer_mmd(params, phy);
}
static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 val16;
u32 lane;
DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
params->loopback_mode, phy->req_line_speed);
if (phy->req_line_speed < SPEED_10000 ||
phy->supported & SUPPORTED_20000baseKR2_Full) {
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0);
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
0x10);
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
val16 |= (1<<lane);
if (phy->flags & FLAGS_WC_DUAL_MODE)
val16 |= (2<<lane);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL2,
val16);
bnx2x_set_aer_mmd(params, phy);
} else {
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
0x4000);
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
}
}
static void bnx2x_sync_link(struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 link_10g_plus;
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
if (vars->link_up) {
DP(NETIF_MSG_LINK, "phy link up\n");
vars->phy_link_up = 1;
vars->duplex = DUPLEX_FULL;
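/* Derive line speed and duplex from the link status word */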
switch (vars->link_status &
LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
case LINK_10THD:
vars->duplex = DUPLEX_HALF;
fallthrough;
case LINK_10TFD:
vars->line_speed = SPEED_10;
break;
case LINK_100TXHD:
vars->duplex = DUPLEX_HALF;
fallthrough;
case LINK_100T4:
case LINK_100TXFD:
vars->line_speed = SPEED_100;
break;
case LINK_1000THD:
vars->duplex = DUPLEX_HALF;
fallthrough;
case LINK_1000TFD:
vars->line_speed = SPEED_1000;
break;
case LINK_2500THD:
vars->duplex = DUPLEX_HALF;
fallthrough;
case LINK_2500TFD:
vars->line_speed = SPEED_2500;
break;
case LINK_10GTFD:
vars->line_speed = SPEED_10000;
break;
case LINK_20GTFD:
vars->line_speed = SPEED_20000;
break;
default:
break;
}
vars->flow_ctrl = 0;
if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
if (!vars->flow_ctrl)
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
if (vars->line_speed &&
((vars->line_speed == SPEED_10) ||
(vars->line_speed == SPEED_100))) {
vars->phy_flags |= PHY_SGMII_FLAG;
} else {
vars->phy_flags &= ~PHY_SGMII_FLAG;
}
if (vars->line_speed &&
USES_WARPCORE(bp) &&
(vars->line_speed == SPEED_1000))
vars->phy_flags |= PHY_SGMII_FLAG;
link_10g_plus = (vars->line_speed >= SPEED_10000);
if (link_10g_plus) {
if (USES_WARPCORE(bp))
vars->mac_type = MAC_TYPE_XMAC;
else
vars->mac_type = MAC_TYPE_BMAC;
} else {
if (USES_WARPCORE(bp))
vars->mac_type = MAC_TYPE_UMAC;
else
vars->mac_type = MAC_TYPE_EMAC;
}
} else {
DP(NETIF_MSG_LINK, "phy link down\n");
vars->phy_link_up = 0;
vars->line_speed = 0;
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
vars->mac_type = MAC_TYPE_NONE;
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
if (vars->link_status & LINK_STATUS_SFP_TX_FAULT)
vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG;
}
}
void bnx2x_link_status_update(struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
u32 sync_offset, media_types;
set_phy_vars(params, vars);
vars->link_status = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
port_mb[port].link_status));
if (params->loopback_mode != LOOPBACK_NONE &&
params->loopback_mode != LOOPBACK_EXT)
vars->link_status |= LINK_STATUS_LINK_UP;
if (bnx2x_eee_has_cap(params))
vars->eee_status = REG_RD(bp, params->shmem2_base +
offsetof(struct shmem2_region,
eee_status[params->port]));
vars->phy_flags = PHY_XGXS_FLAG;
bnx2x_sync_link(params, vars);
sync_offset = params->shmem_base +
offsetof(struct shmem_region,
dev_info.port_hw_config[port].media_type);
media_types = REG_RD(bp, sync_offset);
params->phy[INT_PHY].media_type =
(media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT;
params->phy[EXT_PHY1].media_type =
(media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >>
PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT;
params->phy[EXT_PHY2].media_type =
(media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT;
DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types);
sync_offset = params->shmem_base +
offsetof(struct shmem_region,
dev_info.port_hw_config[port].aeu_int_mask);
vars->aeu_int_mask = REG_RD(bp, sync_offset);
if (vars->link_status & LINK_STATUS_PFC_ENABLED)
params->feature_config_flags |=
FEATURE_CONFIG_PFC_ENABLED;
else
params->feature_config_flags &=
~FEATURE_CONFIG_PFC_ENABLED;
if (SHMEM2_HAS(bp, link_attr_sync))
params->link_attr_sync = SHMEM2_RD(bp,
link_attr_sync[params->port]);
DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n",
vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
vars->line_speed, vars->duplex, vars->flow_ctrl);
}
static void bnx2x_set_master_ln(struct link_params *params,
struct bnx2x_phy *phy)
{
struct bnx2x *bp = params->bp;
u16 new_master_ln, ser_lane;
ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
&new_master_ln);
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
(new_master_ln | ser_lane));
}
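/* Self-clearing reset of the XGXS/SerDes unicore via the MII control
 * register; polls up to MDIO_ACCESS_TIMEOUT times for the reset bit to
 * clear and returns -EINVAL if it never does.
 */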
static int bnx2x_reset_unicore(struct link_params *params,
struct bnx2x_phy *phy,
u8 set_serdes)
{
struct bnx2x *bp = params->bp;
u16 mii_control;
u16 i;
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
(mii_control |
MDIO_COMBO_IEEO_MII_CONTROL_RESET));
if (set_serdes)
bnx2x_set_serdes_access(bp, params->port);
for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
udelay(5);
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
&mii_control);
if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
udelay(5);
return 0;
}
}
netdev_err(bp->dev, "Warning: PHY was not initialized,"
" Port %d\n",
params->port);
DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
return -EINVAL;
}
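/* Program RX/TX lane swapping from lane_config. 0x1b encodes the identity
 * mapping (lanes 0-1-2-3), in which case swapping is left disabled.
 */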
static void bnx2x_set_swap_lanes(struct link_params *params,
struct bnx2x_phy *phy)
{
struct bnx2x *bp = params->bp;
u16 rx_lane_swap, tx_lane_swap;
rx_lane_swap = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
tx_lane_swap = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
if (rx_lane_swap != 0x1b) {
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_RX_LN_SWAP,
(rx_lane_swap |
MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
} else {
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
}
if (tx_lane_swap != 0x1b) {
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TX_LN_SWAP,
(tx_lane_swap |
MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
} else {
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
}
}
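/* Enable or disable 1G parallel detection according to the speed
 * capability mask, and set up 10G parallel detection for direct XGXS
 * configurations.
 */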
static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 control2;
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
&control2);
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
else
control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
phy->speed_cap_mask, control2);
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
control2);
if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
(phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
DP(NETIF_MSG_LINK, "XGXS\n");
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
&control2);
control2 |=
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
control2);
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
}
}
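/* Configure CL37 autoneg, signal-detect and BAM next-page behaviour, and
 * optionally enable CL73 autoneg with the advertised 1G-KX/10G-KX4
 * abilities.
 */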
static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars,
u8 enable_cl73)
{
struct bnx2x *bp = params->bp;
u16 reg_val;
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
if (vars->line_speed == SPEED_AUTO_NEG)
reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
else
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
if (vars->line_speed == SPEED_AUTO_NEG)
reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
else
reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_BAM_NEXT_PAGE,
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
&reg_val);
if (vars->line_speed == SPEED_AUTO_NEG) {
reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
} else {
reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
}
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_BAM_NEXT_PAGE,
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
reg_val);
if (enable_cl73) {
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_USERB0,
MDIO_CL73_USERB0_CL73_UCTRL,
0xe);
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_USERB0,
MDIO_CL73_USERB0_CL73_BAM_CTRL1,
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV2,
&reg_val);
if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV2,
reg_val);
reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
} else
reg_val = 0;
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
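/* Program the SerDes for forced (non-autoneg) operation: duplex via the
 * MII control register, and for speeds above 1G the MISC1 force-speed
 * selection (10G CX4 when 10G is requested).
 */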
static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 reg_val;
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
if (phy->req_duplex == DUPLEX_FULL)
reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, &reg_val);
DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
if (!((vars->line_speed == SPEED_1000) ||
(vars->line_speed == SPEED_100) ||
(vars->line_speed == SPEED_10))) {
reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
if (vars->line_speed == SPEED_10000)
reg_val |=
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
}
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, reg_val);
}
static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
val |= MDIO_OVER_1G_UP1_2_5G;
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= MDIO_OVER_1G_UP1_10G;
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_UP1, val);
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_UP3, 0x400);
}
static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
struct link_params *params,
u16 ieee_fc)
{
struct bnx2x *bp = params->bp;
u16 val;
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV1, &val);
val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV1, val);
}
static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
struct link_params *params,
u8 enable_cl73)
{
struct bnx2x *bp = params->bp;
u16 mii_control;
DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n");
if (enable_cl73) {
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
&mii_control);
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
(mii_control |
MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
} else {
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
&mii_control);
DP(NETIF_MSG_LINK,
"bnx2x_restart_autoneg mii_control before = 0x%x\n",
mii_control);
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
(mii_control |
MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
}
}
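/* Put the core into SGMII mode. For forced speeds the MII control
 * register is programmed directly; otherwise CL37 autoneg is restarted.
 */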
static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 control1;
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
&control1);
control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
control1);
if (!(vars->line_speed == SPEED_AUTO_NEG)) {
u16 mii_control;
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
&mii_control);
mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
switch (vars->line_speed) {
case SPEED_100:
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
break;
case SPEED_1000:
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
break;
case SPEED_10:
break;
default:
DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
vars->line_speed);
break;
}
if (phy->req_duplex == DUPLEX_FULL)
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
mii_control);
} else {
bnx2x_restart_autoneg(phy, params, 0);
}
}
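/* Return 1 if the current link came up through 1G or 10G parallel
 * detection rather than through autoneg, 0 otherwise.
 */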
static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 pd_10g, status2_1000x;
if (phy->req_line_speed != SPEED_AUTO_NEG)
return 0;
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
&status2_1000x);
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
&status2_1000x);
if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
params->port);
return 1;
}
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
&pd_10g);
if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
params->port);
return 1;
}
return 0;
}
static void bnx2x_update_adv_fc(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars,
u32 gp_status)
{
u16 ld_pause;
u16 lp_pause;
u16 pause_result;
struct bnx2x *bp = params->bp;
if ((gp_status &
(MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
(MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV1,
&ld_pause);
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_LP_ADV1,
&lp_pause);
pause_result = (ld_pause &
MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8;
pause_result |= (lp_pause &
MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) >> 10;
DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", pause_result);
} else {
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
&ld_pause);
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
&lp_pause);
pause_result = (ld_pause &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
pause_result |= (lp_pause &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result);
}
bnx2x_pause_resolve(phy, params, vars, pause_result);
}
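/* Resolve the flow control setting from the forced request, the parallel
 * detection fallback, or the pause abilities negotiated over CL37/CL73.
 */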
static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars,
u32 gp_status)
{
struct bnx2x *bp = params->bp;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
if (phy->req_line_speed == SPEED_AUTO_NEG)
bnx2x_update_adv_fc(phy, params, vars, gp_status);
vars->flow_ctrl = phy->req_flow_ctrl;
} else if (phy->req_line_speed != SPEED_AUTO_NEG)
vars->flow_ctrl = params->req_fc_auto_adv;
else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
(!(vars->phy_flags & PHY_SGMII_FLAG))) {
if (bnx2x_direct_parallel_detect_used(phy, params)) {
vars->flow_ctrl = params->req_fc_auto_adv;
return;
}
bnx2x_update_adv_fc(phy, params, vars, gp_status);
}
DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
}
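/* Fall back from CL73 to CL37: if signal is detected, the CL73 state
 * machine is in its BAM37 check state and CL37 FSM messages have been
 * received, disable CL73 and restart CL37 autoneg.
 */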
static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 rx_status, ustat_val, cl37_fsm_received;
DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_RX0,
MDIO_RX0_RX_STATUS,
&rx_status);
if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
(MDIO_RX0_RX_STATUS_SIGDET)) {
DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
"rx_status(0x80b0) = 0x%x\n", rx_status);
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
return;
}
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_USERB0,
MDIO_CL73_USERB0_CL73_USTAT1,
&ustat_val);
if ((ustat_val &
(MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
(MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) {
DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. "
"ustat_val(0x8371) = 0x%x\n", ustat_val);
return;
}
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_REMOTE_PHY,
MDIO_REMOTE_PHY_MISC_RX_STATUS,
&cl37_fsm_received);
if ((cl37_fsm_received &
(MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
(MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
DP(NETIF_MSG_LINK, "No CL37 FSM were received. "
"misc_rx_status(0x8330) = 0x%x\n",
cl37_fsm_received);
return;
}
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
0);
bnx2x_restart_autoneg(phy, params, 0);
DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
}
static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars,
u32 gp_status)
{
if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
vars->link_status |=
LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
if (bnx2x_direct_parallel_detect_used(phy, params))
vars->link_status |=
LINK_STATUS_PARALLEL_DETECTION_USED;
}
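/* Translate the GP status speed encoding into line_speed and link_status
 * bits; returns -EINVAL for unsupported speed encodings.
 */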
static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars,
u16 is_link_up,
u16 speed_mask,
u16 is_duplex)
{
struct bnx2x *bp = params->bp;
if (phy->req_line_speed == SPEED_AUTO_NEG)
vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
if (is_link_up) {
DP(NETIF_MSG_LINK, "phy link up\n");
vars->phy_link_up = 1;
vars->link_status |= LINK_STATUS_LINK_UP;
switch (speed_mask) {
case GP_STATUS_10M:
vars->line_speed = SPEED_10;
if (is_duplex == DUPLEX_FULL)
vars->link_status |= LINK_10TFD;
else
vars->link_status |= LINK_10THD;
break;
case GP_STATUS_100M:
vars->line_speed = SPEED_100;
if (is_duplex == DUPLEX_FULL)
vars->link_status |= LINK_100TXFD;
else
vars->link_status |= LINK_100TXHD;
break;
case GP_STATUS_1G:
case GP_STATUS_1G_KX:
vars->line_speed = SPEED_1000;
if (is_duplex == DUPLEX_FULL)
vars->link_status |= LINK_1000TFD;
else
vars->link_status |= LINK_1000THD;
break;
case GP_STATUS_2_5G:
vars->line_speed = SPEED_2500;
if (is_duplex == DUPLEX_FULL)
vars->link_status |= LINK_2500TFD;
else
vars->link_status |= LINK_2500THD;
break;
case GP_STATUS_5G:
case GP_STATUS_6G:
DP(NETIF_MSG_LINK,
"link speed unsupported gp_status 0x%x\n",
speed_mask);
return -EINVAL;
case GP_STATUS_10G_KX4:
case GP_STATUS_10G_HIG:
case GP_STATUS_10G_CX4:
case GP_STATUS_10G_KR:
case GP_STATUS_10G_SFI:
case GP_STATUS_10G_XFI:
vars->line_speed = SPEED_10000;
vars->link_status |= LINK_10GTFD;
break;
case GP_STATUS_20G_DXGXS:
case GP_STATUS_20G_KR2:
vars->line_speed = SPEED_20000;
vars->link_status |= LINK_20GTFD;
break;
default:
DP(NETIF_MSG_LINK,
"link speed unsupported gp_status 0x%x\n",
speed_mask);
return -EINVAL;
}
} else {
DP(NETIF_MSG_LINK, "phy link down\n");
vars->phy_link_up = 0;
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
vars->mac_type = MAC_TYPE_NONE;
}
DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n",
vars->phy_link_up, vars->line_speed);
return 0;
}
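/* Read the XGXS GP status and derive speed, duplex, flow control and,
 * for direct single-media setups, the link partner advertised abilities.
 */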
static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
int rc = 0;
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_GP_STATUS,
MDIO_GP_STATUS_TOP_AN_STATUS1,
&gp_status);
if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
duplex = DUPLEX_FULL;
if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)
link_up = 1;
speed_mask = gp_status & GP_STATUS_SPEED_MASK;
DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n",
gp_status, link_up, speed_mask);
rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask,
duplex);
if (rc == -EINVAL)
return rc;
if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
if (SINGLE_MEDIA_DIRECT(params)) {
vars->duplex = duplex;
bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
if (phy->req_line_speed == SPEED_AUTO_NEG)
bnx2x_xgxs_an_resolve(phy, params, vars,
gp_status);
}
} else {
if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
SINGLE_MEDIA_DIRECT(params)) {
bnx2x_check_fallback_to_cl37(phy, params);
}
}
if (SINGLE_MEDIA_DIRECT(params) &&
(vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)) {
u16 val;
CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_LP_ADV2, &val);
if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX)
vars->link_status |=
LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 |
MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_LP_UP1, &val);
if (val & MDIO_OVER_1G_UP1_2_5G)
vars->link_status |=
LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
}
DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
}
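/* Warpcore (E3) link status: handles the loopback, 20G-MLD2 and per-lane
 * GP status cases, then resolves speed/duplex through
 * bnx2x_get_link_speed_duplex().
 */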
static u8 bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 lane;
u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
int rc = 0;
lane = bnx2x_get_warpcore_lane(phy, params);
if ((params->loopback_mode) &&
(phy->flags & FLAGS_WC_DUAL_MODE)) {
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
link_up &= 0x1;
} else if ((phy->req_line_speed > SPEED_10000) &&
(phy->supported & SUPPORTED_20000baseMLD2_Full)) {
u16 temp_link_up;
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
1, &temp_link_up);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
1, &link_up);
DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n",
temp_link_up, link_up);
link_up &= (1<<2);
if (link_up)
bnx2x_ext_phy_resolve_fc(phy, params, vars);
} else {
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_GP2_STATUS_GP_2_1,
&gp_status1);
DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1);
link_up = ((gp_status1 >> 8) |
(gp_status1 >> 12) |
(gp_status1)) &
(1 << lane);
if (phy->supported & SUPPORTED_20000baseKR2_Full) {
u16 an_link;
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
MDIO_AN_REG_STATUS, &an_link);
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
MDIO_AN_REG_STATUS, &an_link);
link_up |= (an_link & (1<<2));
}
if (link_up && SINGLE_MEDIA_DIRECT(params)) {
u16 pd, gp_status4;
if (phy->req_line_speed == SPEED_AUTO_NEG) {
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_GP2_STATUS_GP_2_4,
&gp_status4);
if (gp_status4 & ((1<<12)<<lane))
vars->link_status |=
LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_PAR_DET_10G_STATUS,
&pd);
if (pd & (1<<15))
vars->link_status |=
LINK_STATUS_PARALLEL_DETECTION_USED;
}
bnx2x_ext_phy_resolve_fc(phy, params, vars);
vars->duplex = duplex;
}
}
if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) &&
SINGLE_MEDIA_DIRECT(params)) {
u16 val;
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
MDIO_AN_REG_LP_AUTO_NEG2, &val);
if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX)
vars->link_status |=
LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 |
MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL3_LP_UP1, &val);
if (val & MDIO_OVER_1G_UP1_2_5G)
vars->link_status |=
LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
}
if (lane < 2) {
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed);
} else {
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed);
}
DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed);
if ((lane & 1) == 0)
gp_speed <<= 8;
gp_speed &= 0x3f00;
link_up = !!link_up;
rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
duplex);
if ((!link_up) && (phy->media_type == ETH_PHY_KR) &&
(!(phy->flags & FLAGS_WC_DUAL_MODE)))
vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
}
static void bnx2x_set_gmii_tx_driver(struct link_params *params)
{
struct bnx2x *bp = params->bp;
struct bnx2x_phy *phy = &params->phy[INT_PHY];
u16 lp_up2;
u16 tx_driver;
u16 bank;
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_LP_UP2, &lp_up2);
lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
if (lp_up2 == 0)
return;
for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
CL22_RD_OVER_CL45(bp, phy,
bank,
MDIO_TX0_TX_DRIVER, &tx_driver);
if (lp_up2 !=
(tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
tx_driver |= lp_up2;
CL22_WR_OVER_CL45(bp, phy,
bank,
MDIO_TX0_TX_DRIVER, tx_driver);
}
}
}
static int bnx2x_emac_program(struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
u16 mode = 0;
DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
EMAC_REG_EMAC_MODE,
(EMAC_MODE_25G_MODE |
EMAC_MODE_PORT_MII_10M |
EMAC_MODE_HALF_DUPLEX));
switch (vars->line_speed) {
case SPEED_10:
mode |= EMAC_MODE_PORT_MII_10M;
break;
case SPEED_100:
mode |= EMAC_MODE_PORT_MII;
break;
case SPEED_1000:
mode |= EMAC_MODE_PORT_GMII;
break;
case SPEED_2500:
mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
break;
default:
DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
vars->line_speed);
return -EINVAL;
}
if (vars->duplex == DUPLEX_HALF)
mode |= EMAC_MODE_HALF_DUPLEX;
bnx2x_bits_en(bp,
GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
mode);
bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
return 0;
}
static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
struct link_params *params)
{
u16 bank, i = 0;
struct bnx2x *bp = params->bp;
for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
CL22_WR_OVER_CL45(bp, phy,
bank,
MDIO_RX0_RX_EQ_BOOST,
phy->rx_preemphasis[i]);
}
for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
CL22_WR_OVER_CL45(bp, phy,
bank,
MDIO_TX0_TX_DRIVER,
phy->tx_preemphasis[i]);
}
}
static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
(params->loopback_mode == LOOPBACK_XGXS));
if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
if (SINGLE_MEDIA_DIRECT(params) &&
(params->feature_config_flags &
FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
bnx2x_set_preemphasis(phy, params);
if (vars->line_speed != SPEED_AUTO_NEG ||
(SINGLE_MEDIA_DIRECT(params) &&
params->loopback_mode == LOOPBACK_EXT)) {
DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
bnx2x_set_autoneg(phy, params, vars, 0);
bnx2x_program_serdes(phy, params, vars);
} else {
DP(NETIF_MSG_LINK, "not SGMII, AN\n");
bnx2x_set_brcm_cl37_advertisement(phy, params);
bnx2x_set_ieee_aneg_advertisement(phy, params,
vars->ieee_fc);
bnx2x_set_autoneg(phy, params, vars, enable_cl73);
bnx2x_restart_autoneg(phy, params, enable_cl73);
}
} else {
DP(NETIF_MSG_LINK, "SGMII\n");
bnx2x_initialize_sgmii_process(phy, params, vars);
}
}
static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
int rc;
vars->phy_flags |= PHY_XGXS_FLAG;
if ((phy->req_line_speed &&
((phy->req_line_speed == SPEED_100) ||
(phy->req_line_speed == SPEED_10))) ||
(!phy->req_line_speed &&
(phy->speed_cap_mask >=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
(phy->speed_cap_mask <
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
(phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD))
vars->phy_flags |= PHY_SGMII_FLAG;
else
vars->phy_flags &= ~PHY_SGMII_FLAG;
bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
bnx2x_set_aer_mmd(params, phy);
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
bnx2x_set_master_ln(params, phy);
rc = bnx2x_reset_unicore(params, phy, 0);
if (rc)
return rc;
bnx2x_set_aer_mmd(params, phy);
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
bnx2x_set_master_ln(params, phy);
bnx2x_set_swap_lanes(params, phy);
}
return rc;
}
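/* Poll the PHY control register (CL22 or CL45 depending on the PHY type)
 * roughly once per millisecond, for up to 1000 iterations, until the
 * reset bit clears.
 */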
static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
struct bnx2x_phy *phy,
struct link_params *params)
{
u16 cnt, ctrl;
for (cnt = 0; cnt < 1000; cnt++) {
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
bnx2x_cl22_read(bp, phy,
MDIO_PMA_REG_CTRL, &ctrl);
else
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_CTRL, &ctrl);
if (!(ctrl & (1<<15)))
break;
usleep_range(1000, 2000);
}
if (cnt == 1000)
netdev_err(bp->dev, "Warning: PHY was not initialized,"
" Port %d\n",
params->port);
DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
return cnt;
}
static void bnx2x_link_int_enable(struct link_params *params)
{
u8 port = params->port;
u32 mask;
struct bnx2x *bp = params->bp;
if (CHIP_IS_E3(bp)) {
mask = NIG_MASK_XGXS0_LINK_STATUS;
if (!(SINGLE_MEDIA_DIRECT(params)))
mask |= NIG_MASK_MI_INT;
} else if (params->switch_cfg == SWITCH_CFG_10G) {
mask = (NIG_MASK_XGXS0_LINK10G |
NIG_MASK_XGXS0_LINK_STATUS);
DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
if (!(SINGLE_MEDIA_DIRECT(params)) &&
params->phy[INT_PHY].type !=
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
mask |= NIG_MASK_MI_INT;
DP(NETIF_MSG_LINK, "enabled external phy int\n");
}
} else {
mask = NIG_MASK_SERDES0_LINK_STATUS;
DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
if (!(SINGLE_MEDIA_DIRECT(params)) &&
params->phy[INT_PHY].type !=
PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
mask |= NIG_MASK_MI_INT;
DP(NETIF_MSG_LINK, "enabled external phy int\n");
}
}
bnx2x_bits_en(bp,
NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
mask);
DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
(params->switch_cfg == SWITCH_CFG_10G),
REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
}
static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
u8 exp_mi_int)
{
u32 latch_status = 0;
latch_status = REG_RD(bp,
NIG_REG_LATCH_STATUS_0 + port*8);
DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status);
if (exp_mi_int)
bnx2x_bits_en(bp,
NIG_REG_STATUS_INTERRUPT_PORT0
+ port*4,
NIG_STATUS_EMAC0_MI_INT);
else
bnx2x_bits_dis(bp,
NIG_REG_STATUS_INTERRUPT_PORT0
+ port*4,
NIG_STATUS_EMAC0_MI_INT);
if (latch_status & 1) {
REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
(latch_status & 0xfffe) | (latch_status & 1));
}
}
static void bnx2x_link_int_ack(struct link_params *params,
struct link_vars *vars, u8 is_10g_plus)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
u32 mask;
bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
(NIG_STATUS_XGXS0_LINK10G |
NIG_STATUS_XGXS0_LINK_STATUS |
NIG_STATUS_SERDES0_LINK_STATUS));
if (vars->phy_link_up) {
if (USES_WARPCORE(bp))
mask = NIG_STATUS_XGXS0_LINK_STATUS;
else {
if (is_10g_plus)
mask = NIG_STATUS_XGXS0_LINK10G;
else if (params->switch_cfg == SWITCH_CFG_10G) {
u32 ser_lane =
((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
mask = ((1 << ser_lane) <<
NIG_STATUS_XGXS0_LINK_STATUS_SIZE);
} else
mask = NIG_STATUS_SERDES0_LINK_STATUS;
}
DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n",
mask);
bnx2x_bits_en(bp,
NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
mask);
}
}
static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
str[0] = '\0';
(*len)--;
return 0;
}
static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
{
u16 ret;
if (*len < 10) {
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
ret = scnprintf(str, *len, "%x.%x", (num >> 16) & 0xFFFF,
num & 0xFFFF);
*len -= ret;
return 0;
}
static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
{
u16 ret;
if (*len < 10) {
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
ret = scnprintf(str, *len, "%x.%x.%x", (num >> 16) & 0xFF,
(num >> 8) & 0xFF, num & 0xFF);
*len -= ret;
return 0;
}
int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
u16 len)
{
struct bnx2x *bp;
u32 spirom_ver = 0;
int status = 0;
u8 *ver_p = version;
u16 remain_len = len;
if (version == NULL || params == NULL)
return -EINVAL;
bp = params->bp;
version[0] = '\0';
spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr);
if (params->phy[EXT_PHY1].format_fw_ver) {
status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver,
ver_p,
&remain_len);
ver_p += (len - remain_len);
}
if ((params->num_phys == MAX_PHYS) &&
(params->phy[EXT_PHY2].ver_addr != 0)) {
spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
if (params->phy[EXT_PHY2].format_fw_ver) {
*ver_p = '/';
ver_p++;
remain_len--;
status |= params->phy[EXT_PHY2].format_fw_ver(
spirom_ver,
ver_p,
&remain_len);
ver_p = version + (len - remain_len);
}
}
*ver_p = '\0';
return status;
}
static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
struct link_params *params)
{
u8 port = params->port;
struct bnx2x *bp = params->bp;
if (phy->req_line_speed != SPEED_1000) {
u32 md_devad = 0;
DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
if (!CHIP_IS_E3(bp)) {
md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
port*0x18));
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
0x5);
}
bnx2x_cl45_write(bp, phy,
5,
(MDIO_REG_BANK_AER_BLOCK +
(MDIO_AER_BLOCK_AER_REG & 0xf)),
0x2800);
bnx2x_cl45_write(bp, phy,
5,
(MDIO_REG_BANK_CL73_IEEEB0 +
(MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
0x6041);
msleep(200);
bnx2x_set_aer_mmd(params, phy);
if (!CHIP_IS_E3(bp)) {
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
md_devad);
}
} else {
u16 mii_ctrl;
DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
bnx2x_cl45_read(bp, phy, 5,
(MDIO_REG_BANK_COMBO_IEEE0 +
(MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
&mii_ctrl);
bnx2x_cl45_write(bp, phy, 5,
(MDIO_REG_BANK_COMBO_IEEE0 +
(MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
mii_ctrl |
MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
}
}
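/* Configure the port LEDs for the requested mode (OFF / ON / OPER),
 * taking chip family and external-PHY specific overrides into account.
 */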
int bnx2x_set_led(struct link_params *params,
struct link_vars *vars, u8 mode, u32 speed)
{
u8 port = params->port;
u16 hw_led_mode = params->hw_led_mode;
int rc = 0;
u8 phy_idx;
u32 tmp;
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
speed, hw_led_mode);
for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) {
if (params->phy[phy_idx].set_link_led) {
params->phy[phy_idx].set_link_led(
&params->phy[phy_idx], params, mode);
}
}
switch (mode) {
case LED_MODE_FRONT_PANEL_OFF:
case LED_MODE_OFF:
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
SHARED_HW_CFG_LED_MAC1);
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
if (params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
tmp &= ~(EMAC_LED_1000MB_OVERRIDE |
EMAC_LED_100MB_OVERRIDE |
EMAC_LED_10MB_OVERRIDE);
else
tmp |= EMAC_LED_OVERRIDE;
EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp);
break;
case LED_MODE_OPER:
if (!vars->link_up)
break;
fallthrough;
case LED_MODE_ON:
if (((params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
(params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
CHIP_IS_E2(bp) && params->num_phys == 2) {
if (mode == LED_MODE_ON ||
speed == SPEED_10000){
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
EMAC_WR(bp, EMAC_REG_EMAC_LED,
(tmp | EMAC_LED_OVERRIDE));
if (mode == LED_MODE_ON)
return rc;
}
} else if (SINGLE_MEDIA_DIRECT(params)) {
if ((!CHIP_IS_E3(bp)) ||
(CHIP_IS_E3(bp) &&
mode == LED_MODE_ON))
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
if (CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp) ||
(mode == LED_MODE_ON))
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
else
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
hw_led_mode);
} else if ((params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) &&
(mode == LED_MODE_ON)) {
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp |
EMAC_LED_OVERRIDE | EMAC_LED_1000MB_OVERRIDE);
break;
} else {
u32 nig_led_mode = ((params->hw_led_mode <<
SHARED_HW_CFG_LED_MODE_SHIFT) ==
SHARED_HW_CFG_LED_EXTPHY2) ?
(SHARED_HW_CFG_LED_PHY1 >>
SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
nig_led_mode);
}
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
if (CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
LED_BLINK_RATE_VAL_E3);
else
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
LED_BLINK_RATE_VAL_E1X_E2);
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
port*4, 1);
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
EMAC_WR(bp, EMAC_REG_EMAC_LED,
(tmp & (~EMAC_LED_OVERRIDE)));
if (CHIP_IS_E1(bp) &&
((speed == SPEED_2500) ||
(speed == SPEED_1000) ||
(speed == SPEED_100) ||
(speed == SPEED_10))) {
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
+ port*4, 1);
REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
port*4, 0);
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
port*4, 1);
}
break;
default:
rc = -EINVAL;
DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
mode);
break;
}
return rc;
}
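/* Report link state for self-test: returns 0 when the internal (and,
 * where applicable, external) PHY reports link up, -ESRCH otherwise.
 */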
int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
u8 is_serdes)
{
struct bnx2x *bp = params->bp;
u16 gp_status = 0, phy_index = 0;
u8 ext_phy_link_up = 0, serdes_phy_type;
struct link_vars temp_vars;
struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
if (CHIP_IS_E3(bp)) {
u16 link_up;
if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)]
> SPEED_10000) {
bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
1, &link_up);
bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
1, &link_up);
link_up &= (1<<2);
} else {
u8 lane = bnx2x_get_warpcore_lane(int_phy, params);
bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
MDIO_WC_REG_GP2_STATUS_GP_2_1,
&gp_status);
gp_status = ((gp_status >> 8) & 0xf) |
((gp_status >> 12) & 0xf);
link_up = gp_status & (1 << lane);
}
if (!link_up)
return -ESRCH;
} else {
CL22_RD_OVER_CL45(bp, int_phy,
MDIO_REG_BANK_GP_STATUS,
MDIO_GP_STATUS_TOP_AN_STATUS1,
&gp_status);
if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
return -ESRCH;
}
if (params->loopback_mode == LOOPBACK_XGXS)
return 0;
switch (params->num_phys) {
case 1:
return 0;
case 2:
ext_phy_link_up = params->phy[EXT_PHY1].read_status(
&params->phy[EXT_PHY1],
params, &temp_vars);
break;
case 3:
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
serdes_phy_type = ((params->phy[phy_index].media_type ==
ETH_PHY_SFPP_10G_FIBER) ||
(params->phy[phy_index].media_type ==
ETH_PHY_SFP_1G_FIBER) ||
(params->phy[phy_index].media_type ==
ETH_PHY_XFP_FIBER) ||
(params->phy[phy_index].media_type ==
ETH_PHY_DA_TWINAX));
if (is_serdes != serdes_phy_type)
continue;
if (params->phy[phy_index].read_status) {
ext_phy_link_up |=
params->phy[phy_index].read_status(
&params->phy[phy_index],
params, &temp_vars);
}
}
break;
}
if (ext_phy_link_up)
return 0;
return -ESRCH;
}
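/* Initialize the internal XGXS/Warpcore and any external PHYs according
 * to the PHY selection, then clear stale NIG link interrupt status bits.
 */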
static int bnx2x_link_initialize(struct link_params *params,
struct link_vars *vars)
{
u8 phy_index, non_ext_phy;
struct bnx2x *bp = params->bp;
vars->line_speed = params->phy[INT_PHY].req_line_speed;
if (!USES_WARPCORE(bp))
bnx2x_prepare_xgxs(&params->phy[INT_PHY], params, vars);
non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
(params->loopback_mode == LOOPBACK_XGXS));
if (non_ext_phy ||
(params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
(params->loopback_mode == LOOPBACK_EXT_PHY)) {
struct bnx2x_phy *phy = &params->phy[INT_PHY];
if (vars->line_speed == SPEED_AUTO_NEG &&
(CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp)))
bnx2x_set_parallel_detection(phy, params);
if (params->phy[INT_PHY].config_init)
params->phy[INT_PHY].config_init(phy, params, vars);
}
vars->line_speed = params->phy[INT_PHY].req_line_speed;
if (non_ext_phy) {
if (params->phy[INT_PHY].supported &
SUPPORTED_FIBRE)
vars->link_status |= LINK_STATUS_SERDES_LINK;
} else {
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
if (params->phy[phy_index].supported &
SUPPORTED_FIBRE)
vars->link_status |= LINK_STATUS_SERDES_LINK;
if (phy_index == EXT_PHY2 &&
(bnx2x_phy_selection(params) ==
PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
DP(NETIF_MSG_LINK,
"Not initializing second phy\n");
continue;
}
params->phy[phy_index].config_init(
&params->phy[phy_index],
params, vars);
}
}
bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
params->port*4,
(NIG_STATUS_XGXS0_LINK10G |
NIG_STATUS_XGXS0_LINK_STATUS |
NIG_STATUS_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
return 0;
}
static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
(0x1ff << (params->port*16)));
}
static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
u8 gpio_port;
if (CHIP_IS_E2(bp))
gpio_port = BP_PATH(bp);
else
gpio_port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_LOW,
gpio_port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
MISC_REGISTERS_GPIO_OUTPUT_LOW,
gpio_port);
DP(NETIF_MSG_LINK, "reset external PHY\n");
}
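/* Link-down handling: turn the LEDs off, clear the link status bits,
 * notify the management FW and stop the relevant MACs.
 */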
static int bnx2x_update_link_down(struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
vars->mac_type = MAC_TYPE_NONE;
vars->link_status &= ~LINK_UPDATE_MASK;
vars->line_speed = 0;
bnx2x_update_mng(params, vars->link_status);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
usleep_range(10000, 20000);
if (CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp))
bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
if (CHIP_IS_E3(bp)) {
REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
0);
REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
0);
vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
SHMEM_EEE_ACTIVE_BIT);
bnx2x_update_mng_eee(params, vars->eee_status);
bnx2x_set_xmac_rxtx(params, 0);
bnx2x_set_umac_rxtx(params, 0);
}
return 0;
}
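/* Link-up handling: enable the appropriate MAC (XMAC/UMAC on Warpcore
 * chips, BMAC/EMAC otherwise), set the LEDs, update the PBF on E1x and
 * notify the management FW.
 */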
static int bnx2x_update_link_up(struct link_params *params,
struct link_vars *vars,
u8 link_10g)
{
struct bnx2x *bp = params->bp;
u8 phy_idx, port = params->port;
int rc = 0;
vars->link_status |= (LINK_STATUS_LINK_UP |
LINK_STATUS_PHYSICAL_LINK_FLAG);
vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
vars->link_status |=
LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
vars->link_status |=
LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
if (USES_WARPCORE(bp)) {
if (link_10g) {
if (bnx2x_xmac_enable(params, vars, 0) ==
-ESRCH) {
DP(NETIF_MSG_LINK, "Found errors on XMAC\n");
vars->link_up = 0;
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
vars->link_status &= ~LINK_STATUS_LINK_UP;
}
} else
bnx2x_umac_enable(params, vars, 0);
bnx2x_set_led(params, vars,
LED_MODE_OPER, vars->line_speed);
if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) &&
(vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) {
DP(NETIF_MSG_LINK, "Enabling LPI assertion\n");
REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 +
(params->port << 2), 1);
REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1);
REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 +
(params->port << 2), 0xfc20);
}
}
if ((CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp))) {
if (link_10g) {
if (bnx2x_bmac_enable(params, vars, 0, 1) ==
-ESRCH) {
DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
vars->link_up = 0;
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
vars->link_status &= ~LINK_STATUS_LINK_UP;
}
bnx2x_set_led(params, vars,
LED_MODE_OPER, SPEED_10000);
} else {
rc = bnx2x_emac_program(params, vars);
bnx2x_emac_enable(params, vars, 0);
if ((vars->link_status &
LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
&& (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
SINGLE_MEDIA_DIRECT(params))
bnx2x_set_gmii_tx_driver(params);
}
}
if (CHIP_IS_E1x(bp))
rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
vars->line_speed);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
bnx2x_update_mng(params, vars->link_status);
bnx2x_update_mng_eee(params, vars->eee_status);
for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
bnx2x_check_half_open_conn(params, vars, 0);
break;
}
}
msleep(20);
return rc;
}
static void bnx2x_chng_link_count(struct link_params *params, bool clear)
{
struct bnx2x *bp = params->bp;
u32 addr, val;
if (!(SHMEM2_HAS(bp, link_change_count)))
return;
addr = params->shmem2_base +
offsetof(struct shmem2_region, link_change_count[params->port]);
if (clear)
val = 0;
else
val = REG_RD(bp, addr) + 1;
REG_WR(bp, addr, val);
}
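/* Top-level link update: re-read the status of every configured PHY,
 * resolve the active one and reprogram the MACs and LEDs accordingly.
 */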
int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
struct link_vars phy_vars[MAX_PHYS];
u8 port = params->port;
u8 link_10g_plus, phy_index;
u32 prev_link_status = vars->link_status;
u8 ext_phy_link_up = 0, cur_link_up;
int rc = 0;
u8 is_mi_int = 0;
u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
u8 active_external_phy = INT