/* bnx2x_main.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] =
	"QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("QLogic "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0444);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0444);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0444);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0444);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0444);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;
struct workqueue_struct *bnx2x_iov_wq;

struct bnx2x_mac_vals {
	u32 xmac_addr;
	u32 xmac_val;
	u32 emac_addr;
	u32 emac_val;
	u32 umac_addr[2];
	u32 umac_val[2];
	u32 bmac_addr;
	u32 bmac_val[2];
};

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57712_VF,
	BCM57800,
	BCM57800_MF,
	BCM57800_VF,
	BCM57810,
	BCM57810_MF,
	BCM57810_VF,
	BCM57840_4_10,
	BCM57840_2_20,
	BCM57840_MF,
	BCM57840_VF,
	BCM57811,
	BCM57811_MF,
	BCM57840_O,
	BCM57840_MFO,
	BCM57811_VF
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] = {
	[BCM57710]	= { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
	[BCM57711]	= { "QLogic BCM57711 10 Gigabit PCIe" },
	[BCM57711E]	= { "QLogic BCM57711E 10 Gigabit PCIe" },
	[BCM57712]	= { "QLogic BCM57712 10 Gigabit Ethernet" },
	[BCM57712_MF]	= { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
	[BCM57712_VF]	= { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
	[BCM57800]	= { "QLogic BCM57800 10 Gigabit Ethernet" },
	[BCM57800_MF]	= { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
	[BCM57800_VF]	= { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
	[BCM57810]	= { "QLogic BCM57810 10 Gigabit Ethernet" },
	[BCM57810_MF]	= { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
	[BCM57810_VF]	= { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
	[BCM57840_4_10]	= { "QLogic BCM57840 10 Gigabit Ethernet" },
	[BCM57840_2_20]	= { "QLogic BCM57840 20 Gigabit Ethernet" },
	[BCM57840_MF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57840_VF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
	[BCM57811]	= { "QLogic BCM57811 10 Gigabit Ethernet" },
	[BCM57811_MF]	= { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
	[BCM57840_O]	= { "QLogic BCM57840 10/20 Gigabit Ethernet" },
	[BCM57840_MFO]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57811_VF]	= { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF	CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF	CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O	CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF	CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10	CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20	CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO	CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF	CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF	CHIP_NUM_57811_VF
#endif

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

/* Forward declaration */
static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);

static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
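	/* A 64-bit DMA address is programmed as two 32-bit halves:
	 * low dword at 'addr', high dword at 'addr' + 4.
	 */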
	REG_WR(bp,  addr, U64_LO(mapping));
	REG_WR(bp,  addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
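	/* Mirror the VF-to-PF mapping into the internal memory of all
	 * four storm processors (X, C, T, U).
	 */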
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

/* Indirect register write through the PCI config-space GRC window.
 * Used only at init; locking is done by the MCP.
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
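	/* Leave the GRC window pointing at a harmless offset (the
	 * vendor-id register) rather than at an internal register.
	 */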
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp,
			  struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
	int i;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
			   "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
			   "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
		   i, *(((u32 *)dmae) + i));
}

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			   DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
			     bool with_comp, u8 comp_type)
{
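	/* Compose the opcode from the source/destination types and reset
	 * flags, the port and VN of the calling function, the error policy
	 * and the endianness swap mode; a completion type is OR-ed in only
	 * when requested.
	 */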
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u32 *comp)
{
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
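	/* ~200 msec total at 50 usec per poll (20 sec on slow
	 * emulation/FPGA chips)
	 */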
	int rc = 0;

	bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

	/* Lock the DMAE channel. Disable BHs to prevent a deadlock,
	 * since this code is called both from syscall context and
	 * from the ndo_set_rx_mode() flow, which may run in BH context.
	 */

	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:

	spin_unlock_bh(&bp->dmae_lock);

	return rc;
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
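		/* DMAE is not usable yet - fall back to indirect/string
		 * register writes of the data staged in the slowpath
		 * wb_data buffer.
		 */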
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
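		/* DMAE is not usable yet - read the registers one by one
		 * into the slowpath wb_data buffer instead.
		 */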
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;
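	/* A single DMAE command is limited in length; split the write
	 * into dmae_wr_max-dword chunks (offset is in bytes, len in
	 * dwords).
	 */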

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

enum storms {
	   XSTORM,
	   TSTORM,
	   CSTORM,
	   USTORM,
	   MAX_STORMS
};

#define STORMS_NUM 4
#define REGS_IN_ENTRY 4

static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
					      enum storms storm,
					      int entry)
{
	switch (storm) {
	case XSTORM:
		return XSTORM_ASSERT_LIST_OFFSET(entry);
	case TSTORM:
		return TSTORM_ASSERT_LIST_OFFSET(entry);
	case CSTORM:
		return CSTORM_ASSERT_LIST_OFFSET(entry);
	case USTORM:
		return USTORM_ASSERT_LIST_OFFSET(entry);
	case MAX_STORMS:
	default:
		BNX2X_ERR("unknown storm\n");
	}
	return -EINVAL;
}

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, j, rc = 0;
	enum storms storm;
	u32 regs[REGS_IN_ENTRY];
	u32 bar_storm_intmem[STORMS_NUM] = {
		BAR_XSTRORM_INTMEM,
		BAR_TSTRORM_INTMEM,
		BAR_CSTRORM_INTMEM,
		BAR_USTRORM_INTMEM
	};
	u32 storm_assert_list_index[STORMS_NUM] = {
		XSTORM_ASSERT_LIST_INDEX_OFFSET,
		TSTORM_ASSERT_LIST_INDEX_OFFSET,
		CSTORM_ASSERT_LIST_INDEX_OFFSET,
		USTORM_ASSERT_LIST_INDEX_OFFSET
	};
	char *storms_string[STORMS_NUM] = {
		"XSTORM",
		"TSTORM",
		"CSTORM",
		"USTORM"
	};

	for (storm = XSTORM; storm < MAX_STORMS; storm++) {
		last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
				   storm_assert_list_index[storm]);
		if (last_idx)
			BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
				  storms_string[storm], last_idx);

		/* print the asserts */
		for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
			/* read a single assert entry */
			for (j = 0; j < REGS_IN_ENTRY; j++)
				regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
					  bnx2x_get_assert_list_entry(bp,
								      storm,
								      i) +
					  sizeof(u32) * j);

			/* log entry if it contains a valid assert */
			if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
				BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
					  storms_string[storm], i, regs[3],
					  regs[2], regs[1], regs[0]);
				rc++;
			} else {
				break;
			}
		}
	}

	BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
		  CHIP_IS_E1(bp) ? "everest1" :
		  CHIP_IS_E1H(bp) ? "everest1h" :
		  CHIP_IS_E2(bp) ? "everest2" : "everest3",
		  BCM_5710_FW_MAJOR_VERSION,
		  BCM_5710_FW_MINOR_VERSION,
		  BCM_5710_FW_REVISION_VERSION);

	return rc;
}

#define MCPR_TRACE_BUFFER_SIZE	(0x800)
#define SCRATCH_BUFFER_SIZE(bp)	\
	(CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		(bp->common.bc_ver & 0xff0000) >> 16,
		(bp->common.bc_ver & 0xff00) >> 8,
		(bp->common.bc_ver & 0xff));

	if (pci_channel_offline(bp->pdev)) {
		BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
		return;
	}

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);

	/* sanity */
	if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
	    trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
				SCRATCH_BUFFER_SIZE(bp)) {
		BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
			  trace_shmem_base);
		return;
	}

	addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
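	/* The MFW trace buffer sits just below the shmem base: a TRCB
	 * signature dword, then a cyclic write pointer, then the text.
	 */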

	/* validate TRCB signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.");
		return ;
	}

	/* read cyclic buffer pointer */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
	if (mark >= trace_shmem_base || mark < addr + 4) {
		BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
		return;
	}
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);

	/* dump buffer after the mark */
	for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}

	/* dump buffer before the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* In E1 we must use only the PCI configuration space to disable the
	 * MSI/MSI-X capability; it's forbidden to disable
	 * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent the HC from sending
		 * interrupts after we exit this function.
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif
	if (IS_PF(bp) && disable_int)
		bnx2x_int_disable(bp);

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	if (IS_PF(bp)) {
		struct host_sp_status_block *def_sb = bp->def_status_blk;
		int data_size, cstorm_offset;

		BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
			  bp->def_idx, bp->def_att_idx, bp->attn_state,
			  bp->spq_prod_idx, bp->stats_counter);
		BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
			  def_sb->atten_status_block.attn_bits,
			  def_sb->atten_status_block.attn_bits_ack,
			  def_sb->atten_status_block.status_block_id,
			  def_sb->atten_status_block.attn_bits_index);
		BNX2X_ERR("     def (");
		for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
			pr_cont("0x%x%s",
				def_sb->sp_sb.index_values[i],
				(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

		data_size = sizeof(struct hc_sp_status_block_data) /
			    sizeof(u32);
		cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
		for (i = 0; i < data_size; i++)
			*((u32 *)&sp_sb_data + i) =
				REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
					   i * sizeof(u32));

		pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
			sp_sb_data.igu_sb_id,
			sp_sb_data.igu_seg_id,
			sp_sb_data.p_func.pf_id,
			sp_sb_data.p_func.vnic_id,
			sp_sb_data.p_func.vf_id,
			sp_sb_data.p_func.vf_valid,
			sp_sb_data.state);
	}

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos) {
			if (!fp->txdata_ptr[cos])
				break;

			txdata = *fp->txdata_ptr[cos];

			if (!txdata.tx_cons_sb)
				continue;

			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

		if (IS_FCOE_FP(fp))
			continue;

		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
			       fp->sb_running_index[j],
			       (j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
			       fp->sb_index_values[j],
			       (j == loop - 1) ? ")" : " ");

		/* VF cannot access the FW reflection of the status block */
		if (IS_VF(bp))
			continue;

		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x)  igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
			       hc_index_p[j].flags,
			       hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (IS_PF(bp)) {
		/* event queue */
		BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
		for (i = 0; i < NUM_EQ_DESC; i++) {
			u32 *data = (u32 *)&bp->eq_ring[i].message.data;

			BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
				  i, bp->eq_ring[i].message.opcode,
				  bp->eq_ring[i].message.error);
			BNX2X_ERR("data: %x %x %x\n",
				  data[0], data[1], data[2]);
		}
	}

	/* Rings */
	/* Rx */
	for_each_valid_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_valid_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (!bp->fp)
			break;

		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			if (!fp->txdata_ptr[cos])
				break;

			if (!txdata->tx_cons_sb)
				continue;

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	if (IS_PF(bp)) {
		bnx2x_fw_dump(bp);
		bnx2x_mc_assert(bp);
	}
	BNX2X_ERR("end crash dump -----------------\n");
}

/*
 * FLR Support for E2
 *
 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 * initialization.
 */
#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
#define FLR_WAIT_INTERVAL	50	/* usec */
#define	FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};

static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);
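	/* The buffer is considered flushed once the credit count is back
	 * at its init value, or once the credits freed since polling
	 * started cover everything that was outstanding.
	 */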

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
				    char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);

	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}

/* Routines shared with the VF FLR cleanup flow */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}

void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD ,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)

int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
	u32 op_gen_command = 0;
	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

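	/* Compose the operation-generator command: the final-cleanup
	 * completion parameter and type, plus the aggregated interrupt
	 * vector of the function being cleaned, marked valid.
	 */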
	op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		bnx2x_panic();
		return 1;
	}
	/* Zero completion for next FLR */
	REG_WR(bp, comp_addr, 0);

	return ret;
}

u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	u16 status;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}

/* PF FLR specific routines */
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			CFC_REG_NUM_LCIDS_INSIDE_PF,
			"CFC PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			DORQ_REG_PF_USAGE_CNT,
			"DQ PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
			"QM PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
			"Timers VNIC usage counter timed out",
			poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
			"Timers NUM_SCANS usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for the DMAE PF usage counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			dmae_reg_go_c[INIT_DMAE_C(bp)],
			"DMAE command register timed out",
			poll_cnt))
		return 1;

	return 0;
}

static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}

static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	/* Re-enable PF target read access */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	/* Zero the igu 'trailing edge' and 'leading edge' */

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	/* ATC cleanup */

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	/* Debug */
	bnx2x_hw_enable_status(bp);

	/*
	 * Master enable - must be set again here because WB DMAE writes
	 * are performed before this register is re-initialized as part of
	 * the regular function init.
	 */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
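			/* Non-E1: write the config once with the MSI/MSI-X
			 * enable bit still set, then clear it for the final
			 * write below.
			 */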
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);

		if (single_msix)
			val |= IGU_PF_CONF_SINGLE_ISR_EN;
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	/* Clean previous status - need to configure IGU prior to ack */
	if ((!msix) || single_msix) {
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
		bnx2x_ack_int(bp);
	}

	val |= IGU_PF_CONF_FUNC_EN;

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	if (val & IGU_PF_CONF_INT_LINE_EN)
		pci_intx(bp->pdev, true);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		if (CNIC_SUPPORT(bp))
			offset++;
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[offset++].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock: writing the resource bit to
	 * control_reg + 4 requests it, and reading the control register
	 * back tells whether we own it.
	 */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Failed to get a lock on resource %d\n", resource);
	return false;
}

/**
 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
 *
 * @bp:	driver handle
 *
 * Returns the recovery leader resource id according to the engine this
 * function belongs to. Currently only 2 engines are supported.
 */
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
	if (BP_PATH(bp))
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
	else
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}

/**
 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
 *
 * @bp: driver handle
 *
 * Tries to acquire a leader lock for current engine.
 */
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);

/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
static int bnx2x_schedule_sp_task(struct bnx2x *bp)
{
	/* Set the interrupt occurred bit for the sp-task to recognize it
	 * must ack the interrupt and transition according to the IGU
	 * state machine.
	 */
	atomic_set(&bp->interrupt_occurred, 1);

	/* The sp_task must execute only after this bit
	 * is set, otherwise we will get out of sync and miss all
	 * further interrupts. Hence, the barrier.
	 */
	smp_wmb();

	/* schedule sp_task to workqueue */
	return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
}

void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* If cid is within VF range, replace the slowpath object with the
	 * one corresponding to this VF
	 */
	if (cid >= BNX2X_FIRST_VF_CID  &&
	    cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
		bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
		DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
		/* q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the bp->spq_left
		 * because apparently we haven't sent this command in the
		 * first place.
		 */
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif

	smp_mb__before_atomic();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in bp->cq_spq_left towards memory */
	smp_mb__after_atomic();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
		/* If the Q update ramrod completed for the last Q in the
		 * AFEX VIF set flow, ACK the MCP at the end.
		 *
		 * Mark the pending ACK-to-MCP bit before clearing the
		 * FCoE-Q-update-pending bit so that the two bits are never
		 * clear at the same time: at the end of load/unload the
		 * driver checks that sp_state is cleared, and this ordering
		 * prevents races.
		 */
		smp_mb__before_atomic();
		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
		wmb();
		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_atomic();

		/* schedule the sp task as mcp ack is required */
		bnx2x_schedule_sp_task(bp);
	}

	return;
}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;
	u8 cos;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

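		/* Each status block owns one bit of the acked status word:
		 * bit 0 - default/slowpath SB, bit 1 - CNIC SB (when
		 * supported), then one bit per ethernet queue.
		 */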
		mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			for_each_cos_in_tx_queue(fp, cos)
				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

	if (CNIC_SUPPORT(bp)) {
		mask = 0x2;
		if (status & (mask | 0x1)) {
			struct cnic_ops *c_ops = NULL;

			rcu_read_lock();
			c_ops = rcu_dereference(bp->cnic_ops);
			if (c_ops && (bp->cnic_eth_dev.drv_state &
				      CNIC_DRV_STATE_HANDLES_IRQ))
				c_ops->cnic_handler(bp->cnic_data, NULL);
			rcu_read_unlock();

			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {

		/* schedule sp task to perform default status block work, ack
		 * attentions and enable interrupts.
		 */
		bnx2x_schedule_sp_task(bp);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		BNX2X_ERR("lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, polling every 5 ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		usleep_range(5000, 10000);
	}
	BNX2X_ERR("Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_leader_lock(struct bnx2x *bp)
{
	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
			  lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	return value;
}
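
/* Worked example for bnx2x_get_gpio() above (illustrative; assumes
 * MISC_REGISTERS_GPIO_PORT_SHIFT is 4): reading GPIO 2 on port 1 with no
 * port swap gives gpio_port = 1, so gpio_shift = 2 + 4 = 6 and the returned
 * value is bit 6 of MISC_REG_GPIO.
 */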

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and keep only the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
{
	u32 gpio_reg = 0;
	int rc = 0;

	/* Any port swapping should be handled by caller. */

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and clear the FLOAT/CLR/SET bits for the given pins */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
		/* set CLR */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
		/* set SET */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
		/* set FLOAT */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
		rc = -EINVAL;
		break;
	}

	if (rc == 0)
		REG_WR(bp, MISC_REG_GPIO, gpio_reg);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return rc;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK,
		   "Clear GPIO INT %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK,
		   "Set GPIO INT %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
{
	u32 spio_reg;

	/* Only 2 SPIOs are configurable */
	if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
		BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and keep only the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);

	switch (mode) {
	case MISC_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
		spio_reg |=  (spio << MISC_SPIO_CLR_POS);
		break;

	case MISC_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
		spio_reg |=  (spio << MISC_SPIO_SET_POS);
		break;

	case MISC_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
		/* set FLOAT */
		spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);

	bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
					   ADVERTISED_Pause);
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
						  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
		break;

	default:
		break;
	}
}

static void bnx2x_set_requested_fc(struct bnx2x *bp)
{
	/* Initialize link parameters structure variables.
	 * It is recommended to turn off RX FC for jumbo frames
	 * for better performance.
	 */
	if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
		bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
	else
		bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
}

static void bnx2x_init_dropless_fc(struct bnx2x *bp)
{
	u32 pause_enabled = 0;

	if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
			pause_enabled = 1;

		REG_WR(bp, BAR_USTRORM_INTMEM +
			   USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
		       pause_enabled);
	}

	DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
	   pause_enabled ? "enabled" : "disabled");
}

int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
	u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];

	if (!BP_NOMCP(bp)) {
		bnx2x_set_requested_fc(bp);
		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			struct link_params *lp = &bp->link_params;
			lp->loopback_mode = LOOPBACK_XGXS;
			/* Prefer doing PHY loopback at highest speed */
			if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
				if (lp->speed_cap_mask[cfx_idx] &
				    PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
					lp->req_line_speed[cfx_idx] =
						SPEED_20000;
				else if (lp->speed_cap_mask[cfx_idx] &
					 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
					lp->req_line_speed[cfx_idx] =
						SPEED_10000;
				else
					lp->req_line_speed[cfx_idx] =
						SPEED_1000;
			}
		}

		if (load_mode == LOAD_LOOPBACK_EXT) {
			struct link_params *lp = &bp->link_params;
			lp->loopback_mode = LOOPBACK_EXT;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_init_dropless_fc(bp);

		bnx2x_calc_fc_adv(bp);

		if (bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_init_dropless_fc(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

void bnx2x_force_link_reset(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
	bnx2x_release_phy_lock(bp);
}

u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

/* Fill input->vnic_min_rate[] from the MF configuration; the values are
 * needed for further normalizing of the min rates.
 * Hidden VNs get a min rate of 0, and VNs whose configured min rate is zero
 * get DEF_MIN_RATE. If ETS is enabled, or all configured min rates are zero,
 * the fairness algorithm is deactivated.
 */
static void bnx2x_calc_vn_min(struct bnx2x *bp,
			      struct cmng_init_input *input)
{
	int all_zero = 1;
	int vn;

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			vn_min_rate = 0;
		/* If min rate is zero - set it to DEF_MIN_RATE */
		else if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		input->vnic_min_rate[vn] = vn_min_rate;
	}

	/* if ETS or all min rates are zeros - disable fairness */
	if (BNX2X_IS_ETS_ENABLED(bp)) {
		input->flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
	} else if (all_zero) {
		input->flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP,
		   "All MIN values are zeroes fairness will be disabled\n");
	} else
		input->flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
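
/* Worked example for bnx2x_calc_vn_min() above (illustrative): a configured
 * FUNC_MF_CFG_MIN_BW field of 25 yields vn_min_rate = 25 * 100 = 2500, a
 * hidden VN contributes 0 and a VN configured to 0 contributes DEF_MIN_RATE.
 */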

static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
			      struct cmng_init_input *input)
{
	u16 vn_max_rate;
	u32 vn_cfg = bp->mf_config[vn];

	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
		vn_max_rate = 0;
	else {
		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);

		if (IS_MF_PERCENT_BW(bp)) {
			/* maxCfg in percents of linkspeed */
			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
		} else /* SD modes */
			/* maxCfg is absolute in 100Mb units */
			vn_max_rate = maxCfg * 100;
	}

	DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);

	input->vnic_max_rate[vn] = vn_max_rate;
}
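
/* Worked example for bnx2x_calc_vn_max() above (illustrative): in
 * percent-BW mode with line_speed 10000 and maxCfg 25,
 * vn_max_rate = (10000 * 25) / 100 = 2500; in SD mode a maxCfg of 40
 * (absolute, in 100Mb units) gives vn_max_rate = 40 * 100 = 4000.
 */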

static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
{
	if (CHIP_REV_IS_SLOW(bp))
		return CMNG_FNS_NONE;
	if (IS_MF(bp))
		return CMNG_FNS_MINMAX;

	return CMNG_FNS_NONE;
}

void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);

	if (BP_NOMCP(bp))
		return; /* no MCP - nothing to read, keep mf_config as-is */

	/* For 2 port configuration the absolute function number formula
	 * is:
	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
	 *
	 *      and there are 4 functions per port
	 *
	 * For 4 port configuration it is
	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
	 *
	 *      and there are 2 functions per port
	 */
	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);

		if (func >= E1H_FUNC_MAX)
			break;

		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
	if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
		bp->flags |= MF_FUNC_DIS;
	} else {
		DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
		bp->flags &= ~MF_FUNC_DIS;
	}
}
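
/* Worked example for the abs_func formula in bnx2x_read_mf_cfg() above
 * (illustrative): in 2-port mode (n == 1), vn 1 on port 0, path 1 gives
 * func = 1 * (2 * 1 + 0) + 1 = 3; in 4-port mode (n == 2), vn 1 on port 1,
 * path 0 gives func = 2 * (2 * 1 + 1) + 0 = 6.
 */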

static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{
	struct cmng_init_input input;
	memset(&input, 0, sizeof(struct cmng_init_input));

	input.port_rate = bp->link_vars.line_speed;

	if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
		int vn;

		/* read mf conf from shmem */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* calculate vn_weight_sum and enable fairness if it is not 0 */
		bnx2x_calc_vn_min(bp, &input);

		/* calculate and set min-max rate for each vn */
		if (bp->port.pmf)
			for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
				bnx2x_calc_vn_max(bp, vn, &input);

		/* always enable rate shaping (fairness was decided above) */
		input.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

		bnx2x_init_cmng(&input, &bp->cmng);
		return;
	}

	/* rate shaping and fairness are disabled */
	DP(NETIF_MSG_IFUP,
	   "rate shaping and fairness are disabled\n");
}

static void storm_memset_cmng(struct bnx2x *bp,
			      struct cmng_init *cmng,
			      u8 port)
{
	int vn;
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		int func = func_by_vn(bp, vn);

		addr = BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
		size = sizeof(struct rate_shaping_vars_per_vn);
		__storm_memset_struct(bp, addr, size,
				      (u32 *)&cmng->vnic.vnic_max_rate[vn]);

		addr = BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
		size = sizeof(struct fairness_vars_per_vn);
		__storm_memset_struct(bp, addr, size,
				      (u32 *)&cmng->vnic.vnic_min_rate[vn]);
	}
}

/* init cmng mode in HW according to local configuration */
void bnx2x_set_local_cmng(struct bnx2x *bp)
{
	int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

	if (cmng_fns != CMNG_FNS_NONE) {
		bnx2x_cmng_fns_init(bp, false, cmng_fns);
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode without fairness\n");
	}
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	bnx2x_init_dropless_fc(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old mac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	if (bp->link_vars.link_up && bp->link_vars.line_speed)
		bnx2x_set_local_cmng(bp);

	__bnx2x_link_report(bp);

	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	/* read updated dcb configuration */
	if (IS_PF(bp)) {
		bnx2x_dcbx_pmf_update(bp);
		bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
		if (bp->link_vars.link_up)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
		else
			bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		/* indicate link status */
		bnx2x_link_report(bp);

	} else { /* VF */
		bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full |
					  SUPPORTED_100baseT_Half |
					  SUPPORTED_100baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_2500baseX_Full |
					  SUPPORTED_10000baseT_Full |
					  SUPPORTED_TP |
					  SUPPORTED_FIBRE |
					  SUPPORTED_Autoneg |
					  SUPPORTED_Pause |
					  SUPPORTED_Asym_Pause);
		bp->port.advertising[0] = bp->port.supported[0];

		bp->link_params.bp = bp;
		bp->link_params.port = BP_PORT(bp);
		bp->link_params.req_duplex[0] = DUPLEX_FULL;
		bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
		bp->link_params.req_line_speed[0] = SPEED_10000;
		bp->link_params.speed_cap_mask[0] = 0x7f0000;
		bp->link_params.switch_cfg = SWITCH_CFG_10G;
		bp->link_vars.mac_type = MAC_TYPE_BMAC;
		bp->link_vars.line_speed = SPEED_10000;
		bp->link_vars.link_status =
			(LINK_STATUS_LINK_UP |
			 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
		bp->link_vars.link_up = 1;
		bp->link_vars.duplex = DUPLEX_FULL;
		bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
		__bnx2x_link_report(bp);

		bnx2x_sample_bulletin(bp);

		/* if the bulletin board did not carry a link status update,
		 * __bnx2x_link_report will report the current status; it
		 * will NOT issue a duplicate report if the status was already
		 * reported while sampling the bulletin board.
		 */
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}
}

static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
				  u16 vlan_val, u8 allowed_prio)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_afex_update_params *f_update_params =
		&func_params.params.afex_update;

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;

	/* no need to wait for RAMROD completion, so don't
	 * set RAMROD_COMP_WAIT flag
	 */

	f_update_params->vif_id = vifid;
	f_update_params->afex_default_vlan = vlan_val;
	f_update_params->allowed_priorities = allowed_prio;

	/* if the ramrod cannot be sent, respond to the MCP immediately */
	if (bnx2x_func_state_change(bp, &func_params) < 0)
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);

	return 0;
}

static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
					  u16 vif_index, u8 func_bit_map)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_afex_viflists_params *update_params =
		&func_params.params.afex_viflists;
	int rc;
	u32 drv_msg_code;

	/* validate only LIST_SET and LIST_GET are received from switch */
	if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
		BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
			  cmd_type);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;

	/* set parameters according to cmd_type */
	update_params->afex_vif_list_command = cmd_type;
	update_params->vif_list_index = vif_index;
	update_params->func_bit_map =
		(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
	update_params->func_to_clear = 0;
	drv_msg_code =
		(cmd_type == VIF_LIST_RULE_GET) ?
		DRV_MSG_CODE_AFEX_LISTGET_ACK :
		DRV_MSG_CODE_AFEX_LISTSET_ACK;

	/* if the ramrod cannot be sent, respond to the MCP immediately for
	 * SET and GET requests (others are not triggered by the MCP)
	 */
	rc = bnx2x_func_state_change(bp, &func_params);
	if (rc < 0)
		bnx2x_fw_command(bp, drv_msg_code, 0);

	return 0;
}

static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
{
	struct afex_stats afex_stats;
	u32 func = BP_ABS_FUNC(bp);
	u32 mf_config;
	u16 vlan_val;
	u32 vlan_prio;
	u16 vif_id;
	u8 allowed_prio;
	u8 vlan_mode;
	u32 addr_to_write, vifid, addrs, stats_type, i;

	if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
	}

	if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
		addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
		   vifid, addrs);
		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
					       addrs);
	}

	if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
		addr_to_write = SHMEM2_RD(bp,
			afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
		stats_type = SHMEM2_RD(bp,
			afex_param1_to_driver[BP_FW_MB_IDX(bp)]);

		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
		   addr_to_write);

		bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);

		/* write response to scratchpad, for MCP */
		for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
			REG_WR(bp, addr_to_write + i*sizeof(u32),
			       *(((u32 *)(&afex_stats))+i));

		/* send ack message to MCP */
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
	}

	if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
		mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
		bp->mf_config[BP_VN(bp)] = mf_config;
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
		   mf_config);

		/* if VIF_SET is "enabled" */
		if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
			/* set rate limit directly to internal RAM */
			struct cmng_init_input cmng_input;
			struct rate_shaping_vars_per_vn m_rs_vn;
			size_t size = sizeof(struct rate_shaping_vars_per_vn);
			u32 addr = BAR_XSTRORM_INTMEM +
			    XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));

			bp->mf_config[BP_VN(bp)] = mf_config;

			bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
			m_rs_vn.vn_counter.rate =
				cmng_input.vnic_max_rate[BP_VN(bp)];
			m_rs_vn.vn_counter.quota =
				(m_rs_vn.vn_counter.rate *
				 RS_PERIODIC_TIMEOUT_USEC) / 8;

			__storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);

			/* read relevant values from mf_cfg struct in shmem */
			vif_id =
				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
				 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
				FUNC_MF_CFG_E1HOV_TAG_SHIFT;
			vlan_val =
				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
				 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
				FUNC_MF_CFG_AFEX_VLAN_SHIFT;
			vlan_prio = (mf_config &
				     FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
				    FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
			vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
			vlan_mode =
				(MF_CFG_RD(bp,
					   func_mf_config[func].afex_config) &
				 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
				FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
			allowed_prio =
				(MF_CFG_RD(bp,
					   func_mf_config[func].afex_config) &
				 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
				FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;

			/* send ramrod to FW, return in case of failure */
			if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
						   allowed_prio))
				return;

			bp->afex_def_vlan_tag = vlan_val;
			bp->afex_vlan_mode = vlan_mode;
		} else {
			/* notify link down because the function was disabled */
			bnx2x_link_report(bp);

			/* send INVALID VIF ramrod to FW */
			bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);

			/* Reset the default afex VLAN */
			bp->afex_def_vlan_tag = -1;
		}
	}
}

static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
{
	struct bnx2x_func_switch_update_params *switch_update_params;
	struct bnx2x_func_state_params func_params;

	memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
	switch_update_params = &func_params.params.switch_update;
	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
		int func = BP_ABS_FUNC(bp);
		u32 val;

		/* Re-learn the S-tag from shmem */
		val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
				FUNC_MF_CFG_E1HOV_TAG_MASK;
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			bp->mf_ov = val;
		} else {
			BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
			goto fail;
		}

		/* Configure new S-tag in LLH */
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
		       bp->mf_ov);

		/* Send Ramrod to update FW of change */
		__set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
			  &switch_update_params->changes);
		switch_update_params->vlan = bp->mf_ov;

		if (bnx2x_func_state_change(bp, &func_params) < 0) {
			BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
				  bp->mf_ov);
			goto fail;
		} else {
			DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
			   bp->mf_ov);
		}
	} else {
		goto fail;
	}

	bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
	return;
fail:
	bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);

	/*
	 * We need the mb() to ensure the ordering between the writing to
	 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
	 */
	smp_mb();

	/* queue a periodic task */
	queue_delayed_work(bnx2x_wq, &bp->period_task, 0);

	bnx2x_dcbx_pmf_update(bp);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_VN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	seq = ++bp->fw_seq;
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
			(command | seq), param);

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
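
/* Illustrative call pattern for bnx2x_fw_command() above (a sketch; real
 * callers appear throughout this file): the masked reply code is returned,
 * and 0 indicates the MCP failed to respond:
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
 *	if (!rc)
 *		... handle an unresponsive MCP ...
 */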

static void storm_memset_func_cfg(struct bnx2x *bp,
				 struct tstorm_eth_function_common_config *tcfg,
				 u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	if (CHIP_IS_E1x(bp)) {
		struct tstorm_eth_function_common_config tcfg = {0};

		storm_memset_func_cfg(bp, &tcfg, p->func_id);
	}

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* spq */
	if (p->spq_active) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}

/**
 * bnx2x_get_common_flags - Return common flags
 *
 * @bp:		device handle
 * @fp:		queue handle
 * @zero_stats:	TRUE if statistics zeroing is needed
 *
 * Return the flags that are common to the Tx-only and the regular
 * connections.
 */
static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
					    struct bnx2x_fastpath *fp,
					    bool zero_stats)
{
	unsigned long flags = 0;

	/* PF driver will always initialize the Queue to an ACTIVE state */
	__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);

	/* tx only connections collect statistics (on the same index as the
	 * parent connection). The statistics are zeroed when the parent
	 * connection is initialized.
	 */

	__set_bit(BNX2X_Q_FLG_STATS, &flags);
	if (zero_stats)
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);

	if (bp->flags & TX_SWITCHING)
		__set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);

	__set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
	__set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);

#ifdef BNX2X_STOP_ON_ERROR
	__set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
#endif

	return flags;
}

static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp,
				       bool leading)
{
	unsigned long flags = 0;

	/* calculate other queue flags */
	if (IS_MF_SD(bp))
		__set_bit(BNX2X_Q_FLG_OV, &flags);

	if (IS_FCOE_FP(fp)) {
		__set_bit(BNX2X_Q_FLG_FCOE, &flags);
		/* For FCoE - force usage of default priority (for afex) */
		__set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
	}

	if (fp->mode != TPA_MODE_DISABLED) {
		__set_bit(BNX2X_Q_FLG_TPA, &flags);
		__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
		if (fp->mode == TPA_MODE_GRO)
			__set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
	}

	if (leading) {
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
		__set_bit(BNX2X_Q_FLG_MCAST, &flags);
	}

	/* Always set HW VLAN stripping */
	__set_bit(BNX2X_Q_FLG_VLAN, &flags);

	/* configure silent vlan removal */
	if (IS_MF_AFEX(bp))
		__set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);

	return flags | bnx2x_get_common_flags(bp, fp, true);
}

static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
	u8 cos)
{
	gen_init->stat_id = bnx2x_stats_id(fp);
	gen_init->spcl_id = fp->cl_id;

	/* Always use mini-jumbo MTU for FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
	else
		gen_init->mtu = bp->dev->mtu;

	gen_init->cos = cos;

	gen_init->fp_hsi = ETH_FP_HSI_VERSION;
}

static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_setup_params *rxq_init)
{
	u8 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	if (fp->mode != TPA_MODE_DISABLED) {
		pause->sge_th_lo = SGE_TH_LO(bp);
		pause->sge_th_hi = SGE_TH_HI(bp);

		/* validate SGE ring has enough to cross high threshold */
		WARN_ON(bp->dropless_fc &&
				pause->sge_th_hi + FW_PREFETCH_CNT >
				MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);

		tpa_agg_size = TPA_AGG_SIZE;
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
	}
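
	/* Worked example for the max_sge math above (illustrative; the SGE
	 * constants are build-time configuration): with an MTU of 9000, an
	 * SGE_PAGE_SHIFT of 12 and PAGES_PER_SGE of 2,
	 * SGE_PAGE_ALIGN(9000) >> 12 gives 3 pages, which rounds up to 4
	 * and converts to max_sge = 2 SGE entries.
	 */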

	/* pause - not for e1 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_lo = BD_TH_LO(bp);
		pause->bd_th_hi = BD_TH_HI(bp);

		pause->rcq_th_lo = RCQ_TH_LO(bp);
		pause->rcq_th_hi = RCQ_TH_HI(bp);
		/*
		 * validate that rings have enough entries to cross
		 * high thresholds
		 */
		WARN_ON(bp->dropless_fc &&
				pause->bd_th_hi + FW_PREFETCH_CNT >
				bp->rx_ring_size);
		WARN_ON(bp->dropless_fc &&
				pause->rcq_th_hi + FW_PREFETCH_CNT >
				NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);

		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;

	/* This should be the maximum number of data bytes that may be
	 * placed on the BD (not including paddings).
	 */
	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
			   BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;

	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->rss_engine_id = BP_FUNC(bp);
	rxq_init->mcast_engine_id = BP_FUNC(bp);

	/* Maximum number of simultaneous TPA aggregations for this Queue.
	 *
	 * For PF Clients it should be the maximum available number.
	 * VF driver(s) may want to define it to a smaller value.
	 */
	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);

	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	if (IS_FCOE_FP(fp))
		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
	else
		rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
	/* configure silent vlan removal
	 * if multi function mode is afex, then mask default vlan
	 */
	if (IS_MF_AFEX(bp)) {
		rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
		rxq_init->silent_removal_mask = VLAN_VID_MASK;
	}
}

static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
	u8 cos)
{
	txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
	txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
	txq_init->fw_sb_id = fp->fw_sb_id;

	/*
	 * set the tss leading client id for TX classification ==
	 * leading RSS client id
	 */
	txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);

	if (IS_FCOE_FP(fp)) {
		txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
		txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
	}
}

static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct event_ring_data eq_data = { {0} };

	if (!CHIP_IS_E1x(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	func_init.spq_active = true;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	 * Congestion management values depend on the link rate.
	 * There is no active link yet, so the initial link rate is set to
	 * 10 Gbps. When the link comes up, the congestion management values
	 * are re-calculated according to the actual link rate.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* init Event Queue - the PCI bus guarantees correct endianness */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bnx2x_tx_disable(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);

	/* Tx queues should only be re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Do not call netif_carrier_on here; it will be called by the link
	 * state check if the link is up.
	 */
}

#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3

static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
{
	struct eth_stats_info *ether_stat =
		&bp->slowpath->drv_info_to_mcp.ether_stat;
	struct bnx2x_vlan_mac_obj *mac_obj =
		&bp->sp_objs->mac_obj;
	int i;

	strlcpy(ether_stat->version, DRV_MODULE_VERSION,
		ETH_STAT_INFO_VERSION_LEN);

	/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
	 * mac_local field of the ether_stat struct. The base address is
	 * offset by 2 bytes to account for the field being 8 bytes while a
	 * mac address is only 6 bytes. Likewise, the stride for the
	 * get_n_elements function is 2 bytes, compensating for the
	 * difference between the 6 bytes of a mac and the 8 bytes allocated
	 * by the ether_stat struct, so the macs land in their proper
	 * positions.
	 */
	for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
		memset(ether_stat->mac_local + i, 0,
		       sizeof(ether_stat->mac_local[0]));
	mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
				DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
				ether_stat->mac_local + MAC_PAD, MAC_PAD,
				ETH_ALEN);
	ether_stat->mtu_size = bp->dev->mtu;
	if (bp->dev->features & NETIF_F_RXCSUM)
		ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
	if (bp->dev->features & NETIF_F_TSO)
		ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
	ether_stat->feature_flags |= bp->common.boot_mode;

	ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;

	ether_stat->txq_size = bp->tx_ring_size;
	ether_stat->rxq_size = bp->rx_ring_size;

#ifdef CONFIG_BNX2X_SRIOV
	ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
#endif
}

static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
{
	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
	struct fcoe_stats_info *fcoe_stat =
		&bp->slowpath->drv_info_to_mcp.fcoe_stat;

	if (!CNIC_LOADED(bp))
		return;

	memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);

	fcoe_stat->qos_priority =
		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];

	/* insert FCoE stats from ramrod response */
	if (!NO_FCOE(bp)) {
		struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
			&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
			tstorm_queue_statistics;

		struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
			&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
			xstorm_queue_statistics;

		struct fcoe_statistics_params *fw_fcoe_stat =
			&bp->fw_stats_data->fcoe;

		ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
			  fcoe_stat->rx_bytes_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

		ADD_64_LE(fcoe_stat->rx_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
			  fcoe_stat->rx_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

		ADD_64_LE(fcoe_stat->rx_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
			  fcoe_stat->rx_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

		ADD_64_LE(fcoe_stat->rx_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
			  fcoe_stat->rx_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

		ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
			  fcoe_stat->rx_frames_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

		ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
			  fcoe_stat->rx_frames_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_pkts);

		ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
			  fcoe_stat->rx_frames_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_pkts);

		ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
			  fcoe_stat->rx_frames_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_pkts);

		ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
			  fcoe_stat->tx_bytes_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

		ADD_64_LE(fcoe_stat->tx_bytes_hi,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
			  fcoe_stat->tx_bytes_lo,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

		ADD_64_LE(fcoe_stat->tx_bytes_hi,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
			  fcoe_stat->tx_bytes_lo,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

		ADD_64_LE(fcoe_stat->tx_bytes_hi,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
			  fcoe_stat->tx_bytes_lo,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

		ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
			  fcoe_stat->tx_frames_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

		ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
			  fcoe_stat->tx_frames_lo,
			  fcoe_q_xstorm_stats->ucast_pkts_sent);

		ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
			  fcoe_stat->tx_frames_lo,
			  fcoe_q_xstorm_stats->bcast_pkts_sent);

		ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
			  fcoe_stat->tx_frames_lo,
			  fcoe_q_xstorm_stats->mcast_pkts_sent);
	}

	/* ask L5 driver to add data to the struct */
	bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
}

static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
{
	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
	struct iscsi_stats_info *iscsi_stat =
		&bp->slowpath->drv_info_to_mcp.iscsi_stat;

	if (!CNIC_LOADED(bp))
		return;

	memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
	       ETH_ALEN);

	iscsi_stat->qos_priority =
		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];

	/* ask L5 driver to add data to the struct */
	bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
}

/* called due to MCP event (on pmf):
 *	reread new bandwidth configuration
 *	configure FW
 *	notify other functions about the change
 */
static void bnx2x_config_mf_bw(struct bnx2x *bp)
{
	/* Workaround for MFW bug.
	 * MFW is not supposed to generate BW attention in
	 * single function mode.
	 */
	if (!IS_MF(bp)) {
		DP(BNX2X_MSG_MCP,
		   "Ignoring MF BW config in single function mode\n");
		return;
	}

	if (bp->link_vars.link_up) {
		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
	}
	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
}

static void bnx2x_set_mf_bw(struct bnx2x *bp)
{
	bnx2x_config_mf_bw(bp);
	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}

static void bnx2x_handle_eee_event(struct bnx2x *bp)
{
	DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
	bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
}

#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH	(20)
#define BNX2X_UPDATE_DRV_INFO_IND_COUNT		(25)

static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
{
	enum drv_info_opcode op_code;
	u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
	bool release = false;
	int wait;

	/* if drv_info version supported by MFW doesn't match - send NACK */
	if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
		return;
	}

	op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
		  DRV_INFO_CONTROL_OP_CODE_SHIFT;

	/* Must prevent other flows from accessing drv_info_to_mcp */
	mutex_lock(&bp->drv_info_mutex);

	memset(&bp->slowpath->drv_info_to_mcp, 0,
	       sizeof(union drv_info_to_mcp));

	switch (op_code) {
	case ETH_STATS_OPCODE:
		bnx2x_drv_info_ether_stat(bp);
		break;
	case FCOE_STATS_OPCODE:
		bnx2x_drv_info_fcoe_stat(bp);
		break;
	case ISCSI_STATS_OPCODE:
		bnx2x_drv_info_iscsi_stat(bp);
		break;
	default:
		/* if op code isn't supported - send NACK */
		bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
		goto out;
	}

	/* if we got drv_info attn from MFW then these fields are defined in
	 * shmem2 for sure
	 */
	SHMEM2_WR(bp, drv_info_host_addr_lo,
		U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
	SHMEM2_WR(bp, drv_info_host_addr_hi,
		U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));

	bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);

	/* Since management may want both this and the get_driver_version
	 * flow, wait until management notifies us that it has finished
	 * utilizing the buffer.
	 */
	if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
		DP(BNX2X_MSG_MCP, "Management does not support indication\n");
	} else if (!bp->drv_info_mng_owner) {
		u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));

		for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
			u32 indication = SHMEM2_RD(bp, mfw_drv_indication);

			/* Management is done; need to clear indication */
			if (indication & bit) {
				SHMEM2_WR(bp, mfw_drv_indication,
					  indication & ~bit);
				release = true;
				break;
			}

			msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
		}
	}
	if (!release) {
		DP(BNX2X_MSG_MCP, "Management did not release indication\n");
		bp->drv_info_mng_owner = true;
	}

out:
	mutex_unlock(&bp->drv_info_mutex);
}

static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
{
	u8 vals[4];
	int i = 0;

	if (bnx2x_format) {
		i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
			   &vals[0], &vals[1], &vals[2], &vals[3]);
		if (i > 0)
			vals[0] -= '0';
	} else {
		i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
			   &vals[0], &vals[1], &vals[2], &vals[3]);
	}

	while (i < 4)
		vals[i++] = 0;

	return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
}
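
/* Worked example for bnx2x_update_mng_version_utility() above
 * (illustrative): in bnx2x format a version string such as "1.712.30"
 * parses to vals = {7, 12, 30, 0} (the leading digit is read as a character
 * and converted), packing into 0x070c1e00; missing trailing fields are
 * zero-filled.
 */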

void bnx2x_update_mng_version(struct bnx2x *bp)
{
	u32 iscsiver = DRV_VER_NOT_LOADED;
	u32 fcoever = DRV_VER_NOT_LOADED;
	u32 ethver = DRV_VER_NOT_LOADED;
	int idx = BP_FW_MB_IDX(bp);
	u8 *version;

	if (!SHMEM2_HAS(bp, func_os_drv_ver))
		return;

	mutex_lock(&bp->drv_info_mutex);
	/* Must not proceed while bnx2x_handle_drv_info_req() may be running */
	if (bp->drv_info_mng_owner)
		goto out;

	if (bp->state != BNX2X_STATE_OPEN)
		goto out;

	/* Parse ethernet driver version */
	ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
	if (!CNIC_LOADED(bp))
		goto out;

	/* Try getting storage driver version via cnic */
	memset(&bp->slowpath->drv_info_to_mcp, 0,
	       sizeof(union drv_info_to_mcp));
	bnx2x_drv_info_iscsi_stat(bp);
	version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
	iscsiver = bnx2x_update_mng_version_utility(version, false);

	memset(&bp->slowpath->drv_info_to_mcp, 0,
	       sizeof(union drv_info_to_mcp));
	bnx2x_drv_info_fcoe_stat(bp);
	version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
	fcoever = bnx2x_update_mng_version_utility(version, false);

out:
	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);

	mutex_unlock(&bp->drv_info_mutex);

	DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
	   ethver, iscsiver, fcoever);
}

void bnx2x_update_mfw_dump(struct bnx2x *bp)
{
	u32 drv_ver;
	u32 valid_dump;

	if (!SHMEM2_HAS(bp, drv_info))
		return;

	/* Update Driver load time, possibly broken in y2038 */
	SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());

	drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
	SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);

	SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));

	/* Check & notify On-Chip dump. */
	valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);

	if (valid_dump & FIRST_DUMP_VALID)
		DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");

	if (valid_dump & SECOND_DUMP_VALID)
		DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
}

static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
{
	u32 cmd_ok, cmd_fail;

	/* sanity */
	if (event & DRV_STATUS_DCC_EVENT_MASK &&
	    event & DRV_STATUS_OEM_EVENT_MASK) {
		BNX2X_ERR("Received simultaneous events %08x\n", event);
		return;
	}

	if (event & DRV_STATUS_DCC_EVENT_MASK) {
		cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
		cmd_ok = DRV_MSG_CODE_DCC_OK;
	} else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ {
		cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
		cmd_ok = DRV_MSG_CODE_OEM_OK;
	}

	DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);

	if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
		     DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
		/* This is the only place besides the function initialization
		 * where bp->flags can change, so it is done without any
		 * locks.
		 */
		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
			   DRV_STATUS_OEM_DISABLE_ENABLE_PF);
	}

	if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
		     DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
		bnx2x_config_mf_bw(bp);
		event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
			   DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
	}

	/* Report results to MCP */
	if (event)
		bnx2x_fw_command(bp, cmd_fail, 0);
	else
		bnx2x_fw_command(bp, cmd_ok, 0);
}

/* must be called under the spq lock */
static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(BNX2X_MSG_SP, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}

/* must be called under the spq lock */
static void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/*
	 * Make sure that BD data is updated before writing the producer:
	 * BD data is written to the memory, the producer is read from the
	 * memory, thus we need a full memory barrier to ensure the ordering.
	 */
	mb();

	REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
			 bp->spq_prod_idx);
}

/**
 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:	command to check
 * @cmd_type:	command type
 */
static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
{
	return (cmd_type == NONE_CONNECTION_TYPE) ||
	       (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
	       (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
	       (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
	       (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
	       (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
	       (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE);
}

/**
 * bnx2x_sp_post - place a single command on an SP ring
 *
 * @bp:		driver handle
 * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:	SW CID the command is related to
 * @data_hi:	command private data address (high 32 bits)
 * @data_lo:	command private data address (low 32 bits)
 * @cmd_type:	command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two u32 fields.
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int cmd_type)
{
	struct eth_spe *spe;
	u16 type;
	bool common = bnx2x_is_contextless_ramrod(command, cmd_type);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't post SP when there is panic\n");
		return -EIO;
	}
#endif

	spin_lock_bh(&bp->spq_lock);

	if (common) {
		if (!atomic_read(&bp->eq_spq_left)) {
			BNX2X_ERR("BUG! EQ ring full!\n");
			spin_unlock_bh(&bp->spq_lock);
			bnx2x_panic();
			return -EBUSY;
		}
	} else if (!atomic_read(&bp->cq_spq_left)) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs the port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));

	/* In some cases, the type may already contain the func-id (mainly in
	 * SRIOV-related use cases), so we add it here only if it's not
	 * already set.
	 */
	if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
		type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
			SPE_HDR_CONN_TYPE;
		type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);
	} else {
		type = cmd_type;
	}

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/*
	 * It's ok if the actual decrement is issued towards the memory
	 * somewhere between the spin_lock and spin_unlock. Thus no
	 * more explicit memory barrier is needed.
	 */
	if (common)
		atomic_dec(&bp->eq_spq_left);
	else
		atomic_dec(&bp->cq_spq_left);

	DP(BNX2X_MSG_SP,
	   "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
	   HW_CID(bp, cid), data_hi, data_lo, type,
	   atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
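
/* Illustrative usage of bnx2x_sp_post() above (a sketch; actual ramrods are
 * posted by the queue and function state machines): the private data buffer
 * is passed as an address pair, e.g.:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, cid,
 *			   U64_HI(mapping), U64_LO(mapping),
 *			   ETH_CONNECTION_TYPE);
 */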

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 j, val;
	int rc = 0;

	might_sleep();
	for (j = 0; j < 1000; j++) {
		REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
		val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
		if (val & MCPR_ACCESS_LOCK_LOCK)
			break;

		usleep_range(5000, 10000);
	}
	if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
}

#define BNX2X_DEF_SB_ATT_IDX	0x0001
#define BNX2X_DEF_SB_IDX	0x0002

static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not reorder: indices reading should complete before handling */
	barrier();
	return rc;
}
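
/* Illustrative consumption of the bits returned by bnx2x_update_dsb_idx()
 * above (a sketch, not the exact slowpath task code):
 *
 *	u16 status = bnx2x_update_dsb_idx(bp);
 *	if (status & BNX2X_DEF_SB_ATT_IDX)
 *		... handle attention bits ...
 *	if (status & BNX2X_DEF_SB_IDX)
 *		... handle other slowpath events ...
 */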

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;
	u32 reg_addr;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);

			/* If nig_mask is not set, no need to call the update
			 * function.
			 */
			if (nig_mask) {
				REG_WR(bp, nig_int_mask_addr, 0);

				bnx2x_link_attn(bp);
			}

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_SET);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);

	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		/* Verify that IGU ack through BAR was written before restoring
		 * NIG mask. This loop should exit after 2-3 iterations max.
		 */
		if (bp->common.int_block != INT_BLOCK_HC) {
			u32 cnt = 0, igu_acked;
			do {
				igu_acked = REG_RD(bp,
						   IGU_REG_ATTENTION_ACK_BITS);
			} while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
				 (++cnt < MAX_IGU_ATTN_ACK_TO));
			if (!(igu_acked & ATTN_NIG_FOR_FUNC))
				DP(NETIF_MSG_HW,
				   "Failed to verify IGU ack on time\n");
			barrier();
		}
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
			    "Please contact OEM Support for assistance\n");

	/* Schedule device reset (unload).
	 * Some boards draw enough power while the driver is up to
	 * overheat if the fan fails.
	 */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
}

static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		bnx2x_hw_reset_phy(&bp->link_params);
		bnx2x_fan_failure(bp);
	}

	if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUPT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUPT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");

		if (!CHIP_IS_E1x(bp)) {
			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
		}
	}

	if (attn & HW_INTERRUPT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x_read_mf_cfg(bp);
			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
					func_mf_config[BP_ABS_FUNC(bp)].config);
			val = SHMEM_RD(bp,
				       func_mb[BP_FW_MB_IDX(bp)].drv_status);

			if (val & (DRV_STATUS_DCC_EVENT_MASK |
				   DRV_STATUS_OEM_EVENT_MASK))
				bnx2x_oem_event(bp,
					(val & (DRV_STATUS_DCC_EVENT_MASK |
						DRV_STATUS_OEM_EVENT_MASK)));

			if (val & DRV_STATUS_SET_MF_BW)
				bnx2x_set_mf_bw(bp);

			if (val & DRV_STATUS_DRV_INFO_REQ)
				bnx2x_handle_drv_info_req(bp);

			if (val & DRV_STATUS_VF_DISABLED)
				bnx2x_schedule_iov_task(bp,
							BNX2X_IOV_HANDLE_FLR);

			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

			if (bp->port.pmf &&
			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
				bp->dcbx_enabled > 0)
				/* start dcbx state machine */
				bnx2x_dcbx_set_params(bp,
					BNX2X_DCBX_STATE_NEG_RECEIVED);
			if (val & DRV_STATUS_AFEX_EVENT_MASK)
				bnx2x_handle_afex_cmd(bp,
					val & DRV_STATUS_AFEX_EVENT_MASK);
			if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
				bnx2x_handle_eee_event(bp);

			if (val & DRV_STATUS_OEM_UPDATE_SVID)
				bnx2x_schedule_sp_rtnl(bp,
					BNX2X_SP_RTNL_UPDATE_SVID, 0);

			if (bp->link_vars.periodic_flags &
			    PERIODIC_FLAGS_LINK_EVENT) {
				/*  sync with link */
				bnx2x_acquire_phy_lock(bp);
				bp->link_vars.periodic_flags &=
					~PERIODIC_FLAGS_LINK_EVENT;
				bnx2x_release_phy_lock(bp);
				if (IS_MF(bp))
					bnx2x_link_sync_notify(bp);
				bnx2x_link_report(bp);
			}
			/* Always call it here: bnx2x_link_report() will
			 * prevent duplicate link indications.
			 */
			bnx2x__link_status_update(bp);
		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			bnx2x_mc_assert(bp);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
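		/* clear all latched attention signals */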
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

/*
 * Bits map:
 * 0-7   - Engine0 load counter.
 * 8-15  - Engine1 load counter.
 * 16    - Engine0 RESET_IN_PROGRESS bit.
 * 17    - Engine1 RESET_IN_PROGRESS bit.
 * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active
 *         function on the engine.
 * 19    - Engine1 ONE_IS_LOADED.
 * 20    - Chip reset flow bit. When set, a non-leader must wait for both
 *         engines' leaders to complete (i.e. check both RESET_IN_PROGRESS
 *         bits, not just the one belonging to its engine).
 *
 */
#define BNX2X_RECOVERY_GLOB_REG		MISC_REG_GENERIC_POR_1

#define BNX2X_PATH0_LOAD_CNT_MASK	0x000000ff
#define BNX2X_PATH0_LOAD_CNT_SHIFT	0
#define BNX2X_PATH1_LOAD_CNT_MASK	0x0000ff00
#define BNX2X_PATH1_LOAD_CNT_SHIFT	8
#define BNX2X_PATH0_RST_IN_PROG_BIT	0x00010000
#define BNX2X_PATH1_RST_IN_PROG_BIT	0x00020000
#define BNX2X_GLOBAL_RESET_BIT		0x00040000

/*
 * Set the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock
 */
void bnx2x_set_reset_global(struct bnx2x *bp)
{
	u32 val;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Clear the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock
 */
static void bnx2x_clear_reset_global(struct bnx2x *bp)
{
	u32 val;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Checks the GLOBAL_RESET bit.
 *
 * should be run under rtnl lock
 */
static bool bnx2x_reset_is_global(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
	return !!(val & BNX2X_GLOBAL_RESET_BIT);
}

/*
 * Clear RESET_IN_PROGRESS bit for the current engine.
 *
 * Should be run under rtnl lock
 */
static void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val;
	u32 bit = BP_PATH(bp) ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	/* Clear the bit */
	val &= ~bit;
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Set RESET_IN_PROGRESS for the current engine.
 *
 * should be run under rtnl lock
 */
void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
	u32 val;
	u32 bit = BP_PATH(bp) ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	/* Set the bit */
	val |= bit;
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Checks the RESET_IN_PROGRESS bit for the given engine.
 * should be run under rtnl lock
 */
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
{
	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	u32 bit = engine ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;

	/* return false if bit is set */
	return !(val & bit);
}

/*
 * set pf load for the current pf.
 *
 * should be run under rtnl lock
 */
void bnx2x_set_pf_load(struct bnx2x *bp)
{
	u32 val1, val;
	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
			     BNX2X_PATH0_LOAD_CNT_MASK;
	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
			     BNX2X_PATH0_LOAD_CNT_SHIFT;
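	/* Despite the "load counter" name, the register field is a bitmask
	 * with one bit per PF on this path.
	 */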

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);

	/* get the current counter value */
	val1 = (val & mask) >> shift;

	/* set bit of that PF */
	val1 |= (1 << bp->pf_num);

	/* clear the old value */
	val &= ~mask;

	/* set the new one */
	val |= ((val1 << shift) & mask);

	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/**
 * bnx2x_clear_pf_load - clear pf load mark
 *
 * @bp:		driver handle
 *
 * Should be run under rtnl lock.
 * Decrements the load counter for the current engine. Returns
 * whether other functions are still loaded
 */
bool bnx2x_clear_pf_load(struct bnx2x *bp)
{
	u32 val1, val;
	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
			     BNX2X_PATH0_LOAD_CNT_MASK;
	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
			     BNX2X_PATH0_LOAD_CNT_SHIFT;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);

	/* get the current counter value */
	val1 = (val & mask) >> shift;

	/* clear bit of that PF */
	val1 &= ~(1 << bp->pf_num);

	/* clear the old value */
	val &= ~mask;

	/* set the new one */
	val |= ((val1 << shift) & mask);

	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	return val1 != 0;
}

/*
 * Read the load status for the current engine.
 *
 * should be run under rtnl lock
 */
static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
{
	u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
			     BNX2X_PATH0_LOAD_CNT_MASK);
	u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
			     BNX2X_PATH0_LOAD_CNT_SHIFT);
	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);

	val = (val & mask) >> shift;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
	   engine, val);

	return val != 0;
}

static void _print_parity(struct bnx2x *bp, u32 reg)
{
	pr_cont(" [0x%08x] ", REG_RD(bp, reg));
}

static void _print_next_block(int idx, const char *blk)
{
	pr_cont("%s%s", idx ? ", " : "", blk);
}

static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
					    int *par_num, bool print)
{
	u32 cur_bit;
	bool res;
	int i;

	res = false;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			res = true; /* each set bit is a real error */

			if (print) {
				switch (cur_bit) {
				case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
					_print_next_block((*par_num)++, "BRB");
					_print_parity(bp,
						      BRB1_REG_BRB1_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "PARSER");
					_print_parity(bp, PRS_REG_PRS_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
					_print_next_block((*par_num)++, "TSDM");
					_print_parity(bp,
						      TSDM_REG_TSDM_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "SEARCHER");
					_print_parity(bp, SRC_REG_SRC_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
					_print_next_block((*par_num)++, "TCM");
					_print_parity(bp, TCM_REG_TCM_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "TSEMI");
					_print_parity(bp,
						      TSEM_REG_TSEM_PRTY_STS_0);
					_print_parity(bp,
						      TSEM_REG_TSEM_PRTY_STS_1);
					break;
				case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
					_print_next_block((*par_num)++, "XPB");
					_print_parity(bp, GRCBASE_XPB +
							  PB_REG_PB_PRTY_STS);
					break;
				}
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
					    int *par_num, bool *global,
					    bool print)
{
	u32 cur_bit;
	bool res;
	int i;

	res = false;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			res = true; /* each set bit is a real error */
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "PBF");
					_print_parity(bp, PBF_REG_PBF_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "QM");
					_print_parity(bp, QM_REG_QM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "TM");
					_print_parity(bp, TM_REG_TM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "XSDM");
					_print_parity(bp,
						      XSDM_REG_XSDM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "XCM");
					_print_parity(bp, XCM_REG_XCM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++,
							  "XSEMI");
					_print_parity(bp,
						      XSEM_REG_XSEM_PRTY_STS_0);
					_print_parity(bp,
						      XSEM_REG_XSEM_PRTY_STS_1);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++,
							  "DOORBELLQ");
					_print_parity(bp,
						      DORQ_REG_DORQ_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "NIG");
					if (CHIP_IS_E1x(bp)) {
						_print_parity(bp,
							NIG_REG_NIG_PRTY_STS);
					} else {
						_print_parity(bp,
							NIG_REG_NIG_PRTY_STS_0);
						_print_parity(bp,
							NIG_REG_NIG_PRTY_STS_1);
					}
				}
				break;
			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
				if (print)
					_print_next_block((*par_num)++,
							  "VAUX PCI CORE");
				*global = true;
				break;
			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++,
							  "DEBUG");
					_print_parity(bp, DBG_REG_DBG_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "USDM");
					_print_parity(bp,
						      USDM_REG_USDM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "UCM");
					_print_parity(bp, UCM_REG_UCM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++,
							  "USEMI");
					_print_parity(bp,
						      USEM_REG_USEM_PRTY_STS_0);
					_print_parity(bp,
						      USEM_REG_USEM_PRTY_STS_1);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "UPB");
					_print_parity(bp, GRCBASE_UPB +
							  PB_REG_PB_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "CSDM");
					_print_parity(bp,
						      CSDM_REG_CSDM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "CCM");
					_print_parity(bp, CCM_REG_CCM_PRTY_STS);
				}
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
					    int *par_num, bool print)
{
	u32 cur_bit;
	bool res;
	int i;

	res = false;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			res = true; /* each set bit is a real error */
			if (print) {
				switch (cur_bit) {
				case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "CSEMI");
					_print_parity(bp,
						      CSEM_REG_CSEM_PRTY_STS_0);
					_print_parity(bp,
						      CSEM_REG_CSEM_PRTY_STS_1);
					break;
				case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
					_print_next_block((*par_num)++, "PXP");
					_print_parity(bp, PXP_REG_PXP_PRTY_STS);
					_print_parity(bp,
						      PXP2_REG_PXP2_PRTY_STS_0);
					_print_parity(bp,
						      PXP2_REG_PXP2_PRTY_STS_1);
					break;
				case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "PXPPCICLOCKCLIENT");
					break;
				case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
					_print_next_block((*par_num)++, "CFC");
					_print_parity(bp,
						      CFC_REG_CFC_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
					_print_next_block((*par_num)++, "CDU");
					_print_parity(bp, CDU_REG_CDU_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
					_print_next_block((*par_num)++, "DMAE");
					_print_parity(bp,
						      DMAE_REG_DMAE_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
					_print_next_block((*par_num)++, "IGU");
					if (CHIP_IS_E1x(bp))
						_print_parity(bp,
							HC_REG_HC_PRTY_STS);
					else
						_print_parity(bp,
							IGU_REG_IGU_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
					_print_next_block((*par_num)++, "MISC");
					_print_parity(bp,
						      MISC_REG_MISC_PRTY_STS);
					break;
				}
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
					    int *par_num, bool *global,
					    bool print)
{
	bool res = false;
	u32 cur_bit;
	int i;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
				if (print)
					_print_next_block((*par_num)++,
							  "MCP ROM");
				*global = true;
				res = true;
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
				if (print)
					_print_next_block((*par_num)++,
							  "MCP UMP RX");
				*global = true;
				res = true;
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
				if (print)
					_print_next_block((*par_num)++,
							  "MCP UMP TX");
				*global = true;
				res = true;
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
				(*par_num)++;
				/* clear latched SCPAD PARITY from MCP */
				REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
				       1UL << 10);
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
					    int *par_num, bool print)
{
	u32 cur_bit;
	bool res;
	int i;

	res = false;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			res = true; /* each set bit is a real error */
			if (print) {
				switch (cur_bit) {
				case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "PGLUE_B");
					_print_parity(bp,
						      PGLUE_B_REG_PGLUE_B_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
					_print_next_block((*par_num)++, "ATC");
					_print_parity(bp,
						      ATC_REG_ATC_PRTY_STS);
					break;
				}
			}
			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
			      u32 *sig)
{
	bool res = false;

	if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
	    (sig[1] & HW_PRTY_ASSERT_SET_1) ||
	    (sig[2] & HW_PRTY_ASSERT_SET_2) ||
	    (sig[3] & HW_PRTY_ASSERT_SET_3) ||
	    (sig[4] & HW_PRTY_ASSERT_SET_4)) {
		int par_num = 0;

		DP(NETIF_MSG_HW, "Parity error detected: HW block parity attention:\n"
				 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
			  sig[0] & HW_PRTY_ASSERT_SET_0,
			  sig[1] & HW_PRTY_ASSERT_SET_1,
			  sig[2] & HW_PRTY_ASSERT_SET_2,
			  sig[3] & HW_PRTY_ASSERT_SET_3,
			  sig[4] & HW_PRTY_ASSERT_SET_4);
		if (print) {
			if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
			     (sig[1] & HW_PRTY_ASSERT_SET_1) ||
			     (sig[2] & HW_PRTY_ASSERT_SET_2) ||
			     (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
			     (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
				netdev_err(bp->dev,
					   "Parity errors detected in blocks: ");
			} else {
				print = false;
			}
		}
		res |= bnx2x_check_blocks_with_parity0(bp,
			sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
		res |= bnx2x_check_blocks_with_parity1(bp,
			sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
		res |= bnx2x_check_blocks_with_parity2(bp,
			sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
		res |= bnx2x_check_blocks_with_parity3(bp,
			sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
		res |= bnx2x_check_blocks_with_parity4(bp,
			sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);

		if (print)
			pr_cont("\n");
	}

	return res;
}

/**
 * bnx2x_chk_parity_attn - checks for parity attentions.
 *
 * @bp:		driver handle
 * @global:	true if there was a global attention
 * @print:	show parity attention in syslog
 */
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
{
	struct attn_route attn = { {0} };
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
			     port*4);
	attn.sig[1] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
			     port*4);
	attn.sig[2] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
			     port*4);
	attn.sig[3] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
			     port*4);
	/* Since MCP attentions can't be disabled inside the block, we need to
	 * read AEU registers to see whether they're currently disabled
	 */
	attn.sig[3] &= ((REG_RD(bp,
				!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
				      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
			 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
			~MISC_AEU_ENABLE_MCP_PRTY_BITS);

	if (!CHIP_IS_E1x(bp))
		attn.sig[4] = REG_RD(bp,
			MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
				     port*4);

	return bnx2x_parity_attn(bp, global, print, attn.sig);
}

static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
		(u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;
	bool global = false;

	/* need to take HW lock because MCP or other port might also
	 * try to handle this event
	 */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp, &global, true)) {
#ifndef BNX2X_STOP_ON_ERROR
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
#else
		bnx2x_panic();
#endif
		bnx2x_release_alr(bp);
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	if (!CHIP_IS_E1x(bp))
		attn.sig[4] =
		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
	else
		attn.sig[4] = 0;

	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
			   index,
			   group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3],
			   group_mask->sig[4]);

			bnx2x_attn_int_deasserted4(bp,
					attn.sig[4] & group_mask->sig[4]);
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_CLR);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
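	/* A bit is newly asserted if it is set in attn_bits but not yet
	 * acked or recorded in attn_state; it is newly deasserted if it
	 * is clear in attn_bits while still acked and recorded.
	 */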

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
		      u16 index, u8 op, u8 update)
{
	u32 igu_addr = bp->igu_base_addr;
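	/* command cells in the IGU BAR are 8 bytes apart */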
	igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
			     igu_addr);
}

static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers needed; the caller (bnx2x_eq_int) issues
	 * smp_wmb() before updating the producer.
	 */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
}

static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				      union event_ring_elem *elem)
{
	u8 err = elem->message.error;

	if (!bp->cnic_eth_dev.starting_cid  ||
	    (cid < bp->cnic_eth_dev.starting_cid &&
	    cid != bp->cnic_eth_dev.iscsi_l2_cid))
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(err)) {

		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp, false);
	}
	bnx2x_cnic_cfc_comp(bp, cid, err);
	return 0;
}

static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
{
	struct bnx2x_mcast_ramrod_params rparam;
	int rc;

	memset(&rparam, 0, sizeof(rparam));

	rparam.mcast_obj = &bp->mcast_obj;

	netif_addr_lock_bh(bp->dev);

	/* Clear pending state for the last command */
	bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}

	netif_addr_unlock_bh(bp->dev);
}

static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
					    union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;
	u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
	u32 cid = echo & BNX2X_SWCID_MASK;
	struct bnx2x_vlan_mac_obj *vlan_mac_obj;
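	/* The echo field encodes the classification command type in its
	 * high bits and the SW CID in its low bits.
	 */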

	/* Always push next commands out, don't wait here */
	__set_bit(RAMROD_CONT, &ramrod_flags);

	switch (echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
		if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
			vlan_mac_obj = &bp->iscsi_l2_mac_obj;
		else
			vlan_mac_obj = &bp->sp_objs[cid].mac_obj;

		break;
	case BNX2X_FILTER_VLAN_PENDING:
		DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
		vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
		break;
	case BNX2X_FILTER_MCAST_PENDING:
		DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
		/* This is only relevant for 57710 where multicast MACs are
		 * configured as unicast MACs using the same ramrod.
		 */
		bnx2x_handle_mcast_eqe(bp);
		return;
	default:
		BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
		return;
	}

	rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);

	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
}

static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);

static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
{
	netif_addr_lock_bh(bp->dev);

	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);

	/* Send rx_mode command again if was requested */
	if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
		bnx2x_set_storm_rx_mode(bp);
	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
				    &bp->sp_state))
		bnx2x_set_iscsi_eth_rx_mode(bp, true);
	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
				    &bp->sp_state))
		bnx2x_set_iscsi_eth_rx_mode(bp, false);

	netif_addr_unlock_bh(bp->dev);
}

static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
					      union event_ring_elem *elem)
{
	if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
		DP(BNX2X_MSG_SP,
		   "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
		   elem->message.data.vif_list_event.func_bit_map);
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
			elem->message.data.vif_list_event.func_bit_map);
	} else if (elem->message.data.vif_list_event.echo ==
		   VIF_LIST_RULE_SET) {
		DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
	}
}

/* called with rtnl_lock */
static void bnx2x_after_function_update(struct bnx2x *bp)
{
	int q, rc;
	struct bnx2x_fastpath *fp;
	struct bnx2x_queue_state_params queue_params = {NULL};
	struct bnx2x_queue_update_params *q_update_params =
		&queue_params.params.update;

	/* Send Q update command with afex vlan removal values for all Qs */
	queue_params.cmd = BNX2X_Q_CMD_UPDATE;

	/* set silent vlan removal values according to vlan mode */
	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
		  &q_update_params->update_flags);
	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
		  &q_update_params->update_flags);
	__set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);

	/* in access mode mark mask and value are 0 to strip all vlans */
	if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
		q_update_params->silent_removal_value = 0;
		q_update_params->silent_removal_mask = 0;
	} else {
		q_update_params->silent_removal_value =
			(bp->afex_def_vlan_tag & VLAN_VID_MASK);
		q_update_params->silent_removal_mask = VLAN_VID_MASK;
	}

	for_each_eth_queue(bp, q) {
		/* Set the appropriate Queue object */
		fp = &bp->fp[q];
		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* send the ramrod */
		rc = bnx2x_queue_state_change(bp, &queue_params);
		if (rc < 0)
			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
				  q);
	}

	if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
		fp = &bp->fp[FCOE_IDX(bp)];
		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* clear pending completion bit */
		__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);

		/* mark latest Q bit */
		smp_mb__before_atomic();
		set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_atomic();

		/* send Q update ramrod for FCoE Q */
		rc = bnx2x_queue_state_change(bp, &queue_params);
		if (rc < 0)
			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
				  FCOE_IDX(bp));
	} else {
		/* If no FCoE ring - ACK MCP now */
		bnx2x_link_report(bp);
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
	}
}

static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
	struct bnx2x *bp, u32 cid)
{
	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);

	if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
		return &bnx2x_fcoe_sp_obj(bp, q_obj);
	else
		return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
}

static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u8 echo;
	u32 cid;
	u8 opcode;
	int rc, spqe_cnt = 0;
	struct bnx2x_queue_sp_obj *q_obj;
	struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
	struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * When hw_cons points at a next-page element we must adjust it so
	 * the loop condition below is met. The next-page element occupies
	 * one regular element slot, hence the increment by 1.
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function is never run in parallel with itself for a
	 * specific bp, thus there is no need for a "paired" read memory
	 * barrier here.
	 */
	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->eq_spq_left %x\n",
			hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {

		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		rc = bnx2x_iov_eq_sp_event(bp, elem);
		if (!rc) {
			DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
			   rc);
			goto next_spqe;
		}

		opcode = elem->message.opcode;

		/* handle eq element */
		switch (opcode) {
		case EVENT_RING_OPCODE_VF_PF_CHANNEL:
			bnx2x_vf_mbx_schedule(bp,
					      &elem->message.data.vf_pf_event);
			continue;

		case EVENT_RING_OPCODE_STAT_QUERY:
			DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
			       "got statistics comp event %d\n",
			       bp->stats_comp++);
			/* nothing to do with stats comp */
			goto next_spqe;

		case EVENT_RING_OPCODE_CFC_DEL:
			/* handle according to cid range; we may want to
			 * verify here that the bp state is HALTING
			 */

			/* elem CID originates from FW; actually LE */
			cid = SW_CID(elem->message.data.cfc_del_event.cid);

			DP(BNX2X_MSG_SP,
			   "got delete ramrod for MULTI[%d]\n", cid);

			if (CNIC_LOADED(bp) &&
			    !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;

			q_obj = bnx2x_cid_to_q_obj(bp, cid);

			if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
				break;

			goto next_spqe;

		case EVENT_RING_OPCODE_STOP_TRAFFIC:
			DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
			if (f_obj->complete_cmd(bp, f_obj,
						BNX2X_F_CMD_TX_STOP))
				break;
			goto next_spqe;

		case EVENT_RING_OPCODE_START_TRAFFIC:
			DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
			if (f_obj->complete_cmd(bp, f_obj,
						BNX2X_F_CMD_TX_START))
				break;
			goto next_spqe;

		case EVENT_RING_OPCODE_FUNCTION_UPDATE:
			echo = elem->message.data.function_update_event.echo;
			if (echo == SWITCH_UPDATE) {
				DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
				   "got FUNC_SWITCH_UPDATE ramrod\n");
				if (f_obj->complete_cmd(
					bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
					break;

			} else {
				int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;

				DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
				   "AFEX: ramrod completed FUNCTION_UPDATE\n");
				f_obj->complete_cmd(bp, f_obj,
						    BNX2X_F_CMD_AFEX_UPDATE);

				/* We will perform the Queues update from
				 * sp_rtnl task as all Queue SP operations
				 * should run under rtnl_lock.
				 */
				bnx2x_schedule_sp_rtnl(bp, cmd, 0);
			}

			goto next_spqe;

		case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
			f_obj->complete_cmd(bp, f_obj,
					    BNX2X_F_CMD_AFEX_VIFLISTS);
			bnx2x_after_afex_vif_lists(bp, elem);
			goto next_spqe;
		case EVENT_RING_OPCODE_FUNCTION_START:
			DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
			   "got FUNC_START ramrod\n");
			if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
				break;

			goto next_spqe;

		case EVENT_RING_OPCODE_FUNCTION_STOP:
			DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
			   "got FUNC_STOP ramrod\n");
			if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
				break;

			goto next_spqe;

		case EVENT_RING_OPCODE_SET_TIMESYNC:
			DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
			   "got set_timesync ramrod completion\n");
			if (f_obj->complete_cmd(bp, f_obj,
						BNX2X_F_CMD_SET_TIMESYNC))
				break;
			goto next_spqe;
		}

		switch (opcode | bp->state) {
		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
		      BNX2X_STATE_OPENING_WAIT4_PORT):
		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
			   SW_CID(elem->message.data.eth_event.echo));
			rss_raw->clear_pending(rss_raw);
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
		      BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
			bnx2x_handle_classification_eqe(bp, elem);
			break;

		case (EVENT_RING_OPCODE_MULTICAST_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_MULTICAST_RULES |
		      BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_MULTICAST_RULES |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(BNX2X_MSG_SP, "got mcast ramrod\n");
			bnx2x_handle_mcast_eqe(bp);
			break;

		case (EVENT_RING_OPCODE_FILTERS_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_FILTERS_RULES |
		      BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_FILTERS_RULES |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
			bnx2x_handle_rx_mode_eqe(bp);
			break;
		default:
			/* unknown event log error and continue */
			BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
				  elem->message.opcode, bp->state);
		}
next_spqe:
		spqe_cnt++;
	} /* for */

	smp_mb__before_atomic();
	atomic_add(spqe_cnt, &bp->eq_spq_left);

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;
	/* Make sure the above memory writes are issued before the producer update */
	smp_wmb();

	/* update producer */
	bnx2x_update_eq_prod(bp, bp->eq_prod);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);

	DP(BNX2X_MSG_SP, "sp task invoked\n");

	/* make sure the atomic interrupt_occurred has been written */
	smp_rmb();
	if (atomic_read(&bp->interrupt_occurred)) {

		/* what work needs to be performed? */
		u16 status = bnx2x_update_dsb_idx(bp);

		DP(BNX2X_MSG_SP, "status %x\n", status);
		DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
		atomic_set(&bp->interrupt_occurred, 0);

		/* HW attentions */
		if (status & BNX2X_DEF_SB_ATT_IDX) {
			bnx2x_attn_int(bp);
			status &= ~BNX2X_DEF_SB_ATT_IDX;
		}

		/* SP events: STAT_QUERY and others */
		if (status & BNX2X_DEF_SB_IDX) {
			struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

			if (FCOE_INIT(bp) &&
			    (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				/* Prevent local bottom-halves from running as
				 * we are going to change the local NAPI list.
				 */
				local_bh_disable();
				napi_schedule(&bnx2x_fcoe(bp, napi));
				local_bh_enable();
			}

			/* Handle EQ completions */
			bnx2x_eq_int(bp);
			bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
				     le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

			status &= ~BNX2X_DEF_SB_IDX;
		}

		/* if status is non zero then perhaps something went wrong */
		if (unlikely(status))
			DP(BNX2X_MSG_SP,
			   "got an unknown interrupt! (status 0x%x)\n", status);

		/* ack status block only if something was actually handled */
		bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
			     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
	}

	/* afex - poll to check if VIFSET_ACK should be sent to MFW */
	if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
			       &bp->sp_state)) {
		bnx2x_link_report(bp);
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
	}
}

irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);
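	/* Interrupts on this SB stay disabled until the sp task acks it
	 * again with IGU_INT_ENABLE.
	 */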

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	if (CNIC_LOADED(bp)) {
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}

	/* schedule sp task to perform default status block work, ack
	 * attentions and enable interrupts.
	 */
	bnx2x_schedule_sp_task(bp);

	return IRQ_HANDLED;
}

/* end of slow path */

void bnx2x_drv_pulse(struct bnx2x *bp)
{
	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
		 bp->fw_drv_pulse_wr_seq);
}

static void bnx2x_timer(struct timer_list *t)
{
	struct bnx2x *bp = from_timer(bp, t, timer);

	if (!netif_running(bp->dev))
		return;

	if (IS_PF(bp) &&
	    !BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);
		u16 drv_pulse;
		u16 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		bnx2x_drv_pulse(bp);

		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should not get too big. If the MFW falls more than
		 * 5 pulses behind, log an error.
		 */
		if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
			BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

	/* sample pf vf bulletin board for new posts from pf */
	if (IS_VF(bp))
		bnx2x_timer_sriov(bp);

	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
{
	u32 i;
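	/* use dword writes when both address and length are 4-byte
	 * aligned, otherwise fall back to byte-wide writes
	 */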
	if (!(len%4) && !(addr%4))
		for (i = 0; i < len; i += 4)
			REG_WR(bp, addr + i, fill);
	else
		for (i = 0; i < len; i++)
			REG_WR8(bp, addr + i, fill);
}

/* helper: writes FP SP data to FW - data_size in dwords */
static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
				int fw_sb_id,
				u32 *sb_data_p,
				u32 data_size)
{
	int index;
	for (index = 0; index < data_size; index++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			sizeof(u32)*index,
			*(sb_data_p + index));
}

static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function first */
	if (!CHIP_IS_E1x(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.state = SB_DISABLED;
		sb_data_e2.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.state = SB_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_SYNC_BLOCK_SIZE);
}

/* helper:  writes SP SB data to FW */
static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
		struct hc_sp_status_block_data *sp_sb_data)
{
	int func = BP_FUNC(bp);
	int i;
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32),
			*((u32 *)sp_sb_data + i));
}

static void bnx2x_zero_sp_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	sp_sb_data.state = SB_DISABLED;
	sp_sb_data.p_func.vf_valid = false;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
			CSTORM_SP_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
			CSTORM_SP_SYNC_BLOCK_SIZE);
}

static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
					   int igu_sb_id, int igu_seg_id)
{
	hc_sm->igu_sb_id = igu_sb_id;
	hc_sm->igu_seg_id = igu_seg_id;
	hc_sm->timer_value = 0xFF;
	hc_sm->time_to_expire = 0xFFFFFFFF;
}

/* assigns state machine ids to the status block indices */
static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
{
	/* zero out state machine indices */
	/* rx indices */
	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;

	/* tx indices */
	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;

	/* map indices */
	/* rx indices */
	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
		SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;

	/* tx indices */
	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
}

void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm  *hc_sm_p;
	int data_size;
	u32 *sb_data_p;

	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	bnx2x_zero_fp_sb(bp, fw_sb_id);

	if (!CHIP_IS_E1x(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.state = SB_ENABLED;
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
		bnx2x_map_sb_state_machines(sb_data_e2.index_data);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.state = SB_ENABLED;
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
		bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
	}

	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW - PCI guarantees endianness of regpairs */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}

static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
				     u16 tx_usec, u16 rx_usec)
{
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
				    false, rx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
				       tx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
				       tx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
				       tx_usec);
}

static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset, reg_offset_en5;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
				 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[4] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (!CHIP_IS_E1x(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset_en5 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	/* PCI guarantees endianness of regpairs */
	sp_sb_data.state		= SB_ENABLED;
	sp_sb_data.host_sb_addr.lo	= U64_LO(section);
	sp_sb_data.host_sb_addr.hi	= U64_HI(section);
	sp_sb_data.igu_sb_id		= igu_sp_sb_index;
	sp_sb_data.igu_seg_id		= igu_seg_id;
	sp_sb_data.p_func.pf_id		= func;
	sp_sb_data.p_func.vnic_id	= BP_VN(bp);
	sp_sb_data.p_func.vf_id		= 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->tx_ticks, bp->rx_ticks);
}

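/**
 * bnx2x_init_sp_ring - initialize the slow-path (SPQ) ring.
 *
 * @bp:		driver handle
 *
 * Resets the SPQ lock, credit counter, producer index and BD pointers.
 */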
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	spin_lock_init(&bp->spq_lock);
	atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);

	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

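/**
 * bnx2x_init_eq_ring - initialize the event queue ring.
 *
 * @bp:		driver handle
 *
 * Chains the EQ pages into a ring via their next-page pointers and
 * resets the EQ producer/consumer indices and the EQ credit counter.
 */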
static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;
	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;
	/* leave headroom so we get a warning before the EQ actually overflows */
	atomic_set(&bp->eq_spq_left,
		min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}

/* called with netif_addr_lock_bh() */
static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
			       unsigned long rx_mode_flags,
			       unsigned long rx_accept_flags,
			       unsigned long tx_accept_flags,
			       unsigned long ramrod_flags)
{
	struct bnx2x_rx_mode_ramrod_params ramrod_param;
	int rc;

	memset(&ramrod_param, 0, sizeof(ramrod_param));

	/* Prepare ramrod parameters */
	ramrod_param.cid = 0;
	ramrod_param.cl_id = cl_id;
	ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
	ramrod_param.func_id = BP_FUNC(bp);

	ramrod_param.pstate = &bp->sp_state;
	ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;

	ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
	ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);

	ramrod_param.ramrod_flags = ramrod_flags;
	ramrod_param.rx_mode_flags = rx_mode_flags;

	ramrod_param.rx_accept_flags = rx_accept_flags;
	ramrod_param.tx_accept_flags = tx_accept_flags;

	rc = bnx2x_config_rx_mode(bp, &ramrod_param);
	if (rc < 0) {
		BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
		return rc;
	}

	return 0;
}

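/**
 * bnx2x_fill_accept_flags - translate an rx_mode into accept flags.
 *
 * @bp:			driver handle
 * @rx_mode:		one of the BNX2X_RX_MODE_* values
 * @rx_accept_flags:	filled with the Rx accept flags
 * @tx_accept_flags:	filled with the Tx (internal switching) accept flags
 *
 * Returns 0 on success, -EINVAL on an unknown @rx_mode.
 */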
static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
				   unsigned long *rx_accept_flags,
				   unsigned long *tx_accept_flags)
{
	/* Clear the flags first */
	*rx_accept_flags = 0;
	*tx_accept_flags = 0;

	switch (rx_mode) {
	case BNX2X_RX_MODE_NONE:
		/*
		 * 'drop all' supersedes any accept flags that may have been
		 * passed to the function.
		 */
		break;
	case BNX2X_RX_MODE_NORMAL:
		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

		/* internal switching mode */
		__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

		if (bp->accept_any_vlan) {
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
		}

		break;
	case BNX2X_RX_MODE_ALLMULTI:
		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

		/* internal switching mode */
		__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

		if (bp->accept_any_vlan) {
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
		}

		break;
	case BNX2X_RX_MODE_PROMISC:
		/* According to the definition of SI mode, an interface in
		 * promiscuous mode should receive both matched and unmatched
		 * (at port-level resolution) unicast packets.
		 */
		__set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

		/* internal switching mode */
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

		if (IS_MF_SI(bp))
			__set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
		else
			__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);

		__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);

		break;
	default:
		BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
		return -EINVAL;
	}

	return 0;
}

/* called with netif_addr_lock_bh() */
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	unsigned long rx_mode_flags = 0, ramrod_flags = 0;
	unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
	int rc;

	if (!NO_FCOE(bp))
		/* Configure rx_mode of FCoE Queue */
		__set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);

	rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
				     &tx_accept_flags);
	if (rc)
		return rc;

	__set_bit(RAMROD_RX, &ramrod_flags);
	__set_bit(RAMROD_TX, &ramrod_flags);

	return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
				   rx_accept_flags, tx_accept_flags,
				   ramrod_flags);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	if (!CHIP_IS_E1x(bp)) {
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* fall through */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		/* nothing to do */
		/* fall through */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
{
	return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
}

static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
{
	return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
}

static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return BP_L_ID(fp->bp) + fp->index;
	else	/* We want Client ID to be the same as IGU SB ID for 57712 */
		return bnx2x_fp_igu_sb_id(fp);
}

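/**
 * bnx2x_init_eth_fp - initialize a single Ethernet fastpath.
 *
 * @bp:		driver handle
 * @fp_idx:	index of the fastpath to initialize
 *
 * Sets up the queue/client/status-block IDs and the per-CoS Tx data;
 * for a PF it additionally initializes the status block, the queue
 * state object and the classification (MAC/VLAN) objects.
 */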
static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
	u8 cos;
	unsigned long q_type = 0;
	u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
	fp->rx_queue = fp_idx;
	fp->cid = fp_idx;
	fp->cl_id = bnx2x_fp_cl_id(fp);
	fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
	fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
	/* qZone id equals the FW (per-path) client id */
	fp->cl_qzone_id  = bnx2x_fp_qzone_id(fp);

	/* init shortcut */
	fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);

	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);

	/* init tx data */
	for_each_cos_in_tx_queue(fp, cos) {
		bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
				  CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
				  FP_COS_TO_TXQ(fp, cos, bp),
				  BNX2X_TX_SB_INDEX_BASE + cos, fp);
		cids[cos] = fp->txdata_ptr[cos]->cid;
	}

	/* nothing more for vf to do here */
	if (IS_VF(bp))
		return;

	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_update_fpsb_idx(fp);
	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
			     fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	/* Configure classification DBs: always enable Tx switching */
	bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  fw_sb %d  igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}

static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	*txdata->tx_cons_sb = cpu_to_le16(0);

	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	txdata->tx_db.data.zero_fill1 = 0;
	txdata->tx_db.data.prod = 0;

	txdata->tx_pkt_prod = 0;
	txdata->tx_pkt_cons = 0;
	txdata->tx_bd_prod = 0;
	txdata->tx_bd_cons = 0;
	txdata->tx_pkt = 0;
}

static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i)
		bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
}

static void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_eth_queue(bp, i)
		for_each_cos_in_tx_queue(&bp->fp[i], cos)
			bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}

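/**
 * bnx2x_init_fcoe_fp - initialize the FCoE L2 fastpath.
 *
 * @bp:		driver handle
 *
 * The FCoE client uses the default status block and supports a single
 * CoS only.
 */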
static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
	unsigned long q_type = 0;

	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
						     BNX2X_FCOE_ETH_CL_ID_IDX);
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
	bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
			  fp);

	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);

	/* qZone id equals the FW (per-path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
		bnx2x_rx_ustorm_prods_offset(fp);

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	/* No multi-CoS for FCoE L2 client */
	BUG_ON(fp->max_cos != 1);

	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
			     &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}

void bnx2x_nic_init_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));

	/* ensure status block indices were read */
	rmb();
	bnx2x_init_rx_rings_cnic(bp);
	bnx2x_init_tx_rings_cnic(bp);

	/* flush all */
	mb();
}

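/**
 * bnx2x_pre_irq_nic_init - NIC init that must run before IRQs are requested.
 *
 * @bp:		driver handle
 *
 * Initializes the Ethernet fastpaths and the Rx/Tx rings; on a PF it
 * also sets up the MOD_ABS interrupts, the default status block and
 * the slow-path ring.
 */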
void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
{
	int i;

	/* Setup NIC internals and enable interrupts */
	for_each_eth_queue(bp, i)
		bnx2x_init_eth_fp(bp, i);

	/* ensure status block indices were read */
	rmb();
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);

	if (IS_PF(bp)) {
		/* Initialize MOD_ABS interrupts */
		bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
				       bp->common.shmem_base,
				       bp->common.shmem2_base, BP_PORT(bp));

		/* initialize the default status block and sp ring */
		bnx2x_init_def_sb(bp);
		bnx2x_update_dsb_idx(bp);
		bnx2x_init_sp_ring(bp);
	} else {
		bnx2x_memset_stats(bp);
	}
}

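/**
 * bnx2x_post_irq_nic_init - NIC init that runs after IRQs were requested.
 *
 * @bp:		driver handle
 * @load_code:	response code received from the MCP
 *
 * Initializes the event queue, the internal memories, the PF and the
 * statistics, then enables interrupts and checks for a pending SPIO5
 * (fan failure) attention.
 */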
void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
{
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_stats_init(bp);

	/* flush all before enabling interrupts */
	mb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* gzip service functions */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (!bp->gunzip_buf)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (!bp->strm)
		goto gunzip_nomem2;

	bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
	if (!bp->strm->workspace)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	if (bp->strm) {
		vfree(bp->strm->workspace);
		kfree(bp->strm);
		bp->strm = NULL;
	}

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

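/**
 * bnx2x_gunzip - decompress a gzipped firmware blob.
 *
 * @bp:		driver handle
 * @zbuf:	gzip-compressed input buffer
 * @len:	length of @zbuf in bytes
 *
 * Inflates @zbuf into bp->gunzip_buf (at most FW_BUF_SIZE bytes) and
 * stores the decompressed length, in dwords, in bp->gunzip_outlen.
 * Returns 0 on success and a negative or zlib error code otherwise.
 */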
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;			/* size of the fixed gzip header */

#define FNAME				0x8

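	/* skip the NUL-terminated original file name, if present */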
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev,
			   "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
				bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* Some of the internal memories are not directly readable from the
 * driver; to test them we send debug packets.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/*  Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Check that the PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait, then check that the PRS register shows 3 packets */
	msleep(10 * factor);
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
	if (!CNIC_SUPPORT(bp))
		/* set NIC mode */
		REG_WR(bp, PRS_REG_NIC_MODE, 1);

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
	u32 val;

	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT  |
		PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
		PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
	if (!CHIP_IS_E1x(bp))
		val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
			PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
	REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);

	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */

	if (!CHIP_IS_E1x(bp))
		/* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
		REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);

	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

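/**
 * bnx2x_reset_common - assert reset on the common blocks.
 *
 * @bp:		driver handle
 *
 * Puts the common blocks into reset via the MISC reset registers,
 * including the MSTAT blocks on E3 chips.
 */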
static void bnx2x_reset_common(struct bnx2x *bp)
{
	u32 val = 0x1400;

	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);

	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
}

static void bnx2x_setup_dmae(struct bnx2x *bp)
{
	bp->dmae_ready = 0;
	spin_lock_init(&bp->dmae_lock);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

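/**
 * bnx2x_setup_fan_failure_detection - configure SPIO5 for fan failure.
 *
 * @bp:		driver handle
 *
 * Checks the shared HW configuration (and, if so configured, the PHY
 * type per port) to decide whether fan failure detection is required;
 * if it is, configures SPIO5 as an active-low input and enables its
 * event to signal the IGU.
 */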
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= MISC_SPIO_SPIO5;
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

void bnx2x_pf_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
	val &= ~IGU_PF_CONF_FUNC_EN;

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}

static void bnx2x__common_init_phy(struct bnx2x *bp)
{
	u32 shmem_base[2], shmem2_base[2];
	/* Avoid common init in case MFW supports LFA */
	if (SHMEM2_RD(bp, size) >
	    (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
		return;
	shmem_base[0] =  bp->common.shmem_base;
	shmem2_base[0] = bp->common.shmem2_base;
	if (!CHIP_IS_E1x(bp)) {
		shmem_base[1] =
			SHMEM2_RD(bp, other_shmem_base_addr);
		shmem2_base[1] =
			SHMEM2_RD(bp, other_shmem2_base_addr);
	}
	bnx2x_acquire_phy_lock(bp);
	bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
			      bp->common.chip_id);
	bnx2x_release_phy_lock(bp);
}

static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
{
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);

	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
}

static void bnx2x_set_endianity(struct bnx2x *bp)
{
#ifdef __BIG_ENDIAN
	bnx2x_config_endianity(bp, 1);
#else
	bnx2x_config_endianity(bp, 0);
#endif
}

static void bnx2x_reset_endianity(struct bnx2x *bp)
{
	bnx2x_config_endianity(bp, 0);
}

/**
 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @bp:		driver handle
 */
static int bnx2x_init_hw_common(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "starting common init  func %d\n", BP_ABS_FUNC(bp));

	/*
	 * take the RESET lock to protect undi_unload flow from accessing
	 * registers while we're resetting the chip
	 */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);

	val = 0xfffc;
	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		u8 abs_func_id;

		/* In both 4-port and 2-port modes we need to turn off
		 * master-enable for everyone; after that, turn it back on
		 * for self. So we disregard multi-function or not, and
		 * always disable it for all functions on the given path,
		 * meaning 0,2,4,6 for path 0 and 1,3,5,7 for path 1.
		 */
		for (abs_func_id = BP_PATH(bp);
		     abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
			if (abs_func_id == BP_ABS_FUNC(bp)) {
				REG_WR(bp,
				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
				    1);
				continue;
			}

			bnx2x_pretend_func(bp, abs_func_id);
			/* clear pf enable */
			bnx2x_pf_disable(bp);
			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
		}
	}

	bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
	bnx2x_init_pxp(bp);
	bnx2x_set_endianity(bp);
	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	/* Timers bug workaround (E2 only). We need to set the entire ILT to
	 * have entries with value "0" and valid bit on.
	 * This needs to be done by the first PF that is loaded in a path
	 * (i.e. common phase)
	 */
	if (!CHIP_IS_E1x(bp)) {
/* In E2 there is a bug in the timers block that can cause function 6 / 7
 * (i.e. vnic3) to start even if it is marked as "scan-off".
 * This occurs when a different function (func2,3) is being marked
 * as "scan-off". A real-life example: a driver being load-unloaded
 * while func6,7 are down. This will cause the timer to access
 * the ilt, translate to a logical address and send a request to read/write.
 * Since the ilt for the function that is down is not valid, this will cause
 * a translation error which is unrecoverable.
 * The Workaround is intended to make sure that when this happens nothing fatal
 * will occur. The workaround:
 *	1.  First PF driver which loads on a path will:
 *		a.  After taking the chip out of reset, by using pretend,
 *		    it will write "0" to the following registers of
 *		    the other vnics.
 *		    REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
 *		    REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
 *		    REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
 *		    And for itself it will write '1' to
 *		    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
 *		    dmae-operations (writing to pram for example.)
 *		    note: can be done for only function 6,7 but cleaner this
 *			  way.
 *		b.  Write zero+valid to the entire ILT.
 *		c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
 *		    VNIC3 (of that port). The range allocated will be the
 *		    entire ILT. This is needed to prevent ILT range error.
 *	2.  Any PF driver load flow:
 *		a.  ILT update with the physical addresses of the allocated
 *		    logical pages.
 *		b.  Wait 20msec. - note that this timeout is needed to make
 *		    sure there are no requests in one of the PXP internal
 *		    queues with "old" ILT addresses.
 *		c.  PF enable in the PGLC.
 *		d.  Clear the was_error of the PF in the PGLC. (could have
 *		    occurred while driver was down)
 *		e.  PF enable in the CFC (WEAK + STRONG)
 *		f.  Timers scan enable
 *	3.  PF driver unload flow:
 *		a.  Clear the Timers scan_en.
 *		b.  Polling for scan_on=0 for that PF.
 *		c.  Clear the PF enable bit in the PXP.
 *		d.  Clear the PF enable in the CFC (WEAK + STRONG)
 *		e.  Write zero+valid to all ILT entries (The valid bit must
 *		    stay set)
 *		f.  If this is VNIC 3 of a port then also init
 *		    first_timers_ilt_entry to zero and last_timers_ilt_entry
 *		    to the last entry in the ILT.
 *
 *	Notes:
 *	Currently the PF error in the PGLC is non-recoverable.
 *	In the future there will be a recovery routine for this error.
 *	Currently attention is masked.
 *	Having an MCP lock on the load/unload process does not guarantee that
 *	there is no Timer disable during Func6/7 enable. This is because the
 *	Timers scan is currently being cleared by the MCP on FLR.
 *	Step 2.d can be done only for PF6/7 and the driver can also check if
 *	there is error before clearing it. But the flow above is simpler and
 *	more general.
 *	All ILT entries are written by zero+valid and not just PF6/7
 *	ILT entries since in the future the ILT entries allocation for
 *	PF-s might be dynamic.
 */
		struct ilt_client_info ilt_cli;
		struct bnx2x_ilt ilt;
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		memset(&ilt, 0, sizeof(struct bnx2x_ilt));

		/* initialize dummy TM client */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		/* Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic	(this code assumes existence of the vnic)
		 *
		 * both steps performed by call to bnx2x_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and its counterpart are split registers
		 */
		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	if (!CHIP_IS_E1x(bp)) {
		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
		bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);

		bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);

		/* let the HW do its magic ... */
		do {
			msleep(200);
			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
		} while (factor-- && (val != 1));

		if (val != 1) {
			BNX2X_ERR("ATC_INIT failed\n");
			return -EBUSY;
		}
	}

	bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);

	bnx2x_iov_init_dmae(bp);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);

	bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);

	/* QM queues pointers table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

	if (CNIC_SUPPORT(bp))
		bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);

	if (!CHIP_REV_IS_SLOW(bp))
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);

	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);

	if (!CHIP_IS_E1(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);

	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
		if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * received in afex mode
			 */
			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
			REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
			REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
			REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
			REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
		} else {
			/* Bit-map indicating which L2 hdrs may appear
			 * after the basic Ethernet header
			 */
			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
			       bp->path_has_ovlan ? 7 : 6);
		}
	}

	bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		/* reset VFC memories */
		REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
			   VFC_MEMORIES_RST_REG_CAM_RST |
			   VFC_MEMORIES_RST_REG_RAM_RST);
		REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
			   VFC_MEMORIES_RST_REG_CAM_RST |
			   VFC_MEMORIES_RST_REG_RAM_RST);

		msleep(20);
	}

	bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * sent in afex mode
			 */
			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
			REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
			REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
			REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
			REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
		} else {
			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
			       bp->path_has_ovlan ? 7 : 6);
		}
	}

	REG_WR(bp, SRC_REG_SOFT_RST, 1);

	bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);

	if (CNIC_SUPPORT(bp)) {
		REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
		REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
		REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
		REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
		REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
		REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
		REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
		REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
		REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
		REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
	}
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev,
			  "please adjust the size of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);

	bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
			   (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
			   (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
	}

	bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
	if (!CHIP_IS_E1(bp)) {
		/* in E3 this is done in the per-port section */
		if (!CHIP_IS_E3(bp))
			REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
	}
	if (CHIP_IS_E1H(bp))
		/* not applicable for E2 (and above ...) */
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	if (CHIP_IS_E1(bp)) {
		/* read NIG statistic to see if this is
		 * our first time up since power-up
		 */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);

		/* do internal memory self test */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	bnx2x_enable_blocks_attention(bp);
	bnx2x_enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		if (CHIP_IS_E1x(bp))
			bnx2x__common_init_phy(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	if (SHMEM2_HAS(bp, netproc_fw_ver))
		SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));

	return 0;
}

/**
 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @bp:		driver handle
 */
static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
{
	int rc = bnx2x_init_hw_common(bp);

	if (rc)
		return rc;

	/* In E2 2-PORT mode, the same ext phy is used for the two paths */
	if (!BP_NOMCP(bp))
		bnx2x__common_init_phy(bp);

	return 0;
}

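/**
 * bnx2x_init_hw_port - initialize the HW at the per-port phase.
 *
 * @bp:		driver handle
 */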
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
	u32 low, high;
	u32 val, reg;

	DP(NETIF_MSG_HW, "starting port init  port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);

	/* Timers bug workaround: the common phase disables the pf_master bit
	 * in pglue, so we need to enable it here before any DMAE access is
	 * attempted. Therefore we manually added the enable-master to the
	 * port phase (it also happens in the function phase).
	 */
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
	bnx2x_init_block(bp, BLOCK_QM, init_phase);

	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
	bnx2x_init_block(bp, BLOCK_XCM, init_phase);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

	if (CNIC_SUPPORT(bp)) {
		bnx2x_init_block(bp, BLOCK_TM, init_phase);
		REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
		REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
	}

	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);

	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {

		if (IS_MF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) +
						((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		REG_WR(bp, (BP_PORT(bp) ?
			    BRB1_REG_MAC_GUARANTIED_1 :
			    BRB1_REG_MAC_GUARANTIED_0), 40);

	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
	if (CHIP_IS_E3B0(bp)) {
		if (IS_MF_AFEX(bp)) {
			/* configure headers for AFEX mode */
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
			       PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
			       PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_MUST_HAVE_HDRS_PORT_1 :
			       PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
		} else {
			/* Ovlan exists only if we are in multi-function +
			 * switch-dependent mode, in switch-independent there
			 * is no ovlan headers
			 */