#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <linux/numa.h>
#include <generated/utsrelease.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>
#include <net/xdp_sock_drv.h>
#include <net/xfrm.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
#include "ixgbe_txrx_common.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2016 Intel Corporation.";
static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
[board_82599] = &ixgbe_82599_info,
[board_X540] = &ixgbe_X540_info,
[board_X550] = &ixgbe_X550_info,
[board_X550EM_x] = &ixgbe_X550EM_x_info,
[board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
[board_x550em_a] = &ixgbe_x550em_a_info,
[board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
};
static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
void *p);
static struct notifier_block dca_notifier = {
.notifier_call = ixgbe_notify_dca,
.next = NULL,
.priority = 0
};
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
"Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */
static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
"Allow unsupported and untested SFP+ modules on 82599-based adapters");
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
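/* Module parameters are set at load time, for example (illustrative values):
 * "modprobe ixgbe debug=16 allow_unsupported_sfp=1"
 */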
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");
static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
static const struct net_device_ops ixgbe_netdev_ops;
static bool netif_is_ixgbe(struct net_device *dev)
{
return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
}
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
{
struct pci_dev *parent_dev;
struct pci_bus *parent_bus;
parent_bus = adapter->pdev->bus->parent;
if (!parent_bus)
return -1;
parent_dev = parent_bus->self;
if (!parent_dev)
return -1;
if (!pci_is_pcie(parent_dev))
return -1;
pcie_capability_read_word(parent_dev, reg, value);
if (*value == IXGBE_FAILED_READ_CFG_WORD &&
ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
return -1;
return 0;
}
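/* ixgbe_get_parent_bus_info - set hw->bus speed/width from the parent bus.
 * Devices behind a switch negotiate PCIe with the switch rather than the
 * root port, so read the Link Status word (PCI_EXP_LNKSTA) of the parent's
 * PCIe capability.
 */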
static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 link_status = 0;
int err;
hw->bus.type = ixgbe_bus_type_pci_express;
err = ixgbe_read_pci_cfg_word_parent(adapter, PCI_EXP_LNKSTA, &link_status);
if (err)
return err;
hw->bus.width = ixgbe_convert_bus_width(link_status);
hw->bus.speed = ixgbe_convert_bus_speed(link_status);
return 0;
}
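/* ixgbe_pcie_from_parent - determine whether PCIe info should come from the
 * parent device rather than the device itself (true for MACs that sit
 * behind an internal switch, such as the 82599 QSFP variants below).
 */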
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_SFP_SF_QP:
case IXGBE_DEV_ID_82599_QSFP_SF_QP:
return true;
default:
return false;
}
}
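/* ixgbe_check_minimum_link - warn if the negotiated PCIe link is too slow or
 * narrow for full line rate. @expected_gts is retained for the callers; the
 * evaluation itself is delegated to pcie_print_link_status(). Devices on an
 * internal (non-PCIe) bus have no valid bus info and are skipped.
 */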
static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
int expected_gts)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev;
if (hw->bus.type == ixgbe_bus_type_internal)
return;
if (ixgbe_pcie_from_parent(&adapter->hw))
pdev = adapter->pdev->bus->parent->self;
else
pdev = adapter->pdev;
pcie_print_link_status(pdev);
}
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
!test_bit(__IXGBE_REMOVING, &adapter->state) &&
!test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
queue_work(ixgbe_wq, &adapter->service_task);
}
static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
{
struct ixgbe_adapter *adapter = hw->back;
if (!hw->hw_addr)
return;
hw->hw_addr = NULL;
e_dev_err("Adapter removed\n");
if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
ixgbe_service_event_schedule(adapter);
}
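/* ixgbe_check_remove - a register read of all ones may mean the adapter was
 * surprise-removed; retry the STATUS register a few times to find out.
 * Returns the re-read register value, or IXGBE_FAILED_READ_REG if removed.
 */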
static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
u8 __iomem *reg_addr;
u32 value;
int i;
reg_addr = READ_ONCE(hw->hw_addr);
if (ixgbe_removed(reg_addr))
return IXGBE_FAILED_READ_REG;
for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) {
value = readl(reg_addr + IXGBE_STATUS);
if (value != IXGBE_FAILED_READ_REG)
break;
mdelay(3);
}
if (value == IXGBE_FAILED_READ_REG)
ixgbe_remove_adapter(hw);
else
value = readl(reg_addr + reg);
return value;
}
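/* ixgbe_read_reg - read a device register, detecting surprise removal (reads
 * of all ones). When the SGMII management interface is enabled, first wait
 * for pending MAC register writes to complete before issuing the read.
 */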
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
u32 value;
if (ixgbe_removed(reg_addr))
return IXGBE_FAILED_READ_REG;
if (unlikely(hw->phy.nw_mng_if_sel &
IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) {
struct ixgbe_adapter *adapter;
int i;
for (i = 0; i < 200; ++i) {
value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
if (likely(!value))
goto writes_completed;
if (value == IXGBE_FAILED_READ_REG) {
ixgbe_remove_adapter(hw);
return IXGBE_FAILED_READ_REG;
}
udelay(5);
}
adapter = hw->back;
e_warn(hw, "register writes incomplete %08x\n", value);
}
writes_completed:
value = readl(reg_addr + reg);
if (unlikely(value == IXGBE_FAILED_READ_REG))
value = ixgbe_check_remove(hw, reg);
return value;
}
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
u16 value;
pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
if (value == IXGBE_FAILED_READ_CFG_WORD) {
ixgbe_remove_adapter(hw);
return true;
}
return false;
}
u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
{
struct ixgbe_adapter *adapter = hw->back;
u16 value;
if (ixgbe_removed(hw->hw_addr))
return IXGBE_FAILED_READ_CFG_WORD;
pci_read_config_word(adapter->pdev, reg, &value);
if (value == IXGBE_FAILED_READ_CFG_WORD &&
ixgbe_check_cfg_remove(hw, adapter->pdev))
return IXGBE_FAILED_READ_CFG_WORD;
return value;
}
#ifdef CONFIG_PCI_IOV
static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
{
struct ixgbe_adapter *adapter = hw->back;
u32 value;
if (ixgbe_removed(hw->hw_addr))
return IXGBE_FAILED_READ_CFG_DWORD;
pci_read_config_dword(adapter->pdev, reg, &value);
if (value == IXGBE_FAILED_READ_CFG_DWORD &&
ixgbe_check_cfg_remove(hw, adapter->pdev))
return IXGBE_FAILED_READ_CFG_DWORD;
return value;
}
#endif /* CONFIG_PCI_IOV */
void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
{
struct ixgbe_adapter *adapter = hw->back;
if (ixgbe_removed(hw->hw_addr))
return;
pci_write_config_word(adapter->pdev, reg, value);
}
static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
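/* flush memory to make sure state is correct before next watchdog */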
smp_mb__before_atomic();
clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}
struct ixgbe_reg_info {
u32 ofs;
char *name;
};
static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
{IXGBE_CTRL, "CTRL"},
{IXGBE_STATUS, "STATUS"},
{IXGBE_CTRL_EXT, "CTRL_EXT"},
{IXGBE_EICR, "EICR"},
{IXGBE_SRRCTL(0), "SRRCTL"},
{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
{IXGBE_RDLEN(0), "RDLEN"},
{IXGBE_RDH(0), "RDH"},
{IXGBE_RDT(0), "RDT"},
{IXGBE_RXDCTL(0), "RXDCTL"},
{IXGBE_RDBAL(0), "RDBAL"},
{IXGBE_RDBAH(0), "RDBAH"},
{IXGBE_TDBAL(0), "TDBAL"},
{IXGBE_TDBAH(0), "TDBAH"},
{IXGBE_TDLEN(0), "TDLEN"},
{IXGBE_TDH(0), "TDH"},
{IXGBE_TDT(0), "TDT"},
{IXGBE_TXDCTL(0), "TXDCTL"},
{ .name = NULL }
};
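/* ixgbe_regdump - register printout routine */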
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
int i;
char rname[16];
u32 regs[64];
switch (reginfo->ofs) {
case IXGBE_SRRCTL(0):
for (i = 0; i < 64; i++)
regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
break;
case IXGBE_DCA_RXCTRL(0):
for (i = 0; i < 64; i++)
regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
break;
case IXGBE_RDLEN(0):
for (i = 0; i < 64; i++)
regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
break;
case IXGBE_RDH(0):
for (i = 0; i < 64; i++)
regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
break;
case IXGBE_RDT(0):
for (i = 0; i < 64; i++)
regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
break;
case IXGBE_RXDCTL(0):
for (i = 0; i < 64; i++)
regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
break;
case IXGBE_RDBAL(0):
for (i = 0; i < 64; i++)
regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
break;
case IXGBE_RDBAH(0):
for (i = 0; i < 64; i++)
regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
break;
case IXGBE_TDBAL(0):
for (i = 0; i < 64; i++)
regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
break;
case IXGBE_TDBAH(0):
for (i = 0; i < 64; i++)
regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
break;
case IXGBE_TDLEN(0):
for (i = 0; i < 64; i++)
regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
break;
case IXGBE_TDH(0):
for (i = 0; i < 64; i++)
regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
break;
case IXGBE_TDT(0):
for (i = 0; i < 64; i++)
regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
break;
case IXGBE_TXDCTL(0):
for (i = 0; i < 64; i++)
regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
break;
default:
pr_info("%-15s %08x\n",
reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
return;
}
i = 0;
while (i < 64) {
int j;
char buf[9 * 8 + 1];
char *p = buf;
snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
for (j = 0; j < 8; j++)
p += sprintf(p, " %08x", regs[i++]);
pr_err("%-15s%s\n", rname, buf);
}
}
static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
{
struct ixgbe_tx_buffer *tx_buffer;
tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
n, ring->next_to_use, ring->next_to_clean,
(u64)dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
tx_buffer->next_to_watch,
(u64)tx_buffer->time_stamp);
}
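/* ixgbe_dump - print registers, Tx ring and Rx ring state for debugging */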
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_reg_info *reginfo;
int n = 0;
struct ixgbe_ring *ring;
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
struct my_u0 { u64 a; u64 b; } *u0;
struct ixgbe_ring *rx_ring;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer_info;
int i = 0;
if (!netif_msg_hw(adapter))
return;
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state "
"trans_start\n");
pr_info("%-15s %016lX %016lX\n",
netdev->name,
netdev->state,
dev_trans_start(netdev));
}
dev_info(&adapter->pdev->dev, "Register Dump\n");
pr_info(" Register Name Value\n");
for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
reginfo->name; reginfo++) {
ixgbe_regdump(hw, reginfo);
}
if (!netdev || !netif_running(netdev))
return;
dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
pr_info(" %s %s %s %s\n",
"Queue [NTU] [NTC] [bi(ntc)->dma ]",
"leng", "ntw", "timestamp");
for (n = 0; n < adapter->num_tx_queues; n++) {
ring = adapter->tx_ring[n];
ixgbe_print_buffer(ring, n);
}
for (n = 0; n < adapter->num_xdp_queues; n++) {
ring = adapter->xdp_ring[n];
ixgbe_print_buffer(ring, n);
}
if (!netif_msg_tx_done(adapter))
goto rx_ring_summary;
dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
ring = adapter->tx_ring[n];
pr_info("------------------------------------\n");
pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
pr_info("------------------------------------\n");
pr_info("%s%s %s %s %s %s\n",
"T [desc] [address 63:0 ] ",
"[PlPOIdStDDt Ln] [bi->dma ] ",
"leng", "ntw", "timestamp", "bi->skb");
for (i = 0; ring->desc && (i < ring->count); i++) {
tx_desc = IXGBE_TX_DESC(ring, i);
tx_buffer = &ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
if (dma_unmap_len(tx_buffer, len) > 0) {
const char *ring_desc;
if (i == ring->next_to_use &&
i == ring->next_to_clean)
ring_desc = " NTC/U";
else if (i == ring->next_to_use)
ring_desc = " NTU";
else if (i == ring->next_to_clean)
ring_desc = " NTC";
else
ring_desc = "";
pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
i,
le64_to_cpu((__force __le64)u0->a),
le64_to_cpu((__force __le64)u0->b),
(u64)dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
tx_buffer->next_to_watch,
(u64)tx_buffer->time_stamp,
tx_buffer->skb,
ring_desc);
if (netif_msg_pktdata(adapter) &&
tx_buffer->skb)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS, 16, 1,
tx_buffer->skb->data,
dma_unmap_len(tx_buffer, len),
true);
}
}
}
rx_ring_summary:
dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
pr_info("Queue [NTU] [NTC]\n");
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
pr_info("%5d %5X %5X\n",
n, rx_ring->next_to_use, rx_ring->next_to_clean);
}
if (!netif_msg_rx_status(adapter))
return;
dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
pr_info("%s%s%s\n",
"R [desc] [ PktBuf A0] ",
"[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
"<-- Adv Rx Read format");
pr_info("%s%s%s\n",
"RWB[desc] [PcsmIpSHl PtRs] ",
"[vl er S cks ln] ---------------- [bi->skb ] ",
"<-- Adv Rx Write-Back format");
for (i = 0; i < rx_ring->count; i++) {
const char *ring_desc;
if (i == rx_ring->next_to_use)
ring_desc = " NTU";
else if (i == rx_ring->next_to_clean)
ring_desc = " NTC";
else
ring_desc = "";
rx_buffer_info = &rx_ring->rx_buffer_info[i];
rx_desc = IXGBE_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
if (rx_desc->wb.upper.length) {
pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
i,
le64_to_cpu((__force __le64)u0->a),
le64_to_cpu((__force __le64)u0->b),
rx_buffer_info->skb,
ring_desc);
} else {
pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
i,
le64_to_cpu((__force __le64)u0->a),
le64_to_cpu((__force __le64)u0->b),
(u64)rx_buffer_info->dma,
rx_buffer_info->skb,
ring_desc);
if (netif_msg_pktdata(adapter) &&
rx_buffer_info->dma) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS, 16, 1,
page_address(rx_buffer_info->page) +
rx_buffer_info->page_offset,
ixgbe_rx_bufsz(rx_ring), true);
}
}
}
}
}
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
u32 ctrl_ext;
ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}
static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
u32 ctrl_ext;
ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
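/* ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */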
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
u8 queue, u8 msix_vector)
{
u32 ivar, index;
struct ixgbe_hw *hw = &adapter->hw;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
if (direction == -1)
direction = 0;
index = (((direction * 64) + queue) >> 2) & 0x1F;
ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
ivar &= ~(0xFF << (8 * (queue & 0x3)));
ivar |= (msix_vector << (8 * (queue & 0x3)));
IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
if (direction == -1) {
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
index = ((queue & 1) * 8);
ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
ivar &= ~(0xFF << index);
ivar |= (msix_vector << index);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
break;
} else {
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
index = ((16 * (queue & 1)) + (8 * direction));
ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
ivar &= ~(0xFF << index);
ivar |= (msix_vector << index);
IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
break;
}
default:
break;
}
}
void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
u32 mask;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
break;
default:
break;
}
}
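/* ixgbe_update_xoff_rx_lfc - update XOFF frame count for link flow control */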
static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_hw_stats *hwstats = &adapter->stats;
int i;
u32 data;
if ((hw->fc.current_mode != ixgbe_fc_full) &&
(hw->fc.current_mode != ixgbe_fc_rx_pause))
return;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
break;
default:
data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
}
hwstats->lxoffrxc += data;
if (!data)
return;
for (i = 0; i < adapter->num_tx_queues; i++)
clear_bit(__IXGBE_HANG_CHECK_ARMED,
&adapter->tx_ring[i]->state);
for (i = 0; i < adapter->num_xdp_queues; i++)
clear_bit(__IXGBE_HANG_CHECK_ARMED,
&adapter->xdp_ring[i]->state);
}
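/* ixgbe_update_xoff_received - account XOFF (pause) frames per traffic class
 * and disarm the hang check on any queue belonging to a paused TC, since a
 * paused queue must not be reported as hung. Falls back to link-level flow
 * control accounting when PFC is not in use.
 */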
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_hw_stats *hwstats = &adapter->stats;
u32 xoff[8] = {0};
u8 tc;
int i;
bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
if (adapter->ixgbe_ieee_pfc)
pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
ixgbe_update_xoff_rx_lfc(adapter);
return;
}
for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
u32 pxoffrxc;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
break;
default:
pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
}
hwstats->pxoffrxc[i] += pxoffrxc;
tc = netdev_get_prio_tc_map(adapter->netdev, i);
xoff[tc] += pxoffrxc;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
tc = tx_ring->dcb_tc;
if (xoff[tc])
clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
tc = xdp_ring->dcb_tc;
if (xoff[tc])
clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
}
}
static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
return ring->stats.packets;
}
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
unsigned int head, tail;
head = ring->next_to_clean;
tail = ring->next_to_use;
return ((head <= tail) ? tail : tail + ring->count) - head;
}
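/* ixgbe_check_tx_hang - the ring is suspect when no descriptors completed
 * since the last check while work is still pending; the first observation
 * only arms the check (test_and_set_bit returns the old value), a second
 * consecutive one reports the hang.
 */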
static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
u32 tx_done = ixgbe_get_tx_completed(tx_ring);
u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
clear_check_for_tx_hang(tx_ring);
if (tx_done_old == tx_done && tx_pending)
return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
&tx_ring->state);
tx_ring->tx_stats.tx_done_old = tx_done;
clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
return false;
}
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
e_warn(drv, "initiating reset due to tx timeout\n");
ixgbe_service_event_schedule(adapter);
}
}
static int ixgbe_tx_maxrate(struct net_device *netdev,
int queue_index, u32 maxrate)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 bcnrc_val = ixgbe_link_mbps(adapter);
if (!maxrate)
return 0;
bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
bcnrc_val /= maxrate;
bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
IXGBE_RTTBCNRC_RF_DEC_MASK;
bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
return 0;
}
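/* ixgbe_clean_tx_irq - reclaim resources after transmit completes. Returns
 * true when the clean is finished (budget remains), so NAPI knows whether
 * more Tx work is pending.
 */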
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *tx_ring, int napi_budget)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
unsigned int budget = q_vector->tx.work_limit;
unsigned int i = tx_ring->next_to_clean;
if (test_bit(__IXGBE_DOWN, &adapter->state))
return true;
tx_buffer = &tx_ring->tx_buffer_info[i];
tx_desc = IXGBE_TX_DESC(tx_ring, i);
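/* track the index relative to the ring end so that !i detects a wrap */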
i -= tx_ring->count;
do {
union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
if (!eop_desc)
break;
smp_rmb();
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
break;
tx_buffer->next_to_watch = NULL;
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
total_ipsec++;
if (ring_is_xdp(tx_ring))
xdp_return_frame(tx_buffer->xdpf);
else
napi_consume_skb(tx_buffer->skb, napi_budget);
dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
while (tx_desc != eop_desc) {
tx_buffer++;
tx_desc++;
i++;
if (unlikely(!i)) {
i -= tx_ring->count;
tx_buffer = tx_ring->tx_buffer_info;
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
}
if (dma_unmap_len(tx_buffer, len)) {
dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
}
}
tx_buffer++;
tx_desc++;
i++;
if (unlikely(!i)) {
i -= tx_ring->count;
tx_buffer = tx_ring->tx_buffer_info;
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
}
prefetch(tx_desc);
budget--;
} while (likely(budget));
i += tx_ring->count;
tx_ring->next_to_clean = i;
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->stats.bytes += total_bytes;
tx_ring->stats.packets += total_packets;
u64_stats_update_end(&tx_ring->syncp);
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
adapter->tx_ipsec += total_ipsec;
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
struct ixgbe_hw *hw = &adapter->hw;
e_err(drv, "Detected Tx Unit Hang %s\n"
" Tx Queue <%d>\n"
" TDH, TDT <%x>, <%x>\n"
" next_to_use <%x>\n"
" next_to_clean <%x>\n"
"tx_buffer_info[next_to_clean]\n"
" time_stamp <%lx>\n"
" jiffies <%lx>\n",
ring_is_xdp(tx_ring) ? "(XDP)" : "",
tx_ring->queue_index,
IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
tx_ring->next_to_use, i,
tx_ring->tx_buffer_info[i].time_stamp, jiffies);
if (!ring_is_xdp(tx_ring))
netif_stop_subqueue(tx_ring->netdev,
tx_ring->queue_index);
e_info(probe,
"tx hang %d detected on queue %d, resetting adapter\n",
adapter->tx_timeout_count + 1, tx_ring->queue_index);
ixgbe_tx_timeout_reset(adapter);
return true;
}
if (ring_is_xdp(tx_ring))
return !!budget;
netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
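/* wake the queue once enough descriptors are free for two worst-case frames */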
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index)
&& !test_bit(__IXGBE_DOWN, &adapter->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
}
}
return !!budget;
}
#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring,
int cpu)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 txctrl = 0;
u16 reg_offset;
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
txctrl = dca3_get_tag(tx_ring->dev, cpu);
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
break;
default:
return;
}
txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
IXGBE_DCA_TXCTRL_DATA_RRO_EN |
IXGBE_DCA_TXCTRL_DESC_DCA_EN;
IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring,
int cpu)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rxctrl = 0;
u8 reg_idx = rx_ring->reg_idx;
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
rxctrl = dca3_get_tag(rx_ring->dev, cpu);
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
break;
default:
break;
}
rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
IXGBE_DCA_RXCTRL_DATA_DCA_EN |
IXGBE_DCA_RXCTRL_DESC_DCA_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}
static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_ring *ring;
int cpu = get_cpu();
if (q_vector->cpu == cpu)
goto out_no_update;
ixgbe_for_each_ring(ring, q_vector->tx)
ixgbe_update_tx_dca(adapter, ring, cpu);
ixgbe_for_each_ring(ring, q_vector->rx)
ixgbe_update_rx_dca(adapter, ring, cpu);
q_vector->cpu = cpu;
out_no_update:
put_cpu();
}
static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
int i;
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
IXGBE_DCA_CTRL_DCA_MODE_CB2);
else
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
IXGBE_DCA_CTRL_DCA_DISABLE);
for (i = 0; i < adapter->num_q_vectors; i++) {
adapter->q_vector[i]->cpu = -1;
ixgbe_update_dca(adapter->q_vector[i]);
}
}
static int __ixgbe_notify_dca(struct device *dev, void *data)
{
struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
unsigned long event = *(unsigned long *)data;
if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
return 0;
switch (event) {
case DCA_PROVIDER_ADD:
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
break;
if (dca_add_requester(dev) == 0) {
adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
IXGBE_DCA_CTRL_DCA_MODE_CB2);
break;
}
fallthrough;
case DCA_PROVIDER_REMOVE:
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
dca_remove_requester(dev);
adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
IXGBE_DCA_CTRL_DCA_DISABLE);
}
break;
}
return 0;
}
#endif /* CONFIG_IXGBE_DCA */
#define IXGBE_RSS_L4_TYPES_MASK \
((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
(1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
(1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
(1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
u16 rss_type;
if (!(ring->netdev->features & NETIF_F_RXHASH))
return;
rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
IXGBE_RXDADV_RSSTYPE_MASK;
if (!rss_type)
return;
skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
(IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
#ifdef IXGBE_FCOE
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc)
{
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}
#endif /* IXGBE_FCOE */
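/* ixgbe_rx_checksum - set skb->ip_summed from the descriptor status bits,
 * counting checksum errors and handling VXLAN-encapsulated packets (outer
 * IP status is verified and csum_level raised for the inner headers).
 */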
static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
bool encap_pkt = false;
skb_checksum_none_assert(skb);
if (!(ring->netdev->features & NETIF_F_RXCSUM))
return;
if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
encap_pkt = true;
skb->encapsulation = 1;
}
if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
ring->rx_stats.csum_err++;
return;
}
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
return;
if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
return;
ring->rx_stats.csum_err++;
return;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (encap_pkt) {
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
return;
if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
skb->ip_summed = CHECKSUM_NONE;
return;
}
skb->csum_level = 1;
}
}
static unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
{
return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
}
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *bi)
{
struct page *page = bi->page;
dma_addr_t dma;
if (likely(page))
return true;
page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
}
dma = dma_map_page_attrs(rx_ring->dev, page, 0,
ixgbe_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
IXGBE_RX_DMA_ATTR);
if (dma_mapping_error(rx_ring->dev, dma)) {
__free_pages(page, ixgbe_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
bi->page_offset = rx_ring->rx_offset;
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
rx_ring->rx_stats.alloc_rx_page++;
return true;
}
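/* ixgbe_alloc_rx_buffers - replace used receive buffers
 * @cleaned_count: number of buffers to replace
 */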
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
u16 bufsz;
if (!cleaned_count)
return;
rx_desc = IXGBE_RX_DESC(rx_ring, i);
bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count;
bufsz = ixgbe_rx_bufsz(rx_ring);
do {
if (!ixgbe_alloc_mapped_page(rx_ring, bi))
break;
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset, bufsz,
DMA_FROM_DEVICE);
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc++;
bi++;
i++;
if (unlikely(!i)) {
rx_desc = IXGBE_RX_DESC(rx_ring, 0);
bi = rx_ring->rx_buffer_info;
i -= rx_ring->count;
}
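/* clear the length for the next_to_use descriptor; hardware writes it back
 * non-zero once the descriptor is filled
 */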
rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
i += rx_ring->count;
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
rx_ring->next_to_alloc = i;
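/* Force memory writes to complete before letting h/w
 * know there are new descriptors to fetch.
 */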
wmb();
writel(i, rx_ring->tail);
}
}
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
struct sk_buff *skb)
{
u16 hdr_len = skb_headlen(skb);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
IXGBE_CB(skb)->append_cnt);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}
static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
struct sk_buff *skb)
{
if (!IXGBE_CB(skb)->append_cnt)
return;
rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
rx_ring->rx_stats.rsc_flush++;
ixgbe_set_rsc_gso_size(rx_ring, skb);
IXGBE_CB(skb)->append_cnt = 0;
}
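/* ixgbe_process_skb_fields - populate skb fields (hash, checksum, timestamp,
 * VLAN, protocol) from the Rx descriptor before the skb is handed to the
 * network stack.
 */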
void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct net_device *dev = rx_ring->netdev;
u32 flags = rx_ring->q_vector->adapter->flags;
ixgbe_update_rsc_stats(rx_ring, skb);
ixgbe_rx_hash(rx_ring, rx_desc, skb);
ixgbe_rx_checksum(rx_ring, rx_desc, skb);
if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
if (netif_is_ixgbe(dev))
skb_record_rx_queue(skb, rx_ring->queue_index);
else
macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
false);
skb->protocol = eth_type_trans(skb, dev);
}
void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb)
{
napi_gro_receive(&q_vector->napi, skb);
}
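/* ixgbe_is_non_eop - handle non-EOP buffers: advance next_to_clean, stash
 * the skb on the next buffer to be received, and account for RSC descriptor
 * chains. Returns true if this descriptor is not end-of-packet.
 */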
static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
u32 ntc = rx_ring->next_to_clean + 1;
ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring->next_to_clean = ntc;
prefetch(IXGBE_RX_DESC(rx_ring, ntc));
if (ring_is_rsc_enabled(rx_ring)) {
__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
if (unlikely(rsc_enabled)) {
u32 rsc_cnt = le32_to_cpu(rsc_enabled);
rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
ntc &= IXGBE_RXDADV_NEXTP_MASK;
ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
}
}
if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
return false;
rx_ring->rx_buffer_info[ntc].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
return true;
}
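/* ixgbe_pull_tail - copy the packet header from the first page fragment into
 * skb->data so the header lands in the linear area; eth_get_headlen() sizes
 * the pull, which is then aligned to a long boundary.
 */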
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
struct sk_buff *skb)
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned char *va;
unsigned int pull_len;
va = skb_frag_address(frag);
pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
skb_frag_size_sub(frag, pull_len);
skb_frag_off_add(frag, pull_len);
skb->data_len -= pull_len;
skb->tail += pull_len;
}
static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
struct sk_buff *skb)
{
if (ring_uses_build_skb(rx_ring)) {
unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
offset,
skb_headlen(skb),
DMA_FROM_DEVICE);
} else {
skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
skb_frag_off(frag),
skb_frag_size(frag),
DMA_FROM_DEVICE);
}
if (unlikely(IXGBE_CB(skb)->page_released)) {
dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
ixgbe_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
IXGBE_RX_DMA_ATTR);
}
}
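/* ixgbe_cleanup_headers - correct corrupted or empty headers. Returns true
 * when the buffer was already consumed here (XDP verdict, frame error, or
 * padding failure) and the caller should skip it.
 */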
bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct net_device *netdev = rx_ring->netdev;
if (IS_ERR(skb))
return true;
if (!netdev ||
(unlikely(ixgbe_test_staterr(rx_desc,
IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
!(netdev->features & NETIF_F_RXALL)))) {
dev_kfree_skb_any(skb);
return true;
}
if (!skb_headlen(skb))
ixgbe_pull_tail(rx_ring, skb);
#ifdef IXGBE_FCOE
if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
return false;
#endif
if (eth_skb_pad(skb))
return true;
return false;
}
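/* ixgbe_reuse_rx_page - store the reusable buffer at next_to_alloc so the
 * page can be flipped and reused without a DMA unmap/remap cycle.
 */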
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *old_buff)
{
struct ixgbe_rx_buffer *new_buff;
u16 nta = rx_ring->next_to_alloc;
new_buff = &rx_ring->rx_buffer_info[nta];
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
new_buff->dma = old_buff->dma;
new_buff->page = old_buff->page;
new_buff->page_offset = old_buff->page_offset;
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
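/* ixgbe_can_reuse_rx_page - a page may be recycled only if it is reusable
 * (not pfmemalloc) and either we hold the only reference (order-0 pages) or
 * there is still room for another buffer (larger pages). pagecnt_bias is
 * re-inflated when it runs low so the page refcount is touched rarely.
 */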
static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define IXGBE_LAST_OFFSET \
(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
return false;
#endif
if (unlikely(pagecnt_bias == 1)) {
page_ref_add(page, USHRT_MAX - 1);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
return true;
}
static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
struct sk_buff *skb,
unsigned int size)
{
#if (PAGE_SIZE < 8192)
unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = rx_ring->rx_offset ?
SKB_DATA_ALIGN(rx_ring->rx_offset + size) :
SKB_DATA_ALIGN(size);
#endif
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
rx_buffer->page_offset, size, truesize);
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif
}
static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff **skb,
const unsigned int size,
int *rx_buffer_pgcnt)
{
struct ixgbe_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
page_count(rx_buffer->page);
#else
0;
#endif
prefetchw(rx_buffer->page);
*skb = rx_buffer->skb;
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
if (!*skb)
goto skip_sync;
} else {
if (*skb)
ixgbe_dma_sync_frag(rx_ring, *skb);
}
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
size,
DMA_FROM_DEVICE);
skip_sync:
rx_buffer->pagecnt_bias--;
return rx_buffer;
}
static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
struct sk_buff *skb,
int rx_buffer_pgcnt)
{
if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
IXGBE_CB(skb)->page_released = true;
} else {
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
ixgbe_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
IXGBE_RX_DMA_ATTR);
}
__page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias);
}
rx_buffer->page = NULL;
rx_buffer->skb = NULL;
}
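/* ixgbe_construct_skb - allocate a small skb and either copy the frame into
 * its linear area (small frames) or attach the page as a fragment.
 */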
static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
struct xdp_buff *xdp,
union ixgbe_adv_rx_desc *rx_desc)
{
unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
xdp->data_hard_start);
#endif
struct sk_buff *skb;
net_prefetch(xdp->data);
skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
if (unlikely(!skb))
return NULL;
if (size > IXGBE_RX_HDR_SIZE) {
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
IXGBE_CB(skb)->dma = rx_buffer->dma;
skb_add_rx_frag(skb, 0, rx_buffer->page,
xdp->data - page_address(rx_buffer->page),
size, truesize);
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif
} else {
memcpy(__skb_put(skb, size),
xdp->data, ALIGN(size, sizeof(long)));
rx_buffer->pagecnt_bias++;
}
return skb;
}
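/* ixgbe_build_skb - wrap an skb directly around the existing buffer memory;
 * headroom and the skb_shared_info reserved around the data make this
 * possible without copying.
 */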
static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
struct xdp_buff *xdp,
union ixgbe_adv_rx_desc *rx_desc)
{
unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
SKB_DATA_ALIGN(xdp->data_end -
xdp->data_hard_start);
#endif
struct sk_buff *skb;
net_prefetch(xdp->data_meta);
skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, xdp->data_end - xdp->data);
if (metasize)
skb_metadata_set(skb, metasize);
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
IXGBE_CB(skb)->dma = rx_buffer->dma;
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif
return skb;
}
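/* ixgbe_run_xdp - run the attached XDP program on the buffer and translate
 * its verdict; returns NULL for XDP_PASS (IXGBE_XDP_PASS is 0) or an
 * ERR_PTR encoding IXGBE_XDP_{CONSUMED,TX,REDIR} otherwise.
 */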
static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring,
struct xdp_buff *xdp)
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
struct xdp_frame *xdpf;
u32 act;
rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (!xdp_prog)
goto xdp_out;
prefetchw(xdp->data_hard_start);
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf)) {
result = IXGBE_XDP_CONSUMED;
break;
}
result = ixgbe_xmit_xdp_ring(adapter, xdpf);
break;
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
if (!err)
result = IXGBE_XDP_REDIR;
else
result = IXGBE_XDP_CONSUMED;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
result = IXGBE_XDP_CONSUMED;
break;
}
xdp_out:
rcu_read_unlock();
return ERR_PTR(-result);
}
static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
unsigned int size)
{
unsigned int truesize;
#if (PAGE_SIZE < 8192)
truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
truesize = rx_ring->rx_offset ?
SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
SKB_DATA_ALIGN(size);
#endif
return truesize;
}
static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
unsigned int size)
{
unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size);
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif
}
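/* ixgbe_clean_rx_irq - clean completed descriptors from the Rx ring, up to
 * @budget packets; returns the number of packets cleaned.
 */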
static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
const int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
struct ixgbe_adapter *adapter = q_vector->adapter;
#ifdef IXGBE_FCOE
int ddp_bytes;
unsigned int mss = 0;
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
unsigned int offset = rx_ring->rx_offset;
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
#if (PAGE_SIZE < 8192)
frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
#endif
xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
int rx_buffer_pgcnt;
unsigned int size;
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
size = le16_to_cpu(rx_desc->wb.upper.length);
if (!size)
break;
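/* This memory barrier is needed to keep us from reading
 * any other fields out of the rx_desc until we know the
 * descriptor has been written back
 */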
dma_rmb();
rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
if (!skb) {
unsigned char *hard_start;
hard_start = page_address(rx_buffer->page) +
rx_buffer->page_offset - offset;
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
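/* at larger PAGE_SIZE, frame_sz depends on the packet size */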
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
#endif
skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
}
if (IS_ERR(skb)) {
unsigned int xdp_res = -PTR_ERR(skb);
if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
xdp_xmit |= xdp_res;
ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
} else {
rx_buffer->pagecnt_bias++;
}
total_rx_packets++;
total_rx_bytes += size;
} else if (skb) {
ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
} else if (ring_uses_build_skb(rx_ring)) {
skb = ixgbe_build_skb(rx_ring, rx_buffer,
&xdp, rx_desc);
} else {
skb = ixgbe_construct_skb(rx_ring, rx_buffer,
&xdp, rx_desc);
}
if (!skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
}
ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
cleaned_count++;
if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
continue;
if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
continue;
total_rx_bytes += skb->len;
ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
#ifdef IXGBE_FCOE
if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
if (ddp_bytes > 0) {
if (!mss) {
mss = rx_ring->netdev->mtu -
sizeof(struct fcoe_hdr) -
sizeof(struct fc_frame_header) -
sizeof(struct fcoe_crc_eof);
if (mss > 512)
mss &= ~511;
}
total_rx_bytes += ddp_bytes;
total_rx_packets += DIV_ROUND_UP(ddp_bytes,
mss);
}
if (!ddp_bytes) {
dev_kfree_skb_any(skb);
continue;
}
}
#endif /* IXGBE_FCOE */
ixgbe_rx_skb(q_vector, skb);
total_rx_packets++;
}
if (xdp_xmit & IXGBE_XDP_REDIR)
xdp_do_flush_map();
if (xdp_xmit & IXGBE_XDP_TX) {
struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
wmb();
writel(ring->next_to_use, ring->tail);
}
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
return total_rx_packets;
}
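/* ixgbe_configure_msix - map each ring to its queue vector through the IVAR
 * registers and program the initial EITR values.
 */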
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector;
int v_idx;
u32 mask;
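/* Populate MSIX to EITR Select */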
if (adapter->num_vfs > 32) {
u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
}
for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
struct ixgbe_ring *ring;
q_vector = adapter->q_vector[v_idx];
ixgbe_for_each_ring(ring, q_vector->rx)
ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
ixgbe_for_each_ring(ring, q_vector->tx)
ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
ixgbe_write_eitr(q_vector);
}
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
v_idx);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
break;
default:
break;
}
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
mask = IXGBE_EIMS_ENABLE_MASK;
mask &= ~(IXGBE_EIMS_OTHER |
IXGBE_EIMS_MAILBOX |
IXGBE_EIMS_LSC);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
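/* ixgbe_update_itr - update the dynamic ITR value based on the packet and
 * byte counts seen since the last update: light traffic is tuned for low
 * latency, while bulk traffic is moderated using the average wire size.
 */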
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring_container *ring_container)
{
unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |
IXGBE_ITR_ADAPTIVE_LATENCY;
unsigned int avg_wire_size, packets, bytes;
unsigned long next_update = jiffies;
if (!ring_container->ring)
return;
if (time_after(next_update, ring_container->next_update))
goto clear_counts;
packets = ring_container->total_packets;
if (!packets) {
itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
goto clear_counts;
}
bytes = ring_container->total_bytes;
if (packets < 4 && bytes < 9000) {
itr = IXGBE_ITR_ADAPTIVE_LATENCY;
goto adjust_by_size;
}
if (packets < 48) {
itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
goto clear_counts;
}
if (packets < 96) {
itr = q_vector->itr >> 2;
goto clear_counts;
}
if (packets < 256) {
itr = q_vector->itr >> 3;
if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
goto clear_counts;
}
itr = IXGBE_ITR_ADAPTIVE_BULK;
adjust_by_size:
avg_wire_size = bytes / packets;
if (avg_wire_size <= 60) {
avg_wire_size = 5120;
} else if (avg_wire_size <= 316) {
avg_wire_size *= 40;
avg_wire_size += 2720;
} else if (avg_wire_size <= 1084) {
avg_wire_size *= 15;
avg_wire_size += 11452;
} else if (avg_wire_size < 1968) {
avg_wire_size *= 5;
avg_wire_size += 22420;
} else {
avg_wire_size = 32256;
}
if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
avg_wire_size >>= 1;
switch (q_vector->adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
case IXGBE_LINK_SPEED_100_FULL:
default:
itr += DIV_ROUND_UP(avg_wire_size,
IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
IXGBE_ITR_ADAPTIVE_MIN_INC;
break;
case IXGBE_LINK_SPEED_2_5GB_FULL:
case IXGBE_LINK_SPEED_1GB_FULL:
case IXGBE_LINK_SPEED_10_FULL:
if (avg_wire_size > 8064)
avg_wire_size = 8064;
itr += DIV_ROUND_UP(avg_wire_size,
IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
IXGBE_ITR_ADAPTIVE_MIN_INC;
break;
}
clear_counts:
ring_container->itr = itr;
ring_container->next_update = next_update + 1;
ring_container->total_bytes = 0;
ring_container->total_packets = 0;
}
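/* ixgbe_write_eitr - write the EITR register in a hardware-specific way:
 * 82598 must mirror the interval into the high 16 bits, while later MACs
 * set IXGBE_EITR_CNT_WDIS so the write does not reset the internal counter.
 */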
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_hw *hw = &adapter->hw;
int v_idx = q_vector->v_idx;
u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
itr_reg |= (itr_reg << 16);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
itr_reg |= IXGBE_EITR_CNT_WDIS;
break;
default:
break;
}
IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}
static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
{
u32 new_itr;
ixgbe_update_itr(q_vector, &q_vector->tx);
ixgbe_update_itr(q_vector, &q_vector->rx);
new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;
new_itr <<= 2;
if (new_itr != q_vector->itr) {
q_vector->itr = new_itr;
ixgbe_write_eitr(q_vector);
}
}
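/* ixgbe_check_overtemp_subtask - service-task handler for over-temperature
 * events; filters the per-device causes and, if the event is genuine, logs
 * the overheat message and clears the stored interrupt event.
 */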
static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr = adapter->interrupt_event;
s32 rc;
if (test_bit(__IXGBE_DOWN, &adapter->state))
return;
if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
return;
adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_T3_LOM:
if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
!(eicr & IXGBE_EICR_LSC))
return;
if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
u32 speed;
bool link_up = false;
hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (link_up)
return;
}
if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
return;
break;
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
rc = hw->phy.ops.check_overtemp(hw);
if (rc != IXGBE_ERR_OVERTEMP)
return;
break;
default:
if (adapter->hw.mac.type >= ixgbe_mac_X540)
return;
if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
return;
break;
}
e_crit(drv, "%s\n", ixgbe_overheat_msg);
adapter->interrupt_event = 0;
}
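/* ixgbe_check_fan_failure - log a fan failure and clear its GPI interrupt */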
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
struct ixgbe_hw *hw = &adapter->hw;
if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
(eicr & IXGBE_EICR_GPI_SDP1(hw))) {
e_crit(probe, "Fan has stopped, replace the adapter\n");
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
}
}
static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
struct ixgbe_hw *hw = &adapter->hw;
if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
return;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
(eicr & IXGBE_EICR_LSC)) &&
(!test_bit(__IXGBE_DOWN, &adapter->state))) {
adapter->interrupt_event = eicr;
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
ixgbe_service_event_schedule(adapter);
return;
}
return;
case ixgbe_mac_x550em_a:
if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
adapter->interrupt_event = eicr;
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
ixgbe_service_event_schedule(adapter);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
IXGBE_EICR_GPI_SDP0_X550EM_a);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
IXGBE_EICR_GPI_SDP0_X550EM_a);
}
return;
case ixgbe_mac_X550:
case ixgbe_mac_X540:
if (!(eicr & IXGBE_EICR_TS))
return;
break;
default:
return;
}
e_crit(drv, "%s\n", ixgbe_overheat_msg);
}
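/* ixgbe_is_sfp - true when the MAC uses a pluggable (SFP/QSFP) module */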
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
if (hw->phy.type == ixgbe_phy_nl)
return true;
return false;
case ixgbe_mac_82599EB:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
switch (hw->mac.ops.get_media_type(hw)) {
case ixgbe_media_type_fiber:
case ixgbe_media_type_fiber_qsfp:
return true;
default:
return false;
}
default:
return false;
}
}
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
if (!ixgbe_is_sfp(hw))
return;
if (hw->mac.type >= ixgbe_mac_X540)
eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
if (eicr & eicr_mask) {
IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
adapter->sfp_poll_time = 0;
ixgbe_service_event_schedule(adapter);
}
}
if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
(eicr & IXGBE_EICR_GPI_SDP1(hw))) {
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
ixgbe_service_event_schedule(adapter);
}
}
}
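/* ixgbe_check_lsc - record a link status change and defer the link update to
 * the service task, with the LSC interrupt masked until then.
 */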
static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
adapter->lsc_int++;
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
adapter->link_check_timeout = jiffies;
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
IXGBE_WRITE_FLUSH(hw);
ixgbe_service_event_schedule(adapter);
}
}
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
u32 mask;
struct ixgbe_hw *hw = &adapter->hw;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
mask = (qmask >> 32);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
break;
default:
break;
}
}
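/* ixgbe_irq_enable - enable default interrupt generation settings
 * @queues: also enable all queue interrupts
 * @flush: flush the register writes when done
 */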
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
bool flush)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
mask &= ~IXGBE_EIMS_LSC;
if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
mask |= IXGBE_EIMS_GPI_SDP0(hw);
break;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
mask |= IXGBE_EIMS_TS;
break;
default:
break;
}
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
mask |= IXGBE_EIMS_GPI_SDP1(hw);
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
mask |= IXGBE_EIMS_GPI_SDP1(hw);
mask |= IXGBE_EIMS_GPI_SDP2(hw);
fallthrough;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
mask |= IXGBE_EICR_GPI_SDP0_X540;
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_MAILBOX;
break;
default:
break;
}
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
mask |= IXGBE_EIMS_FLOW_DIR;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
if (queues)
ixgbe_irq_enable_queues(adapter, ~0);
if (flush)
IXGBE_WRITE_FLUSH(&adapter->hw);
}
static irqreturn_t ixgbe_msix_other(int irq, void *data)
{
struct ixgbe_adapter *adapter = data;
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr;
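/* Workaround for silicon errata: read EICS (clear-by-write) instead of
 * EICR (clear-by-read) so the interrupt causes are returned without being
 * cleared; they are cleared explicitly by the write to EICR below. The
 * lower 16 bits of EICR carry the queue interrupts and are masked off
 * here so they cannot be cleared accidentally while this handler runs.
 */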
eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
eicr &= 0xFFFF0000;
IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
if (eicr & IXGBE_EICR_MAILBOX)
ixgbe_msg_task(adapter);
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
(eicr & IXGBE_EICR_GPI_SDP0_X540)) {
adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
ixgbe_service_event_schedule(adapter);
IXGBE_WRITE_REG(hw, IXGBE_EICR,
IXGBE_EICR_GPI_SDP0_X540);
}
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
ixgbe_service_event_schedule(adapter);
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
}
if (eicr & IXGBE_EICR_FLOW_DIR) {
int reinit_count = 0;
int i;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *ring = adapter->tx_ring[i];
if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
&ring->state))
reinit_count++;
}
if (reinit_count) {
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
ixgbe_service_event_schedule(adapter);
}
}
ixgbe_check_sfp_event(adapter, eicr);
ixgbe_check_overtemp_event(adapter, eicr);
break;
default:
break;
}
ixgbe_check_fan_failure(adapter, eicr);
if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
ixgbe_ptp_check_pps_event(adapter);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable(adapter, false, false);
return IRQ_HANDLED;
}
static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
{
struct ixgbe_q_vector *q_vector = data;
if (q_vector->rx.ring || q_vector->tx.ring)
napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED;
}
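/**
 * ixgbe_poll - NAPI polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets the driver is allowed to clean
 *
 * Cleans the Tx rings first, then splits the remaining budget evenly
 * across the Rx rings attached to this q_vector.
 **/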
int ixgbe_poll(struct napi_struct *napi, int budget)
{
struct ixgbe_q_vector *q_vector =
container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_ring *ring;
int per_ring_budget, work_done = 0;
bool clean_complete = true;
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_dca(q_vector);
#endif
ixgbe_for_each_ring(ring, q_vector->tx) {
bool wd = ring->xsk_pool ?
ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
ixgbe_clean_tx_irq(q_vector, ring, budget);
if (!wd)
clean_complete = false;
}
if (budget <= 0)
return budget;
if (q_vector->rx.count > 1)
per_ring_budget = max(budget/q_vector->rx.count, 1);
else
per_ring_budget = budget;
ixgbe_for_each_ring(ring, q_vector->rx) {
int cleaned = ring->xsk_pool ?
ixgbe_clean_rx_irq_zc(q_vector, ring,
per_ring_budget) :
ixgbe_clean_rx_irq(q_vector, ring,
per_ring_budget);
work_done += cleaned;
if (cleaned >= per_ring_budget)
clean_complete = false;
}
if (!clean_complete)
return budget;
if (likely(napi_complete_done(napi, work_done))) {
if (adapter->rx_itr_setting & 1)
ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
BIT_ULL(q_vector->v_idx));
}
return min(work_done, budget - 1);
}
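/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Requests one vector per queue q_vector plus a final "other" vector
 * for link, mailbox and other non-queue events.
 **/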
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
unsigned int ri = 0, ti = 0;
int vector, err;
for (vector = 0; vector < adapter->num_q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
struct msix_entry *entry = &adapter->msix_entries[vector];
if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
"%s-TxRx-%u", netdev->name, ri++);
ti++;
} else if (q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
"%s-rx-%u", netdev->name, ri++);
} else if (q_vector->tx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
"%s-tx-%u", netdev->name, ti++);
} else {
continue;
}
err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
q_vector->name, q_vector);
if (err) {
e_err(probe, "request_irq failed for MSIX interrupt "
"Error: %d\n", err);
goto free_queue_irqs;
}
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
irq_set_affinity_hint(entry->vector,
&q_vector->affinity_mask);
}
}
err = request_irq(adapter->msix_entries[vector].vector,
ixgbe_msix_other, 0, netdev->name, adapter);
if (err) {
e_err(probe, "request_irq for msix_other failed: %d\n", err);
goto free_queue_irqs;
}
return 0;
free_queue_irqs:
while (vector) {
vector--;
irq_set_affinity_hint(adapter->msix_entries[vector].vector,
NULL);
free_irq(adapter->msix_entries[vector].vector,
adapter->q_vector[vector]);
}
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
return err;
}
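/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/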
static irqreturn_t ixgbe_intr(int irq, void *data)
{
struct ixgbe_adapter *adapter = data;
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
u32 eicr;
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
if (!eicr) {
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable(adapter, true, true);
return IRQ_NONE;
}
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
ixgbe_check_sfp_event(adapter, eicr);
fallthrough;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
ixgbe_service_event_schedule(adapter);
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
}
ixgbe_check_overtemp_event(adapter, eicr);
break;
default:
break;
}
ixgbe_check_fan_failure(adapter, eicr);
if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
ixgbe_ptp_check_pps_event(adapter);
napi_schedule_irqoff(&q_vector->napi);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable(adapter, false, false);
return IRQ_HANDLED;
}
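/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/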
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
err = ixgbe_request_msix_irqs(adapter);
else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
netdev->name, adapter);
else
err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
netdev->name, adapter);
if (err)
e_err(probe, "request_irq failed, Error %d\n", err);
return err;
}
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
int vector;
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
free_irq(adapter->pdev->irq, adapter);
return;
}
if (!adapter->msix_entries)
return;
for (vector = 0; vector < adapter->num_q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
struct msix_entry *entry = &adapter->msix_entries[vector];
if (!q_vector->rx.ring && !q_vector->tx.ring)
continue;
irq_set_affinity_hint(entry->vector, NULL);
free_irq(entry->vector, q_vector);
}
free_irq(adapter->msix_entries[vector].vector, adapter);
}
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
break;
default:
break;
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
int vector;
for (vector = 0; vector < adapter->num_q_vectors; vector++)
synchronize_irq(adapter->msix_entries[vector].vector);
synchronize_irq(adapter->msix_entries[vector++].vector);
} else {
synchronize_irq(adapter->pdev->irq);
}
}
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
ixgbe_write_eitr(q_vector);
ixgbe_set_ivar(adapter, 0, 0, 0);
ixgbe_set_ivar(adapter, 1, 0, 0);
e_info(hw, "Legacy interrupt IVAR setup done\n");
}
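/**
 * ixgbe_configure_tx_ring - Configure a Tx descriptor ring after reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 **/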
void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
u64 tdba = ring->dma;
int wait_loop = 10;
u32 txdctl = IXGBE_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
ring->xsk_pool = NULL;
if (ring_is_xdp(ring))
ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
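/* disable queue to avoid issues while updating state */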
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
(tdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
ring->count * sizeof(union ixgbe_adv_tx_desc));
IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
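/* Set WTHRESH to encourage burst writeback; it should not be set higher
 * than 1 when ITR is 0, as that could cause false Tx hangs. HTHRESH = 1
 * and PTHRESH = 32 improve performance and avoid a Tx hang with DCB
 * enabled.
 */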
if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
txdctl |= 1u << 16;
else
txdctl |= 8u << 16;
txdctl |= (1u << 8) |
32;
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
ring->atr_sample_rate = adapter->atr_sample_rate;
ring->atr_count = 0;
set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
} else {
ring->atr_sample_rate = 0;
}
if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
struct ixgbe_q_vector *q_vector = ring->q_vector;
if (q_vector)
netif_set_xps_queue(ring->netdev,
&q_vector->affinity_mask,
ring->queue_index);
}
clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
memset(ring->tx_buffer_info, 0,
sizeof(struct ixgbe_tx_buffer) * ring->count);
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
if (hw->mac.type == ixgbe_mac_82598EB &&
!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
return;
do {
usleep_range(1000, 2000);
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
if (!wait_loop)
hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
}
static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rttdcs, mtqc;
u8 tcs = adapter->hw_tcs;
if (hw->mac.type == ixgbe_mac_82598EB)
return;
rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
rttdcs |= IXGBE_RTTDCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
mtqc = IXGBE_MTQC_VT_ENA;
if (tcs > 4)
mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
else if (tcs > 1)
mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
else if (adapter->ring_feature[RING_F_VMDQ].mask ==
IXGBE_82599_VMDQ_4Q_MASK)
mtqc |= IXGBE_MTQC_32VF;
else
mtqc |= IXGBE_MTQC_64VF;
} else {
if (tcs > 4) {
mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
} else if (tcs > 1) {
mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
} else {
u8 max_txq = adapter->num_tx_queues +
adapter->num_xdp_queues;
if (max_txq > 63)
mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
else
mtqc = IXGBE_MTQC_64Q_1PB;
}
}
IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
if (tcs) {
u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
sectx |= IXGBE_SECTX_DCB;
IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
}
rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
}
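/**
 * ixgbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 **/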
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 dmatxctl;
u32 i;
ixgbe_setup_mtqc(adapter);
if (hw->mac.type != ixgbe_mac_82598EB) {
dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
dmatxctl |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
}
for (i = 0; i < adapter->num_tx_queues; i++)
ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
for (i = 0; i < adapter->num_xdp_queues; i++)
ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
}
static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
u8 reg_idx = ring->reg_idx;
u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
srrctl |= IXGBE_SRRCTL_DROP_EN;
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
u8 reg_idx = ring->reg_idx;
u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
srrctl &= ~IXGBE_SRRCTL_DROP_EN;
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
#else
static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
#endif
{
int i;
bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
if (adapter->ixgbe_ieee_pfc)
pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
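/* Set the drop-enable bit if SR-IOV is enabled, or if there is more than
 * one Rx queue and flow control is disabled; this avoids head-of-line
 * blocking between pools and queues.
 */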
if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
!(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
for (i = 0; i < adapter->num_rx_queues; i++)
ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
} else {
for (i = 0; i < adapter->num_rx_queues; i++)
ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
}
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 srrctl;
u8 reg_idx = rx_ring->reg_idx;
if (hw->mac.type == ixgbe_mac_82598EB) {
u16 mask = adapter->ring_feature[RING_F_RSS].mask;
reg_idx &= mask;
}
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
if (rx_ring->xsk_pool) {
u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
if (hw->mac.type != ixgbe_mac_82599EB)
srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
else
srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
} else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
} else {
srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
}
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
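/**
 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
 * @adapter: device handle
 *
 * 82598/82599/X540: 128 entries; X550 in SR-IOV mode: 64; X550 otherwise: 512.
 **/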
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
{
if (adapter->hw.mac.type < ixgbe_mac_X550)
return 128;
else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
return 64;
else
return 512;
}
void ixgbe_store_key(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int i;
for (i = 0; i < 10; i++)
IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
}
static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
{
u32 *rss_key;
if (!adapter->rss_key) {
rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
if (unlikely(!rss_key))
return -ENOMEM;
netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
adapter->rss_key = rss_key;
}
return 0;
}
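/**
 * ixgbe_store_reta - Write the RETA table to hardware
 * @adapter: device handle
 *
 * Writes the RSS redirection table stored in adapter->rss_indir_tbl to HW.
 **/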
void ixgbe_store_reta(struct ixgbe_adapter *adapter)
{
u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
struct ixgbe_hw *hw = &adapter->hw;
u32 reta = 0;
u32 indices_multi;
u8 *indir_tbl = adapter->rss_indir_tbl;
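/* RETA entry width differs by MAC: 82598 packs a pair of 4-bit RSS
 * indices per byte (hence the 0x11 multiplier); later MACs store one
 * index per byte.
 */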
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
indices_multi = 0x11;
else
indices_multi = 0x1;
for (i = 0; i < reta_entries; i++) {
reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
if ((i & 3) == 3) {
if (i < 128)
IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
else
IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
reta);
reta = 0;
}
}
}
static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
{
u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
struct ixgbe_hw *hw = &adapter->hw;
u32 vfreta = 0;
for (i = 0; i < reta_entries; i++) {
u16 pool = adapter->num_rx_pools;
vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
if ((i & 3) != 3)
continue;
while (pool--)
IXGBE_WRITE_REG(hw,
IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
vfreta);
vfreta = 0;
}
}
static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
{
u32 i, j;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
rss_i = 4;
ixgbe_store_key(adapter);
memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
for (i = 0, j = 0; i < reta_entries; i++, j++) {
if (j == rss_i)
j = 0;
adapter->rss_indir_tbl[i] = j;
}
ixgbe_store_reta(adapter);
}
static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
int i, j;
for (i = 0; i < 10; i++) {
u16 pool = adapter->num_rx_pools;
while (pool--)
IXGBE_WRITE_REG(hw,
IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
*(adapter->rss_key + i));
}
for (i = 0, j = 0; i < 64; i++, j++) {
if (j == rss_i)
j = 0;
adapter->rss_indir_tbl[i] = j;
}
ixgbe_store_vfreta(adapter);
}
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
u32 rxcsum;
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
rxcsum |= IXGBE_RXCSUM_PCSD;
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
if (adapter->ring_feature[RING_F_RSS].mask)
mrqc = IXGBE_MRQC_RSSEN;
} else {
u8 tcs = adapter->hw_tcs;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
if (tcs > 4)
mrqc = IXGBE_MRQC_VMDQRT8TCEN;
else if (tcs > 1)
mrqc = IXGBE_MRQC_VMDQRT4TCEN;
else if (adapter->ring_feature[RING_F_VMDQ].mask ==
IXGBE_82599_VMDQ_4Q_MASK)
mrqc = IXGBE_MRQC_VMDQRSS32EN;
else
mrqc = IXGBE_MRQC_VMDQRSS64EN;
if (hw->mac.type >= ixgbe_mac_X550)
mrqc |= IXGBE_MRQC_L3L4TXSWEN;
} else {
if (tcs > 4)
mrqc = IXGBE_MRQC_RTRSS8TCEN;
else if (tcs > 1)
mrqc = IXGBE_MRQC_RTRSS4TCEN;
else
mrqc = IXGBE_MRQC_RSSEN;
}
}
rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
IXGBE_MRQC_RSS_FIELD_IPV6 |
IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
if ((hw->mac.type >= ixgbe_mac_X550) &&
(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
u16 pool = adapter->num_rx_pools;
mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
ixgbe_setup_vfreta(adapter);
vfmrqc = IXGBE_MRQC_RSSEN;
vfmrqc |= rss_field;
while (pool--)
IXGBE_WRITE_REG(hw,
IXGBE_PFVFMRQC(VMDQ_P(pool)),
vfmrqc);
} else {
ixgbe_setup_reta(adapter);
mrqc |= rss_field;
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
}
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rscctrl;
u8 reg_idx = ring->reg_idx;
if (!ring_is_rsc_enabled(ring))
return;
rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
rscctrl |= IXGBE_RSCCTL_RSCEN;
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}
#define IXGBE_MAX_RX_DESC_POLL 10
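/**
 * ixgbe_rx_desc_queue_enable - Poll for an Rx queue to become enabled
 * @adapter: address of board private structure
 * @ring: structure containing ring specific data
 **/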
static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
int wait_loop = IXGBE_MAX_RX_DESC_POLL;
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
if (ixgbe_removed(hw->hw_addr))
return;
if (hw->mac.type == ixgbe_mac_82598EB &&
!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
return;
do {
usleep_range(1000, 2000);
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
if (!wait_loop) {
e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
"the polling period\n", reg_idx);
}
}
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
union ixgbe_adv_rx_desc *rx_desc;
u64 rdba = ring->dma;
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
if (ring->xsk_pool) {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL));
xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
} else {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL));
}
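/* disable queue to avoid use of these values while updating state */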
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
ring->count * sizeof(union ixgbe_adv_rx_desc));
IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
ixgbe_configure_srrctl(adapter, ring);
ixgbe_configure_rscctl(adapter, ring);
if (hw->mac.type == ixgbe_mac_82598EB) {
rxdctl &= ~0x3FFFFF;
rxdctl |= 0x080420;
#if (PAGE_SIZE < 8192)
} else if (hw->mac.type != ixgbe_mac_82599EB) {
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
if (ring_uses_build_skb(ring) &&
!test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
IXGBE_RXDCTL_RLPML_EN;
#endif
}
if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
ring->rx_buf_len = xsk_buf_len;
}
memset(ring->rx_buffer_info, 0,
sizeof(struct ixgbe_rx_buffer) * ring->count);
rx_desc = IXGBE_RX_DESC(ring, 0);
rx_desc->wb.upper.length = 0;
rxdctl |= IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
ixgbe_rx_desc_queue_enable(adapter, ring);
if (ring->xsk_pool)
ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
else
ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
}
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int rss_i = adapter->ring_feature[RING_F_RSS].indices;
u16 pool = adapter->num_rx_pools;
u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
IXGBE_PSRTYPE_UDPHDR |
IXGBE_PSRTYPE_IPV4HDR |
IXGBE_PSRTYPE_L2HDR |
IXGBE_PSRTYPE_IPV6HDR;
if (hw->mac.type == ixgbe_mac_82598EB)
return;
if (rss_i > 3)
psrtype |= 2u << 29;
else if (rss_i > 1)
psrtype |= 1u << 29;
while (pool--)
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 pool = adapter->num_rx_pools;
u32 reg_offset, vf_shift, vmolr;
u32 gcr_ext, vmdctl;
int i;
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return;
vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
vmdctl |= IXGBE_VT_CTL_REPLEN;
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
vmolr = IXGBE_VMOLR_AUPE;
while (pool--)
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
vf_shift = VMDQ_P(0) % 32;
reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
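/* Enable only the PF's pool for Tx/Rx */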
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
if (adapter->bridge_mode == BRIDGE_MODE_VEB)
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
switch (adapter->ring_feature[RING_F_VMDQ].mask) {
case IXGBE_82599_VMDQ_8Q_MASK:
gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
break;
case IXGBE_82599_VMDQ_4Q_MASK:
gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
break;
default:
gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
break;
}
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
for (i = 0; i < adapter->num_vfs; i++) {
ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
adapter->vfinfo[i].spoofchk_enabled);
ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
adapter->vfinfo[i].rss_query_enabled);
}
}
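/**
 * ixgbe_set_rx_buffer_len - Set Rx buffer length for all Rx queues
 * @adapter: board private structure
 *
 * Must be called before the Rx unit is enabled so that the buffer
 * sizing (2K vs 3K, legacy vs build_skb) matches the current MTU and
 * feature flags.
 **/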
static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
struct ixgbe_ring *rx_ring;
int i;
u32 mhadd, hlreg0;
#ifdef IXGBE_FCOE
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
(max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif /* IXGBE_FCOE */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
mhadd &= ~IXGBE_MHADD_MFS_MASK;
mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
}
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
for (i = 0; i < adapter->num_rx_queues; i++) {
rx_ring = adapter->rx_ring[i];
clear_ring_rsc_enabled(rx_ring);
clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
set_ring_rsc_enabled(rx_ring);
if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
continue;
set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
#if (PAGE_SIZE < 8192)
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
(max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
#endif
}
}
static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
rdrxctl |= IXGBE_RDRXCTL_MVMEN;
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
fallthrough;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
(IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
break;
default:
return;
}
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
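/**
 * ixgbe_configure_rx - Configure the Rx unit of the MAC after a reset
 * @adapter: board private structure
 **/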
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int i;
u32 rxctrl, rfctl;
hw->mac.ops.disable_rx(hw);
ixgbe_setup_psrtype(adapter);
ixgbe_setup_rdrxctl(adapter);
rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
rfctl &= ~IXGBE_RFCTL_RSC_DIS;
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
rfctl |= IXGBE_RFCTL_RSC_DIS;
rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
ixgbe_setup_mrqc(adapter);
ixgbe_set_rx_buffer_len(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
if (hw->mac.type == ixgbe_mac_82598EB)
rxctrl |= IXGBE_RXCTRL_DMBYPS;
rxctrl |= IXGBE_RXCTRL_RXEN;
hw->mac.ops.enable_rx_dma(hw, rxctrl);
}
static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
set_bit(vid, adapter->active_vlans);
return 0;
}
static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
{
u32 vlvf;
int idx;
if (vlan == 0)
return 0;
for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
if ((vlvf & VLAN_VID_MASK) == vlan)
break;
}
return idx;
}
void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 bits, word;
int idx;
idx = ixgbe_find_vlvf_entry(hw, vid);
if (!idx)
return;
word = idx * 2 + (VMDQ_P(0) / 32);
bits = ~BIT(VMDQ_P(0) % 32);
bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
}
}
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
clear_bit(vid, adapter->active_vlans);
return 0;
}
static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 vlnctrl;
int i, j;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlnctrl &= ~IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
if (!netif_is_ixgbe(ring->netdev))
continue;
j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl &= ~IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
}
break;
default:
break;
}
}
static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 vlnctrl;
int i, j;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlnctrl |= IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
if (!netif_is_ixgbe(ring->netdev))
continue;
j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
}
break;
default:
break;
}
}
static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 vlnctrl, i;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
vlnctrl |= IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
} else {
vlnctrl &= ~IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
return;
}
if (hw->mac.type == ixgbe_mac_82598EB)
return;
if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
return;
adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
for (i = IXGBE_VLVF_ENTRIES; --i;) {
u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
vlvfb |= BIT(VMDQ_P(0) % 32);
IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
}
for (i = hw->mac.vft_size; i--;)
IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
}
#define VFTA_BLOCK_SIZE 8
static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
u32 vid_start = vfta_offset * 32;
u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
u32 i, vid, word, bits;
for (i = IXGBE_VLVF_ENTRIES; --i;) {
u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
vid = vlvf & VLAN_VID_MASK;
if (vid < vid_start || vid >= vid_end)
continue;
if (vlvf) {
vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
if (test_bit(vid, adapter->active_vlans))
continue;
}
word = i * 2 + VMDQ_P(0) / 32;
bits = ~BIT(VMDQ_P(0) % 32);
bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
}
for (i = VFTA_BLOCK_SIZE; i--;) {
vid = (vfta_offset + i) * 32;
word = vid / BITS_PER_LONG;
bits = vid % BITS_PER_LONG;
vfta[i] |= adapter->active_vlans[word] >> bits;
IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
}
}
static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 vlnctrl, i;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlnctrl |= IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
hw->mac.type == ixgbe_mac_82598EB)
return;
if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
return;
adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
ixgbe_scrub_vfta(adapter, i);
}
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
u16 vid = 1;
ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
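/**
 * ixgbe_write_mc_addr_list - write multicast addresses to the MTA
 * @netdev: network interface device structure
 *
 * Returns the number of multicast addresses written, or -ENOMEM when the
 * MAC has no update_mc_addr_list hook.
 **/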
static int ixgbe_write_mc_addr_list(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!netif_running(netdev))
return 0;
if (hw->mac.ops.update_mc_addr_list)
hw->mac.ops.update_mc_addr_list(hw, netdev);
else
return -ENOMEM;
#ifdef CONFIG_PCI_IOV
ixgbe_restore_vf_multicasts(adapter);
#endif
return netdev_mc_count(netdev);
}
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
{
struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
struct ixgbe_hw *hw = &adapter->hw;
int i;
for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
hw->mac.ops.set_rar(hw, i,
mac_table->addr,
mac_table->pool,
IXGBE_RAH_AV);
else
hw->mac.ops.clear_rar(hw, i);
}
}
#endif
static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
{
struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
struct ixgbe_hw *hw = &adapter->hw;
int i;
for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
continue;
mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
hw->mac.ops.set_rar(hw, i,
mac_table->addr,
mac_table->pool,
IXGBE_RAH_AV);
else
hw->mac.ops.clear_rar(hw, i);
}
}
static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
{
struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
struct ixgbe_hw *hw = &adapter->hw;
int i;
for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
}
ixgbe_sync_mac_table(adapter);
}
static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
{
struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
struct ixgbe_hw *hw = &adapter->hw;
int i, count = 0;
for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
continue;
if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
if (mac_table->pool != pool)
continue;
}
count++;
}
return count;
}
static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
{
struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
struct ixgbe_hw *hw = &adapter->hw;
memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
mac_table->pool = VMDQ_P(0);
mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
IXGBE_RAH_AV);
}
int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
const u8 *addr, u16 pool)
{
struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
struct ixgbe_hw *hw = &adapter->hw;
int i;
if (is_zero_ether_addr(addr))
return -EINVAL;
for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
continue;
ether_addr_copy(mac_table->addr, addr);
mac_table->pool = pool;
mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
IXGBE_MAC_STATE_IN_USE;
ixgbe_sync_mac_table(adapter);
return i;
}
return -ENOMEM;
}
int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
const u8 *addr, u16 pool)
{
struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
struct ixgbe_hw *hw = &adapter->hw;
int i;
if (is_zero_ether_addr(addr))
return -EINVAL;
for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
continue;
if (mac_table->pool != pool)
continue;
if (!ether_addr_equal(addr, mac_table->addr))
continue;
mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
ixgbe_sync_mac_table(adapter);
return 0;
}
return -ENOMEM;
}
static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int ret;
ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
return min_t(int, ret, 0);
}
static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
return 0;
}
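/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * Called whenever the unicast/multicast address list or the network
 * interface flags are updated; configures the hardware for proper
 * unicast, multicast, promiscuous and VLAN behavior.
 **/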
void ixgbe_set_rx_mode(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
netdev_features_t features = netdev->features;
int count;
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
fctrl &= ~IXGBE_FCTRL_SBP;
fctrl |= IXGBE_FCTRL_BAM;
fctrl |= IXGBE_FCTRL_DPF;
fctrl |= IXGBE_FCTRL_PMCF;
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
if (netdev->flags & IFF_PROMISC) {
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
vmolr |= IXGBE_VMOLR_MPE;
features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
}
hw->addr_ctrl.user_set_promisc = false;
}
if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
fctrl |= IXGBE_FCTRL_UPE;
vmolr |= IXGBE_VMOLR_ROPE;
}
count = ixgbe_write_mc_addr_list(netdev);
if (count < 0) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
} else if (count) {
vmolr |= IXGBE_VMOLR_ROMPE;
}
if (hw->mac.type != ixgbe_mac_82598EB) {
vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_ROPE);
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
}
if (features & NETIF_F_RXALL) {
fctrl |= (IXGBE_FCTRL_SBP |
IXGBE_FCTRL_BAM |
IXGBE_FCTRL_PMCF);
fctrl &= ~(IXGBE_FCTRL_DPF);
}
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
if (features & NETIF_F_HW_VLAN_CTAG_RX)
ixgbe_vlan_strip_enable(adapter);
else
ixgbe_vlan_strip_disable(adapter);
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
ixgbe_vlan_promisc_disable(adapter);
else
ixgbe_vlan_promisc_enable(adapter);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
napi_enable(&adapter->q_vector[q_idx]->napi);
}
static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
napi_disable(&adapter->q_vector[q_idx]->napi);
}
static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
struct udp_tunnel_info ti;
udp_tunnel_nic_get_port(dev, table, 0, &ti);
if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
adapter->vxlan_port = ti.port;
else
adapter->geneve_port = ti.port;
IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL,
ntohs(adapter->vxlan_port) |
ntohs(adapter->geneve_port) <<
IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT);
return 0;
}
static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = {
.sync_table = ixgbe_udp_tunnel_sync,
.flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
};
static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = {
.sync_table = ixgbe_udp_tunnel_sync,
.flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
},
};
#ifdef CONFIG_IXGBE_DCB
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
if (hw->mac.type == ixgbe_mac_82598EB)
netif_set_gso_max_size(adapter->netdev, 65536);
return;
}
if (hw->mac.type == ixgbe_mac_82598EB)
netif_set_gso_max_size(adapter->netdev, 32768);
#ifdef IXGBE_FCOE
if (adapter->netdev->features & NETIF_F_FCOE_MTU)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
DCB_TX_CONFIG);
ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
DCB_RX_CONFIG);
ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
} else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
ixgbe_dcb_hw_ets(&adapter->hw,
adapter->ixgbe_ieee_ets,
max_frame);
ixgbe_dcb_hw_pfc_config(&adapter->hw,
adapter->ixgbe_ieee_pfc->pfc_en,
adapter->ixgbe_ieee_ets->prio_tc);
}
if (hw->mac.type != ixgbe_mac_82598EB) {
u32 msb = 0;
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
while (rss_i) {
msb++;
rss_i >>= 1;
}
IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
}
}
#endif
#define IXGBE_ETH_FRAMING 20
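/**
 * ixgbe_hpbthresh - calculate high water mark for flow control
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 **/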
static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
{
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *dev = adapter->netdev;
int link, tc, kb, marker;
u32 dv_id, rx_pba;
tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
#ifdef IXGBE_FCOE
if ((dev->features & NETIF_F_FCOE_MTU) &&
(tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
(pb == ixgbe_fcoe_get_tc(adapter)))
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif
switch (hw->mac.type) {
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
dv_id = IXGBE_DV_X540(link, tc);
break;
default:
dv_id = IXGBE_DV(link, tc);
break;
}
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
dv_id += IXGBE_B2BT(tc);
kb = IXGBE_BT2KB(dv_id);
rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
marker = rx_pba - kb;
if (marker < 0) {
e_warn(drv, "Packet Buffer(%i) can not provide enough"
"headroom to support flow control."
"Decrease MTU or number of traffic classes\n", pb);
marker = tc + 1;
}
return marker;
}
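/**
 * ixgbe_lpbthresh - calculate low water mark for flow control
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 **/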
static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
{
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *dev = adapter->netdev;
int tc;
u32 dv_id;
tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
#ifdef IXGBE_FCOE
if ((dev->features & NETIF_F_FCOE_MTU) &&
(tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
(pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif
switch (hw->mac.type) {
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
dv_id = IXGBE_LOW_DV_X540(tc);
break;
default:
dv_id = IXGBE_LOW_DV(tc);
break;
}
return IXGBE_BT2KB(dv_id);
}
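/* ixgbe_pbthresh_setup - calculate and set up the high/low water marks */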
static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int num_tc = adapter->hw_tcs;
int i;
if (!num_tc)
num_tc = 1;
for (i = 0; i < num_tc; i++) {
hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
if (hw->fc.low_water[i] > hw->fc.high_water[i])
hw->fc.low_water[i] = 0;
}
for (; i < MAX_TRAFFIC_CLASS; i++)
hw->fc.high_water[i] = 0;
}
static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int hdrm;
u8 tc = adapter->hw_tcs;
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
hdrm = 32 << adapter->fdir_pballoc;
else
hdrm = 0;
hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
ixgbe_pbthresh_setup(adapter);
}
static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
u8 queue;
spin_lock(&adapter->fdir_perfect_lock);
if (!hlist_empty(&adapter->fdir_filter_list))
ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
queue = IXGBE_FDIR_DROP_QUEUE;
} else {
u32 ring = ethtool_get_flow_spec_ring(filter->action);
u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
if (!vf && (ring >= adapter->num_rx_queues)) {
e_err(drv, "FDIR restore failed without VF, ring: %u\n",
ring);
continue;
} else if (vf &&
((vf > adapter->num_vfs) ||
ring >= adapter->num_rx_queues_per_pool)) {
e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
vf, ring);
continue;
}
if (!vf)
queue = adapter->rx_ring[ring]->reg_idx;
else
queue = ((vf - 1) *
adapter->num_rx_queues_per_pool) + ring;
}
ixgbe_fdir_write_perfect_filter_82599(hw,
&filter->filter, filter->sw_idx, queue);
}
spin_unlock(&adapter->fdir_perfect_lock);
}
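/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/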
static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
if (rx_ring->xsk_pool) {
ixgbe_xsk_clean_rx_ring(rx_ring);
goto skip_free;
}
while (i != rx_ring->next_to_alloc) {
if (rx_buffer->skb) {
struct sk_buff *skb = rx_buffer->skb;
if (IXGBE_CB(skb)->page_released)
dma_unmap_page_attrs(rx_ring->dev,
IXGBE_CB(skb)->dma,
ixgbe_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
IXGBE_RX_DMA_ATTR);
dev_kfree_skb(skb);
}
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
ixgbe_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
IXGBE_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias);
i++;
rx_buffer++;
if (i == rx_ring->count) {
i = 0;
rx_buffer = rx_ring->rx_buffer_info;
}
}
skip_free:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
struct ixgbe_fwd_adapter *accel)
{
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
int num_tc = netdev_get_num_tc(adapter->netdev);
struct net_device *vdev = accel->netdev;
int i, baseq, err;
baseq = accel->pool * adapter->num_rx_queues_per_pool;
netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
accel->pool, adapter->num_rx_pools,
baseq, baseq + adapter->num_rx_queues_per_pool);
accel->rx_base_queue = baseq;
accel->tx_base_queue = baseq;
for (i = 0; i < num_tc; i++)
netdev_bind_sb_channel_queue(adapter->netdev, vdev,
i, rss_i, baseq + (rss_i * i));
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
adapter->rx_ring[baseq + i]->netdev = vdev;
wmb();
err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
VMDQ_P(accel->pool));
if (err >= 0)
return 0;
macvlan_release_l2fw_offload(vdev);
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
adapter->rx_ring[baseq + i]->netdev = NULL;
netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
netdev_unbind_sb_channel(adapter->netdev, vdev);
netdev_set_sb_channel(vdev, 0);
clear_bit(accel->pool, adapter->fwd_bitmask);
kfree(accel);
return err;
}
static int ixgbe_macvlan_up(struct net_device *vdev,
struct netdev_nested_priv *priv)
{
struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
struct ixgbe_fwd_adapter *accel;
if (!netif_is_macvlan(vdev))
return 0;
accel = macvlan_accel_priv(vdev);
if (!accel)
return 0;
ixgbe_fwd_ring_up(adapter, accel);
return 0;
}
static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
{
struct netdev_nested_priv priv = {
.data = (void *)adapter,
};
netdev_walk_all_upper_dev_rcu(adapter->netdev,
ixgbe_macvlan_up, &priv);
}
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_configure_pb(adapter);
#ifdef CONFIG_IXGBE_DCB
ixgbe_configure_dcb(adapter);
#endif
ixgbe_configure_virtualization(adapter);
ixgbe_set_rx_mode(adapter->netdev);
ixgbe_restore_vlan(adapter);
ixgbe_ipsec_restore(adapter);
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
hw->mac.ops.disable_rx_buff(hw);
break;
default:
break;
}
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
ixgbe_init_fdir_signature_82599(&adapter->hw,
adapter->fdir_pballoc);
} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
ixgbe_init_fdir_perfect_82599(&adapter->hw,
adapter->fdir_pballoc);
ixgbe_fdir_filter_restore(adapter);
}
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
hw->mac.ops.enable_rx_buff(hw);
break;
default:
break;
}
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
ixgbe_setup_dca(adapter);
#endif /* CONFIG_IXGBE_DCA */
#ifdef IXGBE_FCOE
ixgbe_configure_fcoe(adapter);
#endif /* IXGBE_FCOE */
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
ixgbe_configure_dfwd(adapter);
}
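/**
 * ixgbe_sfp_link_config - set up SFP+ link
 * @adapter: pointer to private adapter struct
 **/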
static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
{
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
adapter->sfp_poll_time = 0;
}
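/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/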
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
u32 speed;
bool autoneg, link_up = false;
int ret = IXGBE_ERR_LINK_SETUP;
if (hw->mac.ops.check_link)
ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (ret)
return ret;
speed = hw->phy.autoneg_advertised;
if (!speed && hw->mac.ops.get_link_capabilities) {
ret = hw->mac.ops.get_link_capabilities(hw, &speed,
&autoneg);
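/* Remove NBASE-T speeds from default autonegotiation to accommodate
 * broken network switches which cannot cope with advertised NBASE-T
 * speeds.
 */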
speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
IXGBE_LINK_SPEED_2_5GB_FULL);
}
if (ret)
return ret;
if (hw->mac.ops.setup_link)
ret = hw->mac.ops.setup_link(hw, speed, link_up);
return ret;
}
static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 gpie = 0;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
IXGBE_GPIE_OCD;
gpie |= IXGBE_GPIE_EIAME;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
break;
}
} else {
IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
}
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
gpie &= ~IXGBE_GPIE_VTMODE_MASK;
switch (adapter->ring_feature[RING_F_VMDQ].mask) {
case IXGBE_82599_VMDQ_8Q_MASK:
gpie |= IXGBE_GPIE_VTMODE_16;
break;
case IXGBE_82599_VMDQ_4Q_MASK:
gpie |= IXGBE_GPIE_VTMODE_32;
break;
default:
gpie |= IXGBE_GPIE_VTMODE_64;
break;
}
}
if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
gpie |= IXGBE_SDP0_GPIEN_8259X;
break;
default:
break;
}
}
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
gpie |= IXGBE_SDP1_GPIEN(hw);
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
break;
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
gpie |= IXGBE_SDP0_GPIEN_X540;
break;
default:
break;
}
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
}
static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int err;
u32 ctrl_ext;
ixgbe_get_hw_control(adapter);
ixgbe_setup_gpie(adapter);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
ixgbe_configure_msix(adapter);
else
ixgbe_configure_msi_and_legacy(adapter);
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
if (hw->phy.ops.set_phy_power)
hw->phy.ops.set_phy_power(hw, true);
smp_mb__before_atomic();
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
if (ixgbe_is_sfp(hw)) {
ixgbe_sfp_link_config(adapter);
} else {
err = ixgbe_non_sfp_link_config(hw);
if (err)
e_err(probe, "link_config FAILED %d\n", err);
}
IXGBE_READ_REG(hw, IXGBE_EICR);
ixgbe_irq_enable(adapter, true, true);
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
if (esdp & IXGBE_ESDP_SDP1)
e_crit(drv, "Fan has stopped, replace the adapter\n");
}
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
adapter->link_check_timeout = jiffies;
mod_timer(&adapter->service_timer, jiffies);
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
}
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
netif_trans_update(adapter->netdev);
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
if (adapter->hw.phy.type == ixgbe_phy_fw)
ixgbe_watchdog_link_is_down(adapter);
ixgbe_down(adapter);
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
msleep(2000);
ixgbe_up(adapter);
clear_bit(__IXGBE_RESETTING, &adapter->state);
}
void ixgbe_up(struct ixgbe_adapter *adapter)
{
ixgbe_configure(adapter);
ixgbe_up_complete(adapter);
}
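/* Return the worst-case PCIe completion timeout, in microseconds,
 * configured in PCI_EXP_DEVCTL2, so queue-disable polling can be scaled
 * to match.
 */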
static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter)
{
u16 devctl2;
pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) {
case IXGBE_PCIDEVCTRL2_17_34s:
case IXGBE_PCIDEVCTRL2_4_8s:
case IXGBE_PCIDEVCTRL2_1_2s:
return 2000000ul;
case IXGBE_PCIDEVCTRL2_260_520ms:
return 520000ul;
case IXGBE_PCIDEVCTRL2_65_130ms:
return 130000ul;
case IXGBE_PCIDEVCTRL2_16_32ms:
return 32000ul;
case IXGBE_PCIDEVCTRL2_1_2ms:
return 2000ul;
case IXGBE_PCIDEVCTRL2_50_100us:
return 100ul;
case IXGBE_PCIDEVCTRL2_16_32ms_def:
return 32000ul;
default:
break;
}
return 32000ul;
}
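/**
 * ixgbe_disable_rx - Disable Rx DMA and wait for all Rx queues to quiesce
 * @adapter: board private structure
 **/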
void ixgbe_disable_rx(struct ixgbe_adapter *adapter)
{
unsigned long wait_delay, delay_interval;
struct ixgbe_hw *hw = &adapter->hw;
int i, wait_loop;
u32 rxdctl;
hw->mac.ops.disable_rx(hw);
if (ixgbe_removed(hw->hw_addr))
return;
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
u8 reg_idx = ring->reg_idx;
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
rxdctl |= IXGBE_RXDCTL_SWFLSH;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
}
if (hw->mac.type == ixgbe_mac_82598EB &&
!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
return;
delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
wait_loop = IXGBE_MAX_RX_DESC_POLL;
wait_delay = delay_interval;
while (wait_loop--) {
usleep_range(wait_delay, wait_delay + 10);
wait_delay += delay_interval * 2;
rxdctl = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
u8 reg_idx = ring->reg_idx;
rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
}
if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
return;
}
e_err(drv,
"RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
}
void ixgbe_disable_tx(struct ixgbe_adapter *adapter)
{
unsigned long wait_delay, delay_interval;
struct ixgbe_hw *hw = &adapter->hw;
int i, wait_loop;
u32 txdctl;
if (ixgbe_removed(hw->hw_addr))
return;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *ring = adapter->tx_ring[i];
u8 reg_idx = ring->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
struct ixgbe_ring *ring = adapter->xdp_ring[i];
u8 reg_idx = ring->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
}
if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
goto dma_engine_disable;
delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
wait_loop = IXGBE_MAX_RX_DESC_POLL;
wait_delay = delay_interval;
while (wait_loop--) {
usleep_range(wait_delay, wait_delay + 10);
wait_delay += delay_interval * 2;
txdctl = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *ring = adapter->tx_ring[i];
u8 reg_idx = ring->reg_idx;
txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
struct ixgbe_ring *ring = adapter->xdp_ring[i];
u8 reg_idx = ring->reg_idx;
txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
}
if (!(txdctl & IXGBE_TXDCTL_ENABLE))
goto dma_engine_disable;
}
e_err(drv,
"TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
dma_engine_disable:
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
fallthrough;
default:
break;
}
}
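/**
 * ixgbe_reset - reinitialize the hardware, then restore filters and state
 * @adapter: board private structure
 **/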
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
int err;
if (ixgbe_removed(hw->hw_addr))
return;
/* lock SFP init bit to prevent race conditions with the watchdog */
while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
usleep_range(1000, 2000);
/* clear all SFP and link config related flags while holding SFP_INIT */
adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
IXGBE_FLAG2_SFP_NEEDS_RESET);
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
err = hw->mac.ops.init_hw(hw);
switch (err) {
case 0:
case IXGBE_ERR_SFP_NOT_PRESENT:
case IXGBE_ERR_SFP_NOT_SUPPORTED:
break;
case IXGBE_ERR_MASTER_REQUESTS_PENDING:
e_dev_err("master disable timed out\n");
break;
case IXGBE_ERR_EEPROM_VERSION:
e_dev_warn("This device is a pre-production adapter/LOM. "
"Please be aware there may be issues associated with "
"your hardware. If you are experiencing problems "
"please contact your Intel or hardware "
"representative who provided you with this "
"hardware.\n");
break;
default:
e_dev_err("Hardware Error: %d\n", err);
}
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
/* flush entries out of MAC table */
ixgbe_flush_sw_mac_table(adapter);
__dev_uc_unsync(netdev, NULL);
/* do not flush user set addresses */
ixgbe_mac_set_default_filter(adapter);
/* update SAN MAC vmdq pool selection */
if (hw->mac.san_mac_rar_index)
hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_reset(adapter);
if (hw->phy.ops.set_phy_power) {
if (!netif_running(adapter->netdev) && !adapter->wol)
hw->phy.ops.set_phy_power(hw, false);
else
hw->phy.ops.set_phy_power(hw, true);
}
}
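/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/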
static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
u16 i = tx_ring->next_to_clean;
struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
if (tx_ring->xsk_pool) {
ixgbe_xsk_clean_tx_ring(tx_ring);
goto out;
}
while (i != tx_ring->next_to_use) {
union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
/* Free all the Tx ring sk_buffs (or XDP frames) */
if (ring_is_xdp(tx_ring))
xdp_return_frame(tx_buffer->xdpf);
else
dev_kfree_skb_any(tx_buffer->skb);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
/* check for eop_desc to determine the end of the packet */
eop_desc = tx_buffer->next_to_watch;
tx_desc = IXGBE_TX_DESC(tx_ring, i);
while (tx_desc != eop_desc) {
tx_buffer++;
tx_desc++;
i++;
if (unlikely(i == tx_ring->count)) {
i = 0;
tx_buffer = tx_ring->tx_buffer_info;
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
}
/* unmap any remaining paged data */
if (dma_unmap_len(tx_buffer, len))
dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
}
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;
if (unlikely(i == tx_ring->count)) {
i = 0;
tx_buffer = tx_ring->tx_buffer_info;
}
}
/* reset BQL for queue */
if (!ring_is_xdp(tx_ring))
netdev_tx_reset_queue(txring_txq(tx_ring));
out:
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
}
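/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/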
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
ixgbe_clean_rx_ring(adapter->rx_ring[i]);
}
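/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/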
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
ixgbe_clean_tx_ring(adapter->tx_ring[i]);
for (i = 0; i < adapter->num_xdp_queues; i++)
ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
}
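/**
 * ixgbe_fdir_filter_exit - remove all Flow Director filters
 * @adapter: board private structure
 **/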
static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
{
struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
spin_lock(&adapter->fdir_perfect_lock);
hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
hlist_del(&filter->fdir_node);
kfree(filter);
}
adapter->fdir_filter_count = 0;
spin_unlock(&adapter->fdir_perfect_lock);
}
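/**
 * ixgbe_down - quiesce the hardware and bring the interface down
 * @adapter: board private structure
 *
 * Marks the adapter __IXGBE_DOWN, stops the stack queues, disables Rx, Tx
 * and interrupts, notifies any active VFs, resets the hardware unless the
 * PCI channel is offline, and cleans all rings.
 **/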
void ixgbe_down(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
int i;
/* signal that we are down to the interrupt handler */
if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
return; /* do nothing if already down */
netif_tx_stop_all_queues(netdev);
/* call carrier off first to avoid false dev_watchdog timeouts */
netif_carrier_off(netdev);
netif_tx_disable(netdev);
ixgbe_disable_rx(adapter);
/* synchronize_rcu() needed for pending XDP buffers to drain */
if (adapter->xdp_ring[0])
synchronize_rcu();
ixgbe_irq_disable(adapter);
ixgbe_napi_disable_all(adapter);
clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
del_timer_sync(&adapter->service_timer);
if (adapter->num_vfs) {
/* Clear EITR Select mapping */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
/* Mark all the VFs as inactive */
for (i = 0; i < adapter->num_vfs; i++)
adapter->vfinfo[i].clear_to_send = false;
/* ping all the active vfs to let them know we are going down */
ixgbe_ping_all_vfs(adapter);
/* Disable all VFTE/VFRE TX/RX */
ixgbe_disable_tx_rx(adapter);
}
/* disable transmits in the hardware now that interrupts are off */
ixgbe_disable_tx(adapter);
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
/* power down the optics for 82599 SFP+ fiber */
if (hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);
ixgbe_clean_all_tx_rings(adapter);
ixgbe_clean_all_rx_rings(adapter);
}
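/**
 * ixgbe_set_eee_capable - helper function to determine EEE support on X550
 * @adapter: board private structure
 **/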
static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
if (!hw->phy.eee_speeds_supported)
break;
adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
if (!hw->phy.eee_speeds_advertised)
break;
adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
break;
default:
adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
break;
}
}
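/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that timed out
 **/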
static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
ixgbe_tx_timeout_reset(adapter);
}
#ifdef CONFIG_IXGBE_DCB
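/**
 * ixgbe_init_dcb - initialize the default DCB configuration
 * @adapter: board private structure
 *
 * Sets the supported traffic class count per MAC type, gives each class a
 * roughly even share of a single bandwidth group, disables PFC, and
 * mirrors the result into temp_dcb_cfg.
 **/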
static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct tc_configuration *tc;
int j;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
case ixgbe_mac_82599EB:
adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
break;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
break;
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
default:
adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
break;
}
/* Configure DCB traffic classes */
for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
tc = &adapter->dcb_cfg.tc_config[j];
tc->path[DCB_TX_CONFIG].bwg_id = 0;
tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
tc->path[DCB_RX_CONFIG].bwg_id = 0;
tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
tc->dcb_pfc = pfc_disabled;
}
/* map all eight user priorities to traffic class 0 by default */
tc = &adapter->dcb_cfg.tc_config[0];
tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
adapter->dcb_cfg.pfc_mode_enable = false;
adapter->dcb_set_bitmap = 0x00;
if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
sizeof(adapter->temp_dcb_cfg));
}
#endif
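/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 * @ii: pointer to ixgbe_info for device
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/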
static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
const struct ixgbe_info *ii)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
unsigned int rss, fdir;
u32 fwsm;
int i;
/* PCI config space info */
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
hw->revision_id = pdev->revision;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
ii->get_invariants(hw);
/* Set common capability flags and settings */
rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
adapter->ring_feature[RING_F_RSS].limit = rss;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->max_q_vectors = MAX_Q_VECTORS_82599;
adapter->atr_sample_rate = 20;
fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_FDIR].limit = fdir;
adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
adapter->ring_feature[RING_F_VMDQ].limit = 1;
#ifdef CONFIG_IXGBE_DCA
adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
#endif
#ifdef CONFIG_IXGBE_DCB
adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
#endif
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
#ifdef CONFIG_IXGBE_DCB
adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif /* CONFIG_IXGBE_DCB */
#endif /* IXGBE_FCOE */
/* initialize static ixgbe jump table entries */
adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
GFP_KERNEL);
if (!adapter->jump_tables[0])
return -ENOMEM;
adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
adapter->jump_tables[i] = NULL;
adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
sizeof(struct ixgbe_mac_addr),
GFP_KERNEL);
if (!adapter->mac_table)
return -ENOMEM;
if (ixgbe_init_rss_key(adapter))
return -ENOMEM;
adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
if (!adapter->af_xdp_zc_qps)
return -ENOMEM;
/* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
adapter->max_q_vectors = MAX_Q_VECTORS_82598;
adapter->ring_feature[RING_F_FDIR].limit = 0;
adapter->atr_sample_rate = 0;
adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
#ifdef CONFIG_IXGBE_DCB
adapter->fcoe.up = 0;
#endif /* CONFIG_IXGBE_DCB */
#endif /* IXGBE_FCOE */
break;
case ixgbe_mac_82599EB:
if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_X540:
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
if (fwsm & IXGBE_FWSM_TS_ENABLED)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_x550em_a:
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
default:
break;
}
fallthrough;
case ixgbe_mac_X550EM_x:
#ifdef CONFIG_IXGBE_DCB
adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
#endif
#ifdef IXGBE_FCOE
adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
#ifdef CONFIG_IXGBE_DCB
adapter->fcoe.up = 0;
#endif /* CONFIG_IXGBE_DCB */
#endif /* IXGBE_FCOE */
fallthrough;
case ixgbe_mac_X550:
if (hw->mac.type == ixgbe_mac_X550)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
#ifdef CONFIG_IXGBE_DCA
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
#endif
break;
default:
break;
}
#ifdef IXGBE_FCOE
spin_lock_init(&adapter->fcoe.lock);
#endif
spin_lock_init(&adapter->fdir_perfect_lock);
#ifdef CONFIG_IXGBE_DCB
ixgbe_init_dcb(adapter);
#endif
ixgbe_init_ipsec_offload(adapter);
/* default flow control settings */
hw->fc.requested_mode = ixgbe_fc_full;
hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
ixgbe_pbthresh_setup(adapter);
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
#ifdef CONFIG_PCI_IOV
if (max_vfs > 0)
e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
if (hw->mac.type != ixgbe_mac_82598EB) {
if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
max_vfs = 0;
e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
}
}
#endif /* CONFIG_PCI_IOV */
/* enable itr by default in dynamic mode */
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
/* set default ring sizes */
adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
/* set default work limits */
adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
/* initialize eeprom parameters */
if (ixgbe_init_eeprom_params_generic(hw)) {
e_dev_err("EEPROM initialization failed\n");
return -EIO;
}
/* PF holds first pool slot */
set_bit(0, adapter->fwd_bitmask);
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
}
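/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/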
int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
int orig_node = dev_to_node(dev);
int ring_node = NUMA_NO_NODE;
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
if (tx_ring->q_vector)
ring_node = tx_ring->q_vector->numa_node;
/* prefer the ring's NUMA node, fall back to any node */
tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
if (!tx_ring->tx_buffer_info)
tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
set_dev_node(dev, ring_node);
tx_ring->desc = dma_alloc_coherent(dev,
tx_ring->size,
&tx_ring->dma,
GFP_KERNEL);
set_dev_node(dev, orig_node);
if (!tx_ring->desc)
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
return 0;
err:
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
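/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/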
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
int i, j = 0, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
e_err(probe, "Allocation for Tx Queue %u failed\n", i);
goto err_setup_tx;
}
for (j = 0; j < adapter->num_xdp_queues; j++) {
err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
if (!err)
continue;
e_err(probe, "Allocation for Tx Queue %u failed\n", j);
goto err_setup_tx;
}
return 0;
err_setup_tx:
while (j--)
ixgbe_free_tx_resources(adapter->xdp_ring[j]);
while (i--)
ixgbe_free_tx_resources(adapter->tx_ring[i]);
return err;
}
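/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: pointer to ixgbe_adapter
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/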
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
int orig_node = dev_to_node(dev);
int ring_node = NUMA_NO_NODE;
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
if (rx_ring->q_vector)
ring_node = rx_ring->q_vector->numa_node;
/* prefer the ring's NUMA node, fall back to any node */
rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
if (!rx_ring->rx_buffer_info)
rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
/* round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
set_dev_node(dev, ring_node);
rx_ring->desc = dma_alloc_coherent(dev,
rx_ring->size,
&rx_ring->dma,
GFP_KERNEL);
set_dev_node(dev, orig_node);
if (!rx_ring->desc)
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc)
goto err;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
rx_ring->rx_offset = ixgbe_rx_offset(rx_ring);
/* XDP RX-queue info */
if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
rx_ring->queue_index, rx_ring->q_vector->napi.napi_id) < 0)
goto err;
rx_ring->xdp_prog = adapter->xdp_prog;
return 0;
err:
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
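/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/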
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
if (!err)
continue;
e_err(probe, "Allocation for Rx Queue %u failed\n", i);
goto err_setup_rx;
}
#ifdef IXGBE_FCOE
err = ixgbe_setup_fcoe_ddp_resources(adapter);
if (!err)
#endif
return 0;
err_setup_rx:
while (i--)
ixgbe_free_rx_resources(adapter->rx_ring[i]);
return err;
}
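/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/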
void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
{
ixgbe_clean_tx_ring(tx_ring);
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
if (!tx_ring->desc)
return;
dma_free_coherent(tx_ring->dev, tx_ring->size,
tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
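/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/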
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tx_ring[i]->desc)
ixgbe_free_tx_resources(adapter->tx_ring[i]);
for (i = 0; i < adapter->num_xdp_queues; i++)
if (adapter->xdp_ring[i]->desc)
ixgbe_free_tx_resources(adapter->xdp_ring[i]);
}
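/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/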
void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
ixgbe_clean_rx_ring(rx_ring);
rx_ring->xdp_prog = NULL;
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
if (!rx_ring->desc)
return;
dma_free_coherent(rx_ring->dev, rx_ring->size,
rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
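/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/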
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
int i;
#ifdef IXGBE_FCOE
ixgbe_free_fcoe_ddp_resources(adapter);
#endif
for (i = 0; i < adapter->num_rx_queues; i++)
if (adapter->rx_ring[i]->desc)
ixgbe_free_rx_resources(adapter->rx_ring[i]);
}
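/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/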
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if (adapter->xdp_prog) {
int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
VLAN_HLEN;
int i;
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
if (new_frame_size > ixgbe_rx_bufsz(ring)) {
e_warn(probe, "Requested MTU size is not supported with XDP\n");
return -EINVAL;
}
}
}
/* For 82599EB we cannot allow legacy VFs to enable their receive
 * paths when MTU greater than 1500 is configured, so display a
 * warning that legacy VFs will be disabled.
 */
if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
(adapter->hw.mac.type == ixgbe_mac_82599EB) &&
(new_mtu > ETH_DATA_LEN))
e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
return 0;
}
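/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/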
int ixgbe_open(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
int err, queues;
/* disallow open during test */
if (test_bit(__IXGBE_TESTING, &adapter->state))
return -EBUSY;
netif_carrier_off(netdev);
/* allocate transmit descriptors */
err = ixgbe_setup_all_tx_resources(adapter);
if (err)
goto err_setup_tx;
/* allocate receive descriptors */
err = ixgbe_setup_all_rx_resources(adapter);
if (err)
goto err_setup_rx;
ixgbe_configure(adapter);
err = ixgbe_request_irq(adapter);
if (err)
goto err_req_irq;
/* Notify the stack of the actual queue counts. */
queues = adapter->num_tx_queues;
err = netif_set_real_num_tx_queues(netdev, queues);
if (err)
goto err_set_queues;
queues = adapter->num_rx_queues;
err = netif_set_real_num_rx_queues(netdev, queues);
if (err)
goto err_set_queues;
ixgbe_ptp_init(adapter);
ixgbe_up_complete(adapter);
udp_tunnel_nic_reset_ntf(netdev);
return 0;
err_set_queues:
ixgbe_free_irq(adapter);
err_req_irq:
ixgbe_free_all_rx_resources(adapter);
if (hw->phy.ops.set_phy_power && !adapter->wol)
hw->phy.ops.set_phy_power(&adapter->hw, false);
err_setup_rx:
ixgbe_free_all_tx_resources(adapter);
err_setup_tx:
ixgbe_reset(adapter);
return err;
}
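/* suspend path teardown: enter low-power link state when the PHY supports
 * it, then release the IRQs and all ring resources
 */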
static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
{
ixgbe_ptp_suspend(adapter);
if (adapter->hw.phy.ops.enter_lplu) {
adapter->hw.phy.reset_disable = true;
ixgbe_down(adapter);
adapter->hw.phy.ops.enter_lplu(&adapter->hw);
adapter->hw.phy.reset_disable = false;
} else {
ixgbe_down(adapter);
}
ixgbe_free_irq(adapter);
ixgbe_free_all_tx_resources(adapter);
ixgbe_free_all_rx_resources(adapter);
}
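/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/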
int ixgbe_close(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
ixgbe_ptp_stop(adapter);
if (netif_device_present(netdev))
ixgbe_close_suspend(adapter);
ixgbe_fdir_filter_exit(adapter);
ixgbe_release_hw_control(adapter);
return 0;
}
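/* PM resume callback: restore hw_addr and bus mastering, reset the
 * hardware, and reopen the interface if it was running at suspend time
 */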
static int __maybe_unused ixgbe_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
int err;
adapter->hw.hw_addr = adapter->io_addr;
/* order the hw_addr restore before clearing __IXGBE_DISABLED */
smp_mb__before_atomic();
clear_bit(__IXGBE_DISABLED, &adapter->state);
pci_set_master(pdev);
device_wakeup_disable(dev_d);
ixgbe_reset(adapter);
/* write ones to clear all pending Wake Up Status bits */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
rtnl_lock();
err = ixgbe_init_interrupt_scheme(adapter);
if (!err && netif_running(netdev))
err = ixgbe_open(netdev);
if (!err)
netif_device_attach(netdev);
rtnl_unlock();
return err;
}
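/* common suspend/shutdown path: detach the netdev, arm or clear the
 * wake-up filters based on adapter->wol, and report the wake decision
 * to the caller through @enable_wake
 */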
static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
u32 ctrl;
u32 wufc = adapter->wol;
rtnl_lock();
netif_device_detach(netdev);
if (netif_running(netdev))
ixgbe_close_suspend(adapter);
ixgbe_clear_interrupt_scheme(adapter);
rtnl_unlock();
if (hw->mac.ops.stop_link_on_d3)
hw->mac.ops.stop_link_on_d3(hw);
if (wufc) {
u32 fctrl;
ixgbe_set_rx_mode(netdev);
/* enable the optics for 82599 SFP+ fiber as we can WoL */
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
/* enable the reception of multicast packets */
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
fctrl |= IXGBE_FCTRL_MPE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
/* disable PCIe master access before entering D3 */
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
ctrl |= IXGBE_CTRL_GIO_DIS;
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
} else {
IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
}
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
pci_wake_from_d3(pdev, false);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
pci_wake_from_d3(pdev, !!wufc);
break;
default:
break;
}
*enable_wake = !!wufc;
if (hw->phy.ops.set_phy_power && !*enable_wake)
hw->phy.ops.set_phy_power(hw, false);
ixgbe_release_hw_control(adapter);
if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
pci_disable_device(pdev);
return 0;
}
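/* PM suspend callback: run the common shutdown path and record whether
 * wake-up stays armed
 */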
static int __maybe_unused ixgbe_suspend(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
int retval;
bool wake;
retval = __ixgbe_shutdown(pdev, &wake);
device_set_wakeup_enable(dev_d, wake);
return retval;
}
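/* PCI shutdown callback: arm wake-up if requested and drop to D3hot when
 * the system is powering off
 */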
static void ixgbe_shutdown(struct pci_dev *pdev)
{
bool wake;
__ixgbe_shutdown(pdev, &wake);
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, wake);
pci_set_power_state(pdev, PCI_D3hot);
}
}
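/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/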
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_hw_stats *hwstats = &adapter->stats;
u64 total_mpc = 0;
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
u64 alloc_rx_page = 0;
u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
u64 rsc_count = 0;
u64 rsc_flush = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
}
adapter->rsc_total_count = rsc_count;
adapter->rsc_total_flush = rsc_flush;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
if (!rx_ring)
continue;
non_eop_descs += rx_ring->rx_stats.non_eop_descs;
alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
hw_csum_rx_error += rx_ring->rx_stats.csum_err;
bytes += rx_ring->stats.bytes;
packets += rx_ring->stats.packets;
}
adapter->non_eop_descs = non_eop_descs;
adapter->alloc_rx_page = alloc_rx_page;
adapter->alloc_rx_page_failed = alloc_rx_page_failed;
adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
adapter->hw_csum_rx_error = hw_csum_rx_error;
netdev->stats.rx_bytes = bytes;
netdev->stats.rx_packets = packets;
bytes = 0;
packets = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
if (!tx_ring)
continue;
restart_queue += tx_ring->tx_stats.restart_queue;
tx_busy += tx_ring->tx_stats.tx_busy;
bytes += tx_ring->stats.bytes;
packets += tx_ring->stats.packets;
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
if (!xdp_ring)
continue;
restart_queue += xdp_ring->tx_stats.restart_queue;
tx_busy += xdp_ring->tx_stats.tx_busy;
bytes += xdp_ring->stats.bytes;
packets += xdp_ring->stats.packets;
}
adapter->restart_queue = restart_queue;
adapter->tx_busy = tx_busy;
netdev->stats.tx_bytes = bytes;
netdev->stats.tx_packets = packets;
hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
for (i = 0; i < 8; i++) {
/* for packet buffers not used, the register should read 0 */
mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
missed_rx += mpc;
hwstats->mpc[i] += mpc;
total_mpc += hwstats->mpc[i];
hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
break;
default:
break;
}
}
for (i = 0; i < 16; i++) {
hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
if ((hw->mac.type == ixgbe_mac_82599EB) ||
(hw->mac.type == ixgbe_mac_X540) ||
(hw->mac.type == ixgbe_mac_X550) ||
(hw->mac.type == ixgbe_mac_X550EM_x) ||
(hw->mac.type == ixgbe_mac_x550em_a)) {
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* read high half to clear */
hwstats->qbrc[