#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include "t4vf_common.h"
#include "t4vf_defs.h"
#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_msg.h"
#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
#define MSI_MSIX 2
#define MSI_MSI 1
#define MSI_DEFAULT MSI_MSIX
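/* Interrupt mode: start with MSI-X by default and fall back to MSI. */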
static int msi = MSI_DEFAULT;
module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI-X (2, default) or MSI (1)");
enum {
MAX_TXQ_ENTRIES = 16384,
MAX_RSPQ_ENTRIES = 16384,
MAX_RX_BUFFERS = 16384,
MIN_TXQ_ENTRIES = 32,
MIN_RSPQ_ENTRIES = 128,
MIN_FL_ENTRIES = 16,
EQ_UNIT = SGE_EQ_IDXSIZE,
FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
MIN_FL_RESID = FL_PER_EQ_UNIT,
};
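/* Module-level debugfs root; per-adapter directories are created below it. */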
static struct dentry *cxgb4vf_debugfs_root;
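/*
 * OS callback from the common code: the link state of a Virtual Interface
 * has changed, so update the carrier state and log the new link parameters.
 */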
void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
{
struct net_device *dev = adapter->port[pidx];
if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
return;
if (link_ok) {
const char *s;
const char *fc;
const struct port_info *pi = netdev_priv(dev);
netif_carrier_on(dev);
switch (pi->link_cfg.speed) {
case 100:
s = "100Mbps";
break;
case 1000:
s = "1Gbps";
break;
case 10000:
s = "10Gbps";
break;
case 25000:
s = "25Gbps";
break;
case 40000:
s = "40Gbps";
break;
case 100000:
s = "100Gbps";
break;
default:
s = "unknown";
break;
}
switch ((int)pi->link_cfg.fc) {
case PAUSE_RX:
fc = "RX";
break;
case PAUSE_TX:
fc = "TX";
break;
case PAUSE_RX | PAUSE_TX:
fc = "RX/TX";
break;
default:
fc = "no";
break;
}
netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
} else {
netif_carrier_off(dev);
netdev_info(dev, "link down\n");
}
}
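/*
 * OS callback from the common code: a port module was inserted or removed;
 * report what we know about the new module type.
 */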
void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
{
static const char * const mod_str[] = {
NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
};
const struct net_device *dev = adapter->port[pidx];
const struct port_info *pi = netdev_priv(dev);
if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
dev->name);
else if (pi->mod_type < ARRAY_SIZE(mod_str))
dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
dev->name, mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		dev_info(adapter->pdev_dev, "%s: unsupported optical port module inserted\n",
			 dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		dev_info(adapter->pdev_dev, "%s: unknown port module inserted, forcing TWINAX\n",
			 dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
			 dev->name);
	else
		dev_info(adapter->pdev_dev, "%s: unknown module type %d inserted\n",
			 dev->name, pi->mod_type);
}
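/*
 * Fill the MPS hash filter with every MAC address currently on the
 * adapter's hash list (the overflow path once the exact-match TCAM fills).
 */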
static int cxgb4vf_set_addr_hash(struct port_info *pi)
{
struct adapter *adapter = pi->adapter;
u64 vec = 0;
bool ucast = false;
struct hash_mac_addr *entry;
list_for_each_entry(entry, &adapter->mac_hlist, list) {
ucast |= is_unicast_ether_addr(entry->addr);
vec |= (1ULL << hash_mac_addr(entry->addr));
}
return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
}
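/*
 * Install a new interface MAC address: try an exact-match TCAM entry first
 * and fall back to the hash filter when the TCAM is exhausted (-ENOMEM).
 */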
static int cxgb4vf_change_mac(struct port_info *pi, unsigned int viid,
int *tcam_idx, const u8 *addr, bool persistent)
{
struct hash_mac_addr *new_entry, *entry;
struct adapter *adapter = pi->adapter;
int ret;
ret = t4vf_change_mac(adapter, viid, *tcam_idx, addr, persistent);
if (ret == -ENOMEM) {
list_for_each_entry(entry, &adapter->mac_hlist, list) {
if (entry->iface_mac) {
ether_addr_copy(entry->addr, addr);
goto set_hash;
}
}
new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
if (!new_entry)
return -ENOMEM;
ether_addr_copy(new_entry->addr, addr);
new_entry->iface_mac = true;
list_add_tail(&new_entry->list, &adapter->mac_hlist);
set_hash:
ret = cxgb4vf_set_addr_hash(pi);
} else if (ret >= 0) {
*tcam_idx = ret;
ret = 0;
}
return ret;
}
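/* Set up an interface's RX mode, MAC address and link for first use. */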
static int link_start(struct net_device *dev)
{
int ret;
struct port_info *pi = netdev_priv(dev);
ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
true);
if (ret == 0)
ret = cxgb4vf_change_mac(pi, pi->viid,
&pi->xact_addr_filt,
dev->dev_addr, true);
if (ret == 0)
ret = t4vf_enable_pi(pi->adapter, pi, true, true);
return ret;
}
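/* Build human-readable MSI-X vector names from the interface names. */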
static void name_msix_vecs(struct adapter *adapter)
{
int namelen = sizeof(adapter->msix_info[0].desc) - 1;
int pidx;
snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
"%s-FWeventq", adapter->name);
adapter->msix_info[MSIX_FW].desc[namelen] = 0;
for_each_port(adapter, pidx) {
struct net_device *dev = adapter->port[pidx];
const struct port_info *pi = netdev_priv(dev);
int qs, msi;
for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
snprintf(adapter->msix_info[msi].desc, namelen,
"%s-%d", dev->name, qs);
adapter->msix_info[msi].desc[namelen] = 0;
}
}
}
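/* Request IRQs for the firmware event queue and every Ethernet RX queue. */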
static int request_msix_queue_irqs(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
int rxq, msi, err;
err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
if (err)
return err;
msi = MSIX_IQFLINT;
for_each_ethrxq(s, rxq) {
err = request_irq(adapter->msix_info[msi].vec,
t4vf_sge_intr_msix, 0,
adapter->msix_info[msi].desc,
&s->ethrxq[rxq].rspq);
if (err)
goto err_free_irqs;
msi++;
}
return 0;
err_free_irqs:
while (--rxq >= 0)
free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
return err;
}
static void free_msix_queue_irqs(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
int rxq, msi;
free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
msi = MSIX_IQFLINT;
for_each_ethrxq(s, rxq)
free_irq(adapter->msix_info[msi++].vec,
&s->ethrxq[rxq].rspq);
}
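/* Enable NAPI processing and (re)arm a response queue's interrupt holdoff. */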
static void qenable(struct sge_rspq *rspq)
{
napi_enable(&rspq->napi);
t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
CIDXINC_V(0) |
SEINTARM_V(rspq->intr_params) |
INGRESSQID_V(rspq->cntxt_id));
}
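/*
 * Enable NAPI and interrupts on all response queues; in MSI (forwarded
 * interrupt) mode also re-arm the forwarded-interrupt queue itself.
 */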
static void enable_rx(struct adapter *adapter)
{
int rxq;
struct sge *s = &adapter->sge;
for_each_ethrxq(s, rxq)
qenable(&s->ethrxq[rxq].rspq);
qenable(&s->fw_evtq);
if (adapter->flags & CXGB4VF_USING_MSI)
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
CIDXINC_V(0) |
SEINTARM_V(s->intrq.intr_params) |
INGRESSQID_V(s->intrq.cntxt_id));
}
static void quiesce_rx(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
int rxq;
for_each_ethrxq(s, rxq)
napi_disable(&s->ethrxq[rxq].rspq.napi);
napi_disable(&s->fw_evtq.napi);
}
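/*
 * Process responses from the firmware event queue: firmware command replies
 * and SGE egress-queue update notifications (which may arrive encapsulated
 * in CPL_FW4_MSG messages).
 */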
static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
const struct pkt_gl *gl)
{
struct adapter *adapter = rspq->adapter;
u8 opcode = ((const struct rss_header *)rsp)->opcode;
void *cpl = (void *)(rsp + 1);
switch (opcode) {
case CPL_FW6_MSG: {
const struct cpl_fw6_msg *fw_msg = cpl;
if (fw_msg->type == FW6_TYPE_CMD_RPL)
t4vf_handle_fw_rpl(adapter, fw_msg->data);
break;
}
case CPL_FW4_MSG: {
const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
if (opcode != CPL_SGE_EGR_UPDATE) {
dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
, opcode);
break;
}
cpl = (void *)p;
}
fallthrough;
case CPL_SGE_EGR_UPDATE: {
const struct cpl_sge_egr_update *p = cpl;
unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
struct sge *s = &adapter->sge;
struct sge_txq *tq;
struct sge_eth_txq *txq;
unsigned int eq_idx;
eq_idx = EQ_IDX(s, qid);
if (unlikely(eq_idx >= MAX_EGRQ)) {
dev_err(adapter->pdev_dev,
"Egress Update QID %d out of range\n", qid);
break;
}
tq = s->egr_map[eq_idx];
if (unlikely(tq == NULL)) {
dev_err(adapter->pdev_dev,
"Egress Update QID %d TXQ=NULL\n", qid);
break;
}
txq = container_of(tq, struct sge_eth_txq, q);
if (unlikely(tq->abs_id != qid)) {
dev_err(adapter->pdev_dev,
"Egress Update QID %d refers to TXQ %d\n",
qid, tq->abs_id);
break;
}
txq->q.restarts++;
netif_tx_wake_queue(txq->txq);
break;
}
default:
dev_err(adapter->pdev_dev,
"unexpected CPL %#x on FW event queue\n", opcode);
}
return 0;
}
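/*
 * Allocate the SGE queues -- the forwarded-interrupt queue (MSI mode only),
 * the firmware event queue, and an RX/TX queue pair per Queue Set -- and
 * populate the absolute-to-local queue ID translation tables.
 */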
static int setup_sge_queues(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
int err, pidx, msix;
bitmap_zero(s->starving_fl, MAX_EGRQ);
if (adapter->flags & CXGB4VF_USING_MSI) {
err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
adapter->port[0], 0, NULL, NULL);
if (err)
goto err_free_queues;
}
err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
MSIX_FW, NULL, fwevtq_handler);
if (err)
goto err_free_queues;
msix = MSIX_IQFLINT;
for_each_port(adapter, pidx) {
struct net_device *dev = adapter->port[pidx];
struct port_info *pi = netdev_priv(dev);
struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
int qs;
for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
dev, msix++,
&rxq->fl, t4vf_ethrx_handler);
if (err)
goto err_free_queues;
err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
netdev_get_tx_queue(dev, qs),
s->fw_evtq.cntxt_id);
if (err)
goto err_free_queues;
rxq->rspq.idx = qs;
memset(&rxq->stats, 0, sizeof(rxq->stats));
}
}
s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
for_each_port(adapter, pidx) {
struct net_device *dev = adapter->port[pidx];
struct port_info *pi = netdev_priv(dev);
struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
int qs;
for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
EQ_MAP(s, txq->q.abs_id) = &txq->q;
rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
}
}
return 0;
err_free_queues:
t4vf_free_sge_resources(adapter);
return err;
}
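/*
 * Load each Virtual Interface's RSS indirection table with the absolute IDs
 * of its response queues, and pin the default queue where the RSS mode
 * requires one.
 */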
static int setup_rss(struct adapter *adapter)
{
int pidx;
for_each_port(adapter, pidx) {
struct port_info *pi = adap2pinfo(adapter, pidx);
struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
u16 rss[MAX_PORT_QSETS];
int qs, err;
for (qs = 0; qs < pi->nqsets; qs++)
rss[qs] = rxq[qs].rspq.abs_id;
err = t4vf_config_rss_range(adapter, pi->viid,
0, pi->rss_size, rss, pi->nqsets);
if (err)
return err;
switch (adapter->params.rss.mode) {
case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
union rss_vi_config config;
err = t4vf_read_rss_vi_config(adapter,
pi->viid,
&config);
if (err)
return err;
config.basicvirtual.defaultq =
rxq[0].rspq.abs_id;
err = t4vf_write_rss_vi_config(adapter,
pi->viid,
&config);
if (err)
return err;
}
break;
}
}
return 0;
}
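/*
 * First-open initialization: allocate queues and RSS, hook up interrupts,
 * then enable RX and start the SGE.  Called when the first interface on
 * the adapter is brought up.
 */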
static int adapter_up(struct adapter *adapter)
{
int err;
if ((adapter->flags & CXGB4VF_FULL_INIT_DONE) == 0) {
err = setup_sge_queues(adapter);
if (err)
return err;
err = setup_rss(adapter);
if (err) {
t4vf_free_sge_resources(adapter);
return err;
}
if (adapter->flags & CXGB4VF_USING_MSIX)
name_msix_vecs(adapter);
adapter->flags |= CXGB4VF_FULL_INIT_DONE;
}
BUG_ON((adapter->flags &
(CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
if (adapter->flags & CXGB4VF_USING_MSIX)
err = request_msix_queue_irqs(adapter);
else
err = request_irq(adapter->pdev->irq,
t4vf_intr_handler(adapter), 0,
adapter->name, adapter);
if (err) {
dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
err);
return err;
}
enable_rx(adapter);
t4vf_sge_start(adapter);
return 0;
}
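/* Inverse of adapter_up(): release the IRQs and quiesce RX processing. */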
static void adapter_down(struct adapter *adapter)
{
if (adapter->flags & CXGB4VF_USING_MSIX)
free_msix_queue_irqs(adapter);
else
free_irq(adapter->pdev->irq, adapter);
quiesce_rx(adapter);
}
static int cxgb4vf_open(struct net_device *dev)
{
int err;
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
if (!(adapter->flags & CXGB4VF_FW_OK))
return -ENXIO;
if (adapter->open_device_map == 0) {
err = adapter_up(adapter);
if (err)
return err;
}
err = t4vf_update_port_info(pi);
if (err < 0)
goto err_unwind;
err = link_start(dev);
if (err)
goto err_unwind;
pi->vlan_id = t4vf_get_vf_vlan_acl(adapter);
netif_tx_start_all_queues(dev);
set_bit(pi->port_id, &adapter->open_device_map);
return 0;
err_unwind:
if (adapter->open_device_map == 0)
adapter_down(adapter);
return err;
}
static int cxgb4vf_stop(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
t4vf_enable_pi(adapter, pi, false, false);
clear_bit(pi->port_id, &adapter->open_device_map);
if (adapter->open_device_map == 0)
adapter_down(adapter);
return 0;
}
static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
{
struct t4vf_port_stats stats;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adapter = pi->adapter;
struct net_device_stats *ns = &dev->stats;
int err;
spin_lock(&adapter->stats_lock);
err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
spin_unlock(&adapter->stats_lock);
memset(ns, 0, sizeof(*ns));
if (err)
return ns;
ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
stats.tx_ucast_bytes + stats.tx_offload_bytes);
ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
stats.tx_ucast_frames + stats.tx_offload_frames);
ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
stats.rx_ucast_bytes);
ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
stats.rx_ucast_frames);
ns->multicast = stats.rx_mcast_frames;
ns->tx_errors = stats.tx_drop_frames;
ns->rx_errors = stats.rx_err_frames;
return ns;
}
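/*
 * __dev_uc_sync()/__dev_mc_sync() callbacks: mirror a kernel address-list
 * change into the hardware exact-match filter, overflowing into the hash
 * filter when the exact-match resources run out.
 */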
static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
struct port_info *pi = netdev_priv(netdev);
struct adapter *adapter = pi->adapter;
int ret;
u64 mhash = 0;
u64 uhash = 0;
bool free = false;
bool ucast = is_unicast_ether_addr(mac_addr);
const u8 *maclist[1] = {mac_addr};
struct hash_mac_addr *new_entry;
ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
NULL, ucast ? &uhash : &mhash, false);
if (ret < 0)
goto out;
if (uhash || mhash) {
new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
if (!new_entry)
return -ENOMEM;
ether_addr_copy(new_entry->addr, mac_addr);
list_add_tail(&new_entry->list, &adapter->mac_hlist);
ret = cxgb4vf_set_addr_hash(pi);
}
out:
return ret < 0 ? ret : 0;
}
static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
struct port_info *pi = netdev_priv(netdev);
struct adapter *adapter = pi->adapter;
int ret;
const u8 *maclist[1] = {mac_addr};
struct hash_mac_addr *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
if (ether_addr_equal(entry->addr, mac_addr)) {
list_del(&entry->list);
kfree(entry);
return cxgb4vf_set_addr_hash(pi);
}
}
ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
return ret < 0 ? -EINVAL : 0;
}
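/*
 * Synchronize the hardware address filters and promiscuity flags with the
 * current net device state.
 */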
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
struct port_info *pi = netdev_priv(dev);
__dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
__dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
(dev->flags & IFF_PROMISC) != 0,
(dev->flags & IFF_ALLMULTI) != 0,
1, -1, sleep_ok);
}
static void cxgb4vf_set_rxmode(struct net_device *dev)
{
set_rxmode(dev, -1, false);
}
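/* Find the SGE holdoff timer index closest to a requested microsecond value. */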
static int closest_timer(const struct sge *s, int us)
{
int i, timer_idx = 0, min_delta = INT_MAX;
for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
int delta = us - s->timer_val[i];
if (delta < 0)
delta = -delta;
if (delta < min_delta) {
min_delta = delta;
timer_idx = i;
}
}
return timer_idx;
}
static int closest_thres(const struct sge *s, int thres)
{
int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
delta = thres - s->counter_val[i];
if (delta < 0)
delta = -delta;
if (delta < min_delta) {
min_delta = delta;
pktcnt_idx = i;
}
}
return pktcnt_idx;
}
static unsigned int qtimer_val(const struct adapter *adapter,
const struct sge_rspq *rspq)
{
unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
return timer_idx < SGE_NTIMERS
? adapter->sge.timer_val[timer_idx]
: 0;
}
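/*
 * Set an RX queue's interrupt holdoff timer and packet-count threshold;
 * changing the threshold on a live queue requires a firmware call.
 */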
static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
unsigned int us, unsigned int cnt)
{
unsigned int timer_idx;
if ((us | cnt) == 0)
cnt = 1;
if (cnt) {
int err;
u32 v, pktcnt_idx;
pktcnt_idx = closest_thres(&adapter->sge, cnt);
if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
FW_PARAMS_PARAM_X_V(
FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
if (err)
return err;
}
rspq->pktcnt_idx = pktcnt_idx;
}
timer_idx = (us == 0
? SGE_TIMER_RSTRT_CNTR
: closest_timer(&adapter->sge, us));
rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
QINTR_CNT_EN_V(cnt > 0));
return 0;
}
static inline unsigned int mk_adap_vers(const struct adapter *adapter)
{
return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
}
static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int ret = 0;
switch (cmd) {
default:
ret = -EOPNOTSUPP;
break;
}
return ret;
}
static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
{
int ret;
struct port_info *pi = netdev_priv(dev);
ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
-1, -1, -1, -1, true);
if (!ret)
dev->mtu = new_mtu;
return ret;
}
static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
netdev_features_t features)
{
if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_CTAG_TX;
else
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}
static int cxgb4vf_set_features(struct net_device *dev,
netdev_features_t features)
{
struct port_info *pi = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
features & NETIF_F_HW_VLAN_CTAG_TX, 0);
return 0;
}
static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
{
int ret;
struct sockaddr *addr = _addr;
struct port_info *pi = netdev_priv(dev);
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
ret = cxgb4vf_change_mac(pi, pi->viid, &pi->xact_addr_filt,
addr->sa_data, true);
if (ret < 0)
return ret;
eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb4vf_poll_controller(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
if (adapter->flags & CXGB4VF_USING_MSIX) {
struct sge_eth_rxq *rxq;
int nqsets;
rxq = &adapter->sge.ethrxq[pi->first_qset];
for (nqsets = pi->nqsets; nqsets; nqsets--) {
t4vf_sge_intr_msix(0, &rxq->rspq);
rxq++;
}
	} else {
		t4vf_intr_handler(adapter)(0, adapter);
	}
}
#endif
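/* Translate firmware port/module types into the ethtool PORT_* namespace. */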
static int from_fw_port_mod_type(enum fw_port_type port_type,
enum fw_port_module_type mod_type)
{
if (port_type == FW_PORT_TYPE_BT_SGMII ||
port_type == FW_PORT_TYPE_BT_XFI ||
port_type == FW_PORT_TYPE_BT_XAUI) {
return PORT_TP;
} else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
port_type == FW_PORT_TYPE_FIBER_XAUI) {
return PORT_FIBRE;
} else if (port_type == FW_PORT_TYPE_SFP ||
port_type == FW_PORT_TYPE_QSFP_10G ||
port_type == FW_PORT_TYPE_QSA ||
port_type == FW_PORT_TYPE_QSFP ||
port_type == FW_PORT_TYPE_CR4_QSFP ||
port_type == FW_PORT_TYPE_CR_QSFP ||
port_type == FW_PORT_TYPE_CR2_QSFP ||
port_type == FW_PORT_TYPE_SFP28) {
if (mod_type == FW_PORT_MOD_TYPE_LR ||
mod_type == FW_PORT_MOD_TYPE_SR ||
mod_type == FW_PORT_MOD_TYPE_ER ||
mod_type == FW_PORT_MOD_TYPE_LRM)
return PORT_FIBRE;
else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
return PORT_DA;
else
return PORT_OTHER;
} else if (port_type == FW_PORT_TYPE_KR4_100G ||
port_type == FW_PORT_TYPE_KR_SFP28 ||
port_type == FW_PORT_TYPE_KR_XLAUI) {
return PORT_NONE;
}
return PORT_OTHER;
}
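/* Translate firmware port capability words into ethtool link-mode bits. */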
static void fw_caps_to_lmm(enum fw_port_type port_type,
unsigned int fw_caps,
unsigned long *link_mode_mask)
{
#define SET_LMM(__lmm_name) \
__set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
link_mode_mask)
#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
do { \
if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
SET_LMM(__lmm_name); \
} while (0)
switch (port_type) {
case FW_PORT_TYPE_BT_SGMII:
case FW_PORT_TYPE_BT_XFI:
case FW_PORT_TYPE_BT_XAUI:
SET_LMM(TP);
FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
break;
case FW_PORT_TYPE_KX4:
case FW_PORT_TYPE_KX:
SET_LMM(Backplane);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
break;
case FW_PORT_TYPE_KR:
SET_LMM(Backplane);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
break;
case FW_PORT_TYPE_BP_AP:
SET_LMM(Backplane);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
break;
case FW_PORT_TYPE_BP4_AP:
SET_LMM(Backplane);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
break;
case FW_PORT_TYPE_FIBER_XFI:
case FW_PORT_TYPE_FIBER_XAUI:
case FW_PORT_TYPE_SFP:
case FW_PORT_TYPE_QSFP_10G:
case FW_PORT_TYPE_QSA:
SET_LMM(FIBRE);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
break;
case FW_PORT_TYPE_BP40_BA:
case FW_PORT_TYPE_QSFP:
SET_LMM(FIBRE);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
break;
case FW_PORT_TYPE_CR_QSFP:
case FW_PORT_TYPE_SFP28:
SET_LMM(FIBRE);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
break;
case FW_PORT_TYPE_KR_SFP28:
SET_LMM(Backplane);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
break;
case FW_PORT_TYPE_KR_XLAUI:
SET_LMM(Backplane);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
break;
case FW_PORT_TYPE_CR2_QSFP:
SET_LMM(FIBRE);
FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full);
break;
case FW_PORT_TYPE_KR4_100G:
case FW_PORT_TYPE_CR4_QSFP:
SET_LMM(FIBRE);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full);
break;
default:
break;
}
if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
} else {
SET_LMM(FEC_NONE);
}
FW_CAPS_TO_LMM(ANEG, Autoneg);
FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
#undef FW_CAPS_TO_LMM
#undef SET_LMM
}
static int cxgb4vf_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings)
{
struct port_info *pi = netdev_priv(dev);
struct ethtool_link_settings *base = &link_ksettings->base;
if (!netif_running(dev))
(void)t4vf_update_port_info(pi);
ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
if (pi->mdio_addr >= 0) {
base->phy_address = pi->mdio_addr;
base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
? ETH_MDIO_SUPPORTS_C22
: ETH_MDIO_SUPPORTS_C45);
} else {
base->phy_address = 255;
base->mdio_support = 0;
}
fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
link_ksettings->link_modes.supported);
fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
link_ksettings->link_modes.advertising);
fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
link_ksettings->link_modes.lp_advertising);
if (netif_carrier_ok(dev)) {
base->speed = pi->link_cfg.speed;
base->duplex = DUPLEX_FULL;
} else {
base->speed = SPEED_UNKNOWN;
base->duplex = DUPLEX_UNKNOWN;
}
base->autoneg = pi->link_cfg.autoneg;
if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, Autoneg);
if (pi->link_cfg.autoneg)
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, Autoneg);
return 0;
}
static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
{
unsigned int eth_fec = 0;
if (fw_fec & FW_PORT_CAP32_FEC_RS)
eth_fec |= ETHTOOL_FEC_RS;
if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
eth_fec |= ETHTOOL_FEC_BASER;
if (!eth_fec)
eth_fec = ETHTOOL_FEC_OFF;
return eth_fec;
}
static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
{
unsigned int eth_fec = 0;
if (cc_fec & FEC_AUTO)
eth_fec |= ETHTOOL_FEC_AUTO;
if (cc_fec & FEC_RS)
eth_fec |= ETHTOOL_FEC_RS;
if (cc_fec & FEC_BASER_RS)
eth_fec |= ETHTOOL_FEC_BASER;
if (!eth_fec)
eth_fec = ETHTOOL_FEC_OFF;
return eth_fec;
}
static int cxgb4vf_get_fecparam(struct net_device *dev,
struct ethtool_fecparam *fec)
{
const struct port_info *pi = netdev_priv(dev);
const struct link_config *lc = &pi->link_cfg;
fec->fec = fwcap_to_eth_fec(lc->pcaps);
if (fec->fec != ETHTOOL_FEC_OFF)
fec->fec |= ETHTOOL_FEC_AUTO;
fec->active_fec = cc_to_eth_fec(lc->fec);
return 0;
}
static void cxgb4vf_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
struct adapter *adapter = netdev2adap(dev);
strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
strscpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
}
static u32 cxgb4vf_get_msglevel(struct net_device *dev)
{
return netdev2adap(dev)->msg_enable;
}
static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
{
netdev2adap(dev)->msg_enable = msglevel;
}
static void cxgb4vf_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *rp,
struct kernel_ethtool_ringparam *kernel_rp,
struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
const struct sge *s = &pi->adapter->sge;
rp->rx_max_pending = MAX_RX_BUFFERS;
rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
rp->rx_jumbo_max_pending = 0;
rp->tx_max_pending = MAX_TXQ_ENTRIES;
rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
rp->rx_jumbo_pending = 0;
rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
static int cxgb4vf_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *rp,
struct kernel_ethtool_ringparam *kernel_rp,
struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
struct sge *s = &adapter->sge;
int qs;
if (rp->rx_pending > MAX_RX_BUFFERS ||
rp->rx_jumbo_pending ||
rp->tx_pending > MAX_TXQ_ENTRIES ||
rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
rp->rx_pending < MIN_FL_ENTRIES ||
rp->tx_pending < MIN_TXQ_ENTRIES)
return -EINVAL;
if (adapter->flags & CXGB4VF_FULL_INIT_DONE)
return -EBUSY;
for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
s->ethtxq[qs].q.size = rp->tx_pending;
}
return 0;
}
static int cxgb4vf_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coalesce,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
const struct adapter *adapter = pi->adapter;
const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
coalesce->rx_max_coalesced_frames =
((rspq->intr_params & QINTR_CNT_EN_F)
? adapter->sge.counter_val[rspq->pktcnt_idx]
: 0);
return 0;
}
static int cxgb4vf_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coalesce,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
return set_rxq_intr_params(adapter,
&adapter->sge.ethrxq[pi->first_qset].rspq,
coalesce->rx_coalesce_usecs,
coalesce->rx_max_coalesced_frames);
}
static void cxgb4vf_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pauseparam)
{
struct port_info *pi = netdev_priv(dev);
pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0;
pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0;
}
static int cxgb4vf_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state)
{
unsigned int val;
struct port_info *pi = netdev_priv(dev);
if (state == ETHTOOL_ID_ACTIVE)
val = 0xffff;
else if (state == ETHTOOL_ID_INACTIVE)
val = 0;
else
return -EINVAL;
return t4vf_identify_port(pi->adapter, pi->viid, val);
}
struct queue_port_stats {
u64 tso;
u64 tx_csum;
u64 rx_csum;
u64 vlan_ex;
u64 vlan_ins;
u64 lro_pkts;
u64 lro_merged;
};
static const char stats_strings[][ETH_GSTRING_LEN] = {
"TxBroadcastBytes ",
"TxBroadcastFrames ",
"TxMulticastBytes ",
"TxMulticastFrames ",
"TxUnicastBytes ",
"TxUnicastFrames ",
"TxDroppedFrames ",
"TxOffloadBytes ",
"TxOffloadFrames ",
"RxBroadcastBytes ",
"RxBroadcastFrames ",
"RxMulticastBytes ",
"RxMulticastFrames ",
"RxUnicastBytes ",
"RxUnicastFrames ",
"RxErrorFrames ",
"TSO ",
"TxCsumOffload ",
"RxCsumGood ",
"VLANextractions ",
"VLANinsertions ",
"GROPackets ",
"GROMerged ",
};
static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(stats_strings);
default:
return -EOPNOTSUPP;
}
}
static void cxgb4vf_get_strings(struct net_device *dev,
u32 sset,
u8 *data)
{
switch (sset) {
case ETH_SS_STATS:
memcpy(data, stats_strings, sizeof(stats_strings));
break;
}
}
static void collect_sge_port_stats(const struct adapter *adapter,
const struct port_info *pi,
struct queue_port_stats *stats)
{
const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
int qs;
memset(stats, 0, sizeof(*stats));
for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
stats->tso += txq->tso;
stats->tx_csum += txq->tx_cso;
stats->rx_csum += rxq->stats.rx_cso;
stats->vlan_ex += rxq->stats.vlan_ex;
stats->vlan_ins += txq->vlan_ins;
stats->lro_pkts += rxq->stats.lro_pkts;
stats->lro_merged += rxq->stats.lro_merged;
}
}
static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats,
u64 *data)
{
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adapter = pi->adapter;
int err = t4vf_get_port_stats(adapter, pi->pidx,
(struct t4vf_port_stats *)data);
if (err)
memset(data, 0, sizeof(struct t4vf_port_stats));
data += sizeof(struct t4vf_port_stats) / sizeof(u64);
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}
static int cxgb4vf_get_regs_len(struct net_device *dev)
{
return T4VF_REGMAP_SIZE;
}
static void reg_block_dump(struct adapter *adapter, void *regbuf,
unsigned int start, unsigned int end)
{
u32 *bp = regbuf + start - T4VF_REGMAP_START;
for ( ; start <= end; start += sizeof(u32)) {
if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
*bp++ = 0xffff;
else
*bp++ = t4_read_reg(adapter, start);
}
}
static void cxgb4vf_get_regs(struct net_device *dev,
struct ethtool_regs *regs,
void *regbuf)
{
struct adapter *adapter = netdev2adap(dev);
regs->version = mk_adap_vers(adapter);
memset(regbuf, 0, T4VF_REGMAP_SIZE);
reg_block_dump(adapter, regbuf,
T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
reg_block_dump(adapter, regbuf,
T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
reg_block_dump(adapter, regbuf,
T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
reg_block_dump(adapter, regbuf,
T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
reg_block_dump(adapter, regbuf,
T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
}
static void cxgb4vf_get_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
wol->supported = 0;
wol->wolopts = 0;
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static const struct ethtool_ops cxgb4vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_link_ksettings = cxgb4vf_get_link_ksettings,
.get_fecparam = cxgb4vf_get_fecparam,
.get_drvinfo = cxgb4vf_get_drvinfo,
.get_msglevel = cxgb4vf_get_msglevel,
.set_msglevel = cxgb4vf_set_msglevel,
.get_ringparam = cxgb4vf_get_ringparam,
.set_ringparam = cxgb4vf_set_ringparam,
.get_coalesce = cxgb4vf_get_coalesce,
.set_coalesce = cxgb4vf_set_coalesce,
.get_pauseparam = cxgb4vf_get_pauseparam,
.get_link = ethtool_op_get_link,
.get_strings = cxgb4vf_get_strings,
.set_phys_id = cxgb4vf_phys_id,
.get_sset_count = cxgb4vf_get_sset_count,
.get_ethtool_stats = cxgb4vf_get_ethtool_stats,
.get_regs_len = cxgb4vf_get_regs_len,
.get_regs = cxgb4vf_get_regs,
.get_wol = cxgb4vf_get_wol,
};
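/* debugfs support: dump the adapter's mailbox command/reply log. */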
static int mboxlog_show(struct seq_file *seq, void *v)
{
struct adapter *adapter = seq->private;
struct mbox_cmd_log *log = adapter->mbox_log;
struct mbox_cmd *entry;
int entry_idx, i;
if (v == SEQ_START_TOKEN) {
seq_printf(seq,
"%10s %15s %5s %5s %s\n",
"Seq#", "Tstamp", "Atime", "Etime",
"Command/Reply");
return 0;
}
entry_idx = log->cursor + ((uintptr_t)v - 2);
if (entry_idx >= log->size)
entry_idx -= log->size;
entry = mbox_cmd_log_entry(log, entry_idx);
if (entry->timestamp == 0)
return 0;
seq_printf(seq, "%10u %15llu %5d %5d",
entry->seqno, entry->timestamp,
entry->access, entry->execute);
for (i = 0; i < MBOX_LEN / 8; i++) {
u64 flit = entry->cmd[i];
u32 hi = (u32)(flit >> 32);
u32 lo = (u32)flit;
seq_printf(seq, " %08x %08x", hi, lo);
}
seq_puts(seq, "\n");
return 0;
}
static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
{
struct adapter *adapter = seq->private;
struct mbox_cmd_log *log = adapter->mbox_log;
return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
}
static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
{
return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
}
static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return mboxlog_get_idx(seq, *pos);
}
static void mboxlog_stop(struct seq_file *seq, void *v)
{
}
static const struct seq_operations mboxlog_sops = {
.start = mboxlog_start,
.next = mboxlog_next,
.stop = mboxlog_stop,
.show = mboxlog_show
};
DEFINE_SEQ_ATTRIBUTE(mboxlog);
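/* Show SGE Queue Set information, QPL Queue Sets per output line. */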
#define QPL 4
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
struct adapter *adapter = seq->private;
int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
int qs, r = (uintptr_t)v - 1;
if (r)
seq_putc(seq, '\n');
#define S3(fmt_spec, s, v) \
do {\
seq_printf(seq, "%-12s", s); \
for (qs = 0; qs < n; ++qs) \
seq_printf(seq, " %16" fmt_spec, v); \
seq_putc(seq, '\n'); \
} while (0)
#define S(s, v) S3("s", s, v)
#define T(s, v) S3("u", s, txq[qs].v)
#define R(s, v) S3("u", s, rxq[qs].v)
if (r < eth_entries) {
const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
int n = min(QPL, adapter->sge.ethqsets - QPL * r);
S("QType:", "Ethernet");
S("Interface:",
(rxq[qs].rspq.netdev
? rxq[qs].rspq.netdev->name
: "N/A"));
S3("d", "Port:",
(rxq[qs].rspq.netdev
? ((struct port_info *)
netdev_priv(rxq[qs].rspq.netdev))->port_id
: -1));
T("TxQ ID:", q.abs_id);
T("TxQ size:", q.size);
T("TxQ inuse:", q.in_use);
T("TxQ PIdx:", q.pidx);
T("TxQ CIdx:", q.cidx);
R("RspQ ID:", rspq.abs_id);
R("RspQ size:", rspq.size);
R("RspQE size:", rspq.iqe_len);
S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
S3("u", "Intr pktcnt:",
adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
R("RspQ CIdx:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
R("FL ID:", fl.abs_id);
R("FL size:", fl.size - MIN_FL_RESID);
R("FL avail:", fl.avail);
R("FL PIdx:", fl.pidx);
R("FL CIdx:", fl.cidx);
return 0;
}
r -= eth_entries;
if (r == 0) {
const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
seq_printf(seq, "%-12s %16u\n", "Intr delay:",
qtimer_val(adapter, evtq));
seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
adapter->sge.counter_val[evtq->pktcnt_idx]);
seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
} else if (r == 1) {
const struct sge_rspq *intrq = &adapter->sge.intrq;
seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
seq_printf(seq, "%-12s %16u\n", "Intr delay:",
qtimer_val(adapter, intrq));
seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
adapter->sge.counter_val[intrq->pktcnt_idx]);
seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
}
#undef R
#undef T
#undef S
#undef S3
return 0;
}
static int sge_queue_entries(const struct adapter *adapter)
{
return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
((adapter->flags & CXGB4VF_USING_MSI) != 0);
}
static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
{
int entries = sge_queue_entries(seq->private);
return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}
static void sge_queue_stop(struct seq_file *seq, void *v)
{
}
static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
{
int entries = sge_queue_entries(seq->private);
++*pos;
return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}
static const struct seq_operations sge_qinfo_sops = {
.start = sge_queue_start,
.next = sge_queue_next,
.stop = sge_queue_stop,
.show = sge_qinfo_show
};
DEFINE_SEQ_ATTRIBUTE(sge_qinfo);
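/* Show SGE Queue Set statistics, QPL Queue Sets per output line. */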
static int sge_qstats_show(struct seq_file *seq, void *v)
{
struct adapter *adapter = seq->private;
int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
int qs, r = (uintptr_t)v - 1;
if (r)
seq_putc(seq, '\n');
#define S3(fmt, s, v) \
do { \
seq_printf(seq, "%-16s", s); \
for (qs = 0; qs < n; ++qs) \
seq_printf(seq, " %8" fmt, v); \
seq_putc(seq, '\n'); \
} while (0)
#define S(s, v) S3("s", s, v)
#define T3(fmt, s, v) S3(fmt, s, txq[qs].v)
#define T(s, v) T3("lu", s, v)
#define R3(fmt, s, v) S3(fmt, s, rxq[qs].v)
#define R(s, v) R3("lu", s, v)
if (r < eth_entries) {
const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
int n = min(QPL, adapter->sge.ethqsets - QPL * r);
S("QType:", "Ethernet");
S("Interface:",
(rxq[qs].rspq.netdev
? rxq[qs].rspq.netdev->name
: "N/A"));
R3("u", "RspQNullInts:", rspq.unhandled_irqs);
R("RxPackets:", stats.pkts);
R("RxCSO:", stats.rx_cso);
R("VLANxtract:", stats.vlan_ex);
R("LROmerged:", stats.lro_merged);
R("LROpackets:", stats.lro_pkts);
R("RxDrops:", stats.rx_drops);
T("TSO:", tso);
T("TxCSO:", tx_cso);
T("VLANins:", vlan_ins);
T("TxQFull:", q.stops);
T("TxQRestarts:", q.restarts);
T("TxMapErr:", mapping_err);
R("FLAllocErr:", fl.alloc_failed);
R("FLLrgAlcErr:", fl.large_alloc_failed);
R("FLStarving:", fl.starving);
return 0;
}
r -= eth_entries;
if (r == 0) {
const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
evtq->unhandled_irqs);
seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
} else if (r == 1) {
const struct sge_rspq *intrq = &adapter->sge.intrq;
seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
intrq->unhandled_irqs);
seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
}
#undef R
#undef T
#undef S
#undef R3
#undef T3
#undef S3
return 0;
}
static int sge_qstats_entries(const struct adapter *adapter)
{
return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
((adapter->flags & CXGB4VF_USING_MSI) != 0);
}
static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
{
int entries = sge_qstats_entries(seq->private);
return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}
static void sge_qstats_stop(struct seq_file *seq, void *v)
{
}
static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
{
int entries = sge_qstats_entries(seq->private);
(*pos)++;
return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}
static const struct seq_operations sge_qstats_sops = {
.start = sge_qstats_start,
.next = sge_qstats_next,
.stop = sge_qstats_stop,
.show = sge_qstats_show
};
DEFINE_SEQ_ATTRIBUTE(sge_qstats);
static int resources_show(struct seq_file *seq, void *v)
{
struct adapter *adapter = seq->private;
struct vf_resources *vfres = &adapter->params.vfres;
#define S(desc, fmt, var) \
seq_printf(seq, "%-60s " fmt "\n", \
desc " (" #var "):", vfres->var)
S("Virtual Interfaces", "%d", nvi);
S("Egress Queues", "%d", neq);
S("Ethernet Control", "%d", nethctrl);
S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
S("Ingress Queues", "%d", niq);
S("Traffic Class", "%d", tc);
S("Port Access Rights Mask", "%#x", pmask);
S("MAC Address Filters", "%d", nexactf);
S("Firmware Command Read Capabilities", "%#x", r_caps);
S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
#undef S
return 0;
}
DEFINE_SHOW_ATTRIBUTE(resources);
static int interfaces_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_puts(seq, "Interface Port VIID\n");
} else {
struct adapter *adapter = seq->private;
int pidx = (uintptr_t)v - 2;
struct net_device *dev = adapter->port[pidx];
struct port_info *pi = netdev_priv(dev);
seq_printf(seq, "%9s %4d %#5x\n",
dev->name, pi->port_id, pi->viid);
}
return 0;
}
static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
{
return pos <= adapter->params.nports
? (void *)(uintptr_t)(pos + 1)
: NULL;
}
static void *interfaces_start(struct seq_file *seq, loff_t *pos)
{
return *pos
? interfaces_get_idx(seq->private, *pos)
: SEQ_START_TOKEN;
}
static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
{
(*pos)++;
return interfaces_get_idx(seq->private, *pos);
}
static void interfaces_stop(struct seq_file *seq, void *v)
{
}
static const struct seq_operations interfaces_sops = {
.start = interfaces_start,
.next = interfaces_next,
.stop = interfaces_stop,
.show = interfaces_show
};
DEFINE_SEQ_ATTRIBUTE(interfaces);
struct cxgb4vf_debugfs_entry {
const char *name;
umode_t mode;
const struct file_operations *fops;
};
static struct cxgb4vf_debugfs_entry debugfs_files[] = {
{ "mboxlog", 0444, &mboxlog_fops },
{ "sge_qinfo", 0444, &sge_qinfo_fops },
{ "sge_qstats", 0444, &sge_qstats_fops },
{ "resources", 0444, &resources_fops },
{ "interfaces", 0444, &interfaces_fops },
};
static int setup_debugfs(struct adapter *adapter)
{
int i;
BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
debugfs_create_file(debugfs_files[i].name,
debugfs_files[i].mode,
adapter->debugfs_root, adapter,
debugfs_files[i].fops);
return 0;
}
static void cleanup_debugfs(struct adapter *adapter)
{
BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
}
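/*
 * Determine how many ports and Queue Sets we can support from the
 * firmware-provisioned resources: clipped by the Port Access Rights mask
 * and by the available ingress/egress queues and interrupt vectors.
 */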
static void size_nports_qsets(struct adapter *adapter)
{
struct vf_resources *vfres = &adapter->params.vfres;
unsigned int ethqsets, pmask_nports;
adapter->params.nports = vfres->nvi;
if (adapter->params.nports > MAX_NPORTS) {
dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
" allowed virtual interfaces\n", MAX_NPORTS,
adapter->params.nports);
adapter->params.nports = MAX_NPORTS;
}
pmask_nports = hweight32(adapter->params.vfres.pmask);
if (pmask_nports < adapter->params.nports) {
dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
" virtual interfaces; limited by Port Access Rights"
" mask %#x\n", pmask_nports, adapter->params.nports,
adapter->params.vfres.pmask);
adapter->params.nports = pmask_nports;
}
ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
if (vfres->nethctrl != ethqsets)
ethqsets = min(vfres->nethctrl, ethqsets);
if (vfres->neq < ethqsets*2)
ethqsets = vfres->neq/2;
if (ethqsets > MAX_ETH_QSETS)
ethqsets = MAX_ETH_QSETS;
adapter->sge.max_ethqsets = ethqsets;
if (adapter->sge.max_ethqsets < adapter->params.nports) {
dev_warn(adapter->pdev_dev, "only using %d of %d available"
" virtual interfaces (too few Queue Sets)\n",
adapter->sge.max_ethqsets, adapter->params.nports);
adapter->params.nports = adapter->sge.max_ethqsets;
}
}
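/*
 * Early adapter initialization: reset the firmware, retrieve the device,
 * VPD, SGE and RSS parameters, and size our port and Queue Set resources.
 */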
static int adap_init0(struct adapter *adapter)
{
struct sge_params *sge_params = &adapter->params.sge;
struct sge *s = &adapter->sge;
int err;
u32 param, val = 0;
err = t4vf_fw_reset(adapter);
if (err < 0) {
dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
return err;
}
err = t4vf_get_dev_params(adapter);
if (err) {
dev_err(adapter->pdev_dev, "unable to retrieve adapter"
" device parameters: err=%d\n", err);
return err;
}
err = t4vf_get_vpd_params(adapter);
if (err) {
dev_err(adapter->pdev_dev, "unable to retrieve adapter"
" VPD parameters: err=%d\n", err);
return err;
}
err = t4vf_get_sge_params(adapter);
if (err) {
dev_err(adapter->pdev_dev, "unable to retrieve adapter"
" SGE parameters: err=%d\n", err);
return err;
}
err = t4vf_get_rss_glb_config(adapter);
if (err) {
dev_err(adapter->pdev_dev, "unable to retrieve adapter"
" RSS parameters: err=%d\n", err);
return err;
}
if (adapter->params.rss.mode !=
FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
dev_err(adapter->pdev_dev, "unable to operate with global RSS"
" mode %d\n", adapter->params.rss.mode);
return -EINVAL;
}
err = t4vf_sge_init(adapter);
if (err) {
dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
" err=%d\n", err);
return err;
}
param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
val = 1;
(void) t4vf_set_params(adapter, 1, ¶m, &val);
s->timer_val[0] = core_ticks_to_us(adapter,
TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
s->timer_val[1] = core_ticks_to_us(adapter,
TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
s->timer_val[2] = core_ticks_to_us(adapter,
TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
s->timer_val[3] = core_ticks_to_us(adapter,
TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
s->timer_val[4] = core_ticks_to_us(adapter,
TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
s->timer_val[5] = core_ticks_to_us(adapter,
TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
err = t4vf_get_vfres(adapter);
if (err) {
dev_err(adapter->pdev_dev, "unable to get virtual interface"
" resources: err=%d\n", err);
return err;
}
	if (adapter->params.vfres.pmask == 0) {
		dev_err(adapter->pdev_dev,
			"no port access configured/usable!\n");
		return -EINVAL;
	}
	if (adapter->params.vfres.nvi == 0) {
		dev_err(adapter->pdev_dev,
			"no virtual interfaces configured/usable!\n");
		return -EINVAL;
	}
size_nports_qsets(adapter);
adapter->flags |= CXGB4VF_FW_OK;
return 0;
}
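/* Seed a response queue's interrupt holdoff parameters and descriptor sizes. */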
static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
u8 pkt_cnt_idx, unsigned int size,
unsigned int iqe_size)
{
rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
(pkt_cnt_idx < SGE_NCOUNTERS ?
QINTR_CNT_EN_F : 0));
rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
? pkt_cnt_idx
: 0);
rspq->iqe_len = iqe_size;
rspq->size = size;
}
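/*
 * Lay out the default Queue Set configuration: 10Gb+ ports share the
 * remaining Queue Sets (capped at the number of online CPUs), all other
 * ports get a single Queue Set each.
 */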
static void cfg_queues(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
int q10g, n10g, qidx, pidx, qs;
size_t iqe_size;
BUG_ON((adapter->flags &
(CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
n10g = 0;
for_each_port(adapter, pidx)
n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
if (n10g == 0)
q10g = 0;
else {
int n1g = (adapter->params.nports - n10g);
q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
if (q10g > num_online_cpus())
q10g = num_online_cpus();
}
qidx = 0;
for_each_port(adapter, pidx) {
struct port_info *pi = adap2pinfo(adapter, pidx);
pi->first_qset = qidx;
pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
qidx += pi->nqsets;
}
s->ethqsets = qidx;
iqe_size = 64;
for (qs = 0; qs < s->max_ethqsets; qs++) {
struct sge_eth_rxq *rxq = &s->ethrxq[qs];
struct sge_eth_txq *txq = &s->ethtxq[qs];
init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
rxq->fl.size = 72;
txq->q.size = 1024;
}
init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
iqe_size);
}
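/*
 * Reduce the number of Ethernet Queue Sets to no more than "n", taking
 * queues away evenly across the ports, then recompute first_qset offsets.
 */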
static void reduce_ethqs(struct adapter *adapter, int n)
{
int i;
struct port_info *pi;
BUG_ON(n < adapter->params.nports);
while (n < adapter->sge.ethqsets)
for_each_port(adapter, i) {
pi = adap2pinfo(adapter, i);
if (pi->nqsets > 1) {
pi->nqsets--;
adapter->sge.ethqsets--;
if (adapter->sge.ethqsets <= n)
break;
}
}
n = 0;
for_each_port(adapter, i) {
pi = adap2pinfo(adapter, i);
pi->first_qset = n;
n += pi->nqsets;
}
}
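/*
 * Enable MSI-X: ideally one vector per Queue Set plus the extras (firmware
 * event queue), but settle for as few as one per port plus the extras.
 */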
static int enable_msix(struct adapter *adapter)
{
int i, want, need, nqsets;
struct msix_entry entries[MSIX_ENTRIES];
struct sge *s = &adapter->sge;
for (i = 0; i < MSIX_ENTRIES; ++i)
entries[i].entry = i;
want = s->max_ethqsets + MSIX_EXTRAS;
need = adapter->params.nports + MSIX_EXTRAS;
want = pci_enable_msix_range(adapter->pdev, entries, need, want);
if (want < 0)
return want;
nqsets = want - MSIX_EXTRAS;
if (nqsets < s->max_ethqsets) {
dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
" for %d Queue Sets\n", nqsets);
s->max_ethqsets = nqsets;
if (nqsets < s->ethqsets)
reduce_ethqs(adapter, nqsets);
}
for (i = 0; i < want; ++i)
adapter->msix_info[i].vec = entries[i].vector;
return 0;
}
static const struct net_device_ops cxgb4vf_netdev_ops = {
.ndo_open = cxgb4vf_open,
.ndo_stop = cxgb4vf_stop,
.ndo_start_xmit = t4vf_eth_xmit,
.ndo_get_stats = cxgb4vf_get_stats,
.ndo_set_rx_mode = cxgb4vf_set_rxmode,
.ndo_set_mac_address = cxgb4vf_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_eth_ioctl = cxgb4vf_do_ioctl,
.ndo_change_mtu = cxgb4vf_change_mtu,
.ndo_fix_features = cxgb4vf_fix_features,
.ndo_set_features = cxgb4vf_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb4vf_poll_controller,
#endif
};
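/*
 * Return the mask of ports for which the PF provisioned a MAC address;
 * fall back to the full Port Access Rights mask when none is found.
 */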
static unsigned int cxgb4vf_get_port_mask(struct adapter *adapter)
{
unsigned int naddr = 1, pidx = 0;
unsigned int pmask, rmask = 0;
u8 mac[ETH_ALEN];
int err;
pmask = adapter->params.vfres.pmask;
while (pmask) {
if (pmask & 1) {
err = t4vf_get_vf_mac_acl(adapter, pidx, &naddr, mac);
if (!err && !is_zero_ether_addr(mac))
rmask |= (1 << pidx);
}
pmask >>= 1;
pidx++;
}
if (!rmask)
rmask = adapter->params.vfres.pmask;
return rmask;
}
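/*
 * "Probe" routine: allocate and initialize the adapter, map its BARs,
 * bring up the firmware connection, and register one net device per port.
 */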
static int cxgb4vf_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct adapter *adapter;
struct net_device *netdev;
struct port_info *pi;
unsigned int pmask;
int err, pidx;
err = pci_enable_device(pdev);
if (err)
return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
err = pci_request_regions(pdev, KBUILD_MODNAME);
if (err) {
dev_err(&pdev->dev, "cannot obtain PCI resources\n");
goto err_disable_device;
}
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
goto err_release_regions;
}
pci_set_master(pdev);
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
err = -ENOMEM;
goto err_release_regions;
}
pci_set_drvdata(pdev, adapter);
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
(sizeof(struct mbox_cmd) *
T4VF_OS_LOG_MBOX_CMDS),
GFP_KERNEL);
if (!adapter->mbox_log) {
err = -ENOMEM;
goto err_free_adapter;
}
adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->mbox_lock);
INIT_LIST_HEAD(&adapter->mlist.list);
adapter->regs = pci_ioremap_bar(pdev, 0);
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
err = -ENOMEM;
goto err_free_adapter;
}
err = t4vf_prep_adapter(adapter);
if (err) {
dev_err(adapter->pdev_dev, "device didn't become ready:"
" err=%d\n", err);
goto err_unmap_bar0;
}
if (!is_t4(adapter->params.chip)) {
adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
pci_resource_len(pdev, 2));
if (!adapter->bar2) {
dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
err = -ENOMEM;
goto err_unmap_bar0;
}
}
adapter->name = pci_name(pdev);
adapter->msg_enable = DFLT_MSG_ENABLE;
if (!pcie_relaxed_ordering_enabled(pdev))
adapter->flags |= CXGB4VF_ROOT_NO_RELAXED_ORDERING;
err = adap_init0(adapter);
if (err)
dev_err(&pdev->dev,
"Adapter initialization failed, error %d. Continuing in debug mode\n",
err);
INIT_LIST_HEAD(&adapter->mac_hlist);
pmask = cxgb4vf_get_port_mask(adapter);
for_each_port(adapter, pidx) {
int port_id, viid;
u8 mac[ETH_ALEN];
unsigned int naddr = 1;
if (pmask == 0)
break;
port_id = ffs(pmask) - 1;
pmask &= ~(1 << port_id);
netdev = alloc_etherdev_mq(sizeof(struct port_info),
MAX_PORT_QSETS);
if (netdev == NULL) {
err = -ENOMEM;
goto err_free_dev;
}
adapter->port[pidx] = netdev;
SET_NETDEV_DEV(netdev, &pdev->dev);
pi = netdev_priv(netdev);
pi->adapter = adapter;
pi->pidx = pidx;
pi->port_id = port_id;
pi->xact_addr_filt = -1;
netdev->irq = pdev->irq;
netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->min_mtu = 81;
netdev->max_mtu = ETH_MAX_MTU;
netdev->netdev_ops = &cxgb4vf_netdev_ops;
netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
netdev->dev_port = pi->port_id;
if (!(adapter->flags & CXGB4VF_FW_OK))
continue;
viid = t4vf_alloc_vi(adapter, port_id);
if (viid < 0) {
dev_err(&pdev->dev,
"cannot allocate VI for port %d: err=%d\n",
port_id, viid);
err = viid;
goto err_free_dev;
}
pi->viid = viid;
err = t4vf_port_init(adapter, pidx);
if (err) {
dev_err(&pdev->dev, "cannot initialize port %d\n",
pidx);
goto err_free_dev;
}
err = t4vf_get_vf_mac_acl(adapter, port_id, &naddr, mac);
if (err) {
dev_err(&pdev->dev,
"unable to determine MAC ACL address, "
"continuing anyway.. (status %d)\n", err);
} else if (naddr && adapter->params.vfres.nvi == 1) {
struct sockaddr addr;
ether_addr_copy(addr.sa_data, mac);
err = cxgb4vf_set_mac_addr(netdev, &addr);
if (err) {
dev_err(&pdev->dev,
"unable to set MAC address %pM\n",
mac);
goto err_free_dev;
}
dev_info(&pdev->dev,
"Using assigned MAC ACL: %pM\n", mac);
}
}
	if (msi == MSI_MSIX && enable_msix(adapter) == 0) {
		adapter->flags |= CXGB4VF_USING_MSIX;
	} else {
if (msi == MSI_MSIX) {
dev_info(adapter->pdev_dev,
"Unable to use MSI-X Interrupts; falling "
"back to MSI Interrupts\n");
msi = MSI_MSI;
size_nports_qsets(adapter);
}
err = pci_enable_msi(pdev);
if (err) {
dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
" err=%d\n", err);
goto err_free_dev;
}
adapter->flags |= CXGB4VF_USING_MSI;
}
cfg_queues(adapter);
for_each_port(adapter, pidx) {
struct port_info *pi = netdev_priv(adapter->port[pidx]);
netdev = adapter->port[pidx];
if (netdev == NULL)
continue;
netif_set_real_num_tx_queues(netdev, pi->nqsets);
netif_set_real_num_rx_queues(netdev, pi->nqsets);
err = register_netdev(netdev);
if (err) {
dev_warn(&pdev->dev, "cannot register net device %s,"
" skipping\n", netdev->name);
continue;
}
netif_carrier_off(netdev);
set_bit(pidx, &adapter->registered_device_map);
}
if (adapter->registered_device_map == 0) {
dev_err(&pdev->dev, "could not register any net devices\n");
err = -EINVAL;
goto err_disable_interrupts;
}
if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
adapter->debugfs_root =
debugfs_create_dir(pci_name(pdev),
cxgb4vf_debugfs_root);
setup_debugfs(adapter);
}
for_each_port(adapter, pidx) {
dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
adapter->port[pidx]->name,
(adapter->flags & CXGB4VF_USING_MSIX) ? "MSI-X" :
(adapter->flags & CXGB4VF_USING_MSI) ? "MSI" : "");
}
return 0;
err_disable_interrupts:
if (adapter->flags & CXGB4VF_USING_MSIX) {
pci_disable_msix(adapter->pdev);
adapter->flags &= ~CXGB4VF_USING_MSIX;
} else if (adapter->flags & CXGB4VF_USING_MSI) {
pci_disable_msi(adapter->pdev);
adapter->flags &= ~CXGB4VF_USING_MSI;
}
err_free_dev:
for_each_port(adapter, pidx) {
netdev = adapter->port[pidx];
if (netdev == NULL)
continue;
pi = netdev_priv(netdev);
if (pi->viid)
t4vf_free_vi(adapter, pi->viid);
if (test_bit(pidx, &adapter->registered_device_map))
unregister_netdev(netdev);
free_netdev(netdev);
}
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
err_unmap_bar0:
iounmap(adapter->regs);
err_free_adapter:
kfree(adapter->mbox_log);
kfree(adapter);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
return err;
}
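/* "Remove" routine: tear down everything that the probe routine set up. */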
static void cxgb4vf_pci_remove(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
struct hash_mac_addr *entry, *tmp;
if (adapter) {
int pidx;
for_each_port(adapter, pidx)
if (test_bit(pidx, &adapter->registered_device_map))
unregister_netdev(adapter->port[pidx]);
t4vf_sge_stop(adapter);
if (adapter->flags & CXGB4VF_USING_MSIX) {
pci_disable_msix(adapter->pdev);
adapter->flags &= ~CXGB4VF_USING_MSIX;
} else if (adapter->flags & CXGB4VF_USING_MSI) {
pci_disable_msi(adapter->pdev);
adapter->flags &= ~CXGB4VF_USING_MSI;
}
if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
cleanup_debugfs(adapter);
debugfs_remove_recursive(adapter->debugfs_root);
}
t4vf_free_sge_resources(adapter);
for_each_port(adapter, pidx) {
struct net_device *netdev = adapter->port[pidx];
struct port_info *pi;
if (netdev == NULL)
continue;
pi = netdev_priv(netdev);
if (pi->viid)
t4vf_free_vi(adapter, pi->viid);
free_netdev(netdev);
}
iounmap(adapter->regs);
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
kfree(adapter->mbox_log);
list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
list) {
list_del(&entry->list);
kfree(entry);
}
kfree(adapter);
}
pci_disable_device(pdev);
pci_release_regions(pdev);
}
static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
{
struct adapter *adapter;
int pidx;
adapter = pci_get_drvdata(pdev);
if (!adapter)
return;
for_each_port(adapter, pidx)
if (test_bit(pidx, &adapter->registered_device_map))
unregister_netdev(adapter->port[pidx]);
t4vf_sge_stop(adapter);
if (adapter->flags & CXGB4VF_USING_MSIX) {
pci_disable_msix(adapter->pdev);
adapter->flags &= ~CXGB4VF_USING_MSIX;
} else if (adapter->flags & CXGB4VF_USING_MSI) {
pci_disable_msi(adapter->pdev);
adapter->flags &= ~CXGB4VF_USING_MSI;
}
t4vf_free_sge_resources(adapter);
pci_set_drvdata(pdev, NULL);
}
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
static const struct pci_device_id cxgb4vf_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x8
#define CH_PCI_ID_TABLE_ENTRY(devid) \
{ PCI_VDEVICE(CHELSIO, (devid)), 0 }
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
#include "../cxgb4/t4_pci_id_tbl.h"
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
static struct pci_driver cxgb4vf_driver = {
.name = KBUILD_MODNAME,
.id_table = cxgb4vf_pci_tbl,
.probe = cxgb4vf_pci_probe,
.remove = cxgb4vf_pci_remove,
.shutdown = cxgb4vf_pci_shutdown,
};
static int __init cxgb4vf_module_init(void)
{
int ret;
if (msi != MSI_MSIX && msi != MSI_MSI) {
pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
msi, MSI_MSIX, MSI_MSI);
return -EINVAL;
}
cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
ret = pci_register_driver(&cxgb4vf_driver);
if (ret < 0)
debugfs_remove(cxgb4vf_debugfs_root);
return ret;
}
static void __exit cxgb4vf_module_exit(void)
{
pci_unregister_driver(&cxgb4vf_driver);
debugfs_remove(cxgb4vf_debugfs_root);
}
module_init(cxgb4vf_module_init);
module_exit(cxgb4vf_module_exit);