#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/eisa.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include "tlan.h"
static struct net_device *tlan_eisa_devices;
static int tlan_devices_installed;
static int aui[MAX_TLAN_BOARDS];
static int duplex[MAX_TLAN_BOARDS];
static int speed[MAX_TLAN_BOARDS];
static int boards_found;
module_param_array(aui, int, NULL, 0);
module_param_array(duplex, int, NULL, 0);
module_param_array(speed, int, NULL, 0);
MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
MODULE_PARM_DESC(duplex,
"ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
MODULE_LICENSE("GPL");
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
static const char tlan_signature[] = "TLAN";
static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
static int tlan_have_pci;
static int tlan_have_eisa;
static const char * const media[] = {
"10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
"100BaseTx-FD", "100BaseT4", NULL
};
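/* Adapter descriptions.  The driver_data field of tlan_pci_tbl below
 * indexes this table; addr_ofs is the EEPROM offset of the factory MAC
 * address (0xf8 on the Olicom boards, which also store it with each
 * byte pair swapped).
 */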
static struct board {
const char *device_label;
u32 flags;
u16 addr_ofs;
} board_info[] = {
{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
{ "Compaq Netelligent 10/100 TX PCI UTP",
TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq NetFlex-3/P",
TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq Netelligent Integrated 10/100 TX UTP",
TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
{ "Compaq Netelligent Dual 10/100 TX PCI UTP",
TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq Netelligent 10/100 TX Embedded UTP",
TLAN_ADAPTER_NONE, 0x83 },
{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
{ "Olicom OC-2325", TLAN_ADAPTER_ACTIVITY_LED |
TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
{ "Olicom OC-2326", TLAN_ADAPTER_ACTIVITY_LED |
TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
{ "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq NetFlex-3/E",
TLAN_ADAPTER_ACTIVITY_LED |
TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
{ "Compaq NetFlex-3/E",
TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
};
static const struct pci_device_id tlan_pci_tbl[] = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
static void tlan_eisa_probe(void);
static void tlan_eisa_cleanup(void);
static int tlan_init(struct net_device *);
static int tlan_open(struct net_device *dev);
static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
static irqreturn_t tlan_handle_interrupt(int, void *);
static int tlan_close(struct net_device *);
static struct net_device_stats *tlan_get_stats(struct net_device *);
static void tlan_set_multicast_list(struct net_device *);
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
int irq, int rev, const struct pci_device_id *ent);
static void tlan_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void tlan_tx_timeout_work(struct work_struct *work);
static int tlan_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
static u32 tlan_handle_tx_eof(struct net_device *, u16);
static u32 tlan_handle_stat_overflow(struct net_device *, u16);
static u32 tlan_handle_rx_eof(struct net_device *, u16);
static u32 tlan_handle_dummy(struct net_device *, u16);
static u32 tlan_handle_tx_eoc(struct net_device *, u16);
static u32 tlan_handle_status_check(struct net_device *, u16);
static u32 tlan_handle_rx_eoc(struct net_device *, u16);
static void tlan_timer(struct timer_list *t);
static void tlan_phy_monitor(struct timer_list *t);
static void tlan_reset_lists(struct net_device *);
static void tlan_free_lists(struct net_device *);
static void tlan_print_dio(u16);
static void tlan_print_list(struct tlan_list *, char *, int);
static void tlan_read_and_clear_stats(struct net_device *, int);
static void tlan_reset_adapter(struct net_device *);
static void tlan_finish_reset(struct net_device *);
static void tlan_set_mac(struct net_device *, int areg, const char *mac);
static void __tlan_phy_print(struct net_device *);
static void tlan_phy_print(struct net_device *);
static void tlan_phy_detect(struct net_device *);
static void tlan_phy_power_down(struct net_device *);
static void tlan_phy_power_up(struct net_device *);
static void tlan_phy_reset(struct net_device *);
static void tlan_phy_start_link(struct net_device *);
static void tlan_phy_finish_auto_neg(struct net_device *);
static bool __tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
static void tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
static void tlan_mii_send_data(u16, u32, unsigned);
static void tlan_mii_sync(u16);
static void __tlan_mii_write_reg(struct net_device *, u16, u16, u16);
static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);
static void tlan_ee_send_start(u16);
static int tlan_ee_send_byte(u16, u8, int);
static void tlan_ee_receive_byte(u16, u8 *, int);
static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
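/* An skb pointer does not fit in the 32-bit "address" field of a list
 * buffer on 64-bit machines, so the pointer is stashed across the two
 * otherwise unused buffers 8 and 9.  tlan_get_skb() reassembles it;
 * the double 16-bit shift avoids an undefined 32-bit shift of an
 * unsigned long on 32-bit builds.
 */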
static inline void
tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
{
unsigned long addr = (unsigned long)skb;
tag->buffer[9].address = addr;
tag->buffer[8].address = upper_32_bits(addr);
}
static inline struct sk_buff *
tlan_get_skb(const struct tlan_list *tag)
{
unsigned long addr;
addr = tag->buffer[9].address;
addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
return (struct sk_buff *) addr;
}
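/* Interrupt dispatch table, indexed by the interrupt type field of
 * TLAN_HOST_INT.  Each handler returns the value to fold into the
 * TLAN_HC_ACK host command, or 0 to suppress the ack.
 */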
static u32
(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
NULL,
tlan_handle_tx_eof,
tlan_handle_stat_overflow,
tlan_handle_rx_eof,
tlan_handle_dummy,
tlan_handle_tx_eoc,
tlan_handle_status_check,
tlan_handle_rx_eoc
};
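/* Arm the general-purpose timer "ticks" jiffies from now.  If a timer
 * other than the activity (LED) timer is already pending, it is left
 * to run so a reset or negotiation sequence is never pre-empted;
 * priv->lock guards the timer.function test-and-set.
 */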
static void
tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
spin_lock_irqsave(&priv->lock, flags);
if (priv->timer.function != NULL &&
priv->timer_type != TLAN_TIMER_ACTIVITY) {
spin_unlock_irqrestore(&priv->lock, flags);
return;
}
priv->timer.function = tlan_timer;
spin_unlock_irqrestore(&priv->lock, flags);
priv->timer_set_at = jiffies;
priv->timer_type = type;
mod_timer(&priv->timer, jiffies + ticks);
}
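/* PCI removal: unregister the netdev, free the coherent list block,
 * release the I/O region and flush the deferred tx-timeout work before
 * the device goes away.
 */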
static void tlan_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct tlan_priv *priv = netdev_priv(dev);
unregister_netdev(dev);
if (priv->dma_storage) {
dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
priv->dma_storage, priv->dma_storage_dma);
}
#ifdef CONFIG_PCI
pci_release_regions(pdev);
#endif
cancel_work_sync(&priv->tlan_tqueue);
free_netdev(dev);
}
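/* tlan_start()/tlan_stop() bring the adapter up and down on behalf of
 * open, close, suspend and resume; tlan_stop() also records the final
 * statistics and kills both driver timers.
 */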
static void tlan_start(struct net_device *dev)
{
tlan_reset_lists(dev);
tlan_read_and_clear_stats(dev, TLAN_IGNORE);
tlan_reset_adapter(dev);
netif_wake_queue(dev);
}
static void tlan_stop(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
del_timer_sync(&priv->media_timer);
tlan_read_and_clear_stats(dev, TLAN_RECORD);
outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
tlan_reset_adapter(dev);
if (priv->timer.function != NULL) {
del_timer_sync(&priv->timer);
priv->timer.function = NULL;
}
}
static int __maybe_unused tlan_suspend(struct device *dev_d)
{
struct net_device *dev = dev_get_drvdata(dev_d);
if (netif_running(dev))
tlan_stop(dev);
netif_device_detach(dev);
return 0;
}
static int __maybe_unused tlan_resume(struct device *dev_d)
{
struct net_device *dev = dev_get_drvdata(dev_d);
netif_device_attach(dev);
if (netif_running(dev))
tlan_start(dev);
return 0;
}
static SIMPLE_DEV_PM_OPS(tlan_pm_ops, tlan_suspend, tlan_resume);
static struct pci_driver tlan_driver = {
.name = "tlan",
.id_table = tlan_pci_tbl,
.probe = tlan_init_one,
.remove = tlan_remove_one,
.driver.pm = &tlan_pm_ops,
};
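/* Module entry point: register the PCI driver, then scan the EISA bus.
 * Fails with -ENODEV only when neither probe turned up an adapter.
 */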
static int __init tlan_probe(void)
{
int rc = -ENODEV;
pr_info("%s", tlan_banner);
TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
rc = pci_register_driver(&tlan_driver);
if (rc != 0) {
pr_err("Could not register pci driver\n");
goto err_out_pci_free;
}
TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
tlan_eisa_probe();
pr_info("%d device%s installed, PCI: %d EISA: %d\n",
tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
tlan_have_pci, tlan_have_eisa);
if (tlan_devices_installed == 0) {
rc = -ENODEV;
goto err_out_pci_unreg;
}
return 0;
err_out_pci_unreg:
pci_unregister_driver(&tlan_driver);
err_out_pci_free:
return rc;
}
static int tlan_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
return tlan_probe1(pdev, -1, -1, 0, ent);
}
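/* Probe one adapter.  For PCI, pdev and ent are valid and ioaddr/irq
 * are ignored (passed as -1); for EISA, pdev is NULL, ioaddr/irq come
 * from the slot scan and the exact board is told apart by its EISA ID2
 * register.  On success the device is registered and counted.
 */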
static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
const struct pci_device_id *ent)
{
struct net_device *dev;
struct tlan_priv *priv;
u16 device_id;
int reg, rc = -ENODEV;
#ifdef CONFIG_PCI
if (pdev) {
rc = pci_enable_device(pdev);
if (rc)
return rc;
rc = pci_request_regions(pdev, tlan_signature);
if (rc) {
pr_err("Could not reserve IO regions\n");
goto err_out;
}
}
#endif /* CONFIG_PCI */
dev = alloc_etherdev(sizeof(struct tlan_priv));
if (dev == NULL) {
rc = -ENOMEM;
goto err_out_regions;
}
SET_NETDEV_DEV(dev, &pdev->dev);
priv = netdev_priv(dev);
priv->pci_dev = pdev;
priv->dev = dev;
if (pdev) {
u32 pci_io_base = 0;
priv->adapter = &board_info[ent->driver_data];
rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
pr_err("No suitable PCI mapping available\n");
goto err_out_free_dev;
}
for (reg = 0; reg <= 5; reg++) {
if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
pci_io_base = pci_resource_start(pdev, reg);
TLAN_DBG(TLAN_DEBUG_GNRL,
"IO mapping is available at %x.\n",
pci_io_base);
break;
}
}
if (!pci_io_base) {
pr_err("No IO mappings available\n");
rc = -EIO;
goto err_out_free_dev;
}
dev->base_addr = pci_io_base;
dev->irq = pdev->irq;
priv->adapter_rev = pdev->revision;
pci_set_master(pdev);
pci_set_drvdata(pdev, dev);
} else {
device_id = inw(ioaddr + EISA_ID2);
if (device_id == 0x20F1) {
priv->adapter = &board_info[13];
priv->adapter_rev = 23;
} else {
priv->adapter = &board_info[14];
priv->adapter_rev = 10;
}
dev->base_addr = ioaddr;
dev->irq = irq;
}
if (dev->mem_start) {
priv->aui = dev->mem_start & 0x01;
priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
: (dev->mem_start & 0x06) >> 1;
priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
: (dev->mem_start & 0x18) >> 3;
if (priv->speed == 0x1)
priv->speed = TLAN_SPEED_10;
else if (priv->speed == 0x2)
priv->speed = TLAN_SPEED_100;
debug = priv->debug = dev->mem_end;
} else {
priv->aui = aui[boards_found];
priv->speed = speed[boards_found];
priv->duplex = duplex[boards_found];
priv->debug = debug;
}
INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
spin_lock_init(&priv->lock);
rc = tlan_init(dev);
if (rc) {
pr_err("Could not set up device\n");
goto err_out_free_dev;
}
rc = register_netdev(dev);
if (rc) {
pr_err("Could not register device\n");
goto err_out_uninit;
}
tlan_devices_installed++;
boards_found++;
if (pdev)
tlan_have_pci++;
else {
priv->next_device = tlan_eisa_devices;
tlan_eisa_devices = dev;
tlan_have_eisa++;
}
netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
(int)dev->irq,
(int)dev->base_addr,
priv->adapter->device_label,
priv->adapter_rev);
return 0;
err_out_uninit:
dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
priv->dma_storage, priv->dma_storage_dma);
err_out_free_dev:
free_netdev(dev);
err_out_regions:
#ifdef CONFIG_PCI
if (pdev)
pci_release_regions(pdev);
err_out:
#endif
if (pdev)
pci_disable_device(pdev);
return rc;
}
static void tlan_eisa_cleanup(void)
{
struct net_device *dev;
struct tlan_priv *priv;
while (tlan_have_eisa) {
dev = tlan_eisa_devices;
priv = netdev_priv(dev);
if (priv->dma_storage) {
dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
priv->dma_storage,
priv->dma_storage_dma);
}
release_region(dev->base_addr, 0x10);
unregister_netdev(dev);
tlan_eisa_devices = priv->next_device;
free_netdev(dev);
tlan_have_eisa--;
}
}
static void __exit tlan_exit(void)
{
pci_unregister_driver(&tlan_driver);
if (tlan_have_eisa)
tlan_eisa_cleanup();
}
module_init(tlan_probe);
module_exit(tlan_exit);
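/* Scan EISA slots 1-8 (I/O bases 0x1000-0x8000 in 0x1000 steps) for a
 * NetFlex-3/E: check the vendor and device ID registers and that the
 * card is enabled, then decode its IRQ from the configuration byte at
 * offset 0xcc0 before handing the slot to tlan_probe1().
 */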
static void __init tlan_eisa_probe(void)
{
long ioaddr;
int irq;
u16 device_id;
if (!EISA_bus) {
TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n");
return;
}
for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
(int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
(int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
TLAN_DBG(TLAN_DEBUG_PROBE,
"Probing for EISA adapter at IO: 0x%4x : ",
(int) ioaddr);
if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
goto out;
if (inw(ioaddr + EISA_ID) != 0x110E) {
release_region(ioaddr, 0x10);
goto out;
}
device_id = inw(ioaddr + EISA_ID2);
if (device_id != 0x20F1 && device_id != 0x40F1) {
release_region(ioaddr, 0x10);
goto out;
}
if (inb(ioaddr + EISA_CR) != 0x1) {
release_region(ioaddr, 0x10);
goto out2;
}
if (debug == 0x10)
pr_info("Found one\n");
		switch (inb(ioaddr + 0xcc0)) {
		case 0x10:
			irq = 5;
			break;
		case 0x20:
			irq = 9;
			break;
		case 0x40:
			irq = 10;
			break;
		case 0x80:
			irq = 11;
			break;
		default:
			/* unknown IRQ setting: don't leak the region */
			release_region(ioaddr, 0x10);
			goto out;
		}
tlan_probe1(NULL, ioaddr, irq, 12, NULL);
continue;
out:
if (debug == 0x10)
pr_info("None found\n");
continue;
out2:
if (debug == 0x10)
pr_info("Card found but it is not enabled, skipping\n");
continue;
}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tlan_poll(struct net_device *dev)
{
disable_irq(dev->irq);
tlan_handle_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
static const struct net_device_ops tlan_netdev_ops = {
.ndo_open = tlan_open,
.ndo_stop = tlan_close,
.ndo_start_xmit = tlan_start_tx,
.ndo_tx_timeout = tlan_tx_timeout,
.ndo_get_stats = tlan_get_stats,
.ndo_set_rx_mode = tlan_set_multicast_list,
.ndo_eth_ioctl = tlan_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tlan_poll,
#endif
};
static void tlan_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct tlan_priv *priv = netdev_priv(dev);
strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
if (priv->pci_dev)
strscpy(info->bus_info, pci_name(priv->pci_dev),
sizeof(info->bus_info));
else
strscpy(info->bus_info, "EISA", sizeof(info->bus_info));
}
static int tlan_get_eeprom_len(struct net_device *dev)
{
return TLAN_EEPROM_SIZE;
}
static int tlan_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data)
{
int i;
for (i = 0; i < TLAN_EEPROM_SIZE; i++)
if (tlan_ee_read_byte(dev, i, &data[i]))
return -EIO;
return 0;
}
static const struct ethtool_ops tlan_ethtool_ops = {
.get_drvinfo = tlan_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_eeprom_len = tlan_get_eeprom_len,
.get_eeprom = tlan_get_eeprom,
};
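/* One-time device init: allocate the coherent RX/TX list block, carve
 * it into the two aligned rings, and read the MAC address out of the
 * EEPROM (swapped in byte pairs on Olicom boards, addr_ofs 0xf8).
 */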
static int tlan_init(struct net_device *dev)
{
int dma_size;
int err;
int i;
struct tlan_priv *priv;
u8 addr[ETH_ALEN];
priv = netdev_priv(dev);
dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
* (sizeof(struct tlan_list));
priv->dma_storage = dma_alloc_coherent(&priv->pci_dev->dev, dma_size,
&priv->dma_storage_dma, GFP_KERNEL);
priv->dma_size = dma_size;
if (priv->dma_storage == NULL) {
pr_err("Could not allocate lists and buffers for %s\n",
dev->name);
return -ENOMEM;
}
priv->rx_list = (struct tlan_list *)
ALIGN((unsigned long)priv->dma_storage, 8);
priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
priv->tx_list_dma =
priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
err = 0;
for (i = 0; i < ETH_ALEN; i++)
err |= tlan_ee_read_byte(dev,
(u8) priv->adapter->addr_ofs + i,
addr + i);
if (err) {
pr_err("%s: Error reading MAC from eeprom: %d\n",
dev->name, err);
}
if (priv->adapter->addr_ofs == 0xf8) {
for (i = 0; i < ETH_ALEN; i += 2) {
char tmp = addr[i];
addr[i] = addr[i + 1];
addr[i + 1] = tmp;
}
}
eth_hw_addr_set(dev, addr);
netif_carrier_off(dev);
dev->netdev_ops = &tlan_netdev_ops;
dev->ethtool_ops = &tlan_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
return 0;
}
static int tlan_open(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
int err;
priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
dev->name, dev);
if (err) {
netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
dev->irq);
return err;
}
timer_setup(&priv->timer, NULL, 0);
timer_setup(&priv->media_timer, tlan_phy_monitor, 0);
tlan_start(dev);
TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
dev->name, priv->tlan_rev);
return 0;
}
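/* MII ioctl pass-through (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG);
 * refused with -EAGAIN until the PHY is online.
 */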
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct tlan_priv *priv = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq);
u32 phy = priv->phy[priv->phy_num];
if (!priv->phy_online)
return -EAGAIN;
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = phy;
fallthrough;
case SIOCGMIIREG:
tlan_mii_read_reg(dev, data->phy_id & 0x1f,
data->reg_num & 0x1f, &data->val_out);
return 0;
case SIOCSMIIREG:
tlan_mii_write_reg(dev, data->phy_id & 0x1f,
data->reg_num & 0x1f, data->val_in);
return 0;
default:
return -EOPNOTSUPP;
}
}
static void tlan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
tlan_free_lists(dev);
tlan_reset_lists(dev);
tlan_read_and_clear_stats(dev, TLAN_IGNORE);
tlan_reset_adapter(dev);
netif_trans_update(dev);
netif_wake_queue(dev);
}
static void tlan_tx_timeout_work(struct work_struct *work)
{
struct tlan_priv *priv =
container_of(work, struct tlan_priv, tlan_tqueue);
tlan_tx_timeout(priv->dev, UINT_MAX);
}
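/* Queue a frame on the TX list ring.  The first frame in an idle
 * channel is started with a GO command; subsequent frames are chained
 * onto the previous list's forward pointer and are picked up when the
 * channel reaches them.
 */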
static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
dma_addr_t tail_list_phys;
struct tlan_list *tail_list;
unsigned long flags;
unsigned int txlen;
if (!priv->phy_online) {
TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
return NETDEV_TX_OK;
txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
tail_list = priv->tx_list + priv->tx_tail;
tail_list_phys =
priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
TLAN_DBG(TLAN_DEBUG_TX,
"TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
dev->name, priv->tx_head, priv->tx_tail);
netif_stop_queue(dev);
priv->tx_busy_count++;
return NETDEV_TX_BUSY;
}
tail_list->forward = 0;
tail_list->buffer[0].address = dma_map_single(&priv->pci_dev->dev,
skb->data, txlen,
DMA_TO_DEVICE);
tlan_store_skb(tail_list, skb);
tail_list->frame_size = (u16) txlen;
tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
tail_list->buffer[1].count = 0;
tail_list->buffer[1].address = 0;
spin_lock_irqsave(&priv->lock, flags);
tail_list->c_stat = TLAN_CSTAT_READY;
if (!priv->tx_in_progress) {
priv->tx_in_progress = 1;
TLAN_DBG(TLAN_DEBUG_TX,
"TRANSMIT: Starting TX on buffer %d\n",
priv->tx_tail);
outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
} else {
TLAN_DBG(TLAN_DEBUG_TX,
"TRANSMIT: Adding buffer %d to TX channel\n",
priv->tx_tail);
if (priv->tx_tail == 0) {
(priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
= tail_list_phys;
} else {
(priv->tx_list + (priv->tx_tail - 1))->forward
= tail_list_phys;
}
}
spin_unlock_irqrestore(&priv->lock, flags);
CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
return NETDEV_TX_OK;
}
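/* ISR: read TLAN_HOST_INT, write the latched value back to clear the
 * source, dispatch on the interrupt type through tlan_int_vector[],
 * then issue TLAN_HC_ACK with the count the handler returned.
 */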
static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct tlan_priv *priv = netdev_priv(dev);
u16 host_int;
u16 type;
spin_lock(&priv->lock);
host_int = inw(dev->base_addr + TLAN_HOST_INT);
type = (host_int & TLAN_HI_IT_MASK) >> 2;
if (type) {
u32 ack;
u32 host_cmd;
outw(host_int, dev->base_addr + TLAN_HOST_INT);
ack = tlan_int_vector[type](dev, host_int);
if (ack) {
host_cmd = TLAN_HC_ACK | ack | (type << 18);
outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
}
}
spin_unlock(&priv->lock);
return IRQ_RETVAL(type);
}
static int tlan_close(struct net_device *dev)
{
tlan_stop(dev);
free_irq(dev->irq, dev);
tlan_free_lists(dev);
TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
return 0;
}
static struct net_device_stats *tlan_get_stats(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
int i;
tlan_read_and_clear_stats(dev, TLAN_RECORD);
TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
priv->rx_eoc_count);
TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
priv->tx_busy_count);
if (debug & TLAN_DEBUG_GNRL) {
tlan_print_dio(dev->base_addr);
tlan_phy_print(dev);
}
if (debug & TLAN_DEBUG_LIST) {
for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
tlan_print_list(priv->rx_list + i, "RX", i);
for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
tlan_print_list(priv->tx_list + i, "TX", i);
}
return &dev->stats;
}
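/* RX filter: promiscuous mode sets the CAF bit; otherwise the first
 * three multicast addresses go into AREG slots 1-3 and the rest into
 * the 64-bit hash registers.
 */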
static void tlan_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
u32 hash1 = 0;
u32 hash2 = 0;
int i;
u32 offset;
u8 tmp;
if (dev->flags & IFF_PROMISC) {
tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
tlan_dio_write8(dev->base_addr,
TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
} else {
tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
tlan_dio_write8(dev->base_addr,
TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 3; i++)
tlan_set_mac(dev, i + 1, NULL);
tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
0xffffffff);
tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
0xffffffff);
} else {
i = 0;
netdev_for_each_mc_addr(ha, dev) {
if (i < 3) {
tlan_set_mac(dev, i + 1,
(char *) &ha->addr);
} else {
offset =
tlan_hash_func((u8 *)&ha->addr);
if (offset < 32)
hash1 |= (1 << offset);
else
hash2 |= (1 << (offset - 32));
}
i++;
}
for ( ; i < 3; i++)
tlan_set_mac(dev, i + 1, NULL);
tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
}
}
}
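/* TX end-of-frame: reap completed TX lists (at most 255 per ack),
 * unmap and free their skbs, then restart the channel if an EOC raced
 * with a freshly queued frame.  Also drives the activity LED timer.
 */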
static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
{
struct tlan_priv *priv = netdev_priv(dev);
int eoc = 0;
struct tlan_list *head_list;
dma_addr_t head_list_phys;
u32 ack = 0;
u16 tmp_c_stat;
TLAN_DBG(TLAN_DEBUG_TX,
"TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
priv->tx_head, priv->tx_tail);
head_list = priv->tx_list + priv->tx_head;
while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
&& (ack < 255)) {
struct sk_buff *skb = tlan_get_skb(head_list);
ack++;
dma_unmap_single(&priv->pci_dev->dev,
head_list->buffer[0].address,
max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE),
DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
head_list->buffer[8].address = 0;
head_list->buffer[9].address = 0;
if (tmp_c_stat & TLAN_CSTAT_EOC)
eoc = 1;
dev->stats.tx_bytes += head_list->frame_size;
head_list->c_stat = TLAN_CSTAT_UNUSED;
netif_start_queue(dev);
CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
head_list = priv->tx_list + priv->tx_head;
}
if (!ack)
netdev_info(dev,
"Received interrupt for uncompleted TX frame\n");
if (eoc) {
TLAN_DBG(TLAN_DEBUG_TX,
"TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
priv->tx_head, priv->tx_tail);
head_list = priv->tx_list + priv->tx_head;
head_list_phys = priv->tx_list_dma
+ sizeof(struct tlan_list)*priv->tx_head;
if ((head_list->c_stat & TLAN_CSTAT_READY)
== TLAN_CSTAT_READY) {
outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO;
} else {
priv->tx_in_progress = 0;
}
}
if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
tlan_dio_write8(dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
if (priv->timer.function == NULL) {
priv->timer.function = tlan_timer;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
priv->timer_set_at = jiffies;
priv->timer_type = TLAN_TIMER_ACTIVITY;
add_timer(&priv->timer);
} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
priv->timer_set_at = jiffies;
}
}
return ack;
}
static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
{
tlan_read_and_clear_stats(dev, TLAN_RECORD);
return 1;
}
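/* RX end-of-frame: hand completed buffers up the stack, replacing each
 * with a freshly mapped skb (the old one is reused and the frame
 * dropped if allocation fails), and re-chain the list at the tail of
 * the RX ring.
 */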
static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
{
struct tlan_priv *priv = netdev_priv(dev);
u32 ack = 0;
int eoc = 0;
struct tlan_list *head_list;
struct sk_buff *skb;
struct tlan_list *tail_list;
u16 tmp_c_stat;
dma_addr_t head_list_phys;
TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
priv->rx_head, priv->rx_tail);
head_list = priv->rx_list + priv->rx_head;
head_list_phys =
priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
&& (ack < 255)) {
dma_addr_t frame_dma = head_list->buffer[0].address;
u32 frame_size = head_list->frame_size;
struct sk_buff *new_skb;
ack++;
if (tmp_c_stat & TLAN_CSTAT_EOC)
eoc = 1;
new_skb = netdev_alloc_skb_ip_align(dev,
TLAN_MAX_FRAME_SIZE + 5);
if (!new_skb)
goto drop_and_reuse;
skb = tlan_get_skb(head_list);
dma_unmap_single(&priv->pci_dev->dev, frame_dma,
TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
skb_put(skb, frame_size);
dev->stats.rx_bytes += frame_size;
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
head_list->buffer[0].address =
dma_map_single(&priv->pci_dev->dev, new_skb->data,
TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
tlan_store_skb(head_list, new_skb);
drop_and_reuse:
head_list->forward = 0;
head_list->c_stat = 0;
tail_list = priv->rx_list + priv->rx_tail;
tail_list->forward = head_list_phys;
CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
head_list = priv->rx_list + priv->rx_head;
head_list_phys = priv->rx_list_dma
+ sizeof(struct tlan_list)*priv->rx_head;
}
if (!ack)
netdev_info(dev,
"Received interrupt for uncompleted RX frame\n");
if (eoc) {
TLAN_DBG(TLAN_DEBUG_RX,
"RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
priv->rx_head, priv->rx_tail);
head_list = priv->rx_list + priv->rx_head;
head_list_phys = priv->rx_list_dma
+ sizeof(struct tlan_list)*priv->rx_head;
outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO | TLAN_HC_RT;
priv->rx_eoc_count++;
}
if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
tlan_dio_write8(dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
if (priv->timer.function == NULL) {
priv->timer.function = tlan_timer;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
priv->timer_set_at = jiffies;
priv->timer_type = TLAN_TIMER_ACTIVITY;
add_timer(&priv->timer);
} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
priv->timer_set_at = jiffies;
}
}
return ack;
}
static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
{
netdev_info(dev, "Test interrupt\n");
return 1;
}
static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
{
struct tlan_priv *priv = netdev_priv(dev);
struct tlan_list *head_list;
dma_addr_t head_list_phys;
u32 ack = 1;
if (priv->tlan_rev < 0x30) {
TLAN_DBG(TLAN_DEBUG_TX,
"TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
priv->tx_head, priv->tx_tail);
head_list = priv->tx_list + priv->tx_head;
head_list_phys = priv->tx_list_dma
+ sizeof(struct tlan_list)*priv->tx_head;
if ((head_list->c_stat & TLAN_CSTAT_READY)
== TLAN_CSTAT_READY) {
netif_stop_queue(dev);
outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO;
} else {
priv->tx_in_progress = 0;
}
}
return ack;
}
static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
{
struct tlan_priv *priv = netdev_priv(dev);
u32 ack;
u32 error;
u8 net_sts;
u32 phy;
u16 tlphy_ctl;
u16 tlphy_sts;
ack = 1;
if (host_int & TLAN_HI_IV_MASK) {
netif_stop_queue(dev);
error = inl(dev->base_addr + TLAN_CH_PARM);
netdev_info(dev, "Adaptor Error = 0x%x\n", error);
tlan_read_and_clear_stats(dev, TLAN_RECORD);
outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
schedule_work(&priv->tlan_tqueue);
netif_wake_queue(dev);
ack = 0;
} else {
TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
phy = priv->phy[priv->phy_num];
net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
if (net_sts) {
tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
dev->name, (unsigned) net_sts);
}
if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
__tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
__tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
if (!(tlphy_sts & TLAN_TS_POLOK) &&
!(tlphy_ctl & TLAN_TC_SWAPOL)) {
tlphy_ctl |= TLAN_TC_SWAPOL;
__tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
tlphy_ctl);
} else if ((tlphy_sts & TLAN_TS_POLOK) &&
(tlphy_ctl & TLAN_TC_SWAPOL)) {
tlphy_ctl &= ~TLAN_TC_SWAPOL;
__tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
tlphy_ctl);
}
if (debug)
__tlan_phy_print(dev);
}
}
return ack;
}
static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
{
struct tlan_priv *priv = netdev_priv(dev);
dma_addr_t head_list_phys;
u32 ack = 1;
if (priv->tlan_rev < 0x30) {
TLAN_DBG(TLAN_DEBUG_RX,
"RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
priv->rx_head, priv->rx_tail);
head_list_phys = priv->rx_list_dma
+ sizeof(struct tlan_list)*priv->rx_head;
outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO | TLAN_HC_RT;
priv->rx_eoc_count++;
}
return ack;
}
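/* General-purpose timer: runs whichever state-machine step is recorded
 * in timer_type (PHY power/reset/link stages, reset completion) plus
 * the activity-LED turn-off, which re-arms itself while transmit or
 * receive activity is recent.
 */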
static void tlan_timer(struct timer_list *t)
{
struct tlan_priv *priv = from_timer(priv, t, timer);
struct net_device *dev = priv->dev;
u32 elapsed;
unsigned long flags = 0;
priv->timer.function = NULL;
switch (priv->timer_type) {
case TLAN_TIMER_PHY_PDOWN:
tlan_phy_power_down(dev);
break;
case TLAN_TIMER_PHY_PUP:
tlan_phy_power_up(dev);
break;
case TLAN_TIMER_PHY_RESET:
tlan_phy_reset(dev);
break;
case TLAN_TIMER_PHY_START_LINK:
tlan_phy_start_link(dev);
break;
case TLAN_TIMER_PHY_FINISH_AN:
tlan_phy_finish_auto_neg(dev);
break;
case TLAN_TIMER_FINISH_RESET:
tlan_finish_reset(dev);
break;
case TLAN_TIMER_ACTIVITY:
spin_lock_irqsave(&priv->lock, flags);
if (priv->timer.function == NULL) {
elapsed = jiffies - priv->timer_set_at;
if (elapsed >= TLAN_TIMER_ACT_DELAY) {
tlan_dio_write8(dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK);
} else {
priv->timer.expires = priv->timer_set_at
+ TLAN_TIMER_ACT_DELAY;
spin_unlock_irqrestore(&priv->lock, flags);
add_timer(&priv->timer);
break;
}
}
spin_unlock_irqrestore(&priv->lock, flags);
break;
default:
break;
}
}
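/* Reinitialize both rings: mark every TX list unused, and give every
 * RX list a mapped receive skb, ready status and a forward link to the
 * next list; the final forward pointer is left 0 to end the chain.
 */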
static void tlan_reset_lists(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
int i;
struct tlan_list *list;
dma_addr_t list_phys;
struct sk_buff *skb;
priv->tx_head = 0;
priv->tx_tail = 0;
for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
list = priv->tx_list + i;
list->c_stat = TLAN_CSTAT_UNUSED;
list->buffer[0].address = 0;
list->buffer[2].count = 0;
list->buffer[2].address = 0;
list->buffer[8].address = 0;
list->buffer[9].address = 0;
}
priv->rx_head = 0;
priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
list = priv->rx_list + i;
list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
list->c_stat = TLAN_CSTAT_READY;
list->frame_size = TLAN_MAX_FRAME_SIZE;
list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
if (!skb)
break;
list->buffer[0].address = dma_map_single(&priv->pci_dev->dev,
skb->data,
TLAN_MAX_FRAME_SIZE,
DMA_FROM_DEVICE);
tlan_store_skb(list, skb);
list->buffer[1].count = 0;
list->buffer[1].address = 0;
list->forward = list_phys + sizeof(struct tlan_list);
}
while (i < TLAN_NUM_RX_LISTS) {
tlan_store_skb(priv->rx_list + i, NULL);
++i;
}
list->forward = 0;
}
static void tlan_free_lists(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
int i;
struct tlan_list *list;
struct sk_buff *skb;
for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
list = priv->tx_list + i;
skb = tlan_get_skb(list);
if (skb) {
dma_unmap_single(&priv->pci_dev->dev,
list->buffer[0].address,
max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE),
DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
}
}
for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
list = priv->rx_list + i;
skb = tlan_get_skb(list);
if (skb) {
dma_unmap_single(&priv->pci_dev->dev,
list->buffer[0].address,
TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
}
}
}
static void tlan_print_dio(u16 io_base)
{
u32 data0, data1;
int i;
pr_info("Contents of internal registers for io base 0x%04hx\n",
io_base);
pr_info("Off. +0 +4\n");
for (i = 0; i < 0x4C; i += 8) {
data0 = tlan_dio_read32(io_base, i);
data1 = tlan_dio_read32(io_base, i + 0x4);
pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1);
}
}
static void tlan_print_list(struct tlan_list *list, char *type, int num)
{
int i;
pr_info("%s List %d at %p\n", type, num, list);
pr_info(" Forward = 0x%08x\n", list->forward);
pr_info(" CSTAT = 0x%04hx\n", list->c_stat);
pr_info(" Frame Size = 0x%04hx\n", list->frame_size);
for (i = 0; i < 2; i++) {
pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
i, list->buffer[i].count, list->buffer[i].address);
}
}
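/* Read the adapter's DIO statistics registers, which clear when read,
 * and fold them into dev->stats when record is TLAN_RECORD;
 * TLAN_IGNORE just clears the hardware counters.
 */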
static void tlan_read_and_clear_stats(struct net_device *dev, int record)
{
u32 tx_good, tx_under;
u32 rx_good, rx_over;
u32 def_tx, crc, code;
u32 multi_col, single_col;
u32 excess_col, late_col, loss;
outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);
outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
code = inb(dev->base_addr + TLAN_DIO_DATA + 3);
outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);
if (record) {
dev->stats.rx_packets += rx_good;
dev->stats.rx_errors += rx_over + crc + code;
dev->stats.tx_packets += tx_good;
dev->stats.tx_errors += tx_under + loss;
dev->stats.collisions += multi_col
+ single_col + excess_col + late_col;
dev->stats.rx_over_errors += rx_over;
dev->stats.rx_crc_errors += crc;
dev->stats.rx_frame_errors += code;
dev->stats.tx_aborted_errors += tx_under;
dev->stats.tx_carrier_errors += loss;
}
}
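/* Hard-reset the adapter: soft reset, interrupts off, address and hash
 * registers cleared, interrupt timer and threshold reloaded, then PHY
 * detection and the bit-rate/AUI setup before handing control to the
 * PHY power-up state machine (or straight to tlan_finish_reset() for
 * unmanaged PHYs).
 */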
static void
tlan_reset_adapter(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
int i;
u32 addr;
u32 data;
u8 data8;
priv->tlan_full_duplex = false;
priv->phy_online = 0;
netif_carrier_off(dev);
data = inl(dev->base_addr + TLAN_HOST_CMD);
data |= TLAN_HC_AD_RST;
outl(data, dev->base_addr + TLAN_HOST_CMD);
udelay(1000);
data = inl(dev->base_addr + TLAN_HOST_CMD);
data |= TLAN_HC_INT_OFF;
outl(data, dev->base_addr + TLAN_HOST_CMD);
for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
tlan_dio_write32(dev->base_addr, (u16) i, 0);
data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
if (priv->tlan_rev >= 0x30) {
data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
}
tlan_phy_detect(dev);
data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
data |= TLAN_NET_CFG_BIT;
if (priv->aui == 1) {
tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
} else if (priv->duplex == TLAN_DUPLEX_FULL) {
tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
priv->tlan_full_duplex = true;
} else {
tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
}
}
if (priv->phy_num == 0 ||
(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))
data |= TLAN_NET_CFG_PHY_EN;
tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
tlan_finish_reset(dev);
else
tlan_phy_power_down(dev);
}
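/* Final reset stage: program the MAC command/mask registers, report
 * the negotiated or forced link parameters, and either enable
 * interrupts and start the receive channel or re-arm a 10 s retry if
 * the link is still down.
 */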
static void
tlan_finish_reset(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
u8 data;
u32 phy;
u8 sio;
u16 status;
u16 partner;
u16 tlphy_ctl;
u16 tlphy_par;
u16 tlphy_id1, tlphy_id2;
int i;
phy = priv->phy[priv->phy_num];
data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
if (priv->tlan_full_duplex)
data |= TLAN_NET_CMD_DUPLEX;
tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
if (priv->phy_num == 0)
data |= TLAN_NET_MASK_MASK7;
tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
	/* maximum receive frame size, rounded up to an 8-byte boundary */
	tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
(priv->aui)) {
status = MII_GS_LINK;
netdev_info(dev, "Link forced\n");
} else {
tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
udelay(1000);
tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
if (status & MII_GS_LINK) {
if ((tlphy_id1 == NAT_SEM_ID1) &&
(tlphy_id2 == NAT_SEM_ID2)) {
tlan_mii_read_reg(dev, phy, MII_AN_LPA,
&partner);
tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR,
&tlphy_par);
netdev_info(dev,
"Link active, %s %uMbps %s-Duplex\n",
!(tlphy_par & TLAN_PHY_AN_EN_STAT)
? "forced" : "Autonegotiation enabled,",
tlphy_par & TLAN_PHY_SPEED_100
? 100 : 10,
tlphy_par & TLAN_PHY_DUPLEX_FULL
? "Full" : "Half");
if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
netdev_info(dev, "Partner capability:");
for (i = 5; i < 10; i++)
if (partner & (1 << i))
pr_cont(" %s",
media[i-5]);
pr_cont("\n");
}
} else
netdev_info(dev, "Link active\n");
priv->media_timer.expires = jiffies + HZ;
add_timer(&priv->media_timer);
}
}
if (priv->phy_num == 0) {
tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
tlphy_ctl |= TLAN_TC_INTEN;
tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
sio |= TLAN_NET_SIO_MINTEN;
tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
}
if (status & MII_GS_LINK) {
tlan_set_mac(dev, 0, dev->dev_addr);
priv->phy_online = 1;
outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
outb((TLAN_HC_REQ_INT >> 8),
dev->base_addr + TLAN_HOST_CMD + 1);
outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
netif_carrier_on(dev);
} else {
netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
return;
}
tlan_set_multicast_list(dev);
}
static void tlan_set_mac(struct net_device *dev, int areg, const char *mac)
{
int i;
areg *= 6;
if (mac != NULL) {
for (i = 0; i < 6; i++)
tlan_dio_write8(dev->base_addr,
TLAN_AREG_0 + areg + i, mac[i]);
} else {
for (i = 0; i < 6; i++)
tlan_dio_write8(dev->base_addr,
TLAN_AREG_0 + areg + i, 0);
}
}
static void __tlan_phy_print(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
u16 i, data0, data1, data2, data3, phy;
lockdep_assert_held(&priv->lock);
phy = priv->phy[priv->phy_num];
if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
netdev_info(dev, "Unmanaged PHY\n");
} else if (phy <= TLAN_PHY_MAX_ADDR) {
netdev_info(dev, "PHY 0x%02x\n", phy);
pr_info(" Off. +0 +1 +2 +3\n");
for (i = 0; i < 0x20; i += 4) {
__tlan_mii_read_reg(dev, phy, i, &data0);
__tlan_mii_read_reg(dev, phy, i + 1, &data1);
__tlan_mii_read_reg(dev, phy, i + 2, &data2);
__tlan_mii_read_reg(dev, phy, i + 3, &data3);
pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
i, data0, data1, data2, data3);
}
} else {
netdev_info(dev, "Invalid PHY\n");
}
}
static void tlan_phy_print(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
__tlan_phy_print(dev);
spin_unlock_irqrestore(&priv->lock, flags);
}
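/* Find the PHYs.  The internal PHY always answers at
 * TLAN_PHY_MAX_ADDR and becomes phy[0]; the first responding lower
 * address becomes the external phy[1], which is preferred when
 * present.
 */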
static void tlan_phy_detect(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
u16 control;
u16 hi;
u16 lo;
u32 phy;
if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
priv->phy_num = 0xffff;
return;
}
tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
if (hi != 0xffff)
priv->phy[0] = TLAN_PHY_MAX_ADDR;
else
priv->phy[0] = TLAN_PHY_NONE;
priv->phy[1] = TLAN_PHY_NONE;
for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
if ((control != 0xffff) ||
(hi != 0xffff) || (lo != 0xffff)) {
TLAN_DBG(TLAN_DEBUG_GNRL,
"PHY found at %02x %04x %04x %04x\n",
phy, control, hi, lo);
if ((priv->phy[1] == TLAN_PHY_NONE) &&
(phy != TLAN_PHY_MAX_ADDR)) {
priv->phy[1] = phy;
}
}
}
if (priv->phy[1] != TLAN_PHY_NONE)
priv->phy_num = 1;
else if (priv->phy[0] != TLAN_PHY_NONE)
priv->phy_num = 0;
else
netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
}
static void tlan_phy_power_down(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
u16 value;
TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
tlan_mii_sync(dev->base_addr);
tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
if ((priv->phy_num == 0) && (priv->phy[1] != TLAN_PHY_NONE)) {
if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10)
value = MII_GC_ISOLATE;
tlan_mii_sync(dev->base_addr);
tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
}
tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
}
static void tlan_phy_power_up(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
u16 value;
TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
tlan_mii_sync(dev->base_addr);
value = MII_GC_LOOPBK;
tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
tlan_mii_sync(dev->base_addr);
tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_RESET);
}
static void tlan_phy_reset(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
u16 phy;
u16 value;
unsigned long timeout = jiffies + HZ;
phy = priv->phy[priv->phy_num];
TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
tlan_mii_sync(dev->base_addr);
value = MII_GC_LOOPBK | MII_GC_RESET;
tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
do {
tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
if (time_after(jiffies, timeout)) {
netdev_err(dev, "PHY reset timeout\n");
return;
}
} while (value & MII_GC_RESET);
tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_START_LINK);
}
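/* Kick off link bring-up: with both speed and duplex forced by module
 * parameters the control register is written directly, otherwise our
 * abilities are advertised and autonegotiation is started; the AUI and
 * internal-PHY cases go through the TLAN-specific control register.
 */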
static void tlan_phy_start_link(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
u16 ability;
u16 control;
u16 data;
u16 phy;
u16 status;
u16 tctl;
phy = priv->phy[priv->phy_num];
TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
if ((status & MII_GS_AUTONEG) &&
(!priv->aui)) {
ability = status >> 11;
if (priv->speed == TLAN_SPEED_10 &&
priv->duplex == TLAN_DUPLEX_HALF) {
tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
} else if (priv->speed == TLAN_SPEED_10 &&
priv->duplex == TLAN_DUPLEX_FULL) {
priv->tlan_full_duplex = true;
tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
} else if (priv->speed == TLAN_SPEED_100 &&
priv->duplex == TLAN_DUPLEX_HALF) {
tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
} else if (priv->speed == TLAN_SPEED_100 &&
priv->duplex == TLAN_DUPLEX_FULL) {
priv->tlan_full_duplex = true;
tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
} else {
tlan_mii_write_reg(dev, phy, MII_AN_ADV,
(ability << 5) | 1);
tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
netdev_info(dev, "Starting autonegotiation\n");
tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
return;
}
}
if ((priv->aui) && (priv->phy_num != 0)) {
priv->phy_num = 0;
data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
| TLAN_NET_CFG_PHY_EN;
tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
tlan_set_timer(dev, msecs_to_jiffies(40), TLAN_TIMER_PHY_PDOWN);
return;
} else if (priv->phy_num == 0) {
control = 0;
tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
if (priv->aui) {
tctl |= TLAN_TC_AUISEL;
} else {
tctl &= ~TLAN_TC_AUISEL;
if (priv->duplex == TLAN_DUPLEX_FULL) {
control |= MII_GC_DUPLEX;
priv->tlan_full_duplex = true;
}
if (priv->speed == TLAN_SPEED_100)
control |= MII_GC_SPEEDSEL;
}
tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
}
tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
}
static void tlan_phy_finish_auto_neg(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
u16 an_adv;
u16 an_lpa;
u16 mode;
u16 phy;
u16 status;
phy = priv->phy[priv->phy_num];
tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
udelay(1000);
tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
if (!(status & MII_GS_AUTOCMPLT)) {
tlan_set_timer(dev, 2 * HZ, TLAN_TIMER_PHY_FINISH_AN);
return;
}
netdev_info(dev, "Autonegotiation complete\n");
tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
mode = an_adv & an_lpa & 0x03E0;
if (mode & 0x0100)
priv->tlan_full_duplex = true;
else if (!(mode & 0x0080) && (mode & 0x0040))
priv->tlan_full_duplex = true;
if ((!(mode & 0x0180)) &&
(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
(priv->phy_num != 0)) {
priv->phy_num = 0;
tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN);
return;
}
if (priv->phy_num == 0) {
if ((priv->duplex == TLAN_DUPLEX_FULL) ||
(an_adv & an_lpa & 0x0040)) {
tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
MII_GC_AUTOENB | MII_GC_DUPLEX);
netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
} else {
tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
MII_GC_AUTOENB);
netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
}
}
tlan_set_timer(dev, msecs_to_jiffies(100), TLAN_TIMER_FINISH_RESET);
}
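/* Once-a-second link monitor: on link loss it clears the LED and
 * carrier and, on boards using the internal 10 Mbps PHY, powers that
 * PHY down and renegotiates on the external one; LED and carrier are
 * restored when the link returns.
 */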
static void tlan_phy_monitor(struct timer_list *t)
{
struct tlan_priv *priv = from_timer(priv, t, media_timer);
struct net_device *dev = priv->dev;
u16 phy;
u16 phy_status;
phy = priv->phy[priv->phy_num];
tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
if (!(phy_status & MII_GS_LINK)) {
if (netif_carrier_ok(dev)) {
printk(KERN_DEBUG "TLAN: %s has lost link\n",
dev->name);
tlan_dio_write8(dev->base_addr, TLAN_LED_REG, 0);
netif_carrier_off(dev);
if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) {
u16 data = MII_GC_PDOWN | MII_GC_LOOPBK |
MII_GC_ISOLATE;
tlan_mii_sync(dev->base_addr);
tlan_mii_write_reg(dev, priv->phy[0],
MII_GEN_CTL, data);
priv->phy_num = 1;
tlan_set_timer(dev, msecs_to_jiffies(400),
TLAN_TIMER_PHY_PDOWN);
return;
}
}
}
if ((phy_status & MII_GS_LINK) && !netif_carrier_ok(dev)) {
tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
dev->name);
netif_carrier_on(dev);
}
priv->media_timer.expires = jiffies + HZ;
add_timer(&priv->media_timer);
}
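/* Bit-banged MII access through the NetSio register.  The __-prefixed
 * variants expect priv->lock to be held; MINTEN is turned off around
 * each transaction so the clocked frame cannot raise a spurious PHY
 * interrupt, and a read not acked by the PHY returns 0xffff.
 */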
static bool
__tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
u8 nack;
u16 sio, tmp;
u32 i;
bool err;
int minten;
struct tlan_priv *priv = netdev_priv(dev);
lockdep_assert_held(&priv->lock);
err = false;
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
tlan_mii_sync(dev->base_addr);
minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
if (minten)
tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
tlan_mii_send_data(dev->base_addr, 0x1, 2);
tlan_mii_send_data(dev->base_addr, 0x2, 2);
tlan_mii_send_data(dev->base_addr, phy, 5);
tlan_mii_send_data(dev->base_addr, reg, 5);
tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio);
tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
if (nack) {
for (i = 0; i < 16; i++) {
tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
tmp = 0xffff;
err = true;
} else {
for (tmp = 0, i = 0x8000; i; i >>= 1) {
tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
tmp |= i;
tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
}
tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
if (minten)
tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
*val = tmp;
return err;
}
static void tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg,
u16 *val)
{
struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
__tlan_mii_read_reg(dev, phy, reg, val);
spin_unlock_irqrestore(&priv->lock, flags);
}
static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
{
u16 sio;
u32 i;
if (num_bits == 0)
return;
outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
if (data & i)
tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
else
tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
}
}
static void tlan_mii_sync(u16 base_port)
{
int i;
u16 sio;
outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
for (i = 0; i < 32; i++) {
tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
}
static void
__tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
u16 sio;
int minten;
struct tlan_priv *priv = netdev_priv(dev);
lockdep_assert_held(&priv->lock);
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
tlan_mii_sync(dev->base_addr);
minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
if (minten)
tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
tlan_mii_send_data(dev->base_addr, 0x1, 2);
tlan_mii_send_data(dev->base_addr, 0x1, 2);
tlan_mii_send_data(dev->base_addr, phy, 5);
tlan_mii_send_data(dev->base_addr, reg, 5);
tlan_mii_send_data(dev->base_addr, 0x2, 2);
tlan_mii_send_data(dev->base_addr, val, 16);
tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
if (minten)
tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
}
static void
tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
__tlan_mii_write_reg(dev, phy, reg, val);
spin_unlock_irqrestore(&priv->lock, flags);
}
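/* Bit-banged serial-EEPROM access (I2C-style two-wire protocol)
 * through the same NetSio register.  tlan_ee_read_byte() is a classic
 * random read: start, write select 0xa0 plus the address, restart,
 * then read select 0xa1 and clock the byte in.
 */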
static void tlan_ee_send_start(u16 io_base)
{
u16 sio;
outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
}
static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
{
int err;
u8 place;
u16 sio;
outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
for (place = 0x80; place != 0; place >>= 1) {
if (place & data)
tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
else
tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
}
tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
if ((!err) && stop) {
tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
}
return err;
}
static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
{
u8 place;
u16 sio;
outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
*data = 0;
tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
for (place = 0x80; place; place >>= 1) {
tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
*data |= place;
tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
}
tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
if (!stop) {
tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
} else {
tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
}
}
static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
{
int err;
struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
int ret = 0;
spin_lock_irqsave(&priv->lock, flags);
tlan_ee_send_start(dev->base_addr);
err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
if (err) {
ret = 1;
goto fail;
}
err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
if (err) {
ret = 2;
goto fail;
}
tlan_ee_send_start(dev->base_addr);
err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
if (err) {
ret = 3;
goto fail;
}
tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
fail:
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}