#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/aer.h>
#include "../pci.h"
#include "portdrv.h"
#define PCIE_PORT_MAX_MSI_ENTRIES 32
#define get_descriptor_id(type, service) (((type - 4) << 8) | service)
struct portdrv_service_data {
struct pcie_port_service_driver *drv;
struct device *dev;
u32 service;
};
static void release_pcie_device(struct device *dev)
{
kfree(to_pcie_device(dev));
}
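
/*
 * Gather the MSI/MSI-X message numbers used by the services requested in
 * @mask: PME, hotplug and bandwidth notification share the Interrupt
 * Message Number in PCI_EXP_FLAGS, while AER and DPC carry their own in
 * the Root Error Status and DPC Capability registers.  Returns the number
 * of vectors needed to cover them (largest message number + 1), or 0 if
 * none of those services is present in @mask.
 */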
static int pcie_message_numbers(struct pci_dev *dev, int mask,
u32 *pme, u32 *aer, u32 *dpc)
{
u32 nvec = 0, pos;
u16 reg16;
if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
PCIE_PORT_SERVICE_BWNOTIF)) {
		pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
*pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
nvec = *pme + 1;
}
#ifdef CONFIG_PCIEAER
if (mask & PCIE_PORT_SERVICE_AER) {
u32 reg32;
pos = dev->aer_cap;
if (pos) {
			pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS,
					      &reg32);
*aer = (reg32 & PCI_ERR_ROOT_AER_IRQ) >> 27;
nvec = max(nvec, *aer + 1);
}
}
#endif
if (mask & PCIE_PORT_SERVICE_DPC) {
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
if (pos) {
			pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP,
					     &reg16);
*dpc = reg16 & PCI_EXP_DPC_IRQ;
nvec = max(nvec, *dpc + 1);
}
}
return nvec;
}
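
/*
 * Try to set up MSI-X or MSI for the port: allocate an initial batch of
 * vectors, ask pcie_message_numbers() how many are actually needed, shrink
 * the allocation to that number if necessary, and record the Linux IRQ
 * number for each requested service in irqs[].  Returns 0 on success or a
 * negative errno.
 */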
static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
{
int nr_entries, nvec, pcie_irq;
u32 pme = 0, aer = 0, dpc = 0;
nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_entries < 0)
return nr_entries;
nvec = pcie_message_numbers(dev, mask, &pme, &aer, &dpc);
if (nvec > nr_entries) {
pci_free_irq_vectors(dev);
return -EIO;
}
if (nvec != nr_entries) {
pci_free_irq_vectors(dev);
nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_entries < 0)
return nr_entries;
}
if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
PCIE_PORT_SERVICE_BWNOTIF)) {
pcie_irq = pci_irq_vector(dev, pme);
irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pcie_irq;
irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pcie_irq;
irqs[PCIE_PORT_SERVICE_BWNOTIF_SHIFT] = pcie_irq;
}
if (mask & PCIE_PORT_SERVICE_AER)
irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, aer);
if (mask & PCIE_PORT_SERVICE_DPC)
irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, dpc);
return 0;
}
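
/*
 * Initialize irqs[] for the services in @mask.  MSI-X/MSI is preferred;
 * if the PME quirk forbids MSI or vector allocation fails, fall back to a
 * single legacy INTx interrupt shared by all services.
 */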
static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
int ret, i;
for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
irqs[i] = -1;
if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi())
goto legacy_irq;
if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0)
return 0;
legacy_irq:
ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
if (ret < 0)
return -ENODEV;
for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
irqs[i] = pci_irq_vector(dev, 0);
return 0;
}
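
/*
 * Return a bitmask of the port services (hotplug, AER, PME, DPC, link
 * bandwidth notification) that this port implements and that the OS is
 * allowed to handle natively.  Hotplug and PME interrupts are disabled
 * here; the respective service drivers re-enable them when they attach.
 */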
static int get_port_device_capability(struct pci_dev *dev)
{
struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
int services = 0;
if (dev->is_hotplug_bridge &&
(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) &&
(pcie_ports_native || host->native_pcie_hotplug)) {
services |= PCIE_PORT_SERVICE_HP;
pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
}
#ifdef CONFIG_PCIEAER
if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC) &&
dev->aer_cap && pci_aer_available() &&
(pcie_ports_native || host->native_aer))
services |= PCIE_PORT_SERVICE_AER;
#endif
if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC) &&
(pcie_ports_native || host->native_pme)) {
services |= PCIE_PORT_SERVICE_PME;
pcie_pme_interrupt_enable(dev, false);
}
if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC) &&
pci_aer_available() &&
(pcie_ports_dpc_native || (services & PCIE_PORT_SERVICE_AER)))
services |= PCIE_PORT_SERVICE_DPC;
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
u32 linkcap;
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
if (linkcap & PCI_EXP_LNKCAP_LBNC)
services |= PCIE_PORT_SERVICE_BWNOTIF;
}
return services;
}
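
/*
 * Allocate and register the struct pcie_device representing one port
 * service.  The new device is a child of the port on pcie_port_bus_type
 * and is freed by release_pcie_device() when its last reference is gone.
 */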
static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
{
int retval;
struct pcie_device *pcie;
struct device *device;
pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->port = pdev;
pcie->irq = irq;
pcie->service = service;
device = &pcie->device;
device->bus = &pcie_port_bus_type;
device->release = release_pcie_device;
dev_set_name(device, "%s:pcie%03x",
pci_name(pdev),
get_descriptor_id(pci_pcie_type(pdev), service));
device->parent = &pdev->dev;
device_enable_async_suspend(device);
retval = device_register(device);
if (retval) {
put_device(device);
return retval;
}
pm_runtime_no_callbacks(device);
return 0;
}
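
/*
 * Enable the port, work out which services it provides, set up their
 * interrupts and register a pcie_device for each one.  If interrupt setup
 * fails, only the hotplug service (which can poll) is kept.
 */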
static int pcie_port_device_register(struct pci_dev *dev)
{
int status, capabilities, i, nr_service;
int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
status = pci_enable_device(dev);
if (status)
return status;
capabilities = get_port_device_capability(dev);
if (!capabilities)
return 0;
pci_set_master(dev);
status = pcie_init_service_irqs(dev, irqs, capabilities);
if (status) {
capabilities &= PCIE_PORT_SERVICE_HP;
if (!capabilities)
goto error_disable;
}
status = -ENODEV;
nr_service = 0;
for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
int service = 1 << i;
if (!(capabilities & service))
continue;
if (!pcie_device_init(dev, service, irqs[i]))
nr_service++;
}
if (!nr_service)
goto error_cleanup_irqs;
return 0;
error_cleanup_irqs:
pci_free_irq_vectors(dev);
error_disable:
pci_disable_device(dev);
return status;
}
typedef int (*pcie_callback_t)(struct pcie_device *);
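
/*
 * device_for_each_child() callback: for every bound port service device,
 * invoke the service driver callback stored at byte offset *(size_t *)data
 * within struct pcie_port_service_driver, if one is set.
 */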
static int pcie_port_device_iter(struct device *dev, void *data)
{
struct pcie_port_service_driver *service_driver;
size_t offset = *(size_t *)data;
pcie_callback_t cb;
if ((dev->bus == &pcie_port_bus_type) && dev->driver) {
service_driver = to_service_driver(dev->driver);
cb = *(pcie_callback_t *)((void *)service_driver + offset);
if (cb)
return cb(to_pcie_device(dev));
}
return 0;
}
#ifdef CONFIG_PM
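/*
 * The system and runtime PM callbacks below simply forward the event to
 * the corresponding callback of every child service driver.
 */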
static int pcie_port_device_suspend(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, suspend);
return device_for_each_child(dev, &off, pcie_port_device_iter);
}
static int pcie_port_device_resume_noirq(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
return device_for_each_child(dev, &off, pcie_port_device_iter);
}
static int pcie_port_device_resume(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, resume);
return device_for_each_child(dev, &off, pcie_port_device_iter);
}
static int pcie_port_device_runtime_suspend(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, runtime_suspend);
return device_for_each_child(dev, &off, pcie_port_device_iter);
}
static int pcie_port_device_runtime_resume(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
return device_for_each_child(dev, &off, pcie_port_device_iter);
}
#endif /* PM */
static int remove_iter(struct device *dev, void *data)
{
if (dev->bus == &pcie_port_bus_type)
device_unregister(dev);
return 0;
}
static int find_service_iter(struct device *device, void *data)
{
struct pcie_port_service_driver *service_driver;
struct portdrv_service_data *pdrvs;
u32 service;
pdrvs = (struct portdrv_service_data *) data;
service = pdrvs->service;
if (device->bus == &pcie_port_bus_type && device->driver) {
service_driver = to_service_driver(device->driver);
if (service_driver->service == service) {
pdrvs->drv = service_driver;
pdrvs->dev = device;
return 1;
}
}
return 0;
}
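
/**
 * pcie_port_find_device - find the service device for a given port service
 * @dev: PCI Express port the service is associated with
 * @service: service to look up
 *
 * Returns the struct device of the matching, bound service, or NULL if it
 * is not found.  No reference is taken on the returned device.
 */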
struct device *pcie_port_find_device(struct pci_dev *dev,
u32 service)
{
struct device *device;
struct portdrv_service_data pdrvs;
pdrvs.dev = NULL;
pdrvs.service = service;
device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
device = pdrvs.dev;
return device;
}
EXPORT_SYMBOL_GPL(pcie_port_find_device);
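
/*
 * Unregister all child service devices associated with the port and
 * release its interrupt vectors.
 */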
static void pcie_port_device_remove(struct pci_dev *dev)
{
device_for_each_child(&dev->dev, NULL, remove_iter);
pci_free_irq_vectors(dev);
}
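
/*
 * Bus probe callback: run the service driver's ->probe() and take a
 * reference on the service device if it succeeds.
 */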
static int pcie_port_probe_service(struct device *dev)
{
struct pcie_device *pciedev;
struct pcie_port_service_driver *driver;
int status;
if (!dev || !dev->driver)
return -ENODEV;
driver = to_service_driver(dev->driver);
if (!driver || !driver->probe)
return -ENODEV;
pciedev = to_pcie_device(dev);
status = driver->probe(pciedev);
if (status)
return status;
get_device(dev);
return 0;
}
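
/*
 * Bus remove callback: run the service driver's ->remove() and drop the
 * reference taken in pcie_port_probe_service().
 */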
static int pcie_port_remove_service(struct device *dev)
{
struct pcie_device *pciedev;
struct pcie_port_service_driver *driver;
if (!dev || !dev->driver)
return 0;
pciedev = to_pcie_device(dev);
driver = to_service_driver(dev->driver);
if (driver && driver->remove) {
driver->remove(pciedev);
put_device(dev);
}
return 0;
}
static void pcie_port_shutdown_service(struct device *dev) {}
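
/*
 * Register a PCIe port service driver on pcie_port_bus_type.  Fails with
 * -ENODEV when port services have been disabled on the command line.
 */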
int pcie_port_service_register(struct pcie_port_service_driver *new)
{
if (pcie_ports_disabled)
return -ENODEV;
new->driver.name = new->name;
new->driver.bus = &pcie_port_bus_type;
new->driver.probe = pcie_port_probe_service;
new->driver.remove = pcie_port_remove_service;
new->driver.shutdown = pcie_port_shutdown_service;
return driver_register(&new->driver);
}
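
/* Unregister a PCIe port service driver. */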
void pcie_port_service_unregister(struct pcie_port_service_driver *drv)
{
driver_unregister(&drv->driver);
}
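
/*
 * Command-line control ("pcie_ports="): "compat" disables the port driver
 * and its services, "native" uses the port services regardless of the
 * platform's native-handling flags, and "dpc-native" permits DPC even when
 * AER control has not been granted.
 */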
bool pcie_ports_disabled;
bool pcie_ports_native;
bool pcie_ports_dpc_native;
static int __init pcie_port_setup(char *str)
{
if (!strncmp(str, "compat", 6))
pcie_ports_disabled = true;
else if (!strncmp(str, "native", 6))
pcie_ports_native = true;
else if (!strncmp(str, "dpc-native", 10))
pcie_ports_dpc_native = true;
return 1;
}
__setup("pcie_ports=", pcie_port_setup);
#ifdef CONFIG_PM
static int pcie_port_runtime_suspend(struct device *dev)
{
if (!to_pci_dev(dev)->bridge_d3)
return -EBUSY;
return pcie_port_device_runtime_suspend(dev);
}
static int pcie_port_runtime_idle(struct device *dev)
{
return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY;
}
static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.suspend = pcie_port_device_suspend,
.resume_noirq = pcie_port_device_resume_noirq,
.resume = pcie_port_device_resume,
.freeze = pcie_port_device_suspend,
.thaw = pcie_port_device_resume,
.poweroff = pcie_port_device_suspend,
.restore_noirq = pcie_port_device_resume_noirq,
.restore = pcie_port_device_resume,
.runtime_suspend = pcie_port_runtime_suspend,
.runtime_resume = pcie_port_device_runtime_resume,
.runtime_idle = pcie_port_runtime_idle,
};
#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
#else /* !PM */
#define PCIE_PORTDRV_PM_OPS NULL
#endif /* !PM */
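
/*
 * PCI probe: only Root Ports, Switch Upstream/Downstream Ports and Root
 * Complex Event Collectors are handled.  Register the port's service
 * devices and, when the bridge may enter D3, enable runtime PM with a
 * short autosuspend delay.
 */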
static int pcie_portdrv_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
int type = pci_pcie_type(dev);
int status;
if (!pci_is_pcie(dev) ||
((type != PCI_EXP_TYPE_ROOT_PORT) &&
(type != PCI_EXP_TYPE_UPSTREAM) &&
(type != PCI_EXP_TYPE_DOWNSTREAM) &&
(type != PCI_EXP_TYPE_RC_EC)))
return -ENODEV;
if (type == PCI_EXP_TYPE_RC_EC)
pcie_link_rcec(dev);
status = pcie_port_device_register(dev);
if (status)
return status;
pci_save_state(dev);
dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE |
DPM_FLAG_SMART_SUSPEND);
if (pci_bridge_d3_possible(dev)) {
pm_runtime_set_autosuspend_delay(&dev->dev, 100);
pm_runtime_use_autosuspend(&dev->dev);
pm_runtime_mark_last_busy(&dev->dev);
pm_runtime_put_autosuspend(&dev->dev);
pm_runtime_allow(&dev->dev);
}
return 0;
}
static void pcie_portdrv_remove(struct pci_dev *dev)
{
if (pci_bridge_d3_possible(dev)) {
pm_runtime_forbid(&dev->dev);
pm_runtime_get_noresume(&dev->dev);
pm_runtime_dont_use_autosuspend(&dev->dev);
}
pcie_port_device_remove(dev);
pci_disable_device(dev);
}
static void pcie_portdrv_shutdown(struct pci_dev *dev)
{
if (pci_bridge_d3_possible(dev)) {
pm_runtime_forbid(&dev->dev);
pm_runtime_get_noresume(&dev->dev);
pm_runtime_dont_use_autosuspend(&dev->dev);
}
pcie_port_device_remove(dev);
}
static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
pci_channel_state_t error)
{
if (error == pci_channel_io_frozen)
return PCI_ERS_RESULT_NEED_RESET;
return PCI_ERS_RESULT_CAN_RECOVER;
}
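
/*
 * After a reset, let each service driver recover via its ->slot_reset()
 * callback, then restore and re-save the port's configuration space.
 */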
static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, slot_reset);
device_for_each_child(&dev->dev, &off, pcie_port_device_iter);
pci_restore_state(dev);
pci_save_state(dev);
return PCI_ERS_RESULT_RECOVERED;
}
static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev)
{
return PCI_ERS_RESULT_RECOVERED;
}
static const struct pci_device_id port_pci_ids[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL, ~0) },
{ PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE, ~0) },
{ PCI_DEVICE_CLASS(((PCI_CLASS_SYSTEM_RCEC << 8) | 0x00), ~0) },
{ },
};
static const struct pci_error_handlers pcie_portdrv_err_handler = {
.error_detected = pcie_portdrv_error_detected,
.slot_reset = pcie_portdrv_slot_reset,
.mmio_enabled = pcie_portdrv_mmio_enabled,
};
static struct pci_driver pcie_portdriver = {
.name = "pcieport",
.id_table = &port_pci_ids[0],
.probe = pcie_portdrv_probe,
.remove = pcie_portdrv_remove,
.shutdown = pcie_portdrv_shutdown,
.err_handler = &pcie_portdrv_err_handler,
.driver_managed_dma = true,
.driver.pm = PCIE_PORTDRV_PM_OPS,
};
static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
{
pr_notice("%s detected: will not use MSI for PCIe PME signaling\n",
d->ident);
pcie_pme_disable_msi();
return 0;
}
static const struct dmi_system_id pcie_portdrv_dmi_table[] __initconst = {
{
.callback = dmi_pcie_pme_disable_msi,
.ident = "MSI Wind U-100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"MICRO-STAR INTERNATIONAL CO., LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "U-100"),
},
},
{}
};
static void __init pcie_init_services(void)
{
pcie_aer_init();
pcie_pme_init();
pcie_dpc_init();
pcie_hp_init();
}
static int __init pcie_portdrv_init(void)
{
if (pcie_ports_disabled)
return -EACCES;
pcie_init_services();
dmi_check_system(pcie_portdrv_dmi_table);
return pci_register_driver(&pcie_portdriver);
}
device_initcall(pcie_portdrv_init);