Release 4.11 drivers/pci/msi.c
/*
* File: msi.c
* Purpose: PCI Message Signaled Interrupt (MSI)
*
* Copyright (C) 2003-2004 Intel
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
* Copyright (C) 2016 Christoph Hellwig.
*/
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/acpi_iort.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include "pci.h"
static int pci_msi_enable = 1;
int pci_msi_ignore_mask;
#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
struct irq_domain *domain;
domain = dev_get_msi_domain(&dev->dev);
if (domain && irq_domain_is_hierarchy(domain))
return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
return arch_setup_msi_irqs(dev, nvec, type);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 51 | 79.69% | 1 | 25.00% |
Christoph Hellwig | 8 | 12.50% | 2 | 50.00% |
Marc Zyngier | 5 | 7.81% | 1 | 25.00% |
Total | 64 | 100.00% | 4 | 100.00% |
static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
{
struct irq_domain *domain;
domain = dev_get_msi_domain(&dev->dev);
if (domain && irq_domain_is_hierarchy(domain))
msi_domain_free_irqs(domain, &dev->dev);
else
arch_teardown_msi_irqs(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 38 | 74.51% | 1 | 25.00% |
Christoph Hellwig | 8 | 15.69% | 2 | 50.00% |
Marc Zyngier | 5 | 9.80% | 1 | 25.00% |
Total | 51 | 100.00% | 4 | 100.00% |
#else
#define pci_msi_setup_msi_irqs arch_setup_msi_irqs
#define pci_msi_teardown_msi_irqs arch_teardown_msi_irqs
#endif
/* Arch hooks */
int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
struct msi_controller *chip = dev->bus->msi;
int err;
if (!chip || !chip->setup_irq)
return -EINVAL;
err = chip->setup_irq(chip, dev, desc);
if (err < 0)
return err;
irq_set_chip_data(desc->irq, chip);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thierry Reding | 53 | 67.95% | 1 | 25.00% |
Thomas Petazzoni | 20 | 25.64% | 1 | 25.00% |
Lorenzo Pieralisi | 4 | 5.13% | 1 | 25.00% |
Yijing Wang | 1 | 1.28% | 1 | 25.00% |
Total | 78 | 100.00% | 4 | 100.00% |
void __weak arch_teardown_msi_irq(unsigned int irq)
{
struct msi_controller *chip = irq_get_chip_data(irq);
if (!chip || !chip->teardown_irq)
return;
chip->teardown_irq(chip, irq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thierry Reding | 31 | 77.50% | 1 | 33.33% |
Thomas Petazzoni | 8 | 20.00% | 1 | 33.33% |
Yijing Wang | 1 | 2.50% | 1 | 33.33% |
Total | 40 | 100.00% | 3 | 100.00% |
int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
struct msi_controller *chip = dev->bus->msi;
struct msi_desc *entry;
int ret;
if (chip && chip->setup_irqs)
return chip->setup_irqs(chip, dev, nvec, type);
/*
* If an architecture wants to support multiple MSI, it needs to
* override arch_setup_msi_irqs()
*/
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
for_each_pci_msi_entry(entry, dev) {
ret = arch_setup_msi_irq(dev, entry);
if (ret < 0)
return ret;
if (ret > 0)
return -ENOSPC;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Bunk | 41 | 36.94% | 1 | 14.29% |
Lucas Stach | 33 | 29.73% | 1 | 14.29% |
Michael Ellerman | 19 | 17.12% | 2 | 28.57% |
Matthew Wilcox | 14 | 12.61% | 1 | 14.29% |
Thomas Petazzoni | 2 | 1.80% | 1 | 14.29% |
Jiang Liu | 2 | 1.80% | 1 | 14.29% |
Total | 111 | 100.00% | 7 | 100.00% |
/*
* We have a default implementation available as a separate non-weak
* function, as it is used by the Xen x86 PCI code
*/
void default_teardown_msi_irqs(struct pci_dev *dev)
{
int i;
struct msi_desc *entry;
for_each_pci_msi_entry(entry, dev)
if (entry->irq)
for (i = 0; i < entry->nvec_used; i++)
arch_teardown_msi_irq(entry->irq + i);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Bunk | 26 | 48.15% | 1 | 14.29% |
Matthew Wilcox | 14 | 25.93% | 1 | 14.29% |
Michael Ellerman | 5 | 9.26% | 1 | 14.29% |
Jiang Liu | 5 | 9.26% | 2 | 28.57% |
Alexander Gordeev | 3 | 5.56% | 1 | 14.29% |
Thomas Gleixner | 1 | 1.85% | 1 | 14.29% |
Total | 54 | 100.00% | 7 | 100.00% |
void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
{
return default_teardown_msi_irqs(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Petazzoni | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
static void default_restore_msi_irq(struct pci_dev *dev, int irq)
{
struct msi_desc *entry;
entry = NULL;
if (dev->msix_enabled) {
for_each_pci_msi_entry(entry, dev) {
if (irq == entry->irq)
break;
}
} else if (dev->msi_enabled) {
entry = irq_get_msi_desc(irq);
}
if (entry)
__pci_write_msi_msg(entry, &entry->msg);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Konrad Rzeszutek Wilk | 72 | 92.31% | 1 | 20.00% |
Jiang Liu | 3 | 3.85% | 2 | 40.00% |
Duan Zhenzhong | 2 | 2.56% | 1 | 20.00% |
Yijing Wang | 1 | 1.28% | 1 | 20.00% |
Total | 78 | 100.00% | 5 | 100.00% |
void __weak arch_restore_msi_irqs(struct pci_dev *dev)
{
return default_restore_msi_irqs(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Petazzoni | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
static inline __attribute_const__ u32 msi_mask(unsigned x)
{
/* Don't shift by >= width of type */
if (x >= 5)
return 0xffffffff;
return (1 << (1 << x)) - 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 34 | 100.00% | 2 | 100.00% |
Total | 34 | 100.00% | 2 | 100.00% |
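Worked expansion of msi_mask() for each Multiple Message Capable value (illustrative only, not part of the file); multi_cap encodes log2 of the vector count, so the function returns one mask bit per implemented vector:

/*
 * msi_mask(0) -> (1 << 1)  - 1 = 0x01       (1 vector)
 * msi_mask(1) -> (1 << 2)  - 1 = 0x03       (2 vectors)
 * msi_mask(2) -> (1 << 4)  - 1 = 0x0f       (4 vectors)
 * msi_mask(3) -> (1 << 8)  - 1 = 0xff       (8 vectors)
 * msi_mask(4) -> (1 << 16) - 1 = 0xffff     (16 vectors)
 * msi_mask(5) -> 0xffffffff                 (32 vectors; avoids a
 *						shift by the type width)
 */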
/*
* PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
* mask all MSI interrupts by clearing the MSI enable bit does not work
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
*/
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
u32 mask_bits = desc->masked;
if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
return 0;
mask_bits &= ~mask;
mask_bits |= flag;
pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
mask_bits);
return mask_bits;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 37 | 58.73% | 2 | 25.00% |
Hidetoshi Seto | 8 | 12.70% | 1 | 12.50% |
Mitch A Williams | 8 | 12.70% | 1 | 12.50% |
Yinghai Lu | 4 | 6.35% | 1 | 12.50% |
Jiang Liu | 3 | 4.76% | 1 | 12.50% |
Yijing Wang | 2 | 3.17% | 1 | 12.50% |
Thomas Gleixner | 1 | 1.59% | 1 | 12.50% |
Total | 63 | 100.00% | 8 | 100.00% |
static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hidetoshi Seto | 23 | 76.67% | 1 | 33.33% |
Matthew Wilcox | 6 | 20.00% | 1 | 33.33% |
Thomas Gleixner | 1 | 3.33% | 1 | 33.33% |
Total | 30 | 100.00% | 3 | 100.00% |
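The two call patterns used later in this file illustrate the mask/flag pair: the whole-function form sets or clears every per-vector mask bit at once, while the per-vector form from msi_set_mask_bit() touches only the vector backing one irq (informative summary of code appearing below, not new behaviour):

/*
 * With mask = msi_mask(desc->msi_attrib.multi_cap):
 *
 *	msi_mask_irq(desc, mask, mask);			 mask every vector
 *	msi_mask_irq(desc, 1 << off, flag << off);	 vector 'off' only
 */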
static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
{
return desc->mask_base +
desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
/*
* This internal function does not flush PCI writes to the device.
* All users must ensure that they read from the device before either
* assuming that the device state is up to date, or returning out of this
* file. This saves a few milliseconds when initialising devices with lots
* of MSI-X interrupts.
*/
u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
{
u32 mask_bits = desc->masked;
if (pci_msi_ignore_mask)
return 0;
mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
if (flag)
mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL);
return mask_bits;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 27 | 49.09% | 1 | 14.29% |
Yijing Wang | 7 | 12.73% | 1 | 14.29% |
Sheng Yang | 6 | 10.91% | 1 | 14.29% |
Mitch A Williams | 5 | 9.09% | 1 | 14.29% |
Hidetoshi Seto | 5 | 9.09% | 1 | 14.29% |
Christoph Hellwig | 4 | 7.27% | 1 | 14.29% |
Thomas Gleixner | 1 | 1.82% | 1 | 14.29% |
Total | 55 | 100.00% | 7 | 100.00% |
static void msix_mask_irq(struct msi_desc *desc, u32 flag)
{
desc->masked = __pci_msix_desc_mask_irq(desc, flag);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hidetoshi Seto | 18 | 72.00% | 1 | 25.00% |
Matthew Wilcox | 5 | 20.00% | 1 | 25.00% |
Mitch A Williams | 1 | 4.00% | 1 | 25.00% |
Thomas Gleixner | 1 | 4.00% | 1 | 25.00% |
Total | 25 | 100.00% | 4 | 100.00% |
static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
if (desc->msi_attrib.is_msix) {
msix_mask_irq(desc, flag);
readl(desc->mask_base); /* Flush write to device */
} else {
unsigned offset = data->irq - desc->irq;
msi_mask_irq(desc, 1 << offset, flag << offset);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 43 | 56.58% | 3 | 33.33% |
Andrew Morton | 20 | 26.32% | 1 | 11.11% |
Thomas Gleixner | 7 | 9.21% | 1 | 11.11% |
Yinghai Lu | 3 | 3.95% | 2 | 22.22% |
Eric W. Biedermann | 2 | 2.63% | 1 | 11.11% |
Jiang Liu | 1 | 1.32% | 1 | 11.11% |
Total | 76 | 100.00% | 9 | 100.00% |
/**
* pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts
* @data: pointer to the irqdata associated with that interrupt
*/
void pci_msi_mask_irq(struct irq_data *data)
{
msi_set_mask_bit(data, 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 6 | 35.29% | 2 | 50.00% |
Matthew Wilcox | 6 | 35.29% | 1 | 25.00% |
Andrew Morton | 5 | 29.41% | 1 | 25.00% |
Total | 17 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(pci_msi_mask_irq);
/**
* pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts
* @data: pointer to the irqdata associated with that interrupt
*/
void pci_msi_unmask_irq(struct irq_data *data)
{
msi_set_mask_bit(data, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 9 | 52.94% | 1 | 20.00% |
Thomas Gleixner | 6 | 35.29% | 2 | 40.00% |
Andrew Morton | 1 | 5.88% | 1 | 20.00% |
Eric W. Biedermann | 1 | 5.88% | 1 | 20.00% |
Total | 17 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL_GPL(pci_msi_unmask_irq);
void default_restore_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
for_each_pci_msi_entry(entry, dev)
default_restore_msi_irq(dev, entry->irq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Duan Zhenzhong | 28 | 93.33% | 1 | 50.00% |
Jiang Liu | 2 | 6.67% | 1 | 50.00% |
Total | 30 | 100.00% | 2 | 100.00% |
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
struct pci_dev *dev = msi_desc_to_pci_dev(entry);
BUG_ON(dev->current_state != PCI_D0);
if (entry->msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
} else {
int pos = dev->msi_cap;
u16 data;
pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
&msg->address_lo);
if (entry->msi_attrib.is_64) {
pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
&msg->address_hi);
pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
} else {
msg->address_hi = 0;
pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data);
}
msg->data = data;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Hutchings | 140 | 79.10% | 1 | 9.09% |
Jiang Liu | 11 | 6.21% | 2 | 18.18% |
Björn Helgaas | 10 | 5.65% | 3 | 27.27% |
Eric W. Biedermann | 7 | 3.95% | 1 | 9.09% |
Christoph Hellwig | 3 | 1.69% | 1 | 9.09% |
Thomas Gleixner | 2 | 1.13% | 1 | 9.09% |
Yinghai Lu | 2 | 1.13% | 1 | 9.09% |
Andrew Morton | 2 | 1.13% | 1 | 9.09% |
Total | 177 | 100.00% | 11 | 100.00% |
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
struct pci_dev *dev = msi_desc_to_pci_dev(entry);
if (dev->current_state != PCI_D0) {
/* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
} else {
int pos = dev->msi_cap;
u16 msgctl;
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
msgctl &= ~PCI_MSI_FLAGS_QSIZE;
msgctl |= entry->msi_attrib.multiple << 4;
pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);
pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
msg->address_lo);
if (entry->msi_attrib.is_64) {
pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
msg->address_hi);
pci_write_config_word(dev, pos + PCI_MSI_DATA_64,
msg->data);
} else {
pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
msg->data);
}
}
entry->msg = *msg;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 81 | 38.03% | 3 | 15.00% |
Eric W. Biedermann | 48 | 22.54% | 2 | 10.00% |
Björn Helgaas | 14 | 6.57% | 4 | 20.00% |
Ben Hutchings | 14 | 6.57% | 1 | 5.00% |
Andrew Morton | 12 | 5.63% | 1 | 5.00% |
Yinghai Lu | 11 | 5.16% | 1 | 5.00% |
Jiang Liu | 11 | 5.16% | 2 | 10.00% |
Mark Maule | 7 | 3.29% | 1 | 5.00% |
Christoph Hellwig | 5 | 2.35% | 1 | 5.00% |
Ashok Raj | 4 | 1.88% | 1 | 5.00% |
Hidetoshi Seto | 3 | 1.41% | 1 | 5.00% |
Thomas Gleixner | 2 | 0.94% | 1 | 5.00% |
Mitch A Williams | 1 | 0.47% | 1 | 5.00% |
Total | 213 | 100.00% | 20 | 100.00% |
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
struct msi_desc *entry = irq_get_msi_desc(irq);
__pci_write_msi_msg(entry, msg);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 9 | 29.03% | 1 | 12.50% |
Matthew Wilcox | 6 | 19.35% | 1 | 12.50% |
Yinghai Lu | 6 | 19.35% | 1 | 12.50% |
Thomas Gleixner | 4 | 12.90% | 2 | 25.00% |
Mitch A Williams | 2 | 6.45% | 1 | 12.50% |
Eric W. Biedermann | 2 | 6.45% | 1 | 12.50% |
Jiang Liu | 2 | 6.45% | 1 | 12.50% |
Total | 31 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL_GPL(pci_write_msi_msg);
static void free_msi_irqs(struct pci_dev *dev)
{
struct list_head *msi_list = dev_to_msi_list(&dev->dev);
struct msi_desc *entry, *tmp;
struct attribute **msi_attrs;
struct device_attribute *dev_attr;
int i, count = 0;
for_each_pci_msi_entry(entry, dev)
if (entry->irq)
for (i = 0; i < entry->nvec_used; i++)
BUG_ON(irq_has_action(entry->irq + i));
pci_msi_teardown_msi_irqs(dev);
list_for_each_entry_safe(entry, tmp, msi_list, list) {
if (entry->msi_attrib.is_msix) {
if (list_is_last(&entry->list, msi_list))
iounmap(entry->mask_base);
}
list_del(&entry->list);
free_msi_entry(entry);
}
if (dev->msi_irq_groups) {
sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups);
msi_attrs = dev->msi_irq_groups[0]->attrs;
while (msi_attrs[count]) {
dev_attr = container_of(msi_attrs[count],
struct device_attribute, attr);
kfree(dev_attr->attr.name);
kfree(dev_attr);
++count;
}
kfree(msi_attrs);
kfree(dev->msi_irq_groups[0]);
kfree(dev->msi_irq_groups);
dev->msi_irq_groups = NULL;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Kroah-Hartman | 111 | 44.40% | 1 | 12.50% |
Hidetoshi Seto | 102 | 40.80% | 1 | 12.50% |
Jiang Liu | 22 | 8.80% | 3 | 37.50% |
Roland Dreier | 7 | 2.80% | 1 | 12.50% |
Alexei Starovoitov | 7 | 2.80% | 1 | 12.50% |
Prarit Bhargava | 1 | 0.40% | 1 | 12.50% |
Total | 250 | 100.00% | 8 | 100.00% |
static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
pci_intx(dev, enable);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 32 | 100.00% | 1 | 100.00% |
Total | 32 | 100.00% | 1 | 100.00% |
static void __pci_restore_msi_state(struct pci_dev *dev)
{
u16 control;
struct msi_desc *entry;
if (!dev->msi_enabled)
return;
entry = irq_get_msi_desc(dev->irq);
pci_intx_for_msi(dev, 0);
pci_msi_set_enable(dev, 0);
arch_restore_msi_irqs(dev);
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap),
entry->masked);
control &= ~PCI_MSI_FLAGS_QSIZE;
control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric W. Biedermann | 63 | 52.94% | 4 | 26.67% |
Matthew Wilcox | 22 | 18.49% | 2 | 13.33% |
Michael Ellerman | 10 | 8.40% | 1 | 6.67% |
Mark Maule | 7 | 5.88% | 1 | 6.67% |
Yijing Wang | 6 | 5.04% | 1 | 6.67% |
Björn Helgaas | 6 | 5.04% | 1 | 6.67% |
Konrad Rzeszutek Wilk | 1 | 0.84% | 1 | 6.67% |
David S. Miller | 1 | 0.84% | 1 | 6.67% |
Thomas Gleixner | 1 | 0.84% | 1 | 6.67% |
David Shaohua Li | 1 | 0.84% | 1 | 6.67% |
Michael S. Tsirkin | 1 | 0.84% | 1 | 6.67% |
Total | 119 | 100.00% | 15 | 100.00% |
static void __pci_restore_msix_state(struct pci_dev *dev)
{
struct msi_desc *entry;
if (!dev->msix_enabled)
return;
BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
/* route the table */
pci_intx_for_msi(dev, 0);
pci_msix_clear_and_set_ctrl(dev, 0,
PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
arch_restore_msi_irqs(dev);
for_each_pci_msi_entry(entry, dev)
msix_mask_irq(entry, entry->masked);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 26 | 30.23% | 1 | 10.00% |
David Shaohua Li | 17 | 19.77% | 1 | 10.00% |
Michael Ellerman | 12 | 13.95% | 2 | 20.00% |
Eric W. Biedermann | 12 | 13.95% | 2 | 20.00% |
Jiang Liu | 6 | 6.98% | 1 | 10.00% |
Yijing Wang | 6 | 6.98% | 1 | 10.00% |
Duan Zhenzhong | 5 | 5.81% | 1 | 10.00% |
Michael S. Tsirkin | 2 | 2.33% | 1 | 10.00% |
Total | 86 | 100.00% | 10 | 100.00% |
void pci_restore_msi_state(struct pci_dev *dev)
{
__pci_restore_msi_state(dev);
__pci_restore_msix_state(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michael Ellerman | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct msi_desc *entry;
unsigned long irq;
int retval;
retval = kstrtoul(attr->attr.name, 10, &irq);
if (retval)
return retval;
entry = irq_get_msi_desc(irq);
if (entry)
return sprintf(buf, "%s\n",
entry->msi_attrib.is_msix ? "msix" : "msi");
return -ENODEV;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Kroah-Hartman | 47 | 53.41% | 1 | 33.33% |
Neil Horman | 36 | 40.91% | 1 | 33.33% |
Yijing Wang | 5 | 5.68% | 1 | 33.33% |
Total | 88 | 100.00% | 3 | 100.00% |
static int populate_msi_sysfs(struct pci_dev *pdev)
{
struct attribute **msi_attrs;
struct attribute *msi_attr;
struct device_attribute *msi_dev_attr;
struct attribute_group *msi_irq_group;
const struct attribute_group **msi_irq_groups;
struct msi_desc *entry;
int ret = -ENOMEM;
int num_msi = 0;
int count = 0;
int i;
/* Determine how many msi entries we have */
for_each_pci_msi_entry(entry, pdev)
num_msi += entry->nvec_used;
if (!num_msi)
return 0;
/* Dynamically create the MSI attributes for the PCI device */
msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
if (!msi_attrs)
return -ENOMEM;
for_each_pci_msi_entry(entry, pdev) {
for (i = 0; i < entry->nvec_used; i++) {
msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
if (!msi_dev_attr)
goto error_attrs;
msi_attrs[count] = &msi_dev_attr->attr;
sysfs_attr_init(&msi_dev_attr->attr);
msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
entry->irq + i);
if (!msi_dev_attr->attr.name)
goto error_attrs;
msi_dev_attr->attr.mode = S_IRUGO;
msi_dev_attr->show = msi_mode_show;
++count;
}
}
msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
if (!msi_irq_group)
goto error_attrs;
msi_irq_group->name = "msi_irqs";
msi_irq_group->attrs = msi_attrs;
msi_irq_groups = kzalloc(sizeof(void *) * 2, GFP_KERNEL);
if (!msi_irq_groups)
goto error_irq_group;
msi_irq_groups[0] = msi_irq_group;
ret = sysfs_create_groups(&pdev->dev.kobj, msi_irq_groups);
if (ret)
goto error_irq_groups;
pdev->msi_irq_groups = msi_irq_groups;
return 0;
error_irq_groups:
kfree(msi_irq_groups);
error_irq_group:
kfree(msi_irq_group);
error_attrs:
count = 0;
msi_attr = msi_attrs[count];
while (msi_attr) {
msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
kfree(msi_attr->name);
kfree(msi_dev_attr);
++count;
msi_attr = msi_attrs[count];
}
kfree(msi_attrs);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Kroah-Hartman | 225 | 57.25% | 2 | 33.33% |
Neil Horman | 107 | 27.23% | 1 | 16.67% |
Jan Beulich | 30 | 7.63% | 1 | 16.67% |
Romain Bezut | 28 | 7.12% | 1 | 16.67% |
Jiang Liu | 3 | 0.76% | 1 | 16.67% |
Total | 393 | 100.00% | 6 | 100.00% |
static struct msi_desc *
msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
{
struct cpumask *masks = NULL;
struct msi_desc *entry;
u16 control;
if (affd) {
masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
pr_err("Unable to allocate affinity masks, ignoring\n");
}
/* MSI Entry Initialization */
entry = alloc_msi_entry(&dev->dev, nvec, masks);
if (!entry)
goto out;
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
entry->msi_attrib.is_msix = 0;
entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
entry->msi_attrib.entry_nr = 0;
entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
if (control & PCI_MSI_FLAGS_64BIT)
entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
else
entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
/* Save the initial mask status */
if (entry->msi_attrib.maskbit)
pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
out:
kfree(masks);
return entry;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 46 | 19.01% | 2 | 8.33% |
Andrew Morton | 42 | 17.36% | 1 | 4.17% |
Yijing Wang | 29 | 11.98% | 2 | 8.33% |
Eric W. Biedermann | 23 | 9.50% | 3 | 12.50% |
Jiang Liu | 20 | 8.26% | 2 | 8.33% |
Björn Helgaas | 17 | 7.02% | 3 | 12.50% |
Dan Carpenter | 16 | 6.61% | 1 | 4.17% |
Matthew Wilcox | 15 | 6.20% | 2 | 8.33% |
Christoph Hellwig | 10 | 4.13% | 2 | 8.33% |
Hidetoshi Seto | 8 | 3.31% | 1 | 4.17% |
David Shaohua Li | 7 | 2.89% | 1 | 4.17% |
Gavin Shan | 5 | 2.07% | 1 | 4.17% |
Roland Dreier | 3 | 1.24% | 2 | 8.33% |
Jike Song | 1 | 0.41% | 1 | 4.17% |
Total | 242 | 100.00% | 24 | 100.00% |
static int msi_verify_entries(struct pci_dev *dev)
{
struct msi_desc *entry;
for_each_pci_msi_entry(entry, dev) {
if (!dev->no_64bit_msi || !entry->msg.address_hi)
continue;
dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
" tried to assign one above 4G\n");
return -EIO;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Benjamin Herrenschmidt | 55 | 96.49% | 1 | 50.00% |
Jiang Liu | 2 | 3.51% | 1 | 50.00% |
Total | 57 | 100.00% | 2 | 100.00% |
/**
* msi_capability_init - configure device's MSI capability structure
* @dev: pointer to the pci_dev data structure of MSI device function
* @nvec: number of interrupts to allocate
* @affd: description of automatic irq affinity assignments (may be %NULL)
*
* Setup the MSI capability structure of the device with the requested
* number of interrupts. A return value of zero indicates the successful
* setup of an entry with the new MSI irq. A negative return value indicates
* an error, and a positive return value indicates the number of interrupts
* which could have been allocated.
*/
static int msi_capability_init(struct pci_dev *dev, int nvec,
const struct irq_affinity *affd)
{
struct msi_desc *entry;
int ret;
unsigned mask;
pci_msi_set_enable(dev, 0); /* Disable MSI during set up */
entry = msi_setup_entry(dev, nvec, affd);
if (!entry)
return -ENOMEM;
/* All MSIs are unmasked by default, Mask them all */
mask = msi_mask(entry->msi_attrib.multi_cap);
msi_mask_irq(entry, mask, mask);
list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret) {
msi_mask_irq(entry, mask, ~mask);
free_msi_irqs(dev);
return ret;
}
ret = msi_verify_entries(dev);
if (ret) {
msi_mask_irq(entry, mask, ~mask);
free_msi_irqs(dev);
return ret;
}
ret = populate_msi_sysfs(dev);
if (ret) {
msi_mask_irq(entry, mask, ~mask);
free_msi_irqs(dev);
return ret;
}
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
pci_msi_set_enable(dev, 1);
dev->msi_enabled = 1;
pcibios_free_irq(dev);
dev->irq = entry->irq;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yijing Wang | 54 | 23.38% | 2 | 7.69% |
Benjamin Herrenschmidt | 31 | 13.42% | 1 | 3.85% |
Neil Horman | 31 | 13.42% | 1 | 3.85% |
Eric W. Biedermann | 29 | 12.55% | 4 | 15.38% |
Michael Ellerman | 20 | 8.66% | 3 | 11.54% |
Andrew Morton | 15 | 6.49% | 1 | 3.85% |
Jiang Liu | 12 | 5.19% | 4 | 15.38% |
Hidetoshi Seto | 11 | 4.76% | 2 | 7.69% |
Mark Maule | 10 | 4.33% | 1 | 3.85% |
Matthew Wilcox | 7 | 3.03% | 3 | 11.54% |
Christoph Hellwig | 6 | 2.60% | 1 | 3.85% |
Thomas Gleixner | 2 | 0.87% | 1 | 3.85% |
Michael S. Tsirkin | 2 | 0.87% | 1 | 3.85% |
David S. Miller | 1 | 0.43% | 1 | 3.85% |
Total | 231 | 100.00% | 26 | 100.00% |
static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
{
resource_size_t phys_addr;
u32 table_offset;
unsigned long flags;
u8 bir;
pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
&table_offset);
bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
flags = pci_resource_flags(dev, bir);
if (!flags || (flags & IORESOURCE_UNSET))
return NULL;
table_offset &= PCI_MSIX_TABLE_OFFSET;
phys_addr = pci_resource_start(dev, bir) + table_offset;
return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hidetoshi Seto | 67 | 65.69% | 1 | 16.67% |
Yijing Wang | 27 | 26.47% | 1 | 16.67% |
Björn Helgaas | 4 | 3.92% | 2 | 33.33% |
Gavin Shan | 3 | 2.94% | 1 | 16.67% |
Kenji Kaneshige | 1 | 0.98% | 1 | 16.67% |
Total | 102 | 100.00% | 6 | 100.00% |
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
struct msix_entry *entries, int nvec,
const struct irq_affinity *affd)
{
struct cpumask *curmsk, *masks = NULL;
struct msi_desc *entry;
int ret, i;
if (affd) {
masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
pr_err("Unable to allocate affinity masks, ignoring\n");
}
for (i = 0, curmsk = masks; i < nvec; i++) {
entry = alloc_msi_entry(&dev->dev, 1, curmsk);
if (!entry) {
if (!i)
iounmap(base);
else
free_msi_irqs(dev);
/* Not enough memory. Don't try again */
ret = -ENOMEM;
goto out;
}
entry->msi_attrib.is_msix = 1;
entry->msi_attrib.is_64 = 1;
if (entries)
entry->msi_attrib.entry_nr = entries[i].entry;
else
entry->msi_attrib.entry_nr = i;
entry->msi_attrib.default_irq = dev->irq;
entry->mask_base = base;
list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
if (masks)
curmsk++;
}
ret = 0;
out:
kfree(masks);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hidetoshi Seto | 123 | 52.34% | 1 | 10.00% |
Thomas Gleixner | 59 | 25.11% | 2 | 20.00% |
Christoph Hellwig | 45 | 19.15% | 4 | 40.00% |
Jiang Liu | 7 | 2.98% | 2 | 20.00% |
Christophe Jaillet | 1 | 0.43% | 1 | 10.00% |
Total | 235 | 100.00% | 10 | 100.00% |
static void msix_program_entries(struct pci_dev *dev,
struct msix_entry *entries)
{
struct msi_desc *entry;
int i = 0;
for_each_pci_msi_entry(entry, dev) {
if (entries)
entries[i++].vector = entry->irq;
entry->masked = readl(pci_msix_desc_addr(entry) +
PCI_MSIX_ENTRY_VECTOR_CTRL);
msix_mask_irq(entry, 1);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hidetoshi Seto | 60 | 84.51% | 1 | 25.00% |
Christoph Hellwig | 9 | 12.68% | 2 | 50.00% |
Jiang Liu | 2 | 2.82% | 1 | 25.00% |
Total | 71 | 100.00% | 4 | 100.00% |
/**
* msix_capability_init - configure device's MSI-X capability
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of struct msix_entry entries
* @nvec: number of @entries
* @affd: Optional pointer to enable automatic affinity assignment
*
* Setup the MSI-X capability structure of the device function with the
* requested number of MSI-X irqs. A return of zero indicates successful
* setup of the requested MSI-X entries with allocated irqs; a non-zero
* return indicates failure.
**/
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
int nvec, const struct irq_affinity *affd)
{
int ret;
u16 control;
void __iomem *base;
/* Ensure MSI-X is disabled while it is set up */
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* Request & Map MSI-X table region */
base = msix_map_region(dev, msix_table_size(control));
if (!base)
return -ENOMEM;
ret = msix_setup_entries(dev, base, entries, nvec, affd);
if (ret)
return ret;
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
goto out_avail;
/* Check if all MSI entries honor device restrictions */
ret = msi_verify_entries(dev);
if (ret)
goto out_free;
/*
* Some devices require MSI-X to be enabled before we can touch the
* MSI-X registers. We need to mask all the vectors to prevent
* interrupts coming in before they're fully set up.
*/
pci_msix_clear_and_set_ctrl(dev, 0,
PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
msix_program_entries(dev, entries);
ret = populate_msi_sysfs(dev);
if (ret)
goto out_free;
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
dev->msix_enabled = 1;
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
pcibios_free_irq(dev);
return 0;
out_avail:
if (ret < 0) {
/*
* If we had some success, report the number of irqs
* we succeeded in setting up.
*/
struct msi_desc *entry;
int avail = 0;
for_each_pci_msi_entry(entry, dev) {
if (entry->irq != 0)
avail++;
}
if (avail != 0)
ret = avail;
}
out_free:
free_msi_irqs(dev);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hidetoshi Seto | 76 | 29.12% | 5 | 17.24% |
Andrew Morton | 44 | 16.86% | 1 | 3.45% |
Matthew Wilcox | 32 | 12.26% | 3 | 10.34% |
Roland Dreier | 15 | 5.75% | 2 | 6.90% |
Benjamin Herrenschmidt | 15 | 5.75% | 1 | 3.45% |
Michael Ellerman | 14 | 5.36% | 2 | 6.90% |
Neil Horman | 13 | 4.98% | 1 | 3.45% |
Yijing Wang | 11 | 4.21% | 1 | 3.45% |
Eric W. Biedermann | 9 | 3.45% | 2 | 6.90% |
Jiang Liu | 8 | 3.07% | 3 | 10.34% |
Christoph Hellwig | 6 | 2.30% | 1 | 3.45% |
Alexander Gordeev | 5 | 1.92% | 1 | 3.45% |
Michael S. Tsirkin | 3 | 1.15% | 1 | 3.45% |
Grant Grundler | 3 | 1.15% | 1 | 3.45% |
Gavin Shan | 3 | 1.15% | 1 | 3.45% |
Thomas Gleixner | 2 | 0.77% | 1 | 3.45% |
Björn Helgaas | 1 | 0.38% | 1 | 3.45% |
Greg Kroah-Hartman | 1 | 0.38% | 1 | 3.45% |
Total | 261 | 100.00% | 29 | 100.00% |
/**
* pci_msi_supported - check whether MSI may be enabled on a device
* @dev: pointer to the pci_dev data structure of MSI device function
* @nvec: how many MSIs have been requested?
*
* Look at global flags, the device itself, and its parent buses
* to determine if MSI/-X are supported for the device. If MSI/-X is
* supported return 1, else return 0.
**/
static int pci_msi_supported(struct pci_dev *dev, int nvec)
{
struct pci_bus *bus;
/* MSI must be globally enabled and supported by the device */
if (!pci_msi_enable)
return 0;
if (!dev || dev->no_msi || dev->current_state != PCI_D0)
return 0;
/*
* You can't ask to have 0 or less MSIs configured.
* a) it's stupid ..
* b) the list manipulation code assumes nvec >= 1.
*/
if (nvec < 1)
return 0;
/*
* Any bridge which does NOT route MSI transactions from its
* secondary bus to its primary bus must set NO_MSI flag on
* the secondary pci_bus.
* We expect only arch-specific PCI host bus controller drivers
* or quirks for specific PCI bridges to set NO_MSI.
*/
for (bus = dev->bus; bus; bus = bus->parent)
if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
return 0;
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Brice Goglin | 27 | 31.03% | 3 | 25.00% |
Alexander Gordeev | 17 | 19.54% | 2 | 16.67% |
Andrew Morton | 14 | 16.09% | 1 | 8.33% |
Michael Ellerman | 12 | 13.79% | 3 | 25.00% |
Michael S. Tsirkin | 10 | 11.49% | 1 | 8.33% |
Kristen Carlson Accardi | 6 | 6.90% | 1 | 8.33% |
Hidetoshi Seto | 1 | 1.15% | 1 | 8.33% |
Total | 87 | 100.00% | 12 | 100.00% |
/**
* pci_msi_vec_count - Return the number of MSI vectors a device can send
* @dev: device to report about
*
* This function returns the number of MSI vectors a device requested via
* its Multiple Message Capable register. It returns a negative errno if the
* device is not capable of sending MSI interrupts. Otherwise, the call succeeds
* and returns a power of two, up to a maximum of 2^5 (32), according to the
* MSI specification.
**/
int pci_msi_vec_count(struct pci_dev *dev)
{
int ret;
u16 msgctl;
if (!dev->msi_cap)
return -EINVAL;
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 35 | 60.34% | 1 | 25.00% |
Brice Goglin | 10 | 17.24% | 1 | 25.00% |
Alexander Gordeev | 7 | 12.07% | 1 | 25.00% |
Gavin Shan | 6 | 10.34% | 1 | 25.00% |
Total | 58 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(pci_msi_vec_count);
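A minimal caller-side sketch (hypothetical driver code, not part of this file) showing how a driver might clamp its request to what the device advertises before enabling MSI; "my_pick_msi_count" and "wanted" are made-up names:

#include <linux/pci.h>

static int my_pick_msi_count(struct pci_dev *pdev, int wanted)
{
	int cap = pci_msi_vec_count(pdev);	/* power of two, 1..32 */

	if (cap < 0)
		return cap;	/* no MSI capability: -EINVAL */
	return wanted < cap ? wanted : cap;
}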
void pci_msi_shutdown(struct pci_dev *dev)
{
struct msi_desc *desc;
u32 mask;
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
desc = first_pci_msi_entry(dev);
pci_msi_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
/* Return the device with MSI unmasked, as in its initial state */
mask = msi_mask(desc->msi_attrib.multi_cap);
/* Keep cached state to be restored */
__pci_msi_desc_mask_irq(desc, mask, ~mask);
/* Restore dev->irq to its default pin-assertion irq */
dev->irq = desc->msi_attrib.default_irq;
pcibios_alloc_irq(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 30 | 26.79% | 3 | 15.00% |
Yinghai Lu | 18 | 16.07% | 3 | 15.00% |
Andrew Morton | 16 | 14.29% | 1 | 5.00% |
Michael Ellerman | 12 | 10.71% | 2 | 10.00% |
Jiang Liu | 10 | 8.93% | 3 | 15.00% |
Eric W. Biedermann | 9 | 8.04% | 3 | 15.00% |
Roland Dreier | 7 | 6.25% | 1 | 5.00% |
Yijing Wang | 6 | 5.36% | 1 | 5.00% |
Hidetoshi Seto | 2 | 1.79% | 1 | 5.00% |
Thomas Gleixner | 1 | 0.89% | 1 | 5.00% |
Michael S. Tsirkin | 1 | 0.89% | 1 | 5.00% |
Total | 112 | 100.00% | 20 | 100.00% |
void pci_disable_msi(struct pci_dev *dev)
{
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
pci_msi_shutdown(dev);
free_msi_irqs(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 27 | 79.41% | 1 | 20.00% |
Andrew Morton | 3 | 8.82% | 1 | 20.00% |
Roland Dreier | 2 | 5.88% | 1 | 20.00% |
Eric W. Biedermann | 1 | 2.94% | 1 | 20.00% |
Hidetoshi Seto | 1 | 2.94% | 1 | 20.00% |
Total | 34 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(pci_disable_msi);
/**
* pci_msix_vec_count - return the number of device's MSI-X table entries
* @dev: pointer to the pci_dev data structure of MSI-X device function
* This function returns the number of the device's MSI-X table entries and
* therefore the number of MSI-X vectors the device is capable of sending.
* It returns a negative errno if the device is not capable of sending MSI-X
* interrupts.
**/
int pci_msix_vec_count(struct pci_dev *dev)
{
u16 control;
if (!dev->msix_cap)
return -EINVAL;
pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
return msix_table_size(control);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 32 | 72.73% | 1 | 20.00% |
Gavin Shan | 6 | 13.64% | 1 | 20.00% |
Alexander Gordeev | 3 | 6.82% | 1 | 20.00% |
Björn Helgaas | 3 | 6.82% | 2 | 40.00% |
Total | 44 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(pci_msix_vec_count);
static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
int nvec, const struct irq_affinity *affd)
{
int nr_entries;
int i, j;
if (!pci_msi_supported(dev, nvec))
return -EINVAL;
nr_entries = pci_msix_vec_count(dev);
if (nr_entries < 0)
return nr_entries;
if (nvec > nr_entries)
return nr_entries;
if (entries) {
/* Check for any invalid entries */
for (i = 0; i < nvec; i++) {
if (entries[i].entry >= nr_entries)
return -EINVAL; /* invalid entry */
for (j = i + 1; j < nvec; j++) {
if (entries[i].entry == entries[j].entry)
return -EINVAL; /* duplicate entry */
}
}
}
WARN_ON(!!dev->msix_enabled);
/* Check whether driver already requested for MSI irq */
if (dev->msi_enabled) {
dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
return -EINVAL;
}
return msix_capability_init(dev, entries, nvec, affd);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Roland Dreier | 112 | 58.33% | 2 | 11.11% |
Andrew Morton | 18 | 9.38% | 1 | 5.56% |
Eric W. Biedermann | 16 | 8.33% | 3 | 16.67% |
Alexander Gordeev | 16 | 8.33% | 2 | 11.11% |
Christoph Hellwig | 12 | 6.25% | 2 | 11.11% |
Thomas Gleixner | 7 | 3.65% | 1 | 5.56% |
Björn Helgaas | 6 | 3.12% | 2 | 11.11% |
Michael Ellerman | 2 | 1.04% | 2 | 11.11% |
Michael S. Tsirkin | 1 | 0.52% | 1 | 5.56% |
Ryan Desfosses | 1 | 0.52% | 1 | 5.56% |
Rafael J. Wysocki | 1 | 0.52% | 1 | 5.56% |
Total | 192 | 100.00% | 18 | 100.00% |
/**
* pci_enable_msix - configure device's MSI-X capability structure
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of MSI-X entries (optional)
* @nvec: number of MSI-X irqs requested for allocation by device driver
*
* Setup the MSI-X capability structure of the device function with the
* number of irqs requested when the device driver asks for MSI-X mode.
* A return of zero indicates successful configuration of the MSI-X
* capability structure with the newly allocated MSI-X irqs. A return of
* < 0 indicates a failure. A return of > 0 indicates that the request
* exceeds the number of irqs or MSI-X vectors available; the driver
* should re-send its request using the returned value.
**/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
return __pci_enable_msix(dev, entries, nvec, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 26 | 86.67% | 1 | 33.33% |
Roland Dreier | 3 | 10.00% | 1 | 33.33% |
Christoph Hellwig | 1 | 3.33% | 1 | 33.33% |
Total | 30 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(pci_enable_msix);
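A hedged sketch of the retry protocol the kerneldoc above describes: a positive return is the number of vectors actually available, which the driver feeds back into the next attempt. This is the deprecated interface; new code should prefer pci_enable_msix_range() or pci_alloc_irq_vectors(). "my_enable_msix" and the entries setup are hypothetical:

static int my_enable_msix(struct pci_dev *pdev,
			  struct msix_entry *entries, int nvec)
{
	int rc;

	while (nvec >= 1) {
		rc = pci_enable_msix(pdev, entries, nvec);
		if (rc == 0)
			return nvec;	/* success: nvec vectors enabled */
		if (rc < 0)
			return rc;	/* hard failure */
		nvec = rc;		/* only rc available: retry smaller */
	}
	return -ENOSPC;
}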
void pci_msix_shutdown(struct pci_dev *dev)
{
struct msi_desc *entry;
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
/* Return the device with MSI-X masked, as in its initial state */
for_each_pci_msi_entry(entry, dev) {
/* Keep cached states to be restored */
__pci_msix_desc_mask_irq(entry, 1);
}
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_intx_for_msi(dev, 1);
dev->msix_enabled = 0;
pcibios_alloc_irq(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hidetoshi Seto | 19 | 26.03% | 1 | 7.14% |
Michael Ellerman | 14 | 19.18% | 2 | 14.29% |
Andrew Morton | 13 | 17.81% | 1 | 7.14% |
Roland Dreier | 10 | 13.70% | 1 | 7.14% |
Jiang Liu | 7 | 9.59% | 2 | 14.29% |
Eric W. Biedermann | 3 | 4.11% | 2 | 14.29% |
Yinghai Lu | 2 | 2.74% | 1 | 7.14% |
Yijing Wang | 2 | 2.74% | 1 | 7.14% |
David S. Miller | 1 | 1.37% | 1 | 7.14% |
Michael S. Tsirkin | 1 | 1.37% | 1 | 7.14% |
Thomas Gleixner | 1 | 1.37% | 1 | 7.14% |
Total | 73 | 100.00% | 14 | 100.00% |
void pci_disable_msix(struct pci_dev *dev)
{
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
pci_msix_shutdown(dev);
free_msi_irqs(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 28 | 82.35% | 1 | 25.00% |
Roland Dreier | 3 | 8.82% | 1 | 25.00% |
Michael Ellerman | 2 | 5.88% | 1 | 25.00% |
Hidetoshi Seto | 1 | 2.94% | 1 | 25.00% |
Total | 34 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(pci_disable_msix);
void pci_no_msi(void)
{
pci_msi_enable = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
/**
* pci_msi_enabled - is MSI enabled?
*
* Returns true if MSI has not been disabled by the command-line option
* pci=nomsi.
**/
int pci_msi_enabled(void)
{
return pci_msi_enable;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Patterson | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(pci_msi_enabled);
static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
const struct irq_affinity *affd)
{
int nvec;
int rc;
if (!pci_msi_supported(dev, minvec))
return -EINVAL;
WARN_ON(!!dev->msi_enabled);
/* Check whether driver already requested MSI-X irqs */
if (dev->msix_enabled) {
dev_info(&dev->dev,
"can't enable MSI (MSI-X already enabled)\n");
return -EINVAL;
}
if (maxvec < minvec)
return -ERANGE;
nvec = pci_msi_vec_count(dev);
if (nvec < 0)
return nvec;
if (nvec < minvec)
return -ENOSPC;
if (nvec > maxvec)
nvec = maxvec;
for (;;) {
if (affd) {
nvec = irq_calc_affinity_vectors(nvec, affd);
if (nvec < minvec)
return -ENOSPC;
}
rc = msi_capability_init(dev, nvec, affd);
if (rc == 0)
return nvec;
if (rc < 0)
return rc;
if (rc < minvec)
return -ENOSPC;
nvec = rc;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexander Gordeev | 147 | 75.00% | 4 | 44.44% |
Christoph Hellwig | 43 | 21.94% | 3 | 33.33% |
Thomas Gleixner | 5 | 2.55% | 1 | 11.11% |
Dennis Chen | 1 | 0.51% | 1 | 11.11% |
Total | 196 | 100.00% | 9 | 100.00% |
/* deprecated, don't use */
int pci_enable_msi(struct pci_dev *dev)
{
int rc = __pci_enable_msi_range(dev, 1, 1, NULL);
if (rc < 0)
return rc;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 34 | 94.44% | 3 | 75.00% |
Alexander Gordeev | 2 | 5.56% | 1 | 25.00% |
Total | 36 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(pci_enable_msi);
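A minimal single-vector sketch (hypothetical driver code, not part of this file), assuming a made-up handler my_irq_handler() and cookie my_dev; real code would typically fall back to legacy INTx when MSI is unavailable:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;	/* hypothetical device handling elided */
}

static int my_setup_single_msi(struct pci_dev *pdev, void *my_dev)
{
	int rc = pci_enable_msi(pdev);

	if (rc)
		return rc;

	/* pdev->irq now holds the Linux IRQ number of the MSI vector */
	rc = request_irq(pdev->irq, my_irq_handler, 0, "my_drv", my_dev);
	if (rc)
		pci_disable_msi(pdev);
	return rc;
}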
static int __pci_enable_msix_range(struct pci_dev *dev,
struct msix_entry *entries, int minvec,
int maxvec, const struct irq_affinity *affd)
{
int rc, nvec = maxvec;
if (maxvec < minvec)
return -ERANGE;
for (;;) {
if (affd) {
nvec = irq_calc_affinity_vectors(nvec, affd);
if (nvec < minvec)
return -ENOSPC;
}
rc = __pci_enable_msix(dev, entries, nvec, affd);
if (rc == 0)
return nvec;
if (rc < 0)
return rc;
if (rc < minvec)
return -ENOSPC;
nvec = rc;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexander Gordeev | 69 | 56.56% | 1 | 20.00% |
Christoph Hellwig | 43 | 35.25% | 3 | 60.00% |
Thomas Gleixner | 10 | 8.20% | 1 | 20.00% |
Total | 122 | 100.00% | 5 | 100.00% |
/**
* pci_enable_msix_range - configure device's MSI-X capability structure
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of MSI-X entries
* @minvec: minimum number of MSI-X irqs requested
* @maxvec: maximum number of MSI-X irqs requested
*
* Setup the MSI-X capability structure of the device function with the
* maximum possible number of interrupts in the range between @minvec and
* @maxvec when the device driver asks for MSI-X mode. It returns a
* negative errno if an error occurs.
* If it succeeds, it returns the actual number of interrupts allocated and
* indicates the successful configuration of MSI-X capability structure
* with new allocated MSI-X interrupts.
**/
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
int minvec, int maxvec)
{
return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 32 | 91.43% | 2 | 66.67% |
Alexander Gordeev | 3 | 8.57% | 1 | 33.33% |
Total | 35 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(pci_enable_msix_range);
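A hedged range-allocation sketch (hypothetical driver code): request between 2 and 8 MSI-X vectors; on success the return value is the number actually allocated and each entries[i].vector holds a Linux IRQ number. All "my_*" names are made up:

#include <linux/pci.h>

static int my_enable_msix_range(struct pci_dev *pdev)
{
	struct msix_entry entries[8];
	int i, got;

	for (i = 0; i < 8; i++)
		entries[i].entry = i;	/* MSI-X table slots 0..7 */

	got = pci_enable_msix_range(pdev, entries, 2, 8);
	if (got < 0)
		return got;	/* fewer than 2 available, or error */
	return got;		/* 2..8 vectors enabled */
}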
/**
* pci_alloc_irq_vectors_affinity - allocate multiple IRQs for a device
* @dev: PCI device to operate on
* @min_vecs: minimum number of vectors required (must be >= 1)
* @max_vecs: maximum (desired) number of vectors
* @flags: flags or quirks for the allocation
* @affd: optional description of the affinity requirements
*
* Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
* vectors if available, and fall back to a single legacy vector
* if neither is available. Return the number of vectors allocated
* (which might be smaller than @max_vecs) if successful, or a negative
* error code on error. If less than @min_vecs interrupt vectors are
* available for @dev the function will fail with -ENOSPC.
*
* To get the Linux IRQ number used for a vector that can be passed to
* request_irq() use the pci_irq_vector() helper.
*/
int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags,
const struct irq_affinity *affd)
{
static const struct irq_affinity msi_default_affd;
int vecs = -ENOSPC;
if (flags & PCI_IRQ_AFFINITY) {
if (!affd)
affd = &msi_default_affd;
if (affd->pre_vectors + affd->post_vectors > min_vecs)
return -EINVAL;
/*
* If there aren't any vectors left after applying the pre/post
* vectors don't bother with assigning affinity.
*/
if (affd->pre_vectors + affd->post_vectors == min_vecs)
affd = NULL;
} else {
if (WARN_ON(affd))
affd = NULL;
}
if (flags & PCI_IRQ_MSIX) {
vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
affd);
if (vecs > 0)
return vecs;
}
if (flags & PCI_IRQ_MSI) {
vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
if (vecs > 0)
return vecs;
}
/* use legacy irq if allowed */
if (flags & PCI_IRQ_LEGACY) {
if (min_vecs == 1 && dev->irq) {
pci_intx(dev, 1);
return 1;
}
}
return vecs;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 201 | 100.00% | 8 | 100.00% |
Total | 201 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
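A hedged sketch of the modern allocation path described above, using the pci_alloc_irq_vectors() wrapper (which passes a NULL @affd): request 1..4 vectors of any type, then wire one handler per vector via pci_irq_vector(). "my_irq_handler" and the other "my_*" identifiers are hypothetical:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_irq_handler(int irq, void *data);	/* as sketched earlier */

static int my_alloc_vectors(struct pci_dev *pdev, void *my_dev)
{
	int i, rc, nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 4,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps vector index -> Linux IRQ number */
		rc = request_irq(pci_irq_vector(pdev, i), my_irq_handler,
				 0, "my_drv", my_dev);
		if (rc)
			goto err;
	}
	return 0;
err:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), my_dev);
	pci_free_irq_vectors(pdev);
	return rc;
}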
/**
* pci_free_irq_vectors - free previously allocated IRQs for a device
* @dev: PCI device to operate on
*
* Undoes the allocations and enabling in pci_alloc_irq_vectors().
*/
void pci_free_irq_vectors(struct pci_dev *dev)
{
pci_disable_msix(dev);
pci_disable_msi(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(pci_free_irq_vectors);
/**
* pci_irq_vector - return Linux IRQ number of a device vector
* @dev: PCI device to operate on
* @nr: device-relative interrupt vector index (0-based).
*/
int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
if (dev->msix_enabled) {
struct msi_desc *entry;
int i = 0;
for_each_pci_msi_entry(entry, dev) {
if (i == nr)
return entry->irq;
i++;
}
WARN_ON_ONCE(1);
return -EINVAL;
}
if (dev->msi_enabled) {
struct msi_desc *entry = first_pci_msi_entry(dev);
if (WARN_ON_ONCE(nr >= entry->nvec_used))
return -EINVAL;
} else {
if (WARN_ON_ONCE(nr > 0))
return -EINVAL;
}
return dev->irq + nr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 119 | 100.00% | 1 | 100.00% |
Total | 119 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(pci_irq_vector);
/**
* pci_irq_get_affinity - return the affinity of a particular msi vector
* @dev: PCI device to operate on
* @nr: device-relative interrupt vector index (0-based).
*/
const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
{
if (dev->msix_enabled) {
struct msi_desc *entry;
int i = 0;
for_each_pci_msi_entry(entry, dev) {
if (i == nr)
return entry->affinity;
i++;
}
WARN_ON_ONCE(1);
return NULL;
} else if (dev->msi_enabled) {
struct msi_desc *entry = first_pci_msi_entry(dev);
if (WARN_ON_ONCE(!entry || !entry->affinity ||
nr >= entry->nvec_used))
return NULL;
return &entry->affinity[nr];
} else {
return cpu_possible_mask;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 115 | 95.83% | 1 | 50.00% |
Jan Beulich | 5 | 4.17% | 1 | 50.00% |
Total | 120 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(pci_irq_get_affinity);
/**
* pci_irq_get_node - return the numa node of a particular msi vector
* @pdev: PCI device to operate on
* @vec: device-relative interrupt vector index (0-based).
*/
int pci_irq_get_node(struct pci_dev *pdev, int vec)
{
const struct cpumask *mask;
mask = pci_irq_get_affinity(pdev, vec);
if (mask)
return local_memory_node(cpu_to_node(cpumask_first(mask)));
return dev_to_node(&pdev->dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Shaohua Li | 53 | 100.00% | 1 | 100.00% |
Total | 53 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(pci_irq_get_node);
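A hedged sketch of the intended use: allocate per-queue state on the NUMA node a vector is affine to, as reported by pci_irq_get_node(). "struct my_queue" and "my_alloc_queue" are hypothetical:

#include <linux/pci.h>
#include <linux/slab.h>

struct my_queue {
	int vec;
	/* ... hypothetical queue state ... */
};

static struct my_queue *my_alloc_queue(struct pci_dev *pdev, int vec)
{
	int node = pci_irq_get_node(pdev, vec);
	struct my_queue *q = kzalloc_node(sizeof(*q), GFP_KERNEL, node);

	if (q)
		q->vec = vec;
	return q;
}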
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
return to_pci_dev(desc->dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(msi_desc_to_pci_dev);
void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
struct pci_dev *dev = msi_desc_to_pci_dev(desc);
return dev->bus->sysdata;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata);
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
/**
* pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
* @irq_data: Pointer to interrupt data of the MSI interrupt
* @msg: Pointer to the message
*/
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
struct msi_desc *desc = irq_data_get_msi_desc(irq_data);
/*
* For MSI-X desc->irq is always equal to irq_data->irq. For
* MSI only the first interrupt of MULTI MSI passes the test.
*/
if (desc->irq == irq_data->irq)
__pci_write_msi_msg(desc, msg);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 43 | 100.00% | 2 | 100.00% |
Total | 43 | 100.00% | 2 | 100.00% |
/**
* pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
* @dev: Pointer to the PCI device
* @desc: Pointer to the msi descriptor
*
* The ID number is only used within the irqdomain.
*/
irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
struct msi_desc *desc)
{
return (irq_hw_number_t)desc->msi_attrib.entry_nr |
PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 53 | 100.00% | 1 | 100.00% |
Total | 53 | 100.00% | 1 | 100.00% |
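A worked example of the encoding above (illustrative only): bits [10:0] carry entry_nr, bits [26:11] the bus/devfn pair, and the PCI domain number occupies the bits above that.

/*
 * For device 0000:03:00.1, MSI-X entry 5:
 *	PCI_DEVID(3, PCI_DEVFN(0, 1)) = (3 << 8) | 1 = 0x301
 *	hwirq = 5 | (0x301 << 11) | (0 << 27) = 0x180805
 */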
static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
{
return !desc->msi_attrib.is_msix && desc->nvec_used > 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
/**
* pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev
* @domain: The interrupt domain to check
* @info: The domain info for verification
* @dev: The device to check
*
* Returns:
* 0 if the functionality is supported
* 1 if Multi MSI is requested, but the domain does not support it
* -ENOTSUPP otherwise
*/
int pci_msi_domain_check_cap(struct irq_domain *domain,
struct msi_domain_info *info, struct device *dev)
{
struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
/* Special handling to support __pci_enable_msi_range() */
if (pci_msi_desc_is_multi_msi(desc) &&
!(info->flags & MSI_FLAG_MULTI_PCI_MSI))
return 1;
else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX))
return -ENOTSUPP;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 77 | 98.72% | 1 | 50.00% |
Christoph Hellwig | 1 | 1.28% | 1 | 50.00% |
Total | 78 | 100.00% | 2 | 100.00% |
static int pci_msi_domain_handle_error(struct irq_domain *domain,
struct msi_desc *desc, int error)
{
/* Special handling to support __pci_enable_msi_range() */
if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
return 1;
return error;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 37 | 97.37% | 1 | 50.00% |
Christoph Hellwig | 1 | 2.63% | 1 | 50.00% |
Total | 38 | 100.00% | 2 | 100.00% |
#ifdef GENERIC_MSI_DOMAIN_OPS
static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
struct msi_desc *desc)
{
arg->desc = desc;
arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc),
desc);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 35 | 100.00% | 1 | 100.00% |
Total | 35 | 100.00% | 1 | 100.00% |
#else
#define pci_msi_domain_set_desc NULL
#endif
static struct msi_domain_ops pci_msi_domain_ops_default = {
.set_desc = pci_msi_domain_set_desc,
.msi_check = pci_msi_domain_check_cap,
.handle_error = pci_msi_domain_handle_error,
};
static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
{
struct msi_domain_ops *ops = info->ops;
if (ops == NULL) {
info->ops = &pci_msi_domain_ops_default;
} else {
if (ops->set_desc == NULL)
ops->set_desc = pci_msi_domain_set_desc;
if (ops->msi_check == NULL)
ops->msi_check = pci_msi_domain_check_cap;
if (ops->handle_error == NULL)
ops->handle_error = pci_msi_domain_handle_error;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 80 | 100.00% | 1 | 100.00% |
Total | 80 | 100.00% | 1 | 100.00% |
static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
{
struct irq_chip *chip = info->chip;
BUG_ON(!chip);
if (!chip->irq_write_msi_msg)
chip->irq_write_msi_msg = pci_msi_domain_write_msg;
if (!chip->irq_mask)
chip->irq_mask = pci_msi_mask_irq;
if (!chip->irq_unmask)
chip->irq_unmask = pci_msi_unmask_irq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 39 | 60.00% | 1 | 50.00% |
Marc Zyngier | 26 | 40.00% | 1 | 50.00% |
Total | 65 | 100.00% | 2 | 100.00% |
/**
* pci_msi_create_irq_domain - Create a MSI interrupt domain
* @fwnode: Optional fwnode of the interrupt controller
* @info: MSI domain info
* @parent: Parent irq domain
*
* Updates the domain and chip ops and creates a MSI interrupt domain.
*
* Returns:
* A domain pointer or NULL in case of failure.
*/
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent)
{
struct irq_domain *domain;
if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
pci_msi_domain_update_dom_ops(info);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
pci_msi_domain_update_chip_ops(info);
info->flags |= MSI_FLAG_ACTIVATE_EARLY;
domain = msi_create_irq_domain(fwnode, info, parent);
if (!domain)
return NULL;
domain->bus_token = DOMAIN_BUS_PCI_MSI;
return domain;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 54 | 62.07% | 1 | 25.00% |
Marc Zyngier | 33 | 37.93% | 3 | 75.00% |
Total | 87 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain);
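A hedged sketch of how an interrupt controller driver might call this, assuming a hierarchical parent domain; the default-ops flags let pci_msi_domain_update_dom_ops() and pci_msi_domain_update_chip_ops() above fill in the unset callbacks. All "my_*" names are hypothetical, and a real driver also supplies alloc/free ops on the parent domain:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>

static struct irq_chip my_msi_chip = {
	.name		= "my-msi",
	/* irq_mask/irq_unmask/irq_write_msi_msg are filled in by the
	 * PCI MSI core because of MSI_FLAG_USE_DEF_CHIP_OPS below */
	.irq_ack	= irq_chip_ack_parent,
};

static struct msi_domain_info my_msi_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX,
	.chip	= &my_msi_chip,
};

static struct irq_domain *my_create_domain(struct fwnode_handle *fwnode,
					   struct irq_domain *parent)
{
	return pci_msi_create_irq_domain(fwnode, &my_msi_info, parent);
}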
static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
{
u32 *pa = data;
*pa = alias;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Daney | 32 | 100.00% | 1 | 100.00% |
Total | 32 | 100.00% | 1 | 100.00% |
/**
* pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
* @domain: The interrupt domain
* @pdev: The PCI device.
*
* The RID for a device is formed from the alias, with a
* firmware-supplied mapping applied.
*
* Returns: The RID.
*/
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
{
struct device_node *of_node;
u32 rid = 0;
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
of_node = irq_domain_get_of_node(domain);
rid = of_node ? of_msi_map_rid(&pdev->dev, of_node, rid) :
iort_msi_map_rid(&pdev->dev, rid);
return rid;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Daney | 59 | 83.10% | 1 | 50.00% |
Tomasz Nowicki | 12 | 16.90% | 1 | 50.00% |
Total | 71 | 100.00% | 2 | 100.00% |
/**
* pci_msi_get_device_domain - Get the MSI domain for a given PCI device
* @pdev: The PCI device
*
* Use the firmware data to find a device-specific MSI domain
* (i.e. not one that is set as a default).
*
* Returns: The corresponding MSI domain or NULL if none has been found.
*/
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
struct irq_domain *dom;
u32 rid = 0;
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
dom = of_msi_map_get_device_domain(&pdev->dev, rid);
if (!dom)
dom = iort_get_device_domain(&pdev->dev, rid);
return dom;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marc Zyngier | 37 | 57.81% | 1 | 50.00% |
Tomasz Nowicki | 27 | 42.19% | 1 | 50.00% |
Total | 64 | 100.00% | 2 | 100.00% |
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 786 | 13.00% | 14 | 7.78% |
Christoph Hellwig | 660 | 10.92% | 16 | 8.89% |
Hidetoshi Seto | 530 | 8.77% | 11 | 6.11% |
Matthew Wilcox | 461 | 7.62% | 10 | 5.56% |
Greg Kroah-Hartman | 384 | 6.35% | 3 | 1.67% |
Thomas Gleixner | 316 | 5.23% | 8 | 4.44% |
Alexander Gordeev | 293 | 4.85% | 8 | 4.44% |
Andrew Morton | 246 | 4.07% | 1 | 0.56% |
Eric W. Biedermann | 231 | 3.82% | 13 | 7.22% |
Neil Horman | 187 | 3.09% | 1 | 0.56% |
Roland Dreier | 162 | 2.68% | 3 | 1.67% |
Yijing Wang | 161 | 2.66% | 8 | 4.44% |
Michael Ellerman | 157 | 2.60% | 12 | 6.67% |
Ben Hutchings | 154 | 2.55% | 2 | 1.11% |
Marc Zyngier | 108 | 1.79% | 6 | 3.33% |
Benjamin Herrenschmidt | 101 | 1.67% | 1 | 0.56% |
Yinghai Lu | 101 | 1.67% | 3 | 1.67% |
David Daney | 95 | 1.57% | 1 | 0.56% |
Thierry Reding | 84 | 1.39% | 1 | 0.56% |
Konrad Rzeszutek Wilk | 73 | 1.21% | 1 | 0.56% |
Björn Helgaas | 68 | 1.12% | 11 | 6.11% |
Adrian Bunk | 68 | 1.12% | 1 | 0.56% |
Thomas Petazzoni | 65 | 1.08% | 1 | 0.56% |
Shaohua Li | 59 | 0.98% | 1 | 0.56% |
Tomasz Nowicki | 42 | 0.69% | 1 | 0.56% |
Brice Goglin | 37 | 0.61% | 3 | 1.67% |
David S. Miller | 35 | 0.58% | 1 | 0.56% |
Jan Beulich | 35 | 0.58% | 2 | 1.11% |
Duan Zhenzhong | 35 | 0.58% | 1 | 0.56% |
Lucas Stach | 33 | 0.55% | 1 | 0.56% |
Rafael J. Wysocki | 33 | 0.55% | 1 | 0.56% |
Romain Bezut | 28 | 0.46% | 1 | 0.56% |
Gavin Shan | 27 | 0.45% | 3 | 1.67% |
David Shaohua Li | 25 | 0.41% | 1 | 0.56% |
Mark Maule | 24 | 0.40% | 1 | 0.56% |
Michael S. Tsirkin | 22 | 0.36% | 4 | 2.22% |
Jake Oshins | 20 | 0.33% | 1 | 0.56% |
Mitch A Williams | 17 | 0.28% | 1 | 0.56% |
Andrew Patterson | 16 | 0.26% | 1 | 0.56% |
Dan Carpenter | 16 | 0.26% | 1 | 0.56% |
Alexei Starovoitov | 7 | 0.12% | 1 | 0.56% |
Sheng Yang | 6 | 0.10% | 1 | 0.56% |
Kristen Carlson Accardi | 6 | 0.10% | 1 | 0.56% |
Linas Vepstas | 5 | 0.08% | 1 | 0.56% |
Lorenzo Pieralisi | 4 | 0.07% | 1 | 0.56% |
Ashok Raj | 4 | 0.07% | 1 | 0.56% |
Paul Gortmaker | 3 | 0.05% | 1 | 0.56% |
Tejun Heo | 3 | 0.05% | 1 | 0.56% |
Grant Grundler | 3 | 0.05% | 1 | 0.56% |
Kenji Kaneshige | 2 | 0.03% | 2 | 1.11% |
Ryan Desfosses | 1 | 0.02% | 1 | 0.56% |
Jike Song | 1 | 0.02% | 1 | 0.56% |
Christophe Jaillet | 1 | 0.02% | 1 | 0.56% |
Prarit Bhargava | 1 | 0.02% | 1 | 0.56% |
Masanari Iida | 1 | 0.02% | 1 | 0.56% |
Dennis Chen | 1 | 0.02% | 1 | 0.56% |
Randy Dunlap | 1 | 0.02% | 1 | 0.56% |
Dan J Williams | 1 | 0.02% | 1 | 0.56% |
Total | 6046 | 100.00% | 180 | 100.00% |