This patch reworks how IRQ mode handling is done. The biggest code change is to use the standard INTX management code that exists in more recent kernels (and provide a backport version). This also fixes the pci_lock code which was broken, since it was not protecting against config access, and was doing trylock. Make this driver behave like other Linux drivers. Start at MSI-X and degrade to less desirable modes automatically if the desired type is not available. This patch also makes MSI mode work; previously the mode was there but it would never work. Signed-off-by: Stephen Hemminger --- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c +++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c @@ -40,8 +40,7 @@ enum igbuio_intr_mode { IGBUIO_LEGACY_INTR_MODE = 0, IGBUIO_MSI_INTR_MODE, - IGBUIO_MSIX_INTR_MODE, - IGBUIO_INTR_MODE_MAX + IGBUIO_MSIX_INTR_MODE }; /** @@ -50,7 +49,6 @@ struct rte_uio_pci_dev { struct uio_info info; struct pci_dev *pdev; - spinlock_t lock; /* spinlock for accessing PCI config space or msix data in multi tasks/isr */ enum igbuio_intr_mode mode; }; @@ -139,36 +137,67 @@ .attrs = dev_attrs, }; -static inline int -pci_lock(struct pci_dev * pdev) -{ - /* Some function names changes between 3.2.0 and 3.3.0... */ + #if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) - pci_block_user_cfg_access(pdev); - return 1; -#else - return pci_cfg_access_trylock(pdev); -#endif +/* Check if INTX works to control IRQs. 
+ * Sets the INTX_DISABLE flag and reads it back */ +static bool pci_intx_mask_supported(struct pci_dev *dev) +{ + bool mask_supported = false; + uint16_t orig, new; + + pci_block_user_cfg_access(dev); + pci_read_config_word(dev, PCI_COMMAND, &orig); + pci_write_config_word(dev, PCI_COMMAND, + orig ^ PCI_COMMAND_INTX_DISABLE); + pci_read_config_word(dev, PCI_COMMAND, &new); + + if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) { + dev_err(&dev->dev, "Command register changed from " + "0x%x to 0x%x: driver or hardware bug?\n", orig, new); + } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) { + mask_supported = true; + pci_write_config_word(dev, PCI_COMMAND, orig); + } + pci_unblock_user_cfg_access(dev); + + return mask_supported; } -static inline void -pci_unlock(struct pci_dev * pdev) +static bool pci_check_and_mask_intx(struct pci_dev *pdev) { - /* Some function names changes between 3.2.0 and 3.3.0... */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) - pci_unblock_user_cfg_access(pdev); -#else - pci_cfg_access_unlock(pdev); -#endif + bool pending; + uint32_t status; + + pci_block_user_cfg_access(pdev); + pci_read_config_dword(pdev, PCI_COMMAND, &status); + + /* interrupt is not ours, goes to out */ + pending = (((status >> 16) & PCI_STATUS_INTERRUPT) != 0); + if (pending) { + uint16_t old, new; + + old = status; + new = old | PCI_COMMAND_INTX_DISABLE; + + if (old != new) + pci_write_config_word(pdev, PCI_COMMAND, new); + } + pci_unblock_user_cfg_access(pdev); + + return pending; } +#endif -/** +/* * It masks the msix on/off of generating MSI-X messages. 
*/ -static int +static void igbuio_msix_mask_irq(struct msi_desc *desc, int32_t state) { - uint32_t mask_bits = desc->masked; + u32 mask_bits = desc->masked; unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL; @@ -182,49 +211,24 @@ readl(desc->mask_base); desc->masked = mask_bits; } - - return 0; } -/** - * This function sets/clears the masks for generating LSC interrupts. - * - * @param info - * The pointer to struct uio_info. - * @param on - * The on/off flag of masking LSC. - * @return - * -On success, zero value. - * -On failure, a negative value. - */ -static int -igbuio_set_interrupt_mask(struct rte_uio_pci_dev *udev, int32_t state) +static void +igbuio_msi_mask_irq(struct irq_data *data, u32 enable) { - struct pci_dev *pdev = udev->pdev; - - if (udev->mode == IGBUIO_MSIX_INTR_MODE) { - struct msi_desc *desc; + struct msi_desc *desc = irq_data_get_msi(data); + u32 mask_bits = desc->masked; + unsigned offset = data->irq - desc->dev->irq; + u32 mask = 1 << offset; + u32 flag = enable << offset; - list_for_each_entry(desc, &pdev->msi_list, list) { - igbuio_msix_mask_irq(desc, state); - } - } - else if (udev->mode == IGBUIO_LEGACY_INTR_MODE) { - uint32_t status; - uint16_t old, new; + mask_bits &= ~mask; + mask_bits |= flag; - pci_read_config_dword(pdev, PCI_COMMAND, &status); - old = status; - if (state != 0) - new = old & (~PCI_COMMAND_INTX_DISABLE); - else - new = old | PCI_COMMAND_INTX_DISABLE; - - if (old != new) - pci_write_config_word(pdev, PCI_COMMAND, new); + if (desc->msi_attrib.maskbit && mask_bits != desc->masked) { + pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits); + desc->masked = mask_bits; } - - return 0; } /** @@ -243,20 +247,23 @@ static int igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state) { - unsigned long flags; struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info); struct pci_dev *pdev = udev->pdev; - spin_lock_irqsave(&udev->lock, flags); - if (!pci_lock(pdev)) { - 
spin_unlock_irqrestore(&udev->lock, flags); - return -1; - } + pci_cfg_access_lock(pdev); + if (udev->mode == IGBUIO_LEGACY_INTR_MODE) + pci_intx(pdev, !!irq_state); + else if (udev->mode == IGBUIO_MSI_INTR_MODE) { + struct irq_data *data = irq_get_irq_data(pdev->irq); - igbuio_set_interrupt_mask(udev, irq_state); + igbuio_msi_mask_irq(data, !!irq_state); + } else if (udev->mode == IGBUIO_MSIX_INTR_MODE) { + struct msi_desc *desc; - pci_unlock(pdev); - spin_unlock_irqrestore(&udev->lock, flags); + list_for_each_entry(desc, &pdev->msi_list, list) + igbuio_msix_mask_irq(desc, irq_state); + } + pci_cfg_access_unlock(pdev); return 0; } @@ -268,37 +275,15 @@ static irqreturn_t igbuio_pci_irqhandler(int irq, struct uio_info *info) { - irqreturn_t ret = IRQ_NONE; - unsigned long flags; struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info); - struct pci_dev *pdev = udev->pdev; - uint32_t cmd_status_dword; - uint16_t status; - spin_lock_irqsave(&udev->lock, flags); - /* block userspace PCI config reads/writes */ - if (!pci_lock(pdev)) - goto spin_unlock; - - /* for legacy mode, interrupt maybe shared */ - if (udev->mode == IGBUIO_LEGACY_INTR_MODE) { - pci_read_config_dword(pdev, PCI_COMMAND, &cmd_status_dword); - status = cmd_status_dword >> 16; - /* interrupt is not ours, goes to out */ - if (!(status & PCI_STATUS_INTERRUPT)) - goto done; - } - - igbuio_set_interrupt_mask(udev, 0); - ret = IRQ_HANDLED; -done: - /* unblock userspace PCI config reads/writes */ - pci_unlock(pdev); -spin_unlock: - spin_unlock_irqrestore(&udev->lock, flags); - pr_info("irq 0x%x %s\n", irq, (ret == IRQ_HANDLED) ? 
"handled" : "not handled"); + /* Legacy mode need to mask in hardware */ + if (udev->mode == IGBUIO_LEGACY_INTR_MODE && + !pci_check_and_mask_intx(udev->pdev)) + return IRQ_NONE; - return ret; + /* Message signal mode, no share IRQ and automasked */ + return IRQ_HANDLED; } #ifdef CONFIG_XEN_DOM0 @@ -454,6 +439,7 @@ { struct rte_uio_pci_dev *udev; int err; + struct msix_entry msix_entry; udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL); if (!udev) @@ -505,6 +491,7 @@ udev->info.version = "0.1"; udev->info.handler = igbuio_pci_irqhandler; udev->info.irqcontrol = igbuio_pci_irqcontrol; + udev->info.irq = dev->irq; #ifdef CONFIG_XEN_DOM0 /* check if the driver run on Xen Dom0 */ if (xen_initial_domain()) @@ -512,36 +499,36 @@ #endif udev->info.priv = udev; udev->pdev = dev; - udev->mode = 0; /* set the default value for interrupt mode */ - spin_lock_init(&udev->lock); - /* check if it need to try msix first */ - if (igbuio_intr_mode_preferred == IGBUIO_MSIX_INTR_MODE) { - /* only one MSIX vector needed */ - struct msix_entry msix_entry = { - .entry = 0, - }; - - if (pci_enable_msix(udev->pdev, &msix_entry, 1) == 0) { + switch (igbuio_intr_mode_preferred) { + case IGBUIO_MSIX_INTR_MODE: + /* Only 1 msi-x vector needed */ + msix_entry.entry = 0; + if (pci_enable_msix(dev, &msix_entry, 1) == 0) { + dev_dbg(&dev->dev, "using MSI-X"); + udev->info.irq = msix_entry.vector; udev->mode = IGBUIO_MSIX_INTR_MODE; - } else { - pr_err("failed to enable pci msix, or not enough msix entries\n"); - udev->mode = IGBUIO_LEGACY_INTR_MODE; + break; } - } - switch (udev->mode) { - case IGBUIO_MSIX_INTR_MODE: - udev->info.irq_flags = 0; - udev->info.irq = udev->msix_entries[0].vector; - break; + /* fall back to MSI */ case IGBUIO_MSI_INTR_MODE: - break; + if (pci_enable_msi(dev) == 0) { + dev_dbg(&dev->dev, "using MSI"); + udev->info.irq = dev->irq; + udev->mode = IGBUIO_MSI_INTR_MODE; + break; + } + /* fall back to INTX */ case IGBUIO_LEGACY_INTR_MODE: - udev->info.irq_flags = 
IRQF_SHARED; - udev->info.irq = dev->irq; - break; - default: - break; + if (pci_intx_mask_supported(dev)) { + dev_dbg(&dev->dev, "using INTX"); + udev->info.irq_flags = IRQF_SHARED; + udev->mode = IGBUIO_LEGACY_INTR_MODE; + } else { + dev_err(&dev->dev, "PCI INTX mask not supported\n"); + err = -EIO; + goto fail_release_iomem; + } } err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp); @@ -566,6 +553,8 @@ igbuio_pci_release_iomem(&udev->info); if (udev->mode == IGBUIO_MSIX_INTR_MODE) pci_disable_msix(udev->pdev); + if (udev->mode == IGBUIO_MSI_INTR_MODE) + pci_disable_msi(udev->pdev); pci_release_regions(dev); fail_disable: pci_disable_device(dev);