From: Beilei Xing <beilei.xing@intel.com>
To: dev@dpdk.org, jingjing.wu@intel.com
Cc: stable@dpdk.org
Date: Fri, 2 Feb 2018 20:25:10 +0800
Message-Id: <1517574310-93096-5-git-send-email-beilei.xing@intel.com>
X-Mailer: git-send-email 2.5.5
In-Reply-To: <1517574310-93096-1-git-send-email-beilei.xing@intel.com>
References: <1517486402-81403-1-git-send-email-beilei.xing@intel.com>
 <1517574310-93096-1-git-send-email-beilei.xing@intel.com>
Subject: [dpdk-dev] [PATCH v3 4/4] net/i40e: fix interrupt conflict when using multi-driver

There is an interrupt conflict when DPDK and the Linux kernel i40e driver
are used on different ports of the same Ethernet controller. This patch
fixes the conflict by switching from IntN to Int0 when multiple drivers
are used.

Fixes: be6c228d4da3 ("i40e: support Rx interrupt")
Cc: stable@dpdk.org

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c    | 93 +++++++++++++++++++++++--------------
 drivers/net/i40e/i40e_ethdev.h    | 10 +++--
 drivers/net/i40e/i40e_ethdev_vf.c |  4 +-
 3 files changed, 68 insertions(+), 39 deletions(-)
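
For context (illustration only, not part of the diff): the register writes
changed below sit underneath the generic Rx interrupt API, so the fix is
exercised by the usual application path that ends in
i40e_dev_rx_queue_intr_enable(). A minimal sketch of that path follows,
assuming the port is bound with the support-multi-driver devarg added
earlier in this series; the helper name wait_for_rx() and the 10 ms timeout
are placeholders for the example.

    /*
     * Sketch: arm one Rx queue interrupt and block until it fires.
     * Example EAL command line (devarg from an earlier patch in this
     * series): ./app -w 0000:84:00.0,support-multi-driver=1 -- ...
     */
    #include <rte_ethdev.h>
    #include <rte_interrupts.h>

    static int
    wait_for_rx(uint16_t port_id, uint16_t queue_id)
    {
            struct rte_epoll_event event;
            int n;

            /* Register the queue interrupt with the per-thread epoll fd. */
            if (rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
                                          RTE_EPOLL_PER_THREAD,
                                          RTE_INTR_EVENT_ADD, NULL) < 0)
                    return -1;

            /* Re-arm the queue vector; with multi-driver the PMD programs
             * Int0 here instead of an IntN vector.
             */
            rte_eth_dev_rx_intr_enable(port_id, queue_id);

            /* Block until the NIC raises the interrupt or 10 ms pass. */
            n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, 10);

            rte_eth_dev_rx_intr_disable(port_id, queue_id);
            return n;
    }

With support-multi-driver=1 the PF keeps this path on the Int0 vector, so
it no longer touches the IntN auto-mask configuration that the kernel
driver relies on for the other ports of the controller.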

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index ae0f31a..cae22e7 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -760,6 +760,23 @@ static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
         i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
 }
 
+static inline void i40e_config_automask(struct i40e_pf *pf)
+{
+        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+        uint32_t val;
+
+        /* INTENA flag is not auto-cleared for interrupt */
+        val = I40E_READ_REG(hw, I40E_GLINT_CTL);
+        val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
+               I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+
+        /* If support multi-driver, PF will use INT0. */
+        if (!pf->support_multi_driver)
+                val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
+
+        I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
+}
+
 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
 
 /*
@@ -1077,6 +1094,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
                 return ret;
         }
 
+        i40e_config_automask(pf);
+
         /*
          * To work around the NVM issue, initialize registers
          * for flexible payload and packet type of QinQ by
@@ -1463,6 +1482,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
         int i;
         uint32_t val;
         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+        struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
 
         /* Bind all RX queues to allocated MSIX interrupt */
         for (i = 0; i < nb_queue; i++) {
@@ -1481,7 +1501,8 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
         /* Write first RX queue to Link list register as the head element */
         if (vsi->type != I40E_VSI_SRIOV) {
                 uint16_t interval =
-                        i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+                        i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL,
+                                               pf->support_multi_driver);
 
                 if (msix_vect == I40E_MISC_VEC_ID) {
                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
@@ -1539,7 +1560,6 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
         uint16_t queue_idx = 0;
         int record = 0;
-        uint32_t val;
         int i;
 
         for (i = 0; i < vsi->nb_qps; i++) {
@@ -1547,13 +1567,6 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
         }
 
-        /* INTENA flag is not auto-cleared for interrupt */
-        val = I40E_READ_REG(hw, I40E_GLINT_CTL);
-        val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
-                I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
-                I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
-        I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
-
         /* VF bind interrupt */
         if (vsi->type == I40E_VSI_SRIOV) {
                 __vsi_queues_bind_intr(vsi, msix_vect,
@@ -1606,27 +1619,22 @@ i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
-        uint16_t interval = i40e_calc_itr_interval(\
-                RTE_LIBRTE_I40E_ITR_INTERVAL);
+        struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
         uint16_t msix_intr, i;
 
-        if (rte_intr_allow_others(intr_handle))
+        if (rte_intr_allow_others(intr_handle) || !pf->support_multi_driver)
                 for (i = 0; i < vsi->nb_msix; i++) {
                         msix_intr = vsi->msix_intr + i;
                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
-                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
-                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
-                                (interval <<
-                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+                                       I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                                       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+                                       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
                 }
         else
                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
-                               (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
-                               (interval <<
-                                I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
+                               I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
 
         I40E_WRITE_FLUSH(hw);
 }
@@ -1637,16 +1645,18 @@ i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+        struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
         uint16_t msix_intr, i;
 
-        if (rte_intr_allow_others(intr_handle))
+        if (rte_intr_allow_others(intr_handle) || !pf->support_multi_driver)
                 for (i = 0; i < vsi->nb_msix; i++) {
                         msix_intr = vsi->msix_intr + i;
                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
-                                       0);
+                                       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
                 }
         else
-                I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+                I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+                               I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
 
         I40E_WRITE_FLUSH(hw);
 }
@@ -4618,16 +4628,28 @@ i40e_vsi_setup(struct i40e_pf *pf,
 
         /* VF has MSIX interrupt in VF range, don't allocate here */
         if (type == I40E_VSI_MAIN) {
-                ret = i40e_res_pool_alloc(&pf->msix_pool,
-                                          RTE_MIN(vsi->nb_qps,
-                                                  RTE_MAX_RXTX_INTR_VEC_ID));
-                if (ret < 0) {
-                        PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
-                                    vsi->seid, ret);
-                        goto fail_queue_alloc;
+                if (pf->support_multi_driver) {
+                        /* If support multi-driver, need to use INT0 instead of
+                         * allocating from msix pool. The Msix pool is init from
+                         * INT1, so it's OK just set msix_intr to 0 and nb_msix
+                         * to 1 without calling i40e_res_pool_alloc.
+                         */
+                        vsi->msix_intr = 0;
+                        vsi->nb_msix = 1;
+                } else {
+                        ret = i40e_res_pool_alloc(&pf->msix_pool,
+                                                  RTE_MIN(vsi->nb_qps,
+                                                          RTE_MAX_RXTX_INTR_VEC_ID));
+                        if (ret < 0) {
+                                PMD_DRV_LOG(ERR,
+                                            "VSI MAIN %d get heap failed %d",
+                                            vsi->seid, ret);
+                                goto fail_queue_alloc;
+                        }
+                        vsi->msix_intr = ret;
+                        vsi->nb_msix = RTE_MIN(vsi->nb_qps,
+                                               RTE_MAX_RXTX_INTR_VEC_ID);
                 }
-                vsi->msix_intr = ret;
-                vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
         } else if (type != I40E_VSI_SRIOV) {
                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
                 if (ret < 0) {
@@ -5540,7 +5562,8 @@ void
 i40e_pf_disable_irq0(struct i40e_hw *hw)
 {
         /* Disable all interrupt types */
-        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+                       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
         I40E_WRITE_FLUSH(hw);
 }
 
@@ -9861,10 +9884,12 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 static int
 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
+        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
         uint16_t interval =
-                i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+                i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL,
+                                       pf->support_multi_driver);
         uint16_t msix_intr;
 
         msix_intr = intr_handle->intr_vec[queue_id];
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 82d5501..77a4466 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -720,10 +720,14 @@ i40e_align_floor(int n)
 }
 
 static inline uint16_t
-i40e_calc_itr_interval(int16_t interval)
+i40e_calc_itr_interval(int16_t interval, bool is_multi_drv)
 {
-        if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
-                interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+        if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) {
+                if (is_multi_drv)
+                        interval = I40E_QUEUE_ITR_INTERVAL_MAX;
+                else
+                        interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+        }
 
         /* Convert to hardware count, as writing each 1 represents 2 us */
         return interval / 2;
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 1686914..618c717 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1246,7 +1246,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct ether_addr *p_mac_addr;
         uint16_t interval =
-                i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX);
+                i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX, 0);
 
         vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
         vf->dev_data = dev->data;
@@ -1986,7 +1986,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
         uint16_t interval =
-                i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+                i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0);
         uint16_t msix_intr;
 
         msix_intr = intr_handle->intr_vec[queue_id];
-- 
2.5.5
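
For reference, a standalone sketch of how the reworked i40e_calc_itr_interval()
behaves. The numeric values used below for the default and maximum intervals
are assumed (the header defining I40E_QUEUE_ITR_INTERVAL_DEFAULT and
I40E_QUEUE_ITR_INTERVAL_MAX is not part of this diff); only the control flow
mirrors the patch.

    /* Standalone sketch of the reworked helper; constant values assumed. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define QUEUE_ITR_INTERVAL_DEFAULT 32   /* us, assumed */
    #define QUEUE_ITR_INTERVAL_MAX     8160 /* us, assumed */

    static uint16_t
    calc_itr_interval(int16_t interval, bool is_multi_drv)
    {
            if (interval < 0 || interval > QUEUE_ITR_INTERVAL_MAX) {
                    /* Multi-driver falls back to the maximum interval,
                     * single-driver keeps the old default.
                     */
                    interval = is_multi_drv ? QUEUE_ITR_INTERVAL_MAX :
                                              QUEUE_ITR_INTERVAL_DEFAULT;
            }
            /* Hardware counts in 2 us units, hence the divide by 2. */
            return interval / 2;
    }

    int
    main(void)
    {
            printf("%d\n", calc_itr_interval(-1, false)); /* 16: 32 us default */
            printf("%d\n", calc_itr_interval(-1, true));  /* 4080: 8160 us max */
            return 0;
    }

In multi-driver mode an out-of-range request therefore lands on the slowest
interrupt rate, presumably to keep the single shared Int0 vector as quiet as
possible.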