From mboxrd@z Thu Jan 1 00:00:00 1970
From: Cunming Liang <cunming.liang@intel.com>
To: dev@dpdk.org
Date: Fri, 30 Oct 2015 13:27:52 +0800
Message-Id: <1446182873-28814-11-git-send-email-cunming.liang@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1446182873-28814-1-git-send-email-cunming.liang@intel.com>
References: <1443072831-19065-1-git-send-email-cunming.liang@intel.com>
 <1446182873-28814-1-git-send-email-cunming.liang@intel.com>
Subject: [dpdk-dev] [PATCH v2 10/11] i40evf: add rx interrupt support

v2 changes:
  - turn on intr only when the rxq flag is set
  - rework based on patch http://dpdk.org/dev/patchwork/patch/7504/

The patch enables rx interrupt support on the i40e VF, along with the
necessary changes to PF IOV mode to support the VF. On the PF side,
running in IOV mode via uio does not allow rx interrupts, since the rx
interrupt and the mailbox interrupt are mutually exclusive when they
compete for the single available vector. On the VF side, one single
vector is shared among all the rx queues.
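For reference, nothing here is i40evf-specific on the application side:
the feature is consumed through the generic ethdev rx-interrupt API. A
minimal sketch of the consuming side follows, modeled on the
l3fwd-power pattern; the rx_intr_loop helper, the port/queue ids and
the 32-packet burst size are illustrative assumptions, not part of this
patch.

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_mbuf.h>

/* Request per-rxq interrupts at configure time; with this flag set,
 * i40evf_dev_start() below takes the rte_intr_efd_enable() path. */
static const struct rte_eth_conf port_conf = {
        .intr_conf = {
                .rxq = 1,
        },
};

/* Sleep until the queue's vector fires, then poll the queue empty. */
static void
rx_intr_loop(uint8_t port_id, uint16_t queue_id)
{
        struct rte_epoll_event ev;
        struct rte_mbuf *pkts[32];
        uint16_t i, nb_rx;

        /* Hook the queue's rx interrupt into this thread's epoll fd. */
        rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
                                  RTE_INTR_EVENT_ADD, NULL);

        for (;;) {
                /* Arm the interrupt, block, then mask it again. */
                rte_eth_dev_rx_intr_enable(port_id, queue_id);
                rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
                rte_eth_dev_rx_intr_disable(port_id, queue_id);

                /* Drain the queue before going back to sleep. */
                do {
                        nb_rx = rte_eth_rx_burst(port_id, queue_id,
                                                 pkts, 32);
                        for (i = 0; i < nb_rx; i++)
                                rte_pktmbuf_free(pkts[i]); /* app work */
                } while (nb_rx > 0);
        }
}

Since the VF shares one vector across all its rx queues, every queue
registered this way wakes up on the same interrupt, so a multi-queue
application has to tolerate wakeups for queues that turn out to be
empty.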
Signed-off-by: Cunming Liang <cunming.liang@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c    |  38 +++++-----
 drivers/net/i40e/i40e_ethdev.h    |  15 ++++
 drivers/net/i40e/i40e_ethdev_vf.c | 143 +++++++++++++++++++++++++++++++++++---
 drivers/net/i40e/i40e_pf.c        |   5 --
 4 files changed, 166 insertions(+), 35 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index d4a663c..40ed852 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -75,11 +75,6 @@
 /* Maximun number of VSI */
 #define I40E_MAX_NUM_VSIS          (384UL)
 
-/* Default queue interrupt throttling time in microseconds */
-#define I40E_ITR_INDEX_DEFAULT          0
-#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
-#define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
-
 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
 
 /* Mask of PF interrupt causes */
@@ -762,16 +757,6 @@ i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
         I40E_WRITE_FLUSH(hw);
 }
 
-static inline uint16_t
-i40e_calc_itr_interval(int16_t interval)
-{
-        if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
-                interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
-
-        /* Convert to hardware count, as writing each 1 represents 2 us */
-        return (interval/2);
-}
-
 static void
 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
                        int base_queue, int nb_queue)
@@ -822,13 +807,24 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
         } else {
                 uint32_t reg;
 
-                /* num_msix_vectors_vf needs to minus irq0 */
-                reg = (hw->func_caps.num_msix_vectors_vf - 1) *
-                        vsi->user_param + (msix_vect - 1);
+                if (msix_vect == MISC_VEC_ID) {
+                        I40E_WRITE_REG(hw,
+                                I40E_VPINT_LNKLST0(vsi->user_param),
+                                (base_queue <<
+                                 I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+                                (0x0 <<
+                                 I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+                } else {
+                        /* num_msix_vectors_vf needs to minus irq0 */
+                        reg = (hw->func_caps.num_msix_vectors_vf - 1) *
+                                vsi->user_param + (msix_vect - 1);
 
-                I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (base_queue <<
-                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
-                        (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+                        I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
+                                (base_queue <<
+                                 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
+                                (0x0 <<
+                                 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+                }
         }
 
         I40E_WRITE_FLUSH(hw);
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 20d52f8..eeff6d7 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -156,6 +156,11 @@ enum i40e_flxpld_layer_idx {
         (1ULL << I40E_FILTER_PCTYPE_FCOE_OTHER) | \
         (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
+/* Default queue interrupt throttling time in microseconds */
+#define I40E_ITR_INDEX_DEFAULT          0
+#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+#define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
+
 struct i40e_adapter;
 
 /**
@@ -578,6 +583,16 @@ i40e_align_floor(int n)
         return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n));
 }
 
+static inline uint16_t
+i40e_calc_itr_interval(int16_t interval)
+{
+        if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
+                interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+
+        /* Convert to hardware count, as writing each 1 represents 2 us */
+        return (interval / 2);
+}
+
 #define I40E_VALID_FLOW(flow_type) \
         ((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \
          (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 176a2f6..4fdb401 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -145,6 +145,10 @@ static int
 i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
                            struct rte_eth_rss_conf *rss_conf);
 static int
 i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                              struct rte_eth_rss_conf *rss_conf);
+static int
+i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+static int
+i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
 
 /* Default hash key buffer for RSS */
 static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
@@ -170,6 +174,9 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
         .tx_queue_stop = i40evf_dev_tx_queue_stop,
         .rx_queue_setup = i40e_dev_rx_queue_setup,
         .rx_queue_release = i40e_dev_rx_queue_release,
+        .rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
+        .rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
+        .rx_descriptor_done = i40e_dev_rx_descriptor_done,
         .tx_queue_setup = i40e_dev_tx_queue_setup,
         .tx_queue_release = i40e_dev_tx_queue_release,
         .reta_update = i40evf_dev_rss_reta_update,
@@ -712,22 +719,33 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
                 sizeof(struct i40e_virtchnl_vector_map)];
         struct i40e_virtchnl_irq_map_info *map_info;
+        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+        uint32_t vector_id;
         int i, err;
+
+        if (rte_intr_allow_others(intr_handle)) {
+                if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
+                        vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
+                else
+                        vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX;
+        } else {
+                vector_id = MISC_VEC_ID;
+        }
+
         map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
         map_info->num_vectors = 1;
         map_info->vecmap[0].rxitr_idx = I40E_QINT_RQCTL_MSIX_INDX_NOITR;
         map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
         /* Alway use default dynamic MSIX interrupt */
-        if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
-                map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
-        else
-                map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX;
-
+        map_info->vecmap[0].vector_id = vector_id;
         /* Don't map any tx queue */
         map_info->vecmap[0].txq_map = 0;
         map_info->vecmap[0].rxq_map = 0;
-        for (i = 0; i < dev->data->nb_rx_queues; i++)
+        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                 map_info->vecmap[0].rxq_map |= 1 << i;
+                if (rte_intr_dp_is_en(intr_handle))
+                        intr_handle->intr_vec[i] = vector_id;
+        }
 
         args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
         args.in_args = (u8 *)cmd_buffer;
@@ -1565,6 +1583,16 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev)
 {
         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
+        if (!rte_intr_allow_others(intr_handle)) {
+                I40E_WRITE_REG(hw,
+                               I40E_VFINT_DYN_CTL01,
+                               I40E_VFINT_DYN_CTL01_INTENA_MASK |
+                               I40E_VFINT_DYN_CTL01_CLEARPBA_MASK);
+                I40E_WRITE_FLUSH(hw);
+                return;
+        }
 
         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
                 /* To support DPDK PF host */
@@ -1577,6 +1605,8 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev)
                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
                                I40E_VFINT_DYN_CTL01_INTENA_MASK |
                                I40E_VFINT_DYN_CTL01_CLEARPBA_MASK);
+
+        I40E_WRITE_FLUSH(hw);
 }
 
 static inline void
@@ -1584,14 +1614,76 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
 {
         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
+        if (!rte_intr_allow_others(intr_handle)) {
+                I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
+                I40E_WRITE_FLUSH(hw);
+                return;
+        }
 
         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
                 I40E_WRITE_REG(hw,
-                        I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
-                        0);
+                               I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR
+                                                    - 1),
+                               0);
         else
                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
+
+        I40E_WRITE_FLUSH(hw);
+}
+
+static int
+i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+        uint16_t interval =
+                i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+        uint16_t msix_intr;
+
+        msix_intr = intr_handle->intr_vec[queue_id];
+        if (msix_intr == MISC_VEC_ID)
+                I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+                               I40E_VFINT_DYN_CTL01_INTENA_MASK |
+                               I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
+                               (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+                               (interval <<
+                                I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT));
+        else
+                I40E_WRITE_REG(hw,
+                               I40E_VFINT_DYN_CTLN1(msix_intr - RX_VEC_START),
+                               I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+                               I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+                               (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+                               (interval <<
+                                I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
+
+        I40E_WRITE_FLUSH(hw);
+
+        rte_intr_enable(&dev->pci_dev->intr_handle);
+
+        return 0;
+}
+
+static int
+i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+        uint16_t msix_intr;
+
+        msix_intr = intr_handle->intr_vec[queue_id];
+        if (msix_intr == MISC_VEC_ID)
+                I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
+        else
+                I40E_WRITE_REG(hw,
+                               I40E_VFINT_DYN_CTLN1(msix_intr - RX_VEC_START),
+                               0);
+
+        I40E_WRITE_FLUSH(hw);
+
+        return 0;
 }
 
 static int
@@ -1599,7 +1691,9 @@ i40evf_dev_start(struct rte_eth_dev *dev)
 {
         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
         struct ether_addr mac_addr;
+        uint32_t intr_vector = 0;
 
         PMD_INIT_FUNC_TRACE();
 
@@ -1609,6 +1703,24 @@ i40evf_dev_start(struct rte_eth_dev *dev)
         vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
                                       dev->data->nb_tx_queues);
 
+        /* check and configure queue intr-vector mapping */
+        if (dev->data->dev_conf.intr_conf.rxq != 0) {
+                intr_vector = dev->data->nb_rx_queues;
+                if (rte_intr_efd_enable(intr_handle, intr_vector))
+                        return -1;
+        }
+
+        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+                intr_handle->intr_vec =
+                        rte_zmalloc("intr_vec",
+                                    dev->data->nb_rx_queues * sizeof(int), 0);
+                if (!intr_handle->intr_vec) {
+                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+                                     " intr_vec\n", dev->data->nb_rx_queues);
+                        return -ENOMEM;
+                }
+        }
+
         if (i40evf_rx_init(dev) != 0){
                 PMD_DRV_LOG(ERR, "failed to do RX init");
                 return -1;
@@ -1638,6 +1750,10 @@ i40evf_dev_start(struct rte_eth_dev *dev)
                 goto err_mac;
         }
 
+        /* vf don't allow intr except for rxq intr */
+        if (dev->data->dev_conf.intr_conf.rxq != 0)
+                rte_intr_enable(intr_handle);
+
         i40evf_enable_queues_intr(dev);
         return 0;
 
@@ -1650,11 +1766,20 @@ err_queue:
 static void
 i40evf_dev_stop(struct rte_eth_dev *dev)
 {
+        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
         PMD_INIT_FUNC_TRACE();
 
-        i40evf_disable_queues_intr(dev);
         i40evf_stop_queues(dev);
+        i40evf_disable_queues_intr(dev);
         i40e_dev_clear_queues(dev);
+
+        /* Clean datapath event and queue/vec mapping */
+        rte_intr_efd_disable(intr_handle);
+        if (intr_handle->intr_vec) {
+                rte_free(intr_handle->intr_vec);
+                intr_handle->intr_vec = NULL;
+        }
 }
 
 static int
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index c1d58a8..cbf4e5b 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -547,11 +547,6 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
                 goto send_msg;
         }
 
-        if (irqmap->vecmap[0].vector_id == 0) {
-                PMD_DRV_LOG(ERR, "DPDK host don't support use IRQ0");
-                ret = I40E_ERR_PARAM;
-                goto send_msg;
-        }
         /* This MSIX intr store the intr in VF range */
         vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
         vf->vsi->nb_msix = irqmap->num_vectors;
-- 
2.4.3