From mboxrd@z Thu Jan  1 00:00:00 1970
From: Shougang Wang <shougangx.wang@intel.com>
To: dev@dpdk.org
Cc: qiming.yang@intel.com, qi.z.zhang@intel.com,
	Shougang Wang <shougangx.wang@intel.com>
Date: Mon, 17 Feb 2020 03:09:35 +0000
Message-Id: <20200217030935.3504-1-shougangx.wang@intel.com>
X-Mailer: git-send-email 2.17.1
Subject: [dpdk-dev] [PATCH] net/i40e: fix multiple interrupt for VF

Interrupt mapping should be 1 vector to n queue(s). This patch fixes the
interrupt binding logic by reworking the vector-to-queue mapping code.

Fixes: 6a6cf5f88b4a ("net/i40e: enable multi-queue Rx interrupt for VF")

Signed-off-by: Shougang Wang <shougangx.wang@intel.com>
---
 drivers/net/i40e/i40e_ethdev_vf.c | 63 ++++++++++++++++++++-----------
 1 file changed, 42 insertions(+), 21 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index b84637309..868117508 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -651,47 +651,68 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
 {
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct vf_cmd_info args;
-	uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \
-		sizeof(struct virtchnl_vector_map) * dev->data->nb_rx_queues];
+	uint8_t *cmd_buffer = NULL;
 	struct virtchnl_irq_map_info *map_info;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-	uint32_t vector_id;
-	int i, err;
+	uint32_t vec, cmd_buffer_size, max_vectors, nb_msix, msix_base, i;
+	uint16_t rxq_map[vf->vf_res->max_vectors];
+	int err;
 
 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
-	    rte_intr_allow_others(intr_handle))
-		vector_id = I40E_RX_VEC_START;
-	else
-		vector_id = I40E_MISC_VEC_ID;
+	    rte_intr_allow_others(intr_handle)) {
+		msix_base = I40E_RX_VEC_START;
+		/* For interrupt mode, available vector id is from 1. */
+		max_vectors = vf->vf_res->max_vectors - 1;
+		nb_msix = RTE_MIN(max_vectors, intr_handle->nb_efd);
+
+		vec = msix_base;
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq_map[vec] |= 1 << i;
+			intr_handle->intr_vec[i] = vec++;
+			if (vec >= vf->vf_res->max_vectors)
+				vec = msix_base;
+		}
+	} else {
+		msix_base = I40E_MISC_VEC_ID;
+		nb_msix = 1;
+
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq_map[msix_base] |= 1 << i;
+			if (rte_intr_dp_is_en(intr_handle))
+				intr_handle->intr_vec[i] = msix_base;
+		}
+	}
+
+	cmd_buffer_size = sizeof(struct virtchnl_irq_map_info) +
+			sizeof(struct virtchnl_vector_map) * nb_msix;
+	cmd_buffer = rte_zmalloc("i40e", cmd_buffer_size, 0);
+	if (!cmd_buffer) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return I40E_ERR_NO_MEMORY;
+	}
 
 	map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
-	map_info->num_vectors = dev->data->nb_rx_queues;
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+	map_info->num_vectors = nb_msix;
+	for (i = 0; i < nb_msix; i++) {
 		map_info->vecmap[i].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
 		map_info->vecmap[i].vsi_id = vf->vsi_res->vsi_id;
-		/* Always use default dynamic MSIX interrupt */
-		map_info->vecmap[i].vector_id = vector_id;
-		/* Don't map any tx queue */
+		map_info->vecmap[i].vector_id = msix_base + i;
 		map_info->vecmap[i].txq_map = 0;
-		map_info->vecmap[i].rxq_map = 1 << i;
-		if (rte_intr_dp_is_en(intr_handle))
-			intr_handle->intr_vec[i] = vector_id;
-		if (vector_id > I40E_MISC_VEC_ID)
-			vector_id++;
-		if (vector_id >= vf->vf_res->max_vectors)
-			vector_id = I40E_RX_VEC_START;
+		map_info->vecmap[i].rxq_map = rxq_map[msix_base + i];
 	}
 
 	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
 	args.in_args = (u8 *)cmd_buffer;
-	args.in_args_size = sizeof(cmd_buffer);
+	args.in_args_size = cmd_buffer_size;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = I40E_AQ_BUF_SZ;
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES");
 
+	rte_free(cmd_buffer);
+
 	return err;
 }
 
-- 
2.17.1
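
For reference, the queue-to-vector distribution described in the commit
message can be illustrated with a small standalone program. This is only a
sketch of the 1 vector : n queues round-robin idea, not driver code: the
function map_rx_queues_to_vectors() and the numbers used (8 RX queues,
3 vectors starting at vector 1, a MAX_VECTORS bound of 16) are assumed for
the example, and the wrap condition is simplified compared to
i40evf_config_irq_map().

/*
 * Standalone sketch (illustrative only): distribute RX queues over the
 * available MSI-X vectors round-robin, building one rxq_map bitmap per
 * vector, similar in spirit to the reworked i40evf_config_irq_map().
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_VECTORS 16	/* assumed upper bound for this example */

static void
map_rx_queues_to_vectors(uint16_t nb_rx_queues, uint16_t nb_msix,
			 uint16_t msix_base, uint16_t rxq_map[MAX_VECTORS])
{
	uint16_t vec = msix_base;
	uint16_t i;

	for (i = 0; i < nb_rx_queues; i++) {
		rxq_map[vec] |= 1u << i;	/* queue i served by vector vec */
		if (++vec >= msix_base + nb_msix)
			vec = msix_base;	/* wrap back to the first RX vector */
	}
}

int
main(void)
{
	uint16_t rxq_map[MAX_VECTORS] = {0};
	uint16_t v;

	/* 8 RX queues spread over 3 vectors starting at vector 1, so every
	 * vector ends up serving 2 or 3 queues (1 vector : n queues).
	 */
	map_rx_queues_to_vectors(8, 3, 1, rxq_map);
	for (v = 1; v <= 3; v++)
		printf("vector %u -> rxq_map 0x%04x\n",
		       (unsigned int)v, (unsigned int)rxq_map[v]);
	return 0;
}

Built as-is, the sketch prints rxq_map 0x0049, 0x0092 and 0x0024 for vectors
1, 2 and 3, i.e. queues {0, 3, 6}, {1, 4, 7} and {2, 5}; this is the kind of
distribution the patched driver programs through VIRTCHNL_OP_CONFIG_IRQ_MAP.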