DPDK patches and discussions
 help / color / mirror / Atom feed
From: "lunyuan.cui" <lunyuanx.cui@intel.com>
To: qiming.yang@intel.com, jingjing.wu@intel.com,
	beilei.xing@intel.com, qi.z.zhang@intel.com,
	shougangx.wang@intel.com
Cc: dev@dpdk.org, "lunyuan.cui" <lunyuanx.cui@intel.com>
Subject: [dpdk-dev] [RFC] net/i40e: enable multi-queue Rx interrupt for VF
Date: Fri, 30 Aug 2019 02:12:50 +0000	[thread overview]
Message-ID: <20190830021250.2403-1-lunyuanx.cui@intel.com> (raw)

This patch enables multi-queue Rx interrupt support for VF.

In the current implementation, only one Rx queue can support interrupt,
because all queues are mapped to the same vector id.

This patch fixes that by mapping a different interrupt vector to each queue.
In addition, the maximum number of interrupt vectors on i40evf is 4,
so there is a limit on the number of vectors that can be used.

Signed-off-by: lunyuan.cui <lunyuanx.cui@intel.com>
---
 drivers/net/i40e/i40e_ethdev_vf.c | 80 ++++++++++++++++++++++---------
 1 file changed, 57 insertions(+), 23 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 308fb9835..9d1af3804 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -645,6 +645,8 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
 	return ret;
 }
 
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF	4
+
 static int
 i40evf_config_irq_map(struct rte_eth_dev *dev)
 {
@@ -655,38 +657,70 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
 	struct virtchnl_irq_map_info *map_info;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	uint16_t nb_msix = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
 	uint32_t vector_id;
 	int i, err;
 
 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
-	    rte_intr_allow_others(intr_handle))
+	    rte_intr_allow_others(intr_handle)) {
+		nb_msix = RTE_MIN(intr_handle->nb_efd, nb_msix);
 		vector_id = I40E_RX_VEC_START;
-	else
+	} else
 		vector_id = I40E_MISC_VEC_ID;
 
-	map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
-	map_info->num_vectors = 1;
-	map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
-	map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
-	/* Alway use default dynamic MSIX interrupt */
-	map_info->vecmap[0].vector_id = vector_id;
-	/* Don't map any tx queue */
-	map_info->vecmap[0].txq_map = 0;
-	map_info->vecmap[0].rxq_map = 0;
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		map_info->vecmap[0].rxq_map |= 1 << i;
-		if (rte_intr_dp_is_en(intr_handle))
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			memset(cmd_buffer, 0, sizeof(cmd_buffer));
+			map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
+			map_info->num_vectors = 1;
+			map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
+			map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
+			/* Always use default dynamic MSIX interrupt */
+			map_info->vecmap[0].vector_id = vector_id;
+			/* Don't map any tx queue */
+			map_info->vecmap[0].txq_map = 0;
+			map_info->vecmap[0].rxq_map = 0;
+			map_info->vecmap[0].rxq_map |= 1 << i;
+
 			intr_handle->intr_vec[i] = vector_id;
-	}
 
-	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
-	args.in_args = (u8 *)cmd_buffer;
-	args.in_args_size = sizeof(cmd_buffer);
-	args.out_buffer = vf->aq_resp;
-	args.out_size = I40E_AQ_BUF_SZ;
-	err = i40evf_execute_vf_cmd(dev, &args);
-	if (err)
-		PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES");
+			args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
+			args.in_args = (u8 *)cmd_buffer;
+			args.in_args_size = sizeof(cmd_buffer);
+			args.out_buffer = vf->aq_resp;
+			args.out_size = I40E_AQ_BUF_SZ;
+			err = i40evf_execute_vf_cmd(dev, &args);
+			if (err) {
+				PMD_DRV_LOG(ERR, "fail to execute command "
+					"OP_CONFIG_IRQ_MAP");
+				return err;
+			}
+			if ((vector_id != I40E_MISC_VEC_ID) && (nb_msix > 1)) {
+				vector_id++;
+				nb_msix--;
+			}
+		}
+	} else {
+		map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
+		map_info->num_vectors = 1;
+		map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
+		map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
+		/* Always use default dynamic MSIX interrupt */
+		map_info->vecmap[0].vector_id = vector_id;
+		/* Don't map any tx queue */
+		map_info->vecmap[0].txq_map = 0;
+		map_info->vecmap[0].rxq_map = 0;
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			map_info->vecmap[0].rxq_map |= 1 << i;
+
+		args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
+		args.in_args = (u8 *)cmd_buffer;
+		args.in_args_size = sizeof(cmd_buffer);
+		args.out_buffer = vf->aq_resp;
+		args.out_size = I40E_AQ_BUF_SZ;
+		err = i40evf_execute_vf_cmd(dev, &args);
+		if (err)
+			PMD_DRV_LOG(ERR,
+				"fail to execute command OP_ENABLE_QUEUES");
+	}
 
 	return err;
 }
-- 
2.17.1


             reply	other threads:[~2019-08-30  8:28 UTC|newest]

Thread overview: 2+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-08-30  2:12 lunyuan.cui [this message]
2019-09-02 19:45 lunyuan.cui

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190830021250.2403-1-lunyuanx.cui@intel.com \
    --to=lunyuanx.cui@intel.com \
    --cc=beilei.xing@intel.com \
    --cc=dev@dpdk.org \
    --cc=jingjing.wu@intel.com \
    --cc=qi.z.zhang@intel.com \
    --cc=qiming.yang@intel.com \
    --cc=shougangx.wang@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).