DPDK patches and discussions
From: Ting Xu <ting.xu@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, beilei.xing@intel.com,
	jingjing.wu@intel.com, Ting Xu <ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v5 1/2] net/iavf: add IAVF request queues function
Date: Fri, 16 Oct 2020 09:21:41 +0800	[thread overview]
Message-ID: <20201016012142.54144-2-ting.xu@intel.com> (raw)
In-Reply-To: <20201016012142.54144-1-ting.xu@intel.com>

Add a new virtchnl function to request additional queues from the PF.
Currently, the default number of queue pairs assigned when a VF is
created is 16. In order to support up to 256 queue pairs per VF, enable
this request queues function.
Since the request queues command may return an event message instead of
a direct command reply, modify iavf_read_msg_from_pf to identify the
event opcode and to mark the VF reset status.

Signed-off-by: Ting Xu <ting.xu@intel.com>
---
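Not part of this patch: a minimal usage sketch of the new API for
reviewers. The helper name and the requested queue count below are
illustrative assumptions only; iavf_request_queues() is the only symbol
actually added by this patch.

    /* Hypothetical helper: ask the PF for more queue pairs. On success
     * the PF sends VIRTCHNL_EVENT_RESET_IMPENDING, so the VF is expected
     * to go through its reset/re-init path before the new queues can be
     * used.
     */
    static int
    example_grow_queues(struct iavf_adapter *adapter, uint16_t want)
    {
        int ret;

        ret = iavf_request_queues(adapter, want); /* e.g. want = 64 */
        if (ret != 0)
            return ret; /* PF refused; keep the current queue count */

        return 0; /* VF reset is now pending */
    }
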
 drivers/net/iavf/iavf.h        |   9 ++
 drivers/net/iavf/iavf_ethdev.c |  11 +-
 drivers/net/iavf/iavf_vchnl.c  | 226 +++++++++++++++++++++++++--------
 3 files changed, 192 insertions(+), 54 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index d56611608..93c165c62 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -107,6 +107,14 @@ struct iavf_fdir_info {
 /* TODO: is that correct to assume the max number to be 16 ?*/
 #define IAVF_MAX_MSIX_VECTORS   16
 
+/* Message type read in admin queue from PF */
+enum iavf_aq_result {
+	IAVF_MSG_ERR = -1, /* Error when accessing admin queue */
+	IAVF_MSG_NON,      /* Read nothing from admin queue */
+	IAVF_MSG_SYS,      /* Read system msg from admin queue */
+	IAVF_MSG_CMD,      /* Read async command result */
+};
+
 /* Structure to store private data specific for VF instance. */
 struct iavf_info {
 	uint16_t num_queue_pairs;
@@ -301,4 +309,5 @@ int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 			struct rte_ether_addr *mc_addrs,
 			uint32_t mc_addrs_num, bool add);
+int iavf_request_queues(struct iavf_adapter *adapter, uint16_t num);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 8b1cf8f1c..a4a28b885 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1282,7 +1282,7 @@ iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 }
 
 static int
-iavf_check_vf_reset_done(struct iavf_hw *hw)
+iavf_check_vf_reset_done(struct iavf_hw *hw, struct iavf_info *vf)
 {
 	int i, reset;
 
@@ -1299,6 +1299,9 @@ iavf_check_vf_reset_done(struct iavf_hw *hw)
 	if (i >= IAVF_RESET_WAIT_CNT)
 		return -1;
 
+	/* VF is not in reset or reset is completed */
+	vf->vf_reset = false;
+
 	return 0;
 }
 
@@ -1666,7 +1669,7 @@ iavf_init_vf(struct rte_eth_dev *dev)
 		goto err;
 	}
 
-	err = iavf_check_vf_reset_done(hw);
+	err = iavf_check_vf_reset_done(hw, vf);
 	if (err) {
 		PMD_INIT_LOG(ERR, "VF is still resetting");
 		goto err;
@@ -1911,7 +1914,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 
 	iavf_dev_stop(dev);
 	iavf_flow_flush(dev, NULL);
-	iavf_flow_uninit(adapter);
+	/* If VF is in reset, the adminq is disabled; skip adminq-based cleanup */
+	if (!vf->vf_reset)
+		iavf_flow_uninit(adapter);
 
 	/*
 	 * disable promiscuous mode before reset vf
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 5e7142893..11a1ff608 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -17,6 +17,7 @@
 #include <rte_eal.h>
 #include <rte_ether.h>
 #include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
 #include <rte_dev.h>
 
 #include "iavf.h"
@@ -25,14 +26,54 @@
 #define MAX_TRY_TIMES 200
 #define ASQ_DELAY_MS  10
 
+static uint32_t
+iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
+{
+	uint32_t speed;
+
+	switch (virt_link_speed) {
+	case VIRTCHNL_LINK_SPEED_100MB:
+		speed = 100;
+		break;
+	case VIRTCHNL_LINK_SPEED_1GB:
+		speed = 1000;
+		break;
+	case VIRTCHNL_LINK_SPEED_10GB:
+		speed = 10000;
+		break;
+	case VIRTCHNL_LINK_SPEED_40GB:
+		speed = 40000;
+		break;
+	case VIRTCHNL_LINK_SPEED_20GB:
+		speed = 20000;
+		break;
+	case VIRTCHNL_LINK_SPEED_25GB:
+		speed = 25000;
+		break;
+	case VIRTCHNL_LINK_SPEED_2_5GB:
+		speed = 2500;
+		break;
+	case VIRTCHNL_LINK_SPEED_5GB:
+		speed = 5000;
+		break;
+	default:
+		speed = 0;
+		break;
+	}
+
+	return speed;
+}
+
 /* Read data in admin queue to get msg from pf driver */
-static enum iavf_status
+static enum iavf_aq_result
 iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 		     uint8_t *buf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct rte_eth_dev *dev = adapter->eth_dev;
 	struct iavf_arq_event_info event;
+	enum iavf_aq_result result = IAVF_MSG_NON;
 	enum virtchnl_ops opcode;
 	int ret;
 
@@ -42,7 +83,9 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 	/* Can't read any msg from adminQ */
 	if (ret) {
 		PMD_DRV_LOG(DEBUG, "Can't read msg from AQ");
-		return ret;
+		if (ret != IAVF_ERR_ADMIN_QUEUE_NO_WORK)
+			result = IAVF_MSG_ERR;
+		return result;
 	}
 
 	opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
@@ -52,16 +95,51 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 	PMD_DRV_LOG(DEBUG, "AQ from pf carries opcode %u, retval %d",
 		    opcode, vf->cmd_retval);
 
-	if (opcode != vf->pend_cmd) {
-		if (opcode != VIRTCHNL_OP_EVENT) {
-			PMD_DRV_LOG(WARNING,
-				    "command mismatch, expect %u, get %u",
-				    vf->pend_cmd, opcode);
+	if (opcode == VIRTCHNL_OP_EVENT) {
+		struct virtchnl_pf_event *vpe =
+			(struct virtchnl_pf_event *)event.msg_buf;
+
+		result = IAVF_MSG_SYS;
+		switch (vpe->event) {
+		case VIRTCHNL_EVENT_LINK_CHANGE:
+			vf->link_up =
+				vpe->event_data.link_event.link_status;
+			if (vf->vf_res->vf_cap_flags &
+				VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+				vf->link_speed =
+				    vpe->event_data.link_event_adv.link_speed;
+			} else {
+				enum virtchnl_link_speed speed;
+				speed = vpe->event_data.link_event.link_speed;
+				vf->link_speed = iavf_convert_link_speed(speed);
+			}
+			iavf_dev_link_update(dev, 0);
+			PMD_DRV_LOG(INFO, "Link status update:%s",
+					vf->link_up ? "up" : "down");
+			break;
+		case VIRTCHNL_EVENT_RESET_IMPENDING:
+			vf->vf_reset = true;
+			PMD_DRV_LOG(INFO, "VF is resetting");
+			break;
+		case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+			vf->dev_closed = true;
+			PMD_DRV_LOG(INFO, "PF driver closed");
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
+					__func__, vpe->event);
+		}
+	} else {
+		/* async reply msg on command issued by vf previously */
+		result = IAVF_MSG_CMD;
+		if (opcode != vf->pend_cmd) {
+			PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
+					vf->pend_cmd, opcode);
+			result = IAVF_MSG_ERR;
 		}
-		return IAVF_ERR_OPCODE_MISMATCH;
 	}
 
-	return IAVF_SUCCESS;
+	return result;
 }
 
 static int
@@ -69,6 +147,7 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	enum iavf_aq_result result;
 	enum iavf_status ret;
 	int err = 0;
 	int i = 0;
@@ -97,9 +176,9 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
 		/* for init virtchnl ops, need to poll the response */
 		do {
-			ret = iavf_read_msg_from_pf(adapter, args->out_size,
+			result = iavf_read_msg_from_pf(adapter, args->out_size,
 						   args->out_buffer);
-			if (ret == IAVF_SUCCESS)
+			if (result == IAVF_MSG_CMD)
 				break;
 			rte_delay_ms(ASQ_DELAY_MS);
 		} while (i++ < MAX_TRY_TIMES);
@@ -111,7 +190,33 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 		}
 		_clear_cmd(vf);
 		break;
-
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		/*
+	 * Ignore the async reply and only wait for the system message.
+	 * vf_reset is set to true if VIRTCHNL_EVENT_RESET_IMPENDING is
+	 * received; otherwise the request queues command failed.
+		 */
+		do {
+			result = iavf_read_msg_from_pf(adapter, args->out_size,
+						   args->out_buffer);
+			if (result == IAVF_MSG_SYS && vf->vf_reset) {
+				break;
+			} else if (result == IAVF_MSG_CMD ||
+				result == IAVF_MSG_ERR) {
+				err = -1;
+				break;
+			}
+			rte_delay_ms(ASQ_DELAY_MS);
+			/* Keep polling if nothing or only a sys event was read */
+		} while (i++ < MAX_TRY_TIMES);
+		if (i >= MAX_TRY_TIMES ||
+		    vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+			err = -1;
+			PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+				    " for cmd %d", vf->cmd_retval, args->ops);
+		}
+		_clear_cmd(vf);
+		break;
 	default:
 		/* For other virtchnl ops in running time,
 		 * wait for the cmd done flag.
@@ -136,44 +241,6 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	return err;
 }
 
-static uint32_t
-iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
-{
-	uint32_t speed;
-
-	switch (virt_link_speed) {
-	case VIRTCHNL_LINK_SPEED_100MB:
-		speed = 100;
-		break;
-	case VIRTCHNL_LINK_SPEED_1GB:
-		speed = 1000;
-		break;
-	case VIRTCHNL_LINK_SPEED_10GB:
-		speed = 10000;
-		break;
-	case VIRTCHNL_LINK_SPEED_40GB:
-		speed = 40000;
-		break;
-	case VIRTCHNL_LINK_SPEED_20GB:
-		speed = 20000;
-		break;
-	case VIRTCHNL_LINK_SPEED_25GB:
-		speed = 25000;
-		break;
-	case VIRTCHNL_LINK_SPEED_2_5GB:
-		speed = 2500;
-		break;
-	case VIRTCHNL_LINK_SPEED_5GB:
-		speed = 5000;
-		break;
-	default:
-		speed = 0;
-		break;
-	}
-
-	return speed;
-}
-
 static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
@@ -389,7 +456,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
 		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
 		VIRTCHNL_VF_OFFLOAD_FDIR_PF |
-		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
+		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -1148,3 +1216,59 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 
 	return 0;
 }
+
+int
+iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
+{
+	struct rte_eth_dev *dev = adapter->eth_dev;
+	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct virtchnl_vf_res_request vfres;
+	struct iavf_cmd_info args;
+	uint16_t num_queue_pairs;
+	int err;
+
+	if (!(vf->vf_res->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
+		PMD_DRV_LOG(ERR, "request queues not supported");
+		return -1;
+	}
+
+	if (num == 0) {
+		PMD_DRV_LOG(ERR, "queue number cannot be zero");
+		return -1;
+	}
+	vfres.num_queue_pairs = num;
+
+	args.ops = VIRTCHNL_OP_REQUEST_QUEUES;
+	args.in_args = (u8 *)&vfres;
+	args.in_args_size = sizeof(vfres);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	/*
+	 * Disable interrupt so that the admin queue message is not read
+	 * elsewhere before iavf_read_msg_from_pf.
+	 */
+	rte_intr_disable(&pci_dev->intr_handle);
+	err = iavf_execute_vf_cmd(adapter, &args);
+	rte_intr_enable(&pci_dev->intr_handle);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
+		return err;
+	}
+
+	/* request queues succeeded, vf is resetting */
+	if (vf->vf_reset) {
+		PMD_DRV_LOG(INFO, "vf is resetting");
+		return 0;
+	}
+
+	/* request additional queues failed, report how many are available */
+	num_queue_pairs =
+	  ((struct virtchnl_vf_res_request *)args.out_buffer)->num_queue_pairs;
+	PMD_DRV_LOG(ERR, "request queues failed, only %u queues "
+		"available", num_queue_pairs);
+
+	return -1;
+}
-- 
2.17.1



Thread overview: 44+ messages
2020-09-09  7:20 [dpdk-dev] [PATCH v1 0/2] enable large VF configuration Ting Xu
2020-09-09  7:20 ` [dpdk-dev] [PATCH v1 1/2] net/iavf: add IAVF request queues Ting Xu
2020-09-09  7:20 ` [dpdk-dev] [PATCH v1 2/2] net/iavf: enable large VF configuration Ting Xu
2020-09-25  5:59 ` [dpdk-dev] [PATCH v2 0/2] " Ting Xu
2020-09-25  5:59   ` [dpdk-dev] [PATCH v2 1/2] net/iavf: add IAVF request queues function Ting Xu
2020-09-25  5:59   ` [dpdk-dev] [PATCH v2 2/2] net/iavf: enable large VF configuration Ting Xu
2020-09-27 12:42 ` [dpdk-dev] [PATCH v3 0/2] " Ting Xu
2020-09-27 12:42   ` [dpdk-dev] [PATCH v3 1/2] net/iavf: add IAVF request queues function Ting Xu
2020-09-27 12:42   ` [dpdk-dev] [PATCH v3 2/2] net/iavf: enable large VF configuration Ting Xu
2020-10-15  5:21 ` [dpdk-dev] [PATCH v4 0/2] " Ting Xu
2020-10-15  5:21   ` [dpdk-dev] [PATCH v4 1/2] net/iavf: add IAVF request queues function Ting Xu
2020-10-15  5:21   ` [dpdk-dev] [PATCH v4 2/2] net/iavf: enable large VF configuration Ting Xu
2020-10-16  1:21 ` [dpdk-dev] [PATCH v5 0/2] " Ting Xu
2020-10-16  1:21   ` Ting Xu [this message]
2020-10-16  1:21   ` [dpdk-dev] [PATCH v5 2/2] net/iavf: " Ting Xu
2020-10-16  1:34 ` [dpdk-dev] [PATCH v5 0/2] " Ting Xu
2020-10-16  1:34   ` [dpdk-dev] [PATCH v5 1/2] net/iavf: add IAVF request queues function Ting Xu
2020-10-16  1:34   ` [dpdk-dev] [PATCH v5 2/2] net/iavf: enable large VF configuration Ting Xu
2020-10-16  1:43 ` [dpdk-dev] [PATCH v6 0/2] " Ting Xu
2020-10-16  1:43   ` [dpdk-dev] [PATCH v6 1/2] net/iavf: add IAVF request queues function Ting Xu
2020-10-16  8:41     ` Xing, Beilei
2020-10-18 10:29       ` Xu, Ting
2020-10-16  1:43   ` [dpdk-dev] [PATCH v6 2/2] net/iavf: enable large VF configuration Ting Xu
2020-10-18 10:34 ` [dpdk-dev] [PATCH v7 0/6] " Ting Xu
2020-10-18 10:34   ` [dpdk-dev] [PATCH v7 1/6] net/iavf: handle virtchnl event message without interrupt Ting Xu
2020-10-21  8:15     ` Xing, Beilei
2020-10-18 10:34   ` [dpdk-dev] [PATCH v7 2/6] net/iavf: add IAVF request queues function Ting Xu
2020-10-18 10:34   ` [dpdk-dev] [PATCH v7 3/6] net/iavf: negotiate large VF and request more queues Ting Xu
2020-10-18 10:34   ` [dpdk-dev] [PATCH v7 4/6] net/iavf: enable multiple queues configurations for large VF Ting Xu
2020-10-18 10:34   ` [dpdk-dev] [PATCH v7 5/6] net/iavf: enable IRQ mapping configuration " Ting Xu
2020-10-18 10:34   ` [dpdk-dev] [PATCH v7 6/6] net/iavf: add enable/disable queues " Ting Xu
2020-10-22  6:48 ` [dpdk-dev] [PATCH v8 0/6] enable large VF configuration Ting Xu
2020-10-22  6:48   ` [dpdk-dev] [PATCH v8 1/6] net/iavf: handle virtchnl event message without interrupt Ting Xu
2020-10-22  6:48   ` [dpdk-dev] [PATCH v8 2/6] net/iavf: add IAVF request queues function Ting Xu
2020-10-23 10:07     ` Ferruh Yigit
2020-10-25  2:28       ` Xu, Ting
2020-10-23 10:11     ` Ferruh Yigit
2020-10-23 10:17       ` Ferruh Yigit
2020-10-22  6:48   ` [dpdk-dev] [PATCH v8 3/6] net/iavf: negotiate large VF and request more queues Ting Xu
2020-10-22  6:49   ` [dpdk-dev] [PATCH v8 4/6] net/iavf: enable multiple queues configurations for large VF Ting Xu
2020-10-22  6:49   ` [dpdk-dev] [PATCH v8 5/6] net/iavf: enable IRQ mapping configuration " Ting Xu
2020-10-22  6:49   ` [dpdk-dev] [PATCH v8 6/6] net/iavf: add enable/disable queues " Ting Xu
2020-10-22  6:54   ` [dpdk-dev] [PATCH v8 0/6] enable large VF configuration Xing, Beilei
2020-10-22  8:50     ` Zhang, Qi Z
