DPDK patches and discussions
* [PATCH v1 0/3] Enable queue rate limit and quanta size configuration
@ 2022-03-29  2:07 Wenjun Wu
  2022-03-29  2:07 ` [PATCH v1 1/3] common/iavf: support " Wenjun Wu
                   ` (7 more replies)
  0 siblings, 8 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-03-29  2:07 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing, qi.z.zhang; +Cc: Wenjun Wu

This patch set adds queue rate limit and quanta size configuration.
Quanta size can be changed through the driver devarg quanta_size=xxx.
Quanta size must be set to a value between 256 and 4096 that is a
multiple of 64.
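
For example (with an illustrative PCI address), the devarg is passed
on the EAL command line: dpdk-testpmd -a 18:01.0,quanta_size=1024 -- -i.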

Wenjun Wu (3):
  common/iavf: support queue rate limit and quanta size configuration
  net/iavf: support queue rate limit configuration
  net/iavf: support quanta size configuration

 drivers/common/iavf/virtchnl.h |  51 +++++++++
 drivers/net/iavf/iavf.h        |  16 +++
 drivers/net/iavf/iavf_ethdev.c |  37 +++++++
 drivers/net/iavf/iavf_tm.c     | 189 +++++++++++++++++++++++++++++++--
 drivers/net/iavf/iavf_vchnl.c  |  51 +++++++++
 5 files changed, 336 insertions(+), 8 deletions(-)

-- 
2.25.1


* [PATCH v1 1/3] common/iavf: support queue rate limit and quanta size configuration
  2022-03-29  2:07 [PATCH v1 0/3] Enable queue rate limit and quanta size configuration Wenjun Wu
@ 2022-03-29  2:07 ` Wenjun Wu
  2022-03-29  2:07 ` [PATCH v1 2/3] net/iavf: support queue rate limit configuration Wenjun Wu
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-03-29  2:07 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing, qi.z.zhang; +Cc: Wenjun Wu, Ting Xu

This patch adds new virtchnl opcodes and structures for rate limit
and quanta size configuration, which include:
1. VIRTCHNL_OP_CONFIG_QUEUE_BW, to configure the maximum bandwidth of
each queue of a VF.
2. VIRTCHNL_OP_CONFIG_QUANTA, to configure the quanta size per queue.
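
As a minimal sketch (not part of the patch) of how a sender sizes the
variable-length VIRTCHNL_OP_CONFIG_QUEUE_BW message: the structures
below mirror the virtchnl.h hunk, and virtchnl_shaper_bw is assumed
to be the existing committed/peak pair of u32 rates in Kbps.

  #include <stdint.h>
  #include <stdlib.h>

  struct virtchnl_shaper_bw {        /* existing virtchnl type */
          uint32_t committed;        /* CIR, Kbps */
          uint32_t peak;             /* PIR, Kbps */
  };

  struct virtchnl_queue_bw {
          uint16_t queue_id;
          uint8_t pad[2];
          struct virtchnl_shaper_bw shaper;
  };

  struct virtchnl_queues_bw_cfg {
          uint16_t vsi_id;
          uint16_t num_queues;
          struct virtchnl_queue_bw cfg[1]; /* tail grows with num_queues */
  };

  /* cfg[1] is already counted by sizeof(), hence the "- 1"; this is
   * the same valid_len computation that this patch adds to
   * virtchnl_vc_validate_vf_msg(). */
  static struct virtchnl_queues_bw_cfg *
  q_bw_alloc(uint16_t num_queues, uint16_t *msglen)
  {
          struct virtchnl_queues_bw_cfg *m;

          *msglen = sizeof(*m) + (num_queues - 1) * sizeof(m->cfg[0]);
          m = calloc(1, *msglen);
          if (m != NULL)
                  m->num_queues = num_queues;
          return m;
  }

For 4 queues this gives 16 + 3 * 12 = 52 bytes, exactly what the
validator recomputes on the receiving side.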

Signed-off-by: Ting Xu <ting.xu@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/common/iavf/virtchnl.h | 51 ++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 3e44eca7d8..0d51055ae2 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -164,6 +164,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
 	VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
+	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
+	VIRTCHNL_OP_CONFIG_QUANTA = 113,
 	VIRTCHNL_OP_MAX,
 };
 
@@ -1872,6 +1874,22 @@ struct virtchnl_queue_tc_mapping {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);
 
+/* VIRTCHNL_OP_CONFIG_QUEUE_BW */
+struct virtchnl_queue_bw {
+	u16 queue_id;
+	u8 pad[2];
+	struct virtchnl_shaper_bw shaper;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
+
+struct virtchnl_queues_bw_cfg {
+	u16 vsi_id;
+	u16 num_queues;
+	struct virtchnl_queue_bw cfg[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queues_bw_cfg);
 
 /* TX and RX queue types are valid in legacy as well as split queue models.
  * With Split Queue model, 2 additional types are introduced - TX_COMPLETION
@@ -1978,6 +1996,12 @@ struct virtchnl_queue_vector_maps {
 
 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);
 
+struct virtchnl_quanta_cfg {
+	u16 quanta_size;
+	struct virtchnl_queue_chunk queue_select;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
 
 /* Since VF messages are limited by u16 size, precalculate the maximum possible
  * values of nested elements in virtchnl structures that virtual channel can
@@ -2244,6 +2268,33 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 					 sizeof(q_tc->tc[0]);
 		}
 		break;
+	case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+		valid_len = sizeof(struct virtchnl_queues_bw_cfg);
+		if (msglen >= valid_len) {
+			struct virtchnl_queues_bw_cfg *q_bw =
+				(struct virtchnl_queues_bw_cfg *)msg;
+			if (q_bw->num_queues == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (q_bw->num_queues - 1) *
+					 sizeof(q_bw->cfg[0]);
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_QUANTA:
+		valid_len = sizeof(struct virtchnl_quanta_cfg);
+		if (msglen >= valid_len) {
+			struct virtchnl_quanta_cfg *q_quanta =
+				(struct virtchnl_quanta_cfg *)msg;
+			if (q_quanta->quanta_size == 0 ||
+			    q_quanta->queue_select.num_queues == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (q_quanta->queue_select.num_queues - 1) *
+					 sizeof(struct virtchnl_queue_chunk);
+		}
+		break;
 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
 		break;
 	case VIRTCHNL_OP_ADD_VLAN_V2:
-- 
2.25.1


* [PATCH v1 2/3] net/iavf: support queue rate limit configuration
  2022-03-29  2:07 [PATCH v1 0/3] Enable queue rate limit and quanta size configuration Wenjun Wu
  2022-03-29  2:07 ` [PATCH v1 1/3] common/iavf: support " Wenjun Wu
@ 2022-03-29  2:07 ` Wenjun Wu
  2022-03-29  2:07 ` [PATCH v1 3/3] net/iavf: support quanta size configuration Wenjun Wu
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-03-29  2:07 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing, qi.z.zhang; +Cc: Wenjun Wu, Ting Xu

This patch adds queue rate limit configuration support.
Only max bandwidth is supported.
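
For reference, a minimal application-side sketch of the rte_tm calls
this patch enables; the port and TC nodes are assumed to exist
already, and the node and profile IDs are illustrative.

  #include <rte_tm.h>

  /* Cap one Tx queue at ~500 Mbit/s.  rte_tm rates are in bytes per
   * second; the driver converts them to Kbps (rate / 1000 * 8)
   * before sending VIRTCHNL_OP_CONFIG_QUEUE_BW to the PF. */
  static int
  limit_queue_bw(uint16_t port_id, uint32_t queue_node_id,
                 uint32_t tc_node_id)
  {
          struct rte_tm_error err;
          struct rte_tm_shaper_params sp = {
                  .peak.rate = 500 * 1000 * 1000 / 8,
                  /* committed.size, peak.size and pkt_length_adjust
                   * stay 0: the driver rejects them as unsupported. */
          };
          struct rte_tm_node_params np = { .shaper_profile_id = 1 };
          int ret;

          ret = rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
          if (ret != 0)
                  return ret;
          ret = rte_tm_node_add(port_id, queue_node_id, tc_node_id,
                                0 /* priority */, 1 /* weight */,
                                RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
          if (ret != 0)
                  return ret;
          /* Nothing reaches the PF until the hierarchy is committed. */
          return rte_tm_hierarchy_commit(port_id, 1, &err);
  }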

Signed-off-by: Ting Xu <ting.xu@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  13 +++
 drivers/net/iavf/iavf_tm.c    | 189 ++++++++++++++++++++++++++++++++--
 drivers/net/iavf/iavf_vchnl.c |  23 +++++
 3 files changed, 217 insertions(+), 8 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index a01d18e61b..96515a3ee9 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -170,11 +170,21 @@ struct iavf_tm_node {
 	uint32_t weight;
 	uint32_t reference_count;
 	struct iavf_tm_node *parent;
+	struct iavf_tm_shaper_profile *shaper_profile;
 	struct rte_tm_node_params params;
 };
 
 TAILQ_HEAD(iavf_tm_node_list, iavf_tm_node);
 
+struct iavf_tm_shaper_profile {
+	TAILQ_ENTRY(iavf_tm_shaper_profile) node;
+	uint32_t shaper_profile_id;
+	uint32_t reference_count;
+	struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(iavf_shaper_profile_list, iavf_tm_shaper_profile);
+
 /* node type of Traffic Manager */
 enum iavf_tm_node_type {
 	IAVF_TM_NODE_TYPE_PORT,
@@ -188,6 +198,7 @@ struct iavf_tm_conf {
 	struct iavf_tm_node *root; /* root node - vf vsi */
 	struct iavf_tm_node_list tc_list; /* node list for all the TCs */
 	struct iavf_tm_node_list queue_list; /* node list for all the queues */
+	struct iavf_shaper_profile_list shaper_profile_list;
 	uint32_t nb_tc_node;
 	uint32_t nb_queue_node;
 	bool committed;
@@ -451,6 +462,8 @@ int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 int iavf_request_queues(struct rte_eth_dev *dev, uint16_t num);
 int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
 int iavf_get_qos_cap(struct iavf_adapter *adapter);
+int iavf_set_q_bw(struct rte_eth_dev *dev,
+		  struct virtchnl_queues_bw_cfg *q_bw, uint16_t size);
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			struct virtchnl_queue_tc_mapping *q_tc_mapping,
 			uint16_t size);
diff --git a/drivers/net/iavf/iavf_tm.c b/drivers/net/iavf/iavf_tm.c
index 8d92062c7f..25b74588bb 100644
--- a/drivers/net/iavf/iavf_tm.c
+++ b/drivers/net/iavf/iavf_tm.c
@@ -8,6 +8,13 @@
 static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 				 __rte_unused int clear_on_fail,
 				 __rte_unused struct rte_tm_error *error);
+static int iavf_shaper_profile_add(struct rte_eth_dev *dev,
+				   uint32_t shaper_profile_id,
+				   struct rte_tm_shaper_params *profile,
+				   struct rte_tm_error *error);
+static int iavf_shaper_profile_del(struct rte_eth_dev *dev,
+				   uint32_t shaper_profile_id,
+				   struct rte_tm_error *error);
 static int iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	      uint32_t parent_node_id, uint32_t priority,
 	      uint32_t weight, uint32_t level_id,
@@ -30,6 +37,8 @@ static int iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 		   int *is_leaf, struct rte_tm_error *error);
 
 const struct rte_tm_ops iavf_tm_ops = {
+	.shaper_profile_add = iavf_shaper_profile_add,
+	.shaper_profile_delete = iavf_shaper_profile_del,
 	.node_add = iavf_tm_node_add,
 	.node_delete = iavf_tm_node_delete,
 	.capabilities_get = iavf_tm_capabilities_get,
@@ -44,6 +53,9 @@ iavf_tm_conf_init(struct rte_eth_dev *dev)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
+	/* initialize shaper profile list */
+	TAILQ_INIT(&vf->tm_conf.shaper_profile_list);
+
 	/* initialize node configuration */
 	vf->tm_conf.root = NULL;
 	TAILQ_INIT(&vf->tm_conf.tc_list);
@@ -57,6 +69,7 @@ void
 iavf_tm_conf_uninit(struct rte_eth_dev *dev)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
 	struct iavf_tm_node *tm_node;
 
 	/* clear node configuration */
@@ -74,6 +87,14 @@ iavf_tm_conf_uninit(struct rte_eth_dev *dev)
 		rte_free(vf->tm_conf.root);
 		vf->tm_conf.root = NULL;
 	}
+
+	/* Remove all shaper profiles */
+	while ((shaper_profile =
+	       TAILQ_FIRST(&vf->tm_conf.shaper_profile_list))) {
+		TAILQ_REMOVE(&vf->tm_conf.shaper_profile_list,
+			     shaper_profile, node);
+		rte_free(shaper_profile);
+	}
 }
 
 static inline struct iavf_tm_node *
@@ -132,13 +153,6 @@ iavf_node_param_check(struct iavf_info *vf, uint32_t node_id,
 		return -EINVAL;
 	}
 
-	/* not support shaper profile */
-	if (params->shaper_profile_id) {
-		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
-		error->message = "shaper profile not supported";
-		return -EINVAL;
-	}
-
 	/* not support shared shaper */
 	if (params->shared_shaper_id) {
 		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
@@ -236,6 +250,23 @@ iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 	return 0;
 }
 
+static inline struct iavf_tm_shaper_profile *
+iavf_shaper_profile_search(struct rte_eth_dev *dev,
+			   uint32_t shaper_profile_id)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_shaper_profile_list *shaper_profile_list =
+		&vf->tm_conf.shaper_profile_list;
+	struct iavf_tm_shaper_profile *shaper_profile;
+
+	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+		if (shaper_profile_id == shaper_profile->shaper_profile_id)
+			return shaper_profile;
+	}
+
+	return NULL;
+}
+
 static int
 iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	      uint32_t parent_node_id, uint32_t priority,
@@ -246,6 +277,7 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
 	enum iavf_tm_node_type parent_node_type = IAVF_TM_NODE_TYPE_MAX;
+	struct iavf_tm_shaper_profile *shaper_profile = NULL;
 	struct iavf_tm_node *tm_node;
 	struct iavf_tm_node *parent_node;
 	uint16_t tc_nb = vf->qos_cap->num_elem;
@@ -273,6 +305,18 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 		return -EINVAL;
 	}
 
+	/* check the shaper profile id */
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		shaper_profile = iavf_shaper_profile_search(dev,
+			params->shaper_profile_id);
+		if (!shaper_profile) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "shaper profile not exist";
+			return -EINVAL;
+		}
+	}
+
 	/* root node if not have a parent */
 	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
 		/* check level */
@@ -358,6 +402,7 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	tm_node->id = node_id;
 	tm_node->reference_count = 0;
 	tm_node->parent = parent_node;
+	tm_node->shaper_profile = shaper_profile;
 	rte_memcpy(&tm_node->params, params,
 			 sizeof(struct rte_tm_node_params));
 	if (parent_node_type == IAVF_TM_NODE_TYPE_PORT) {
@@ -373,6 +418,10 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	}
 	tm_node->parent->reference_count++;
 
+	/* increase the reference counter of the shaper profile */
+	if (shaper_profile)
+		shaper_profile->reference_count++;
+
 	return 0;
 }
 
@@ -437,6 +486,103 @@ iavf_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
 	return 0;
 }
 
+static int
+iavf_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+				struct rte_tm_error *error)
+{
+	/* min bucket size not supported */
+	if (profile->committed.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+		error->message = "committed bucket size not supported";
+		return -EINVAL;
+	}
+	/* max bucket size not supported */
+	if (profile->peak.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+		error->message = "peak bucket size not supported";
+		return -EINVAL;
+	}
+	/* length adjustment not supported */
+	if (profile->pkt_length_adjust) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+		error->message = "packet length adjustment not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+iavf_shaper_profile_add(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_shaper_params *profile,
+			struct rte_tm_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
+	int ret;
+
+	if (!profile || !error)
+		return -EINVAL;
+
+	ret = iavf_shaper_profile_param_check(profile, error);
+	if (ret)
+		return ret;
+
+	shaper_profile = iavf_shaper_profile_search(dev, shaper_profile_id);
+
+	if (shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID exist";
+		return -EINVAL;
+	}
+
+	shaper_profile = rte_zmalloc("iavf_tm_shaper_profile",
+				     sizeof(struct iavf_tm_shaper_profile),
+				     0);
+	if (!shaper_profile)
+		return -ENOMEM;
+	shaper_profile->shaper_profile_id = shaper_profile_id;
+	rte_memcpy(&shaper_profile->profile, profile,
+			 sizeof(struct rte_tm_shaper_params));
+	TAILQ_INSERT_TAIL(&vf->tm_conf.shaper_profile_list,
+			  shaper_profile, node);
+
+	return 0;
+}
+
+static int
+iavf_shaper_profile_del(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
+
+	if (!error)
+		return -EINVAL;
+
+	shaper_profile = iavf_shaper_profile_search(dev, shaper_profile_id);
+
+	if (!shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID not exist";
+		return -EINVAL;
+	}
+
+	/* don't delete a profile if it's used by one or several nodes */
+	if (shaper_profile->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "profile in use";
+		return -EINVAL;
+	}
+
+	TAILQ_REMOVE(&vf->tm_conf.shaper_profile_list, shaper_profile, node);
+	rte_free(shaper_profile);
+
+	return 0;
+}
+
 static int
 iavf_tm_capabilities_get(struct rte_eth_dev *dev,
 			 struct rte_tm_capabilities *cap,
@@ -656,10 +802,11 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct virtchnl_queue_tc_mapping *q_tc_mapping;
+	struct virtchnl_queues_bw_cfg *q_bw;
 	struct iavf_tm_node_list *queue_list = &vf->tm_conf.queue_list;
 	struct iavf_tm_node *tm_node;
 	struct iavf_qtc_map *qtc_map;
-	uint16_t size;
+	uint16_t size, size_q;
 	int index = 0, node_committed = 0;
 	int i, ret_val = IAVF_SUCCESS;
 
@@ -691,10 +838,21 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 		goto fail_clear;
 	}
 
+	size_q = sizeof(*q_bw) + sizeof(q_bw->cfg[0]) *
+		(vf->num_queue_pairs - 1);
+	q_bw = rte_zmalloc("q_bw", size_q, 0);
+	if (!q_bw) {
+		ret_val = IAVF_ERR_NO_MEMORY;
+		goto fail_clear;
+	}
+
 	q_tc_mapping->vsi_id = vf->vsi.vsi_id;
 	q_tc_mapping->num_tc = vf->qos_cap->num_elem;
 	q_tc_mapping->num_queue_pairs = vf->num_queue_pairs;
 
+	q_bw->vsi_id = vf->vsi.vsi_id;
+	q_bw->num_queues = vf->num_queue_pairs;
+
 	TAILQ_FOREACH(tm_node, queue_list, node) {
 		if (tm_node->tc >= q_tc_mapping->num_tc) {
 			PMD_DRV_LOG(ERR, "TC%d is not enabled", tm_node->tc);
@@ -702,6 +860,17 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 			goto fail_clear;
 		}
 		q_tc_mapping->tc[tm_node->tc].req.queue_count++;
+
+		if (tm_node->shaper_profile) {
+			q_bw->cfg[node_committed].queue_id = node_committed;
+			q_bw->cfg[node_committed].shaper.peak =
+			tm_node->shaper_profile->profile.peak.rate /
+			1000 * IAVF_BITS_PER_BYTE;
+			q_bw->cfg[node_committed].shaper.committed =
+			tm_node->shaper_profile->profile.committed.rate /
+			1000 * IAVF_BITS_PER_BYTE;
+		}
+
 		node_committed++;
 	}
 
@@ -712,6 +881,10 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 		goto fail_clear;
 	}
 
+	ret_val = iavf_set_q_bw(dev, q_bw, size_q);
+	if (ret_val)
+		goto fail_clear;
+
 	/* store the queue TC mapping info */
 	qtc_map = rte_zmalloc("qtc_map",
 		sizeof(struct iavf_qtc_map) * q_tc_mapping->num_tc, 0);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 169e1f2012..537369f736 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1636,6 +1636,29 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	return err;
 }
 
+int iavf_set_q_bw(struct rte_eth_dev *dev,
+		struct virtchnl_queues_bw_cfg *q_bw, uint16_t size)
+{
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_cmd_info args;
+	int err;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_CONFIG_QUEUE_BW;
+	args.in_args = (uint8_t *)q_bw;
+	args.in_args_size = size;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of"
+			    " VIRTCHNL_OP_CONFIG_QUEUE_BW");
+	return err;
+}
+
 int
 iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 			struct rte_ether_addr *mc_addrs,
-- 
2.25.1


* [PATCH v1 3/3] net/iavf: support quanta size configuration
  2022-03-29  2:07 [PATCH v1 0/3] Enable queue rate limit and quanta size configuration Wenjun Wu
  2022-03-29  2:07 ` [PATCH v1 1/3] common/iavf: support " Wenjun Wu
  2022-03-29  2:07 ` [PATCH v1 2/3] net/iavf: support queue rate limit configuration Wenjun Wu
@ 2022-03-29  2:07 ` Wenjun Wu
  2022-04-08  1:30 ` [PATCH v2 0/3] Enable queue rate limit and " Wenjun Wu
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-03-29  2:07 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing, qi.z.zhang; +Cc: Wenjun Wu

This patch adds quanta size configuration support.
Quanta size must be between 256 and 4096, and be a multiple of 64.
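
The devarg constraint reduces to a small predicate, sketched here for
clarity (hypothetical helper, not part of the patch):

  #include <stdbool.h>
  #include <stdint.h>

  /* Valid quanta sizes are 256, 320, ..., 4096: a range check plus a
   * multiple-of-64 test. */
  static bool
  quanta_size_valid(uint16_t q)
  {
          return q >= 256 && q <= 4096 && q % 64 == 0;
  }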

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/net/iavf/iavf.h        |  3 +++
 drivers/net/iavf/iavf_ethdev.c | 37 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c  | 28 +++++++++++++++++++++++++
 3 files changed, 68 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 96515a3ee9..c0a4a47b04 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -292,6 +292,7 @@ enum iavf_proto_xtr_type {
 struct iavf_devargs {
 	uint8_t proto_xtr_dflt;
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
+	uint16_t quanta_size;
 };
 
 struct iavf_security_ctx;
@@ -467,6 +468,8 @@ int iavf_set_q_bw(struct rte_eth_dev *dev,
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			struct virtchnl_queue_tc_mapping *q_tc_mapping,
 			uint16_t size);
+int iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id,
+			    u16 num_queues);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
 int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..e5fa63c71b 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -34,9 +34,11 @@
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
+#define IAVF_QUANTA_SIZE_ARG       "quanta_size"
 
 static const char * const iavf_valid_args[] = {
 	IAVF_PROTO_XTR_ARG,
+	IAVF_QUANTA_SIZE_ARG,
 	NULL
 };
 
@@ -950,6 +952,11 @@ iavf_dev_start(struct rte_eth_dev *dev)
 		return -1;
 	}
 
+	if (iavf_set_vf_quanta_size(adapter, index, num_queue_pairs) != 0) {
+		PMD_DRV_LOG(ERR, "configure quanta size failed");
+		goto err_queue;
+	}
+
 	/* If needed, send configure queues msg multiple times to make the
 	 * adminq buffer length smaller than the 4K limitation.
 	 */
@@ -2092,6 +2099,25 @@ iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
 	return 0;
 }
 
+static int
+parse_u16(__rte_unused const char *key, const char *value, void *args)
+{
+	u16 *num = (u16 *)args;
+	u16 tmp;
+
+	errno = 0;
+	tmp = strtoull(value, NULL, 10);
+	if (errno || !tmp) {
+		PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u16",
+			    key, value);
+		return -1;
+	}
+
+	*num = tmp;
+
+	return 0;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2118,6 +2144,17 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 	if (ret)
 		goto bail;
 
+	ret = rte_kvargs_process(kvlist, IAVF_QUANTA_SIZE_ARG,
+				 &parse_u16, &ad->devargs.quanta_size);
+	if (ret)
+		goto bail;
+
+	if (ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 ||
+	    ad->devargs.quanta_size % 64) {
+		PMD_INIT_LOG(ERR, "invalid quanta size\n");
+		return -EINVAL;
+	}
+
 bail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 537369f736..ee26e45acf 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1828,3 +1828,31 @@ iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 
 	return 0;
 }
+
+int
+iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id, u16 num_queues)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	struct virtchnl_quanta_cfg q_quanta;
+	int err;
+
+	q_quanta.quanta_size = adapter->devargs.quanta_size;
+	q_quanta.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
+	q_quanta.queue_select.start_queue_id = start_queue_id;
+	q_quanta.queue_select.num_queues = num_queues;
+
+	args.ops = VIRTCHNL_OP_CONFIG_QUANTA;
+	args.in_args = (uint8_t *)&q_quanta;
+	args.in_args_size = sizeof(q_quanta);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_CONFIG_QUANTA");
+		return err;
+	}
+
+	return 0;
+}
-- 
2.25.1


* [PATCH v2 0/3] Enable queue rate limit and quanta size configuration
  2022-03-29  2:07 [PATCH v1 0/3] Enable queue rate limit and quanta size configuration Wenjun Wu
                   ` (2 preceding siblings ...)
  2022-03-29  2:07 ` [PATCH v1 3/3] net/iavf: support quanta size configuration Wenjun Wu
@ 2022-04-08  1:30 ` Wenjun Wu
  2022-04-08  1:30   ` [PATCH v2 1/3] common/iavf: support " Wenjun Wu
                     ` (2 more replies)
  2022-04-08  5:30 ` [PATCH v3 0/4] Enable queue rate limit and " Wenjun Wu
                   ` (3 subsequent siblings)
  7 siblings, 3 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-08  1:30 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing

This patch set adds queue rate limit and quanta size configuration.
Quanta size can be changed through the driver devarg quanta_size=xxx.
Quanta size must be set to a value between 256 and 4096 that is a
multiple of 64.

v2: rework virtchnl

Wenjun Wu (3):
  common/iavf: support queue rate limit and quanta size configuration
  net/iavf: support queue rate limit configuration
  net/iavf: support quanta size configuration

 drivers/common/iavf/virtchnl.h |  50 +++++++++
 drivers/net/iavf/iavf.h        |  16 +++
 drivers/net/iavf/iavf_ethdev.c |  40 +++++++
 drivers/net/iavf/iavf_tm.c     | 190 +++++++++++++++++++++++++++++++--
 drivers/net/iavf/iavf_vchnl.c  |  51 +++++++++
 5 files changed, 339 insertions(+), 8 deletions(-)

-- 
2.25.1


* [PATCH v2 1/3] common/iavf: support queue rate limit and quanta size configuration
  2022-04-08  1:30 ` [PATCH v2 0/3] Enable queue rate limit and " Wenjun Wu
@ 2022-04-08  1:30   ` Wenjun Wu
  2022-04-08  1:30   ` [PATCH v2 2/3] net/iavf: support queue rate limit configuration Wenjun Wu
  2022-04-08  1:30   ` [PATCH v2 3/3] net/iavf: support quanta size configuration Wenjun Wu
  2 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-08  1:30 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing

This patch adds new virtchnl opcodes and structures for rate limit
and quanta size configuration, which include:
1. VIRTCHNL_OP_CONFIG_QUEUE_BW, to configure the maximum bandwidth of
each queue of a VF.
2. VIRTCHNL_OP_CONFIG_QUANTA, to configure the quanta size per queue.
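
Unlike VIRTCHNL_OP_CONFIG_QUEUE_BW, the quanta message is fixed-size:
queue_select is a single range, not a per-queue array, which is why
this version no longer extends valid_len per queue.  A sketch of
filling it (virtchnl_queue_chunk layout assumed from the existing
header; the helper is hypothetical):

  #include <stdint.h>

  struct virtchnl_queue_chunk {      /* existing virtchnl type */
          int32_t type;              /* enum virtchnl_queue_type */
          uint16_t start_queue_id;
          uint16_t num_queues;
  };

  struct virtchnl_quanta_cfg {
          uint16_t quanta_size;      /* + 2 bytes implicit padding */
          struct virtchnl_queue_chunk queue_select;
  };

  static void
  quanta_cfg_fill(struct virtchnl_quanta_cfg *q, uint16_t quanta,
                  int32_t qtype, uint16_t first_q, uint16_t n)
  {
          q->quanta_size = quanta;
          q->queue_select.type = qtype; /* driver uses the Tx queue type */
          q->queue_select.start_queue_id = first_q;
          q->queue_select.num_queues = n;
  }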

Signed-off-by: Ting Xu <ting.xu@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/common/iavf/virtchnl.h | 50 ++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 3e44eca7d8..249ae6ed23 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -164,6 +164,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
 	VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
+	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
+	VIRTCHNL_OP_CONFIG_QUANTA = 113,
 	VIRTCHNL_OP_MAX,
 };
 
@@ -1872,6 +1874,23 @@ struct virtchnl_queue_tc_mapping {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);
 
+/* VIRTCHNL_OP_CONFIG_QUEUE_BW */
+struct virtchnl_queue_bw {
+	u16 queue_id;
+	u8 tc;
+	u8 pad;
+	struct virtchnl_shaper_bw shaper;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
+
+struct virtchnl_queues_bw_cfg {
+	u16 vsi_id;
+	u16 num_queues;
+	struct virtchnl_queue_bw cfg[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queues_bw_cfg);
 
 /* TX and RX queue types are valid in legacy as well as split queue models.
  * With Split Queue model, 2 additional types are introduced - TX_COMPLETION
@@ -1978,6 +1997,12 @@ struct virtchnl_queue_vector_maps {
 
 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);
 
+struct virtchnl_quanta_cfg {
+	u16 quanta_size;
+	struct virtchnl_queue_chunk queue_select;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
 
 /* Since VF messages are limited by u16 size, precalculate the maximum possible
  * values of nested elements in virtchnl structures that virtual channel can
@@ -2244,6 +2269,31 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 					 sizeof(q_tc->tc[0]);
 		}
 		break;
+	case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+		valid_len = sizeof(struct virtchnl_queues_bw_cfg);
+		if (msglen >= valid_len) {
+			struct virtchnl_queues_bw_cfg *q_bw =
+				(struct virtchnl_queues_bw_cfg *)msg;
+			if (q_bw->num_queues == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (q_bw->num_queues - 1) *
+					 sizeof(q_bw->cfg[0]);
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_QUANTA:
+		valid_len = sizeof(struct virtchnl_quanta_cfg);
+		if (msglen >= valid_len) {
+			struct virtchnl_quanta_cfg *q_quanta =
+				(struct virtchnl_quanta_cfg *)msg;
+			if (q_quanta->quanta_size == 0 ||
+			    q_quanta->queue_select.num_queues == 0) {
+				err_msg_format = true;
+				break;
+			}
+		}
+		break;
 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
 		break;
 	case VIRTCHNL_OP_ADD_VLAN_V2:
-- 
2.25.1


* [PATCH v2 2/3] net/iavf: support queue rate limit configuration
  2022-04-08  1:30 ` [PATCH v2 0/3] Enable queue rate limit and " Wenjun Wu
  2022-04-08  1:30   ` [PATCH v2 1/3] common/iavf: support " Wenjun Wu
@ 2022-04-08  1:30   ` Wenjun Wu
  2022-04-08  1:30   ` [PATCH v2 3/3] net/iavf: support quanta size configuration Wenjun Wu
  2 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-08  1:30 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing

This patch adds queue rate limit configuration support.
Only max bandwidth is supported.

Signed-off-by: Ting Xu <ting.xu@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  13 +++
 drivers/net/iavf/iavf_tm.c    | 190 ++++++++++++++++++++++++++++++++--
 drivers/net/iavf/iavf_vchnl.c |  23 ++++
 3 files changed, 218 insertions(+), 8 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index a01d18e61b..96515a3ee9 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -170,11 +170,21 @@ struct iavf_tm_node {
 	uint32_t weight;
 	uint32_t reference_count;
 	struct iavf_tm_node *parent;
+	struct iavf_tm_shaper_profile *shaper_profile;
 	struct rte_tm_node_params params;
 };
 
 TAILQ_HEAD(iavf_tm_node_list, iavf_tm_node);
 
+struct iavf_tm_shaper_profile {
+	TAILQ_ENTRY(iavf_tm_shaper_profile) node;
+	uint32_t shaper_profile_id;
+	uint32_t reference_count;
+	struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(iavf_shaper_profile_list, iavf_tm_shaper_profile);
+
 /* node type of Traffic Manager */
 enum iavf_tm_node_type {
 	IAVF_TM_NODE_TYPE_PORT,
@@ -188,6 +198,7 @@ struct iavf_tm_conf {
 	struct iavf_tm_node *root; /* root node - vf vsi */
 	struct iavf_tm_node_list tc_list; /* node list for all the TCs */
 	struct iavf_tm_node_list queue_list; /* node list for all the queues */
+	struct iavf_shaper_profile_list shaper_profile_list;
 	uint32_t nb_tc_node;
 	uint32_t nb_queue_node;
 	bool committed;
@@ -451,6 +462,8 @@ int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 int iavf_request_queues(struct rte_eth_dev *dev, uint16_t num);
 int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
 int iavf_get_qos_cap(struct iavf_adapter *adapter);
+int iavf_set_q_bw(struct rte_eth_dev *dev,
+		  struct virtchnl_queues_bw_cfg *q_bw, uint16_t size);
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			struct virtchnl_queue_tc_mapping *q_tc_mapping,
 			uint16_t size);
diff --git a/drivers/net/iavf/iavf_tm.c b/drivers/net/iavf/iavf_tm.c
index 8d92062c7f..32bb3be45e 100644
--- a/drivers/net/iavf/iavf_tm.c
+++ b/drivers/net/iavf/iavf_tm.c
@@ -8,6 +8,13 @@
 static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 				 __rte_unused int clear_on_fail,
 				 __rte_unused struct rte_tm_error *error);
+static int iavf_shaper_profile_add(struct rte_eth_dev *dev,
+				   uint32_t shaper_profile_id,
+				   struct rte_tm_shaper_params *profile,
+				   struct rte_tm_error *error);
+static int iavf_shaper_profile_del(struct rte_eth_dev *dev,
+				   uint32_t shaper_profile_id,
+				   struct rte_tm_error *error);
 static int iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	      uint32_t parent_node_id, uint32_t priority,
 	      uint32_t weight, uint32_t level_id,
@@ -30,6 +37,8 @@ static int iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 		   int *is_leaf, struct rte_tm_error *error);
 
 const struct rte_tm_ops iavf_tm_ops = {
+	.shaper_profile_add = iavf_shaper_profile_add,
+	.shaper_profile_delete = iavf_shaper_profile_del,
 	.node_add = iavf_tm_node_add,
 	.node_delete = iavf_tm_node_delete,
 	.capabilities_get = iavf_tm_capabilities_get,
@@ -44,6 +53,9 @@ iavf_tm_conf_init(struct rte_eth_dev *dev)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
+	/* initialize shaper profile list */
+	TAILQ_INIT(&vf->tm_conf.shaper_profile_list);
+
 	/* initialize node configuration */
 	vf->tm_conf.root = NULL;
 	TAILQ_INIT(&vf->tm_conf.tc_list);
@@ -57,6 +69,7 @@ void
 iavf_tm_conf_uninit(struct rte_eth_dev *dev)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
 	struct iavf_tm_node *tm_node;
 
 	/* clear node configuration */
@@ -74,6 +87,14 @@ iavf_tm_conf_uninit(struct rte_eth_dev *dev)
 		rte_free(vf->tm_conf.root);
 		vf->tm_conf.root = NULL;
 	}
+
+	/* Remove all shaper profiles */
+	while ((shaper_profile =
+	       TAILQ_FIRST(&vf->tm_conf.shaper_profile_list))) {
+		TAILQ_REMOVE(&vf->tm_conf.shaper_profile_list,
+			     shaper_profile, node);
+		rte_free(shaper_profile);
+	}
 }
 
 static inline struct iavf_tm_node *
@@ -132,13 +153,6 @@ iavf_node_param_check(struct iavf_info *vf, uint32_t node_id,
 		return -EINVAL;
 	}
 
-	/* not support shaper profile */
-	if (params->shaper_profile_id) {
-		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
-		error->message = "shaper profile not supported";
-		return -EINVAL;
-	}
-
 	/* not support shared shaper */
 	if (params->shared_shaper_id) {
 		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
@@ -236,6 +250,23 @@ iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 	return 0;
 }
 
+static inline struct iavf_tm_shaper_profile *
+iavf_shaper_profile_search(struct rte_eth_dev *dev,
+			   uint32_t shaper_profile_id)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_shaper_profile_list *shaper_profile_list =
+		&vf->tm_conf.shaper_profile_list;
+	struct iavf_tm_shaper_profile *shaper_profile;
+
+	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+		if (shaper_profile_id == shaper_profile->shaper_profile_id)
+			return shaper_profile;
+	}
+
+	return NULL;
+}
+
 static int
 iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	      uint32_t parent_node_id, uint32_t priority,
@@ -246,6 +277,7 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
 	enum iavf_tm_node_type parent_node_type = IAVF_TM_NODE_TYPE_MAX;
+	struct iavf_tm_shaper_profile *shaper_profile = NULL;
 	struct iavf_tm_node *tm_node;
 	struct iavf_tm_node *parent_node;
 	uint16_t tc_nb = vf->qos_cap->num_elem;
@@ -273,6 +305,18 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 		return -EINVAL;
 	}
 
+	/* check the shaper profile id */
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		shaper_profile = iavf_shaper_profile_search(dev,
+			params->shaper_profile_id);
+		if (!shaper_profile) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "shaper profile not exist";
+			return -EINVAL;
+		}
+	}
+
 	/* root node if not have a parent */
 	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
 		/* check level */
@@ -358,6 +402,7 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	tm_node->id = node_id;
 	tm_node->reference_count = 0;
 	tm_node->parent = parent_node;
+	tm_node->shaper_profile = shaper_profile;
 	rte_memcpy(&tm_node->params, params,
 			 sizeof(struct rte_tm_node_params));
 	if (parent_node_type == IAVF_TM_NODE_TYPE_PORT) {
@@ -373,6 +418,10 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	}
 	tm_node->parent->reference_count++;
 
+	/* increase the reference counter of the shaper profile */
+	if (shaper_profile)
+		shaper_profile->reference_count++;
+
 	return 0;
 }
 
@@ -437,6 +486,103 @@ iavf_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
 	return 0;
 }
 
+static int
+iavf_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+				struct rte_tm_error *error)
+{
+	/* min bucket size not supported */
+	if (profile->committed.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+		error->message = "committed bucket size not supported";
+		return -EINVAL;
+	}
+	/* max bucket size not supported */
+	if (profile->peak.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+		error->message = "peak bucket size not supported";
+		return -EINVAL;
+	}
+	/* length adjustment not supported */
+	if (profile->pkt_length_adjust) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+		error->message = "packet length adjustment not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+iavf_shaper_profile_add(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_shaper_params *profile,
+			struct rte_tm_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
+	int ret;
+
+	if (!profile || !error)
+		return -EINVAL;
+
+	ret = iavf_shaper_profile_param_check(profile, error);
+	if (ret)
+		return ret;
+
+	shaper_profile = iavf_shaper_profile_search(dev, shaper_profile_id);
+
+	if (shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID exist";
+		return -EINVAL;
+	}
+
+	shaper_profile = rte_zmalloc("iavf_tm_shaper_profile",
+				     sizeof(struct iavf_tm_shaper_profile),
+				     0);
+	if (!shaper_profile)
+		return -ENOMEM;
+	shaper_profile->shaper_profile_id = shaper_profile_id;
+	rte_memcpy(&shaper_profile->profile, profile,
+			 sizeof(struct rte_tm_shaper_params));
+	TAILQ_INSERT_TAIL(&vf->tm_conf.shaper_profile_list,
+			  shaper_profile, node);
+
+	return 0;
+}
+
+static int
+iavf_shaper_profile_del(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
+
+	if (!error)
+		return -EINVAL;
+
+	shaper_profile = iavf_shaper_profile_search(dev, shaper_profile_id);
+
+	if (!shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID not exist";
+		return -EINVAL;
+	}
+
+	/* don't delete a profile if it's used by one or several nodes */
+	if (shaper_profile->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "profile in use";
+		return -EINVAL;
+	}
+
+	TAILQ_REMOVE(&vf->tm_conf.shaper_profile_list, shaper_profile, node);
+	rte_free(shaper_profile);
+
+	return 0;
+}
+
 static int
 iavf_tm_capabilities_get(struct rte_eth_dev *dev,
 			 struct rte_tm_capabilities *cap,
@@ -656,10 +802,11 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct virtchnl_queue_tc_mapping *q_tc_mapping;
+	struct virtchnl_queues_bw_cfg *q_bw;
 	struct iavf_tm_node_list *queue_list = &vf->tm_conf.queue_list;
 	struct iavf_tm_node *tm_node;
 	struct iavf_qtc_map *qtc_map;
-	uint16_t size;
+	uint16_t size, size_q;
 	int index = 0, node_committed = 0;
 	int i, ret_val = IAVF_SUCCESS;
 
@@ -691,10 +838,21 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 		goto fail_clear;
 	}
 
+	size_q = sizeof(*q_bw) + sizeof(q_bw->cfg[0]) *
+		(vf->num_queue_pairs - 1);
+	q_bw = rte_zmalloc("q_bw", size_q, 0);
+	if (!q_bw) {
+		ret_val = IAVF_ERR_NO_MEMORY;
+		goto fail_clear;
+	}
+
 	q_tc_mapping->vsi_id = vf->vsi.vsi_id;
 	q_tc_mapping->num_tc = vf->qos_cap->num_elem;
 	q_tc_mapping->num_queue_pairs = vf->num_queue_pairs;
 
+	q_bw->vsi_id = vf->vsi.vsi_id;
+	q_bw->num_queues = vf->num_queue_pairs;
+
 	TAILQ_FOREACH(tm_node, queue_list, node) {
 		if (tm_node->tc >= q_tc_mapping->num_tc) {
 			PMD_DRV_LOG(ERR, "TC%d is not enabled", tm_node->tc);
@@ -702,6 +860,18 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 			goto fail_clear;
 		}
 		q_tc_mapping->tc[tm_node->tc].req.queue_count++;
+
+		if (tm_node->shaper_profile) {
+			q_bw->cfg[node_committed].queue_id = node_committed;
+			q_bw->cfg[node_committed].shaper.peak =
+			tm_node->shaper_profile->profile.peak.rate /
+			1000 * IAVF_BITS_PER_BYTE;
+			q_bw->cfg[node_committed].shaper.committed =
+			tm_node->shaper_profile->profile.committed.rate /
+			1000 * IAVF_BITS_PER_BYTE;
+			q_bw->cfg[node_committed].tc = tm_node->tc;
+		}
+
 		node_committed++;
 	}
 
@@ -712,6 +882,10 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 		goto fail_clear;
 	}
 
+	ret_val = iavf_set_q_bw(dev, q_bw, size_q);
+	if (ret_val)
+		goto fail_clear;
+
 	/* store the queue TC mapping info */
 	qtc_map = rte_zmalloc("qtc_map",
 		sizeof(struct iavf_qtc_map) * q_tc_mapping->num_tc, 0);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 169e1f2012..537369f736 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1636,6 +1636,29 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	return err;
 }
 
+int iavf_set_q_bw(struct rte_eth_dev *dev,
+		struct virtchnl_queues_bw_cfg *q_bw, uint16_t size)
+{
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_cmd_info args;
+	int err;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_CONFIG_QUEUE_BW;
+	args.in_args = (uint8_t *)q_bw;
+	args.in_args_size = size;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of"
+			    " VIRTCHNL_OP_CONFIG_QUEUE_BW");
+	return err;
+}
+
 int
 iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 			struct rte_ether_addr *mc_addrs,
-- 
2.25.1


* [PATCH v2 3/3] net/iavf: support quanta size configuration
  2022-04-08  1:30 ` [PATCH v2 0/3] Enable queue rate limit and " Wenjun Wu
  2022-04-08  1:30   ` [PATCH v2 1/3] common/iavf: support " Wenjun Wu
  2022-04-08  1:30   ` [PATCH v2 2/3] net/iavf: support queue rate limit configuration Wenjun Wu
@ 2022-04-08  1:30   ` Wenjun Wu
  2 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-08  1:30 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing

This patch adds quanta size configuration support.
Quanta size must be between 256 and 4096, and be a multiple of 64.

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/net/iavf/iavf.h        |  3 +++
 drivers/net/iavf/iavf_ethdev.c | 40 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c  | 28 ++++++++++++++++++++++++
 3 files changed, 71 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 96515a3ee9..c0a4a47b04 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -292,6 +292,7 @@ enum iavf_proto_xtr_type {
 struct iavf_devargs {
 	uint8_t proto_xtr_dflt;
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
+	uint16_t quanta_size;
 };
 
 struct iavf_security_ctx;
@@ -467,6 +468,8 @@ int iavf_set_q_bw(struct rte_eth_dev *dev,
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			struct virtchnl_queue_tc_mapping *q_tc_mapping,
 			uint16_t size);
+int iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id,
+			    u16 num_queues);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
 int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..255459f162 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -34,9 +34,11 @@
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
+#define IAVF_QUANTA_SIZE_ARG       "quanta_size"
 
 static const char * const iavf_valid_args[] = {
 	IAVF_PROTO_XTR_ARG,
+	IAVF_QUANTA_SIZE_ARG,
 	NULL
 };
 
@@ -950,6 +952,11 @@ iavf_dev_start(struct rte_eth_dev *dev)
 		return -1;
 	}
 
+	if (iavf_set_vf_quanta_size(adapter, index, num_queue_pairs) != 0) {
+		PMD_DRV_LOG(ERR, "configure quanta size failed");
+		goto err_queue;
+	}
+
 	/* If needed, send configure queues msg multiple times to make the
 	 * adminq buffer length smaller than the 4K limitation.
 	 */
@@ -2092,6 +2099,25 @@ iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
 	return 0;
 }
 
+static int
+parse_u16(__rte_unused const char *key, const char *value, void *args)
+{
+	u16 *num = (u16 *)args;
+	u16 tmp;
+
+	errno = 0;
+	tmp = strtoull(value, NULL, 10);
+	if (errno || !tmp) {
+		PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u16",
+			    key, value);
+		return -1;
+	}
+
+	*num = tmp;
+
+	return 0;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2118,6 +2144,20 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 	if (ret)
 		goto bail;
 
+	ret = rte_kvargs_process(kvlist, IAVF_QUANTA_SIZE_ARG,
+				 &parse_u16, &ad->devargs.quanta_size);
+	if (ret)
+		goto bail;
+
+	if (ad->devargs.quanta_size == 0)
+		ad->devargs.quanta_size = 1024;
+
+	if (ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 ||
+	    ad->devargs.quanta_size % 64) {
+		PMD_INIT_LOG(ERR, "invalid quanta size\n");
+		return -EINVAL;
+	}
+
 bail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 537369f736..ee26e45acf 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1828,3 +1828,31 @@ iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 
 	return 0;
 }
+
+int
+iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id, u16 num_queues)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	struct virtchnl_quanta_cfg q_quanta;
+	int err;
+
+	q_quanta.quanta_size = adapter->devargs.quanta_size;
+	q_quanta.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
+	q_quanta.queue_select.start_queue_id = start_queue_id;
+	q_quanta.queue_select.num_queues = num_queues;
+
+	args.ops = VIRTCHNL_OP_CONFIG_QUANTA;
+	args.in_args = (uint8_t *)&q_quanta;
+	args.in_args_size = sizeof(q_quanta);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_CONFIG_QUANTA");
+		return err;
+	}
+
+	return 0;
+}
-- 
2.25.1


* [PATCH v3 0/4] Enable queue rate limit and quanta size configuration
  2022-03-29  2:07 [PATCH v1 0/3] Enable queue rate limit and quanta size configuration Wenjun Wu
                   ` (3 preceding siblings ...)
  2022-04-08  1:30 ` [PATCH v2 0/3] Enable queue rate limit and " Wenjun Wu
@ 2022-04-08  5:30 ` Wenjun Wu
  2022-04-08  5:30   ` [PATCH v3 1/4] common/iavf: support " Wenjun Wu
                     ` (3 more replies)
  2022-04-08  8:45 ` [PATCH v4 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
                   ` (2 subsequent siblings)
  7 siblings, 4 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-08  5:30 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing

This patch set adds queue rate limit and quanta size configuration.
Quanta size can be changed through the driver devarg quanta_size=xxx.
Quanta size must be set to a value between 256 and 4096 that is a
multiple of 64.

v2: rework virtchnl
v3: add release note

Wenjun Wu (4):
  common/iavf: support queue rate limit and quanta size configuration
  net/iavf: support queue rate limit configuration
  net/iavf: support quanta size configuration
  doc: add release notes for 22.07

 doc/guides/rel_notes/release_22_07.rst |   4 +
 drivers/common/iavf/virtchnl.h         |  50 +++++++
 drivers/net/iavf/iavf.h                |  16 +++
 drivers/net/iavf/iavf_ethdev.c         |  40 ++++++
 drivers/net/iavf/iavf_tm.c             | 190 +++++++++++++++++++++++--
 drivers/net/iavf/iavf_vchnl.c          |  51 +++++++
 6 files changed, 343 insertions(+), 8 deletions(-)

-- 
2.25.1


* [PATCH v3 1/4] common/iavf: support queue rate limit and quanta size configuration
  2022-04-08  5:30 ` [PATCH v3 0/4] Enable queue rate limit and " Wenjun Wu
@ 2022-04-08  5:30   ` Wenjun Wu
  2022-04-08  5:30   ` [PATCH v3 2/4] net/iavf: support queue rate limit configuration Wenjun Wu
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-08  5:30 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing

This patch adds new virtchnl opcodes and structures for rate limit
and quanta size configuration, which include:
1. VIRTCHNL_OP_CONFIG_QUEUE_BW, to configure the maximum bandwidth of
each queue of a VF.
2. VIRTCHNL_OP_CONFIG_QUANTA, to configure the quanta size per queue.

Signed-off-by: Ting Xu <ting.xu@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/common/iavf/virtchnl.h | 50 ++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 3e44eca7d8..249ae6ed23 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -164,6 +164,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
 	VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
+	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
+	VIRTCHNL_OP_CONFIG_QUANTA = 113,
 	VIRTCHNL_OP_MAX,
 };
 
@@ -1872,6 +1874,23 @@ struct virtchnl_queue_tc_mapping {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);
 
+/* VIRTCHNL_OP_CONFIG_QUEUE_BW */
+struct virtchnl_queue_bw {
+	u16 queue_id;
+	u8 tc;
+	u8 pad;
+	struct virtchnl_shaper_bw shaper;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
+
+struct virtchnl_queues_bw_cfg {
+	u16 vsi_id;
+	u16 num_queues;
+	struct virtchnl_queue_bw cfg[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queues_bw_cfg);
 
 /* TX and RX queue types are valid in legacy as well as split queue models.
  * With Split Queue model, 2 additional types are introduced - TX_COMPLETION
@@ -1978,6 +1997,12 @@ struct virtchnl_queue_vector_maps {
 
 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);
 
+struct virtchnl_quanta_cfg {
+	u16 quanta_size;
+	struct virtchnl_queue_chunk queue_select;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
 
 /* Since VF messages are limited by u16 size, precalculate the maximum possible
  * values of nested elements in virtchnl structures that virtual channel can
@@ -2244,6 +2269,31 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 					 sizeof(q_tc->tc[0]);
 		}
 		break;
+	case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+		valid_len = sizeof(struct virtchnl_queues_bw_cfg);
+		if (msglen >= valid_len) {
+			struct virtchnl_queues_bw_cfg *q_bw =
+				(struct virtchnl_queues_bw_cfg *)msg;
+			if (q_bw->num_queues == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (q_bw->num_queues - 1) *
+					 sizeof(q_bw->cfg[0]);
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_QUANTA:
+		valid_len = sizeof(struct virtchnl_quanta_cfg);
+		if (msglen >= valid_len) {
+			struct virtchnl_quanta_cfg *q_quanta =
+				(struct virtchnl_quanta_cfg *)msg;
+			if (q_quanta->quanta_size == 0 ||
+			    q_quanta->queue_select.num_queues == 0) {
+				err_msg_format = true;
+				break;
+			}
+		}
+		break;
 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
 		break;
 	case VIRTCHNL_OP_ADD_VLAN_V2:
-- 
2.25.1


* [PATCH v3 2/4] net/iavf: support queue rate limit configuration
  2022-04-08  5:30 ` [PATCH v3 0/4] Enable queue rate limit and " Wenjun Wu
  2022-04-08  5:30   ` [PATCH v3 1/4] common/iavf: support " Wenjun Wu
@ 2022-04-08  5:30   ` Wenjun Wu
  2022-04-08  5:30   ` [PATCH v3 3/4] net/iavf: support quanta size configuration Wenjun Wu
  2022-04-08  5:30   ` [PATCH v3 4/4] doc: add release notes for 22.07 Wenjun Wu
  3 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-08  5:30 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing

This patch adds queue rate limit configuration support.
Only max bandwidth is supported.

Signed-off-by: Ting Xu <ting.xu@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  13 +++
 drivers/net/iavf/iavf_tm.c    | 190 ++++++++++++++++++++++++++++++++--
 drivers/net/iavf/iavf_vchnl.c |  23 ++++
 3 files changed, 218 insertions(+), 8 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index a01d18e61b..96515a3ee9 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -170,11 +170,21 @@ struct iavf_tm_node {
 	uint32_t weight;
 	uint32_t reference_count;
 	struct iavf_tm_node *parent;
+	struct iavf_tm_shaper_profile *shaper_profile;
 	struct rte_tm_node_params params;
 };
 
 TAILQ_HEAD(iavf_tm_node_list, iavf_tm_node);
 
+struct iavf_tm_shaper_profile {
+	TAILQ_ENTRY(iavf_tm_shaper_profile) node;
+	uint32_t shaper_profile_id;
+	uint32_t reference_count;
+	struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(iavf_shaper_profile_list, iavf_tm_shaper_profile);
+
 /* node type of Traffic Manager */
 enum iavf_tm_node_type {
 	IAVF_TM_NODE_TYPE_PORT,
@@ -188,6 +198,7 @@ struct iavf_tm_conf {
 	struct iavf_tm_node *root; /* root node - vf vsi */
 	struct iavf_tm_node_list tc_list; /* node list for all the TCs */
 	struct iavf_tm_node_list queue_list; /* node list for all the queues */
+	struct iavf_shaper_profile_list shaper_profile_list;
 	uint32_t nb_tc_node;
 	uint32_t nb_queue_node;
 	bool committed;
@@ -451,6 +462,8 @@ int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 int iavf_request_queues(struct rte_eth_dev *dev, uint16_t num);
 int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
 int iavf_get_qos_cap(struct iavf_adapter *adapter);
+int iavf_set_q_bw(struct rte_eth_dev *dev,
+		  struct virtchnl_queues_bw_cfg *q_bw, uint16_t size);
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			struct virtchnl_queue_tc_mapping *q_tc_mapping,
 			uint16_t size);
diff --git a/drivers/net/iavf/iavf_tm.c b/drivers/net/iavf/iavf_tm.c
index 8d92062c7f..32bb3be45e 100644
--- a/drivers/net/iavf/iavf_tm.c
+++ b/drivers/net/iavf/iavf_tm.c
@@ -8,6 +8,13 @@
 static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 				 __rte_unused int clear_on_fail,
 				 __rte_unused struct rte_tm_error *error);
+static int iavf_shaper_profile_add(struct rte_eth_dev *dev,
+				   uint32_t shaper_profile_id,
+				   struct rte_tm_shaper_params *profile,
+				   struct rte_tm_error *error);
+static int iavf_shaper_profile_del(struct rte_eth_dev *dev,
+				   uint32_t shaper_profile_id,
+				   struct rte_tm_error *error);
 static int iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	      uint32_t parent_node_id, uint32_t priority,
 	      uint32_t weight, uint32_t level_id,
@@ -30,6 +37,8 @@ static int iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 		   int *is_leaf, struct rte_tm_error *error);
 
 const struct rte_tm_ops iavf_tm_ops = {
+	.shaper_profile_add = iavf_shaper_profile_add,
+	.shaper_profile_delete = iavf_shaper_profile_del,
 	.node_add = iavf_tm_node_add,
 	.node_delete = iavf_tm_node_delete,
 	.capabilities_get = iavf_tm_capabilities_get,
@@ -44,6 +53,9 @@ iavf_tm_conf_init(struct rte_eth_dev *dev)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
+	/* initialize shaper profile list */
+	TAILQ_INIT(&vf->tm_conf.shaper_profile_list);
+
 	/* initialize node configuration */
 	vf->tm_conf.root = NULL;
 	TAILQ_INIT(&vf->tm_conf.tc_list);
@@ -57,6 +69,7 @@ void
 iavf_tm_conf_uninit(struct rte_eth_dev *dev)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
 	struct iavf_tm_node *tm_node;
 
 	/* clear node configuration */
@@ -74,6 +87,14 @@ iavf_tm_conf_uninit(struct rte_eth_dev *dev)
 		rte_free(vf->tm_conf.root);
 		vf->tm_conf.root = NULL;
 	}
+
+	/* Remove all shaper profiles */
+	while ((shaper_profile =
+	       TAILQ_FIRST(&vf->tm_conf.shaper_profile_list))) {
+		TAILQ_REMOVE(&vf->tm_conf.shaper_profile_list,
+			     shaper_profile, node);
+		rte_free(shaper_profile);
+	}
 }
 
 static inline struct iavf_tm_node *
@@ -132,13 +153,6 @@ iavf_node_param_check(struct iavf_info *vf, uint32_t node_id,
 		return -EINVAL;
 	}
 
-	/* not support shaper profile */
-	if (params->shaper_profile_id) {
-		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
-		error->message = "shaper profile not supported";
-		return -EINVAL;
-	}
-
 	/* not support shared shaper */
 	if (params->shared_shaper_id) {
 		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
@@ -236,6 +250,23 @@ iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 	return 0;
 }
 
+static inline struct iavf_tm_shaper_profile *
+iavf_shaper_profile_search(struct rte_eth_dev *dev,
+			   uint32_t shaper_profile_id)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_shaper_profile_list *shaper_profile_list =
+		&vf->tm_conf.shaper_profile_list;
+	struct iavf_tm_shaper_profile *shaper_profile;
+
+	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+		if (shaper_profile_id == shaper_profile->shaper_profile_id)
+			return shaper_profile;
+	}
+
+	return NULL;
+}
+
 static int
 iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	      uint32_t parent_node_id, uint32_t priority,
@@ -246,6 +277,7 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
 	enum iavf_tm_node_type parent_node_type = IAVF_TM_NODE_TYPE_MAX;
+	struct iavf_tm_shaper_profile *shaper_profile = NULL;
 	struct iavf_tm_node *tm_node;
 	struct iavf_tm_node *parent_node;
 	uint16_t tc_nb = vf->qos_cap->num_elem;
@@ -273,6 +305,18 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 		return -EINVAL;
 	}
 
+	/* check the shaper profile id */
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		shaper_profile = iavf_shaper_profile_search(dev,
+			params->shaper_profile_id);
+		if (!shaper_profile) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "shaper profile does not exist";
+			return -EINVAL;
+		}
+	}
+
 	/* root node if not have a parent */
 	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
 		/* check level */
@@ -358,6 +402,7 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	tm_node->id = node_id;
 	tm_node->reference_count = 0;
 	tm_node->parent = parent_node;
+	tm_node->shaper_profile = shaper_profile;
 	rte_memcpy(&tm_node->params, params,
 			 sizeof(struct rte_tm_node_params));
 	if (parent_node_type == IAVF_TM_NODE_TYPE_PORT) {
@@ -373,6 +418,10 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	}
 	tm_node->parent->reference_count++;
 
+	/* increase the reference counter of the shaper profile */
+	if (shaper_profile)
+		shaper_profile->reference_count++;
+
 	return 0;
 }
 
@@ -437,6 +486,103 @@ iavf_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
 	return 0;
 }
 
+static int
+iavf_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+				struct rte_tm_error *error)
+{
+	/* min bucket size not supported */
+	if (profile->committed.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+		error->message = "committed bucket size not supported";
+		return -EINVAL;
+	}
+	/* max bucket size not supported */
+	if (profile->peak.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+		error->message = "peak bucket size not supported";
+		return -EINVAL;
+	}
+	/* length adjustment not supported */
+	if (profile->pkt_length_adjust) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+		error->message = "packet length adjustment not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+iavf_shaper_profile_add(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_shaper_params *profile,
+			struct rte_tm_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
+	int ret;
+
+	if (!profile || !error)
+		return -EINVAL;
+
+	ret = iavf_shaper_profile_param_check(profile, error);
+	if (ret)
+		return ret;
+
+	shaper_profile = iavf_shaper_profile_search(dev, shaper_profile_id);
+
+	if (shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID already exists";
+		return -EINVAL;
+	}
+
+	shaper_profile = rte_zmalloc("iavf_tm_shaper_profile",
+				     sizeof(struct iavf_tm_shaper_profile),
+				     0);
+	if (!shaper_profile)
+		return -ENOMEM;
+	shaper_profile->shaper_profile_id = shaper_profile_id;
+	rte_memcpy(&shaper_profile->profile, profile,
+			 sizeof(struct rte_tm_shaper_params));
+	TAILQ_INSERT_TAIL(&vf->tm_conf.shaper_profile_list,
+			  shaper_profile, node);
+
+	return 0;
+}
+
+static int
+iavf_shaper_profile_del(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
+
+	if (!error)
+		return -EINVAL;
+
+	shaper_profile = iavf_shaper_profile_search(dev, shaper_profile_id);
+
+	if (!shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID does not exist";
+		return -EINVAL;
+	}
+
+	/* don't delete a profile if it's used by one or several nodes */
+	if (shaper_profile->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "profile in use";
+		return -EINVAL;
+	}
+
+	TAILQ_REMOVE(&vf->tm_conf.shaper_profile_list, shaper_profile, node);
+	rte_free(shaper_profile);
+
+	return 0;
+}
+
 static int
 iavf_tm_capabilities_get(struct rte_eth_dev *dev,
 			 struct rte_tm_capabilities *cap,
@@ -656,10 +802,11 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct virtchnl_queue_tc_mapping *q_tc_mapping;
+	struct virtchnl_queues_bw_cfg *q_bw;
 	struct iavf_tm_node_list *queue_list = &vf->tm_conf.queue_list;
 	struct iavf_tm_node *tm_node;
 	struct iavf_qtc_map *qtc_map;
-	uint16_t size;
+	uint16_t size, size_q;
 	int index = 0, node_committed = 0;
 	int i, ret_val = IAVF_SUCCESS;
 
@@ -691,10 +838,21 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 		goto fail_clear;
 	}
 
+	size_q = sizeof(*q_bw) + sizeof(q_bw->cfg[0]) *
+		(vf->num_queue_pairs - 1);
+	q_bw = rte_zmalloc("q_bw", size_q, 0);
+	if (!q_bw) {
+		ret_val = IAVF_ERR_NO_MEMORY;
+		goto fail_clear;
+	}
+
 	q_tc_mapping->vsi_id = vf->vsi.vsi_id;
 	q_tc_mapping->num_tc = vf->qos_cap->num_elem;
 	q_tc_mapping->num_queue_pairs = vf->num_queue_pairs;
 
+	q_bw->vsi_id = vf->vsi.vsi_id;
+	q_bw->num_queues = vf->num_queue_pairs;
+
 	TAILQ_FOREACH(tm_node, queue_list, node) {
 		if (tm_node->tc >= q_tc_mapping->num_tc) {
 			PMD_DRV_LOG(ERR, "TC%d is not enabled", tm_node->tc);
@@ -702,6 +860,18 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 			goto fail_clear;
 		}
 		q_tc_mapping->tc[tm_node->tc].req.queue_count++;
+
+		if (tm_node->shaper_profile) {
+			q_bw->cfg[node_committed].queue_id = node_committed;
+			q_bw->cfg[node_committed].shaper.peak =
+			tm_node->shaper_profile->profile.peak.rate /
+			1000 * IAVF_BITS_PER_BYTE;
+			q_bw->cfg[node_committed].shaper.committed =
+			tm_node->shaper_profile->profile.committed.rate /
+			1000 * IAVF_BITS_PER_BYTE;
+			q_bw->cfg[node_committed].tc = tm_node->tc;
+		}
+
 		node_committed++;
 	}
 
@@ -712,6 +882,10 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 		goto fail_clear;
 	}
 
+	ret_val = iavf_set_q_bw(dev, q_bw, size_q);
+	if (ret_val)
+		goto fail_clear;
+
 	/* store the queue TC mapping info */
 	qtc_map = rte_zmalloc("qtc_map",
 		sizeof(struct iavf_qtc_map) * q_tc_mapping->num_tc, 0);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 169e1f2012..537369f736 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1636,6 +1636,29 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	return err;
 }
 
+int iavf_set_q_bw(struct rte_eth_dev *dev,
+		struct virtchnl_queues_bw_cfg *q_bw, uint16_t size)
+{
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_cmd_info args;
+	int err;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_CONFIG_QUEUE_BW;
+	args.in_args = (uint8_t *)q_bw;
+	args.in_args_size = size;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of"
+			    " VIRTCHNL_OP_CONFIG_QUEUE_BW");
+	return err;
+}
+
 int
 iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 			struct rte_ether_addr *mc_addrs,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v3 3/4] net/iavf: support quanta size configuration
  2022-04-08  5:30 ` [PATCH v3 0/4] Enable queue rate limit and " Wenjun Wu
  2022-04-08  5:30   ` [PATCH v3 1/4] common/iavf: support " Wenjun Wu
  2022-04-08  5:30   ` [PATCH v3 2/4] net/iavf: support queue rate limit configuration Wenjun Wu
@ 2022-04-08  5:30   ` Wenjun Wu
  2022-04-08  5:30   ` [PATCH v3 4/4] doc: add release notes for 22.07 Wenjun Wu
  3 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-08  5:30 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing

This patch adds quanta size configuration support.
Quanta size should be between 256 and 4096, and must be a multiple of 64.
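
A minimal usage sketch (the application and PCI address are
placeholders, not part of this patch):

    dpdk-testpmd -a 18:01.0,quanta_size=2048 -- -i

The value 2048 satisfies both constraints: it lies in [256, 4096]
and 2048 % 64 == 0.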

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/net/iavf/iavf.h        |  3 +++
 drivers/net/iavf/iavf_ethdev.c | 40 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c  | 28 ++++++++++++++++++++++++
 3 files changed, 71 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 96515a3ee9..c0a4a47b04 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -292,6 +292,7 @@ enum iavf_proto_xtr_type {
 struct iavf_devargs {
 	uint8_t proto_xtr_dflt;
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
+	uint16_t quanta_size;
 };
 
 struct iavf_security_ctx;
@@ -467,6 +468,8 @@ int iavf_set_q_bw(struct rte_eth_dev *dev,
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			struct virtchnl_queue_tc_mapping *q_tc_mapping,
 			uint16_t size);
+int iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id,
+			    u16 num_queues);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
 int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..255459f162 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -34,9 +34,11 @@
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
+#define IAVF_QUANTA_SIZE_ARG       "quanta_size"
 
 static const char * const iavf_valid_args[] = {
 	IAVF_PROTO_XTR_ARG,
+	IAVF_QUANTA_SIZE_ARG,
 	NULL
 };
 
@@ -950,6 +952,11 @@ iavf_dev_start(struct rte_eth_dev *dev)
 		return -1;
 	}
 
+	if (iavf_set_vf_quanta_size(adapter, index, num_queue_pairs) != 0) {
+		PMD_DRV_LOG(ERR, "configure quanta size failed");
+		goto err_queue;
+	}
+
 	/* If needed, send configure queues msg multiple times to make the
 	 * adminq buffer length smaller than the 4K limitation.
 	 */
@@ -2092,6 +2099,25 @@ iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
 	return 0;
 }
 
+static int
+parse_u16(__rte_unused const char *key, const char *value, void *args)
+{
+	u16 *num = (u16 *)args;
+	unsigned long long tmp;
+
+	errno = 0;
+	tmp = strtoull(value, NULL, 10);
+	if (errno || !tmp || tmp > UINT16_MAX) {
+		PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u16",
+			    key, value);
+		return -1;
+	}
+
+	*num = tmp;
+
+	return 0;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2118,6 +2144,20 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 	if (ret)
 		goto bail;
 
+	ret = rte_kvargs_process(kvlist, IAVF_QUANTA_SIZE_ARG,
+				 &parse_u16, &ad->devargs.quanta_size);
+	if (ret)
+		goto bail;
+
+	if (ad->devargs.quanta_size == 0)
+		ad->devargs.quanta_size = 1024;
+
+	if (ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 ||
+	    ad->devargs.quanta_size % 64) {
+		PMD_INIT_LOG(ERR, "invalid quanta size");
+		ret = -EINVAL;
+	}
+
 bail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 537369f736..ee26e45acf 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1828,3 +1828,31 @@ iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 
 	return 0;
 }
+
+int
+iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id, u16 num_queues)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	struct virtchnl_quanta_cfg q_quanta;
+	int err;
+
+	q_quanta.quanta_size = adapter->devargs.quanta_size;
+	q_quanta.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
+	q_quanta.queue_select.start_queue_id = start_queue_id;
+	q_quanta.queue_select.num_queues = num_queues;
+
+	args.ops = VIRTCHNL_OP_CONFIG_QUANTA;
+	args.in_args = (uint8_t *)&q_quanta;
+	args.in_args_size = sizeof(q_quanta);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_CONFIG_QUANTA");
+		return err;
+	}
+
+	return 0;
+}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v3 4/4] doc: add release notes for 22.07
  2022-04-08  5:30 ` [PATCH v3 0/4] Enable queue rate limit and " Wenjun Wu
                     ` (2 preceding siblings ...)
  2022-04-08  5:30   ` [PATCH v3 3/4] net/iavf: support quanta size configuration Wenjun Wu
@ 2022-04-08  5:30   ` Wenjun Wu
  3 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-08  5:30 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing

Add support for queue rate limit and quanta size configuration.

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 doc/guides/rel_notes/release_22_07.rst | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst
index 42a5f2d990..f1b4057d70 100644
--- a/doc/guides/rel_notes/release_22_07.rst
+++ b/doc/guides/rel_notes/release_22_07.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel iavf driver.**
+
+  * Added Tx QoS queue rate limitation support.
+  * Added quanta size configuration support.
 
 Removed Items
 -------------
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v4 0/4] Enable queue rate limit and quanta size configuration
  2022-03-29  2:07 [PATCH v1 0/3] Enable queue rate limit and quanta size configuration Wenjun Wu
                   ` (4 preceding siblings ...)
  2022-04-08  5:30 ` [PATCH v3 0/4] Enable queue rate limit and " Wenjun Wu
@ 2022-04-08  8:45 ` Wenjun Wu
  2022-04-08  8:45   ` [PATCH v4 1/4] common/iavf: support " Wenjun Wu
                     ` (3 more replies)
  2022-04-19  2:05 ` [PATCH v5 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
  2022-04-22  1:42 ` [PATCH v6 0/3] " Wenjun Wu
  7 siblings, 4 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-08  8:45 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing

This patch set adds queue rate limit and quanta size configuration.
Quanta size can be changed through the driver devarg quanta_size=xxx.
Quanta size should be set to a value between 256 and 4096 and must be
a multiple of 64.

v2: Rework virtchnl.
v3: Add release note.
v4: Fix an issue where quanta size configuration would block device
    init if the PF does not support it.

Wenjun Wu (4):
  common/iavf: support queue rate limit and quanta size configuration
  net/iavf: support queue rate limit configuration
  net/iavf: support quanta size configuration
  doc: add release notes for 22.07

 doc/guides/rel_notes/release_22_07.rst |   4 +
 drivers/common/iavf/virtchnl.h         |  50 +++++++
 drivers/net/iavf/iavf.h                |  16 +++
 drivers/net/iavf/iavf_ethdev.c         |  38 +++++
 drivers/net/iavf/iavf_tm.c             | 190 +++++++++++++++++++++++--
 drivers/net/iavf/iavf_vchnl.c          |  54 +++++++
 6 files changed, 344 insertions(+), 8 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v4 1/4] common/iavf: support queue rate limit and quanta size configuration
  2022-04-08  8:45 ` [PATCH v4 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
@ 2022-04-08  8:45   ` Wenjun Wu
  2022-04-08  8:45   ` [PATCH v4 2/4] net/iavf: support queue rate limit configuration Wenjun Wu
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-08  8:45 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing

This patch adds new virtchnl opcodes and structures for rate limit
and quanta size configuration, which include:
1. VIRTCHNL_OP_CONFIG_QUEUE_BW, to configure max bandwidth for each
VF per queue.
2. VIRTCHNL_OP_CONFIG_QUANTA, to configure quanta size per queue.
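
Since struct virtchnl_queues_bw_cfg ends in a one-element cfg[] array,
the sender computes the total message length itself; a sketch
(nb_queues is an assumed runtime value):

	/* cfg[1] already accounts for the first entry, so only
	 * nb_queues - 1 extra entries are added.
	 */
	uint16_t len = sizeof(struct virtchnl_queues_bw_cfg) +
		       (nb_queues - 1) * sizeof(struct virtchnl_queue_bw);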

Signed-off-by: Ting Xu <ting.xu@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/common/iavf/virtchnl.h | 50 ++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 3e44eca7d8..249ae6ed23 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -164,6 +164,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
 	VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
+	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
+	VIRTCHNL_OP_CONFIG_QUANTA = 113,
 	VIRTCHNL_OP_MAX,
 };
 
@@ -1872,6 +1874,23 @@ struct virtchnl_queue_tc_mapping {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);
 
+/* VIRTCHNL_OP_CONFIG_QUEUE_BW */
+struct virtchnl_queue_bw {
+	u16 queue_id;
+	u8 tc;
+	u8 pad;
+	struct virtchnl_shaper_bw shaper;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
+
+struct virtchnl_queues_bw_cfg {
+	u16 vsi_id;
+	u16 num_queues;
+	struct virtchnl_queue_bw cfg[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queues_bw_cfg);
 
 /* TX and RX queue types are valid in legacy as well as split queue models.
  * With Split Queue model, 2 additional types are introduced - TX_COMPLETION
@@ -1978,6 +1997,12 @@ struct virtchnl_queue_vector_maps {
 
 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);
 
+struct virtchnl_quanta_cfg {
+	u16 quanta_size;
+	struct virtchnl_queue_chunk queue_select;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
 
 /* Since VF messages are limited by u16 size, precalculate the maximum possible
  * values of nested elements in virtchnl structures that virtual channel can
@@ -2244,6 +2269,31 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 					 sizeof(q_tc->tc[0]);
 		}
 		break;
+	case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+		valid_len = sizeof(struct virtchnl_queues_bw_cfg);
+		if (msglen >= valid_len) {
+			struct virtchnl_queues_bw_cfg *q_bw =
+				(struct virtchnl_queues_bw_cfg *)msg;
+			if (q_bw->num_queues == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (q_bw->num_queues - 1) *
+					 sizeof(q_bw->cfg[0]);
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_QUANTA:
+		valid_len = sizeof(struct virtchnl_quanta_cfg);
+		if (msglen >= valid_len) {
+			struct virtchnl_quanta_cfg *q_quanta =
+				(struct virtchnl_quanta_cfg *)msg;
+			if (q_quanta->quanta_size == 0 ||
+			    q_quanta->queue_select.num_queues == 0) {
+				err_msg_format = true;
+				break;
+			}
+		}
+		break;
 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
 		break;
 	case VIRTCHNL_OP_ADD_VLAN_V2:
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v4 2/4] net/iavf: support queue rate limit configuration
  2022-04-08  8:45 ` [PATCH v4 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
  2022-04-08  8:45   ` [PATCH v4 1/4] common/iavf: support " Wenjun Wu
@ 2022-04-08  8:45   ` Wenjun Wu
  2022-04-08  8:45   ` [PATCH v4 3/4] net/iavf: support quanta size configuration Wenjun Wu
  2022-04-08  8:45   ` [PATCH v4 4/4] doc: add release notes for 22.07 Wenjun Wu
  3 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-08  8:45 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing

This patch adds queue rate limit configuration support.
Only max bandwidth is supported.
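
A minimal sketch of driving this from an application through the
generic rte_tm API (the node IDs and the pre-built port->TC->queue
hierarchy are assumptions, not part of this patch):

	struct rte_tm_error err;
	struct rte_tm_shaper_params sp = { 0 };
	struct rte_tm_node_params np = { 0 };

	/* rte_tm rates are in bytes/s; the driver converts them to
	 * kbps (rate / 1000 * 8) before VIRTCHNL_OP_CONFIG_QUEUE_BW.
	 */
	sp.peak.rate = 125000000;	/* ~1 Gbit/s */
	rte_tm_shaper_profile_add(port_id, 1, &sp, &err);

	np.shaper_profile_id = 1;	/* attach to a queue node */
	rte_tm_node_add(port_id, queue_node_id, tc_node_id,
			0 /* priority */, 1 /* weight */,
			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &err);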

Signed-off-by: Ting Xu <ting.xu@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  13 +++
 drivers/net/iavf/iavf_tm.c    | 190 ++++++++++++++++++++++++++++++++--
 drivers/net/iavf/iavf_vchnl.c |  23 ++++
 3 files changed, 218 insertions(+), 8 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index a01d18e61b..96515a3ee9 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -170,11 +170,21 @@ struct iavf_tm_node {
 	uint32_t weight;
 	uint32_t reference_count;
 	struct iavf_tm_node *parent;
+	struct iavf_tm_shaper_profile *shaper_profile;
 	struct rte_tm_node_params params;
 };
 
 TAILQ_HEAD(iavf_tm_node_list, iavf_tm_node);
 
+struct iavf_tm_shaper_profile {
+	TAILQ_ENTRY(iavf_tm_shaper_profile) node;
+	uint32_t shaper_profile_id;
+	uint32_t reference_count;
+	struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(iavf_shaper_profile_list, iavf_tm_shaper_profile);
+
 /* node type of Traffic Manager */
 enum iavf_tm_node_type {
 	IAVF_TM_NODE_TYPE_PORT,
@@ -188,6 +198,7 @@ struct iavf_tm_conf {
 	struct iavf_tm_node *root; /* root node - vf vsi */
 	struct iavf_tm_node_list tc_list; /* node list for all the TCs */
 	struct iavf_tm_node_list queue_list; /* node list for all the queues */
+	struct iavf_shaper_profile_list shaper_profile_list;
 	uint32_t nb_tc_node;
 	uint32_t nb_queue_node;
 	bool committed;
@@ -451,6 +462,8 @@ int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 int iavf_request_queues(struct rte_eth_dev *dev, uint16_t num);
 int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
 int iavf_get_qos_cap(struct iavf_adapter *adapter);
+int iavf_set_q_bw(struct rte_eth_dev *dev,
+		  struct virtchnl_queues_bw_cfg *q_bw, uint16_t size);
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			struct virtchnl_queue_tc_mapping *q_tc_mapping,
 			uint16_t size);
diff --git a/drivers/net/iavf/iavf_tm.c b/drivers/net/iavf/iavf_tm.c
index 8d92062c7f..32bb3be45e 100644
--- a/drivers/net/iavf/iavf_tm.c
+++ b/drivers/net/iavf/iavf_tm.c
@@ -8,6 +8,13 @@
 static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 				 __rte_unused int clear_on_fail,
 				 __rte_unused struct rte_tm_error *error);
+static int iavf_shaper_profile_add(struct rte_eth_dev *dev,
+				   uint32_t shaper_profile_id,
+				   struct rte_tm_shaper_params *profile,
+				   struct rte_tm_error *error);
+static int iavf_shaper_profile_del(struct rte_eth_dev *dev,
+				   uint32_t shaper_profile_id,
+				   struct rte_tm_error *error);
 static int iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	      uint32_t parent_node_id, uint32_t priority,
 	      uint32_t weight, uint32_t level_id,
@@ -30,6 +37,8 @@ static int iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 		   int *is_leaf, struct rte_tm_error *error);
 
 const struct rte_tm_ops iavf_tm_ops = {
+	.shaper_profile_add = iavf_shaper_profile_add,
+	.shaper_profile_delete = iavf_shaper_profile_del,
 	.node_add = iavf_tm_node_add,
 	.node_delete = iavf_tm_node_delete,
 	.capabilities_get = iavf_tm_capabilities_get,
@@ -44,6 +53,9 @@ iavf_tm_conf_init(struct rte_eth_dev *dev)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
+	/* initialize shaper profile list */
+	TAILQ_INIT(&vf->tm_conf.shaper_profile_list);
+
 	/* initialize node configuration */
 	vf->tm_conf.root = NULL;
 	TAILQ_INIT(&vf->tm_conf.tc_list);
@@ -57,6 +69,7 @@ void
 iavf_tm_conf_uninit(struct rte_eth_dev *dev)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
 	struct iavf_tm_node *tm_node;
 
 	/* clear node configuration */
@@ -74,6 +87,14 @@ iavf_tm_conf_uninit(struct rte_eth_dev *dev)
 		rte_free(vf->tm_conf.root);
 		vf->tm_conf.root = NULL;
 	}
+
+	/* Remove all shaper profiles */
+	while ((shaper_profile =
+	       TAILQ_FIRST(&vf->tm_conf.shaper_profile_list))) {
+		TAILQ_REMOVE(&vf->tm_conf.shaper_profile_list,
+			     shaper_profile, node);
+		rte_free(shaper_profile);
+	}
 }
 
 static inline struct iavf_tm_node *
@@ -132,13 +153,6 @@ iavf_node_param_check(struct iavf_info *vf, uint32_t node_id,
 		return -EINVAL;
 	}
 
-	/* not support shaper profile */
-	if (params->shaper_profile_id) {
-		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
-		error->message = "shaper profile not supported";
-		return -EINVAL;
-	}
-
 	/* not support shared shaper */
 	if (params->shared_shaper_id) {
 		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
@@ -236,6 +250,23 @@ iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 	return 0;
 }
 
+static inline struct iavf_tm_shaper_profile *
+iavf_shaper_profile_search(struct rte_eth_dev *dev,
+			   uint32_t shaper_profile_id)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_shaper_profile_list *shaper_profile_list =
+		&vf->tm_conf.shaper_profile_list;
+	struct iavf_tm_shaper_profile *shaper_profile;
+
+	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+		if (shaper_profile_id == shaper_profile->shaper_profile_id)
+			return shaper_profile;
+	}
+
+	return NULL;
+}
+
 static int
 iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	      uint32_t parent_node_id, uint32_t priority,
@@ -246,6 +277,7 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
 	enum iavf_tm_node_type parent_node_type = IAVF_TM_NODE_TYPE_MAX;
+	struct iavf_tm_shaper_profile *shaper_profile = NULL;
 	struct iavf_tm_node *tm_node;
 	struct iavf_tm_node *parent_node;
 	uint16_t tc_nb = vf->qos_cap->num_elem;
@@ -273,6 +305,18 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 		return -EINVAL;
 	}
 
+	/* check the shaper profile id */
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		shaper_profile = iavf_shaper_profile_search(dev,
+			params->shaper_profile_id);
+		if (!shaper_profile) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "shaper profile does not exist";
+			return -EINVAL;
+		}
+	}
+
 	/* root node if not have a parent */
 	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
 		/* check level */
@@ -358,6 +402,7 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	tm_node->id = node_id;
 	tm_node->reference_count = 0;
 	tm_node->parent = parent_node;
+	tm_node->shaper_profile = shaper_profile;
 	rte_memcpy(&tm_node->params, params,
 			 sizeof(struct rte_tm_node_params));
 	if (parent_node_type == IAVF_TM_NODE_TYPE_PORT) {
@@ -373,6 +418,10 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	}
 	tm_node->parent->reference_count++;
 
+	/* increase the reference counter of the shaper profile */
+	if (shaper_profile)
+		shaper_profile->reference_count++;
+
 	return 0;
 }
 
@@ -437,6 +486,103 @@ iavf_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
 	return 0;
 }
 
+static int
+iavf_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+				struct rte_tm_error *error)
+{
+	/* min bucket size not supported */
+	if (profile->committed.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+		error->message = "committed bucket size not supported";
+		return -EINVAL;
+	}
+	/* max bucket size not supported */
+	if (profile->peak.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+		error->message = "peak bucket size not supported";
+		return -EINVAL;
+	}
+	/* length adjustment not supported */
+	if (profile->pkt_length_adjust) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+		error->message = "packet length adjustment not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+iavf_shaper_profile_add(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_shaper_params *profile,
+			struct rte_tm_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
+	int ret;
+
+	if (!profile || !error)
+		return -EINVAL;
+
+	ret = iavf_shaper_profile_param_check(profile, error);
+	if (ret)
+		return ret;
+
+	shaper_profile = iavf_shaper_profile_search(dev, shaper_profile_id);
+
+	if (shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID already exists";
+		return -EINVAL;
+	}
+
+	shaper_profile = rte_zmalloc("iavf_tm_shaper_profile",
+				     sizeof(struct iavf_tm_shaper_profile),
+				     0);
+	if (!shaper_profile)
+		return -ENOMEM;
+	shaper_profile->shaper_profile_id = shaper_profile_id;
+	rte_memcpy(&shaper_profile->profile, profile,
+			 sizeof(struct rte_tm_shaper_params));
+	TAILQ_INSERT_TAIL(&vf->tm_conf.shaper_profile_list,
+			  shaper_profile, node);
+
+	return 0;
+}
+
+static int
+iavf_shaper_profile_del(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
+
+	if (!error)
+		return -EINVAL;
+
+	shaper_profile = iavf_shaper_profile_search(dev, shaper_profile_id);
+
+	if (!shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID does not exist";
+		return -EINVAL;
+	}
+
+	/* don't delete a profile if it's used by one or several nodes */
+	if (shaper_profile->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "profile in use";
+		return -EINVAL;
+	}
+
+	TAILQ_REMOVE(&vf->tm_conf.shaper_profile_list, shaper_profile, node);
+	rte_free(shaper_profile);
+
+	return 0;
+}
+
 static int
 iavf_tm_capabilities_get(struct rte_eth_dev *dev,
 			 struct rte_tm_capabilities *cap,
@@ -656,10 +802,11 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct virtchnl_queue_tc_mapping *q_tc_mapping;
+	struct virtchnl_queues_bw_cfg *q_bw;
 	struct iavf_tm_node_list *queue_list = &vf->tm_conf.queue_list;
 	struct iavf_tm_node *tm_node;
 	struct iavf_qtc_map *qtc_map;
-	uint16_t size;
+	uint16_t size, size_q;
 	int index = 0, node_committed = 0;
 	int i, ret_val = IAVF_SUCCESS;
 
@@ -691,10 +838,21 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 		goto fail_clear;
 	}
 
+	size_q = sizeof(*q_bw) + sizeof(q_bw->cfg[0]) *
+		(vf->num_queue_pairs - 1);
+	q_bw = rte_zmalloc("q_bw", size_q, 0);
+	if (!q_bw) {
+		ret_val = IAVF_ERR_NO_MEMORY;
+		goto fail_clear;
+	}
+
 	q_tc_mapping->vsi_id = vf->vsi.vsi_id;
 	q_tc_mapping->num_tc = vf->qos_cap->num_elem;
 	q_tc_mapping->num_queue_pairs = vf->num_queue_pairs;
 
+	q_bw->vsi_id = vf->vsi.vsi_id;
+	q_bw->num_queues = vf->num_queue_pairs;
+
 	TAILQ_FOREACH(tm_node, queue_list, node) {
 		if (tm_node->tc >= q_tc_mapping->num_tc) {
 			PMD_DRV_LOG(ERR, "TC%d is not enabled", tm_node->tc);
@@ -702,6 +860,18 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 			goto fail_clear;
 		}
 		q_tc_mapping->tc[tm_node->tc].req.queue_count++;
+
+		if (tm_node->shaper_profile) {
+			q_bw->cfg[node_committed].queue_id = node_committed;
+			q_bw->cfg[node_committed].shaper.peak =
+			tm_node->shaper_profile->profile.peak.rate /
+			1000 * IAVF_BITS_PER_BYTE;
+			q_bw->cfg[node_committed].shaper.committed =
+			tm_node->shaper_profile->profile.committed.rate /
+			1000 * IAVF_BITS_PER_BYTE;
+			q_bw->cfg[node_committed].tc = tm_node->tc;
+		}
+
 		node_committed++;
 	}
 
@@ -712,6 +882,10 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 		goto fail_clear;
 	}
 
+	ret_val = iavf_set_q_bw(dev, q_bw, size_q);
+	if (ret_val)
+		goto fail_clear;
+
 	/* store the queue TC mapping info */
 	qtc_map = rte_zmalloc("qtc_map",
 		sizeof(struct iavf_qtc_map) * q_tc_mapping->num_tc, 0);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 169e1f2012..537369f736 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1636,6 +1636,29 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	return err;
 }
 
+int iavf_set_q_bw(struct rte_eth_dev *dev,
+		struct virtchnl_queues_bw_cfg *q_bw, uint16_t size)
+{
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_cmd_info args;
+	int err;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_CONFIG_QUEUE_BW;
+	args.in_args = (uint8_t *)q_bw;
+	args.in_args_size = size;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of"
+			    " VIRTCHNL_OP_CONFIG_QUEUE_BW");
+	return err;
+}
+
 int
 iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 			struct rte_ether_addr *mc_addrs,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v4 3/4] net/iavf: support quanta size configuration
  2022-04-08  8:45 ` [PATCH v4 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
  2022-04-08  8:45   ` [PATCH v4 1/4] common/iavf: support " Wenjun Wu
  2022-04-08  8:45   ` [PATCH v4 2/4] net/iavf: support queue rate limit configuration Wenjun Wu
@ 2022-04-08  8:45   ` Wenjun Wu
  2022-04-08  8:45   ` [PATCH v4 4/4] doc: add release notes for 22.07 Wenjun Wu
  3 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-08  8:45 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing

This patch adds quanta size configuration support.
Quanta size should be between 256 and 4096, and must be a multiple of 64.
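
The constraint can be captured in a small helper; a sketch only, as
the patch itself validates inline in iavf_parse_devargs():

	/* valid: 256..4096 bytes, in steps of 64 */
	static int quanta_size_valid(uint16_t q)
	{
		return q >= 256 && q <= 4096 && (q % 64) == 0;
	}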

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/net/iavf/iavf.h        |  3 +++
 drivers/net/iavf/iavf_ethdev.c | 38 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c  | 31 +++++++++++++++++++++++++++
 3 files changed, 72 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 96515a3ee9..c0a4a47b04 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -292,6 +292,7 @@ enum iavf_proto_xtr_type {
 struct iavf_devargs {
 	uint8_t proto_xtr_dflt;
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
+	uint16_t quanta_size;
 };
 
 struct iavf_security_ctx;
@@ -467,6 +468,8 @@ int iavf_set_q_bw(struct rte_eth_dev *dev,
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			struct virtchnl_queue_tc_mapping *q_tc_mapping,
 			uint16_t size);
+int iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id,
+			    u16 num_queues);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
 int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..7d093bdc24 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -34,9 +34,11 @@
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
+#define IAVF_QUANTA_SIZE_ARG       "quanta_size"
 
 static const char * const iavf_valid_args[] = {
 	IAVF_PROTO_XTR_ARG,
+	IAVF_QUANTA_SIZE_ARG,
 	NULL
 };
 
@@ -950,6 +952,9 @@ iavf_dev_start(struct rte_eth_dev *dev)
 		return -1;
 	}
 
+	if (iavf_set_vf_quanta_size(adapter, index, num_queue_pairs) != 0)
+		PMD_DRV_LOG(WARNING, "configure quanta size failed");
+
 	/* If needed, send configure queues msg multiple times to make the
 	 * adminq buffer length smaller than the 4K limitation.
 	 */
@@ -2092,6 +2097,25 @@ iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
 	return 0;
 }
 
+static int
+parse_u16(__rte_unused const char *key, const char *value, void *args)
+{
+	u16 *num = (u16 *)args;
+	unsigned long long tmp;
+
+	errno = 0;
+	tmp = strtoull(value, NULL, 10);
+	if (errno || !tmp || tmp > UINT16_MAX) {
+		PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u16",
+			    key, value);
+		return -1;
+	}
+
+	*num = tmp;
+
+	return 0;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2118,6 +2142,20 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 	if (ret)
 		goto bail;
 
+	ret = rte_kvargs_process(kvlist, IAVF_QUANTA_SIZE_ARG,
+				 &parse_u16, &ad->devargs.quanta_size);
+	if (ret)
+		goto bail;
+
+	if (ad->devargs.quanta_size == 0)
+		ad->devargs.quanta_size = 1024;
+
+	if (ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 ||
+	    ad->devargs.quanta_size % 64) {
+		PMD_INIT_LOG(ERR, "invalid quanta size");
+		ret = -EINVAL;
+	}
+
 bail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 537369f736..f9452d14ae 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1828,3 +1828,34 @@ iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 
 	return 0;
 }
+
+int
+iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id, u16 num_queues)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	struct virtchnl_quanta_cfg q_quanta;
+	int err;
+
+	if (adapter->devargs.quanta_size == 0)
+		return 0;
+
+	q_quanta.quanta_size = adapter->devargs.quanta_size;
+	q_quanta.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
+	q_quanta.queue_select.start_queue_id = start_queue_id;
+	q_quanta.queue_select.num_queues = num_queues;
+
+	args.ops = VIRTCHNL_OP_CONFIG_QUANTA;
+	args.in_args = (uint8_t *)&q_quanta;
+	args.in_args_size = sizeof(q_quanta);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to execute command VIRTCHNL_OP_CONFIG_QUANTA");
+		return err;
+	}
+
+	return 0;
+}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v4 4/4] doc: add release notes for 22.07
  2022-04-08  8:45 ` [PATCH v4 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
                     ` (2 preceding siblings ...)
  2022-04-08  8:45   ` [PATCH v4 3/4] net/iavf: support quanta size configuration Wenjun Wu
@ 2022-04-08  8:45   ` Wenjun Wu
  3 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-08  8:45 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing

Add support for queue rate limit and quanta size configuration.

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 doc/guides/rel_notes/release_22_07.rst | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst
index 42a5f2d990..f1b4057d70 100644
--- a/doc/guides/rel_notes/release_22_07.rst
+++ b/doc/guides/rel_notes/release_22_07.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel iavf driver.**
+
+  * Added Tx QoS queue rate limitation support.
+  * Added quanta size configuration support.
 
 Removed Items
 -------------
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v5 0/4] Enable queue rate limit and quanta size configuration
  2022-03-29  2:07 [PATCH v1 0/3] Enable queue rate limit and quanta size configuration Wenjun Wu
                   ` (5 preceding siblings ...)
  2022-04-08  8:45 ` [PATCH v4 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
@ 2022-04-19  2:05 ` Wenjun Wu
  2022-04-19  2:05   ` [PATCH v5 1/4] common/iavf: support " Wenjun Wu
                     ` (4 more replies)
  2022-04-22  1:42 ` [PATCH v6 0/3] " Wenjun Wu
  7 siblings, 5 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-19  2:05 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing, qi.z.zhang

This patch set adds queue rate limit and quanta size configuration.
Quanta size can be changed through the driver devarg quanta_size=xxx.
Quanta size should be set to a value between 256 and 4096 and must be
a multiple of 64.

v2: Rework virtchnl.
v3: Add release note.
v4: Fix an issue where quanta size configuration would block device
    init if the PF does not support it.
v5: Update driver guide.

Wenjun Wu (4):
  common/iavf: support queue rate limit and quanta size configuration
  net/iavf: support queue rate limit configuration
  net/iavf: support quanta size configuration
  doc: update IAVF driver guide and 22.07 release notes

 doc/guides/nics/intel_vf.rst           |   4 +
 doc/guides/rel_notes/release_22_07.rst |   4 +
 drivers/common/iavf/virtchnl.h         |  50 +++++++
 drivers/net/iavf/iavf.h                |  16 +++
 drivers/net/iavf/iavf_ethdev.c         |  38 +++++
 drivers/net/iavf/iavf_tm.c             | 190 +++++++++++++++++++++++--
 drivers/net/iavf/iavf_vchnl.c          |  54 +++++++
 7 files changed, 348 insertions(+), 8 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v5 1/4] common/iavf: support queue rate limit and quanta size configuration
  2022-04-19  2:05 ` [PATCH v5 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
@ 2022-04-19  2:05   ` Wenjun Wu
  2022-04-19  2:05   ` [PATCH v5 2/4] net/iavf: support queue rate limit configuration Wenjun Wu
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-19  2:05 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing, qi.z.zhang

This patch adds new virtchnl opcodes and structures for rate limit
and quanta size configuration, which include:
1. VIRTCHNL_OP_CONFIG_QUEUE_BW, to configure max bandwidth for each
VF per queue.
2. VIRTCHNL_OP_CONFIG_QUANTA, to configure quanta size per queue.
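
As a worked example of the variable-length layout (sizes per the
VIRTCHNL_CHECK_STRUCT_LEN assertions below): a 4-queue
VIRTCHNL_OP_CONFIG_QUEUE_BW message is 16 + (4 - 1) * 12 = 52 bytes,
which is exactly what virtchnl_vc_validate_vf_msg recomputes from
num_queues on the receive side.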

Signed-off-by: Ting Xu <ting.xu@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/common/iavf/virtchnl.h | 50 ++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 3e44eca7d8..249ae6ed23 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -164,6 +164,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
 	VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
+	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
+	VIRTCHNL_OP_CONFIG_QUANTA = 113,
 	VIRTCHNL_OP_MAX,
 };
 
@@ -1872,6 +1874,23 @@ struct virtchnl_queue_tc_mapping {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);
 
+/* VIRTCHNL_OP_CONFIG_QUEUE_BW */
+struct virtchnl_queue_bw {
+	u16 queue_id;
+	u8 tc;
+	u8 pad;
+	struct virtchnl_shaper_bw shaper;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
+
+struct virtchnl_queues_bw_cfg {
+	u16 vsi_id;
+	u16 num_queues;
+	struct virtchnl_queue_bw cfg[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queues_bw_cfg);
 
 /* TX and RX queue types are valid in legacy as well as split queue models.
  * With Split Queue model, 2 additional types are introduced - TX_COMPLETION
@@ -1978,6 +1997,12 @@ struct virtchnl_queue_vector_maps {
 
 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);
 
+struct virtchnl_quanta_cfg {
+	u16 quanta_size;
+	struct virtchnl_queue_chunk queue_select;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
 
 /* Since VF messages are limited by u16 size, precalculate the maximum possible
  * values of nested elements in virtchnl structures that virtual channel can
@@ -2244,6 +2269,31 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 					 sizeof(q_tc->tc[0]);
 		}
 		break;
+	case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+		valid_len = sizeof(struct virtchnl_queues_bw_cfg);
+		if (msglen >= valid_len) {
+			struct virtchnl_queues_bw_cfg *q_bw =
+				(struct virtchnl_queues_bw_cfg *)msg;
+			if (q_bw->num_queues == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (q_bw->num_queues - 1) *
+					 sizeof(q_bw->cfg[0]);
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_QUANTA:
+		valid_len = sizeof(struct virtchnl_quanta_cfg);
+		if (msglen >= valid_len) {
+			struct virtchnl_quanta_cfg *q_quanta =
+				(struct virtchnl_quanta_cfg *)msg;
+			if (q_quanta->quanta_size == 0 ||
+			    q_quanta->queue_select.num_queues == 0) {
+				err_msg_format = true;
+				break;
+			}
+		}
+		break;
 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
 		break;
 	case VIRTCHNL_OP_ADD_VLAN_V2:
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v5 2/4] net/iavf: support queue rate limit configuration
  2022-04-19  2:05 ` [PATCH v5 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
  2022-04-19  2:05   ` [PATCH v5 1/4] common/iavf: support " Wenjun Wu
@ 2022-04-19  2:05   ` Wenjun Wu
  2022-04-19  2:05   ` [PATCH v5 3/4] net/iavf: support quanta size configuration Wenjun Wu
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-19  2:05 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing, qi.z.zhang

This patch adds queue rate limit configuration support.
Only max bandwidth is supported.
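
As a worked unit conversion (illustrative numbers only): an rte_tm
peak rate of 1,250,000 bytes/s is sent to the PF as
1,250,000 / 1000 * 8 = 10,000 kbps, i.e. 10 Mbit/s, matching the
IAVF_BITS_PER_BYTE scaling in iavf_hierarchy_commit().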

Signed-off-by: Ting Xu <ting.xu@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  13 +++
 drivers/net/iavf/iavf_tm.c    | 190 ++++++++++++++++++++++++++++++++--
 drivers/net/iavf/iavf_vchnl.c |  23 ++++
 3 files changed, 218 insertions(+), 8 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index a01d18e61b..96515a3ee9 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -170,11 +170,21 @@ struct iavf_tm_node {
 	uint32_t weight;
 	uint32_t reference_count;
 	struct iavf_tm_node *parent;
+	struct iavf_tm_shaper_profile *shaper_profile;
 	struct rte_tm_node_params params;
 };
 
 TAILQ_HEAD(iavf_tm_node_list, iavf_tm_node);
 
+struct iavf_tm_shaper_profile {
+	TAILQ_ENTRY(iavf_tm_shaper_profile) node;
+	uint32_t shaper_profile_id;
+	uint32_t reference_count;
+	struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(iavf_shaper_profile_list, iavf_tm_shaper_profile);
+
 /* node type of Traffic Manager */
 enum iavf_tm_node_type {
 	IAVF_TM_NODE_TYPE_PORT,
@@ -188,6 +198,7 @@ struct iavf_tm_conf {
 	struct iavf_tm_node *root; /* root node - vf vsi */
 	struct iavf_tm_node_list tc_list; /* node list for all the TCs */
 	struct iavf_tm_node_list queue_list; /* node list for all the queues */
+	struct iavf_shaper_profile_list shaper_profile_list;
 	uint32_t nb_tc_node;
 	uint32_t nb_queue_node;
 	bool committed;
@@ -451,6 +462,8 @@ int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 int iavf_request_queues(struct rte_eth_dev *dev, uint16_t num);
 int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
 int iavf_get_qos_cap(struct iavf_adapter *adapter);
+int iavf_set_q_bw(struct rte_eth_dev *dev,
+		  struct virtchnl_queues_bw_cfg *q_bw, uint16_t size);
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			struct virtchnl_queue_tc_mapping *q_tc_mapping,
 			uint16_t size);
diff --git a/drivers/net/iavf/iavf_tm.c b/drivers/net/iavf/iavf_tm.c
index 8d92062c7f..32bb3be45e 100644
--- a/drivers/net/iavf/iavf_tm.c
+++ b/drivers/net/iavf/iavf_tm.c
@@ -8,6 +8,13 @@
 static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 				 __rte_unused int clear_on_fail,
 				 __rte_unused struct rte_tm_error *error);
+static int iavf_shaper_profile_add(struct rte_eth_dev *dev,
+				   uint32_t shaper_profile_id,
+				   struct rte_tm_shaper_params *profile,
+				   struct rte_tm_error *error);
+static int iavf_shaper_profile_del(struct rte_eth_dev *dev,
+				   uint32_t shaper_profile_id,
+				   struct rte_tm_error *error);
 static int iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	      uint32_t parent_node_id, uint32_t priority,
 	      uint32_t weight, uint32_t level_id,
@@ -30,6 +37,8 @@ static int iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 		   int *is_leaf, struct rte_tm_error *error);
 
 const struct rte_tm_ops iavf_tm_ops = {
+	.shaper_profile_add = iavf_shaper_profile_add,
+	.shaper_profile_delete = iavf_shaper_profile_del,
 	.node_add = iavf_tm_node_add,
 	.node_delete = iavf_tm_node_delete,
 	.capabilities_get = iavf_tm_capabilities_get,
@@ -44,6 +53,9 @@ iavf_tm_conf_init(struct rte_eth_dev *dev)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
+	/* initialize shaper profile list */
+	TAILQ_INIT(&vf->tm_conf.shaper_profile_list);
+
 	/* initialize node configuration */
 	vf->tm_conf.root = NULL;
 	TAILQ_INIT(&vf->tm_conf.tc_list);
@@ -57,6 +69,7 @@ void
 iavf_tm_conf_uninit(struct rte_eth_dev *dev)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
 	struct iavf_tm_node *tm_node;
 
 	/* clear node configuration */
@@ -74,6 +87,14 @@ iavf_tm_conf_uninit(struct rte_eth_dev *dev)
 		rte_free(vf->tm_conf.root);
 		vf->tm_conf.root = NULL;
 	}
+
+	/* Remove all shaper profiles */
+	while ((shaper_profile =
+	       TAILQ_FIRST(&vf->tm_conf.shaper_profile_list))) {
+		TAILQ_REMOVE(&vf->tm_conf.shaper_profile_list,
+			     shaper_profile, node);
+		rte_free(shaper_profile);
+	}
 }
 
 static inline struct iavf_tm_node *
@@ -132,13 +153,6 @@ iavf_node_param_check(struct iavf_info *vf, uint32_t node_id,
 		return -EINVAL;
 	}
 
-	/* not support shaper profile */
-	if (params->shaper_profile_id) {
-		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
-		error->message = "shaper profile not supported";
-		return -EINVAL;
-	}
-
 	/* not support shared shaper */
 	if (params->shared_shaper_id) {
 		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
@@ -236,6 +250,23 @@ iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 	return 0;
 }
 
+static inline struct iavf_tm_shaper_profile *
+iavf_shaper_profile_search(struct rte_eth_dev *dev,
+			   uint32_t shaper_profile_id)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_shaper_profile_list *shaper_profile_list =
+		&vf->tm_conf.shaper_profile_list;
+	struct iavf_tm_shaper_profile *shaper_profile;
+
+	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+		if (shaper_profile_id == shaper_profile->shaper_profile_id)
+			return shaper_profile;
+	}
+
+	return NULL;
+}
+
 static int
 iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	      uint32_t parent_node_id, uint32_t priority,
@@ -246,6 +277,7 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
 	enum iavf_tm_node_type parent_node_type = IAVF_TM_NODE_TYPE_MAX;
+	struct iavf_tm_shaper_profile *shaper_profile = NULL;
 	struct iavf_tm_node *tm_node;
 	struct iavf_tm_node *parent_node;
 	uint16_t tc_nb = vf->qos_cap->num_elem;
@@ -273,6 +305,18 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 		return -EINVAL;
 	}
 
+	/* check the shaper profile id */
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		shaper_profile = iavf_shaper_profile_search(dev,
+			params->shaper_profile_id);
+		if (!shaper_profile) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "shaper profile does not exist";
+			return -EINVAL;
+		}
+	}
+
 	/* root node if not have a parent */
 	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
 		/* check level */
@@ -358,6 +402,7 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	tm_node->id = node_id;
 	tm_node->reference_count = 0;
 	tm_node->parent = parent_node;
+	tm_node->shaper_profile = shaper_profile;
 	rte_memcpy(&tm_node->params, params,
 			 sizeof(struct rte_tm_node_params));
 	if (parent_node_type == IAVF_TM_NODE_TYPE_PORT) {
@@ -373,6 +418,10 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	}
 	tm_node->parent->reference_count++;
 
+	/* increase the reference counter of the shaper profile */
+	if (shaper_profile)
+		shaper_profile->reference_count++;
+
 	return 0;
 }
 
@@ -437,6 +486,103 @@ iavf_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
 	return 0;
 }
 
+static int
+iavf_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+				struct rte_tm_error *error)
+{
+	/* min bucket size not supported */
+	if (profile->committed.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+		error->message = "committed bucket size not supported";
+		return -EINVAL;
+	}
+	/* max bucket size not supported */
+	if (profile->peak.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+		error->message = "peak bucket size not supported";
+		return -EINVAL;
+	}
+	/* length adjustment not supported */
+	if (profile->pkt_length_adjust) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+		error->message = "packet length adjustment not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+iavf_shaper_profile_add(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_shaper_params *profile,
+			struct rte_tm_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
+	int ret;
+
+	if (!profile || !error)
+		return -EINVAL;
+
+	ret = iavf_shaper_profile_param_check(profile, error);
+	if (ret)
+		return ret;
+
+	shaper_profile = iavf_shaper_profile_search(dev, shaper_profile_id);
+
+	if (shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID already exists";
+		return -EINVAL;
+	}
+
+	shaper_profile = rte_zmalloc("iavf_tm_shaper_profile",
+				     sizeof(struct iavf_tm_shaper_profile),
+				     0);
+	if (!shaper_profile)
+		return -ENOMEM;
+	shaper_profile->shaper_profile_id = shaper_profile_id;
+	rte_memcpy(&shaper_profile->profile, profile,
+			 sizeof(struct rte_tm_shaper_params));
+	TAILQ_INSERT_TAIL(&vf->tm_conf.shaper_profile_list,
+			  shaper_profile, node);
+
+	return 0;
+}
+
+static int
+iavf_shaper_profile_del(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
+
+	if (!error)
+		return -EINVAL;
+
+	shaper_profile = iavf_shaper_profile_search(dev, shaper_profile_id);
+
+	if (!shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID does not exist";
+		return -EINVAL;
+	}
+
+	/* don't delete a profile if it's used by one or several nodes */
+	if (shaper_profile->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "profile in use";
+		return -EINVAL;
+	}
+
+	TAILQ_REMOVE(&vf->tm_conf.shaper_profile_list, shaper_profile, node);
+	rte_free(shaper_profile);
+
+	return 0;
+}
+
 static int
 iavf_tm_capabilities_get(struct rte_eth_dev *dev,
 			 struct rte_tm_capabilities *cap,
@@ -656,10 +802,11 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct virtchnl_queue_tc_mapping *q_tc_mapping;
+	struct virtchnl_queues_bw_cfg *q_bw;
 	struct iavf_tm_node_list *queue_list = &vf->tm_conf.queue_list;
 	struct iavf_tm_node *tm_node;
 	struct iavf_qtc_map *qtc_map;
-	uint16_t size;
+	uint16_t size, size_q;
 	int index = 0, node_committed = 0;
 	int i, ret_val = IAVF_SUCCESS;
 
@@ -691,10 +838,21 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 		goto fail_clear;
 	}
 
+	size_q = sizeof(*q_bw) + sizeof(q_bw->cfg[0]) *
+		(vf->num_queue_pairs - 1);
+	q_bw = rte_zmalloc("q_bw", size_q, 0);
+	if (!q_bw) {
+		ret_val = IAVF_ERR_NO_MEMORY;
+		goto fail_clear;
+	}
+
 	q_tc_mapping->vsi_id = vf->vsi.vsi_id;
 	q_tc_mapping->num_tc = vf->qos_cap->num_elem;
 	q_tc_mapping->num_queue_pairs = vf->num_queue_pairs;
 
+	q_bw->vsi_id = vf->vsi.vsi_id;
+	q_bw->num_queues = vf->num_queue_pairs;
+
 	TAILQ_FOREACH(tm_node, queue_list, node) {
 		if (tm_node->tc >= q_tc_mapping->num_tc) {
 			PMD_DRV_LOG(ERR, "TC%d is not enabled", tm_node->tc);
@@ -702,6 +860,18 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 			goto fail_clear;
 		}
 		q_tc_mapping->tc[tm_node->tc].req.queue_count++;
+
+		if (tm_node->shaper_profile) {
+			q_bw->cfg[node_committed].queue_id = node_committed;
+			q_bw->cfg[node_committed].shaper.peak =
+			tm_node->shaper_profile->profile.peak.rate /
+			1000 * IAVF_BITS_PER_BYTE;
+			q_bw->cfg[node_committed].shaper.committed =
+			tm_node->shaper_profile->profile.committed.rate /
+			1000 * IAVF_BITS_PER_BYTE;
+			q_bw->cfg[node_committed].tc = tm_node->tc;
+		}
+
 		node_committed++;
 	}
 
@@ -712,6 +882,10 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 		goto fail_clear;
 	}
 
+	ret_val = iavf_set_q_bw(dev, q_bw, size_q);
+	if (ret_val)
+		goto fail_clear;
+
 	/* store the queue TC mapping info */
 	qtc_map = rte_zmalloc("qtc_map",
 		sizeof(struct iavf_qtc_map) * q_tc_mapping->num_tc, 0);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 169e1f2012..537369f736 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1636,6 +1636,29 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	return err;
 }
 
+int iavf_set_q_bw(struct rte_eth_dev *dev,
+		struct virtchnl_queues_bw_cfg *q_bw, uint16_t size)
+{
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_cmd_info args;
+	int err;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_CONFIG_QUEUE_BW;
+	args.in_args = (uint8_t *)q_bw;
+	args.in_args_size = size;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command"
+			    " VIRTCHNL_OP_CONFIG_QUEUE_BW");
+	return err;
+}
+
 int
 iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 			struct rte_ether_addr *mc_addrs,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread
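
A note for readers skimming the hunk above: rte_tm expresses shaper
rates in bytes per second, while the virtchnl shaper fields carry
kilobits per second, hence the "/ 1000 * IAVF_BITS_PER_BYTE" in
iavf_hierarchy_commit(). A minimal sketch of that conversion, assuming
IAVF_BITS_PER_BYTE is 8 and with an illustrative helper name:

#include <stdint.h>

#define IAVF_BITS_PER_BYTE 8

/* bytes/s -> kbit/s; dividing before multiplying avoids 64-bit
 * overflow for very large rates, at the cost of truncating any
 * sub-kbit/s remainder.
 */
static inline uint64_t
bytes_per_sec_to_kbps(uint64_t rate)
{
	return rate / 1000 * IAVF_BITS_PER_BYTE;
}

/* e.g. 1250000 B/s (10 Mbit/s) -> 1250 * 8 = 10000 kbit/s */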

* [PATCH v5 3/4] net/iavf: support quanta size configuration
  2022-04-19  2:05 ` [PATCH v5 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
  2022-04-19  2:05   ` [PATCH v5 1/4] common/iavf: support " Wenjun Wu
  2022-04-19  2:05   ` [PATCH v5 2/4] net/iavf: support queue rate limit configuration Wenjun Wu
@ 2022-04-19  2:05   ` Wenjun Wu
  2022-04-19  2:05   ` [PATCH v5 4/4] doc: update IAVF driver guide and 22.07 release notes Wenjun Wu
  2022-04-19  2:39   ` [PATCH v5 0/4] Enable queue rate limit and quanta size configuration Zhang, Qi Z
  4 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-19  2:05 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing, qi.z.zhang

This patch adds quanta size configuration support.
Quanta size should be between 256 and 4096, and be a multiple of 64.

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/net/iavf/iavf.h        |  3 +++
 drivers/net/iavf/iavf_ethdev.c | 38 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c  | 31 +++++++++++++++++++++++++++
 3 files changed, 72 insertions(+)
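
As a quick illustration of the constraint in the commit message above
(a sketch, not part of the patch): 64 is 2^6, so a value is a multiple
of 64 exactly when its six low bits are clear, which masking with 0x3F
tests. The helper name below is hypothetical.

#include <stdbool.h>
#include <stdint.h>

/* valid quanta size: within [256, 4096] and a multiple of 64;
 * (q & 0x3F) is nonzero exactly when q is not a multiple of 64.
 */
static bool
quanta_size_is_valid(uint16_t q)
{
	return q >= 256 && q <= 4096 && (q & 0x3F) == 0;
}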

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 96515a3ee9..c0a4a47b04 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -292,6 +292,7 @@ enum iavf_proto_xtr_type {
 struct iavf_devargs {
 	uint8_t proto_xtr_dflt;
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
+	uint16_t quanta_size;
 };
 
 struct iavf_security_ctx;
@@ -467,6 +468,8 @@ int iavf_set_q_bw(struct rte_eth_dev *dev,
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			struct virtchnl_queue_tc_mapping *q_tc_mapping,
 			uint16_t size);
+int iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id,
+			    u16 num_queues);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
 int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..7d093bdc24 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -34,9 +34,11 @@
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
+#define IAVF_QUANTA_SIZE_ARG       "quanta_size"
 
 static const char * const iavf_valid_args[] = {
 	IAVF_PROTO_XTR_ARG,
+	IAVF_QUANTA_SIZE_ARG,
 	NULL
 };
 
@@ -950,6 +952,9 @@ iavf_dev_start(struct rte_eth_dev *dev)
 		return -1;
 	}
 
+	if (iavf_set_vf_quanta_size(adapter, index, num_queue_pairs) != 0)
+		PMD_DRV_LOG(WARNING, "failed to configure quanta size");
+
 	/* If needed, send configure queues msg multiple times to make the
 	 * adminq buffer length smaller than the 4K limitation.
 	 */
@@ -2092,6 +2097,25 @@ iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
 	return 0;
 }
 
+static int
+parse_u16(__rte_unused const char *key, const char *value, void *args)
+{
+	u16 *num = (u16 *)args;
+	uint64_t tmp;
+
+	errno = 0;
+	tmp = strtoull(value, NULL, 10);
+	if (errno || !tmp || tmp > UINT16_MAX) {
+		PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u16",
+			    key, value);
+		return -1;
+	}
+
+	*num = tmp;
+
+	return 0;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2118,6 +2142,20 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 	if (ret)
 		goto bail;
 
+	ret = rte_kvargs_process(kvlist, IAVF_QUANTA_SIZE_ARG,
+				 &parse_u16, &ad->devargs.quanta_size);
+	if (ret)
+		goto bail;
+
+	if (ad->devargs.quanta_size == 0)
+		ad->devargs.quanta_size = 1024;
+
+	if (ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 ||
+	    ad->devargs.quanta_size & 0x3F) {
+		PMD_INIT_LOG(ERR, "invalid quanta size");
+		ret = -EINVAL;
+	}
+
 bail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 537369f736..f9452d14ae 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1828,3 +1828,34 @@ iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 
 	return 0;
 }
+
+int
+iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id, u16 num_queues)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	struct virtchnl_quanta_cfg q_quanta;
+	int err;
+
+	if (adapter->devargs.quanta_size == 0)
+		return 0;
+
+	q_quanta.quanta_size = adapter->devargs.quanta_size;
+	q_quanta.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
+	q_quanta.queue_select.start_queue_id = start_queue_id;
+	q_quanta.queue_select.num_queues = num_queues;
+
+	args.ops = VIRTCHNL_OP_CONFIG_QUANTA;
+	args.in_args = (uint8_t *)&q_quanta;
+	args.in_args_size = sizeof(q_quanta);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to execute command VIRTCHNL_OP_CONFIG_QUANTA");
+		return err;
+	}
+
+	return 0;
+}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v5 4/4] doc: update IAVF driver guide and 22.07 release notes
  2022-04-19  2:05 ` [PATCH v5 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
                     ` (2 preceding siblings ...)
  2022-04-19  2:05   ` [PATCH v5 3/4] net/iavf: support quanta size configuration Wenjun Wu
@ 2022-04-19  2:05   ` Wenjun Wu
  2022-04-19  2:39   ` [PATCH v5 0/4] Enable queue rate limit and quanta size configuration Zhang, Qi Z
  4 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-19  2:05 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing, qi.z.zhang

Update the driver guide and the 22.07 release notes for
quanta size configuration.

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 doc/guides/nics/intel_vf.rst           | 4 ++++
 doc/guides/rel_notes/release_22_07.rst | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index 648af39c22..6498135655 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -92,6 +92,10 @@ For more detail on SR-IOV, please refer to the following documents:
     available for IAVF PMD. The same devargs with the same parameters can be applied to IAVF PMD, for detail please reference
     the section ``Protocol extraction for per queue`` of ice.rst.
 
+    Quanta size configuration is also supported when IAVF is backed by an Intel® E810 device by setting ``devargs``
+    parameter ``quanta_size`` like ``-a 18:00.0,quanta_size=2048``. The default value is 1024, and quanta size should be
+    set to a multiple of 64 in legacy host interface mode.
+
 The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst
index 42a5f2d990..f1b4057d70 100644
--- a/doc/guides/rel_notes/release_22_07.rst
+++ b/doc/guides/rel_notes/release_22_07.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel iavf driver.**
+
+  * Added Tx QoS queue rate limitation support.
+  * Added quanta size configuration support.
 
 Removed Items
 -------------
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* RE: [PATCH v5 0/4] Enable queue rate limit and quanta size configuration
  2022-04-19  2:05 ` [PATCH v5 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
                     ` (3 preceding siblings ...)
  2022-04-19  2:05   ` [PATCH v5 4/4] doc: update IAVF driver guide and 22.07 release notes Wenjun Wu
@ 2022-04-19  2:39   ` Zhang, Qi Z
  4 siblings, 0 replies; 29+ messages in thread
From: Zhang, Qi Z @ 2022-04-19  2:39 UTC (permalink / raw)
  To: Wu, Wenjun1, dev, Wu,  Jingjing, Xing, Beilei



> -----Original Message-----
> From: Wu, Wenjun1 <wenjun1.wu@intel.com>
> Sent: Tuesday, April 19, 2022 10:06 AM
> To: dev@dpdk.org; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: [PATCH v5 0/4] Enable queue rate limit and quanta size configuration
> 
> This patch set adds queue rate limit and quanta size configuration.
> Quanta size can be changed by driver devarg quanta_size=xxx. Quanta size
> should be set to a value between 256 and 4096 and be a multiple of 64.
> 
> v2: Rework virtchnl.
> v3: Add release note.
> v4: Quanta size configuration will block device init
>     if PF does not support. Fix this issue.
> v5: Update driver guide.
> 
> Wenjun Wu (4):
>   common/iavf: support queue rate limit and quanta size configuration
>   net/iavf: support queue rate limit configuration
>   net/iavf: support quanta size configuration
>   doc: update IAVF driver guide and 22.07 release notes
> 
>  doc/guides/nics/intel_vf.rst           |   4 +
>  doc/guides/rel_notes/release_22_07.rst |   4 +
>  drivers/common/iavf/virtchnl.h         |  50 +++++++
>  drivers/net/iavf/iavf.h                |  16 +++
>  drivers/net/iavf/iavf_ethdev.c         |  38 +++++
>  drivers/net/iavf/iavf_tm.c             | 190 +++++++++++++++++++++++--
>  drivers/net/iavf/iavf_vchnl.c          |  54 +++++++
>  7 files changed, 348 insertions(+), 8 deletions(-)
> 
> --
> 2.25.1

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi

^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v6 0/3] Enable queue rate limit and quanta size configuration
  2022-03-29  2:07 [PATCH v1 0/3] Enable queue rate limit and quanta size configuration Wenjun Wu
                   ` (6 preceding siblings ...)
  2022-04-19  2:05 ` [PATCH v5 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
@ 2022-04-22  1:42 ` Wenjun Wu
  2022-04-22  1:42   ` [PATCH v6 1/3] common/iavf: support " Wenjun Wu
                     ` (3 more replies)
  7 siblings, 4 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-22  1:42 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing, qi.z.zhang

This patch set adds queue rate limit and quanta size configuration.
Quanta size can be changed by driver devarg quanta_size=xxx. Quanta
size should be set to a value between 256 and 4096 and be a multiple
of 64.
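
For illustration, the devarg can be supplied through the EAL
allow-list option; the PCI address and application name below are
placeholders:

/* sketch: pass quanta_size to the iavf PMD via EAL init arguments */
char *argv[] = { "app", "-a", "18:00.0,quanta_size=2048" };
int ret = rte_eal_init(3, argv);	/* negative on failure */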

v2: Rework virtchnl.
v3: Add release note.
v4: Fix an issue where quanta size configuration would block
    device init if the PF does not support it.
v5: Update driver guide.
v6: Merge the release note with the previous patch.

Wenjun Wu (3):
  common/iavf: support queue rate limit and quanta size configuration
  net/iavf: support queue rate limit configuration
  net/iavf: support quanta size configuration

 doc/guides/nics/intel_vf.rst           |   4 +
 doc/guides/rel_notes/release_22_07.rst |   4 +
 drivers/common/iavf/virtchnl.h         |  50 +++++++
 drivers/net/iavf/iavf.h                |  16 +++
 drivers/net/iavf/iavf_ethdev.c         |  38 +++++
 drivers/net/iavf/iavf_tm.c             | 190 +++++++++++++++++++++++--
 drivers/net/iavf/iavf_vchnl.c          |  54 +++++++
 7 files changed, 348 insertions(+), 8 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v6 1/3] common/iavf: support queue rate limit and quanta size configuration
  2022-04-22  1:42 ` [PATCH v6 0/3] " Wenjun Wu
@ 2022-04-22  1:42   ` Wenjun Wu
  2022-04-22  1:42   ` [PATCH v6 2/3] net/iavf: support queue rate limit configuration Wenjun Wu
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-22  1:42 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing, qi.z.zhang

This patch adds new virtchnl opcodes and structures for rate limit
and quanta size configuration, which include:
1. VIRTCHNL_OP_CONFIG_QUEUE_BW, to configure max bandwidth for each
VF per queue.
2. VIRTCHNL_OP_CONFIG_QUANTA, to configure quanta size per queue.

Signed-off-by: Ting Xu <ting.xu@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/common/iavf/virtchnl.h | 50 ++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)
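
The virtchnl_queues_bw_cfg structure added below ends in a one-element
cfg[1] array, so senders must size the buffer for the extra per-queue
entries. A minimal sketch of building such a message, assuming
rte_zmalloc() is available; num_queues and the rate are illustrative,
and vsi_id would come from the PF:

uint16_t num_queues = 4;
uint16_t len = sizeof(struct virtchnl_queues_bw_cfg) +
	       (num_queues - 1) * sizeof(struct virtchnl_queue_bw);
struct virtchnl_queues_bw_cfg *cfg = rte_zmalloc("q_bw", len, 0);
uint16_t i;

if (cfg != NULL) {
	cfg->vsi_id = vsi_id;		/* assumed: handle from the PF */
	cfg->num_queues = num_queues;
	for (i = 0; i < num_queues; i++) {
		cfg->cfg[i].queue_id = i;
		cfg->cfg[i].shaper.peak = 10000; /* kbit/s */
	}
	/* ... send with opcode VIRTCHNL_OP_CONFIG_QUEUE_BW ... */
}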

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 3e44eca7d8..249ae6ed23 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -164,6 +164,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
 	VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
+	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
+	VIRTCHNL_OP_CONFIG_QUANTA = 113,
 	VIRTCHNL_OP_MAX,
 };
 
@@ -1872,6 +1874,23 @@ struct virtchnl_queue_tc_mapping {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);
 
+/* VIRTCHNL_OP_CONFIG_QUEUE_BW */
+struct virtchnl_queue_bw {
+	u16 queue_id;
+	u8 tc;
+	u8 pad;
+	struct virtchnl_shaper_bw shaper;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
+
+struct virtchnl_queues_bw_cfg {
+	u16 vsi_id;
+	u16 num_queues;
+	struct virtchnl_queue_bw cfg[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queues_bw_cfg);
 
 /* TX and RX queue types are valid in legacy as well as split queue models.
  * With Split Queue model, 2 additional types are introduced - TX_COMPLETION
@@ -1978,6 +1997,12 @@ struct virtchnl_queue_vector_maps {
 
 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);
 
+struct virtchnl_quanta_cfg {
+	u16 quanta_size;
+	struct virtchnl_queue_chunk queue_select;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
 
 /* Since VF messages are limited by u16 size, precalculate the maximum possible
  * values of nested elements in virtchnl structures that virtual channel can
@@ -2244,6 +2269,31 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 					 sizeof(q_tc->tc[0]);
 		}
 		break;
+	case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+		valid_len = sizeof(struct virtchnl_queues_bw_cfg);
+		if (msglen >= valid_len) {
+			struct virtchnl_queues_bw_cfg *q_bw =
+				(struct virtchnl_queues_bw_cfg *)msg;
+			if (q_bw->num_queues == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (q_bw->num_queues - 1) *
+					 sizeof(q_bw->cfg[0]);
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_QUANTA:
+		valid_len = sizeof(struct virtchnl_quanta_cfg);
+		if (msglen >= valid_len) {
+			struct virtchnl_quanta_cfg *q_quanta =
+				(struct virtchnl_quanta_cfg *)msg;
+			if (q_quanta->quanta_size == 0 ||
+			    q_quanta->queue_select.num_queues == 0) {
+				err_msg_format = true;
+				break;
+			}
+		}
+		break;
 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
 		break;
 	case VIRTCHNL_OP_ADD_VLAN_V2:
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread
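
On the receive side, the same one-element-array idiom drives the
length check added to virtchnl_vc_validate_vf_msg() above: valid_len
starts at sizeof(struct virtchnl_queues_bw_cfg) and grows by one cfg
entry per extra queue. A worked example with the sizes the patch
asserts (16-byte header struct that already contains one entry,
12-byte virtchnl_queue_bw):

/* num_queues == 4: valid_len = 16 + (4 - 1) * 12 = 52 bytes */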

* [PATCH v6 2/3] net/iavf: support queue rate limit configuration
  2022-04-22  1:42 ` [PATCH v6 0/3] " Wenjun Wu
  2022-04-22  1:42   ` [PATCH v6 1/3] common/iavf: support " Wenjun Wu
@ 2022-04-22  1:42   ` Wenjun Wu
  2022-04-22  1:43   ` [PATCH v6 3/3] net/iavf: support quanta size configuration Wenjun Wu
  2022-04-22 12:09   ` [PATCH v6 0/3] Enable queue rate limit and " Zhang, Qi Z
  3 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-22  1:42 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing, qi.z.zhang

This patch adds queue rate limit configuration support.
Only the maximum bandwidth (peak rate) is supported.

Signed-off-by: Ting Xu <ting.xu@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 doc/guides/rel_notes/release_22_07.rst |   3 +
 drivers/net/iavf/iavf.h                |  13 ++
 drivers/net/iavf/iavf_tm.c             | 190 +++++++++++++++++++++++--
 drivers/net/iavf/iavf_vchnl.c          |  23 +++
 4 files changed, 221 insertions(+), 8 deletions(-)
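
For context, a minimal sketch (not part of the patch) of how an
application could exercise this through the generic rte_tm API. It
assumes port_id, tc_node_id and queue_node_id are already known, and
it sets only the peak rate, matching the "only max bandwidth"
limitation; bucket sizes and pkt_length_adjust stay zero because
iavf_shaper_profile_param_check() below rejects them. Error handling
is trimmed.

struct rte_tm_error tm_err;
struct rte_tm_shaper_params sp = { 0 };
struct rte_tm_node_params np = { 0 };

sp.peak.rate = 1250000;	/* bytes/s, i.e. 10 Mbit/s */

/* register shaper profile 1, then attach it to a queue-level node */
rte_tm_shaper_profile_add(port_id, 1, &sp, &tm_err);

np.shaper_profile_id = 1;
rte_tm_node_add(port_id, queue_node_id, tc_node_id,
		0 /* priority */, 1 /* weight */,
		2 /* queue level in the port/TC/queue tree */,
		&np, &tm_err);

rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &tm_err);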

diff --git a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst
index 42a5f2d990..ff379ace67 100644
--- a/doc/guides/rel_notes/release_22_07.rst
+++ b/doc/guides/rel_notes/release_22_07.rst
@@ -55,6 +55,9 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel iavf driver.**
+
+  * Added Tx QoS queue rate limitation support.
 
 Removed Items
 -------------
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index a01d18e61b..96515a3ee9 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -170,11 +170,21 @@ struct iavf_tm_node {
 	uint32_t weight;
 	uint32_t reference_count;
 	struct iavf_tm_node *parent;
+	struct iavf_tm_shaper_profile *shaper_profile;
 	struct rte_tm_node_params params;
 };
 
 TAILQ_HEAD(iavf_tm_node_list, iavf_tm_node);
 
+struct iavf_tm_shaper_profile {
+	TAILQ_ENTRY(iavf_tm_shaper_profile) node;
+	uint32_t shaper_profile_id;
+	uint32_t reference_count;
+	struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(iavf_shaper_profile_list, iavf_tm_shaper_profile);
+
 /* node type of Traffic Manager */
 enum iavf_tm_node_type {
 	IAVF_TM_NODE_TYPE_PORT,
@@ -188,6 +198,7 @@ struct iavf_tm_conf {
 	struct iavf_tm_node *root; /* root node - vf vsi */
 	struct iavf_tm_node_list tc_list; /* node list for all the TCs */
 	struct iavf_tm_node_list queue_list; /* node list for all the queues */
+	struct iavf_shaper_profile_list shaper_profile_list;
 	uint32_t nb_tc_node;
 	uint32_t nb_queue_node;
 	bool committed;
@@ -451,6 +462,8 @@ int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 int iavf_request_queues(struct rte_eth_dev *dev, uint16_t num);
 int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
 int iavf_get_qos_cap(struct iavf_adapter *adapter);
+int iavf_set_q_bw(struct rte_eth_dev *dev,
+		  struct virtchnl_queues_bw_cfg *q_bw, uint16_t size);
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			struct virtchnl_queue_tc_mapping *q_tc_mapping,
 			uint16_t size);
diff --git a/drivers/net/iavf/iavf_tm.c b/drivers/net/iavf/iavf_tm.c
index 8d92062c7f..32bb3be45e 100644
--- a/drivers/net/iavf/iavf_tm.c
+++ b/drivers/net/iavf/iavf_tm.c
@@ -8,6 +8,13 @@
 static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 				 __rte_unused int clear_on_fail,
 				 __rte_unused struct rte_tm_error *error);
+static int iavf_shaper_profile_add(struct rte_eth_dev *dev,
+				   uint32_t shaper_profile_id,
+				   struct rte_tm_shaper_params *profile,
+				   struct rte_tm_error *error);
+static int iavf_shaper_profile_del(struct rte_eth_dev *dev,
+				   uint32_t shaper_profile_id,
+				   struct rte_tm_error *error);
 static int iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	      uint32_t parent_node_id, uint32_t priority,
 	      uint32_t weight, uint32_t level_id,
@@ -30,6 +37,8 @@ static int iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 		   int *is_leaf, struct rte_tm_error *error);
 
 const struct rte_tm_ops iavf_tm_ops = {
+	.shaper_profile_add = iavf_shaper_profile_add,
+	.shaper_profile_delete = iavf_shaper_profile_del,
 	.node_add = iavf_tm_node_add,
 	.node_delete = iavf_tm_node_delete,
 	.capabilities_get = iavf_tm_capabilities_get,
@@ -44,6 +53,9 @@ iavf_tm_conf_init(struct rte_eth_dev *dev)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
+	/* initialize shaper profile list */
+	TAILQ_INIT(&vf->tm_conf.shaper_profile_list);
+
 	/* initialize node configuration */
 	vf->tm_conf.root = NULL;
 	TAILQ_INIT(&vf->tm_conf.tc_list);
@@ -57,6 +69,7 @@ void
 iavf_tm_conf_uninit(struct rte_eth_dev *dev)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
 	struct iavf_tm_node *tm_node;
 
 	/* clear node configuration */
@@ -74,6 +87,14 @@ iavf_tm_conf_uninit(struct rte_eth_dev *dev)
 		rte_free(vf->tm_conf.root);
 		vf->tm_conf.root = NULL;
 	}
+
+	/* Remove all shaper profiles */
+	while ((shaper_profile =
+	       TAILQ_FIRST(&vf->tm_conf.shaper_profile_list))) {
+		TAILQ_REMOVE(&vf->tm_conf.shaper_profile_list,
+			     shaper_profile, node);
+		rte_free(shaper_profile);
+	}
 }
 
 static inline struct iavf_tm_node *
@@ -132,13 +153,6 @@ iavf_node_param_check(struct iavf_info *vf, uint32_t node_id,
 		return -EINVAL;
 	}
 
-	/* not support shaper profile */
-	if (params->shaper_profile_id) {
-		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
-		error->message = "shaper profile not supported";
-		return -EINVAL;
-	}
-
 	/* not support shared shaper */
 	if (params->shared_shaper_id) {
 		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
@@ -236,6 +250,23 @@ iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 	return 0;
 }
 
+static inline struct iavf_tm_shaper_profile *
+iavf_shaper_profile_search(struct rte_eth_dev *dev,
+			   uint32_t shaper_profile_id)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_shaper_profile_list *shaper_profile_list =
+		&vf->tm_conf.shaper_profile_list;
+	struct iavf_tm_shaper_profile *shaper_profile;
+
+	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+		if (shaper_profile_id == shaper_profile->shaper_profile_id)
+			return shaper_profile;
+	}
+
+	return NULL;
+}
+
 static int
 iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	      uint32_t parent_node_id, uint32_t priority,
@@ -246,6 +277,7 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
 	enum iavf_tm_node_type parent_node_type = IAVF_TM_NODE_TYPE_MAX;
+	struct iavf_tm_shaper_profile *shaper_profile = NULL;
 	struct iavf_tm_node *tm_node;
 	struct iavf_tm_node *parent_node;
 	uint16_t tc_nb = vf->qos_cap->num_elem;
@@ -273,6 +305,18 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 		return -EINVAL;
 	}
 
+	/* check the shaper profile id */
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		shaper_profile = iavf_shaper_profile_search(dev,
+			params->shaper_profile_id);
+		if (!shaper_profile) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "shaper profile does not exist";
+			return -EINVAL;
+		}
+	}
+
 	/* root node if not have a parent */
 	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
 		/* check level */
@@ -358,6 +402,7 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	tm_node->id = node_id;
 	tm_node->reference_count = 0;
 	tm_node->parent = parent_node;
+	tm_node->shaper_profile = shaper_profile;
 	rte_memcpy(&tm_node->params, params,
 			 sizeof(struct rte_tm_node_params));
 	if (parent_node_type == IAVF_TM_NODE_TYPE_PORT) {
@@ -373,6 +418,10 @@ iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	}
 	tm_node->parent->reference_count++;
 
+	/* increase the reference counter of the shaper profile */
+	if (shaper_profile)
+		shaper_profile->reference_count++;
+
 	return 0;
 }
 
@@ -437,6 +486,103 @@ iavf_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
 	return 0;
 }
 
+static int
+iavf_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+				struct rte_tm_error *error)
+{
+	/* min bucket size not supported */
+	if (profile->committed.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+		error->message = "committed bucket size not supported";
+		return -EINVAL;
+	}
+	/* max bucket size not supported */
+	if (profile->peak.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+		error->message = "peak bucket size not supported";
+		return -EINVAL;
+	}
+	/* length adjustment not supported */
+	if (profile->pkt_length_adjust) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+		error->message = "packet length adjustment not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+iavf_shaper_profile_add(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_shaper_params *profile,
+			struct rte_tm_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
+	int ret;
+
+	if (!profile || !error)
+		return -EINVAL;
+
+	ret = iavf_shaper_profile_param_check(profile, error);
+	if (ret)
+		return ret;
+
+	shaper_profile = iavf_shaper_profile_search(dev, shaper_profile_id);
+
+	if (shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID already exists";
+		return -EINVAL;
+	}
+
+	shaper_profile = rte_zmalloc("iavf_tm_shaper_profile",
+				     sizeof(struct iavf_tm_shaper_profile),
+				     0);
+	if (!shaper_profile)
+		return -ENOMEM;
+	shaper_profile->shaper_profile_id = shaper_profile_id;
+	rte_memcpy(&shaper_profile->profile, profile,
+			 sizeof(struct rte_tm_shaper_params));
+	TAILQ_INSERT_TAIL(&vf->tm_conf.shaper_profile_list,
+			  shaper_profile, node);
+
+	return 0;
+}
+
+static int
+iavf_shaper_profile_del(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_tm_shaper_profile *shaper_profile;
+
+	if (!error)
+		return -EINVAL;
+
+	shaper_profile = iavf_shaper_profile_search(dev, shaper_profile_id);
+
+	if (!shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID does not exist";
+		return -EINVAL;
+	}
+
+	/* don't delete a profile if it's used by one or several nodes */
+	if (shaper_profile->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "profile in use";
+		return -EINVAL;
+	}
+
+	TAILQ_REMOVE(&vf->tm_conf.shaper_profile_list, shaper_profile, node);
+	rte_free(shaper_profile);
+
+	return 0;
+}
+
 static int
 iavf_tm_capabilities_get(struct rte_eth_dev *dev,
 			 struct rte_tm_capabilities *cap,
@@ -656,10 +802,11 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct virtchnl_queue_tc_mapping *q_tc_mapping;
+	struct virtchnl_queues_bw_cfg *q_bw;
 	struct iavf_tm_node_list *queue_list = &vf->tm_conf.queue_list;
 	struct iavf_tm_node *tm_node;
 	struct iavf_qtc_map *qtc_map;
-	uint16_t size;
+	uint16_t size, size_q;
 	int index = 0, node_committed = 0;
 	int i, ret_val = IAVF_SUCCESS;
 
@@ -691,10 +838,21 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 		goto fail_clear;
 	}
 
+	size_q = sizeof(*q_bw) + sizeof(q_bw->cfg[0]) *
+		(vf->num_queue_pairs - 1);
+	q_bw = rte_zmalloc("q_bw", size_q, 0);
+	if (!q_bw) {
+		ret_val = IAVF_ERR_NO_MEMORY;
+		goto fail_clear;
+	}
+
 	q_tc_mapping->vsi_id = vf->vsi.vsi_id;
 	q_tc_mapping->num_tc = vf->qos_cap->num_elem;
 	q_tc_mapping->num_queue_pairs = vf->num_queue_pairs;
 
+	q_bw->vsi_id = vf->vsi.vsi_id;
+	q_bw->num_queues = vf->num_queue_pairs;
+
 	TAILQ_FOREACH(tm_node, queue_list, node) {
 		if (tm_node->tc >= q_tc_mapping->num_tc) {
 			PMD_DRV_LOG(ERR, "TC%d is not enabled", tm_node->tc);
@@ -702,6 +860,18 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 			goto fail_clear;
 		}
 		q_tc_mapping->tc[tm_node->tc].req.queue_count++;
+
+		if (tm_node->shaper_profile) {
+			q_bw->cfg[node_committed].queue_id = node_committed;
+			q_bw->cfg[node_committed].shaper.peak =
+			tm_node->shaper_profile->profile.peak.rate /
+			1000 * IAVF_BITS_PER_BYTE;
+			q_bw->cfg[node_committed].shaper.committed =
+			tm_node->shaper_profile->profile.committed.rate /
+			1000 * IAVF_BITS_PER_BYTE;
+			q_bw->cfg[node_committed].tc = tm_node->tc;
+		}
+
 		node_committed++;
 	}
 
@@ -712,6 +882,10 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 		goto fail_clear;
 	}
 
+	ret_val = iavf_set_q_bw(dev, q_bw, size_q);
+	if (ret_val)
+		goto fail_clear;
+
 	/* store the queue TC mapping info */
 	qtc_map = rte_zmalloc("qtc_map",
 		sizeof(struct iavf_qtc_map) * q_tc_mapping->num_tc, 0);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 169e1f2012..537369f736 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1636,6 +1636,29 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	return err;
 }
 
+int iavf_set_q_bw(struct rte_eth_dev *dev,
+		struct virtchnl_queues_bw_cfg *q_bw, uint16_t size)
+{
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_cmd_info args;
+	int err;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_CONFIG_QUEUE_BW;
+	args.in_args = (uint8_t *)q_bw;
+	args.in_args_size = size;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command"
+			    " VIRTCHNL_OP_CONFIG_QUEUE_BW");
+	return err;
+}
+
 int
 iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 			struct rte_ether_addr *mc_addrs,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v6 3/3] net/iavf: support quanta size configuration
  2022-04-22  1:42 ` [PATCH v6 0/3] " Wenjun Wu
  2022-04-22  1:42   ` [PATCH v6 1/3] common/iavf: support " Wenjun Wu
  2022-04-22  1:42   ` [PATCH v6 2/3] net/iavf: support queue rate limit configuration Wenjun Wu
@ 2022-04-22  1:43   ` Wenjun Wu
  2022-04-22 12:09   ` [PATCH v6 0/3] Enable queue rate limit and " Zhang, Qi Z
  3 siblings, 0 replies; 29+ messages in thread
From: Wenjun Wu @ 2022-04-22  1:43 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing, qi.z.zhang

This patch adds quanta size configuration support.
Quanta size should be between 256 and 4096, and be a multiple of 64.

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 doc/guides/nics/intel_vf.rst           |  4 +++
 doc/guides/rel_notes/release_22_07.rst |  1 +
 drivers/net/iavf/iavf.h                |  3 ++
 drivers/net/iavf/iavf_ethdev.c         | 38 ++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c          | 31 +++++++++++++++++++++
 5 files changed, 77 insertions(+)
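
The parse_u16() callback added below follows the rte_kvargs handler
contract, int (*)(const char *key, const char *value, void *opaque).
A minimal sketch of the calling side, with an illustrative devargs
string and key list:

static const char * const keys[] = { "quanta_size", NULL };
uint16_t quanta = 0;
struct rte_kvargs *kvlist = rte_kvargs_parse("quanta_size=2048", keys);

if (kvlist != NULL) {
	rte_kvargs_process(kvlist, "quanta_size", &parse_u16, &quanta);
	rte_kvargs_free(kvlist);
}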

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index 648af39c22..6498135655 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -92,6 +92,10 @@ For more detail on SR-IOV, please refer to the following documents:
     available for IAVF PMD. The same devargs with the same parameters can be applied to IAVF PMD, for detail please reference
     the section ``Protocol extraction for per queue`` of ice.rst.
 
+    Quanta size configuration is also supported when IAVF is backed by an Intel® E810 device by setting ``devargs``
+    parameter ``quanta_size`` like ``-a 18:00.0,quanta_size=2048``. The default value is 1024, and quanta size should be
+    set to a multiple of 64 in legacy host interface mode.
+
 The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst
index ff379ace67..f1b4057d70 100644
--- a/doc/guides/rel_notes/release_22_07.rst
+++ b/doc/guides/rel_notes/release_22_07.rst
@@ -58,6 +58,7 @@ New Features
 * **Updated Intel iavf driver.**
 
   * Added Tx QoS queue rate limitation support.
+  * Added quanta size configuration support.
 
 Removed Items
 -------------
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 96515a3ee9..c0a4a47b04 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -292,6 +292,7 @@ enum iavf_proto_xtr_type {
 struct iavf_devargs {
 	uint8_t proto_xtr_dflt;
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
+	uint16_t quanta_size;
 };
 
 struct iavf_security_ctx;
@@ -467,6 +468,8 @@ int iavf_set_q_bw(struct rte_eth_dev *dev,
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			struct virtchnl_queue_tc_mapping *q_tc_mapping,
 			uint16_t size);
+int iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id,
+			    u16 num_queues);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
 int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..7d093bdc24 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -34,9 +34,11 @@
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
+#define IAVF_QUANTA_SIZE_ARG       "quanta_size"
 
 static const char * const iavf_valid_args[] = {
 	IAVF_PROTO_XTR_ARG,
+	IAVF_QUANTA_SIZE_ARG,
 	NULL
 };
 
@@ -950,6 +952,9 @@ iavf_dev_start(struct rte_eth_dev *dev)
 		return -1;
 	}
 
+	if (iavf_set_vf_quanta_size(adapter, index, num_queue_pairs) != 0)
+		PMD_DRV_LOG(WARNING, "failed to configure quanta size");
+
 	/* If needed, send configure queues msg multiple times to make the
 	 * adminq buffer length smaller than the 4K limitation.
 	 */
@@ -2092,6 +2097,25 @@ iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
 	return 0;
 }
 
+static int
+parse_u16(__rte_unused const char *key, const char *value, void *args)
+{
+	u16 *num = (u16 *)args;
+	uint64_t tmp;
+
+	errno = 0;
+	tmp = strtoull(value, NULL, 10);
+	if (errno || !tmp || tmp > UINT16_MAX) {
+		PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u16",
+			    key, value);
+		return -1;
+	}
+
+	*num = tmp;
+
+	return 0;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2118,6 +2142,20 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 	if (ret)
 		goto bail;
 
+	ret = rte_kvargs_process(kvlist, IAVF_QUANTA_SIZE_ARG,
+				 &parse_u16, &ad->devargs.quanta_size);
+	if (ret)
+		goto bail;
+
+	if (ad->devargs.quanta_size == 0)
+		ad->devargs.quanta_size = 1024;
+
+	if (ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 ||
+	    ad->devargs.quanta_size & 0x3F) {
+		PMD_INIT_LOG(ERR, "invalid quanta size");
+		ret = -EINVAL;
+	}
+
 bail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 537369f736..f9452d14ae 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1828,3 +1828,34 @@ iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 
 	return 0;
 }
+
+int
+iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id, u16 num_queues)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	struct virtchnl_quanta_cfg q_quanta;
+	int err;
+
+	if (adapter->devargs.quanta_size == 0)
+		return 0;
+
+	q_quanta.quanta_size = adapter->devargs.quanta_size;
+	q_quanta.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
+	q_quanta.queue_select.start_queue_id = start_queue_id;
+	q_quanta.queue_select.num_queues = num_queues;
+
+	args.ops = VIRTCHNL_OP_CONFIG_QUANTA;
+	args.in_args = (uint8_t *)&q_quanta;
+	args.in_args_size = sizeof(q_quanta);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to execute command VIRTCHNL_OP_CONFIG_QUANTA");
+		return err;
+	}
+
+	return 0;
+}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* RE: [PATCH v6 0/3] Enable queue rate limit and quanta size configuration
  2022-04-22  1:42 ` [PATCH v6 0/3] " Wenjun Wu
                     ` (2 preceding siblings ...)
  2022-04-22  1:43   ` [PATCH v6 3/3] net/iavf: support quanta size configuration Wenjun Wu
@ 2022-04-22 12:09   ` Zhang, Qi Z
  3 siblings, 0 replies; 29+ messages in thread
From: Zhang, Qi Z @ 2022-04-22 12:09 UTC (permalink / raw)
  To: Wu, Wenjun1, dev, Wu,  Jingjing, Xing, Beilei



> -----Original Message-----
> From: Wu, Wenjun1 <wenjun1.wu@intel.com>
> Sent: Friday, April 22, 2022 9:43 AM
> To: dev@dpdk.org; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: [PATCH v6 0/3] Enable queue rate limit and quanta size configuration
> 
> This patch set adds queue rate limit and quanta size configuration.
> Quanta size can be changed by driver devarg quanta_size=xxx. Quanta size
> should be set to a value between 256 and 4096 and be a multiple of 64.
> 
> v2: Rework virtchnl.
> v3: Add release note.
> v4: Fix an issue where quanta size configuration would block
>     device init if the PF does not support it.
> v5: Update driver guide.
> v6: Merge the release note with the previous patch.
> 
> Wenjun Wu (3):
>   common/iavf: support queue rate limit and quanta size configuration
>   net/iavf: support queue rate limit configuration
>   net/iavf: support quanta size configuration
> 
>  doc/guides/nics/intel_vf.rst           |   4 +
>  doc/guides/rel_notes/release_22_07.rst |   4 +
>  drivers/common/iavf/virtchnl.h         |  50 +++++++
>  drivers/net/iavf/iavf.h                |  16 +++
>  drivers/net/iavf/iavf_ethdev.c         |  38 +++++
>  drivers/net/iavf/iavf_tm.c             | 190 +++++++++++++++++++++++--
>  drivers/net/iavf/iavf_vchnl.c          |  54 +++++++
>  7 files changed, 348 insertions(+), 8 deletions(-)
> 
> --
> 2.25.1

Re-applied to dpdk-next-net-intel.

Thanks
Qi


^ permalink raw reply	[flat|nested] 29+ messages in thread

end of thread, other threads:[~2022-04-22 12:09 UTC | newest]

Thread overview: 29+ messages
2022-03-29  2:07 [PATCH v1 0/3] Enable queue rate limit and quanta size configuration Wenjun Wu
2022-03-29  2:07 ` [PATCH v1 1/3] common/iavf: support " Wenjun Wu
2022-03-29  2:07 ` [PATCH v1 2/3] net/iavf: support queue rate limit configuration Wenjun Wu
2022-03-29  2:07 ` [PATCH v1 3/3] net/iavf: support quanta size configuration Wenjun Wu
2022-04-08  1:30 ` [PATCH v2 0/3] Enable queue rate limit and " Wenjun Wu
2022-04-08  1:30   ` [PATCH v2 1/3] common/iavf: support " Wenjun Wu
2022-04-08  1:30   ` [PATCH v2 2/3] net/iavf: support queue rate limit configuration Wenjun Wu
2022-04-08  1:30   ` [PATCH v2 3/3] net/iavf: support quanta size configuration Wenjun Wu
2022-04-08  5:30 ` [PATCH v3 0/4] Enable queue rate limit and " Wenjun Wu
2022-04-08  5:30   ` [PATCH v3 1/4] common/iavf: support " Wenjun Wu
2022-04-08  5:30   ` [PATCH v3 2/4] net/iavf: support queue rate limit configuration Wenjun Wu
2022-04-08  5:30   ` [PATCH v3 3/4] net/iavf: support quanta size configuration Wenjun Wu
2022-04-08  5:30   ` [PATCH v3 4/4] doc: add release notes for 22.07 Wenjun Wu
2022-04-08  8:45 ` [PATCH v4 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
2022-04-08  8:45   ` [PATCH v4 1/4] common/iavf: support " Wenjun Wu
2022-04-08  8:45   ` [PATCH v4 2/4] net/iavf: support queue rate limit configuration Wenjun Wu
2022-04-08  8:45   ` [PATCH v4 3/4] net/iavf: support quanta size configuration Wenjun Wu
2022-04-08  8:45   ` [PATCH v4 4/4] doc: add release notes for 22.07 Wenjun Wu
2022-04-19  2:05 ` [PATCH v5 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
2022-04-19  2:05   ` [PATCH v5 1/4] common/iavf: support " Wenjun Wu
2022-04-19  2:05   ` [PATCH v5 2/4] net/iavf: support queue rate limit configuration Wenjun Wu
2022-04-19  2:05   ` [PATCH v5 3/4] net/iavf: support quanta size configuration Wenjun Wu
2022-04-19  2:05   ` [PATCH v5 4/4] doc: update IAVF driver guide and 22.07 release notes Wenjun Wu
2022-04-19  2:39   ` [PATCH v5 0/4] Enable queue rate limit and quanta size configuration Zhang, Qi Z
2022-04-22  1:42 ` [PATCH v6 0/3] " Wenjun Wu
2022-04-22  1:42   ` [PATCH v6 1/3] common/iavf: support " Wenjun Wu
2022-04-22  1:42   ` [PATCH v6 2/3] net/iavf: support queue rate limit configuration Wenjun Wu
2022-04-22  1:43   ` [PATCH v6 3/3] net/iavf: support quanta size configuration Wenjun Wu
2022-04-22 12:09   ` [PATCH v6 0/3] Enable queue rate limit and " Zhang, Qi Z
