From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH 30/37] net/txgbe: support TM node add and delete
Date: Tue,  3 Nov 2020 18:08:11 +0800
Message-ID: <20201103100818.311881-31-jiawenwu@trustnetic.com>
In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com>

Support traffic manager node add and delete operations.
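
A minimal usage sketch (illustrative, not part of the patch): these ops
are reached through the generic rte_tm API via rte_tm_ops, so an
application never calls the txgbe functions directly. The sketch builds
the three-level hierarchy the driver enforces: one port (root) node,
one TC node under it, and one queue node per Tx queue. ROOT_NODE_ID and
TC_NODE_ID are arbitrary illustrative values; they only need to be at
or above the Tx queue count so the driver treats them as non-leaf
nodes, while a queue node's id must equal its Tx queue id. It also
assumes the current DCB/VT setup allows that many queues under one TC.

  #include <string.h>
  #include <rte_tm.h>

  /* Hypothetical ids: any id >= nb_tx_queues marks a non-leaf node. */
  #define ROOT_NODE_ID 1000
  #define TC_NODE_ID    900

  static int
  setup_tm_hierarchy(uint16_t port_id, uint16_t nb_tx_queues)
  {
  	struct rte_tm_node_params np;
  	struct rte_tm_error err;
  	uint16_t q;
  	int ret;

  	/* Port (root) node: no parent, priority 0, weight 1. */
  	memset(&np, 0, sizeof(np));
  	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
  	np.nonleaf.n_sp_priorities = 1;	/* the driver accepts only 1 */
  	ret = rte_tm_node_add(port_id, ROOT_NODE_ID,
  			      RTE_TM_NODE_ID_NULL, 0, 1,
  			      RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
  	if (ret)
  		return ret;

  	/* TC node under the root. */
  	ret = rte_tm_node_add(port_id, TC_NODE_ID, ROOT_NODE_ID,
  			      0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
  	if (ret)
  		return ret;

  	/* Queue (leaf) nodes: node id == Tx queue id. */
  	for (q = 0; q < nb_tx_queues; q++) {
  		memset(&np, 0, sizeof(np));
  		np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
  		np.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
  		ret = rte_tm_node_add(port_id, q, TC_NODE_ID, 0, 1,
  				      RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
  		if (ret)
  			return ret;
  	}
  	return 0;
  }

Tear-down runs in the opposite order: rte_tm_node_delete() rejects a
node whose reference count is non-zero, so queues are deleted before
their TC and the TC before the root. Both add and delete fail once the
hierarchy has been committed.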

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/txgbe/txgbe_ethdev.h |   1 +
 drivers/net/txgbe/txgbe_tm.c     | 488 +++++++++++++++++++++++++++++++
 2 files changed, 489 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index cf377809e..1d90c6a06 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -14,6 +14,7 @@
 #include <rte_hash.h>
 #include <rte_hash_crc.h>
 #include <rte_ethdev.h>
+#include <rte_bus_pci.h>
 #include <rte_tm_driver.h>
 
 /* need update link, bit flag */
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 8adb03825..6dd593e54 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -16,6 +16,15 @@ static int txgbe_shaper_profile_add(struct rte_eth_dev *dev,
 static int txgbe_shaper_profile_del(struct rte_eth_dev *dev,
 				    uint32_t shaper_profile_id,
 				    struct rte_tm_error *error);
+static int txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+			  uint32_t parent_node_id, uint32_t priority,
+			  uint32_t weight, uint32_t level_id,
+			  struct rte_tm_node_params *params,
+			  struct rte_tm_error *error);
+static int txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+			     struct rte_tm_error *error);
+static int txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+			       int *is_leaf, struct rte_tm_error *error);
 static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
 					uint32_t level_id,
 					struct rte_tm_level_capabilities *cap,
@@ -29,6 +38,9 @@ const struct rte_tm_ops txgbe_tm_ops = {
 	.capabilities_get = txgbe_tm_capabilities_get,
 	.shaper_profile_add = txgbe_shaper_profile_add,
 	.shaper_profile_delete = txgbe_shaper_profile_del,
+	.node_add = txgbe_node_add,
+	.node_delete = txgbe_node_delete,
+	.node_type_get = txgbe_node_type_get,
 	.level_capabilities_get = txgbe_level_capabilities_get,
 	.node_capabilities_get = txgbe_node_capabilities_get,
 };
@@ -332,6 +344,482 @@ txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
 	return NULL;
 }
 
+static void
+txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
+			uint16_t *base, uint16_t *nb)
+{
+	uint8_t nb_tcs = txgbe_tc_nb_get(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	uint16_t vf_num = pci_dev->max_vfs;
+
+	*base = 0;
+	*nb = 0;
+
+	/* VT on */
+	if (vf_num) {
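+		/*
+		 * Queues are grouped into per-pool blocks: 8, 4 or 2
+		 * queues per pool without DCB, or one queue per TC
+		 * with DCB. The PF's range starts right after the
+		 * vf_num VF pools.
+		 */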
+		/* no DCB */
+		if (nb_tcs == 1) {
+			if (vf_num >= ETH_32_POOLS) {
+				*nb = 2;
+				*base = vf_num * 2;
+			} else if (vf_num >= ETH_16_POOLS) {
+				*nb = 4;
+				*base = vf_num * 4;
+			} else {
+				*nb = 8;
+				*base = vf_num * 8;
+			}
+		} else {
+			/* DCB */
+			*nb = 1;
+			*base = vf_num * nb_tcs + tc_node_no;
+		}
+	} else {
+		/* VT off */
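+		/*
+		 * Without SR-IOV the 128 queues are statically split
+		 * among the TCs: 32/32/16/16/8/8/8/8 with 8 TCs,
+		 * 64/32/16/16 with 4 TCs.
+		 */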
+		if (nb_tcs == ETH_8_TCS) {
+			switch (tc_node_no) {
+			case 0:
+				*base = 0;
+				*nb = 32;
+				break;
+			case 1:
+				*base = 32;
+				*nb = 32;
+				break;
+			case 2:
+				*base = 64;
+				*nb = 16;
+				break;
+			case 3:
+				*base = 80;
+				*nb = 16;
+				break;
+			case 4:
+				*base = 96;
+				*nb = 8;
+				break;
+			case 5:
+				*base = 104;
+				*nb = 8;
+				break;
+			case 6:
+				*base = 112;
+				*nb = 8;
+				break;
+			case 7:
+				*base = 120;
+				*nb = 8;
+				break;
+			default:
+				return;
+			}
+		} else {
+			switch (tc_node_no) {
+			/**
+			 * If no VF and no DCB, only 64 queues can be used.
+			 * This case is also covered by "case 0".
+			 */
+			case 0:
+				*base = 0;
+				*nb = 64;
+				break;
+			case 1:
+				*base = 64;
+				*nb = 32;
+				break;
+			case 2:
+				*base = 96;
+				*nb = 16;
+				break;
+			case 3:
+				*base = 112;
+				*nb = 16;
+				break;
+			default:
+				return;
+			}
+		}
+	}
+}
+
+static int
+txgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
+		       uint32_t priority, uint32_t weight,
+		       struct rte_tm_node_params *params,
+		       struct rte_tm_error *error)
+{
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		return -EINVAL;
+	}
+
+	if (priority) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+		error->message = "priority should be 0";
+		return -EINVAL;
+	}
+
+	if (weight != 1) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+		error->message = "weight must be 1";
+		return -EINVAL;
+	}
+
+	/* not support shared shaper */
+	if (params->shared_shaper_id) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+		error->message = "shared shaper not supported";
+		return -EINVAL;
+	}
+	if (params->n_shared_shapers) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+		error->message = "shared shaper not supported";
+		return -EINVAL;
+	}
+
+	/* for non-leaf node */
+	if (node_id >= dev->data->nb_tx_queues) {
+		/* check the unsupported parameters */
+		if (params->nonleaf.wfq_weight_mode) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+			error->message = "WFQ not supported";
+			return -EINVAL;
+		}
+		if (params->nonleaf.n_sp_priorities != 1) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+			error->message = "SP priority not supported";
+			return -EINVAL;
+		} else if (params->nonleaf.wfq_weight_mode &&
+			   !(*params->nonleaf.wfq_weight_mode)) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+			error->message = "WFQ should be byte mode";
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	/* for leaf node */
+	/* check the unsupported parameters */
+	if (params->leaf.cman) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+		error->message = "Congestion management not supported";
+		return -EINVAL;
+	}
+	if (params->leaf.wred.wred_profile_id !=
+	    RTE_TM_WRED_PROFILE_ID_NONE) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+	if (params->leaf.wred.shared_wred_context_id) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+	if (params->leaf.wred.n_shared_wred_contexts) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * Now the TC and queue configuration is controlled by DCB.
+ * We need to check whether the node configuration follows the DCB configuration.
+ * In the future, we may use TM to cover DCB.
+ */
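+/*
+ * The hierarchy has three levels: port (root), TC and queue. A child
+ * must sit exactly one level below its parent, and a queue node's id
+ * is the Tx queue id, so it must be below the Tx queue count.
+ */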
+static int
+txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+	       uint32_t parent_node_id, uint32_t priority,
+	       uint32_t weight, uint32_t level_id,
+	       struct rte_tm_node_params *params,
+	       struct rte_tm_error *error)
+{
+	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+	enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
+	enum txgbe_tm_node_type parent_node_type = TXGBE_TM_NODE_TYPE_MAX;
+	struct txgbe_tm_shaper_profile *shaper_profile = NULL;
+	struct txgbe_tm_node *tm_node;
+	struct txgbe_tm_node *parent_node;
+	uint8_t nb_tcs;
+	uint16_t q_base = 0;
+	uint16_t q_nb = 0;
+	int ret;
+
+	if (!params || !error)
+		return -EINVAL;
+
+	/* if already committed */
+	if (tm_conf->committed) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "already committed";
+		return -EINVAL;
+	}
+
+	ret = txgbe_node_param_check(dev, node_id, priority, weight,
+				     params, error);
+	if (ret)
+		return ret;
+
+	/* check if the node ID is already used */
+	if (txgbe_tm_node_search(dev, node_id, &node_type)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "node id already used";
+		return -EINVAL;
+	}
+
+	/* check the shaper profile id */
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		shaper_profile = txgbe_shaper_profile_search(dev,
+					params->shaper_profile_id);
+		if (!shaper_profile) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "shaper profile does not exist";
+			return -EINVAL;
+		}
+	}
+
+	/* root node if it has no parent */
+	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+		/* check level */
+		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+		    level_id > TXGBE_TM_NODE_TYPE_PORT) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+			error->message = "Wrong level";
+			return -EINVAL;
+		}
+
+		/* obviously no more than one root */
+		if (tm_conf->root) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+			error->message = "already have a root";
+			return -EINVAL;
+		}
+
+		/* add the root node */
+		tm_node = rte_zmalloc("txgbe_tm_node",
+				      sizeof(struct txgbe_tm_node),
+				      0);
+		if (!tm_node)
+			return -ENOMEM;
+		tm_node->id = node_id;
+		tm_node->priority = priority;
+		tm_node->weight = weight;
+		tm_node->reference_count = 0;
+		tm_node->no = 0;
+		tm_node->parent = NULL;
+		tm_node->shaper_profile = shaper_profile;
+		rte_memcpy(&tm_node->params, params,
+				 sizeof(struct rte_tm_node_params));
+		tm_conf->root = tm_node;
+
+		/* increase the reference counter of the shaper profile */
+		if (shaper_profile)
+			shaper_profile->reference_count++;
+
+		return 0;
+	}
+
+	/* TC or queue node */
+	/* check the parent node */
+	parent_node = txgbe_tm_node_search(dev, parent_node_id,
+					   &parent_node_type);
+	if (!parent_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "parent does not exist";
+		return -EINVAL;
+	}
+	if (parent_node_type != TXGBE_TM_NODE_TYPE_PORT &&
+	    parent_node_type != TXGBE_TM_NODE_TYPE_TC) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "parent is not port or TC";
+		return -EINVAL;
+	}
+	/* check level */
+	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+	    level_id != parent_node_type + 1) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+		error->message = "Wrong level";
+		return -EINVAL;
+	}
+
+	/* check the node number */
+	if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
+		/* check TC number */
+		nb_tcs = txgbe_tc_nb_get(dev);
+		if (tm_conf->nb_tc_node >= nb_tcs) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "too many TCs";
+			return -EINVAL;
+		}
+	} else {
+		/* check queue number */
+		if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "too many queues";
+			return -EINVAL;
+		}
+
+		txgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
+		if (parent_node->reference_count >= q_nb) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "more queues than the TC supports";
+			return -EINVAL;
+		}
+
+		/**
+		 * Check the node id.
+		 * For a queue node, the node id is the queue id.
+		 */
+		if (node_id >= dev->data->nb_tx_queues) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "too large queue id";
+			return -EINVAL;
+		}
+	}
+
+	/* add the TC or queue node */
+	tm_node = rte_zmalloc("txgbe_tm_node",
+			      sizeof(struct txgbe_tm_node),
+			      0);
+	if (!tm_node)
+		return -ENOMEM;
+	tm_node->id = node_id;
+	tm_node->priority = priority;
+	tm_node->weight = weight;
+	tm_node->reference_count = 0;
+	tm_node->parent = parent_node;
+	tm_node->shaper_profile = shaper_profile;
+	rte_memcpy(&tm_node->params, params,
+			 sizeof(struct rte_tm_node_params));
+	if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
+		tm_node->no = parent_node->reference_count;
+		TAILQ_INSERT_TAIL(&tm_conf->tc_list,
+				  tm_node, node);
+		tm_conf->nb_tc_node++;
+	} else {
+		tm_node->no = q_base + parent_node->reference_count;
+		TAILQ_INSERT_TAIL(&tm_conf->queue_list,
+				  tm_node, node);
+		tm_conf->nb_queue_node++;
+	}
+	tm_node->parent->reference_count++;
+
+	/* increase the reference counter of the shaper profile */
+	if (shaper_profile)
+		shaper_profile->reference_count++;
+
+	return 0;
+}
+
+static int
+txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+		  struct rte_tm_error *error)
+{
+	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+	enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
+	struct txgbe_tm_node *tm_node;
+
+	if (!error)
+		return -EINVAL;
+
+	/* if already committed */
+	if (tm_conf->committed) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "already committed";
+		return -EINVAL;
+	}
+
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		return -EINVAL;
+	}
+
+	/* check if the node id exists */
+	tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	/* the node should have no child */
+	if (tm_node->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message =
+			"cannot delete a node which has children";
+		return -EINVAL;
+	}
+
+	/* root node */
+	if (node_type == TXGBE_TM_NODE_TYPE_PORT) {
+		if (tm_node->shaper_profile)
+			tm_node->shaper_profile->reference_count--;
+		rte_free(tm_node);
+		tm_conf->root = NULL;
+		return 0;
+	}
+
+	/* TC or queue node */
+	if (tm_node->shaper_profile)
+		tm_node->shaper_profile->reference_count--;
+	tm_node->parent->reference_count--;
+	if (node_type == TXGBE_TM_NODE_TYPE_TC) {
+		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
+		tm_conf->nb_tc_node--;
+	} else {
+		TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
+		tm_conf->nb_queue_node--;
+	}
+	rte_free(tm_node);
+
+	return 0;
+}
+
+static int
+txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+		    int *is_leaf, struct rte_tm_error *error)
+{
+	enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
+	struct txgbe_tm_node *tm_node;
+
+	if (!is_leaf || !error)
+		return -EINVAL;
+
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		return -EINVAL;
+	}
+
+	/* check if the node id exists */
+	tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	if (node_type == TXGBE_TM_NODE_TYPE_QUEUE)
+		*is_leaf = true;
+	else
+		*is_leaf = false;
+
+	return 0;
+}
+
 static int
 txgbe_level_capabilities_get(struct rte_eth_dev *dev,
 			     uint32_t level_id,
-- 
2.18.4





Thread overview: 39+ messages
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 01/37] net/txgbe: add ntuple filter init and uninit Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 02/37] net/txgbe: support ntuple filter add and delete Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 03/37] net/txgbe: add ntuple parse rule Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 04/37] net/txgbe: support ntuple filter remove operation Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 05/37] net/txgbe: support ethertype filter add and delete Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 06/37] net/txgbe: add ethertype parse rule Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 07/37] net/txgbe: support syn filter add and delete Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 08/37] net/txgbe: add syn filter parse rule Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 09/37] net/txgbe: add L2 tunnel filter init and uninit Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 10/37] net/txgbe: config L2 tunnel filter with e-tag Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 11/37] net/txgbe: support L2 tunnel filter add and delete Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 12/37] net/txgbe: add L2 tunnel filter parse rule Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 13/37] net/txgbe: add FDIR filter init and uninit Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 14/37] net/txgbe: configure FDIR filter Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 15/37] net/txgbe: support FDIR add and delete operations Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 16/37] net/txgbe: add FDIR parse normal rule Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 17/37] net/txgbe: add FDIR parse tunnel rule Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 18/37] net/txgbe: add FDIR restore operation Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 19/37] net/txgbe: add RSS filter parse rule Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 20/37] net/txgbe: add RSS filter restore operation Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 21/37] net/txgbe: add filter list init and uninit Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 22/37] net/txgbe: add flow API Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 23/37] net/txgbe: add flow API create function Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 24/37] net/txgbe: add flow API destroy function Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 25/37] net/txgbe: add flow API flush function Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 26/37] net/txgbe: support UDP tunnel port add and delete Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 27/37] net/txgbe: add TM configuration init and uninit Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 28/37] net/txgbe: add TM capabilities get operation Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 29/37] net/txgbe: support TM shaper profile add and delete Jiawen Wu
2020-11-03 10:08 ` Jiawen Wu [this message]
2020-11-03 10:08 ` [dpdk-dev] [PATCH 31/37] net/txgbe: add TM hierarchy commit Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 32/37] net/txgbe: add macsec setting Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 33/37] net/txgbe: add IPsec context creation Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 34/37] net/txgbe: add security session create operation Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 35/37] net/txgbe: support security session destroy Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 36/37] net/txgbe: add security offload in Rx and Tx process Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 37/37] net/txgbe: add security type in flow action Jiawen Wu
2020-11-09 19:21 ` [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Ferruh Yigit
