From: Mingxia Liu <mingxia.liu@intel.com>
To: dev@dpdk.org, qi.z.zhang@intel.com, jingjing.wu@intel.com,
	beilei.xing@intel.com
Cc: Mingxia Liu <mingxia.liu@intel.com>
Subject: [PATCH v5 2/6] common/idpf: add RSS set/get ops
Date: Tue,  7 Feb 2023 10:08:55 +0000
Message-ID: <20230207100859.2401709-3-mingxia.liu@intel.com>
In-Reply-To: <20230207100859.2401709-1-mingxia.liu@intel.com>

Add support for these device ops:
- rss_reta_update
- rss_reta_query
- rss_hash_update
- rss_hash_conf_get
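
Below is a minimal application-level sketch (not part of this patch)
of how these ops are reached through the generic ethdev API. The port
id, key buffer size, RETA bound, and chosen hash protocols are
illustrative assumptions, and the helper name is hypothetical:

  #include <string.h>
  #include <rte_ethdev.h>

  /* Hypothetical helper: round-trip the RSS configuration of an
   * already configured and started port through the four new ops.
   * Assumes reta_size <= 256, so four entry64 groups are enough.
   */
  static int
  example_rss_roundtrip(uint16_t port_id)
  {
      struct rte_eth_rss_reta_entry64 reta_conf[4];
      struct rte_eth_dev_info dev_info;
      struct rte_eth_rss_conf rss_conf;
      uint8_t key[64];
      uint16_t i;
      int ret;

      ret = rte_eth_dev_info_get(port_id, &dev_info);
      if (ret != 0)
          return ret;

      /* rss_hash_conf_get: read back the current hash types and key */
      memset(&rss_conf, 0, sizeof(rss_conf));
      rss_conf.rss_key = key;
      rss_conf.rss_key_len = RTE_MIN((size_t)dev_info.hash_key_size,
                                     sizeof(key));
      ret = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
      if (ret != 0)
          return ret;

      /* rss_hash_update: hash IPv4 TCP/UDP flows, keep the same key */
      rss_conf.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
                        RTE_ETH_RSS_NONFRAG_IPV4_UDP;
      ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
      if (ret != 0)
          return ret;

      /* reta_query + reta_update: read the redirection table and
       * write it back unchanged.
       */
      memset(reta_conf, 0, sizeof(reta_conf));
      for (i = 0; i < dev_info.reta_size; i++)
          reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
              1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
      ret = rte_eth_dev_rss_reta_query(port_id, reta_conf,
                                       dev_info.reta_size);
      if (ret != 0)
          return ret;
      return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                                         dev_info.reta_size);
  }

On an idpf port these calls land in the idpf_rss_* handlers registered
in idpf_eth_dev_ops below.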

Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
 drivers/common/idpf/idpf_common_device.h   |   1 +
 drivers/common/idpf/idpf_common_virtchnl.c | 119 +++++++++
 drivers/common/idpf/idpf_common_virtchnl.h |   6 +
 drivers/common/idpf/version.map            |   3 +
 drivers/net/idpf/idpf_ethdev.c             | 268 +++++++++++++++++++++
 drivers/net/idpf/idpf_ethdev.h             |   3 +-
 6 files changed, 399 insertions(+), 1 deletion(-)

diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 1d8e7d405a..7abc4d2a3a 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -98,6 +98,7 @@ struct idpf_vport {
 	uint32_t *rss_lut;
 	uint8_t *rss_key;
 	uint64_t rss_hf;
+	uint64_t last_general_rss_hf;
 
 	/* MSIX info*/
 	struct virtchnl2_queue_vector *qv_map; /* queue vector mapping */
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 40cff34c09..10cfa33704 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -218,6 +218,9 @@ idpf_vc_cmd_execute(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
 	case VIRTCHNL2_OP_ALLOC_VECTORS:
 	case VIRTCHNL2_OP_DEALLOC_VECTORS:
 	case VIRTCHNL2_OP_GET_STATS:
+	case VIRTCHNL2_OP_GET_RSS_KEY:
+	case VIRTCHNL2_OP_GET_RSS_HASH:
+	case VIRTCHNL2_OP_GET_RSS_LUT:
 		/* for init virtchnl ops, need to poll the response */
 		err = idpf_vc_one_msg_read(adapter, args->ops, args->out_size, args->out_buffer);
 		clear_cmd(adapter);
@@ -448,6 +451,48 @@ idpf_vc_rss_key_set(struct idpf_vport *vport)
 	return err;
 }
 
+int idpf_vc_rss_key_get(struct idpf_vport *vport)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_rss_key *rss_key_ret;
+	struct virtchnl2_rss_key rss_key;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&rss_key, 0, sizeof(rss_key));
+	rss_key.vport_id = vport->vport_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_GET_RSS_KEY;
+	args.in_args = (uint8_t *)&rss_key;
+	args.in_args_size = sizeof(rss_key);
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(adapter, &args);
+
+	if (!err) {
+		rss_key_ret = (struct virtchnl2_rss_key *)args.out_buffer;
+		if (rss_key_ret->key_len != vport->rss_key_size) {
+			rte_free(vport->rss_key);
+			vport->rss_key = NULL;
+			vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
+						      rss_key_ret->key_len);
+			vport->rss_key = rte_zmalloc("rss_key", vport->rss_key_size, 0);
+			if (!vport->rss_key) {
+				vport->rss_key_size = 0;
+				DRV_LOG(ERR, "Failed to allocate RSS key");
+				return -ENOMEM;
+			}
+		}
+		rte_memcpy(vport->rss_key, rss_key_ret->key, vport->rss_key_size);
+	} else {
+		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_KEY");
+	}
+
+	return err;
+}
+
 int
 idpf_vc_rss_lut_set(struct idpf_vport *vport)
 {
@@ -482,6 +527,80 @@ idpf_vc_rss_lut_set(struct idpf_vport *vport)
 	return err;
 }
 
+int
+idpf_vc_rss_lut_get(struct idpf_vport *vport)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_rss_lut *rss_lut_ret;
+	struct virtchnl2_rss_lut rss_lut;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&rss_lut, 0, sizeof(rss_lut));
+	rss_lut.vport_id = vport->vport_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_GET_RSS_LUT;
+	args.in_args = (uint8_t *)&rss_lut;
+	args.in_args_size = sizeof(rss_lut);
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(adapter, &args);
+
+	if (!err) {
+		rss_lut_ret = (struct virtchnl2_rss_lut *)args.out_buffer;
+		if (rss_lut_ret->lut_entries != vport->rss_lut_size) {
+			rte_free(vport->rss_lut);
+			vport->rss_lut = NULL;
+			vport->rss_lut = rte_zmalloc("rss_lut",
+				     sizeof(uint32_t) * rss_lut_ret->lut_entries, 0);
+			if (vport->rss_lut == NULL) {
+				DRV_LOG(ERR, "Failed to allocate RSS lut");
+				return -ENOMEM;
+			}
+		}
+		rte_memcpy(vport->rss_lut, rss_lut_ret->lut, rss_lut_ret->lut_entries * sizeof(uint32_t));
+		vport->rss_lut_size = rss_lut_ret->lut_entries;
+	} else {
+		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_LUT");
+	}
+
+	return err;
+}
+
+int
+idpf_vc_rss_hash_get(struct idpf_vport *vport)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_rss_hash *rss_hash_ret;
+	struct virtchnl2_rss_hash rss_hash;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&rss_hash, 0, sizeof(rss_hash));
+	rss_hash.ptype_groups = vport->rss_hf;
+	rss_hash.vport_id = vport->vport_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_GET_RSS_HASH;
+	args.in_args = (uint8_t *)&rss_hash;
+	args.in_args_size = sizeof(rss_hash);
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(adapter, &args);
+
+	if (!err) {
+		rss_hash_ret = (struct virtchnl2_rss_hash *)args.out_buffer;
+		vport->rss_hf = rss_hash_ret->ptype_groups;
+	} else {
+		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_HASH");
+	}
+
+	return err;
+}
+
 int
 idpf_vc_rss_hash_set(struct idpf_vport *vport)
 {
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 6b94fd5b8f..205d1a932d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -52,4 +52,10 @@ int idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq);
 __rte_internal
 int idpf_vc_stats_query(struct idpf_vport *vport,
 			struct virtchnl2_vport_stats **pstats);
+__rte_internal
+int idpf_vc_rss_key_get(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_rss_lut_get(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_rss_hash_get(struct idpf_vport *vport);
 #endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index e6a02828ba..f6c92e7e57 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -42,8 +42,11 @@ INTERNAL {
 	idpf_vc_ptype_info_query;
 	idpf_vc_queue_switch;
 	idpf_vc_queues_ena_dis;
+	idpf_vc_rss_hash_get;
 	idpf_vc_rss_hash_set;
+	idpf_vc_rss_key_get;
 	idpf_vc_rss_key_set;
+	idpf_vc_rss_lut_get;
 	idpf_vc_rss_lut_set;
 	idpf_vc_rxq_config;
 	idpf_vc_stats_query;
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 02ddb0330a..7262109d0a 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -29,6 +29,56 @@ static const char * const idpf_valid_args[] = {
 	NULL
 };
 
+static const uint64_t idpf_map_hena_rss[] = {
+	[IDPF_HASH_NONF_UNICAST_IPV4_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	[IDPF_HASH_NONF_MULTICAST_IPV4_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	[IDPF_HASH_NONF_IPV4_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	[IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK] =
+			RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	[IDPF_HASH_NONF_IPV4_TCP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	[IDPF_HASH_NONF_IPV4_SCTP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
+	[IDPF_HASH_NONF_IPV4_OTHER] =
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+	[IDPF_HASH_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
+
+	/* IPv6 */
+	[IDPF_HASH_NONF_UNICAST_IPV6_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	[IDPF_HASH_NONF_MULTICAST_IPV6_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	[IDPF_HASH_NONF_IPV6_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	[IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK] =
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+	[IDPF_HASH_NONF_IPV6_TCP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+	[IDPF_HASH_NONF_IPV6_SCTP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
+	[IDPF_HASH_NONF_IPV6_OTHER] =
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+	[IDPF_HASH_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
+
+	/* L2 Payload */
+	[IDPF_HASH_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
+};
+
+static const uint64_t idpf_ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			  RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+			  RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+			  RTE_ETH_RSS_FRAG_IPV4;
+
+static const uint64_t idpf_ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			  RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+			  RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+			  RTE_ETH_RSS_FRAG_IPV6;
+
 static int
 idpf_dev_link_update(struct rte_eth_dev *dev,
 		     __rte_unused int wait_to_complete)
@@ -59,6 +109,9 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mtu = vport->max_mtu;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
+	dev_info->hash_key_size = vport->rss_key_size;
+	dev_info->reta_size = vport->rss_lut_size;
+
 	dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
@@ -221,6 +274,36 @@ idpf_dev_stats_reset(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int idpf_config_rss_hf(struct idpf_vport *vport, uint64_t rss_hf)
+{
+	uint64_t hena = 0;
+	uint16_t i;
+
+	/*
+	 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 are generalizations of
+	 * all the other IPv4 and IPv6 RSS types.
+	 */
+	if (rss_hf & RTE_ETH_RSS_IPV4)
+		rss_hf |= idpf_ipv4_rss;
+
+	if (rss_hf & RTE_ETH_RSS_IPV6)
+		rss_hf |= idpf_ipv6_rss;
+
+	for (i = 0; i < RTE_DIM(idpf_map_hena_rss); i++) {
+		if (idpf_map_hena_rss[i] & rss_hf)
+			hena |= BIT_ULL(i);
+	}
+
+	/*
+	 * At present, the CP doesn't process the virtual channel message
+	 * for rss_hf configuration, so only a warning is logged below.
+	 */
+	if (hena != vport->rss_hf)
+		PMD_DRV_LOG(WARNING, "Updating RSS Hash Function is not supported at present.");
+
+	return 0;
+}
+
 static int
 idpf_init_rss(struct idpf_vport *vport)
 {
@@ -257,6 +340,187 @@ idpf_init_rss(struct idpf_vport *vport)
 	return ret;
 }
 
+static int
+idpf_rss_reta_update(struct rte_eth_dev *dev,
+		     struct rte_eth_rss_reta_entry64 *reta_conf,
+		     uint16_t reta_size)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	uint16_t idx, shift;
+	int ret = 0;
+	uint16_t i;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	if (reta_size != vport->rss_lut_size) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
+				 "(%d) doesn't match the number the hardware "
+				 "can support (%d)",
+			    reta_size, vport->rss_lut_size);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
+		if (reta_conf[idx].mask & (1ULL << shift))
+			vport->rss_lut[i] = reta_conf[idx].reta[shift];
+	}
+
+	/* send virtchnl ops to configure RSS */
+	ret = idpf_vc_rss_lut_set(vport);
+	if (ret)
+		PMD_INIT_LOG(ERR, "Failed to configure RSS lut");
+
+	return ret;
+}
+
+static int
+idpf_rss_reta_query(struct rte_eth_dev *dev,
+		    struct rte_eth_rss_reta_entry64 *reta_conf,
+		    uint16_t reta_size)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	uint16_t idx, shift;
+	int ret = 0;
+	uint16_t i;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	if (reta_size != vport->rss_lut_size) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
+			"(%d) doesn't match the number the hardware "
+			"can support (%d)", reta_size, vport->rss_lut_size);
+		return -EINVAL;
+	}
+
+	ret = idpf_vc_rss_lut_get(vport);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get RSS LUT");
+		return ret;
+	}
+
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
+		if (reta_conf[idx].mask & (1ULL << shift))
+			reta_conf[idx].reta[shift] = vport->rss_lut[i];
+	}
+
+	return 0;
+}
+
+static int
+idpf_rss_hash_update(struct rte_eth_dev *dev,
+		     struct rte_eth_rss_conf *rss_conf)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	int ret = 0;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
+		PMD_DRV_LOG(DEBUG, "No key to be configured");
+		goto skip_rss_key;
+	} else if (rss_conf->rss_key_len != vport->rss_key_size) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash key "
+				 "(%d) doesn't match the size the hardware "
+				 "can support (%d)",
+			    rss_conf->rss_key_len,
+			    vport->rss_key_size);
+		return -EINVAL;
+	}
+
+	rte_memcpy(vport->rss_key, rss_conf->rss_key,
+		   vport->rss_key_size);
+	ret = idpf_vc_rss_key_set(vport);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to configure RSS key");
+		return ret;
+	}
+
+skip_rss_key:
+	ret = idpf_config_rss_hf(vport, rss_conf->rss_hf);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to configure RSS hash");
+		return ret;
+	}
+
+	return 0;
+}
+
+static uint64_t
+idpf_map_general_rss_hf(uint64_t config_rss_hf, uint64_t last_general_rss_hf)
+{
+	uint64_t valid_rss_hf = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DIM(idpf_map_hena_rss); i++) {
+		uint64_t bit = BIT_ULL(i);
+
+		if (bit & config_rss_hf)
+			valid_rss_hf |= idpf_map_hena_rss[i];
+	}
+
+	if (valid_rss_hf & idpf_ipv4_rss)
+		valid_rss_hf |= last_general_rss_hf & RTE_ETH_RSS_IPV4;
+
+	if (valid_rss_hf & idpf_ipv6_rss)
+		valid_rss_hf |= last_general_rss_hf & RTE_ETH_RSS_IPV6;
+
+	return valid_rss_hf;
+}
+
+static int
+idpf_rss_hash_conf_get(struct rte_eth_dev *dev,
+		       struct rte_eth_rss_conf *rss_conf)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	int ret = 0;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	ret = idpf_vc_rss_hash_get(vport);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get RSS hf");
+		return ret;
+	}
+
+	rss_conf->rss_hf = idpf_map_general_rss_hf(vport->rss_hf, vport->last_general_rss_hf);
+
+	if (!rss_conf->rss_key)
+		return 0;
+
+	ret = idpf_vc_rss_key_get(vport);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get RSS key");
+		return ret;
+	}
+
+	if (rss_conf->rss_key_len > vport->rss_key_size)
+		rss_conf->rss_key_len = vport->rss_key_size;
+
+	rte_memcpy(rss_conf->rss_key, vport->rss_key, rss_conf->rss_key_len);
+
+	return 0;
+}
+
 static int
 idpf_dev_configure(struct rte_eth_dev *dev)
 {
@@ -692,6 +956,10 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
 	.dev_supported_ptypes_get	= idpf_dev_supported_ptypes_get,
 	.stats_get			= idpf_dev_stats_get,
 	.stats_reset			= idpf_dev_stats_reset,
+	.reta_update			= idpf_rss_reta_update,
+	.reta_query			= idpf_rss_reta_query,
+	.rss_hash_update		= idpf_rss_hash_update,
+	.rss_hash_conf_get		= idpf_rss_hash_conf_get,
 };
 
 static uint16_t
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index d791d402fb..839a2bd82c 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -48,7 +48,8 @@
 		RTE_ETH_RSS_NONFRAG_IPV6_TCP    |	\
 		RTE_ETH_RSS_NONFRAG_IPV6_UDP    |	\
 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP   |	\
-		RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
+		RTE_ETH_RSS_NONFRAG_IPV6_OTHER  |	\
+		RTE_ETH_RSS_L2_PAYLOAD)
 
 #define IDPF_ADAPTER_NAME_LEN	(PCI_PRI_STR_SIZE + 1)
 
-- 
2.25.1


Thread overview: 63+ messages
2022-12-16  9:36 [PATCH 0/7] add idpf pmd enhancement features Mingxia Liu
2022-12-16  9:37 ` [PATCH 1/7] common/idpf: add hw statistics Mingxia Liu
2022-12-16  9:37 ` [PATCH 2/7] common/idpf: add RSS set/get ops Mingxia Liu
2022-12-16  9:37 ` [PATCH 3/7] common/idpf: support single q scatter RX datapath Mingxia Liu
2022-12-16  9:37 ` [PATCH 4/7] common/idpf: add rss_offload hash in singleq rx Mingxia Liu
2022-12-16  9:37 ` [PATCH 5/7] common/idpf: add alarm to support handle vchnl message Mingxia Liu
2022-12-16  9:37 ` [PATCH 6/7] common/idpf: add xstats ops Mingxia Liu
2022-12-16  9:37 ` [PATCH 7/7] common/idpf: update mbuf_alloc_failed multi-thread process Mingxia Liu
2023-01-11  7:15 ` [PATCH 0/6] add idpf pmd enhancement features Mingxia Liu
2023-01-11  7:15   ` [PATCH v2 1/6] common/idpf: add hw statistics Mingxia Liu
2023-01-11  7:15   ` [PATCH v2 2/6] common/idpf: add RSS set/get ops Mingxia Liu
2023-01-11  7:15   ` [PATCH v2 3/6] common/idpf: support single q scatter RX datapath Mingxia Liu
2023-01-11  7:15   ` [PATCH v2 4/6] common/idpf: add rss_offload hash in singleq rx Mingxia Liu
2023-01-11  7:15   ` [PATCH v2 5/6] common/idpf: add alarm to support handle vchnl message Mingxia Liu
2023-01-11  7:15   ` [PATCH v2 6/6] common/idpf: add xstats ops Mingxia Liu
2023-01-18  7:14   ` [PATCH v3 0/6] add idpf pmd enhancement features Mingxia Liu
2023-01-18  7:14     ` [PATCH v3 1/6] common/idpf: add hw statistics Mingxia Liu
2023-02-01  8:48       ` Wu, Jingjing
2023-02-01 12:34         ` Liu, Mingxia
2023-01-18  7:14     ` [PATCH v3 2/6] common/idpf: add RSS set/get ops Mingxia Liu
2023-02-02  3:28       ` Wu, Jingjing
2023-02-07  3:10         ` Liu, Mingxia
2023-01-18  7:14     ` [PATCH v3 3/6] common/idpf: support single q scatter RX datapath Mingxia Liu
2023-02-02  3:45       ` Wu, Jingjing
2023-02-02  7:19         ` Liu, Mingxia
2023-01-18  7:14     ` [PATCH v3 4/6] common/idpf: add rss_offload hash in singleq rx Mingxia Liu
2023-01-18  7:14     ` [PATCH v3 5/6] common/idpf: add alarm to support handle vchnl message Mingxia Liu
2023-02-02  4:23       ` Wu, Jingjing
2023-02-02  7:39         ` Liu, Mingxia
2023-02-02  8:46           ` Wu, Jingjing
2023-01-18  7:14     ` [PATCH v3 6/6] common/idpf: add xstats ops Mingxia Liu
2023-02-07  9:56     ` [PATCH v4 0/6] add idpf pmd enhancement features Mingxia Liu
2023-02-07  9:56       ` [PATCH v4 1/6] common/idpf: add hw statistics Mingxia Liu
2023-02-07  9:56       ` [PATCH v4 2/6] common/idpf: add RSS set/get ops Mingxia Liu
2023-02-07  9:56       ` [PATCH v4 3/6] common/idpf: support single q scatter RX datapath Mingxia Liu
2023-02-07  9:56       ` [PATCH v4 4/6] common/idpf: add rss_offload hash in singleq rx Mingxia Liu
2023-02-07  9:57       ` [PATCH v4 5/6] common/idpf: add alarm to support handle vchnl message Mingxia Liu
2023-02-07  9:57       ` [PATCH v4 6/6] common/idpf: add xstats ops Mingxia Liu
2023-02-07 10:08       ` [PATCH v4 0/6] add idpf pmd enhancement features Mingxia Liu
2023-02-07 10:08         ` [PATCH v5 1/6] common/idpf: add hw statistics Mingxia Liu
2023-02-07 10:16           ` [PATCH v6 0/6] add idpf pmd enhancement features Mingxia Liu
2023-02-07 10:16             ` [PATCH v6 1/6] common/idpf: add hw statistics Mingxia Liu
2023-02-08  2:00               ` Zhang, Qi Z
2023-02-08  8:28                 ` Liu, Mingxia
2023-02-07 10:16             ` [PATCH v6 2/6] common/idpf: add RSS set/get ops Mingxia Liu
2023-02-07 10:16             ` [PATCH v6 3/6] common/idpf: support single q scatter RX datapath Mingxia Liu
2023-02-07 10:16             ` [PATCH v6 4/6] common/idpf: add rss_offload hash in singleq rx Mingxia Liu
2023-02-07 10:16             ` [PATCH v6 5/6] common/idpf: add alarm to support handle vchnl message Mingxia Liu
2023-02-07 10:16             ` [PATCH v6 6/6] common/idpf: add xstats ops Mingxia Liu
2023-02-08  0:28             ` [PATCH v6 0/6] add idpf pmd enhancement features Wu, Jingjing
2023-02-08  7:33             ` [PATCH v7 " Mingxia Liu
2023-02-08  7:33               ` [PATCH v7 1/6] net/idpf: add hw statistics Mingxia Liu
2023-02-08  7:33               ` [PATCH v7 2/6] net/idpf: add RSS set/get ops Mingxia Liu
2023-02-08  7:33               ` [PATCH v7 3/6] net/idpf: support single q scatter RX datapath Mingxia Liu
2023-02-08  7:33               ` [PATCH v7 4/6] net/idpf: add rss_offload hash in singleq rx Mingxia Liu
2023-02-08  7:34               ` [PATCH v7 5/6] net/idpf: add alarm to support handle vchnl message Mingxia Liu
2023-02-08  7:34               ` [PATCH v7 6/6] net/idpf: add xstats ops Mingxia Liu
2023-02-08  9:32               ` [PATCH v7 0/6] add idpf pmd enhancement features Zhang, Qi Z
2023-02-07 10:08         ` Mingxia Liu [this message]
2023-02-07 10:08         ` [PATCH v5 3/6] common/idpf: support single q scatter RX datapath Mingxia Liu
2023-02-07 10:08         ` [PATCH v5 4/6] common/idpf: add rss_offload hash in singleq rx Mingxia Liu
2023-02-07 10:08         ` [PATCH v5 5/6] common/idpf: add alarm to support handle vchnl message Mingxia Liu
2023-02-07 10:08         ` [PATCH v5 6/6] common/idpf: add xstats ops Mingxia Liu
