DPDK patches and discussions
From: Haiyue Wang <haiyue.wang@intel.com>
To: dev@dpdk.org
Cc: qiming.yang@intel.com, jingjing.wu@intel.com,
	qi.z.zhang@intel.com, qi.fu@intel.com,
	Haiyue Wang <haiyue.wang@intel.com>
Subject: [dpdk-dev] [PATCH v4 4/5] net/ice: add the DCF VLAN handling
Date: Mon, 28 Dec 2020 19:21:38 +0800	[thread overview]
Message-ID: <20201228112139.29445-5-haiyue.wang@intel.com> (raw)
In-Reply-To: <20201228112139.29445-1-haiyue.wang@intel.com>

Add the DCF port representor infrastructure for the VFs of the DCF-attached
PF. The standard ethdev APIs, such as the VLAN ones, can then be used to
configure the VFs.
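
As a rough usage sketch (the PCI address, devargs string and the helper
below are examples only, not part of this patch): the DCF port is probed
with a representor list, e.g. "-a 18:01.0,cap=dcf,representor=[1-2]", and
each VF representor port can then be driven through the generic ethdev
VLAN calls that this patch wires up:

  #include <rte_ethdev.h>

  /*
   * Illustrative helper: program a port-based VLAN (PVID) on the VF
   * behind one representor port.  This call ends up in the new
   * ice_dcf_vf_repr_vlan_pvid_set() callback added below.
   */
  static int
  vf_repr_set_pvid(uint16_t repr_port_id, uint16_t pvid)
  {
          return rte_eth_dev_set_vlan_pvid(repr_port_id, pvid, 1 /* on */);
  }

The outer TPID used for the insertion can likewise be changed with
rte_eth_dev_set_vlan_ether_type() once DEV_RX_OFFLOAD_VLAN_EXTEND (QinQ)
is enabled on the representor port.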

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
---
 drivers/net/ice/ice_dcf.c                |   1 +
 drivers/net/ice/ice_dcf_ethdev.c         |  91 +++++-
 drivers/net/ice/ice_dcf_ethdev.h         |  20 ++
 drivers/net/ice/ice_dcf_vf_representor.c | 356 +++++++++++++++++++++++
 drivers/net/ice/meson.build              |   1 +
 5 files changed, 462 insertions(+), 7 deletions(-)
 create mode 100644 drivers/net/ice/ice_dcf_vf_representor.c

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 44dbd3bb8..4a9af3292 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -234,6 +234,7 @@ ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
 
 	caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
 	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
+	       VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 	       VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
 
 	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index b0b2ecb0d..a9e78064d 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -970,20 +970,97 @@ ice_dcf_cap_selected(struct rte_devargs *devargs)
 	return ret;
 }
 
-static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
-			     struct rte_pci_device *pci_dev)
+static int
+eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
+		      struct rte_pci_device *pci_dev)
 {
+	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
+	struct ice_dcf_vf_repr_param repr_param;
+	char repr_name[RTE_ETH_NAME_MAX_LEN];
+	struct ice_dcf_adapter *dcf_adapter;
+	struct rte_eth_dev *dcf_ethdev;
+	uint16_t dcf_vsi_id;
+	int i, ret;
+
 	if (!ice_dcf_cap_selected(pci_dev->device.devargs))
 		return 1;
 
-	return rte_eth_dev_pci_generic_probe(pci_dev,
-					     sizeof(struct ice_dcf_adapter),
-					     ice_dcf_dev_init);
+	ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
+	if (ret)
+		return ret;
+
+	ret = rte_eth_dev_pci_generic_probe(pci_dev,
+					    sizeof(struct ice_dcf_adapter),
+					    ice_dcf_dev_init);
+	if (ret || !eth_da.nb_representor_ports)
+		return ret;
+
+	dcf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+	if (dcf_ethdev == NULL)
+		return -ENODEV;
+
+	dcf_adapter = dcf_ethdev->data->dev_private;
+
+	if (eth_da.nb_representor_ports > dcf_adapter->real_hw.num_vfs ||
+	    eth_da.nb_representor_ports >= RTE_MAX_ETHPORTS) {
+		PMD_DRV_LOG(ERR, "the number of port representors is too large: %u",
+			    eth_da.nb_representor_ports);
+		return -EINVAL;
+	}
+
+	dcf_vsi_id = dcf_adapter->real_hw.vsi_id | VIRTCHNL_DCF_VF_VSI_VALID;
+
+	repr_param.adapter = dcf_adapter;
+	repr_param.switch_domain_id = 0;
+
+	for (i = 0; i < eth_da.nb_representor_ports; i++) {
+		uint16_t vf_id = eth_da.representor_ports[i];
+
+		if (vf_id >= dcf_adapter->real_hw.num_vfs) {
+			PMD_DRV_LOG(ERR, "VF ID %u is out of range (0 ~ %u)",
+				    vf_id, dcf_adapter->real_hw.num_vfs - 1);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) {
+			PMD_DRV_LOG(ERR, "VF ID %u is the DCF's own ID", vf_id);
+			ret = -EINVAL;
+			break;
+		}
+
+		repr_param.vf_id = vf_id;
+		snprintf(repr_name, sizeof(repr_name), "net_%s_representor_%u",
+			 pci_dev->device.name, vf_id);
+		ret = rte_eth_dev_create(&pci_dev->device, repr_name,
+					 sizeof(struct ice_dcf_vf_repr),
+					 NULL, NULL, ice_dcf_vf_repr_init,
+					 &repr_param);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "failed to create DCF VF representor %s",
+				    repr_name);
+			break;
+		}
+	}
+
+	return ret;
 }
 
-static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
+static int
+eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
 {
-	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dcf_dev_uninit);
+	struct rte_eth_dev *eth_dev;
+
+	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+	if (!eth_dev)
+		return 0;
+
+	if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+		return rte_eth_dev_pci_generic_remove(pci_dev,
+						      ice_dcf_vf_repr_uninit);
+	else
+		return rte_eth_dev_pci_generic_remove(pci_dev,
+						      ice_dcf_dev_uninit);
 }
 
 static const struct rte_pci_id pci_id_ice_dcf_map[] = {
diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h
index b54528bea..bd5552332 100644
--- a/drivers/net/ice/ice_dcf_ethdev.h
+++ b/drivers/net/ice/ice_dcf_ethdev.h
@@ -22,9 +22,29 @@ struct ice_dcf_adapter {
 	struct ice_dcf_hw real_hw;
 };
 
+struct ice_dcf_vf_repr_param {
+	struct ice_dcf_adapter *adapter;
+	uint16_t switch_domain_id;
+	uint16_t vf_id;
+};
+
+struct ice_dcf_vf_repr {
+	struct ice_dcf_adapter *dcf_adapter;
+	struct rte_ether_addr mac_addr;
+	uint16_t switch_domain_id;
+	uint16_t vf_id;
+
+	uint16_t outer_vlan_tpid;
+	uint16_t pvid;
+	uint16_t hw_vlan_insert_pvid:1;
+};
+
 void ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
 				 uint8_t *msg, uint16_t msglen);
 int ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev);
 void ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev);
 
+int ice_dcf_vf_repr_init(struct rte_eth_dev *ethdev, void *init_param);
+int ice_dcf_vf_repr_uninit(struct rte_eth_dev *ethdev);
+
 #endif /* _ICE_DCF_ETHDEV_H_ */
diff --git a/drivers/net/ice/ice_dcf_vf_representor.c b/drivers/net/ice/ice_dcf_vf_representor.c
new file mode 100644
index 000000000..e9806895d
--- /dev/null
+++ b/drivers/net/ice/ice_dcf_vf_representor.c
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <errno.h>
+#include <sys/types.h>
+
+#include <rte_ethdev.h>
+
+#include "ice_dcf_ethdev.h"
+#include "ice_rxtx.h"
+
+static uint16_t
+ice_dcf_vf_repr_rx_burst(__rte_unused void *rxq,
+			 __rte_unused struct rte_mbuf **rx_pkts,
+			 __rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
+static uint16_t
+ice_dcf_vf_repr_tx_burst(__rte_unused void *txq,
+			 __rte_unused struct rte_mbuf **tx_pkts,
+			 __rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
+static int
+ice_dcf_vf_repr_dev_configure(__rte_unused struct rte_eth_dev *dev)
+{
+	return 0;
+}
+
+static int
+ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
+{
+	dev->data->dev_link.link_status = ETH_LINK_UP;
+
+	return 0;
+}
+
+static int
+ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
+{
+	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+
+	return 0;
+}
+
+static int
+ice_dcf_vf_repr_dev_close(struct rte_eth_dev *dev)
+{
+	return ice_dcf_vf_repr_uninit(dev);
+}
+
+static int
+ice_dcf_vf_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+			       __rte_unused uint16_t queue_id,
+			       __rte_unused uint16_t nb_desc,
+			       __rte_unused unsigned int socket_id,
+			       __rte_unused const struct rte_eth_rxconf *conf,
+			       __rte_unused struct rte_mempool *pool)
+{
+	return 0;
+}
+
+static int
+ice_dcf_vf_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+			       __rte_unused uint16_t queue_id,
+			       __rte_unused uint16_t nb_desc,
+			       __rte_unused unsigned int socket_id,
+			       __rte_unused const struct rte_eth_txconf *conf)
+{
+	return 0;
+}
+
+static int
+ice_dcf_vf_repr_promiscuous_enable(__rte_unused struct rte_eth_dev *ethdev)
+{
+	return 0;
+}
+
+static int
+ice_dcf_vf_repr_promiscuous_disable(__rte_unused struct rte_eth_dev *ethdev)
+{
+	return 0;
+}
+
+static int
+ice_dcf_vf_repr_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
+{
+	return 0;
+}
+
+static int
+ice_dcf_vf_repr_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
+{
+	return 0;
+}
+
+static int
+ice_dcf_vf_repr_link_update(__rte_unused struct rte_eth_dev *ethdev,
+			    __rte_unused int wait_to_complete)
+{
+	return 0;
+}
+
+static int
+ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
+			     struct rte_eth_dev_info *dev_info)
+{
+	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
+	struct ice_dcf_hw *dcf_hw =
+				&repr->dcf_adapter->real_hw;
+
+	dev_info->device = dev->device;
+	dev_info->max_mac_addrs = 1;
+	dev_info->max_rx_queues = dcf_hw->vsi_res->num_queue_pairs;
+	dev_info->max_tx_queues = dcf_hw->vsi_res->num_queue_pairs;
+	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
+	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
+	dev_info->hash_key_size = dcf_hw->vf_res->rss_key_size;
+	dev_info->reta_size = dcf_hw->vf_res->rss_lut_size;
+	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
+
+	dev_info->rx_offload_capa =
+		DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM |
+		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_SCATTER |
+		DEV_RX_OFFLOAD_JUMBO_FRAME |
+		DEV_RX_OFFLOAD_VLAN_FILTER |
+		DEV_RX_OFFLOAD_VLAN_EXTEND |
+		DEV_RX_OFFLOAD_RSS_HASH;
+	dev_info->tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM |
+		DEV_TX_OFFLOAD_SCTP_CKSUM |
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_TCP_TSO |
+		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+		DEV_TX_OFFLOAD_GRE_TNL_TSO |
+		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+		DEV_TX_OFFLOAD_MULTI_SEGS;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_thresh = {
+			.pthresh = ICE_DEFAULT_RX_PTHRESH,
+			.hthresh = ICE_DEFAULT_RX_HTHRESH,
+			.wthresh = ICE_DEFAULT_RX_WTHRESH,
+		},
+		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_thresh = {
+			.pthresh = ICE_DEFAULT_TX_PTHRESH,
+			.hthresh = ICE_DEFAULT_TX_HTHRESH,
+			.wthresh = ICE_DEFAULT_TX_WTHRESH,
+		},
+		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = ICE_MAX_RING_DESC,
+		.nb_min = ICE_MIN_RING_DESC,
+		.nb_align = ICE_ALIGN_RING_DESC,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = ICE_MAX_RING_DESC,
+		.nb_min = ICE_MIN_RING_DESC,
+		.nb_align = ICE_ALIGN_RING_DESC,
+	};
+
+	dev_info->switch_info.name = dcf_hw->eth_dev->device->name;
+	dev_info->switch_info.domain_id = repr->switch_domain_id;
+	dev_info->switch_info.port_id = repr->vf_id;
+
+	return 0;
+}
+
+static int
+ice_dcf_vlan_offload_config(struct ice_dcf_vf_repr *repr,
+			    struct virtchnl_dcf_vlan_offload *vlan_offload)
+{
+	struct dcf_virtchnl_cmd args;
+
+	memset(&args, 0, sizeof(args));
+	args.v_op = VIRTCHNL_OP_DCF_VLAN_OFFLOAD;
+	args.req_msg = (uint8_t *)vlan_offload;
+	args.req_msglen = sizeof(*vlan_offload);
+
+	return ice_dcf_execute_virtchnl_cmd(&repr->dcf_adapter->real_hw, &args);
+}
+
+static __rte_always_inline bool
+ice_dcf_vlan_offload_ena(struct ice_dcf_vf_repr *repr)
+{
+	return !!(repr->dcf_adapter->real_hw.vf_res->vf_cap_flags &
+		  VIRTCHNL_VF_OFFLOAD_VLAN_V2);
+}
+
+static int
+ice_dcf_vf_repr_vlan_pvid_set(struct rte_eth_dev *dev,
+			      uint16_t pvid, int on)
+{
+	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
+	struct virtchnl_dcf_vlan_offload vlan_offload;
+	int err;
+
+	if (!ice_dcf_vlan_offload_ena(repr))
+		return -ENOTSUP;
+
+	memset(&vlan_offload, 0, sizeof(vlan_offload));
+
+	vlan_offload.vf_id = repr->vf_id;
+	vlan_offload.tpid = repr->outer_vlan_tpid;
+	vlan_offload.vlan_flags = (VIRTCHNL_DCF_VLAN_TYPE_OUTER <<
+				   VIRTCHNL_DCF_VLAN_TYPE_S) |
+				  (VIRTCHNL_DCF_VLAN_INSERT_PORT_BASED <<
+				   VIRTCHNL_DCF_VLAN_INSERT_MODE_S);
+	vlan_offload.vlan_id = on ? pvid : 0;
+
+	err = ice_dcf_vlan_offload_config(repr, &vlan_offload);
+	if (!err) {
+		repr->pvid = vlan_offload.vlan_id;
+		repr->hw_vlan_insert_pvid = on ? 1 : 0;
+	}
+
+	return err;
+}
+
+static int
+ice_dcf_vf_repr_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
+	struct rte_eth_rxmode *rxmode;
+
+	if (!ice_dcf_vlan_offload_ena(repr))
+		return -ENOTSUP;
+
+	rxmode = &dev->data->dev_conf.rxmode;
+
+	if (mask & ETH_VLAN_EXTEND_MASK) {
+		if (!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND))
+			ice_dcf_vf_repr_vlan_pvid_set(dev, 0, 0);
+	}
+
+	return 0;
+}
+
+static int
+ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
+			      enum rte_vlan_type vlan_type, uint16_t tpid)
+{
+	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
+
+	if (!ice_dcf_vlan_offload_ena(repr))
+		return -ENOTSUP;
+
+	if (vlan_type != ETH_VLAN_TYPE_INNER &&
+	    vlan_type != ETH_VLAN_TYPE_OUTER) {
+		PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
+		return -EINVAL;
+	}
+
+	if (vlan_type == ETH_VLAN_TYPE_INNER) {
+		PMD_DRV_LOG(ERR,
+			    "Can accelerate only outer VLAN in QinQ\n");
+		return -EINVAL;
+	}
+
+	if (!(dev->data->dev_conf.rxmode.offloads &
+	      DEV_RX_OFFLOAD_VLAN_EXTEND)) {
+		PMD_DRV_LOG(ERR,
+			    "QinQ not enabled.");
+		return -EINVAL;
+	}
+
+	if (tpid != RTE_ETHER_TYPE_QINQ &&
+	    tpid != RTE_ETHER_TYPE_VLAN &&
+	    tpid != RTE_ETHER_TYPE_QINQ1) {
+		PMD_DRV_LOG(ERR,
+			    "Invalid TPID: 0x%04x\n", tpid);
+		return -EINVAL;
+	}
+
+	repr->outer_vlan_tpid = tpid;
+
+	return ice_dcf_vf_repr_vlan_pvid_set(dev,
+					     repr->pvid,
+					     repr->hw_vlan_insert_pvid);
+}
+
+static const struct eth_dev_ops ice_dcf_vf_repr_dev_ops = {
+	.dev_configure        = ice_dcf_vf_repr_dev_configure,
+	.dev_start            = ice_dcf_vf_repr_dev_start,
+	.dev_stop             = ice_dcf_vf_repr_dev_stop,
+	.dev_close            = ice_dcf_vf_repr_dev_close,
+	.dev_infos_get        = ice_dcf_vf_repr_dev_info_get,
+	.rx_queue_setup       = ice_dcf_vf_repr_rx_queue_setup,
+	.tx_queue_setup       = ice_dcf_vf_repr_tx_queue_setup,
+	.promiscuous_enable   = ice_dcf_vf_repr_promiscuous_enable,
+	.promiscuous_disable  = ice_dcf_vf_repr_promiscuous_disable,
+	.allmulticast_enable  = ice_dcf_vf_repr_allmulticast_enable,
+	.allmulticast_disable = ice_dcf_vf_repr_allmulticast_disable,
+	.link_update          = ice_dcf_vf_repr_link_update,
+	.vlan_offload_set     = ice_dcf_vf_repr_vlan_offload_set,
+	.vlan_pvid_set        = ice_dcf_vf_repr_vlan_pvid_set,
+	.vlan_tpid_set        = ice_dcf_vf_repr_vlan_tpid_set,
+};
+
+int
+ice_dcf_vf_repr_init(struct rte_eth_dev *ethdev, void *init_param)
+{
+	struct ice_dcf_vf_repr *repr = ethdev->data->dev_private;
+	struct ice_dcf_vf_repr_param *param = init_param;
+
+	repr->dcf_adapter = param->adapter;
+	repr->switch_domain_id = param->switch_domain_id;
+	repr->vf_id = param->vf_id;
+	repr->outer_vlan_tpid = RTE_ETHER_TYPE_VLAN;
+
+	ethdev->dev_ops = &ice_dcf_vf_repr_dev_ops;
+
+	ethdev->rx_pkt_burst = ice_dcf_vf_repr_rx_burst;
+	ethdev->tx_pkt_burst = ice_dcf_vf_repr_tx_burst;
+
+	ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+	ethdev->data->representor_id = repr->vf_id;
+
+	ethdev->data->mac_addrs = &repr->mac_addr;
+
+	rte_eth_random_addr(repr->mac_addr.addr_bytes);
+
+	return 0;
+}
+
+int
+ice_dcf_vf_repr_uninit(struct rte_eth_dev *ethdev)
+{
+	ethdev->data->mac_addrs = NULL;
+
+	return 0;
+}
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 7b291269d..d58936089 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -61,6 +61,7 @@ if arch_subdir == 'x86'
 endif
 
 sources += files('ice_dcf.c',
+		 'ice_dcf_vf_representor.c',
 		 'ice_dcf_ethdev.c',
 		 'ice_dcf_parent.c')
 
-- 
2.29.2



Thread overview: 32+ messages
2020-12-14  7:11 [dpdk-dev] [PATCH v2 0/5] Add AVF & DCF VLAN feaure Haiyue Wang
2020-12-14  7:11 ` [dpdk-dev] [PATCH v2 1/5] common/iavf: new VLAN opcode Haiyue Wang
2020-12-14  7:11 ` [dpdk-dev] [PATCH v2 2/5] net/iavf: support Ethernet CRC strip disable Haiyue Wang
2020-12-14  7:11 ` [dpdk-dev] [PATCH v2 3/5] net/ice: enable QinQ filter for switch Haiyue Wang
2020-12-14  7:11 ` [dpdk-dev] [PATCH v2 4/5] net/ice: add DCF port representor Haiyue Wang
2020-12-14  7:11 ` [dpdk-dev] [PATCH v2 5/5] net/iavf: support new VLAN virtchnl opcodes Haiyue Wang
2020-12-15  5:20   ` Yang, Qiming
2020-12-15  5:27     ` Wang, Haiyue
2020-12-15  5:21 ` [dpdk-dev] [PATCH v2 0/5] Add AVF & DCF VLAN feaure Yang, Qiming
2020-12-15  5:25   ` Wang, Haiyue
2020-12-28  5:07 ` [dpdk-dev] [PATCH v3 " Haiyue Wang
2020-12-28  5:07   ` [dpdk-dev] [PATCH v3 1/5] common/iavf: new VLAN opcode Haiyue Wang
2020-12-28  5:07   ` [dpdk-dev] [PATCH v3 2/5] net/iavf: support Ethernet CRC strip disable Haiyue Wang
2020-12-28  5:07   ` [dpdk-dev] [PATCH v3 3/5] net/ice: enable QinQ filter for switch Haiyue Wang
2020-12-28  5:07   ` [dpdk-dev] [PATCH v3 4/5] net/ice: add the DCF VLAN handling Haiyue Wang
2020-12-28  5:07   ` [dpdk-dev] [PATCH v3 5/5] net/iavf: implement new VLAN capability handling Haiyue Wang
2020-12-28 11:21 ` [dpdk-dev] [PATCH v4 0/5] Add AVF & DCF VLAN feaure Haiyue Wang
2020-12-28 11:21   ` [dpdk-dev] [PATCH v4 1/5] common/iavf: new VLAN opcode Haiyue Wang
2020-12-28 11:21   ` [dpdk-dev] [PATCH v4 2/5] net/iavf: support Ethernet CRC strip disable Haiyue Wang
2020-12-28 11:21   ` [dpdk-dev] [PATCH v4 3/5] net/ice: enable QinQ filter for switch Haiyue Wang
2020-12-28 11:21   ` Haiyue Wang [this message]
2020-12-28 11:21   ` [dpdk-dev] [PATCH v4 5/5] net/iavf: implement new VLAN capability handling Haiyue Wang
2021-01-11 13:34 ` [dpdk-dev] [PATCH v5 0/4] Add AVF & DCF VLAN feaure Haiyue Wang
2021-01-11 13:34   ` [dpdk-dev] [PATCH v5 1/4] common/iavf: new VLAN opcode Haiyue Wang
2021-01-11 13:34   ` [dpdk-dev] [PATCH v5 2/4] net/iavf: support Ethernet CRC strip disable Haiyue Wang
2021-01-11 13:34   ` [dpdk-dev] [PATCH v5 3/4] net/ice: add the DCF VLAN handling Haiyue Wang
2021-01-11 13:34   ` [dpdk-dev] [PATCH v5 4/4] net/iavf: implement new VLAN capability handling Haiyue Wang
2021-01-12  8:12 ` [dpdk-dev] [PATCH v6 0/3] Add AVF & DCF VLAN feaure Haiyue Wang
2021-01-12  8:13   ` [dpdk-dev] [PATCH v6 1/3] net/iavf: support Ethernet CRC strip disable Haiyue Wang
2021-01-12  8:13   ` [dpdk-dev] [PATCH v6 2/3] net/ice: add the DCF VLAN handling Haiyue Wang
2021-01-12  8:13   ` [dpdk-dev] [PATCH v6 3/3] net/iavf: implement new VLAN capability handling Haiyue Wang
2021-01-12  8:39   ` [dpdk-dev] [PATCH v6 0/3] Add AVF & DCF VLAN feaure Zhang, Qi Z
