DPDK patches and discussions
 help / color / mirror / Atom feed
From: Hemant Agrawal <hemant.agrawal@nxp.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com
Subject: [dpdk-dev] [PATCH v6 6/8] net/dpaa: add support for Virtual Storage Profile
Date: Tue,  1 Sep 2020 18:06:48 +0530	[thread overview]
Message-ID: <20200901123650.29908-6-hemant.agrawal@nxp.com> (raw)
In-Reply-To: <20200901123650.29908-1-hemant.agrawal@nxp.com>

From: Jun Yang <jun.yang@nxp.com>

This patch adds support for Virtual Storage profile (VSP) feature.
With VSP support when memory pool is created, the hw buffer pool id
i.e. bpid is not allocated; the bpid is identified by dpaa flow
create API.
The memory pool of an RX queue is attached to a specific BMan pool
according to the VSP ID when the RX queue is set up.
For fmlib based hash queues, the VSP base ID is assigned to each queue.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/bus/dpaa/include/fsl_qman.h |   1 +
 drivers/net/dpaa/dpaa_ethdev.c      | 133 +++++++++++++++++-----
 drivers/net/dpaa/dpaa_ethdev.h      |   7 ++
 drivers/net/dpaa/dpaa_flow.c        | 164 +++++++++++++++++++++++++++-
 drivers/net/dpaa/dpaa_flow.h        |   5 +
 5 files changed, 282 insertions(+), 28 deletions(-)

diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index dd7ca783a..10212f0fd 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1229,6 +1229,7 @@ struct qman_fq {
 
 	int q_fd;
 	u16 ch_id;
+	int8_t vsp_id;
 	u8 cgr_groupid;
 	u8 is_static:4;
 	u8 qp_initialized:4;
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index c2d480397..8e7eb9824 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -722,6 +722,55 @@ static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev)
+{
+	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct fman_if_ic_params icp;
+	uint32_t fd_offset;
+	uint32_t bp_size;
+
+	memset(&icp, 0, sizeof(icp));
+	/* set ICEOF for to the default value , which is 0*/
+	icp.iciof = DEFAULT_ICIOF;
+	icp.iceof = DEFAULT_RX_ICEOF;
+	icp.icsz = DEFAULT_ICSZ;
+	fman_if_set_ic_params(dev->process_private, &icp);
+
+	fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
+	fman_if_set_fdoff(dev->process_private, fd_offset);
+
+	/* Buffer pool size should be equal to Dataroom Size*/
+	bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp);
+
+	fman_if_set_bp(dev->process_private,
+		       dpaa_intf->bp_info->mp->size,
+		       dpaa_intf->bp_info->bpid, bp_size);
+}
+
+static inline int dpaa_eth_rx_queue_bp_check(struct rte_eth_dev *dev,
+					     int8_t vsp_id, uint32_t bpid)
+{
+	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct fman_if *fif = dev->process_private;
+
+	if (fif->num_profiles) {
+		if (vsp_id < 0)
+			vsp_id = fif->base_profile_id;
+	} else {
+		if (vsp_id < 0)
+			vsp_id = 0;
+	}
+
+	if (dpaa_intf->vsp_bpid[vsp_id] &&
+		bpid != dpaa_intf->vsp_bpid[vsp_id]) {
+		DPAA_PMD_ERR("Various MPs are assigned to RXQs with same VSP");
+
+		return -1;
+	}
+
+	return 0;
+}
+
 static
 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			    uint16_t nb_desc,
@@ -757,6 +806,20 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
 			queue_idx, rxq->fqid);
 
+	if (!fif->num_profiles) {
+		if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
+			dpaa_intf->bp_info->mp != mp) {
+			DPAA_PMD_WARN("Multiple pools on same interface not"
+				      " supported");
+			return -EINVAL;
+		}
+	} else {
+		if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id,
+			DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) {
+			return -EINVAL;
+		}
+	}
+
 	/* Max packet can fit in single buffer */
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
 		;
@@ -779,36 +842,40 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		     buffsz - RTE_PKTMBUF_HEADROOM);
 	}
 
-	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
-		struct fman_if_ic_params icp;
-		uint32_t fd_offset;
-		uint32_t bp_size;
+	dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
 
-		if (!mp->pool_data) {
-			DPAA_PMD_ERR("Not an offloaded buffer pool!");
-			return -1;
+	/* For shared interface, it's done in kernel, skip.*/
+	if (!fif->is_shared_mac)
+		dpaa_fman_if_pool_setup(dev);
+
+	if (fif->num_profiles) {
+		int8_t vsp_id = rxq->vsp_id;
+
+		if (vsp_id >= 0) {
+			ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
+					DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
+					fif);
+			if (ret) {
+				DPAA_PMD_ERR("dpaa_port_vsp_update failed");
+				return ret;
+			}
+		} else {
+			DPAA_PMD_INFO("Base profile is associated to"
+				" RXQ fqid:%d\r\n", rxq->fqid);
+			if (fif->is_shared_mac) {
+				DPAA_PMD_ERR("Fatal: Base profile is associated"
+					     " to shared interface on DPDK.");
+				return -EINVAL;
+			}
+			dpaa_intf->vsp_bpid[fif->base_profile_id] =
+				DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
 		}
-		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
-
-		memset(&icp, 0, sizeof(icp));
-		/* set ICEOF for to the default value , which is 0*/
-		icp.iciof = DEFAULT_ICIOF;
-		icp.iceof = DEFAULT_RX_ICEOF;
-		icp.icsz = DEFAULT_ICSZ;
-		fman_if_set_ic_params(fif, &icp);
-
-		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
-		fman_if_set_fdoff(fif, fd_offset);
-
-		/* Buffer pool size should be equal to Dataroom Size*/
-		bp_size = rte_pktmbuf_data_room_size(mp);
-		fman_if_set_bp(fif, mp->size,
-			       dpaa_intf->bp_info->bpid, bp_size);
-		dpaa_intf->valid = 1;
-		DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
-				dpaa_intf->name, fd_offset,
-				fman_if_get_fdoff(fif));
+	} else {
+		dpaa_intf->vsp_bpid[0] =
+			DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
 	}
+
+	dpaa_intf->valid = 1;
 	DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
 		fman_if_get_sg_enable(fif),
 		dev->data->dev_conf.rxmode.max_rx_pkt_len);
@@ -1605,6 +1672,8 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
 	uint32_t cgrid_tx[MAX_DPAA_CORES];
 	uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
+	int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES];
+	int8_t vsp_id = -1;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1624,6 +1693,8 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	memset((char *)dev_rx_fqids, 0,
 		sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES);
 
+	memset(dev_vspids, -1, DPAA_MAX_NUM_PCD_QUEUES);
+
 	/* Initialize Rx FQ's */
 	if (default_q) {
 		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
@@ -1703,6 +1774,8 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 		else
 			fqid = dev_rx_fqids[loop];
 
+		vsp_id = dev_vspids[loop];
+
 		if (dpaa_intf->cgr_rx)
 			dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
 
@@ -1711,6 +1784,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 			fqid);
 		if (ret)
 			goto free_rx;
+		dpaa_intf->rx_queues[loop].vsp_id = vsp_id;
 		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
 	}
 	dpaa_intf->nb_rx_queues = num_rx_fqs;
@@ -2051,6 +2125,11 @@ static void __attribute__((destructor(102))) dpaa_finish(void)
 					if (dpaa_fm_deconfig(dpaa_intf, fif))
 						DPAA_PMD_WARN("DPAA FM "
 							"deconfig failed\n");
+				if (fif->num_profiles) {
+					if (dpaa_port_vsp_cleanup(dpaa_intf,
+								  fif))
+						DPAA_PMD_WARN("DPAA FM vsp cleanup failed\n");
+				}
 			}
 		}
 		if (is_global_init)
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index b10c4a20b..dd182c4d5 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -103,6 +103,10 @@
 #define DPAA_FD_CMD_CFQ			0x00ffffff
 /**< Confirmation Frame Queue */
 
+#define DPAA_VSP_PROFILE_MAX_NUM	8
+
+#define DPAA_DEFAULT_RXQ_VSP_ID		1
+
 /* Each network interface is represented by one of these */
 struct dpaa_if {
 	int valid;
@@ -122,6 +126,9 @@ struct dpaa_if {
 	void *netenv_handle;
 	void *scheme_handle[2];
 	uint32_t scheme_count;
+
+	void *vsp_handle[DPAA_VSP_PROFILE_MAX_NUM];
+	uint32_t vsp_bpid[DPAA_VSP_PROFILE_MAX_NUM];
 };
 
 struct dpaa_if_stats {
diff --git a/drivers/net/dpaa/dpaa_flow.c b/drivers/net/dpaa/dpaa_flow.c
index d24cd856c..a0087df67 100644
--- a/drivers/net/dpaa/dpaa_flow.c
+++ b/drivers/net/dpaa/dpaa_flow.c
@@ -12,6 +12,7 @@
 #include <dpaa_flow.h>
 #include <rte_dpaa_logs.h>
 #include <fmlib/fm_port_ext.h>
+#include <fmlib/fm_vsp_ext.h>
 
 #define DPAA_MAX_NUM_ETH_DEV	8
 
@@ -47,6 +48,17 @@ static struct dpaa_fm_info fm_info;
 static struct dpaa_fm_model fm_model;
 static const char *fm_log = "/tmp/fmdpdk.bin";
 
+static inline uint8_t fm_default_vsp_id(struct fman_if *fif)
+{
+	/* Avoid being same as base profile which could be used
+	 * for kernel interface of shared mac.
+	 */
+	if (fif->base_profile_id)
+		return 0;
+	else
+		return DPAA_DEFAULT_RXQ_VSP_ID;
+}
+
 static void fm_prev_cleanup(void)
 {
 	uint32_t fman_id = 0, i = 0, devid;
@@ -300,11 +312,18 @@ set_hash_params_sctp(ioc_fm_pcd_kg_scheme_params_t *scheme_params, int hdr_idx)
 static int set_scheme_params(ioc_fm_pcd_kg_scheme_params_t *scheme_params,
 	ioc_fm_pcd_net_env_params_t *dist_units,
 	struct dpaa_if *dpaa_intf,
-	struct fman_if *fif __rte_unused)
+	struct fman_if *fif)
 {
 	int dist_idx, hdr_idx = 0;
 	PMD_INIT_FUNC_TRACE();
 
+	if (fif->num_profiles) {
+		scheme_params->param.override_storage_profile = true;
+		scheme_params->param.storage_profile.direct = true;
+		scheme_params->param.storage_profile.profile_select
+			.direct_relative_profile_id = fm_default_vsp_id(fif);
+	}
+
 	scheme_params->param.use_hash = 1;
 	scheme_params->param.modify = false;
 	scheme_params->param.always_direct = false;
@@ -784,6 +803,14 @@ int dpaa_fm_config(struct rte_eth_dev *dev, uint64_t req_dist_set)
 		return -1;
 	}
 
+	if (fif->num_profiles) {
+		for (i = 0; i < dpaa_intf->nb_rx_queues; i++)
+			dpaa_intf->rx_queues[i].vsp_id =
+				fm_default_vsp_id(fif);
+
+		i = 0;
+	}
+
 	/* Set PCD netenv and scheme */
 	if (req_dist_set) {
 		ret = set_pcd_netenv_scheme(dpaa_intf, req_dist_set, fif);
@@ -909,3 +936,138 @@ int dpaa_fm_term(void)
 	}
 	return 0;
 }
+
+static int dpaa_port_vsp_configure(struct dpaa_if *dpaa_intf,
+		uint8_t vsp_id, t_handle fman_handle,
+		struct fman_if *fif)
+{
+	t_fm_vsp_params vsp_params;
+	t_fm_buffer_prefix_content buf_prefix_cont;
+	uint8_t mac_idx[] = {-1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1};
+	uint8_t idx = mac_idx[fif->mac_idx];
+	int ret;
+
+	if (vsp_id == fif->base_profile_id && fif->is_shared_mac) {
+		/* For shared interface, VSP of base
+		 * profile is default pool located in kernel.
+		 */
+		dpaa_intf->vsp_bpid[vsp_id] = 0;
+		return 0;
+	}
+
+	if (vsp_id >= DPAA_VSP_PROFILE_MAX_NUM) {
+		DPAA_PMD_ERR("VSP ID %d exceeds MAX number %d",
+			vsp_id, DPAA_VSP_PROFILE_MAX_NUM);
+		return -1;
+	}
+
+	memset(&vsp_params, 0, sizeof(vsp_params));
+	vsp_params.h_fm = fman_handle;
+	vsp_params.relative_profile_id = vsp_id;
+	vsp_params.port_params.port_id = idx;
+	if (fif->mac_type == fman_mac_1g) {
+		vsp_params.port_params.port_type = e_FM_PORT_TYPE_RX;
+	} else if (fif->mac_type == fman_mac_2_5g) {
+		vsp_params.port_params.port_type = e_FM_PORT_TYPE_RX_2_5G;
+	} else if (fif->mac_type == fman_mac_10g) {
+		vsp_params.port_params.port_type = e_FM_PORT_TYPE_RX_10G;
+	} else {
+		DPAA_PMD_ERR("Mac type %d error", fif->mac_type);
+		return -1;
+	}
+	vsp_params.ext_buf_pools.num_of_pools_used = 1;
+	vsp_params.ext_buf_pools.ext_buf_pool[0].id =
+		dpaa_intf->vsp_bpid[vsp_id];
+	vsp_params.ext_buf_pools.ext_buf_pool[0].size =
+		RTE_MBUF_DEFAULT_BUF_SIZE;
+
+	dpaa_intf->vsp_handle[vsp_id] = fm_vsp_config(&vsp_params);
+	if (!dpaa_intf->vsp_handle[vsp_id]) {
+		DPAA_PMD_ERR("fm_vsp_config error for profile %d", vsp_id);
+		return -EINVAL;
+	}
+
+	/* configure the application buffer (structure, size and
+	 * content)
+	 */
+
+	memset(&buf_prefix_cont, 0, sizeof(buf_prefix_cont));
+
+	buf_prefix_cont.priv_data_size = 16;
+	buf_prefix_cont.data_align = 64;
+	buf_prefix_cont.pass_prs_result = true;
+	buf_prefix_cont.pass_time_stamp = true;
+	buf_prefix_cont.pass_hash_result = false;
+	buf_prefix_cont.pass_all_other_pcdinfo = false;
+	ret = fm_vsp_config_buffer_prefix_content(dpaa_intf->vsp_handle[vsp_id],
+					       &buf_prefix_cont);
+	if (ret != E_OK) {
+		DPAA_PMD_ERR("fm_vsp_config_buffer_prefix_content error for profile %d err: %d",
+			     vsp_id, ret);
+		return ret;
+	}
+
+	/* initialize the FM VSP module */
+	ret = fm_vsp_init(dpaa_intf->vsp_handle[vsp_id]);
+	if (ret != E_OK) {
+		DPAA_PMD_ERR("fm_vsp_init error for profile %d err:%d",
+			 vsp_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int dpaa_port_vsp_update(struct dpaa_if *dpaa_intf,
+		bool fmc_mode, uint8_t vsp_id, uint32_t bpid,
+		struct fman_if *fif)
+{
+	int ret = 0;
+	t_handle fman_handle;
+
+	if (!fif->num_profiles)
+		return 0;
+
+	if (vsp_id >= fif->num_profiles)
+		return 0;
+
+	if (dpaa_intf->vsp_bpid[vsp_id] == bpid)
+		return 0;
+
+	if (dpaa_intf->vsp_handle[vsp_id]) {
+		ret = fm_vsp_free(dpaa_intf->vsp_handle[vsp_id]);
+		if (ret != E_OK) {
+			DPAA_PMD_ERR("Error fm_vsp_free: err %d vsp_handle[%d]",
+				     ret, vsp_id);
+			return ret;
+		}
+		dpaa_intf->vsp_handle[vsp_id] = 0;
+	}
+
+	if (fmc_mode)
+		fman_handle = fm_open(0);
+	else
+		fman_handle = fm_info.fman_handle;
+
+	dpaa_intf->vsp_bpid[vsp_id] = bpid;
+
+	return dpaa_port_vsp_configure(dpaa_intf, vsp_id, fman_handle, fif);
+}
+
+int dpaa_port_vsp_cleanup(struct dpaa_if *dpaa_intf, struct fman_if *fif)
+{
+	int idx, ret;
+
+	for (idx = 0; idx < (uint8_t)fif->num_profiles; idx++) {
+		if (dpaa_intf->vsp_handle[idx]) {
+			ret = fm_vsp_free(dpaa_intf->vsp_handle[idx]);
+			if (ret != E_OK) {
+				DPAA_PMD_ERR("Error fm_vsp_free: err %d"
+					     " vsp_handle[%d]", ret, idx);
+				return ret;
+			}
+		}
+	}
+
+	return E_OK;
+}
diff --git a/drivers/net/dpaa/dpaa_flow.h b/drivers/net/dpaa/dpaa_flow.h
index d16bfec21..f5e131acf 100644
--- a/drivers/net/dpaa/dpaa_flow.h
+++ b/drivers/net/dpaa/dpaa_flow.h
@@ -10,5 +10,10 @@ int dpaa_fm_term(void);
 int dpaa_fm_config(struct rte_eth_dev *dev, uint64_t req_dist_set);
 int dpaa_fm_deconfig(struct dpaa_if *dpaa_intf, struct fman_if *fif);
 void dpaa_write_fm_config_to_file(void);
+int dpaa_port_vsp_update(struct dpaa_if *dpaa_intf,
+	bool fmc_mode, uint8_t vsp_id, uint32_t bpid, struct fman_if *fif);
+int dpaa_port_vsp_cleanup(struct dpaa_if *dpaa_intf, struct fman_if *fif);
+int dpaa_port_fmc_init(struct fman_if *fif,
+		       uint32_t *fqids, int8_t *vspids, int max_nb_rxq);
 
 #endif
-- 
2.17.1


  parent reply	other threads:[~2020-09-01 12:44 UTC|newest]

Thread overview: 81+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-07-10 17:19 [dpdk-dev] [PATCH v2 1/9] net/dpaa: support Rxq and Txq info routines Hemant Agrawal
2020-07-10 17:19 ` [dpdk-dev] [PATCH v2 2/9] net/dpaa: add support for fmlib in dpdk Hemant Agrawal
2020-07-10 17:19 ` [dpdk-dev] [PATCH v2 3/9] net/dpaa: add VSP support in FMLIB Hemant Agrawal
2020-07-10 17:19 ` [dpdk-dev] [PATCH v2 4/9] net/dpaa: add support for fmcless mode Hemant Agrawal
2020-07-10 17:19 ` [dpdk-dev] [PATCH v2 5/9] bus/dpaa: add shared MAC support Hemant Agrawal
2020-07-10 17:19 ` [dpdk-dev] [PATCH v2 6/9] bus/dpaa: add Virtual Storage Profile port init Hemant Agrawal
2020-07-10 17:19 ` [dpdk-dev] [PATCH v2 7/9] net/dpaa: add support for Virtual Storage Profile Hemant Agrawal
2020-07-10 17:19 ` [dpdk-dev] [PATCH v2 8/9] net/dpaa: add fmc parser support for VSP Hemant Agrawal
2020-07-10 17:19 ` [dpdk-dev] [PATCH v2 9/9] net/dpaa: add RSS update func with FMCless Hemant Agrawal
2020-07-11  8:17 ` [dpdk-dev] [PATCH v3 1/8] net/dpaa: add support for fmlib in dpdk Hemant Agrawal
2020-07-11  8:17   ` [dpdk-dev] [PATCH v3 2/8] net/dpaa: add VSP support in FMLIB Hemant Agrawal
2020-07-11  8:17   ` [dpdk-dev] [PATCH v3 3/8] net/dpaa: add support for fmcless mode Hemant Agrawal
2020-07-11  8:17   ` [dpdk-dev] [PATCH v3 4/8] bus/dpaa: add shared MAC support Hemant Agrawal
2020-07-11  8:17   ` [dpdk-dev] [PATCH v3 5/8] bus/dpaa: add Virtual Storage Profile port init Hemant Agrawal
2020-07-11  8:17   ` [dpdk-dev] [PATCH v3 6/8] net/dpaa: add support for Virtual Storage Profile Hemant Agrawal
2020-07-11  8:17   ` [dpdk-dev] [PATCH v3 7/8] net/dpaa: add fmc parser support for VSP Hemant Agrawal
2020-07-11  8:17   ` [dpdk-dev] [PATCH v3 8/8] net/dpaa: add RSS update func with FMCless Hemant Agrawal
2020-07-17 11:36   ` [dpdk-dev] [PATCH v3 1/8] net/dpaa: add support for fmlib in dpdk Ferruh Yigit
2020-07-19 20:10     ` Thomas Monjalon
2020-07-20  4:50       ` Hemant Agrawal
2020-07-20 17:06         ` Thomas Monjalon
2020-07-21  3:26           ` Hemant Agrawal
2020-07-20 18:42         ` Stephen Hemminger
2020-07-28 13:41         ` David Marchand
2020-07-29  6:39           ` Hemant Agrawal
2020-07-29 12:07             ` Thomas Monjalon
2020-07-29 14:33             ` Kevin Traynor
2020-07-20  9:51       ` Ferruh Yigit
2020-08-11 12:29   ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
2020-08-11 12:29     ` [dpdk-dev] [PATCH v4 2/8] net/dpaa: add VSP support in FMLIB Hemant Agrawal
2020-08-11 12:29     ` [dpdk-dev] [PATCH v4 3/8] net/dpaa: add support for fmcless mode Hemant Agrawal
2020-08-11 12:29     ` [dpdk-dev] [PATCH v4 4/8] bus/dpaa: add shared MAC support Hemant Agrawal
2020-08-11 12:29     ` [dpdk-dev] [PATCH v4 5/8] bus/dpaa: add Virtual Storage Profile port init Hemant Agrawal
2020-08-11 12:29     ` [dpdk-dev] [PATCH v4 6/8] net/dpaa: add support for Virtual Storage Profile Hemant Agrawal
2020-08-11 12:30     ` [dpdk-dev] [PATCH v4 7/8] net/dpaa: add fmc parser support for VSP Hemant Agrawal
2020-08-11 12:30     ` [dpdk-dev] [PATCH v4 8/8] net/dpaa: add RSS update func with FMCless Hemant Agrawal
2020-08-13 18:01     ` [dpdk-dev] [PATCH v5 1/8] net/dpaa: add support for fmlib in dpdk Hemant Agrawal
2020-08-13 18:01       ` [dpdk-dev] [PATCH v5 2/8] net/dpaa: add VSP support in FMLIB Hemant Agrawal
2020-08-13 18:01       ` [dpdk-dev] [PATCH v5 3/8] net/dpaa: add support for fmcless mode Hemant Agrawal
2020-08-13 18:01       ` [dpdk-dev] [PATCH v5 4/8] bus/dpaa: add shared MAC support Hemant Agrawal
2020-08-13 18:01       ` [dpdk-dev] [PATCH v5 5/8] bus/dpaa: add Virtual Storage Profile port init Hemant Agrawal
2020-08-13 18:01       ` [dpdk-dev] [PATCH v5 6/8] net/dpaa: add support for Virtual Storage Profile Hemant Agrawal
2020-08-13 18:01       ` [dpdk-dev] [PATCH v5 7/8] net/dpaa: add fmc parser support for VSP Hemant Agrawal
2020-08-13 18:01       ` [dpdk-dev] [PATCH v5 8/8] net/dpaa: add RSS update func with FMCless Hemant Agrawal
2020-08-26 13:54       ` [dpdk-dev] [PATCH v5 1/8] net/dpaa: add support for fmlib in dpdk Ferruh Yigit
2020-08-26 14:52         ` Ferruh Yigit
2020-08-26 17:06           ` Hemant Agrawal
2020-08-26 21:20             ` Ferruh Yigit
2020-09-01 12:36       ` [dpdk-dev] [PATCH v6 " Hemant Agrawal
2020-09-01 12:36         ` [dpdk-dev] [PATCH v6 2/8] net/dpaa: add VSP support in FMLIB Hemant Agrawal
2020-09-01 12:36         ` [dpdk-dev] [PATCH v6 3/8] net/dpaa: add support for fmcless mode Hemant Agrawal
2020-09-01 12:36         ` [dpdk-dev] [PATCH v6 4/8] bus/dpaa: add shared MAC support Hemant Agrawal
2020-09-01 12:36         ` [dpdk-dev] [PATCH v6 5/8] bus/dpaa: add Virtual Storage Profile port init Hemant Agrawal
2020-09-01 12:36         ` Hemant Agrawal [this message]
2020-09-01 12:36         ` [dpdk-dev] [PATCH v6 7/8] net/dpaa: add fmc parser support for VSP Hemant Agrawal
2020-09-01 12:36         ` [dpdk-dev] [PATCH v6 8/8] net/dpaa: add RSS update func with FMCless Hemant Agrawal
2020-09-01 15:48         ` [dpdk-dev] [PATCH v6 1/8] net/dpaa: add support for fmlib in dpdk Ferruh Yigit
2020-09-02  5:15           ` Hemant Agrawal
2020-09-02 13:32             ` Ferruh Yigit
2020-09-03  3:24               ` Hemant Agrawal
2020-09-03 19:54                 ` Ferruh Yigit
2020-09-04  8:29         ` [dpdk-dev] [PATCH v7 1/7] net/dpaa: add VSP support in FMLIB Hemant Agrawal
2020-09-04  8:29           ` [dpdk-dev] [PATCH v7 2/7] net/dpaa: add support for fmcless mode Hemant Agrawal
2020-09-04  8:29           ` [dpdk-dev] [PATCH v7 3/7] bus/dpaa: add shared MAC support Hemant Agrawal
2020-09-04  8:29           ` [dpdk-dev] [PATCH v7 4/7] bus/dpaa: add Virtual Storage Profile port init Hemant Agrawal
2020-09-04  8:29           ` [dpdk-dev] [PATCH v7 5/7] net/dpaa: add support for Virtual Storage Profile Hemant Agrawal
2020-09-04  8:29           ` [dpdk-dev] [PATCH v7 6/7] net/dpaa: add fmc parser support for VSP Hemant Agrawal
2020-09-04  8:29           ` [dpdk-dev] [PATCH v7 7/7] net/dpaa: add RSS update func with FMCless Hemant Agrawal
2020-09-04  8:39           ` [dpdk-dev] [PATCH v8 1/8] net/dpaa: add support for fmlib in dpdk Hemant Agrawal
2020-09-04  8:39             ` [dpdk-dev] [PATCH v8 2/8] net/dpaa: add VSP support in FMLIB Hemant Agrawal
2020-09-04  8:39             ` [dpdk-dev] [PATCH v8 3/8] net/dpaa: add support for fmcless mode Hemant Agrawal
2020-09-04  8:39             ` [dpdk-dev] [PATCH v8 4/8] bus/dpaa: add shared MAC support Hemant Agrawal
2020-09-04  8:39             ` [dpdk-dev] [PATCH v8 5/8] bus/dpaa: add Virtual Storage Profile port init Hemant Agrawal
2020-09-04  8:39             ` [dpdk-dev] [PATCH v8 6/8] net/dpaa: add support for Virtual Storage Profile Hemant Agrawal
2020-09-04  8:39             ` [dpdk-dev] [PATCH v8 7/8] net/dpaa: add fmc parser support for VSP Hemant Agrawal
2020-09-04  8:39             ` [dpdk-dev] [PATCH v8 8/8] net/dpaa: add RSS update func with FMCless Hemant Agrawal
2020-09-04 12:51             ` [dpdk-dev] [PATCH v8 1/8] net/dpaa: add support for fmlib in dpdk Ferruh Yigit
2020-09-08  9:55               ` Ferruh Yigit
2020-09-08 10:19                 ` Thomas Monjalon
2020-09-08 12:10                   ` Ferruh Yigit
2020-09-09 11:16                     ` [dpdk-dev] [dpdk-ci] " Ferruh Yigit

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20200901123650.29908-6-hemant.agrawal@nxp.com \
    --to=hemant.agrawal@nxp.com \
    --cc=dev@dpdk.org \
    --cc=ferruh.yigit@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).