From: Yongseok Koh <yskoh@mellanox.com>
To: shahafs@mellanox.com
Cc: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 3/4] net/mlx5: rework PMD global data init
Date: Mon, 25 Mar 2019 12:15:44 -0700
Message-ID: <20190325191545.20707-4-yskoh@mellanox.com>
In-Reply-To: <20190325191545.20707-1-yskoh@mellanox.com>

There is a growing need for PMD global data, which must be initialized once per
process regardless of how many PMD instances are probed. mlx5_init_once() is
called during probing and makes sure all the init functions are called once per
process. Currently, such global data and its initialization functions are
scattered. Rather than 'extern'-ing the variables and calling the functions one
by one, each time checking their validity to ensure they are initialized only
once, it is better to have one global storage holding the data and a single
consolidated function performing all the initializations. The existing shared
memory (memzone) is used more extensively for this purpose. As there can be
multiple secondary processes, a static storage (local to each process) is also
added.
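
For readability, a simplified sketch of the consolidated path follows
(condensed from the mlx5_init_once() hunk in this patch; the lock around the
switch and the error unwinding are omitted):

    static int
    mlx5_init_once(void)
    {
        if (mlx5_init_shared_data())    /* reserve or attach the memzone */
            return -rte_errno;
        switch (rte_eal_process_type()) {
        case RTE_PROC_PRIMARY:
            if (mlx5_shared_data->init_done)
                break;
            /* mem event callback, MP channel, UAR reservation, ... */
            mlx5_shared_data->init_done = true;
            break;
        case RTE_PROC_SECONDARY:
            if (mlx5_local_data.init_done)
                break;
            /* map the UAR range reserved by the primary, ... */
            mlx5_local_data.init_done = true;
            break;
        default:
            break;
        }
        return 0;
    }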

As the reserved virtual address range for UAR remap is a PMD global resource,
it does not need to be stored in the per-device priv structure; it is moved to
the PMD global data instead.
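
The resulting split of the data, abridged from the mlx5.h hunk below, looks
like this:

    struct mlx5_shared_data {       /* in a memzone, shared by all processes */
        rte_spinlock_t lock;
        int init_done;              /* primary init completed */
        unsigned int secondary_cnt; /* number of initialized secondaries */
        void *uar_base;             /* reserved UAR VA for TXQ doorbell remap */
        /* ... memory event callback list and rwlock ... */
    };

    struct mlx5_local_data {        /* static, private to each process */
        int init_done;              /* this secondary's init completed */
        void *uar_base;             /* per-process UAR reservation */
    };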

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
---
 drivers/net/mlx5/mlx5.c     | 250 ++++++++++++++++++++++++++++++++------------
 drivers/net/mlx5/mlx5.h     |  19 +++-
 drivers/net/mlx5/mlx5_mp.c  |  19 +++-
 drivers/net/mlx5/mlx5_txq.c |   7 +-
 4 files changed, 217 insertions(+), 78 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 316f34cd05..54a1896ea4 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -128,16 +128,26 @@ struct mlx5_shared_data *mlx5_shared_data;
 /* Spinlock for mlx5_shared_data allocation. */
 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
 
+/* Process local data for secondary processes. */
+static struct mlx5_local_data mlx5_local_data;
+
 /** Driver-specific log messages type. */
 int mlx5_logtype;
 
 /**
- * Prepare shared data between primary and secondary process.
+ * Initialize shared data between primary and secondary process.
+ *
+ * A memzone is reserved by primary process and secondary processes attach to
+ * the memzone.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static void
-mlx5_prepare_shared_data(void)
+static int
+mlx5_init_shared_data(void)
 {
 	const struct rte_memzone *mz;
+	int ret = 0;
 
 	rte_spinlock_lock(&mlx5_shared_data_lock);
 	if (mlx5_shared_data == NULL) {
@@ -146,22 +156,53 @@ mlx5_prepare_shared_data(void)
 			mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
 						 sizeof(*mlx5_shared_data),
 						 SOCKET_ID_ANY, 0);
+			if (mz == NULL) {
+				DRV_LOG(ERR,
+					"Cannot allocate mlx5 shared data\n");
+				ret = -rte_errno;
+				goto error;
+			}
+			mlx5_shared_data = mz->addr;
+			memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
+			rte_spinlock_init(&mlx5_shared_data->lock);
 		} else {
 			/* Lookup allocated shared memory. */
 			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
+			if (mz == NULL) {
+				DRV_LOG(ERR,
+					"Cannot attach mlx5 shared data\n");
+				ret = -rte_errno;
+				goto error;
+			}
+			mlx5_shared_data = mz->addr;
+			memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
 		}
-		if (mz == NULL)
-			rte_panic("Cannot allocate mlx5 shared data\n");
-		mlx5_shared_data = mz->addr;
-		/* Initialize shared data. */
+	}
+error:
+	rte_spinlock_unlock(&mlx5_shared_data_lock);
+	return ret;
+}
+
+/**
+ * Uninitialize shared data between primary and secondary process.
+ *
+ * The pointer of secondary process is dereferenced and primary process frees
+ * the memzone.
+ */
+static void
+mlx5_uninit_shared_data(void)
+{
+	const struct rte_memzone *mz;
+
+	rte_spinlock_lock(&mlx5_shared_data_lock);
+	if (mlx5_shared_data) {
 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-			LIST_INIT(&mlx5_shared_data->mem_event_cb_list);
-			rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock);
-			rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
-							mlx5_mr_mem_event_cb,
-							NULL);
-			mlx5_mp_init();
+			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
+			rte_memzone_free(mz);
+		} else {
+			memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
 		}
+		mlx5_shared_data = NULL;
 	}
 	rte_spinlock_unlock(&mlx5_shared_data_lock);
 }
@@ -597,15 +638,6 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 
 static struct rte_pci_driver mlx5_driver;
 
-/*
- * Reserved UAR address space for TXQ UAR(hw doorbell) mapping, process
- * local resource used by both primary and secondary to avoid duplicate
- * reservation.
- * The space has to be available on both primary and secondary process,
- * TXQ UAR maps to this area using fixed mmap w/o double check.
- */
-static void *uar_base;
-
 static int
 find_lower_va_bound(const struct rte_memseg_list *msl,
 		const struct rte_memseg *ms, void *arg)
@@ -625,25 +657,24 @@ find_lower_va_bound(const struct rte_memseg_list *msl,
 /**
  * Reserve UAR address space for primary process.
  *
- * @param[in] dev
- *   Pointer to Ethernet device.
+ * Process local resource is used by both primary and secondary to avoid
+ * duplicate reservation. The space has to be available on both primary and
+ * secondary process, TXQ UAR maps to this area using fixed mmap w/o double
+ * check.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_uar_init_primary(struct rte_eth_dev *dev)
+mlx5_uar_init_primary(void)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_shared_data *sd = mlx5_shared_data;
 	void *addr = (void *)0;
 
-	if (uar_base) { /* UAR address space mapped. */
-		priv->uar_base = uar_base;
+	if (sd->uar_base)
 		return 0;
-	}
 	/* find out lower bound of hugepage segments */
 	rte_memseg_walk(find_lower_va_bound, &addr);
-
 	/* keep distance to hugepages to minimize potential conflicts. */
 	addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX5_UAR_OFFSET + MLX5_UAR_SIZE));
 	/* anonymous mmap, no real memory consumption. */
@@ -651,65 +682,154 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
 		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 	if (addr == MAP_FAILED) {
 		DRV_LOG(ERR,
-			"port %u failed to reserve UAR address space, please"
-			" adjust MLX5_UAR_SIZE or try --base-virtaddr",
-			dev->data->port_id);
+			"Failed to reserve UAR address space, please"
+			" adjust MLX5_UAR_SIZE or try --base-virtaddr");
 		rte_errno = ENOMEM;
 		return -rte_errno;
 	}
 	/* Accept either same addr or a new addr returned from mmap if target
 	 * range occupied.
 	 */
-	DRV_LOG(INFO, "port %u reserved UAR address space: %p",
-		dev->data->port_id, addr);
-	priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
-	uar_base = addr; /* process local, don't reserve again. */
+	DRV_LOG(INFO, "Reserved UAR address space: %p", addr);
+	sd->uar_base = addr; /* for primary and secondary UAR re-mmap. */
 	return 0;
 }
 
 /**
- * Reserve UAR address space for secondary process, align with
- * primary process.
- *
- * @param[in] dev
- *   Pointer to Ethernet device.
+ * Unmap UAR address space reserved for primary process.
+ */
+static void
+mlx5_uar_uninit_primary(void)
+{
+	struct mlx5_shared_data *sd = mlx5_shared_data;
+
+	if (!sd->uar_base)
+		return;
+	munmap(sd->uar_base, MLX5_UAR_SIZE);
+	sd->uar_base = NULL;
+}
+
+/**
+ * Reserve UAR address space for secondary process, align with primary process.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_uar_init_secondary(struct rte_eth_dev *dev)
+mlx5_uar_init_secondary(void)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_shared_data *sd = mlx5_shared_data;
+	struct mlx5_local_data *ld = &mlx5_local_data;
 	void *addr;
 
-	assert(priv->uar_base);
-	if (uar_base) { /* already reserved. */
-		assert(uar_base == priv->uar_base);
+	if (ld->uar_base) { /* Already reserved. */
+		assert(sd->uar_base == ld->uar_base);
 		return 0;
 	}
+	assert(sd->uar_base);
 	/* anonymous mmap, no real memory consumption. */
-	addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
+	addr = mmap(sd->uar_base, MLX5_UAR_SIZE,
 		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 	if (addr == MAP_FAILED) {
-		DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu",
-			dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
+		DRV_LOG(ERR, "UAR mmap failed: %p size: %llu",
+			sd->uar_base, MLX5_UAR_SIZE);
 		rte_errno = ENXIO;
 		return -rte_errno;
 	}
-	if (priv->uar_base != addr) {
+	if (sd->uar_base != addr) {
 		DRV_LOG(ERR,
-			"port %u UAR address %p size %llu occupied, please"
+			"UAR address %p size %llu occupied, please"
 			" adjust MLX5_UAR_OFFSET or try EAL parameter"
 			" --base-virtaddr",
-			dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
+			sd->uar_base, MLX5_UAR_SIZE);
 		rte_errno = ENXIO;
 		return -rte_errno;
 	}
-	uar_base = addr; /* process local, don't reserve again */
-	DRV_LOG(INFO, "port %u reserved UAR address space: %p",
-		dev->data->port_id, addr);
+	ld->uar_base = addr;
+	DRV_LOG(INFO, "Reserved UAR address space: %p", addr);
+	return 0;
+}
+
+/**
+ * Unmap UAR address space reserved for secondary process.
+ */
+static void
+mlx5_uar_uninit_secondary(void)
+{
+	struct mlx5_local_data *ld = &mlx5_local_data;
+
+	if (!ld->uar_base)
+		return;
+	munmap(ld->uar_base, MLX5_UAR_SIZE);
+	ld->uar_base = NULL;
+}
+
+/**
+ * PMD global initialization.
+ *
+ * Independent from individual device, this function initializes global
+ * per-PMD data structures distinguishing primary and secondary processes.
+ * Hence, each initialization is called once per a process.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_init_once(void)
+{
+	struct mlx5_shared_data *sd;
+	struct mlx5_local_data *ld = &mlx5_local_data;
+	int ret;
+
+	if (mlx5_init_shared_data())
+		return -rte_errno;
+	sd = mlx5_shared_data;
+	assert(sd);
+	rte_spinlock_lock(&sd->lock);
+	switch (rte_eal_process_type()) {
+	case RTE_PROC_PRIMARY:
+		if (sd->init_done)
+			break;
+		LIST_INIT(&sd->mem_event_cb_list);
+		rte_rwlock_init(&sd->mem_event_rwlock);
+		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
+						mlx5_mr_mem_event_cb, NULL);
+		mlx5_mp_init_primary();
+		ret = mlx5_uar_init_primary();
+		if (ret)
+			goto error;
+		sd->init_done = true;
+		break;
+	case RTE_PROC_SECONDARY:
+		if (ld->init_done)
+			break;
+		ret = mlx5_uar_init_secondary();
+		if (ret)
+			goto error;
+		++sd->secondary_cnt;
+		ld->init_done = true;
+		break;
+	default:
+		break;
+	}
+	rte_spinlock_unlock(&sd->lock);
 	return 0;
+error:
+	switch (rte_eal_process_type()) {
+	case RTE_PROC_PRIMARY:
+		mlx5_uar_uninit_primary();
+		mlx5_mp_uninit_primary();
+		rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB", NULL);
+		break;
+	case RTE_PROC_SECONDARY:
+		mlx5_uar_uninit_secondary();
+		break;
+	default:
+		break;
+	}
+	rte_spinlock_unlock(&sd->lock);
+	mlx5_uninit_shared_data();
+	return -rte_errno;
 }
 
 /**
@@ -794,8 +914,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		rte_errno = EEXIST;
 		return NULL;
 	}
-	/* Prepare shared data between primary and secondary process. */
-	mlx5_prepare_shared_data();
 	errno = 0;
 	ctx = mlx5_glue->dv_open_device(ibv_dev);
 	if (ctx) {
@@ -922,11 +1040,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		}
 		eth_dev->device = dpdk_dev;
 		eth_dev->dev_ops = &mlx5_dev_sec_ops;
-		err = mlx5_uar_init_secondary(eth_dev);
-		if (err) {
-			err = rte_errno;
-			goto error;
-		}
 		/* Receive command fd from primary process */
 		err = mlx5_mp_req_verbs_cmd_fd(eth_dev);
 		if (err < 0) {
@@ -1143,11 +1256,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	priv->dev_data = eth_dev->data;
 	eth_dev->data->mac_addrs = priv->mac;
 	eth_dev->device = dpdk_dev;
-	err = mlx5_uar_init_primary(eth_dev);
-	if (err) {
-		err = rte_errno;
-		goto error;
-	}
 	/* Configure the first MAC address by default. */
 	if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
 		DRV_LOG(ERR,
@@ -1363,6 +1471,12 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	struct mlx5_dev_config dev_config;
 	int ret;
 
+	ret = mlx5_init_once();
+	if (ret) {
+		DRV_LOG(ERR, "unable to init PMD global data: %s",
+			strerror(rte_errno));
+		return -rte_errno;
+	}
 	assert(pci_drv == &mlx5_driver);
 	errno = 0;
 	ibv_list = mlx5_glue->get_device_list(&ret);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 7030c6f7d7..cb454e866a 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -85,12 +85,25 @@ struct mlx5_switch_info {
 
 LIST_HEAD(mlx5_dev_list, mlx5_priv);
 
-/* Shared memory between primary and secondary processes. */
+/* Shared data between primary and secondary processes. */
 struct mlx5_shared_data {
+	rte_spinlock_t lock;
+	/* Global spinlock for primary and secondary processes. */
+	int init_done; /* Whether primary has done initialization. */
+	unsigned int secondary_cnt; /* Number of secondary processes init'd. */
+	void *uar_base;
+	/* Reserved UAR address space for TXQ UAR(hw doorbell) mapping. */
 	struct mlx5_dev_list mem_event_cb_list;
 	rte_rwlock_t mem_event_rwlock;
 };
 
+/* Per-process data structure, not visible to other processes. */
+struct mlx5_local_data {
+	int init_done; /* Whether a secondary has done initialization. */
+	void *uar_base;
+	/* Reserved UAR address space for TXQ UAR(hw doorbell) mapping. */
+};
+
 extern struct mlx5_shared_data *mlx5_shared_data;
 
 struct mlx5_counter_ctrl {
@@ -260,7 +273,6 @@ struct mlx5_priv {
 	uint32_t link_speed_capa; /* Link speed capabilities. */
 	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
 	struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
-	void *uar_base; /* Reserved address space for UAR mapping */
 	struct mlx5_dev_config config; /* Device configuration. */
 	struct mlx5_verbs_alloc_ctx verbs_alloc_ctx;
 	/* Context for Verbs allocator. */
@@ -420,7 +432,8 @@ void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
 
 /* mlx5_mp.c */
 int mlx5_mp_req_verbs_cmd_fd(struct rte_eth_dev *dev);
-void mlx5_mp_init(void);
+void mlx5_mp_init_primary(void);
+void mlx5_mp_uninit_primary(void);
 
 /* mlx5_nl.c */
 
diff --git a/drivers/net/mlx5/mlx5_mp.c b/drivers/net/mlx5/mlx5_mp.c
index b8dd4b5fa7..d0a38c3d52 100644
--- a/drivers/net/mlx5/mlx5_mp.c
+++ b/drivers/net/mlx5/mlx5_mp.c
@@ -131,9 +131,22 @@ mlx5_mp_req_verbs_cmd_fd(struct rte_eth_dev *dev)
 	return ret;
 }
 
+/**
+ * Initialize by primary process.
+ */
+void
+mlx5_mp_init_primary(void)
+{
+	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	rte_mp_action_register(MLX5_MP_NAME, mp_primary_handle);
+}
+
+/**
+ * Un-initialize by primary process.
+ */
 void
-mlx5_mp_init(void)
+mlx5_mp_uninit_primary(void)
 {
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_mp_action_register(MLX5_MP_NAME, mp_primary_handle);
+	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	rte_mp_action_unregister(MLX5_MP_NAME);
 }
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index d18561740f..5640fe1b91 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -286,7 +286,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 			}
 		}
 		/* new address in reserved UAR address space. */
-		addr = RTE_PTR_ADD(priv->uar_base,
+		addr = RTE_PTR_ADD(mlx5_shared_data->uar_base,
 				   uar_va & (uintptr_t)(MLX5_UAR_SIZE - 1));
 		if (!already_mapped) {
 			pages[pages_n++] = uar_va;
@@ -844,9 +844,8 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
 	if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
 		txq->ibv = NULL;
-	if (priv->uar_base)
-		munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
-		       page_size), page_size);
+	munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg, page_size),
+	       page_size);
 	if (rte_atomic32_dec_and_test(&txq->refcnt)) {
 		txq_free_elts(txq);
 		mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
-- 
2.11.0


Thread overview: 45+ messages
2019-03-07  7:33 [dpdk-dev] [PATCH 0/4] net/mlx5: rework IPC socket and " Yongseok Koh
2019-03-07  7:33 ` [dpdk-dev] [PATCH 1/4] net/mlx5: fix memory event on secondary process Yongseok Koh
2019-03-07  7:33 ` [dpdk-dev] [PATCH 2/4] net/mlx5: replace IPC socket with EAL API Yongseok Koh
2019-03-14 12:36   ` Shahaf Shuler
2019-03-18 21:29     ` Yongseok Koh
2019-03-07  7:33 ` [dpdk-dev] [PATCH 3/4] net/mlx5: rework PMD global data init Yongseok Koh
2019-03-14 12:36   ` Shahaf Shuler
2019-03-18 21:21     ` Yongseok Koh
2019-03-19  6:54       ` Shahaf Shuler
2019-03-07  7:33 ` [dpdk-dev] [PATCH 4/4] net/mlx5: sync stop/start of datapath with secondary process Yongseok Koh
2019-03-25 19:15 ` [dpdk-dev] [PATCH v2 0/4] net/mlx5: rework IPC socket and PMD global data init Yongseok Koh
2019-03-25 19:15   ` [dpdk-dev] [PATCH v2 1/4] net/mlx5: fix memory event on secondary process Yongseok Koh
2019-03-26 12:28     ` Shahaf Shuler
2019-03-25 19:15   ` [dpdk-dev] [PATCH v2 2/4] net/mlx5: replace IPC socket with EAL API Yongseok Koh
2019-03-26 12:31     ` Shahaf Shuler
2019-03-25 19:15   ` [dpdk-dev] [PATCH v2 3/4] net/mlx5: rework PMD global data init Yongseok Koh [this message]
2019-03-26 12:38     ` Shahaf Shuler
2019-03-25 19:15   ` [dpdk-dev] [PATCH v2 4/4] net/mlx5: sync stop/start of datapath with secondary process Yongseok Koh
2019-03-26 12:49     ` Shahaf Shuler
2019-04-01 21:12 ` [dpdk-dev] [PATCH v3 0/4] net/mlx5: rework IPC socket and PMD global data init Yongseok Koh
2019-04-01 21:12   ` [dpdk-dev] [PATCH v3 1/4] net/mlx5: fix memory event on secondary process Yongseok Koh
2019-04-01 21:12   ` [dpdk-dev] [PATCH v3 2/4] net/mlx5: replace IPC socket with EAL API Yongseok Koh
2019-04-01 21:12   ` [dpdk-dev] [PATCH v3 3/4] net/mlx5: rework PMD global data init Yongseok Koh
2019-04-01 21:12   ` [dpdk-dev] [PATCH v3 4/4] net/mlx5: sync stop/start of datapath with secondary process Yongseok Koh
2019-04-02  7:11   ` [dpdk-dev] [PATCH v3 0/4] net/mlx5: rework IPC socket and PMD global data init Shahaf Shuler
