From: Michael Baum <michaelba@nvidia.com>
To: dev@dpdk.org
Cc: Matan Azrad <matan@nvidia.com>,
Raslan Darawsheh <rasland@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v1 07/18] net/mlx5: share Rx control code
Date: Thu, 3 Sep 2020 10:13:38 +0000
Message-ID: <1599128029-2092-8-git-send-email-michaelba@nvidia.com>
In-Reply-To: <1599128029-2092-1-git-send-email-michaelba@nvidia.com>
Move the Rx queue object resource allocation and the debug logs that
are common to the DevX and Verbs modules into shared code.

The backend-specific rxq_obj_new callbacks now return an errno-style
integer and fill an object pre-allocated by the shared Rx start code,
which also takes over the debug logging, the rxqsobj list handling and
the final object/elements release.
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
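Note: the refactoring pattern is easier to see outside the driver. The
standalone C sketch below is illustrative only; the names (backend_ops,
obj_start, obj_stop, dummy_*) are invented for this example and do not
exist in mlx5. It only mirrors the idea of this patch: the backend
callback returns 0 or a negative errno and fills a pre-allocated
object, while the shared caller owns allocation, logging and release.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int hw_handle;
};

/* Backend callbacks: the constructor fills a pre-allocated object and
 * returns 0 on success or a negative errno value, as rxq_obj_new does
 * after this patch. */
struct backend_ops {
	int (*obj_new)(struct obj *o, unsigned int idx);
	void (*obj_release)(struct obj *o);
};

static int
dummy_obj_new(struct obj *o, unsigned int idx)
{
	o->hw_handle = (int)idx + 1; /* Pretend HW object creation. */
	return 0;
}

static void
dummy_obj_release(struct obj *o)
{
	o->hw_handle = 0; /* Pretend HW object destruction. */
}

/* Shared "start" path: allocation, error logging and registration are
 * done once here instead of being duplicated in every backend. */
static struct obj *
obj_start(const struct backend_ops *ops, unsigned int idx)
{
	struct obj *o = calloc(1, sizeof(*o));
	int ret;

	if (o == NULL) {
		fprintf(stderr, "queue %u: cannot allocate resources\n", idx);
		errno = ENOMEM;
		return NULL;
	}
	ret = ops->obj_new(o, idx);
	if (ret != 0) {
		free(o);
		errno = -ret;
		return NULL;
	}
	printf("queue %u updated with %p\n", idx, (void *)o);
	return o;
}

/* Shared "stop" path: the backend releases its HW resources, the
 * shared code frees the container. */
static void
obj_stop(const struct backend_ops *ops, struct obj *o)
{
	ops->obj_release(o);
	free(o);
}

int
main(void)
{
	const struct backend_ops ops = {
		.obj_new = dummy_obj_new,
		.obj_release = dummy_obj_release,
	};
	struct obj *o = obj_start(&ops, 0);

	if (o == NULL)
		return 1;
	obj_stop(&ops, o);
	return 0;
}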
drivers/net/mlx5/linux/mlx5_verbs.c | 50 +++++++-----------------
drivers/net/mlx5/mlx5.h | 3 +-
drivers/net/mlx5/mlx5_devx.c | 77 ++++++++++---------------------------
drivers/net/mlx5/mlx5_rxq.c | 8 +++-
drivers/net/mlx5/mlx5_rxtx.h | 1 -
drivers/net/mlx5/mlx5_trigger.c | 30 +++++++++++++--
6 files changed, 68 insertions(+), 101 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 3af09db..16e5900 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -269,9 +269,9 @@
* Queue index in DPDK Rx queue array.
*
* @return
- * The Verbs object initialized, NULL otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static struct mlx5_rxq_obj *
+static int
mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -281,24 +281,16 @@
struct ibv_wq_attr mod;
unsigned int cqe_n;
unsigned int wqe_n = 1 << rxq_data->elts_n;
- struct mlx5_rxq_obj *tmpl = NULL;
+ struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
struct mlx5dv_cq cq_info;
struct mlx5dv_rwq rwq;
int ret = 0;
struct mlx5dv_obj obj;
MLX5_ASSERT(rxq_data);
- MLX5_ASSERT(!rxq_ctrl->obj);
+ MLX5_ASSERT(tmpl);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
priv->verbs_alloc_ctx.obj = rxq_ctrl;
- tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
- rxq_ctrl->socket);
- if (!tmpl) {
- DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
- dev->data->port_id, rxq_data->idx);
- rte_errno = ENOMEM;
- goto error;
- }
tmpl->type = MLX5_RXQ_OBJ_TYPE_IBV;
tmpl->rxq_ctrl = rxq_ctrl;
if (rxq_ctrl->irq) {
@@ -316,10 +308,6 @@
cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
else
cqe_n = wqe_n - 1;
- DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
- dev->data->port_id, priv->sh->device_attr.max_qp_wr);
- DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
- dev->data->port_id, priv->sh->device_attr.max_sge);
/* Create CQ using Verbs API. */
tmpl->ibv_cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
if (!tmpl->ibv_cq) {
@@ -382,28 +370,21 @@
rxq_data->cq_arm_sn = 0;
mlx5_rxq_initialize(rxq_data);
rxq_data->cq_ci = 0;
- DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
- idx, (void *)&tmpl);
- LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
- return tmpl;
+ return 0;
error:
- if (tmpl) {
- ret = rte_errno; /* Save rte_errno before cleanup. */
- if (tmpl->wq)
- claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
- if (tmpl->ibv_cq)
- claim_zero(mlx5_glue->destroy_cq(tmpl->ibv_cq));
- if (tmpl->ibv_channel)
- claim_zero(mlx5_glue->destroy_comp_channel
- (tmpl->ibv_channel));
- mlx5_free(tmpl);
- rte_errno = ret; /* Restore rte_errno. */
- }
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (tmpl->wq)
+ claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
+ if (tmpl->ibv_cq)
+ claim_zero(mlx5_glue->destroy_cq(tmpl->ibv_cq));
+ if (tmpl->ibv_channel)
+ claim_zero(mlx5_glue->destroy_comp_channel(tmpl->ibv_channel));
+ rte_errno = ret; /* Restore rte_errno. */
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
- return NULL;
+ return -rte_errno;
}
/**
@@ -418,14 +399,11 @@
MLX5_ASSERT(rxq_obj);
MLX5_ASSERT(rxq_obj->wq);
MLX5_ASSERT(rxq_obj->ibv_cq);
- rxq_free_elts(rxq_obj->rxq_ctrl);
claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
if (rxq_obj->ibv_channel)
claim_zero(mlx5_glue->destroy_comp_channel
(rxq_obj->ibv_channel));
- LIST_REMOVE(rxq_obj, next);
- mlx5_free(rxq_obj);
}
/**
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index f0e2929..5131a47 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -707,8 +707,7 @@ struct mlx5_rxq_obj {
/* HW objects operations structure. */
struct mlx5_obj_ops {
int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_obj *rxq_obj, int on);
- struct mlx5_rxq_obj *(*rxq_obj_new)(struct rte_eth_dev *dev,
- uint16_t idx);
+ int (*rxq_obj_new)(struct rte_eth_dev *dev, uint16_t idx);
int (*rxq_event_get)(struct mlx5_rxq_obj *rxq_obj);
void (*rxq_obj_release)(struct mlx5_rxq_obj *rxq_obj);
};
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 39e2ad5..5b1cf14 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -116,7 +116,6 @@
mlx5_rxq_obj_hairpin_release(rxq_obj);
} else {
MLX5_ASSERT(rxq_obj->devx_cq);
- rxq_free_elts(rxq_obj->rxq_ctrl);
claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
claim_zero(mlx5_release_dbr(&priv->dbrpgs,
@@ -131,8 +130,6 @@
rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
}
- LIST_REMOVE(rxq_obj, next);
- mlx5_free(rxq_obj);
}
/**
@@ -435,9 +432,9 @@
* Queue index in DPDK Rx queue array.
*
* @return
- * The hairpin DevX object initialized, NULL otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static struct mlx5_rxq_obj *
+static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -445,19 +442,11 @@
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct mlx5_devx_create_rq_attr attr = { 0 };
- struct mlx5_rxq_obj *tmpl = NULL;
+ struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
uint32_t max_wq_data;
MLX5_ASSERT(rxq_data);
- MLX5_ASSERT(!rxq_ctrl->obj);
- tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
- rxq_ctrl->socket);
- if (!tmpl) {
- DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
- dev->data->port_id, rxq_data->idx);
- rte_errno = ENOMEM;
- return NULL;
- }
+ MLX5_ASSERT(tmpl);
tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
tmpl->rxq_ctrl = rxq_ctrl;
attr.hairpin = 1;
@@ -468,9 +457,8 @@
DRV_LOG(ERR, "Total data size %u power of 2 is "
"too large for hairpin.",
priv->config.log_hp_size);
- mlx5_free(tmpl);
rte_errno = ERANGE;
- return NULL;
+ return -rte_errno;
}
attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
} else {
@@ -488,15 +476,11 @@
DRV_LOG(ERR,
"Port %u Rx hairpin queue %u can't create rq object.",
dev->data->port_id, idx);
- mlx5_free(tmpl);
rte_errno = errno;
- return NULL;
+ return -rte_errno;
}
- DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
- idx, (void *)&tmpl);
- LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
- return tmpl;
+ return 0;
}
/**
@@ -508,9 +492,9 @@
* Queue index in DPDK Rx queue array.
*
* @return
- * The DevX object initialized, NULL otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static struct mlx5_rxq_obj *
+static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -519,7 +503,7 @@
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
unsigned int cqe_n;
unsigned int wqe_n = 1 << rxq_data->elts_n;
- struct mlx5_rxq_obj *tmpl = NULL;
+ struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
struct mlx5_devx_dbr_page *cq_dbr_page = NULL;
struct mlx5_devx_dbr_page *rq_dbr_page = NULL;
@@ -527,17 +511,9 @@
int ret = 0;
MLX5_ASSERT(rxq_data);
- MLX5_ASSERT(!rxq_ctrl->obj);
+ MLX5_ASSERT(tmpl);
if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
return mlx5_rxq_obj_hairpin_new(dev, idx);
- tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
- rxq_ctrl->socket);
- if (!tmpl) {
- DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
- dev->data->port_id, rxq_data->idx);
- rte_errno = ENOMEM;
- goto error;
- }
tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
tmpl->rxq_ctrl = rxq_ctrl;
if (rxq_ctrl->irq) {
@@ -559,10 +535,6 @@
cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
else
cqe_n = wqe_n - 1;
- DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
- dev->data->port_id, priv->sh->device_attr.max_qp_wr);
- DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
- dev->data->port_id, priv->sh->device_attr.max_sge);
/* Allocate CQ door-bell. */
dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &cq_dbr_page);
if (dbr_offset < 0) {
@@ -608,25 +580,17 @@
rxq_data->cq_arm_sn = 0;
mlx5_rxq_initialize(rxq_data);
rxq_data->cq_ci = 0;
- DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
- idx, (void *)&tmpl);
- LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
rxq_ctrl->wqn = tmpl->rq->id;
- return tmpl;
+ return 0;
error:
- if (tmpl) {
- ret = rte_errno; /* Save rte_errno before cleanup. */
- if (tmpl->rq)
- claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
- if (tmpl->devx_cq)
- claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
- if (tmpl->devx_channel)
- mlx5_glue->devx_destroy_event_channel
- (tmpl->devx_channel);
- mlx5_free(tmpl);
- rte_errno = ret; /* Restore rte_errno. */
- }
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (tmpl->rq)
+ claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
+ if (tmpl->devx_cq)
+ claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
+ if (tmpl->devx_channel)
+ mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
if (rq_dbr_page)
claim_zero(mlx5_release_dbr(&priv->dbrpgs,
rxq_ctrl->rq_dbr_umem_id,
@@ -637,7 +601,8 @@
rxq_ctrl->cq_dbr_offset));
rxq_release_devx_rq_resources(rxq_ctrl);
rxq_release_devx_cq_resources(rxq_ctrl);
- return NULL;
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
struct mlx5_obj_ops devx_obj_ops = {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 46d5f6c..00ef230 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -347,7 +347,7 @@
* @param rxq_ctrl
* Pointer to RX queue structure.
*/
-void
+static void
rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
@@ -1651,10 +1651,14 @@ struct mlx5_rxq_ctrl *
return 1;
if (rxq_ctrl->obj) {
priv->obj_ops->rxq_obj_release(rxq_ctrl->obj);
+ LIST_REMOVE(rxq_ctrl->obj, next);
+ mlx5_free(rxq_ctrl->obj);
rxq_ctrl->obj = NULL;
}
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+ rxq_free_elts(rxq_ctrl);
+ }
LIST_REMOVE(rxq_ctrl, next);
mlx5_free(rxq_ctrl);
(*priv->rxqs)[idx] = NULL;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 4baf5b9..d4a6c50 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -400,7 +400,6 @@ struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
-void rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 6376719..43eff93 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -10,6 +10,8 @@
#include <rte_interrupts.h>
#include <rte_alarm.h>
+#include <mlx5_malloc.h>
+
#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
@@ -115,6 +117,10 @@
/* Should not release Rx queues but return immediately. */
return -rte_errno;
}
+ DRV_LOG(DEBUG, "Port %u device_attr.max_qp_wr is %d.",
+ dev->data->port_id, priv->sh->device_attr.max_qp_wr);
+ DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
+ dev->data->port_id, priv->sh->device_attr.max_sge);
for (i = 0; i != priv->rxqs_n; ++i) {
struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
struct rte_mempool *mp;
@@ -125,17 +131,33 @@
/* Pre-register Rx mempool. */
mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
- DRV_LOG(DEBUG, "port %u Rx queue %u registering mp %s"
- " having %u chunks", dev->data->port_id,
+ DRV_LOG(DEBUG, "Port %u Rx queue %u registering mp %s"
+ " having %u chunks.", dev->data->port_id,
rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
ret = rxq_alloc_elts(rxq_ctrl);
if (ret)
goto error;
}
- rxq_ctrl->obj = priv->obj_ops->rxq_obj_new(dev, i);
- if (!rxq_ctrl->obj)
+ MLX5_ASSERT(!rxq_ctrl->obj);
+ rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ sizeof(*rxq_ctrl->obj), 0,
+ rxq_ctrl->socket);
+ if (!rxq_ctrl->obj) {
+ DRV_LOG(ERR,
+ "Port %u Rx queue %u can't allocate resources.",
+ dev->data->port_id, (*priv->rxqs)[i]->idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ ret = priv->obj_ops->rxq_obj_new(dev, i);
+ if (ret) {
+ mlx5_free(rxq_ctrl->obj);
goto error;
+ }
+ DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.",
+ dev->data->port_id, i, (void *)&rxq_ctrl->obj);
+ LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
}
return 0;
error:
--
1.8.3.1