From: Li Zhang <lizh@nvidia.com>
To: <orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,
<shahafs@nvidia.com>
Cc: <dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,
<roniba@nvidia.com>
Subject: [PATCH v2 07/15] vdpa/mlx5: optimize datapath-control synchronization
Date: Thu, 16 Jun 2022 05:30:04 +0300 [thread overview]
Message-ID: <20220616023012.16013-8-lizh@nvidia.com> (raw)
In-Reply-To: <20220616023012.16013-1-lizh@nvidia.com>
The driver used a single global lock for any synchronization
needed for the datapath and control path.
It is better to group each critical section with
the other operations that must be synchronized with it.
Replace the global lock with the following locks:
1. Virtq locks (one per virtq) synchronize datapath polling and
parallel configurations on the same virtq.
2. A doorbell lock synchronizes doorbell updates; the doorbell
is shared by all the virtqs in the device.
3. A steering lock synchronizes updates of the shared steering objects.
Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
drivers/vdpa/mlx5/mlx5_vdpa.c | 24 ++++---
drivers/vdpa/mlx5/mlx5_vdpa.h | 13 ++--
drivers/vdpa/mlx5/mlx5_vdpa_event.c | 97 ++++++++++++++++++-----------
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 34 +++++++---
drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 7 ++-
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 88 +++++++++++++++++++-------
6 files changed, 184 insertions(+), 79 deletions(-)
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index ee99952e11..e5a11f72fd 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -135,6 +135,7 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
struct mlx5_vdpa_priv *priv =
mlx5_vdpa_find_priv_resource_by_vdev(vdev);
+ struct mlx5_vdpa_virtq *virtq;
int ret;
if (priv == NULL) {
@@ -145,9 +146,10 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
DRV_LOG(ERR, "Too big vring id: %d.", vring);
return -E2BIG;
}
- pthread_mutex_lock(&priv->vq_config_lock);
+ virtq = &priv->virtqs[vring];
+ pthread_mutex_lock(&virtq->virtq_lock);
ret = mlx5_vdpa_virtq_enable(priv, vring, state);
- pthread_mutex_unlock(&priv->vq_config_lock);
+ pthread_mutex_unlock(&virtq->virtq_lock);
return ret;
}
@@ -267,7 +269,9 @@ mlx5_vdpa_dev_close(int vid)
ret |= mlx5_vdpa_lm_log(priv);
priv->state = MLX5_VDPA_STATE_IN_PROGRESS;
}
+ pthread_mutex_lock(&priv->steer_update_lock);
mlx5_vdpa_steer_unset(priv);
+ pthread_mutex_unlock(&priv->steer_update_lock);
mlx5_vdpa_virtqs_release(priv);
mlx5_vdpa_drain_cq(priv);
if (priv->lm_mr.addr)
@@ -276,8 +280,6 @@ mlx5_vdpa_dev_close(int vid)
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
- /* The mutex may stay locked after event thread cancel - initiate it. */
- pthread_mutex_init(&priv->vq_config_lock, NULL);
DRV_LOG(INFO, "vDPA device %d was closed.", vid);
return ret;
}
@@ -549,15 +551,21 @@ mlx5_vdpa_config_get(struct mlx5_kvargs_ctrl *mkvlist,
static int
mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
{
+ struct mlx5_vdpa_virtq *virtq;
uint32_t index;
uint32_t i;
+ for (index = 0; index < priv->caps.max_num_virtio_queues * 2;
+ index++) {
+ virtq = &priv->virtqs[index];
+ pthread_mutex_init(&virtq->virtq_lock, NULL);
+ }
if (!priv->queues)
return 0;
for (index = 0; index < (priv->queues * 2); ++index) {
- struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
+ virtq = &priv->virtqs[index];
int ret = mlx5_vdpa_event_qp_prepare(priv, priv->queue_size,
- -1, &virtq->eqp);
+ -1, virtq);
if (ret) {
DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
@@ -713,7 +721,8 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
priv->num_lag_ports = attr->num_lag_ports;
if (attr->num_lag_ports == 0)
priv->num_lag_ports = 1;
- pthread_mutex_init(&priv->vq_config_lock, NULL);
+ rte_spinlock_init(&priv->db_lock);
+ pthread_mutex_init(&priv->steer_update_lock, NULL);
priv->cdev = cdev;
mlx5_vdpa_config_get(mkvlist, priv);
if (mlx5_vdpa_create_dev_resources(priv))
@@ -797,7 +806,6 @@ mlx5_vdpa_dev_release(struct mlx5_vdpa_priv *priv)
mlx5_vdpa_release_dev_resources(priv);
if (priv->vdev)
rte_vdpa_unregister_device(priv->vdev);
- pthread_mutex_destroy(&priv->vq_config_lock);
rte_free(priv);
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index e5553079fe..3fd5eefc5e 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -82,6 +82,7 @@ struct mlx5_vdpa_virtq {
bool stopped;
uint32_t configured:1;
uint32_t version;
+ pthread_mutex_t virtq_lock;
struct mlx5_vdpa_priv *priv;
struct mlx5_devx_obj *virtq;
struct mlx5_devx_obj *counters;
@@ -126,7 +127,8 @@ struct mlx5_vdpa_priv {
TAILQ_ENTRY(mlx5_vdpa_priv) next;
bool connected;
enum mlx5_dev_state state;
- pthread_mutex_t vq_config_lock;
+ rte_spinlock_t db_lock;
+ pthread_mutex_t steer_update_lock;
uint64_t no_traffic_counter;
pthread_t timer_tid;
int event_mode;
@@ -222,14 +224,15 @@ int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);
* Number of descriptors.
* @param[in] callfd
* The guest notification file descriptor.
- * @param[in/out] eqp
- * Pointer to the event QP structure.
+ * @param[in/out] virtq
+ * Pointer to the virt-queue structure.
*
* @return
* 0 on success, -1 otherwise and rte_errno is set.
*/
-int mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
- int callfd, struct mlx5_vdpa_event_qp *eqp);
+int
+mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
+ int callfd, struct mlx5_vdpa_virtq *virtq);
/**
* Destroy an event QP and all its related resources.
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
index b43dca9255..2b0f5936d1 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
@@ -85,12 +85,13 @@ mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
- int callfd, struct mlx5_vdpa_cq *cq)
+ int callfd, struct mlx5_vdpa_virtq *virtq)
{
struct mlx5_devx_cq_attr attr = {
.use_first_only = 1,
.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
};
+ struct mlx5_vdpa_cq *cq = &virtq->eqp.cq;
uint16_t event_nums[1] = {0};
int ret;
@@ -102,10 +103,11 @@ mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
cq->log_desc_n = log_desc_n;
rte_spinlock_init(&cq->sl);
/* Subscribe CQ event to the event channel controlled by the driver. */
- ret = mlx5_os_devx_subscribe_devx_event(priv->eventc,
- cq->cq_obj.cq->obj,
- sizeof(event_nums), event_nums,
- (uint64_t)(uintptr_t)cq);
+ ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc,
+ cq->cq_obj.cq->obj,
+ sizeof(event_nums),
+ event_nums,
+ (uint64_t)(uintptr_t)virtq);
if (ret) {
DRV_LOG(ERR, "Failed to subscribe CQE event.");
rte_errno = errno;
@@ -167,13 +169,17 @@ mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
+ struct mlx5_vdpa_virtq *virtq;
struct mlx5_vdpa_cq *cq;
int i;
for (i = 0; i < priv->nr_virtqs; i++) {
+ virtq = &priv->virtqs[i];
+ pthread_mutex_lock(&virtq->virtq_lock);
cq = &priv->virtqs[i].eqp.cq;
if (cq->cq_obj.cq && !cq->armed)
mlx5_vdpa_cq_arm(priv, cq);
+ pthread_mutex_unlock(&virtq->virtq_lock);
}
}
@@ -220,13 +226,18 @@ mlx5_vdpa_queue_complete(struct mlx5_vdpa_cq *cq)
static uint32_t
mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)
{
- int i;
+ struct mlx5_vdpa_virtq *virtq;
+ struct mlx5_vdpa_cq *cq;
uint32_t max = 0;
+ uint32_t comp;
+ int i;
for (i = 0; i < priv->nr_virtqs; i++) {
- struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;
- uint32_t comp = mlx5_vdpa_queue_complete(cq);
-
+ virtq = &priv->virtqs[i];
+ pthread_mutex_lock(&virtq->virtq_lock);
+ cq = &virtq->eqp.cq;
+ comp = mlx5_vdpa_queue_complete(cq);
+ pthread_mutex_unlock(&virtq->virtq_lock);
if (comp > max)
max = comp;
}
@@ -253,7 +264,7 @@ mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv)
}
/* Wait on all CQs channel for completion event. */
-static struct mlx5_vdpa_cq *
+static struct mlx5_vdpa_virtq *
mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
@@ -265,7 +276,8 @@ mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
sizeof(out.buf));
if (ret >= 0)
- return (struct mlx5_vdpa_cq *)(uintptr_t)out.event_resp.cookie;
+ return (struct mlx5_vdpa_virtq *)
+ (uintptr_t)out.event_resp.cookie;
DRV_LOG(INFO, "Got error in devx_get_event, ret = %d, errno = %d.",
ret, errno);
#endif
@@ -276,7 +288,7 @@ static void *
mlx5_vdpa_event_handle(void *arg)
{
struct mlx5_vdpa_priv *priv = arg;
- struct mlx5_vdpa_cq *cq;
+ struct mlx5_vdpa_virtq *virtq;
uint32_t max;
switch (priv->event_mode) {
@@ -284,7 +296,6 @@ mlx5_vdpa_event_handle(void *arg)
case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
priv->timer_delay_us = priv->event_us;
while (1) {
- pthread_mutex_lock(&priv->vq_config_lock);
max = mlx5_vdpa_queues_complete(priv);
if (max == 0 && priv->no_traffic_counter++ >=
priv->no_traffic_max) {
@@ -292,32 +303,37 @@ mlx5_vdpa_event_handle(void *arg)
priv->vdev->device->name);
mlx5_vdpa_arm_all_cqs(priv);
do {
- pthread_mutex_unlock
- (&priv->vq_config_lock);
- cq = mlx5_vdpa_event_wait(priv);
- pthread_mutex_lock
- (&priv->vq_config_lock);
- if (cq == NULL ||
- mlx5_vdpa_queue_complete(cq) > 0)
+ virtq = mlx5_vdpa_event_wait(priv);
+ if (virtq == NULL)
break;
+ pthread_mutex_lock(
+ &virtq->virtq_lock);
+ if (mlx5_vdpa_queue_complete(
+ &virtq->eqp.cq) > 0) {
+ pthread_mutex_unlock(
+ &virtq->virtq_lock);
+ break;
+ }
+ pthread_mutex_unlock(
+ &virtq->virtq_lock);
} while (1);
priv->timer_delay_us = priv->event_us;
priv->no_traffic_counter = 0;
} else if (max != 0) {
priv->no_traffic_counter = 0;
}
- pthread_mutex_unlock(&priv->vq_config_lock);
mlx5_vdpa_timer_sleep(priv, max);
}
return NULL;
case MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT:
do {
- cq = mlx5_vdpa_event_wait(priv);
- if (cq != NULL) {
- pthread_mutex_lock(&priv->vq_config_lock);
- if (mlx5_vdpa_queue_complete(cq) > 0)
- mlx5_vdpa_cq_arm(priv, cq);
- pthread_mutex_unlock(&priv->vq_config_lock);
+ virtq = mlx5_vdpa_event_wait(priv);
+ if (virtq != NULL) {
+ pthread_mutex_lock(&virtq->virtq_lock);
+ if (mlx5_vdpa_queue_complete(
+ &virtq->eqp.cq) > 0)
+ mlx5_vdpa_cq_arm(priv, &virtq->eqp.cq);
+ pthread_mutex_unlock(&virtq->virtq_lock);
}
} while (1);
return NULL;
@@ -339,7 +355,6 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
struct mlx5_vdpa_virtq *virtq;
uint64_t sec;
- pthread_mutex_lock(&priv->vq_config_lock);
while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
sizeof(out.buf)) >=
(ssize_t)sizeof(out.event_resp.cookie)) {
@@ -351,10 +366,11 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
continue;
}
virtq = &priv->virtqs[vq_index];
+ pthread_mutex_lock(&virtq->virtq_lock);
if (!virtq->enable || virtq->version != version)
- continue;
+ goto unlock;
if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
- continue;
+ goto unlock;
virtq->stopped = true;
/* Query error info. */
if (mlx5_vdpa_virtq_query(priv, vq_index))
@@ -384,8 +400,9 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
for (i = 1; i < RTE_DIM(virtq->err_time); i++)
virtq->err_time[i - 1] = virtq->err_time[i];
virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
+unlock:
+ pthread_mutex_unlock(&virtq->virtq_lock);
}
- pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}
@@ -533,11 +550,18 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
+ struct mlx5_vdpa_virtq *virtq;
void *status;
+ int i;
if (priv->timer_tid) {
pthread_cancel(priv->timer_tid);
pthread_join(priv->timer_tid, &status);
+ /* The mutex may stay locked after event thread cancel, initiate it. */
+ for (i = 0; i < priv->nr_virtqs; i++) {
+ virtq = &priv->virtqs[i];
+ pthread_mutex_init(&virtq->virtq_lock, NULL);
+ }
}
priv->timer_tid = 0;
}
@@ -614,8 +638,9 @@ mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)
int
mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
- int callfd, struct mlx5_vdpa_event_qp *eqp)
+ int callfd, struct mlx5_vdpa_virtq *virtq)
{
+ struct mlx5_vdpa_event_qp *eqp = &virtq->eqp;
struct mlx5_devx_qp_attr attr = {0};
uint16_t log_desc_n = rte_log2_u32(desc_n);
uint32_t ret;
@@ -632,7 +657,8 @@ mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
}
if (eqp->fw_qp)
mlx5_vdpa_event_qp_destroy(eqp);
- if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
+ if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, virtq) ||
+ !eqp->cq.cq_obj.cq)
return -1;
attr.pd = priv->cdev->pdn;
attr.ts_format =
@@ -650,8 +676,8 @@ mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
attr.ts_format =
mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp),
- attr.num_of_receive_wqes *
- MLX5_WSEG_SIZE, &attr, SOCKET_ID_ANY);
+ attr.num_of_receive_wqes * MLX5_WSEG_SIZE,
+ &attr, SOCKET_ID_ANY);
if (ret) {
DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
goto error;
@@ -668,3 +694,4 @@ mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
mlx5_vdpa_event_qp_destroy(eqp);
return -1;
}
+
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index a8faf0c116..efebf364d0 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -25,11 +25,18 @@ mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable)
if (!virtq->configured) {
DRV_LOG(DEBUG, "virtq %d is invalid for dirty bitmap "
"enabling.", i);
- } else if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq,
+ } else {
+ struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
+
+ pthread_mutex_lock(&virtq->virtq_lock);
+ if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq,
&attr)) {
- DRV_LOG(ERR, "Failed to modify virtq %d for dirty "
+ pthread_mutex_unlock(&virtq->virtq_lock);
+ DRV_LOG(ERR, "Failed to modify virtq %d for dirty "
"bitmap enabling.", i);
- return -1;
+ return -1;
+ }
+ pthread_mutex_unlock(&virtq->virtq_lock);
}
}
return 0;
@@ -61,10 +68,19 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
virtq = &priv->virtqs[i];
if (!virtq->configured) {
DRV_LOG(DEBUG, "virtq %d is invalid for LM.", i);
- } else if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq,
- &attr)) {
- DRV_LOG(ERR, "Failed to modify virtq %d for LM.", i);
- goto err;
+ } else {
+ struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
+
+ pthread_mutex_lock(&virtq->virtq_lock);
+ if (mlx5_devx_cmd_modify_virtq(
+ priv->virtqs[i].virtq,
+ &attr)) {
+ pthread_mutex_unlock(&virtq->virtq_lock);
+ DRV_LOG(ERR,
+ "Failed to modify virtq %d for LM.", i);
+ goto err;
+ }
+ pthread_mutex_unlock(&virtq->virtq_lock);
}
}
return 0;
@@ -79,6 +95,7 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
int
mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
{
+ struct mlx5_vdpa_virtq *virtq;
uint64_t features;
int ret = rte_vhost_get_negotiated_features(priv->vid, &features);
int i;
@@ -90,10 +107,13 @@ mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
if (!RTE_VHOST_NEED_LOG(features))
return 0;
for (i = 0; i < priv->nr_virtqs; ++i) {
+ virtq = &priv->virtqs[i];
if (!priv->virtqs[i].virtq) {
DRV_LOG(DEBUG, "virtq %d is invalid for LM log.", i);
} else {
+ pthread_mutex_lock(&virtq->virtq_lock);
ret = mlx5_vdpa_virtq_stop(priv, i);
+ pthread_mutex_unlock(&virtq->virtq_lock);
if (ret) {
DRV_LOG(ERR, "Failed to stop virtq %d for LM "
"log.", i);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
index d4b4375c88..4cbf09784e 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
@@ -237,19 +237,24 @@ mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
int
mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)
{
- int ret = mlx5_vdpa_rqt_prepare(priv);
+ int ret;
+ pthread_mutex_lock(&priv->steer_update_lock);
+ ret = mlx5_vdpa_rqt_prepare(priv);
if (ret == 0) {
mlx5_vdpa_steer_unset(priv);
} else if (ret < 0) {
+ pthread_mutex_unlock(&priv->steer_update_lock);
return ret;
} else if (!priv->steer.rss[0].flow) {
ret = mlx5_vdpa_rss_flows_create(priv);
if (ret) {
DRV_LOG(ERR, "Cannot create RSS flows.");
+ pthread_mutex_unlock(&priv->steer_update_lock);
return -1;
}
}
+ pthread_mutex_unlock(&priv->steer_update_lock);
return 0;
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 55cbc9fad2..138b7bdbc5 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -24,13 +24,17 @@ mlx5_vdpa_virtq_kick_handler(void *cb_arg)
int nbytes;
int retry;
+ pthread_mutex_lock(&virtq->virtq_lock);
if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
+ pthread_mutex_unlock(&virtq->virtq_lock);
DRV_LOG(ERR, "device %d queue %d down, skip kick handling",
priv->vid, virtq->index);
return;
}
- if (rte_intr_fd_get(virtq->intr_handle) < 0)
+ if (rte_intr_fd_get(virtq->intr_handle) < 0) {
+ pthread_mutex_unlock(&virtq->virtq_lock);
return;
+ }
for (retry = 0; retry < 3; ++retry) {
nbytes = read(rte_intr_fd_get(virtq->intr_handle), &buf,
8);
@@ -44,9 +48,14 @@ mlx5_vdpa_virtq_kick_handler(void *cb_arg)
}
break;
}
- if (nbytes < 0)
+ if (nbytes < 0) {
+ pthread_mutex_unlock(&virtq->virtq_lock);
return;
+ }
+ rte_spinlock_lock(&priv->db_lock);
rte_write32(virtq->index, priv->virtq_db_addr);
+ rte_spinlock_unlock(&priv->db_lock);
+ pthread_mutex_unlock(&virtq->virtq_lock);
if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
DRV_LOG(ERR, "device %d queue %d down, skip kick handling",
priv->vid, virtq->index);
@@ -66,6 +75,33 @@ mlx5_vdpa_virtq_kick_handler(void *cb_arg)
DRV_LOG(DEBUG, "Ring virtq %u doorbell.", virtq->index);
}
+/* Virtq must be locked before calling this function. */
+static void
+mlx5_vdpa_virtq_unregister_intr_handle(struct mlx5_vdpa_virtq *virtq)
+{
+ int ret = -EAGAIN;
+
+ if (!virtq->intr_handle)
+ return;
+ if (rte_intr_fd_get(virtq->intr_handle) >= 0) {
+ while (ret == -EAGAIN) {
+ ret = rte_intr_callback_unregister(virtq->intr_handle,
+ mlx5_vdpa_virtq_kick_handler, virtq);
+ if (ret == -EAGAIN) {
+ DRV_LOG(DEBUG, "Try again to unregister fd %d of virtq %hu interrupt",
+ rte_intr_fd_get(virtq->intr_handle),
+ virtq->index);
+ pthread_mutex_unlock(&virtq->virtq_lock);
+ usleep(MLX5_VDPA_INTR_RETRIES_USEC);
+ pthread_mutex_lock(&virtq->virtq_lock);
+ }
+ }
+ (void)rte_intr_fd_set(virtq->intr_handle, -1);
+ }
+ rte_intr_instance_free(virtq->intr_handle);
+ virtq->intr_handle = NULL;
+}
+
/* Release cached VQ resources. */
void
mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
@@ -75,6 +111,7 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
+ pthread_mutex_lock(&virtq->virtq_lock);
virtq->configured = 0;
for (j = 0; j < RTE_DIM(virtq->umems); ++j) {
if (virtq->umems[j].obj) {
@@ -90,28 +127,17 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
}
if (virtq->eqp.fw_qp)
mlx5_vdpa_event_qp_destroy(&virtq->eqp);
+ pthread_mutex_unlock(&virtq->virtq_lock);
}
}
+
static int
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
int ret = -EAGAIN;
- if (rte_intr_fd_get(virtq->intr_handle) >= 0) {
- while (ret == -EAGAIN) {
- ret = rte_intr_callback_unregister(virtq->intr_handle,
- mlx5_vdpa_virtq_kick_handler, virtq);
- if (ret == -EAGAIN) {
- DRV_LOG(DEBUG, "Try again to unregister fd %d of virtq %hu interrupt",
- rte_intr_fd_get(virtq->intr_handle),
- virtq->index);
- usleep(MLX5_VDPA_INTR_RETRIES_USEC);
- }
- }
- rte_intr_fd_set(virtq->intr_handle, -1);
- }
- rte_intr_instance_free(virtq->intr_handle);
+ mlx5_vdpa_virtq_unregister_intr_handle(virtq);
if (virtq->configured) {
ret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);
if (ret)
@@ -128,10 +154,15 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
{
+ struct mlx5_vdpa_virtq *virtq;
int i;
- for (i = 0; i < priv->nr_virtqs; i++)
- mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
+ for (i = 0; i < priv->nr_virtqs; i++) {
+ virtq = &priv->virtqs[i];
+ pthread_mutex_lock(&virtq->virtq_lock);
+ mlx5_vdpa_virtq_unset(virtq);
+ pthread_mutex_unlock(&virtq->virtq_lock);
+ }
priv->features = 0;
priv->nr_virtqs = 0;
}
@@ -250,7 +281,7 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
MLX5_VIRTQ_EVENT_MODE_QP : MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
if (attr->event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
ret = mlx5_vdpa_event_qp_prepare(priv,
- vq->size, vq->callfd, &virtq->eqp);
+ vq->size, vq->callfd, virtq);
if (ret) {
DRV_LOG(ERR,
"Failed to create event QPs for virtq %d.",
@@ -420,7 +451,9 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
}
claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
virtq->configured = 1;
+ rte_spinlock_lock(&priv->db_lock);
rte_write32(virtq->index, priv->virtq_db_addr);
+ rte_spinlock_unlock(&priv->db_lock);
/* Setup doorbell mapping. */
virtq->intr_handle =
rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
@@ -441,7 +474,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
if (rte_intr_callback_register(virtq->intr_handle,
mlx5_vdpa_virtq_kick_handler,
virtq)) {
- rte_intr_fd_set(virtq->intr_handle, -1);
+ (void)rte_intr_fd_set(virtq->intr_handle, -1);
DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
index);
goto error;
@@ -537,6 +570,7 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
uint32_t i;
uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
+ struct mlx5_vdpa_virtq *virtq;
if (ret || mlx5_vdpa_features_validate(priv)) {
DRV_LOG(ERR, "Failed to configure negotiated features.");
@@ -556,9 +590,17 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
return -1;
}
priv->nr_virtqs = nr_vring;
- for (i = 0; i < nr_vring; i++)
- if (priv->virtqs[i].enable && mlx5_vdpa_virtq_setup(priv, i))
- goto error;
+ for (i = 0; i < nr_vring; i++) {
+ virtq = &priv->virtqs[i];
+ if (virtq->enable) {
+ pthread_mutex_lock(&virtq->virtq_lock);
+ if (mlx5_vdpa_virtq_setup(priv, i)) {
+ pthread_mutex_unlock(&virtq->virtq_lock);
+ goto error;
+ }
+ pthread_mutex_unlock(&virtq->virtq_lock);
+ }
+ }
return 0;
error:
mlx5_vdpa_virtqs_release(priv);
--
2.30.2
next prev parent reply other threads:[~2022-06-16 2:31 UTC|newest]
Thread overview: 137+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-04-08 7:55 [RFC 00/15] Add vDPA multi-threads optiomization Li Zhang
2022-04-08 7:55 ` [RFC 01/15] examples/vdpa: fix vDPA device remove Li Zhang
2022-04-08 7:55 ` [RFC 02/15] vdpa/mlx5: support pre create virtq resource Li Zhang
2022-04-08 7:55 ` [RFC 03/15] common/mlx5: add DevX API to move QP to reset state Li Zhang
2022-04-08 7:55 ` [RFC 04/15] vdpa/mlx5: support event qp reuse Li Zhang
2022-04-08 7:55 ` [RFC 05/15] common/mlx5: extend virtq modifiable fields Li Zhang
2022-04-08 7:55 ` [RFC 06/15] vdpa/mlx5: pre-create virtq in the prob Li Zhang
2022-04-08 7:55 ` [RFC 07/15] vdpa/mlx5: optimize datapath-control synchronization Li Zhang
2022-04-08 7:55 ` [RFC 08/15] vdpa/mlx5: add multi-thread management for configuration Li Zhang
2022-04-08 7:55 ` [RFC 09/15] vdpa/mlx5: add task ring for MT management Li Zhang
2022-04-08 7:56 ` [RFC 10/15] vdpa/mlx5: add MT task for VM memory registration Li Zhang
2022-04-08 7:56 ` [RFC 11/15] vdpa/mlx5: add virtq creation task for MT management Li Zhang
2022-04-08 7:56 ` [RFC 12/15] vdpa/mlx5: add virtq LM log task Li Zhang
2022-04-08 7:56 ` [RFC 13/15] vdpa/mlx5: add device close task Li Zhang
2022-04-08 7:56 ` [RFC 14/15] vdpa/mlx5: add virtq sub-resources creation Li Zhang
2022-04-08 7:56 ` [RFC 15/15] vdpa/mlx5: prepare virtqueue resource creation Li Zhang
2022-06-06 11:20 ` [PATCH v1 00/17] Add vDPA multi-threads optiomization Li Zhang
2022-06-06 11:20 ` [PATCH v1 01/17] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
2022-06-06 11:20 ` [PATCH v1 02/17] eal: add device removal in rte cleanup Li Zhang
2022-06-06 11:20 ` [PATCH 02/16] examples/vdpa: fix vDPA device remove Li Zhang
2022-06-06 11:20 ` [PATCH v1 03/17] examples/vdpa: fix devices cleanup Li Zhang
2022-06-06 11:20 ` [PATCH 03/16] vdpa/mlx5: support pre create virtq resource Li Zhang
2022-06-06 11:20 ` [PATCH 04/16] common/mlx5: add DevX API to move QP to reset state Li Zhang
2022-06-06 11:20 ` [PATCH v1 04/17] vdpa/mlx5: support pre create virtq resource Li Zhang
2022-06-06 11:20 ` [PATCH v1 05/17] common/mlx5: add DevX API to move QP to reset state Li Zhang
2022-06-06 11:20 ` [PATCH 05/16] vdpa/mlx5: support event qp reuse Li Zhang
2022-06-06 11:20 ` [PATCH 06/16] common/mlx5: extend virtq modifiable fields Li Zhang
2022-06-06 11:20 ` [PATCH v1 06/17] vdpa/mlx5: support event qp reuse Li Zhang
2022-06-06 11:20 ` [PATCH v1 07/17] common/mlx5: extend virtq modifiable fields Li Zhang
2022-06-06 11:20 ` [PATCH 07/16] vdpa/mlx5: pre-create virtq in the prob Li Zhang
2022-06-06 11:20 ` [PATCH 08/16] vdpa/mlx5: optimize datapath-control synchronization Li Zhang
2022-06-06 11:20 ` [PATCH v1 08/17] vdpa/mlx5: pre-create virtq in the prob Li Zhang
2022-06-06 11:20 ` [PATCH 09/16] vdpa/mlx5: add multi-thread management for configuration Li Zhang
2022-06-06 11:20 ` [PATCH v1 09/17] vdpa/mlx5: optimize datapath-control synchronization Li Zhang
2022-06-06 11:20 ` [PATCH v1 10/17] vdpa/mlx5: add multi-thread management for configuration Li Zhang
2022-06-06 11:20 ` [PATCH 10/16] vdpa/mlx5: add task ring for MT management Li Zhang
2022-06-06 11:20 ` [PATCH 11/16] vdpa/mlx5: add MT task for VM memory registration Li Zhang
2022-06-06 11:20 ` [PATCH v1 11/17] vdpa/mlx5: add task ring for MT management Li Zhang
2022-06-06 11:20 ` [PATCH v1 12/17] vdpa/mlx5: add MT task for VM memory registration Li Zhang
2022-06-06 11:21 ` [PATCH 12/16] vdpa/mlx5: add virtq creation task for MT management Li Zhang
2022-06-06 11:21 ` [PATCH 13/16] vdpa/mlx5: add virtq LM log task Li Zhang
2022-06-06 11:21 ` [PATCH v1 13/17] vdpa/mlx5: add virtq creation task for MT management Li Zhang
2022-06-06 11:21 ` [PATCH 14/16] vdpa/mlx5: add device close task Li Zhang
2022-06-06 11:21 ` [PATCH v1 14/17] vdpa/mlx5: add virtq LM log task Li Zhang
2022-06-06 11:21 ` [PATCH v1 15/17] vdpa/mlx5: add device close task Li Zhang
2022-06-06 11:21 ` [PATCH 15/16] vdpa/mlx5: add virtq sub-resources creation Li Zhang
2022-06-06 11:21 ` [PATCH v1 16/17] " Li Zhang
2022-06-06 11:21 ` [PATCH 16/16] vdpa/mlx5: prepare virtqueue resource creation Li Zhang
2022-06-06 11:21 ` [PATCH v1 17/17] " Li Zhang
2022-06-06 11:46 ` [PATCH v1 00/17] Add vDPA multi-threads optiomization Li Zhang
2022-06-06 11:46 ` [PATCH v1 01/17] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
2022-06-06 11:46 ` [PATCH v1 02/17] eal: add device removal in rte cleanup Li Zhang
2022-06-06 11:46 ` [PATCH v1 03/17] examples/vdpa: fix devices cleanup Li Zhang
2022-06-06 11:46 ` [PATCH v1 04/17] vdpa/mlx5: support pre create virtq resource Li Zhang
2022-06-06 11:46 ` [PATCH v1 05/17] common/mlx5: add DevX API to move QP to reset state Li Zhang
2022-06-06 11:46 ` [PATCH v1 06/17] vdpa/mlx5: support event qp reuse Li Zhang
2022-06-06 11:46 ` [PATCH v1 07/17] common/mlx5: extend virtq modifiable fields Li Zhang
2022-06-06 11:46 ` [PATCH v1 08/17] vdpa/mlx5: pre-create virtq in the prob Li Zhang
2022-06-06 11:46 ` [PATCH v1 09/17] vdpa/mlx5: optimize datapath-control synchronization Li Zhang
2022-06-06 11:46 ` [PATCH v1 10/17] vdpa/mlx5: add multi-thread management for configuration Li Zhang
2022-06-06 11:46 ` [PATCH v1 11/17] vdpa/mlx5: add task ring for MT management Li Zhang
2022-06-06 11:46 ` [PATCH v1 12/17] vdpa/mlx5: add MT task for VM memory registration Li Zhang
2022-06-06 11:46 ` [PATCH v1 13/17] vdpa/mlx5: add virtq creation task for MT management Li Zhang
2022-06-06 11:46 ` [PATCH v1 14/17] vdpa/mlx5: add virtq LM log task Li Zhang
2022-06-06 11:46 ` [PATCH v1 15/17] vdpa/mlx5: add device close task Li Zhang
2022-06-06 11:46 ` [PATCH v1 16/17] vdpa/mlx5: add virtq sub-resources creation Li Zhang
2022-06-06 11:46 ` [PATCH v1 17/17] vdpa/mlx5: prepare virtqueue resource creation Li Zhang
2022-06-16 2:29 ` [PATCH v2 00/15] mlx5/vdpa: optimize live migration time Li Zhang
2022-06-16 2:29 ` [PATCH v2 01/15] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
2022-06-17 14:27 ` Maxime Coquelin
2022-06-16 2:29 ` [PATCH v2 02/15] vdpa/mlx5: support pre create virtq resource Li Zhang
2022-06-17 15:36 ` Maxime Coquelin
2022-06-18 8:04 ` Li Zhang
2022-06-16 2:30 ` [PATCH v2 03/15] common/mlx5: add DevX API to move QP to reset state Li Zhang
2022-06-17 15:41 ` Maxime Coquelin
2022-06-16 2:30 ` [PATCH v2 04/15] vdpa/mlx5: support event qp reuse Li Zhang
2022-06-16 2:30 ` [PATCH v2 05/15] common/mlx5: extend virtq modifiable fields Li Zhang
2022-06-17 15:45 ` Maxime Coquelin
2022-06-16 2:30 ` [PATCH v2 06/15] vdpa/mlx5: pre-create virtq in the prob Li Zhang
2022-06-17 15:53 ` Maxime Coquelin
2022-06-18 7:54 ` Li Zhang
2022-06-16 2:30 ` Li Zhang [this message]
2022-06-16 2:30 ` [PATCH v2 08/15] vdpa/mlx5: add multi-thread management for configuration Li Zhang
2022-06-16 2:30 ` [PATCH v2 09/15] vdpa/mlx5: add task ring for MT management Li Zhang
2022-06-16 2:30 ` [PATCH v2 10/15] vdpa/mlx5: add MT task for VM memory registration Li Zhang
2022-06-16 2:30 ` [PATCH v2 11/15] vdpa/mlx5: add virtq creation task for MT management Li Zhang
2022-06-16 2:30 ` [PATCH v2 12/15] vdpa/mlx5: add virtq LM log task Li Zhang
2022-06-16 2:30 ` [PATCH v2 13/15] vdpa/mlx5: add device close task Li Zhang
2022-06-16 2:30 ` [PATCH v2 14/15] vdpa/mlx5: add virtq sub-resources creation Li Zhang
2022-06-16 2:30 ` [PATCH v2 15/15] vdpa/mlx5: prepare virtqueue resource creation Li Zhang
2022-06-16 7:24 ` [PATCH v2 00/15] mlx5/vdpa: optimize live migration time Maxime Coquelin
2022-06-16 9:02 ` Maxime Coquelin
2022-06-17 1:49 ` Li Zhang
2022-06-18 8:47 ` [PATCH v3 " Li Zhang
2022-06-18 8:47 ` [PATCH v3 01/15] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
2022-06-18 8:47 ` [PATCH v3 02/15] vdpa/mlx5: support pre create virtq resource Li Zhang
2022-06-18 8:47 ` [PATCH v3 03/15] common/mlx5: add DevX API to move QP to reset state Li Zhang
2022-06-18 8:47 ` [PATCH v3 04/15] vdpa/mlx5: support event qp reuse Li Zhang
2022-06-18 8:47 ` [PATCH v3 05/15] common/mlx5: extend virtq modifiable fields Li Zhang
2022-06-18 8:47 ` [PATCH v3 06/15] vdpa/mlx5: pre-create virtq at probe time Li Zhang
2022-06-18 8:47 ` [PATCH v3 07/15] vdpa/mlx5: optimize datapath-control synchronization Li Zhang
2022-06-18 8:47 ` [PATCH v3 08/15] vdpa/mlx5: add multi-thread management for configuration Li Zhang
2022-06-18 8:47 ` [PATCH v3 09/15] vdpa/mlx5: add task ring for MT management Li Zhang
2022-06-18 8:48 ` [PATCH v3 10/15] vdpa/mlx5: add MT task for VM memory registration Li Zhang
2022-06-18 8:48 ` [PATCH v3 11/15] vdpa/mlx5: add virtq creation task for MT management Li Zhang
2022-06-18 8:48 ` [PATCH v3 12/15] vdpa/mlx5: add virtq LM log task Li Zhang
2022-06-18 8:48 ` [PATCH v3 13/15] vdpa/mlx5: add device close task Li Zhang
2022-06-18 8:48 ` [PATCH v3 14/15] vdpa/mlx5: add virtq sub-resources creation Li Zhang
2022-06-18 8:48 ` [PATCH v3 15/15] vdpa/mlx5: prepare virtqueue resource creation Li Zhang
2022-06-18 9:02 ` [PATCH v4 00/15] mlx5/vdpa: optimize live migration time Li Zhang
2022-06-18 9:02 ` [PATCH v4 01/15] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
2022-06-18 9:02 ` [PATCH v4 02/15] vdpa/mlx5: support pre create virtq resource Li Zhang
2022-06-18 9:02 ` [PATCH v4 03/15] common/mlx5: add DevX API to move QP to reset state Li Zhang
2022-06-18 9:02 ` [PATCH v4 04/15] vdpa/mlx5: support event qp reuse Li Zhang
2022-06-20 8:27 ` Maxime Coquelin
2022-06-18 9:02 ` [PATCH v4 05/15] common/mlx5: extend virtq modifiable fields Li Zhang
2022-06-20 9:01 ` Maxime Coquelin
2022-06-18 9:02 ` [PATCH v4 06/15] vdpa/mlx5: pre-create virtq at probe time Li Zhang
2022-06-18 9:02 ` [PATCH v4 07/15] vdpa/mlx5: optimize datapath-control synchronization Li Zhang
2022-06-20 9:25 ` Maxime Coquelin
2022-06-18 9:02 ` [PATCH v4 08/15] vdpa/mlx5: add multi-thread management for configuration Li Zhang
2022-06-20 10:57 ` Maxime Coquelin
2022-06-18 9:02 ` [PATCH v4 09/15] vdpa/mlx5: add task ring for MT management Li Zhang
2022-06-20 15:05 ` Maxime Coquelin
2022-06-18 9:02 ` [PATCH v4 10/15] vdpa/mlx5: add MT task for VM memory registration Li Zhang
2022-06-20 15:12 ` Maxime Coquelin
2022-06-18 9:02 ` [PATCH v4 11/15] vdpa/mlx5: add virtq creation task for MT management Li Zhang
2022-06-20 15:19 ` Maxime Coquelin
2022-06-18 9:02 ` [PATCH v4 12/15] vdpa/mlx5: add virtq LM log task Li Zhang
2022-06-20 15:42 ` Maxime Coquelin
2022-06-18 9:02 ` [PATCH v4 13/15] vdpa/mlx5: add device close task Li Zhang
2022-06-20 15:54 ` Maxime Coquelin
2022-06-18 9:02 ` [PATCH v4 14/15] vdpa/mlx5: add virtq sub-resources creation Li Zhang
2022-06-20 16:01 ` Maxime Coquelin
2022-06-18 9:02 ` [PATCH v4 15/15] vdpa/mlx5: prepare virtqueue resource creation Li Zhang
2022-06-20 16:30 ` Maxime Coquelin
2022-06-21 9:29 ` [PATCH v4 00/15] mlx5/vdpa: optimize live migration time Maxime Coquelin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220616023012.16013-8-lizh@nvidia.com \
--to=lizh@nvidia.com \
--cc=dev@dpdk.org \
--cc=matan@nvidia.com \
--cc=orika@nvidia.com \
--cc=rasland@nvidia.com \
--cc=roniba@nvidia.com \
--cc=shahafs@nvidia.com \
--cc=thomas@monjalon.net \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).