* [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recreate a virtq becoming enabled
@ 2020-03-31 11:12 Matan Azrad
2020-03-31 11:12 ` [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array Matan Azrad
` (4 more replies)
0 siblings, 5 replies; 19+ messages in thread
From: Matan Azrad @ 2020-03-31 11:12 UTC (permalink / raw)
To: dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler, Maxime Coquelin
Since a virtq configuration may be changed while it is in the disabled state, it is better to recreate a virtq when it becomes enabled.
This series adds this behaviour to the mlx5 vDPA driver.
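As a rough illustration of the idea (a minimal sketch with hypothetical sketch_* names, not the actual mlx5 vDPA code): because the guest may change ring addresses or sizes while a queue is disabled, the hardware queue object is rebuilt on the disabled-to-enabled transition rather than reused:

#include <stdint.h>

struct sketch_virtq {
	int enabled;
	int created;          /* A HW virtq object exists. */
	uint64_t desc_addr;   /* The guest may change this while disabled. */
};

/* Stubs standing in for the real create/destroy device commands. */
static void sketch_virtq_destroy(struct sketch_virtq *vq) { vq->created = 0; }
static int  sketch_virtq_create(struct sketch_virtq *vq)  { vq->created = 1; return 0; }

/* On the disable->enable transition, rebuild the HW object instead of
 * reusing one that may hold stale ring addresses.
 */
static int
sketch_virtq_state_cb(struct sketch_virtq *vq, int enable)
{
	if (vq->enabled == !!enable)
		return 0;             /* No state change. */
	if (enable) {
		if (vq->created)
			sketch_virtq_destroy(vq);
		if (sketch_virtq_create(vq))
			return -1;
	}
	vq->enabled = !!enable;
	return 0;
}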
Matan Azrad (3):
vdpa/mlx5: manage virtqs by array
vdpa/mlx5: separate virtq stop
vdpa/mlx5: recreate a virtq becoming enabled
drivers/vdpa/mlx5/mlx5_vdpa.c | 47 +++++++--------
drivers/vdpa/mlx5/mlx5_vdpa.h | 54 +++++++++++++++--
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 57 ++++++++----------
drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 103 ++++++++++++++++----------------
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 113 +++++++++++++++++++++++++++---------
5 files changed, 231 insertions(+), 143 deletions(-)
--
1.8.3.1
* [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array
2020-03-31 11:12 [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recreate a virtq becoming enabled Matan Azrad
@ 2020-03-31 11:12 ` Matan Azrad
2020-04-09 15:18 ` Maxime Coquelin
2020-04-15 14:06 ` Maxime Coquelin
2020-03-31 11:12 ` [dpdk-dev] [PATCH 2/3] vdpa/mlx5: separate virtq stop Matan Azrad
` (3 subsequent siblings)
4 siblings, 2 replies; 19+ messages in thread
From: Matan Azrad @ 2020-03-31 11:12 UTC (permalink / raw)
To: dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler, Maxime Coquelin
As a preparation to listen to the virtqs' status before the device is
configured, manage the virtq structures in an array instead of a list.
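The sketch below (hypothetical sketch_* names, not the driver's real structures) shows the general shape of the change: the private structure ends with a flexible array member sized once from the device maximum, so a vring index maps directly to its slot instead of requiring a list walk:

#include <stdint.h>
#include <stdlib.h>

struct sketch_virtq {
	void *hw_obj;      /* NULL while the queue is not created. */
	int enabled;
};

struct sketch_priv {
	uint16_t nr_virtqs;            /* Queues configured so far. */
	struct sketch_virtq virtqs[];  /* One slot per possible vring. */
};

/* Allocate the private structure once, sized by the device maximum,
 * so any vring index can be addressed directly without list walks.
 */
static struct sketch_priv *
sketch_priv_alloc(uint16_t max_queue_pairs)
{
	uint32_t n = (uint32_t)max_queue_pairs * 2; /* RX + TX per pair. */

	return calloc(1, sizeof(struct sketch_priv) +
			 n * sizeof(struct sketch_virtq));
}

/* O(1) lookup by vring index replaces the SLIST_FOREACH() scan. */
static struct sketch_virtq *
sketch_virtq_get(struct sketch_priv *priv, int vring)
{
	if (vring < 0 || vring >= priv->nr_virtqs)
		return NULL;
	return &priv->virtqs[vring];
}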
Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
drivers/vdpa/mlx5/mlx5_vdpa.c | 43 ++++++++++++++++------------------
drivers/vdpa/mlx5/mlx5_vdpa.h | 2 +-
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 43 ++++++++++++++++------------------
drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 18 +++++++--------
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 46 +++++++++++++++----------------------
5 files changed, 68 insertions(+), 84 deletions(-)
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index f10647b..b22f074 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -116,20 +116,18 @@
{
int did = rte_vhost_get_vdpa_device_id(vid);
struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
- struct mlx5_vdpa_virtq *virtq = NULL;
if (priv == NULL) {
DRV_LOG(ERR, "Invalid device id: %d.", did);
return -EINVAL;
}
- SLIST_FOREACH(virtq, &priv->virtq_list, next)
- if (virtq->index == vring)
- break;
- if (!virtq) {
+ if (!priv->configured || vring >= RTE_MIN((int)priv->nr_virtqs,
+ (int)priv->caps.max_num_virtio_queues * 2) ||
+ !priv->virtqs[vring].virtq) {
DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
return -EINVAL;
}
- return mlx5_vdpa_virtq_enable(virtq, state);
+ return mlx5_vdpa_virtq_enable(&priv->virtqs[vring], state);
}
static int
@@ -482,28 +480,28 @@
rte_errno = ENODEV;
return -rte_errno;
}
- priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv),
- RTE_CACHE_LINE_SIZE);
- if (!priv) {
- DRV_LOG(ERR, "Failed to allocate private memory.");
- rte_errno = ENOMEM;
- goto error;
- }
ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
if (ret) {
DRV_LOG(ERR, "Unable to read HCA capabilities.");
rte_errno = ENOTSUP;
goto error;
- } else {
- if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
- DRV_LOG(ERR, "Not enough capabilities to support vdpa,"
- " maybe old FW/OFED version?");
- rte_errno = ENOTSUP;
- goto error;
- }
- priv->caps = attr.vdpa;
- priv->log_max_rqt_size = attr.log_max_rqt_size;
+ } else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
+ DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
+ "old FW/OFED version?");
+ rte_errno = ENOTSUP;
+ goto error;
+ }
+ priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
+ sizeof(struct mlx5_vdpa_virtq) *
+ attr.vdpa.max_num_virtio_queues * 2,
+ RTE_CACHE_LINE_SIZE);
+ if (!priv) {
+ DRV_LOG(ERR, "Failed to allocate private memory.");
+ rte_errno = ENOMEM;
+ goto error;
}
+ priv->caps = attr.vdpa;
+ priv->log_max_rqt_size = attr.log_max_rqt_size;
priv->ctx = ctx;
priv->dev_addr.pci_addr = pci_dev->addr;
priv->dev_addr.type = PCI_ADDR;
@@ -519,7 +517,6 @@
goto error;
}
SLIST_INIT(&priv->mr_list);
- SLIST_INIT(&priv->virtq_list);
pthread_mutex_lock(&priv_list_lock);
TAILQ_INSERT_TAIL(&priv_list, priv, next);
pthread_mutex_unlock(&priv_list_lock);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 75af410..baec106 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -120,11 +120,11 @@ struct mlx5_vdpa_priv {
uint16_t nr_virtqs;
uint64_t features; /* Negotiated features. */
uint16_t log_max_rqt_size;
- SLIST_HEAD(virtq_list, mlx5_vdpa_virtq) virtq_list;
struct mlx5_vdpa_steer steer;
struct mlx5dv_var *var;
void *virtq_db_addr;
SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list;
+ struct mlx5_vdpa_virtq virtqs[];
};
/**
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 4457760..77f2eda 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -15,13 +15,12 @@
.type = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE,
.dirty_bitmap_dump_enable = enable,
};
- struct mlx5_vdpa_virtq *virtq;
+ int i;
- SLIST_FOREACH(virtq, &priv->virtq_list, next) {
- attr.queue_index = virtq->index;
- if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
- DRV_LOG(ERR, "Failed to modify virtq %d logging.",
- virtq->index);
+ for (i = 0; i < priv->nr_virtqs; ++i) {
+ attr.queue_index = i;
+ if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
+ DRV_LOG(ERR, "Failed to modify virtq %d logging.", i);
return -1;
}
}
@@ -47,7 +46,7 @@
.dirty_bitmap_size = log_size,
};
struct mlx5_vdpa_query_mr *mr = rte_malloc(__func__, sizeof(*mr), 0);
- struct mlx5_vdpa_virtq *virtq;
+ int i;
if (!mr) {
DRV_LOG(ERR, "Failed to allocate mem for lm mr.");
@@ -67,11 +66,10 @@
goto err;
}
attr.dirty_bitmap_mkey = mr->mkey->id;
- SLIST_FOREACH(virtq, &priv->virtq_list, next) {
- attr.queue_index = virtq->index;
- if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
- DRV_LOG(ERR, "Failed to modify virtq %d for lm.",
- virtq->index);
+ for (i = 0; i < priv->nr_virtqs; ++i) {
+ attr.queue_index = i;
+ if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
+ DRV_LOG(ERR, "Failed to modify virtq %d for lm.", i);
goto err;
}
}
@@ -94,9 +92,9 @@
mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
{
struct mlx5_devx_virtq_attr attr = {0};
- struct mlx5_vdpa_virtq *virtq;
uint64_t features;
int ret = rte_vhost_get_negotiated_features(priv->vid, &features);
+ int i;
if (ret) {
DRV_LOG(ERR, "Failed to get negotiated features.");
@@ -104,27 +102,26 @@
}
if (!RTE_VHOST_NEED_LOG(features))
return 0;
- SLIST_FOREACH(virtq, &priv->virtq_list, next) {
- ret = mlx5_vdpa_virtq_modify(virtq, 0);
+ for (i = 0; i < priv->nr_virtqs; ++i) {
+ ret = mlx5_vdpa_virtq_modify(&priv->virtqs[i], 0);
if (ret)
return -1;
- if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
- DRV_LOG(ERR, "Failed to query virtq %d.", virtq->index);
+ if (mlx5_devx_cmd_query_virtq(priv->virtqs[i].virtq, &attr)) {
+ DRV_LOG(ERR, "Failed to query virtq %d.", i);
return -1;
}
DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
- "hw_used_index=%d", priv->vid, virtq->index,
+ "hw_used_index=%d", priv->vid, i,
attr.hw_available_index, attr.hw_used_index);
- ret = rte_vhost_set_vring_base(priv->vid, virtq->index,
+ ret = rte_vhost_set_vring_base(priv->vid, i,
attr.hw_available_index,
attr.hw_used_index);
if (ret) {
- DRV_LOG(ERR, "Failed to set virtq %d base.",
- virtq->index);
+ DRV_LOG(ERR, "Failed to set virtq %d base.", i);
return -1;
}
- rte_vhost_log_used_vring(priv->vid, virtq->index, 0,
- MLX5_VDPA_USED_RING_LEN(virtq->vq_size));
+ rte_vhost_log_used_vring(priv->vid, i, 0,
+ MLX5_VDPA_USED_RING_LEN(priv->virtqs[i].vq_size));
}
return 0;
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
index 9c11452..96ffc21 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
@@ -76,13 +76,13 @@
static int
mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
{
- struct mlx5_vdpa_virtq *virtq;
+ int i;
uint32_t rqt_n = RTE_MIN(MLX5_VDPA_DEFAULT_RQT_SIZE,
1 << priv->log_max_rqt_size);
struct mlx5_devx_rqt_attr *attr = rte_zmalloc(__func__, sizeof(*attr)
+ rqt_n *
sizeof(uint32_t), 0);
- uint32_t i = 0, j;
+ uint32_t k = 0, j;
int ret = 0;
if (!attr) {
@@ -90,15 +90,15 @@
rte_errno = ENOMEM;
return -ENOMEM;
}
- SLIST_FOREACH(virtq, &priv->virtq_list, next) {
- if (is_virtq_recvq(virtq->index, priv->nr_virtqs) &&
- virtq->enable) {
- attr->rq_list[i] = virtq->virtq->id;
- i++;
+ for (i = 0; i < priv->nr_virtqs; i++) {
+ if (is_virtq_recvq(i, priv->nr_virtqs) &&
+ priv->virtqs[i].enable) {
+ attr->rq_list[k] = priv->virtqs[i].virtq->id;
+ k++;
}
}
- for (j = 0; i != rqt_n; ++i, ++j)
- attr->rq_list[i] = attr->rq_list[j];
+ for (j = 0; k != rqt_n; ++k, ++j)
+ attr->rq_list[k] = attr->rq_list[j];
attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
attr->rqt_max_size = rqt_n;
attr->rqt_actual_size = rqt_n;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 8bebb92..3575272 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -59,12 +59,9 @@
usleep(MLX5_VDPA_INTR_RETRIES_USEC);
}
}
- virtq->intr_handle.fd = -1;
}
- if (virtq->virtq) {
+ if (virtq->virtq)
claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
- virtq->virtq = NULL;
- }
for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
if (virtq->umems[i].obj)
claim_zero(mlx5_glue->devx_umem_dereg
@@ -72,27 +69,20 @@
if (virtq->umems[i].buf)
rte_free(virtq->umems[i].buf);
}
- memset(&virtq->umems, 0, sizeof(virtq->umems));
if (virtq->eqp.fw_qp)
mlx5_vdpa_event_qp_destroy(&virtq->eqp);
+ memset(virtq, 0, sizeof(*virtq));
+ virtq->intr_handle.fd = -1;
return 0;
}
void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
{
- struct mlx5_vdpa_virtq *entry;
- struct mlx5_vdpa_virtq *next;
+ int i;
- entry = SLIST_FIRST(&priv->virtq_list);
- while (entry) {
- next = SLIST_NEXT(entry, next);
- mlx5_vdpa_virtq_unset(entry);
- SLIST_REMOVE(&priv->virtq_list, entry, mlx5_vdpa_virtq, next);
- rte_free(entry);
- entry = next;
- }
- SLIST_INIT(&priv->virtq_list);
+ for (i = 0; i < priv->nr_virtqs; i++)
+ mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
if (priv->tis) {
claim_zero(mlx5_devx_cmd_destroy(priv->tis));
priv->tis = NULL;
@@ -106,6 +96,7 @@
priv->virtq_db_addr = NULL;
}
priv->features = 0;
+ priv->nr_virtqs = 0;
}
int
@@ -140,9 +131,9 @@
}
static int
-mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv,
- struct mlx5_vdpa_virtq *virtq, int index)
+mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
{
+ struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
struct rte_vhost_vring vq;
struct mlx5_devx_virtq_attr attr = {0};
uint64_t gpa;
@@ -340,7 +331,6 @@
mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
{
struct mlx5_devx_tis_attr tis_attr = {0};
- struct mlx5_vdpa_virtq *virtq;
uint32_t i;
uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
@@ -349,6 +339,12 @@
DRV_LOG(ERR, "Failed to configure negotiated features.");
return -1;
}
+ if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
+ DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
+ (int)priv->caps.max_num_virtio_queues * 2,
+ (int)nr_vring);
+ return -E2BIG;
+ }
/* Always map the entire page. */
priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
PROT_WRITE, MAP_SHARED, priv->ctx->cmd_fd,
@@ -372,16 +368,10 @@
DRV_LOG(ERR, "Failed to create TIS.");
goto error;
}
- for (i = 0; i < nr_vring; i++) {
- virtq = rte_zmalloc(__func__, sizeof(*virtq), 0);
- if (!virtq || mlx5_vdpa_virtq_setup(priv, virtq, i)) {
- if (virtq)
- rte_free(virtq);
- goto error;
- }
- SLIST_INSERT_HEAD(&priv->virtq_list, virtq, next);
- }
priv->nr_virtqs = nr_vring;
+ for (i = 0; i < nr_vring; i++)
+ if (mlx5_vdpa_virtq_setup(priv, i))
+ goto error;
return 0;
error:
mlx5_vdpa_virtqs_release(priv);
--
1.8.3.1
* [dpdk-dev] [PATCH 2/3] vdpa/mlx5: separate virtq stop
2020-03-31 11:12 [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recreate a virtq becoming enabled Matan Azrad
2020-03-31 11:12 ` [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array Matan Azrad
@ 2020-03-31 11:12 ` Matan Azrad
2020-04-09 15:28 ` Maxime Coquelin
2020-03-31 11:12 ` [dpdk-dev] [PATCH 3/3] vdpa/mlx5: recreate a virtq becoming enabled Matan Azrad
` (2 subsequent siblings)
4 siblings, 1 reply; 19+ messages in thread
From: Matan Azrad @ 2020-03-31 11:12 UTC (permalink / raw)
To: dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler, Maxime Coquelin
In live migration, before loging the virtq, the driver queries the virtq
indexes after moving it to suspend mode.
Separate this method into a new function, mlx5_vdpa_virtq_stop, as a
preparation for reuse.
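A minimal sketch of the factored-out stop sequence, with hypothetical sketch_* names and stubbed hardware/vhost calls standing in for the real suspend/query/set-base operations; the point is that one helper can serve both the live-migration log path and any later caller:

#include <stdint.h>
#include <stdio.h>

struct sketch_vq_state {
	uint16_t hw_available_index;
	uint16_t hw_used_index;
};

/* Stubs standing in for the suspend/query/set-base operations. */
static int sketch_hw_suspend(int index) { (void)index; return 0; }
static int sketch_hw_query(int index, struct sketch_vq_state *st)
{ (void)index; st->hw_available_index = 0; st->hw_used_index = 0; return 0; }
static int sketch_vhost_set_base(int index, const struct sketch_vq_state *st)
{ (void)index; (void)st; return 0; }

/* One reusable "stop" helper: suspend the queue, read its indexes and
 * report them back to vhost so the state can be resumed later.
 */
static int
sketch_virtq_stop(int index)
{
	struct sketch_vq_state st;

	if (sketch_hw_suspend(index))
		return -1;
	if (sketch_hw_query(index, &st))
		return -1;
	printf("vring %d: hw_available_idx=%u hw_used_index=%u\n",
	       index, st.hw_available_index, st.hw_used_index);
	return sketch_vhost_set_base(index, &st);
}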
Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
drivers/vdpa/mlx5/mlx5_vdpa.h | 13 +++++++++++++
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 17 ++---------------
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 26 ++++++++++++++++++++++++++
3 files changed, 41 insertions(+), 15 deletions(-)
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index baec106..0edd688 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -308,4 +308,17 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
*/
int mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state);
+/**
+ * Stop virtq before destroying it.
+ *
+ * @param[in] priv
+ * The vdpa driver private structure.
+ * @param[in] index
+ * The virtq index.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);
+
#endif /* RTE_PMD_MLX5_VDPA_H_ */
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 77f2eda..26b7ce1 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -91,7 +91,6 @@
int
mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
{
- struct mlx5_devx_virtq_attr attr = {0};
uint64_t features;
int ret = rte_vhost_get_negotiated_features(priv->vid, &features);
int i;
@@ -103,21 +102,9 @@
if (!RTE_VHOST_NEED_LOG(features))
return 0;
for (i = 0; i < priv->nr_virtqs; ++i) {
- ret = mlx5_vdpa_virtq_modify(&priv->virtqs[i], 0);
- if (ret)
- return -1;
- if (mlx5_devx_cmd_query_virtq(priv->virtqs[i].virtq, &attr)) {
- DRV_LOG(ERR, "Failed to query virtq %d.", i);
- return -1;
- }
- DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
- "hw_used_index=%d", priv->vid, i,
- attr.hw_available_index, attr.hw_used_index);
- ret = rte_vhost_set_vring_base(priv->vid, i,
- attr.hw_available_index,
- attr.hw_used_index);
+ ret = mlx5_vdpa_virtq_stop(priv, i);
if (ret) {
- DRV_LOG(ERR, "Failed to set virtq %d base.", i);
+ DRV_LOG(ERR, "Failed to stop virtq %d.", i);
return -1;
}
rte_vhost_log_used_vring(priv->vid, i, 0,
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 3575272..0bb6416 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -112,6 +112,32 @@
return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
}
+int
+mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)
+{
+ struct mlx5_devx_virtq_attr attr = {0};
+ struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
+ int ret = mlx5_vdpa_virtq_modify(virtq, 0);
+
+ if (ret)
+ return -1;
+ if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
+ DRV_LOG(ERR, "Failed to query virtq %d.", index);
+ return -1;
+ }
+ DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
+ "hw_used_index=%d", priv->vid, index,
+ attr.hw_available_index, attr.hw_used_index);
+ ret = rte_vhost_set_vring_base(priv->vid, index,
+ attr.hw_available_index,
+ attr.hw_used_index);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to set virtq %d base.", index);
+ return -1;
+ }
+ return 0;
+}
+
static uint64_t
mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
{
--
1.8.3.1
* [dpdk-dev] [PATCH 3/3] vdpa/mlx5: recreate a virtq becoming enabled
2020-03-31 11:12 [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recreate a virtq becoming enabled Matan Azrad
2020-03-31 11:12 ` [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array Matan Azrad
2020-03-31 11:12 ` [dpdk-dev] [PATCH 2/3] vdpa/mlx5: separate virtq stop Matan Azrad
@ 2020-03-31 11:12 ` Matan Azrad
2020-04-15 14:06 ` Maxime Coquelin
2020-04-17 14:58 ` [dpdk-dev] [PATCH 0/3] " Maxime Coquelin
2020-04-26 12:07 ` [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recreate " Matan Azrad
4 siblings, 1 reply; 19+ messages in thread
From: Matan Azrad @ 2020-03-31 11:12 UTC (permalink / raw)
To: dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler, Maxime Coquelin
The virtq configuration may be changed when it moves from the disabled
state to the enabled state.
Listen to the state callback even if the device is not configured.
Recreate the virtq when it moves from the disabled state to the enabled
state and the device is configured.
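A minimal sketch of this enable-callback behaviour, with hypothetical sketch_* names and a fixed-size queue array standing in for the real driver state; before the device is configured only the requested state is cached, afterwards the queue is recreated on enable and the receive-side steering is refreshed, rolling back the flag if that fails:

struct sketch_queue {
	int enabled;
	int created;
};

struct sketch_dev {
	int configured;
	int nr_virtqs;               /* Assumed <= 16 in this sketch. */
	struct sketch_queue q[16];
};

/* Stubs standing in for queue recreation and RQT/steering refresh. */
static int sketch_queue_recreate(struct sketch_queue *q) { q->created = 1; return 0; }
static int sketch_steer_update(struct sketch_dev *d)     { (void)d; return 0; }
/* Even indexes (except a trailing control queue) are receive queues. */
static int sketch_is_rx(int idx, int nr) { return (idx % 2) == 0 && idx != nr - 1; }

static int
sketch_queue_state_cb(struct sketch_dev *d, int idx, int enable)
{
	struct sketch_queue *q;

	if (idx < 0 || idx >= d->nr_virtqs)
		return -1;
	q = &d->q[idx];
	if (q->enabled == !!enable)
		return 0;
	if (!d->configured) {
		/* Device not configured yet: only remember the state. */
		q->enabled = !!enable;
		return 0;
	}
	/* Configuration may have changed while disabled: rebuild on enable. */
	if (enable && sketch_queue_recreate(q))
		return -1;
	q->enabled = !!enable;
	if (sketch_is_rx(idx, d->nr_virtqs) && sketch_steer_update(d)) {
		q->enabled = !enable;  /* Roll back on steering failure. */
		return -1;
	}
	return 0;
}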
Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
drivers/vdpa/mlx5/mlx5_vdpa.c | 12 +++--
drivers/vdpa/mlx5/mlx5_vdpa.h | 39 +++++++++++++++--
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 17 +++++---
drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 87 ++++++++++++++++++-------------------
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 59 ++++++++++++++++++++++---
5 files changed, 146 insertions(+), 68 deletions(-)
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index b22f074..fe17ced 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -121,13 +121,11 @@
DRV_LOG(ERR, "Invalid device id: %d.", did);
return -EINVAL;
}
- if (!priv->configured || vring >= RTE_MIN((int)priv->nr_virtqs,
- (int)priv->caps.max_num_virtio_queues * 2) ||
- !priv->virtqs[vring].virtq) {
- DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
- return -EINVAL;
+ if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+ DRV_LOG(ERR, "Too big vring id: %d.", vring);
+ return -E2BIG;
}
- return mlx5_vdpa_virtq_enable(&priv->virtqs[vring], state);
+ return mlx5_vdpa_virtq_enable(priv, vring, state);
}
static int
@@ -206,7 +204,7 @@
if (priv->configured)
ret |= mlx5_vdpa_lm_log(priv);
mlx5_vdpa_cqe_event_unset(priv);
- ret |= mlx5_vdpa_steer_unset(priv);
+ mlx5_vdpa_steer_unset(priv);
mlx5_vdpa_virtqs_release(priv);
mlx5_vdpa_event_qp_global_release(priv);
mlx5_vdpa_mem_dereg(priv);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 0edd688..fcc216a 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -127,6 +127,24 @@ struct mlx5_vdpa_priv {
struct mlx5_vdpa_virtq virtqs[];
};
+/*
+ * Check whether virtq is for traffic receive.
+ * According to VIRTIO_NET Spec the virtqueues index identity its type by:
+ * 0 receiveq1
+ * 1 transmitq1
+ * ...
+ * 2(N-1) receiveqN
+ * 2(N-1)+1 transmitqN
+ * 2N controlq
+ */
+static inline uint8_t
+is_virtq_recvq(int virtq_index, int nr_vring)
+{
+ if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
+ return 1;
+ return 0;
+}
+
/**
* Release all the prepared memory regions and all their related resources.
*
@@ -223,15 +241,17 @@ int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
/**
* Enable\Disable virtq..
*
- * @param[in] virtq
- * The vdpa driver private virtq structure.
+ * @param[in] priv
+ * The vdpa driver private structure.
+ * @param[in] index
+ * The virtq index.
* @param[in] enable
* Set to enable, otherwise disable.
*
* @return
* 0 on success, a negative value otherwise.
*/
-int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable);
+int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable);
/**
* Unset steering and release all its related resources- stop traffic.
@@ -239,7 +259,18 @@ int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
* @param[in] priv
* The vdpa driver private structure.
*/
-int mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);
+void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);
+
+/**
+ * Update steering according to the received queues status.
+ *
+ * @param[in] priv
+ * The vdpa driver private structure.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv);
/**
* Setup steering and all its related resources to enable RSS traffic from the
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 26b7ce1..460e01d 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -19,7 +19,8 @@
for (i = 0; i < priv->nr_virtqs; ++i) {
attr.queue_index = i;
- if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
+ if (!priv->virtqs[i].virtq ||
+ mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
DRV_LOG(ERR, "Failed to modify virtq %d logging.", i);
return -1;
}
@@ -68,7 +69,8 @@
attr.dirty_bitmap_mkey = mr->mkey->id;
for (i = 0; i < priv->nr_virtqs; ++i) {
attr.queue_index = i;
- if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
+ if (!priv->virtqs[i].virtq ||
+ mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
DRV_LOG(ERR, "Failed to modify virtq %d for lm.", i);
goto err;
}
@@ -102,9 +104,14 @@
if (!RTE_VHOST_NEED_LOG(features))
return 0;
for (i = 0; i < priv->nr_virtqs; ++i) {
- ret = mlx5_vdpa_virtq_stop(priv, i);
- if (ret) {
- DRV_LOG(ERR, "Failed to stop virtq %d.", i);
+ if (priv->virtqs[i].virtq) {
+ ret = mlx5_vdpa_virtq_stop(priv, i);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to stop virtq %d.", i);
+ return -1;
+ }
+ } else {
+ DRV_LOG(ERR, "virtq %d is not created.", i);
return -1;
}
rte_vhost_log_used_vring(priv->vid, i, 0,
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
index 96ffc21..406c7be 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
@@ -12,10 +12,9 @@
#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"
-int
-mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
+static void
+mlx5_vdpa_rss_flows_destroy(struct mlx5_vdpa_priv *priv)
{
- int ret __rte_unused;
unsigned i;
for (i = 0; i < RTE_DIM(priv->steer.rss); ++i) {
@@ -40,6 +39,12 @@
priv->steer.rss[i].matcher = NULL;
}
}
+}
+
+void
+mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
+{
+ mlx5_vdpa_rss_flows_destroy(priv);
if (priv->steer.tbl) {
claim_zero(mlx5_glue->dr_destroy_flow_tbl(priv->steer.tbl));
priv->steer.tbl = NULL;
@@ -52,27 +57,13 @@
claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));
priv->steer.rqt = NULL;
}
- return 0;
}
+#define MLX5_VDPA_DEFAULT_RQT_SIZE 512
/*
- * According to VIRTIO_NET Spec the virtqueues index identity its type by:
- * 0 receiveq1
- * 1 transmitq1
- * ...
- * 2(N-1) receiveqN
- * 2(N-1)+1 transmitqN
- * 2N controlq
+ * Return the number of queues configured to the table on success, otherwise
+ * -1 on error.
*/
-static uint8_t
-is_virtq_recvq(int virtq_index, int nr_vring)
-{
- if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
- return 1;
- return 0;
-}
-
-#define MLX5_VDPA_DEFAULT_RQT_SIZE 512
static int
mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
{
@@ -83,7 +74,7 @@
+ rqt_n *
sizeof(uint32_t), 0);
uint32_t k = 0, j;
- int ret = 0;
+ int ret = 0, num;
if (!attr) {
DRV_LOG(ERR, "Failed to allocate RQT attributes memory.");
@@ -92,11 +83,15 @@
}
for (i = 0; i < priv->nr_virtqs; i++) {
if (is_virtq_recvq(i, priv->nr_virtqs) &&
- priv->virtqs[i].enable) {
+ priv->virtqs[i].enable && priv->virtqs[i].virtq) {
attr->rq_list[k] = priv->virtqs[i].virtq->id;
k++;
}
}
+ if (k == 0)
+ /* No enabled RQ to configure for RSS. */
+ return 0;
+ num = (int)k;
for (j = 0; k != rqt_n; ++k, ++j)
attr->rq_list[k] = attr->rq_list[j];
attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
@@ -114,26 +109,7 @@
DRV_LOG(ERR, "Failed to modify RQT.");
}
rte_free(attr);
- return ret;
-}
-
-int
-mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable)
-{
- struct mlx5_vdpa_priv *priv = virtq->priv;
- int ret = 0;
-
- DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", virtq->index,
- virtq->enable ? "en" : "dis", enable ? "en" : "dis");
- if (virtq->enable == !!enable)
- return 0;
- virtq->enable = !!enable;
- if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
- ret = mlx5_vdpa_rqt_prepare(priv);
- if (ret)
- virtq->enable = !enable;
- }
- return ret;
+ return ret ? -1 : num;
}
static int __rte_unused
@@ -262,11 +238,32 @@
}
int
+mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)
+{
+ int ret = mlx5_vdpa_rqt_prepare(priv);
+
+ if (ret == 0) {
+ mlx5_vdpa_rss_flows_destroy(priv);
+ if (priv->steer.rqt) {
+ claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));
+ priv->steer.rqt = NULL;
+ }
+ } else if (ret < 0) {
+ return ret;
+ } else if (!priv->steer.rss[0].flow) {
+ ret = mlx5_vdpa_rss_flows_create(priv);
+ if (ret) {
+ DRV_LOG(ERR, "Cannot create RSS flows.");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int
mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
- if (mlx5_vdpa_rqt_prepare(priv))
- return -1;
priv->steer.domain = mlx5_glue->dr_create_domain(priv->ctx,
MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
if (!priv->steer.domain) {
@@ -278,7 +275,7 @@
DRV_LOG(ERR, "Failed to create table 0 with Rx domain.");
goto error;
}
- if (mlx5_vdpa_rss_flows_create(priv))
+ if (mlx5_vdpa_steer_update(priv))
goto error;
return 0;
error:
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 0bb6416..defb9e1 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -59,9 +59,11 @@
usleep(MLX5_VDPA_INTR_RETRIES_USEC);
}
}
+ virtq->intr_handle.fd = -1;
}
if (virtq->virtq)
claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
+ virtq->virtq = NULL;
for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
if (virtq->umems[i].obj)
claim_zero(mlx5_glue->devx_umem_dereg
@@ -69,10 +71,9 @@
if (virtq->umems[i].buf)
rte_free(virtq->umems[i].buf);
}
+ memset(&virtq->umems, 0, sizeof(virtq->umems));
if (virtq->eqp.fw_qp)
mlx5_vdpa_event_qp_destroy(&virtq->eqp);
- memset(virtq, 0, sizeof(*virtq));
- virtq->intr_handle.fd = -1;
return 0;
}
@@ -81,8 +82,10 @@
{
int i;
- for (i = 0; i < priv->nr_virtqs; i++)
+ for (i = 0; i < priv->nr_virtqs; i++) {
mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
+ priv->virtqs[i].enable = 0;
+ }
if (priv->tis) {
claim_zero(mlx5_devx_cmd_destroy(priv->tis));
priv->tis = NULL;
@@ -265,10 +268,7 @@
goto error;
if (mlx5_vdpa_virtq_modify(virtq, 1))
goto error;
- virtq->enable = 1;
virtq->priv = priv;
- /* Be sure notifications are not missed during configuration. */
- claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
rte_write32(virtq->index, priv->virtq_db_addr);
/* Setup doorbell mapping. */
virtq->intr_handle.fd = vq.kickfd;
@@ -395,11 +395,56 @@
goto error;
}
priv->nr_virtqs = nr_vring;
- for (i = 0; i < nr_vring; i++)
+ for (i = 0; i < nr_vring; i++) {
+ claim_zero(rte_vhost_enable_guest_notification(priv->vid, i,
+ 1));
if (mlx5_vdpa_virtq_setup(priv, i))
goto error;
+ }
return 0;
error:
mlx5_vdpa_virtqs_release(priv);
return -1;
}
+
+int
+mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
+{
+ struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
+ int ret;
+
+ DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
+ virtq->enable ? "en" : "dis", enable ? "en" : "dis");
+ if (virtq->enable == !!enable)
+ return 0;
+ if (!priv->configured) {
+ virtq->enable = !!enable;
+ return 0;
+ }
+ if (enable) {
+ /* Configuration might have been updated - reconfigure virtq. */
+ if (virtq->virtq) {
+ ret = mlx5_vdpa_virtq_stop(priv, index);
+ if (ret)
+ DRV_LOG(WARNING, "Failed to stop virtq %d.",
+ index);
+ mlx5_vdpa_virtq_unset(virtq);
+ }
+ ret = mlx5_vdpa_virtq_setup(priv, index);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to setup virtq %d.", index);
+ return ret;
+ /* The only case virtq can stay invalid. */
+ }
+ }
+ virtq->enable = !!enable;
+ if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
+ /* Need to add received virtq to the RQT table of the TIRs. */
+ ret = mlx5_vdpa_steer_update(priv);
+ if (ret) {
+ virtq->enable = !enable;
+ return ret;
+ }
+ }
+ return 0;
+}
--
1.8.3.1
* Re: [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array
2020-03-31 11:12 ` [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array Matan Azrad
@ 2020-04-09 15:18 ` Maxime Coquelin
2020-04-10 13:58 ` Matan Azrad
2020-04-15 14:06 ` Maxime Coquelin
1 sibling, 1 reply; 19+ messages in thread
From: Maxime Coquelin @ 2020-04-09 15:18 UTC (permalink / raw)
To: Matan Azrad, dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler
On 3/31/20 1:12 PM, Matan Azrad wrote:
> As a preparation to listen to the virtqs' status before the device is
> configured, manage the virtq structures in an array instead of a list.
>
> Signed-off-by: Matan Azrad <matan@mellanox.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
> ---
> drivers/vdpa/mlx5/mlx5_vdpa.c | 43 ++++++++++++++++------------------
> drivers/vdpa/mlx5/mlx5_vdpa.h | 2 +-
> drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 43 ++++++++++++++++------------------
> drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 18 +++++++--------
> drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 46 +++++++++++++++----------------------
> 5 files changed, 68 insertions(+), 84 deletions(-)
>
> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
> index f10647b..b22f074 100644
> --- a/drivers/vdpa/mlx5/mlx5_vdpa.c
> +++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
> @@ -116,20 +116,18 @@
> {
> int did = rte_vhost_get_vdpa_device_id(vid);
> struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
> - struct mlx5_vdpa_virtq *virtq = NULL;
>
> if (priv == NULL) {
> DRV_LOG(ERR, "Invalid device id: %d.", did);
> return -EINVAL;
> }
> - SLIST_FOREACH(virtq, &priv->virtq_list, next)
> - if (virtq->index == vring)
> - break;
> - if (!virtq) {
> + if (!priv->configured || vring >= RTE_MIN((int)priv->nr_virtqs,
> + (int)priv->caps.max_num_virtio_queues * 2) ||
> + !priv->virtqs[vring].virtq) {
> DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
> return -EINVAL;
> }
> - return mlx5_vdpa_virtq_enable(virtq, state);
> + return mlx5_vdpa_virtq_enable(&priv->virtqs[vring], state);
> }
>
> static int
> @@ -482,28 +480,28 @@
> rte_errno = ENODEV;
> return -rte_errno;
> }
> - priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv),
> - RTE_CACHE_LINE_SIZE);
> - if (!priv) {
> - DRV_LOG(ERR, "Failed to allocate private memory.");
> - rte_errno = ENOMEM;
> - goto error;
> - }
> ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
> if (ret) {
> DRV_LOG(ERR, "Unable to read HCA capabilities.");
> rte_errno = ENOTSUP;
> goto error;
> - } else {
> - if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
> - DRV_LOG(ERR, "Not enough capabilities to support vdpa,"
> - " maybe old FW/OFED version?");
> - rte_errno = ENOTSUP;
> - goto error;
> - }
> - priv->caps = attr.vdpa;
> - priv->log_max_rqt_size = attr.log_max_rqt_size;
> + } else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
> + DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
> + "old FW/OFED version?");
> + rte_errno = ENOTSUP;
> + goto error;
> + }
> + priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
> + sizeof(struct mlx5_vdpa_virtq) *
> + attr.vdpa.max_num_virtio_queues * 2,
> + RTE_CACHE_LINE_SIZE);
> + if (!priv) {
> + DRV_LOG(ERR, "Failed to allocate private memory.");
> + rte_errno = ENOMEM;
> + goto error;
> }
> + priv->caps = attr.vdpa;
> + priv->log_max_rqt_size = attr.log_max_rqt_size;
> priv->ctx = ctx;
> priv->dev_addr.pci_addr = pci_dev->addr;
> priv->dev_addr.type = PCI_ADDR;
> @@ -519,7 +517,6 @@
> goto error;
> }
> SLIST_INIT(&priv->mr_list);
> - SLIST_INIT(&priv->virtq_list);
> pthread_mutex_lock(&priv_list_lock);
> TAILQ_INSERT_TAIL(&priv_list, priv, next);
> pthread_mutex_unlock(&priv_list_lock);
> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
> index 75af410..baec106 100644
> --- a/drivers/vdpa/mlx5/mlx5_vdpa.h
> +++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
> @@ -120,11 +120,11 @@ struct mlx5_vdpa_priv {
> uint16_t nr_virtqs;
> uint64_t features; /* Negotiated features. */
> uint16_t log_max_rqt_size;
> - SLIST_HEAD(virtq_list, mlx5_vdpa_virtq) virtq_list;
> struct mlx5_vdpa_steer steer;
> struct mlx5dv_var *var;
> void *virtq_db_addr;
> SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list;
> + struct mlx5_vdpa_virtq virtqs[];
> };
>
> /**
> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
> index 4457760..77f2eda 100644
> --- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
> +++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
> @@ -15,13 +15,12 @@
> .type = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE,
> .dirty_bitmap_dump_enable = enable,
> };
> - struct mlx5_vdpa_virtq *virtq;
> + int i;
>
> - SLIST_FOREACH(virtq, &priv->virtq_list, next) {
> - attr.queue_index = virtq->index;
> - if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
> - DRV_LOG(ERR, "Failed to modify virtq %d logging.",
> - virtq->index);
> + for (i = 0; i < priv->nr_virtqs; ++i) {
> + attr.queue_index = i;
> + if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
> + DRV_LOG(ERR, "Failed to modify virtq %d logging.", i);
> return -1;
> }
> }
> @@ -47,7 +46,7 @@
> .dirty_bitmap_size = log_size,
> };
> struct mlx5_vdpa_query_mr *mr = rte_malloc(__func__, sizeof(*mr), 0);
> - struct mlx5_vdpa_virtq *virtq;
> + int i;
>
> if (!mr) {
> DRV_LOG(ERR, "Failed to allocate mem for lm mr.");
> @@ -67,11 +66,10 @@
> goto err;
> }
> attr.dirty_bitmap_mkey = mr->mkey->id;
> - SLIST_FOREACH(virtq, &priv->virtq_list, next) {
> - attr.queue_index = virtq->index;
> - if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
> - DRV_LOG(ERR, "Failed to modify virtq %d for lm.",
> - virtq->index);
> + for (i = 0; i < priv->nr_virtqs; ++i) {
> + attr.queue_index = i;
> + if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
> + DRV_LOG(ERR, "Failed to modify virtq %d for lm.", i);
> goto err;
> }
> }
> @@ -94,9 +92,9 @@
> mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
> {
> struct mlx5_devx_virtq_attr attr = {0};
> - struct mlx5_vdpa_virtq *virtq;
> uint64_t features;
> int ret = rte_vhost_get_negotiated_features(priv->vid, &features);
> + int i;
>
> if (ret) {
> DRV_LOG(ERR, "Failed to get negotiated features.");
> @@ -104,27 +102,26 @@
> }
> if (!RTE_VHOST_NEED_LOG(features))
> return 0;
> - SLIST_FOREACH(virtq, &priv->virtq_list, next) {
> - ret = mlx5_vdpa_virtq_modify(virtq, 0);
> + for (i = 0; i < priv->nr_virtqs; ++i) {
> + ret = mlx5_vdpa_virtq_modify(&priv->virtqs[i], 0);
> if (ret)
> return -1;
> - if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
> - DRV_LOG(ERR, "Failed to query virtq %d.", virtq->index);
> + if (mlx5_devx_cmd_query_virtq(priv->virtqs[i].virtq, &attr)) {
> + DRV_LOG(ERR, "Failed to query virtq %d.", i);
> return -1;
> }
> DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
> - "hw_used_index=%d", priv->vid, virtq->index,
> + "hw_used_index=%d", priv->vid, i,
> attr.hw_available_index, attr.hw_used_index);
> - ret = rte_vhost_set_vring_base(priv->vid, virtq->index,
> + ret = rte_vhost_set_vring_base(priv->vid, i,
> attr.hw_available_index,
> attr.hw_used_index);
> if (ret) {
> - DRV_LOG(ERR, "Failed to set virtq %d base.",
> - virtq->index);
> + DRV_LOG(ERR, "Failed to set virtq %d base.", i);
> return -1;
> }
> - rte_vhost_log_used_vring(priv->vid, virtq->index, 0,
> - MLX5_VDPA_USED_RING_LEN(virtq->vq_size));
> + rte_vhost_log_used_vring(priv->vid, i, 0,
> + MLX5_VDPA_USED_RING_LEN(priv->virtqs[i].vq_size));
> }
> return 0;
> }
> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
> index 9c11452..96ffc21 100644
> --- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
> +++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
> @@ -76,13 +76,13 @@
> static int
> mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
> {
> - struct mlx5_vdpa_virtq *virtq;
> + int i;
> uint32_t rqt_n = RTE_MIN(MLX5_VDPA_DEFAULT_RQT_SIZE,
> 1 << priv->log_max_rqt_size);
> struct mlx5_devx_rqt_attr *attr = rte_zmalloc(__func__, sizeof(*attr)
> + rqt_n *
> sizeof(uint32_t), 0);
> - uint32_t i = 0, j;
> + uint32_t k = 0, j;
> int ret = 0;
>
> if (!attr) {
> @@ -90,15 +90,15 @@
> rte_errno = ENOMEM;
> return -ENOMEM;
> }
> - SLIST_FOREACH(virtq, &priv->virtq_list, next) {
> - if (is_virtq_recvq(virtq->index, priv->nr_virtqs) &&
> - virtq->enable) {
> - attr->rq_list[i] = virtq->virtq->id;
> - i++;
> + for (i = 0; i < priv->nr_virtqs; i++) {
> + if (is_virtq_recvq(i, priv->nr_virtqs) &&
> + priv->virtqs[i].enable) {
> + attr->rq_list[k] = priv->virtqs[i].virtq->id;
> + k++;
> }
> }
> - for (j = 0; i != rqt_n; ++i, ++j)
> - attr->rq_list[i] = attr->rq_list[j];
> + for (j = 0; k != rqt_n; ++k, ++j)
> + attr->rq_list[k] = attr->rq_list[j];
> attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
> attr->rqt_max_size = rqt_n;
> attr->rqt_actual_size = rqt_n;
> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
> index 8bebb92..3575272 100644
> --- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
> +++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
> @@ -59,12 +59,9 @@
> usleep(MLX5_VDPA_INTR_RETRIES_USEC);
> }
> }
> - virtq->intr_handle.fd = -1;
> }
> - if (virtq->virtq) {
> + if (virtq->virtq)
> claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
> - virtq->virtq = NULL;
> - }
> for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
> if (virtq->umems[i].obj)
> claim_zero(mlx5_glue->devx_umem_dereg
> @@ -72,27 +69,20 @@
> if (virtq->umems[i].buf)
> rte_free(virtq->umems[i].buf);
> }
> - memset(&virtq->umems, 0, sizeof(virtq->umems));
> if (virtq->eqp.fw_qp)
> mlx5_vdpa_event_qp_destroy(&virtq->eqp);
> + memset(virtq, 0, sizeof(*virtq));
> + virtq->intr_handle.fd = -1;
> return 0;
> }
>
> void
> mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
> {
> - struct mlx5_vdpa_virtq *entry;
> - struct mlx5_vdpa_virtq *next;
> + int i;
>
> - entry = SLIST_FIRST(&priv->virtq_list);
> - while (entry) {
> - next = SLIST_NEXT(entry, next);
> - mlx5_vdpa_virtq_unset(entry);
> - SLIST_REMOVE(&priv->virtq_list, entry, mlx5_vdpa_virtq, next);
> - rte_free(entry);
> - entry = next;
> - }
> - SLIST_INIT(&priv->virtq_list);
> + for (i = 0; i < priv->nr_virtqs; i++)
> + mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
> if (priv->tis) {
> claim_zero(mlx5_devx_cmd_destroy(priv->tis));
> priv->tis = NULL;
> @@ -106,6 +96,7 @@
> priv->virtq_db_addr = NULL;
> }
> priv->features = 0;
> + priv->nr_virtqs = 0;
> }
>
> int
> @@ -140,9 +131,9 @@
> }
>
> static int
> -mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv,
> - struct mlx5_vdpa_virtq *virtq, int index)
> +mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
> {
> + struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
> struct rte_vhost_vring vq;
> struct mlx5_devx_virtq_attr attr = {0};
> uint64_t gpa;
> @@ -340,7 +331,6 @@
> mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
> {
> struct mlx5_devx_tis_attr tis_attr = {0};
> - struct mlx5_vdpa_virtq *virtq;
> uint32_t i;
> uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
> int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
> @@ -349,6 +339,12 @@
> DRV_LOG(ERR, "Failed to configure negotiated features.");
> return -1;
> }
> + if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
> + DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
> + (int)priv->caps.max_num_virtio_queues * 2,
> + (int)nr_vring);
> + return -E2BIG;
All returns in this function are either -1 or 0 except this one, so it
looks inconsistent.
> + }
> /* Always map the entire page. */
> priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
> PROT_WRITE, MAP_SHARED, priv->ctx->cmd_fd,
> @@ -372,16 +368,10 @@
> DRV_LOG(ERR, "Failed to create TIS.");
> goto error;
> }
> - for (i = 0; i < nr_vring; i++) {
> - virtq = rte_zmalloc(__func__, sizeof(*virtq), 0);
> - if (!virtq || mlx5_vdpa_virtq_setup(priv, virtq, i)) {
> - if (virtq)
> - rte_free(virtq);
> - goto error;
> - }
> - SLIST_INSERT_HEAD(&priv->virtq_list, virtq, next);
> - }
> priv->nr_virtqs = nr_vring;
> + for (i = 0; i < nr_vring; i++)
> + if (mlx5_vdpa_virtq_setup(priv, i))
> + goto error;
> return 0;
> error:
> mlx5_vdpa_virtqs_release(priv);
>
* Re: [dpdk-dev] [PATCH 2/3] vdpa/mlx5: separate virtq stop
2020-03-31 11:12 ` [dpdk-dev] [PATCH 2/3] vdpa/mlx5: separate virtq stop Matan Azrad
@ 2020-04-09 15:28 ` Maxime Coquelin
2020-04-10 13:59 ` Matan Azrad
0 siblings, 1 reply; 19+ messages in thread
From: Maxime Coquelin @ 2020-04-09 15:28 UTC (permalink / raw)
To: Matan Azrad, dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler
On 3/31/20 1:12 PM, Matan Azrad wrote:
> In live migration, before loging the virtq, the driver queries the virtq
s/loging/logging/
> indexes after moving it to suspend mode.
>
> Separate this method into a new function, mlx5_vdpa_virtq_stop, as a
> preparation for reuse.
>
> Signed-off-by: Matan Azrad <matan@mellanox.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
> ---
> drivers/vdpa/mlx5/mlx5_vdpa.h | 13 +++++++++++++
> drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 17 ++---------------
> drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 26 ++++++++++++++++++++++++++
> 3 files changed, 41 insertions(+), 15 deletions(-)
>
Other than that:
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
* Re: [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array
2020-04-09 15:18 ` Maxime Coquelin
@ 2020-04-10 13:58 ` Matan Azrad
2020-04-10 13:59 ` Maxime Coquelin
0 siblings, 1 reply; 19+ messages in thread
From: Matan Azrad @ 2020-04-10 13:58 UTC (permalink / raw)
To: Maxime Coquelin, dev; +Cc: Slava Ovsiienko, Shahaf Shuler
Hi Maxime
From: Maxime Coquelin
> On 3/31/20 1:12 PM, Matan Azrad wrote:
> > As a preparation to listen to the virtqs' status before the device is
> > configured, manage the virtq structures in an array instead of a list.
> >
> > Signed-off-by: Matan Azrad <matan@mellanox.com>
> > Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
> > ---
> > drivers/vdpa/mlx5/mlx5_vdpa.c | 43 ++++++++++++++++----------------
> --
> > drivers/vdpa/mlx5/mlx5_vdpa.h | 2 +-
> > drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 43 ++++++++++++++++--------------
> ----
> > drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 18 +++++++--------
> > drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 46
> > +++++++++++++++----------------------
> > 5 files changed, 68 insertions(+), 84 deletions(-)
> >
> > diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c
> > b/drivers/vdpa/mlx5/mlx5_vdpa.c index f10647b..b22f074 100644
> > --- a/drivers/vdpa/mlx5/mlx5_vdpa.c
> > +++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
> > @@ -116,20 +116,18 @@
> > {
> > int did = rte_vhost_get_vdpa_device_id(vid);
> > struct mlx5_vdpa_priv *priv =
> mlx5_vdpa_find_priv_resource_by_did(did);
> > - struct mlx5_vdpa_virtq *virtq = NULL;
> >
> > if (priv == NULL) {
> > DRV_LOG(ERR, "Invalid device id: %d.", did);
> > return -EINVAL;
> > }
> > - SLIST_FOREACH(virtq, &priv->virtq_list, next)
> > - if (virtq->index == vring)
> > - break;
> > - if (!virtq) {
> > + if (!priv->configured || vring >= RTE_MIN((int)priv->nr_virtqs,
> > + (int)priv->caps.max_num_virtio_queues * 2) ||
> > + !priv->virtqs[vring].virtq) {
> > DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
> > return -EINVAL;
> > }
> > - return mlx5_vdpa_virtq_enable(virtq, state);
> > + return mlx5_vdpa_virtq_enable(&priv->virtqs[vring], state);
> > }
> >
> > static int
> > @@ -482,28 +480,28 @@
> > rte_errno = ENODEV;
> > return -rte_errno;
> > }
> > - priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv),
> > - RTE_CACHE_LINE_SIZE);
> > - if (!priv) {
> > - DRV_LOG(ERR, "Failed to allocate private memory.");
> > - rte_errno = ENOMEM;
> > - goto error;
> > - }
> > ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
> > if (ret) {
> > DRV_LOG(ERR, "Unable to read HCA capabilities.");
> > rte_errno = ENOTSUP;
> > goto error;
> > - } else {
> > - if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
> > - DRV_LOG(ERR, "Not enough capabilities to support
> vdpa,"
> > - " maybe old FW/OFED version?");
> > - rte_errno = ENOTSUP;
> > - goto error;
> > - }
> > - priv->caps = attr.vdpa;
> > - priv->log_max_rqt_size = attr.log_max_rqt_size;
> > + } else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
> > + DRV_LOG(ERR, "Not enough capabilities to support vdpa,
> maybe "
> > + "old FW/OFED version?");
> > + rte_errno = ENOTSUP;
> > + goto error;
> > + }
> > + priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
> > + sizeof(struct mlx5_vdpa_virtq) *
> > + attr.vdpa.max_num_virtio_queues * 2,
> > + RTE_CACHE_LINE_SIZE);
> > + if (!priv) {
> > + DRV_LOG(ERR, "Failed to allocate private memory.");
> > + rte_errno = ENOMEM;
> > + goto error;
> > }
> > + priv->caps = attr.vdpa;
> > + priv->log_max_rqt_size = attr.log_max_rqt_size;
> > priv->ctx = ctx;
> > priv->dev_addr.pci_addr = pci_dev->addr;
> > priv->dev_addr.type = PCI_ADDR;
> > @@ -519,7 +517,6 @@
> > goto error;
> > }
> > SLIST_INIT(&priv->mr_list);
> > - SLIST_INIT(&priv->virtq_list);
> > pthread_mutex_lock(&priv_list_lock);
> > TAILQ_INSERT_TAIL(&priv_list, priv, next);
> > pthread_mutex_unlock(&priv_list_lock);
> > diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h
> > b/drivers/vdpa/mlx5/mlx5_vdpa.h index 75af410..baec106 100644
> > --- a/drivers/vdpa/mlx5/mlx5_vdpa.h
> > +++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
> > @@ -120,11 +120,11 @@ struct mlx5_vdpa_priv {
> > uint16_t nr_virtqs;
> > uint64_t features; /* Negotiated features. */
> > uint16_t log_max_rqt_size;
> > - SLIST_HEAD(virtq_list, mlx5_vdpa_virtq) virtq_list;
> > struct mlx5_vdpa_steer steer;
> > struct mlx5dv_var *var;
> > void *virtq_db_addr;
> > SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list;
> > + struct mlx5_vdpa_virtq virtqs[];
> > };
> >
> > /**
> > diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
> > b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
> > index 4457760..77f2eda 100644
> > --- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
> > +++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
> > @@ -15,13 +15,12 @@
> > .type =
> MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE,
> > .dirty_bitmap_dump_enable = enable,
> > };
> > - struct mlx5_vdpa_virtq *virtq;
> > + int i;
> >
> > - SLIST_FOREACH(virtq, &priv->virtq_list, next) {
> > - attr.queue_index = virtq->index;
> > - if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
> > - DRV_LOG(ERR, "Failed to modify virtq %d logging.",
> > - virtq->index);
> > + for (i = 0; i < priv->nr_virtqs; ++i) {
> > + attr.queue_index = i;
> > + if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr))
> {
> > + DRV_LOG(ERR, "Failed to modify virtq %d logging.",
> i);
> > return -1;
> > }
> > }
> > @@ -47,7 +46,7 @@
> > .dirty_bitmap_size = log_size,
> > };
> > struct mlx5_vdpa_query_mr *mr = rte_malloc(__func__,
> sizeof(*mr), 0);
> > - struct mlx5_vdpa_virtq *virtq;
> > + int i;
> >
> > if (!mr) {
> > DRV_LOG(ERR, "Failed to allocate mem for lm mr."); @@ -
> 67,11 +66,10
> > @@
> > goto err;
> > }
> > attr.dirty_bitmap_mkey = mr->mkey->id;
> > - SLIST_FOREACH(virtq, &priv->virtq_list, next) {
> > - attr.queue_index = virtq->index;
> > - if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
> > - DRV_LOG(ERR, "Failed to modify virtq %d for lm.",
> > - virtq->index);
> > + for (i = 0; i < priv->nr_virtqs; ++i) {
> > + attr.queue_index = i;
> > + if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr))
> {
> > + DRV_LOG(ERR, "Failed to modify virtq %d for lm.", i);
> > goto err;
> > }
> > }
> > @@ -94,9 +92,9 @@
> > mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv) {
> > struct mlx5_devx_virtq_attr attr = {0};
> > - struct mlx5_vdpa_virtq *virtq;
> > uint64_t features;
> > int ret = rte_vhost_get_negotiated_features(priv->vid, &features);
> > + int i;
> >
> > if (ret) {
> > DRV_LOG(ERR, "Failed to get negotiated features."); @@ -
> 104,27
> > +102,26 @@
> > }
> > if (!RTE_VHOST_NEED_LOG(features))
> > return 0;
> > - SLIST_FOREACH(virtq, &priv->virtq_list, next) {
> > - ret = mlx5_vdpa_virtq_modify(virtq, 0);
> > + for (i = 0; i < priv->nr_virtqs; ++i) {
> > + ret = mlx5_vdpa_virtq_modify(&priv->virtqs[i], 0);
> > if (ret)
> > return -1;
> > - if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
> > - DRV_LOG(ERR, "Failed to query virtq %d.", virtq-
> >index);
> > + if (mlx5_devx_cmd_query_virtq(priv->virtqs[i].virtq, &attr)) {
> > + DRV_LOG(ERR, "Failed to query virtq %d.", i);
> > return -1;
> > }
> > DRV_LOG(INFO, "Query vid %d vring %d:
> hw_available_idx=%d, "
> > - "hw_used_index=%d", priv->vid, virtq->index,
> > + "hw_used_index=%d", priv->vid, i,
> > attr.hw_available_index, attr.hw_used_index);
> > - ret = rte_vhost_set_vring_base(priv->vid, virtq->index,
> > + ret = rte_vhost_set_vring_base(priv->vid, i,
> > attr.hw_available_index,
> > attr.hw_used_index);
> > if (ret) {
> > - DRV_LOG(ERR, "Failed to set virtq %d base.",
> > - virtq->index);
> > + DRV_LOG(ERR, "Failed to set virtq %d base.", i);
> > return -1;
> > }
> > - rte_vhost_log_used_vring(priv->vid, virtq->index, 0,
> > - MLX5_VDPA_USED_RING_LEN(virtq-
> >vq_size));
> > + rte_vhost_log_used_vring(priv->vid, i, 0,
> > + MLX5_VDPA_USED_RING_LEN(priv-
> >virtqs[i].vq_size));
> > }
> > return 0;
> > }
> > diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
> > b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
> > index 9c11452..96ffc21 100644
> > --- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
> > +++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
> > @@ -76,13 +76,13 @@
> > static int
> > mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv) {
> > - struct mlx5_vdpa_virtq *virtq;
> > + int i;
> > uint32_t rqt_n = RTE_MIN(MLX5_VDPA_DEFAULT_RQT_SIZE,
> > 1 << priv->log_max_rqt_size);
> > struct mlx5_devx_rqt_attr *attr = rte_zmalloc(__func__,
> sizeof(*attr)
> > + rqt_n *
> > sizeof(uint32_t), 0);
> > - uint32_t i = 0, j;
> > + uint32_t k = 0, j;
> > int ret = 0;
> >
> > if (!attr) {
> > @@ -90,15 +90,15 @@
> > rte_errno = ENOMEM;
> > return -ENOMEM;
> > }
> > - SLIST_FOREACH(virtq, &priv->virtq_list, next) {
> > - if (is_virtq_recvq(virtq->index, priv->nr_virtqs) &&
> > - virtq->enable) {
> > - attr->rq_list[i] = virtq->virtq->id;
> > - i++;
> > + for (i = 0; i < priv->nr_virtqs; i++) {
> > + if (is_virtq_recvq(i, priv->nr_virtqs) &&
> > + priv->virtqs[i].enable) {
> > + attr->rq_list[k] = priv->virtqs[i].virtq->id;
> > + k++;
> > }
> > }
> > - for (j = 0; i != rqt_n; ++i, ++j)
> > - attr->rq_list[i] = attr->rq_list[j];
> > + for (j = 0; k != rqt_n; ++k, ++j)
> > + attr->rq_list[k] = attr->rq_list[j];
> > attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
> > attr->rqt_max_size = rqt_n;
> > attr->rqt_actual_size = rqt_n;
> > diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
> > b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
> > index 8bebb92..3575272 100644
> > --- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
> > +++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
> > @@ -59,12 +59,9 @@
> > usleep(MLX5_VDPA_INTR_RETRIES_USEC);
> > }
> > }
> > - virtq->intr_handle.fd = -1;
> > }
> > - if (virtq->virtq) {
> > + if (virtq->virtq)
> > claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
> > - virtq->virtq = NULL;
> > - }
> > for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
> > if (virtq->umems[i].obj)
> > claim_zero(mlx5_glue->devx_umem_dereg
> > @@ -72,27 +69,20 @@
> > if (virtq->umems[i].buf)
> > rte_free(virtq->umems[i].buf);
> > }
> > - memset(&virtq->umems, 0, sizeof(virtq->umems));
> > if (virtq->eqp.fw_qp)
> > mlx5_vdpa_event_qp_destroy(&virtq->eqp);
> > + memset(virtq, 0, sizeof(*virtq));
> > + virtq->intr_handle.fd = -1;
> > return 0;
> > }
> >
> > void
> > mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv) {
> > - struct mlx5_vdpa_virtq *entry;
> > - struct mlx5_vdpa_virtq *next;
> > + int i;
> >
> > - entry = SLIST_FIRST(&priv->virtq_list);
> > - while (entry) {
> > - next = SLIST_NEXT(entry, next);
> > - mlx5_vdpa_virtq_unset(entry);
> > - SLIST_REMOVE(&priv->virtq_list, entry, mlx5_vdpa_virtq,
> next);
> > - rte_free(entry);
> > - entry = next;
> > - }
> > - SLIST_INIT(&priv->virtq_list);
> > + for (i = 0; i < priv->nr_virtqs; i++)
> > + mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
> > if (priv->tis) {
> > claim_zero(mlx5_devx_cmd_destroy(priv->tis));
> > priv->tis = NULL;
> > @@ -106,6 +96,7 @@
> > priv->virtq_db_addr = NULL;
> > }
> > priv->features = 0;
> > + priv->nr_virtqs = 0;
> > }
> >
> > int
> > @@ -140,9 +131,9 @@
> > }
> >
> > static int
> > -mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv,
> > - struct mlx5_vdpa_virtq *virtq, int index)
> > +mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
> > {
> > + struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
> > struct rte_vhost_vring vq;
> > struct mlx5_devx_virtq_attr attr = {0};
> > uint64_t gpa;
> > @@ -340,7 +331,6 @@
> > mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv) {
> > struct mlx5_devx_tis_attr tis_attr = {0};
> > - struct mlx5_vdpa_virtq *virtq;
> > uint32_t i;
> > uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
> > int ret = rte_vhost_get_negotiated_features(priv->vid,
> > &priv->features); @@ -349,6 +339,12 @@
> > DRV_LOG(ERR, "Failed to configure negotiated features.");
> > return -1;
> > }
> > + if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
> > + DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
> > + (int)priv->caps.max_num_virtio_queues * 2,
> > + (int)nr_vring);
> > + return -E2BIG;
>
> All returns in this function are either -1 or 0 except this one, so it looks
> inconsistent.
You can find the consistency in negative values for errors.
If you insist and this is your only concern here, I agree to change it to -1.
Can it be done during integration?
>
> > + }
> > /* Always map the entire page. */
> > priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
> > PROT_WRITE, MAP_SHARED, priv->ctx-
> >cmd_fd, @@ -372,16 +368,10
> > @@
> > DRV_LOG(ERR, "Failed to create TIS.");
> > goto error;
> > }
> > - for (i = 0; i < nr_vring; i++) {
> > - virtq = rte_zmalloc(__func__, sizeof(*virtq), 0);
> > - if (!virtq || mlx5_vdpa_virtq_setup(priv, virtq, i)) {
> > - if (virtq)
> > - rte_free(virtq);
> > - goto error;
> > - }
> > - SLIST_INSERT_HEAD(&priv->virtq_list, virtq, next);
> > - }
> > priv->nr_virtqs = nr_vring;
> > + for (i = 0; i < nr_vring; i++)
> > + if (mlx5_vdpa_virtq_setup(priv, i))
> > + goto error;
> > return 0;
> > error:
> > mlx5_vdpa_virtqs_release(priv);
> >
* Re: [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array
2020-04-10 13:58 ` Matan Azrad
@ 2020-04-10 13:59 ` Maxime Coquelin
0 siblings, 0 replies; 19+ messages in thread
From: Maxime Coquelin @ 2020-04-10 13:59 UTC (permalink / raw)
To: Matan Azrad, dev; +Cc: Slava Ovsiienko, Shahaf Shuler
On 4/10/20 3:58 PM, Matan Azrad wrote:
> Hi Maxime
>
> From: Maxime Coquelin
>> On 3/31/20 1:12 PM, Matan Azrad wrote:
>>> As a preparation to listen to the virtqs' status before the device is
>>> configured, manage the virtq structures in an array instead of a list.
>>>
>>> Signed-off-by: Matan Azrad <matan@mellanox.com>
>>> Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
>>> ---
>>> drivers/vdpa/mlx5/mlx5_vdpa.c | 43 ++++++++++++++++----------------
>> --
>>> drivers/vdpa/mlx5/mlx5_vdpa.h | 2 +-
>>> drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 43 ++++++++++++++++--------------
>> ----
>>> drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 18 +++++++--------
>>> drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 46
>>> +++++++++++++++----------------------
>>> 5 files changed, 68 insertions(+), 84 deletions(-)
>>>
>>> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c
>>> b/drivers/vdpa/mlx5/mlx5_vdpa.c index f10647b..b22f074 100644
>>> --- a/drivers/vdpa/mlx5/mlx5_vdpa.c
>>> +++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
>>> @@ -116,20 +116,18 @@
>>> {
>>> int did = rte_vhost_get_vdpa_device_id(vid);
>>> struct mlx5_vdpa_priv *priv =
>> mlx5_vdpa_find_priv_resource_by_did(did);
>>> - struct mlx5_vdpa_virtq *virtq = NULL;
>>>
>>> if (priv == NULL) {
>>> DRV_LOG(ERR, "Invalid device id: %d.", did);
>>> return -EINVAL;
>>> }
>>> - SLIST_FOREACH(virtq, &priv->virtq_list, next)
>>> - if (virtq->index == vring)
>>> - break;
>>> - if (!virtq) {
>>> + if (!priv->configured || vring >= RTE_MIN((int)priv->nr_virtqs,
>>> + (int)priv->caps.max_num_virtio_queues * 2) ||
>>> + !priv->virtqs[vring].virtq) {
>>> DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
>>> return -EINVAL;
>>> }
>>> - return mlx5_vdpa_virtq_enable(virtq, state);
>>> + return mlx5_vdpa_virtq_enable(&priv->virtqs[vring], state);
>>> }
>>>
>>> static int
>>> @@ -482,28 +480,28 @@
>>> rte_errno = ENODEV;
>>> return -rte_errno;
>>> }
>>> - priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv),
>>> - RTE_CACHE_LINE_SIZE);
>>> - if (!priv) {
>>> - DRV_LOG(ERR, "Failed to allocate private memory.");
>>> - rte_errno = ENOMEM;
>>> - goto error;
>>> - }
>>> ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
>>> if (ret) {
>>> DRV_LOG(ERR, "Unable to read HCA capabilities.");
>>> rte_errno = ENOTSUP;
>>> goto error;
>>> - } else {
>>> - if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
>>> - DRV_LOG(ERR, "Not enough capabilities to support
>> vdpa,"
>>> - " maybe old FW/OFED version?");
>>> - rte_errno = ENOTSUP;
>>> - goto error;
>>> - }
>>> - priv->caps = attr.vdpa;
>>> - priv->log_max_rqt_size = attr.log_max_rqt_size;
>>> + } else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
>>> + DRV_LOG(ERR, "Not enough capabilities to support vdpa,
>> maybe "
>>> + "old FW/OFED version?");
>>> + rte_errno = ENOTSUP;
>>> + goto error;
>>> + }
>>> + priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
>>> + sizeof(struct mlx5_vdpa_virtq) *
>>> + attr.vdpa.max_num_virtio_queues * 2,
>>> + RTE_CACHE_LINE_SIZE);
>>> + if (!priv) {
>>> + DRV_LOG(ERR, "Failed to allocate private memory.");
>>> + rte_errno = ENOMEM;
>>> + goto error;
>>> }
>>> + priv->caps = attr.vdpa;
>>> + priv->log_max_rqt_size = attr.log_max_rqt_size;
>>> priv->ctx = ctx;
>>> priv->dev_addr.pci_addr = pci_dev->addr;
>>> priv->dev_addr.type = PCI_ADDR;
>>> @@ -519,7 +517,6 @@
>>> goto error;
>>> }
>>> SLIST_INIT(&priv->mr_list);
>>> - SLIST_INIT(&priv->virtq_list);
>>> pthread_mutex_lock(&priv_list_lock);
>>> TAILQ_INSERT_TAIL(&priv_list, priv, next);
>>> pthread_mutex_unlock(&priv_list_lock);
>>> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
>>> index 75af410..baec106 100644
>>> --- a/drivers/vdpa/mlx5/mlx5_vdpa.h
>>> +++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
>>> @@ -120,11 +120,11 @@ struct mlx5_vdpa_priv {
>>> uint16_t nr_virtqs;
>>> uint64_t features; /* Negotiated features. */
>>> uint16_t log_max_rqt_size;
>>> - SLIST_HEAD(virtq_list, mlx5_vdpa_virtq) virtq_list;
>>> struct mlx5_vdpa_steer steer;
>>> struct mlx5dv_var *var;
>>> void *virtq_db_addr;
>>> SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list;
>>> + struct mlx5_vdpa_virtq virtqs[];
>>> };
>>>
>>> /**
>>> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
>>> index 4457760..77f2eda 100644
>>> --- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
>>> +++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
>>> @@ -15,13 +15,12 @@
>>> .type = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE,
>>> .dirty_bitmap_dump_enable = enable,
>>> };
>>> - struct mlx5_vdpa_virtq *virtq;
>>> + int i;
>>>
>>> - SLIST_FOREACH(virtq, &priv->virtq_list, next) {
>>> - attr.queue_index = virtq->index;
>>> - if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
>>> - DRV_LOG(ERR, "Failed to modify virtq %d logging.",
>>> - virtq->index);
>>> + for (i = 0; i < priv->nr_virtqs; ++i) {
>>> + attr.queue_index = i;
>>> + if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
>>> + DRV_LOG(ERR, "Failed to modify virtq %d logging.", i);
>>> return -1;
>>> }
>>> }
>>> @@ -47,7 +46,7 @@
>>> .dirty_bitmap_size = log_size,
>>> };
>>> struct mlx5_vdpa_query_mr *mr = rte_malloc(__func__, sizeof(*mr), 0);
>>> - struct mlx5_vdpa_virtq *virtq;
>>> + int i;
>>>
>>> if (!mr) {
>>> DRV_LOG(ERR, "Failed to allocate mem for lm mr."); @@ -
>> 67,11 +66,10
>>> @@
>>> goto err;
>>> }
>>> attr.dirty_bitmap_mkey = mr->mkey->id;
>>> - SLIST_FOREACH(virtq, &priv->virtq_list, next) {
>>> - attr.queue_index = virtq->index;
>>> - if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
>>> - DRV_LOG(ERR, "Failed to modify virtq %d for lm.",
>>> - virtq->index);
>>> + for (i = 0; i < priv->nr_virtqs; ++i) {
>>> + attr.queue_index = i;
>>> + if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
>>> + DRV_LOG(ERR, "Failed to modify virtq %d for lm.", i);
>>> goto err;
>>> }
>>> }
>>> @@ -94,9 +92,9 @@
>>> mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv) {
>>> struct mlx5_devx_virtq_attr attr = {0};
>>> - struct mlx5_vdpa_virtq *virtq;
>>> uint64_t features;
>>> int ret = rte_vhost_get_negotiated_features(priv->vid, &features);
>>> + int i;
>>>
>>> if (ret) {
>>> DRV_LOG(ERR, "Failed to get negotiated features."); @@ -
>> 104,27
>>> +102,26 @@
>>> }
>>> if (!RTE_VHOST_NEED_LOG(features))
>>> return 0;
>>> - SLIST_FOREACH(virtq, &priv->virtq_list, next) {
>>> - ret = mlx5_vdpa_virtq_modify(virtq, 0);
>>> + for (i = 0; i < priv->nr_virtqs; ++i) {
>>> + ret = mlx5_vdpa_virtq_modify(&priv->virtqs[i], 0);
>>> if (ret)
>>> return -1;
>>> - if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
>>> - DRV_LOG(ERR, "Failed to query virtq %d.", virtq-
>>> index);
>>> + if (mlx5_devx_cmd_query_virtq(priv->virtqs[i].virtq, &attr)) {
>>> + DRV_LOG(ERR, "Failed to query virtq %d.", i);
>>> return -1;
>>> }
>>> DRV_LOG(INFO, "Query vid %d vring %d:
>> hw_available_idx=%d, "
>>> - "hw_used_index=%d", priv->vid, virtq->index,
>>> + "hw_used_index=%d", priv->vid, i,
>>> attr.hw_available_index, attr.hw_used_index);
>>> - ret = rte_vhost_set_vring_base(priv->vid, virtq->index,
>>> + ret = rte_vhost_set_vring_base(priv->vid, i,
>>> attr.hw_available_index,
>>> attr.hw_used_index);
>>> if (ret) {
>>> - DRV_LOG(ERR, "Failed to set virtq %d base.",
>>> - virtq->index);
>>> + DRV_LOG(ERR, "Failed to set virtq %d base.", i);
>>> return -1;
>>> }
>>> - rte_vhost_log_used_vring(priv->vid, virtq->index, 0,
>>> - MLX5_VDPA_USED_RING_LEN(virtq->vq_size));
>>> + rte_vhost_log_used_vring(priv->vid, i, 0,
>>> + MLX5_VDPA_USED_RING_LEN(priv->virtqs[i].vq_size));
>>> }
>>> return 0;
>>> }
>>> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
>>> index 9c11452..96ffc21 100644
>>> --- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
>>> +++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
>>> @@ -76,13 +76,13 @@
>>> static int
>>> mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv) {
>>> - struct mlx5_vdpa_virtq *virtq;
>>> + int i;
>>> uint32_t rqt_n = RTE_MIN(MLX5_VDPA_DEFAULT_RQT_SIZE,
>>> 1 << priv->log_max_rqt_size);
>>> struct mlx5_devx_rqt_attr *attr = rte_zmalloc(__func__, sizeof(*attr)
>>> + rqt_n *
>>> sizeof(uint32_t), 0);
>>> - uint32_t i = 0, j;
>>> + uint32_t k = 0, j;
>>> int ret = 0;
>>>
>>> if (!attr) {
>>> @@ -90,15 +90,15 @@
>>> rte_errno = ENOMEM;
>>> return -ENOMEM;
>>> }
>>> - SLIST_FOREACH(virtq, &priv->virtq_list, next) {
>>> - if (is_virtq_recvq(virtq->index, priv->nr_virtqs) &&
>>> - virtq->enable) {
>>> - attr->rq_list[i] = virtq->virtq->id;
>>> - i++;
>>> + for (i = 0; i < priv->nr_virtqs; i++) {
>>> + if (is_virtq_recvq(i, priv->nr_virtqs) &&
>>> + priv->virtqs[i].enable) {
>>> + attr->rq_list[k] = priv->virtqs[i].virtq->id;
>>> + k++;
>>> }
>>> }
>>> - for (j = 0; i != rqt_n; ++i, ++j)
>>> - attr->rq_list[i] = attr->rq_list[j];
>>> + for (j = 0; k != rqt_n; ++k, ++j)
>>> + attr->rq_list[k] = attr->rq_list[j];
>>> attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
>>> attr->rqt_max_size = rqt_n;
>>> attr->rqt_actual_size = rqt_n;
>>> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
>>> index 8bebb92..3575272 100644
>>> --- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
>>> +++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
>>> @@ -59,12 +59,9 @@
>>> usleep(MLX5_VDPA_INTR_RETRIES_USEC);
>>> }
>>> }
>>> - virtq->intr_handle.fd = -1;
>>> }
>>> - if (virtq->virtq) {
>>> + if (virtq->virtq)
>>> claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
>>> - virtq->virtq = NULL;
>>> - }
>>> for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
>>> if (virtq->umems[i].obj)
>>> claim_zero(mlx5_glue->devx_umem_dereg
>>> @@ -72,27 +69,20 @@
>>> if (virtq->umems[i].buf)
>>> rte_free(virtq->umems[i].buf);
>>> }
>>> - memset(&virtq->umems, 0, sizeof(virtq->umems));
>>> if (virtq->eqp.fw_qp)
>>> mlx5_vdpa_event_qp_destroy(&virtq->eqp);
>>> + memset(virtq, 0, sizeof(*virtq));
>>> + virtq->intr_handle.fd = -1;
>>> return 0;
>>> }
>>>
>>> void
>>> mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv) {
>>> - struct mlx5_vdpa_virtq *entry;
>>> - struct mlx5_vdpa_virtq *next;
>>> + int i;
>>>
>>> - entry = SLIST_FIRST(&priv->virtq_list);
>>> - while (entry) {
>>> - next = SLIST_NEXT(entry, next);
>>> - mlx5_vdpa_virtq_unset(entry);
>>> - SLIST_REMOVE(&priv->virtq_list, entry, mlx5_vdpa_virtq, next);
>>> - rte_free(entry);
>>> - entry = next;
>>> - }
>>> - SLIST_INIT(&priv->virtq_list);
>>> + for (i = 0; i < priv->nr_virtqs; i++)
>>> + mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
>>> if (priv->tis) {
>>> claim_zero(mlx5_devx_cmd_destroy(priv->tis));
>>> priv->tis = NULL;
>>> @@ -106,6 +96,7 @@
>>> priv->virtq_db_addr = NULL;
>>> }
>>> priv->features = 0;
>>> + priv->nr_virtqs = 0;
>>> }
>>>
>>> int
>>> @@ -140,9 +131,9 @@
>>> }
>>>
>>> static int
>>> -mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv,
>>> - struct mlx5_vdpa_virtq *virtq, int index)
>>> +mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
>>> {
>>> + struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
>>> struct rte_vhost_vring vq;
>>> struct mlx5_devx_virtq_attr attr = {0};
>>> uint64_t gpa;
>>> @@ -340,7 +331,6 @@
>>> mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv) {
>>> struct mlx5_devx_tis_attr tis_attr = {0};
>>> - struct mlx5_vdpa_virtq *virtq;
>>> uint32_t i;
>>> uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
>>> int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
>>> @@ -349,6 +339,12 @@
>>> DRV_LOG(ERR, "Failed to configure negotiated features.");
>>> return -1;
>>> }
>>> + if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
>>> + DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
>>> + (int)priv->caps.max_num_virtio_queues * 2,
>>> + (int)nr_vring);
>>> + return -E2BIG;
>>
>> All returns in this function are either -1 or 0 except this one, so it looks
>> inconsistent.
> The consistency you can find is that all error returns are negative values.
> If you insist and this is your only concern here, I agree to change it to -1.
> Can it be done in integration?
Yes, I can/will do it when applying the patch.
Wanted your green light before proceeding.
Thanks,
Maxime
>
>>
>>> + }
>>> /* Always map the entire page. */
>>> priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
>>> PROT_WRITE, MAP_SHARED, priv->ctx->cmd_fd,
>>> @@ -372,16 +368,10 @@
>>> DRV_LOG(ERR, "Failed to create TIS.");
>>> goto error;
>>> }
>>> - for (i = 0; i < nr_vring; i++) {
>>> - virtq = rte_zmalloc(__func__, sizeof(*virtq), 0);
>>> - if (!virtq || mlx5_vdpa_virtq_setup(priv, virtq, i)) {
>>> - if (virtq)
>>> - rte_free(virtq);
>>> - goto error;
>>> - }
>>> - SLIST_INSERT_HEAD(&priv->virtq_list, virtq, next);
>>> - }
>>> priv->nr_virtqs = nr_vring;
>>> + for (i = 0; i < nr_vring; i++)
>>> + if (mlx5_vdpa_virtq_setup(priv, i))
>>> + goto error;
>>> return 0;
>>> error:
>>> mlx5_vdpa_virtqs_release(priv);
>>>
>
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH 2/3] vdpa/mlx5: separate virtq stop
2020-04-09 15:28 ` Maxime Coquelin
@ 2020-04-10 13:59 ` Matan Azrad
2020-04-10 14:04 ` Maxime Coquelin
0 siblings, 1 reply; 19+ messages in thread
From: Matan Azrad @ 2020-04-10 13:59 UTC (permalink / raw)
To: Maxime Coquelin, dev; +Cc: Slava Ovsiienko, Shahaf Shuler
Hi Maxime
From: Maxime Coquelin
> On 3/31/20 1:12 PM, Matan Azrad wrote:
> > In live migration, before loging the virtq, the driver queries the
> > virtq
> s/loging/logging/
Ok, can this small change be done in integration?
> > indexes after moving it to suspend mode.
> >
> > Separate this method to new function mlx5_vdpa_virtq_stop as a
> > preparation for reusing.
> >
> > Signed-off-by: Matan Azrad <matan@mellanox.com>
> > Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
> > ---
> > drivers/vdpa/mlx5/mlx5_vdpa.h | 13 +++++++++++++
> > drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 17 ++---------------
> > drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 26 ++++++++++++++++++++++++++
> > 3 files changed, 41 insertions(+), 15 deletions(-)
> >
>
> Other than that:
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>
> Thanks,
> Maxime
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH 2/3] vdpa/mlx5: separate virtq stop
2020-04-10 13:59 ` Matan Azrad
@ 2020-04-10 14:04 ` Maxime Coquelin
0 siblings, 0 replies; 19+ messages in thread
From: Maxime Coquelin @ 2020-04-10 14:04 UTC (permalink / raw)
To: Matan Azrad, dev; +Cc: Slava Ovsiienko, Shahaf Shuler
On 4/10/20 3:59 PM, Matan Azrad wrote:
>
> Hi Maxime
>
> From: Maxime Coquelin
>> On 3/31/20 1:12 PM, Matan Azrad wrote:
>>> In live migration, before loging the virtq, the driver queries the
>>> virtq
>> s/loging/logging/
> Ok, can this small change be done in integration?
Of course!
>
>>> indexes after moving it to suspend mode.
>>>
>>> Separate this method to new function mlx5_vdpa_virtq_stop as a
>>> preparation for reusing.
>>>
>>> Signed-off-by: Matan Azrad <matan@mellanox.com>
>>> Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
>>> ---
>>> drivers/vdpa/mlx5/mlx5_vdpa.h | 13 +++++++++++++
>>> drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 17 ++---------------
>>> drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 26 ++++++++++++++++++++++++++
>>> 3 files changed, 41 insertions(+), 15 deletions(-)
>>>
>>
>> Other than that:
>> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>>
>> Thanks,
>> Maxime
>
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH 3/3] vdpa/mlx5: recteate a virtq becoming enabled
2020-03-31 11:12 ` [dpdk-dev] [PATCH 3/3] vdpa/mlx5: recteate a virtq becoming enabled Matan Azrad
@ 2020-04-15 14:06 ` Maxime Coquelin
0 siblings, 0 replies; 19+ messages in thread
From: Maxime Coquelin @ 2020-04-15 14:06 UTC (permalink / raw)
To: Matan Azrad, dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler
s/recteate/recreate/
On 3/31/20 1:12 PM, Matan Azrad wrote:
> The virtq configuarations may be changed when it moves from disabled
s/configuarations/configurations/
> state to enabled state.
>
> Listen to the state callback even if the device is not configured.
> Recreate the virtq when it moves from disabled state to enabled state
> and when the device is configured.
>
> Signed-off-by: Matan Azrad <matan@mellanox.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
> ---
> drivers/vdpa/mlx5/mlx5_vdpa.c | 12 +++--
> drivers/vdpa/mlx5/mlx5_vdpa.h | 39 +++++++++++++++--
> drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 17 +++++---
> drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 87 ++++++++++++++++++-------------------
> drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 59 ++++++++++++++++++++++---
> 5 files changed, 146 insertions(+), 68 deletions(-)
>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array
2020-03-31 11:12 ` [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array Matan Azrad
2020-04-09 15:18 ` Maxime Coquelin
@ 2020-04-15 14:06 ` Maxime Coquelin
1 sibling, 0 replies; 19+ messages in thread
From: Maxime Coquelin @ 2020-04-15 14:06 UTC (permalink / raw)
To: Matan Azrad, dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler
On 3/31/20 1:12 PM, Matan Azrad wrote:
> As a preparation to listen the virtqs status before the device is
> configured, manage the virtqs structures in array instead of list.
>
> Signed-off-by: Matan Azrad <matan@mellanox.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
> ---
> drivers/vdpa/mlx5/mlx5_vdpa.c | 43 ++++++++++++++++------------------
> drivers/vdpa/mlx5/mlx5_vdpa.h | 2 +-
> drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 43 ++++++++++++++++------------------
> drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 18 +++++++--------
> drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 46 +++++++++++++++----------------------
> 5 files changed, 68 insertions(+), 84 deletions(-)
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recteate a virtq becoming enabled
2020-03-31 11:12 [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recteate a virtq becoming enabled Matan Azrad
` (2 preceding siblings ...)
2020-03-31 11:12 ` [dpdk-dev] [PATCH 3/3] vdpa/mlx5: recteate a virtq becoming enabled Matan Azrad
@ 2020-04-17 14:58 ` Maxime Coquelin
2020-04-26 12:07 ` [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recreate " Matan Azrad
4 siblings, 0 replies; 19+ messages in thread
From: Maxime Coquelin @ 2020-04-17 14:58 UTC (permalink / raw)
To: Matan Azrad, dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler
Hi Matan,
On 3/31/20 1:12 PM, Matan Azrad wrote:
> Since a virtq configuration may be changed in disable state it is better to recreate a virtq becoming enabled.
> This series adding this behaviour to the mlx5 driver for vDPA.
>
> Matan Azrad (3):
> vdpa/mlx5: manage virtqs by array
> vdpa/mlx5: separate virtq stop
> vdpa/mlx5: recteate a virtq becoming enabled
>
> drivers/vdpa/mlx5/mlx5_vdpa.c | 47 +++++++--------
> drivers/vdpa/mlx5/mlx5_vdpa.h | 54 +++++++++++++++--
> drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 57 ++++++++----------
> drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 103 ++++++++++++++++----------------
> drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 113 +++++++++++++++++++++++++++---------
> 5 files changed, 231 insertions(+), 143 deletions(-)
>
I wanted to apply your series, but it fails to apply:
$ git am -3 vdpa-mlx5-recteate-a-virtq-becoming-enabled.patch
Applying: vdpa/mlx5: manage virtqs by array
error: sha1 information is lacking or useless
(drivers/vdpa/mlx5/mlx5_vdpa.c).
error: could not build fake ancestor
Patch failed at 0001 vdpa/mlx5: manage virtqs by array
hint: Use 'git am --show-current-patch' to see the failed patch
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".
Could you please rebase it on top of dpdk-next-virtio/master?
While doing that, if you could also fix the minor issues/typos I
reported, that would be great!
Thanks,
Maxime
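A minimal sketch of what this rebase-and-resend flow might look like, assuming the dpdk-next-virtio remote URL and standard git options (the commands below are illustrative and not taken from this thread):
$ # fetch the maintainer's tree and rebase the series on top of it
$ git fetch git://dpdk.org/next/dpdk-next-virtio master
$ git rebase FETCH_HEAD
$ # regenerate the three patches as a v2 with a cover letter and resend
$ git format-patch -3 -v2 --cover-letter -o v2/
$ git send-email --to dev@dpdk.org v2/*.patch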
^ permalink raw reply [flat|nested] 19+ messages in thread
* [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recreate a virtq becoming enabled
2020-03-31 11:12 [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recteate a virtq becoming enabled Matan Azrad
` (3 preceding siblings ...)
2020-04-17 14:58 ` [dpdk-dev] [PATCH 0/3] " Maxime Coquelin
@ 2020-04-26 12:07 ` Matan Azrad
2020-04-26 12:07 ` [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array Matan Azrad
` (3 more replies)
4 siblings, 4 replies; 19+ messages in thread
From: Matan Azrad @ 2020-04-26 12:07 UTC (permalink / raw)
To: dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler, Maxime Coquelin
Since a virtq configuration may be changed while it is in the disabled state, it is better to recreate a virtq when it becomes enabled.
This series adds this behaviour to the mlx5 driver for vDPA.
v2:
1. Address Maxime's comments:
- return -1 for the out-of-range error.
- fix the commit message spelling.
2. Rebase.
Matan Azrad (3):
vdpa/mlx5: manage virtqs by array
vdpa/mlx5: separate virtq stop
vdpa/mlx5: recreate a virtq becoming enabled
drivers/vdpa/mlx5/mlx5_vdpa.c | 47 +++++++--------
drivers/vdpa/mlx5/mlx5_vdpa.h | 54 +++++++++++++++--
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 57 ++++++++----------
drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 103 ++++++++++++++++----------------
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 113 +++++++++++++++++++++++++++---------
5 files changed, 231 insertions(+), 143 deletions(-)
--
1.8.3.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array
2020-04-26 12:07 ` [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recreate " Matan Azrad
@ 2020-04-26 12:07 ` Matan Azrad
2020-04-27 7:45 ` Maxime Coquelin
2020-04-26 12:07 ` [dpdk-dev] [PATCH 2/3] vdpa/mlx5: separate virtq stop Matan Azrad
` (2 subsequent siblings)
3 siblings, 1 reply; 19+ messages in thread
From: Matan Azrad @ 2020-04-26 12:07 UTC (permalink / raw)
To: dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler, Maxime Coquelin
As a preparation for listening to the virtqs status before the device is
configured, manage the virtq structures in an array instead of a list.
Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
drivers/vdpa/mlx5/mlx5_vdpa.c | 43 ++++++++++++++++------------------
drivers/vdpa/mlx5/mlx5_vdpa.h | 2 +-
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 43 ++++++++++++++++------------------
drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 18 +++++++--------
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 46 +++++++++++++++----------------------
5 files changed, 68 insertions(+), 84 deletions(-)
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index f1bfe7a..70327df 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -116,20 +116,18 @@
{
int did = rte_vhost_get_vdpa_device_id(vid);
struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
- struct mlx5_vdpa_virtq *virtq = NULL;
if (priv == NULL) {
DRV_LOG(ERR, "Invalid device id: %d.", did);
return -EINVAL;
}
- SLIST_FOREACH(virtq, &priv->virtq_list, next)
- if (virtq->index == vring)
- break;
- if (!virtq) {
+ if (!priv->configured || vring >= RTE_MIN((int)priv->nr_virtqs,
+ (int)priv->caps.max_num_virtio_queues * 2) ||
+ !priv->virtqs[vring].virtq) {
DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
return -EINVAL;
}
- return mlx5_vdpa_virtq_enable(virtq, state);
+ return mlx5_vdpa_virtq_enable(&priv->virtqs[vring], state);
}
static int
@@ -482,28 +480,28 @@
rte_errno = ENODEV;
return -rte_errno;
}
- priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv),
- RTE_CACHE_LINE_SIZE);
- if (!priv) {
- DRV_LOG(ERR, "Failed to allocate private memory.");
- rte_errno = ENOMEM;
- goto error;
- }
ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
if (ret) {
DRV_LOG(ERR, "Unable to read HCA capabilities.");
rte_errno = ENOTSUP;
goto error;
- } else {
- if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
- DRV_LOG(ERR, "Not enough capabilities to support vdpa,"
- " maybe old FW/OFED version?");
- rte_errno = ENOTSUP;
- goto error;
- }
- priv->caps = attr.vdpa;
- priv->log_max_rqt_size = attr.log_max_rqt_size;
+ } else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
+ DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
+ "old FW/OFED version?");
+ rte_errno = ENOTSUP;
+ goto error;
+ }
+ priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
+ sizeof(struct mlx5_vdpa_virtq) *
+ attr.vdpa.max_num_virtio_queues * 2,
+ RTE_CACHE_LINE_SIZE);
+ if (!priv) {
+ DRV_LOG(ERR, "Failed to allocate private memory.");
+ rte_errno = ENOMEM;
+ goto error;
}
+ priv->caps = attr.vdpa;
+ priv->log_max_rqt_size = attr.log_max_rqt_size;
priv->ctx = ctx;
priv->dev_addr.pci_addr = pci_dev->addr;
priv->dev_addr.type = VDPA_ADDR_PCI;
@@ -519,7 +517,6 @@
goto error;
}
SLIST_INIT(&priv->mr_list);
- SLIST_INIT(&priv->virtq_list);
pthread_mutex_lock(&priv_list_lock);
TAILQ_INSERT_TAIL(&priv_list, priv, next);
pthread_mutex_unlock(&priv_list_lock);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 75af410..baec106 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -120,11 +120,11 @@ struct mlx5_vdpa_priv {
uint16_t nr_virtqs;
uint64_t features; /* Negotiated features. */
uint16_t log_max_rqt_size;
- SLIST_HEAD(virtq_list, mlx5_vdpa_virtq) virtq_list;
struct mlx5_vdpa_steer steer;
struct mlx5dv_var *var;
void *virtq_db_addr;
SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list;
+ struct mlx5_vdpa_virtq virtqs[];
};
/**
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 4457760..77f2eda 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -15,13 +15,12 @@
.type = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE,
.dirty_bitmap_dump_enable = enable,
};
- struct mlx5_vdpa_virtq *virtq;
+ int i;
- SLIST_FOREACH(virtq, &priv->virtq_list, next) {
- attr.queue_index = virtq->index;
- if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
- DRV_LOG(ERR, "Failed to modify virtq %d logging.",
- virtq->index);
+ for (i = 0; i < priv->nr_virtqs; ++i) {
+ attr.queue_index = i;
+ if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
+ DRV_LOG(ERR, "Failed to modify virtq %d logging.", i);
return -1;
}
}
@@ -47,7 +46,7 @@
.dirty_bitmap_size = log_size,
};
struct mlx5_vdpa_query_mr *mr = rte_malloc(__func__, sizeof(*mr), 0);
- struct mlx5_vdpa_virtq *virtq;
+ int i;
if (!mr) {
DRV_LOG(ERR, "Failed to allocate mem for lm mr.");
@@ -67,11 +66,10 @@
goto err;
}
attr.dirty_bitmap_mkey = mr->mkey->id;
- SLIST_FOREACH(virtq, &priv->virtq_list, next) {
- attr.queue_index = virtq->index;
- if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
- DRV_LOG(ERR, "Failed to modify virtq %d for lm.",
- virtq->index);
+ for (i = 0; i < priv->nr_virtqs; ++i) {
+ attr.queue_index = i;
+ if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
+ DRV_LOG(ERR, "Failed to modify virtq %d for lm.", i);
goto err;
}
}
@@ -94,9 +92,9 @@
mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
{
struct mlx5_devx_virtq_attr attr = {0};
- struct mlx5_vdpa_virtq *virtq;
uint64_t features;
int ret = rte_vhost_get_negotiated_features(priv->vid, &features);
+ int i;
if (ret) {
DRV_LOG(ERR, "Failed to get negotiated features.");
@@ -104,27 +102,26 @@
}
if (!RTE_VHOST_NEED_LOG(features))
return 0;
- SLIST_FOREACH(virtq, &priv->virtq_list, next) {
- ret = mlx5_vdpa_virtq_modify(virtq, 0);
+ for (i = 0; i < priv->nr_virtqs; ++i) {
+ ret = mlx5_vdpa_virtq_modify(&priv->virtqs[i], 0);
if (ret)
return -1;
- if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
- DRV_LOG(ERR, "Failed to query virtq %d.", virtq->index);
+ if (mlx5_devx_cmd_query_virtq(priv->virtqs[i].virtq, &attr)) {
+ DRV_LOG(ERR, "Failed to query virtq %d.", i);
return -1;
}
DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
- "hw_used_index=%d", priv->vid, virtq->index,
+ "hw_used_index=%d", priv->vid, i,
attr.hw_available_index, attr.hw_used_index);
- ret = rte_vhost_set_vring_base(priv->vid, virtq->index,
+ ret = rte_vhost_set_vring_base(priv->vid, i,
attr.hw_available_index,
attr.hw_used_index);
if (ret) {
- DRV_LOG(ERR, "Failed to set virtq %d base.",
- virtq->index);
+ DRV_LOG(ERR, "Failed to set virtq %d base.", i);
return -1;
}
- rte_vhost_log_used_vring(priv->vid, virtq->index, 0,
- MLX5_VDPA_USED_RING_LEN(virtq->vq_size));
+ rte_vhost_log_used_vring(priv->vid, i, 0,
+ MLX5_VDPA_USED_RING_LEN(priv->virtqs[i].vq_size));
}
return 0;
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
index 9c11452..96ffc21 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
@@ -76,13 +76,13 @@
static int
mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
{
- struct mlx5_vdpa_virtq *virtq;
+ int i;
uint32_t rqt_n = RTE_MIN(MLX5_VDPA_DEFAULT_RQT_SIZE,
1 << priv->log_max_rqt_size);
struct mlx5_devx_rqt_attr *attr = rte_zmalloc(__func__, sizeof(*attr)
+ rqt_n *
sizeof(uint32_t), 0);
- uint32_t i = 0, j;
+ uint32_t k = 0, j;
int ret = 0;
if (!attr) {
@@ -90,15 +90,15 @@
rte_errno = ENOMEM;
return -ENOMEM;
}
- SLIST_FOREACH(virtq, &priv->virtq_list, next) {
- if (is_virtq_recvq(virtq->index, priv->nr_virtqs) &&
- virtq->enable) {
- attr->rq_list[i] = virtq->virtq->id;
- i++;
+ for (i = 0; i < priv->nr_virtqs; i++) {
+ if (is_virtq_recvq(i, priv->nr_virtqs) &&
+ priv->virtqs[i].enable) {
+ attr->rq_list[k] = priv->virtqs[i].virtq->id;
+ k++;
}
}
- for (j = 0; i != rqt_n; ++i, ++j)
- attr->rq_list[i] = attr->rq_list[j];
+ for (j = 0; k != rqt_n; ++k, ++j)
+ attr->rq_list[k] = attr->rq_list[j];
attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
attr->rqt_max_size = rqt_n;
attr->rqt_actual_size = rqt_n;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index e59c172..e07c826 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -59,12 +59,9 @@
usleep(MLX5_VDPA_INTR_RETRIES_USEC);
}
}
- virtq->intr_handle.fd = -1;
}
- if (virtq->virtq) {
+ if (virtq->virtq)
claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
- virtq->virtq = NULL;
- }
for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
if (virtq->umems[i].obj)
claim_zero(mlx5_glue->devx_umem_dereg
@@ -72,27 +69,20 @@
if (virtq->umems[i].buf)
rte_free(virtq->umems[i].buf);
}
- memset(&virtq->umems, 0, sizeof(virtq->umems));
if (virtq->eqp.fw_qp)
mlx5_vdpa_event_qp_destroy(&virtq->eqp);
+ memset(virtq, 0, sizeof(*virtq));
+ virtq->intr_handle.fd = -1;
return 0;
}
void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
{
- struct mlx5_vdpa_virtq *entry;
- struct mlx5_vdpa_virtq *next;
+ int i;
- entry = SLIST_FIRST(&priv->virtq_list);
- while (entry) {
- next = SLIST_NEXT(entry, next);
- mlx5_vdpa_virtq_unset(entry);
- SLIST_REMOVE(&priv->virtq_list, entry, mlx5_vdpa_virtq, next);
- rte_free(entry);
- entry = next;
- }
- SLIST_INIT(&priv->virtq_list);
+ for (i = 0; i < priv->nr_virtqs; i++)
+ mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
if (priv->tis) {
claim_zero(mlx5_devx_cmd_destroy(priv->tis));
priv->tis = NULL;
@@ -106,6 +96,7 @@
priv->virtq_db_addr = NULL;
}
priv->features = 0;
+ priv->nr_virtqs = 0;
}
int
@@ -140,9 +131,9 @@
}
static int
-mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv,
- struct mlx5_vdpa_virtq *virtq, int index)
+mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
{
+ struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
struct rte_vhost_vring vq;
struct mlx5_devx_virtq_attr attr = {0};
uint64_t gpa;
@@ -347,7 +338,6 @@
mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
{
struct mlx5_devx_tis_attr tis_attr = {0};
- struct mlx5_vdpa_virtq *virtq;
uint32_t i;
uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
@@ -356,6 +346,12 @@
DRV_LOG(ERR, "Failed to configure negotiated features.");
return -1;
}
+ if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
+ DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
+ (int)priv->caps.max_num_virtio_queues * 2,
+ (int)nr_vring);
+ return -1;
+ }
/* Always map the entire page. */
priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
PROT_WRITE, MAP_SHARED, priv->ctx->cmd_fd,
@@ -379,16 +375,10 @@
DRV_LOG(ERR, "Failed to create TIS.");
goto error;
}
- for (i = 0; i < nr_vring; i++) {
- virtq = rte_zmalloc(__func__, sizeof(*virtq), 0);
- if (!virtq || mlx5_vdpa_virtq_setup(priv, virtq, i)) {
- if (virtq)
- rte_free(virtq);
- goto error;
- }
- SLIST_INSERT_HEAD(&priv->virtq_list, virtq, next);
- }
priv->nr_virtqs = nr_vring;
+ for (i = 0; i < nr_vring; i++)
+ if (mlx5_vdpa_virtq_setup(priv, i))
+ goto error;
return 0;
error:
mlx5_vdpa_virtqs_release(priv);
--
1.8.3.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [dpdk-dev] [PATCH 2/3] vdpa/mlx5: separate virtq stop
2020-04-26 12:07 ` [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recreate " Matan Azrad
2020-04-26 12:07 ` [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array Matan Azrad
@ 2020-04-26 12:07 ` Matan Azrad
2020-04-26 12:07 ` [dpdk-dev] [PATCH 3/3] vdpa/mlx5: recreate a virtq becoming enabled Matan Azrad
2020-04-28 16:07 ` [dpdk-dev] [PATCH 0/3] " Maxime Coquelin
3 siblings, 0 replies; 19+ messages in thread
From: Matan Azrad @ 2020-04-26 12:07 UTC (permalink / raw)
To: dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler, Maxime Coquelin
In live migration, before logging the virtq, the driver queries the
virtq indexes after moving it to suspend mode.
Separate this logic into a new function, mlx5_vdpa_virtq_stop, as a
preparation for reuse.
Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
drivers/vdpa/mlx5/mlx5_vdpa.h | 13 +++++++++++++
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 17 ++---------------
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 26 ++++++++++++++++++++++++++
3 files changed, 41 insertions(+), 15 deletions(-)
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index baec106..0edd688 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -308,4 +308,17 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
*/
int mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state);
+/**
+ * Stop virtq before destroying it.
+ *
+ * @param[in] priv
+ * The vdpa driver private structure.
+ * @param[in] index
+ * The virtq index.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);
+
#endif /* RTE_PMD_MLX5_VDPA_H_ */
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 77f2eda..26b7ce1 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -91,7 +91,6 @@
int
mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
{
- struct mlx5_devx_virtq_attr attr = {0};
uint64_t features;
int ret = rte_vhost_get_negotiated_features(priv->vid, &features);
int i;
@@ -103,21 +102,9 @@
if (!RTE_VHOST_NEED_LOG(features))
return 0;
for (i = 0; i < priv->nr_virtqs; ++i) {
- ret = mlx5_vdpa_virtq_modify(&priv->virtqs[i], 0);
- if (ret)
- return -1;
- if (mlx5_devx_cmd_query_virtq(priv->virtqs[i].virtq, &attr)) {
- DRV_LOG(ERR, "Failed to query virtq %d.", i);
- return -1;
- }
- DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
- "hw_used_index=%d", priv->vid, i,
- attr.hw_available_index, attr.hw_used_index);
- ret = rte_vhost_set_vring_base(priv->vid, i,
- attr.hw_available_index,
- attr.hw_used_index);
+ ret = mlx5_vdpa_virtq_stop(priv, i);
if (ret) {
- DRV_LOG(ERR, "Failed to set virtq %d base.", i);
+ DRV_LOG(ERR, "Failed to stop virtq %d.", i);
return -1;
}
rte_vhost_log_used_vring(priv->vid, i, 0,
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index e07c826..9c0284c 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -112,6 +112,32 @@
return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
}
+int
+mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)
+{
+ struct mlx5_devx_virtq_attr attr = {0};
+ struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
+ int ret = mlx5_vdpa_virtq_modify(virtq, 0);
+
+ if (ret)
+ return -1;
+ if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
+ DRV_LOG(ERR, "Failed to query virtq %d.", index);
+ return -1;
+ }
+ DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
+ "hw_used_index=%d", priv->vid, index,
+ attr.hw_available_index, attr.hw_used_index);
+ ret = rte_vhost_set_vring_base(priv->vid, index,
+ attr.hw_available_index,
+ attr.hw_used_index);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to set virtq %d base.", index);
+ return -1;
+ }
+ return 0;
+}
+
static uint64_t
mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
{
--
1.8.3.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [dpdk-dev] [PATCH 3/3] vdpa/mlx5: recreate a virtq becoming enabled
2020-04-26 12:07 ` [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recreate " Matan Azrad
2020-04-26 12:07 ` [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array Matan Azrad
2020-04-26 12:07 ` [dpdk-dev] [PATCH 2/3] vdpa/mlx5: separate virtq stop Matan Azrad
@ 2020-04-26 12:07 ` Matan Azrad
2020-04-28 16:07 ` [dpdk-dev] [PATCH 0/3] " Maxime Coquelin
3 siblings, 0 replies; 19+ messages in thread
From: Matan Azrad @ 2020-04-26 12:07 UTC (permalink / raw)
To: dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler, Maxime Coquelin
The virtq configuration may be changed while the virtq is in the
disabled state.
Listen to the state callback even if the device is not configured.
Recreate the virtq when it moves from the disabled state to the
enabled state while the device is configured.
Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
drivers/vdpa/mlx5/mlx5_vdpa.c | 12 +++--
drivers/vdpa/mlx5/mlx5_vdpa.h | 39 +++++++++++++++--
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 17 +++++---
drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 87 ++++++++++++++++++-------------------
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 59 ++++++++++++++++++++++---
5 files changed, 146 insertions(+), 68 deletions(-)
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 70327df..9f7353d 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -121,13 +121,11 @@
DRV_LOG(ERR, "Invalid device id: %d.", did);
return -EINVAL;
}
- if (!priv->configured || vring >= RTE_MIN((int)priv->nr_virtqs,
- (int)priv->caps.max_num_virtio_queues * 2) ||
- !priv->virtqs[vring].virtq) {
- DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
- return -EINVAL;
+ if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+ DRV_LOG(ERR, "Too big vring id: %d.", vring);
+ return -E2BIG;
}
- return mlx5_vdpa_virtq_enable(&priv->virtqs[vring], state);
+ return mlx5_vdpa_virtq_enable(priv, vring, state);
}
static int
@@ -206,7 +204,7 @@
if (priv->configured)
ret |= mlx5_vdpa_lm_log(priv);
mlx5_vdpa_cqe_event_unset(priv);
- ret |= mlx5_vdpa_steer_unset(priv);
+ mlx5_vdpa_steer_unset(priv);
mlx5_vdpa_virtqs_release(priv);
mlx5_vdpa_event_qp_global_release(priv);
mlx5_vdpa_mem_dereg(priv);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 0edd688..fcc216a 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -127,6 +127,24 @@ struct mlx5_vdpa_priv {
struct mlx5_vdpa_virtq virtqs[];
};
+/*
+ * Check whether virtq is for traffic receive.
+ * According to VIRTIO_NET Spec the virtqueues index identity its type by:
+ * 0 receiveq1
+ * 1 transmitq1
+ * ...
+ * 2(N-1) receiveqN
+ * 2(N-1)+1 transmitqN
+ * 2N controlq
+ */
+static inline uint8_t
+is_virtq_recvq(int virtq_index, int nr_vring)
+{
+ if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
+ return 1;
+ return 0;
+}
+
/**
* Release all the prepared memory regions and all their related resources.
*
@@ -223,15 +241,17 @@ int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
/**
* Enable\Disable virtq..
*
- * @param[in] virtq
- * The vdpa driver private virtq structure.
+ * @param[in] priv
+ * The vdpa driver private structure.
+ * @param[in] index
+ * The virtq index.
* @param[in] enable
* Set to enable, otherwise disable.
*
* @return
* 0 on success, a negative value otherwise.
*/
-int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable);
+int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable);
/**
* Unset steering and release all its related resources- stop traffic.
@@ -239,7 +259,18 @@ int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
* @param[in] priv
* The vdpa driver private structure.
*/
-int mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);
+void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);
+
+/**
+ * Update steering according to the received queues status.
+ *
+ * @param[in] priv
+ * The vdpa driver private structure.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv);
/**
* Setup steering and all its related resources to enable RSS traffic from the
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 26b7ce1..460e01d 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -19,7 +19,8 @@
for (i = 0; i < priv->nr_virtqs; ++i) {
attr.queue_index = i;
- if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
+ if (!priv->virtqs[i].virtq ||
+ mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
DRV_LOG(ERR, "Failed to modify virtq %d logging.", i);
return -1;
}
@@ -68,7 +69,8 @@
attr.dirty_bitmap_mkey = mr->mkey->id;
for (i = 0; i < priv->nr_virtqs; ++i) {
attr.queue_index = i;
- if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
+ if (!priv->virtqs[i].virtq ||
+ mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
DRV_LOG(ERR, "Failed to modify virtq %d for lm.", i);
goto err;
}
@@ -102,9 +104,14 @@
if (!RTE_VHOST_NEED_LOG(features))
return 0;
for (i = 0; i < priv->nr_virtqs; ++i) {
- ret = mlx5_vdpa_virtq_stop(priv, i);
- if (ret) {
- DRV_LOG(ERR, "Failed to stop virtq %d.", i);
+ if (priv->virtqs[i].virtq) {
+ ret = mlx5_vdpa_virtq_stop(priv, i);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to stop virtq %d.", i);
+ return -1;
+ }
+ } else {
+ DRV_LOG(ERR, "virtq %d is not created.", i);
return -1;
}
rte_vhost_log_used_vring(priv->vid, i, 0,
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
index 96ffc21..406c7be 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
@@ -12,10 +12,9 @@
#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"
-int
-mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
+static void
+mlx5_vdpa_rss_flows_destroy(struct mlx5_vdpa_priv *priv)
{
- int ret __rte_unused;
unsigned i;
for (i = 0; i < RTE_DIM(priv->steer.rss); ++i) {
@@ -40,6 +39,12 @@
priv->steer.rss[i].matcher = NULL;
}
}
+}
+
+void
+mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
+{
+ mlx5_vdpa_rss_flows_destroy(priv);
if (priv->steer.tbl) {
claim_zero(mlx5_glue->dr_destroy_flow_tbl(priv->steer.tbl));
priv->steer.tbl = NULL;
@@ -52,27 +57,13 @@
claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));
priv->steer.rqt = NULL;
}
- return 0;
}
+#define MLX5_VDPA_DEFAULT_RQT_SIZE 512
/*
- * According to VIRTIO_NET Spec the virtqueues index identity its type by:
- * 0 receiveq1
- * 1 transmitq1
- * ...
- * 2(N-1) receiveqN
- * 2(N-1)+1 transmitqN
- * 2N controlq
+ * Return the number of queues configured to the table on success, otherwise
+ * -1 on error.
*/
-static uint8_t
-is_virtq_recvq(int virtq_index, int nr_vring)
-{
- if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
- return 1;
- return 0;
-}
-
-#define MLX5_VDPA_DEFAULT_RQT_SIZE 512
static int
mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
{
@@ -83,7 +74,7 @@
+ rqt_n *
sizeof(uint32_t), 0);
uint32_t k = 0, j;
- int ret = 0;
+ int ret = 0, num;
if (!attr) {
DRV_LOG(ERR, "Failed to allocate RQT attributes memory.");
@@ -92,11 +83,15 @@
}
for (i = 0; i < priv->nr_virtqs; i++) {
if (is_virtq_recvq(i, priv->nr_virtqs) &&
- priv->virtqs[i].enable) {
+ priv->virtqs[i].enable && priv->virtqs[i].virtq) {
attr->rq_list[k] = priv->virtqs[i].virtq->id;
k++;
}
}
+ if (k == 0)
+ /* No enabled RQ to configure for RSS. */
+ return 0;
+ num = (int)k;
for (j = 0; k != rqt_n; ++k, ++j)
attr->rq_list[k] = attr->rq_list[j];
attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
@@ -114,26 +109,7 @@
DRV_LOG(ERR, "Failed to modify RQT.");
}
rte_free(attr);
- return ret;
-}
-
-int
-mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable)
-{
- struct mlx5_vdpa_priv *priv = virtq->priv;
- int ret = 0;
-
- DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", virtq->index,
- virtq->enable ? "en" : "dis", enable ? "en" : "dis");
- if (virtq->enable == !!enable)
- return 0;
- virtq->enable = !!enable;
- if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
- ret = mlx5_vdpa_rqt_prepare(priv);
- if (ret)
- virtq->enable = !enable;
- }
- return ret;
+ return ret ? -1 : num;
}
static int __rte_unused
@@ -262,11 +238,32 @@
}
int
+mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)
+{
+ int ret = mlx5_vdpa_rqt_prepare(priv);
+
+ if (ret == 0) {
+ mlx5_vdpa_rss_flows_destroy(priv);
+ if (priv->steer.rqt) {
+ claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));
+ priv->steer.rqt = NULL;
+ }
+ } else if (ret < 0) {
+ return ret;
+ } else if (!priv->steer.rss[0].flow) {
+ ret = mlx5_vdpa_rss_flows_create(priv);
+ if (ret) {
+ DRV_LOG(ERR, "Cannot create RSS flows.");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int
mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
- if (mlx5_vdpa_rqt_prepare(priv))
- return -1;
priv->steer.domain = mlx5_glue->dr_create_domain(priv->ctx,
MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
if (!priv->steer.domain) {
@@ -278,7 +275,7 @@
DRV_LOG(ERR, "Failed to create table 0 with Rx domain.");
goto error;
}
- if (mlx5_vdpa_rss_flows_create(priv))
+ if (mlx5_vdpa_steer_update(priv))
goto error;
return 0;
error:
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 9c0284c..bd48460 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -59,9 +59,11 @@
usleep(MLX5_VDPA_INTR_RETRIES_USEC);
}
}
+ virtq->intr_handle.fd = -1;
}
if (virtq->virtq)
claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
+ virtq->virtq = NULL;
for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
if (virtq->umems[i].obj)
claim_zero(mlx5_glue->devx_umem_dereg
@@ -69,10 +71,9 @@
if (virtq->umems[i].buf)
rte_free(virtq->umems[i].buf);
}
+ memset(&virtq->umems, 0, sizeof(virtq->umems));
if (virtq->eqp.fw_qp)
mlx5_vdpa_event_qp_destroy(&virtq->eqp);
- memset(virtq, 0, sizeof(*virtq));
- virtq->intr_handle.fd = -1;
return 0;
}
@@ -81,8 +82,10 @@
{
int i;
- for (i = 0; i < priv->nr_virtqs; i++)
+ for (i = 0; i < priv->nr_virtqs; i++) {
mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
+ priv->virtqs[i].enable = 0;
+ }
if (priv->tis) {
claim_zero(mlx5_devx_cmd_destroy(priv->tis));
priv->tis = NULL;
@@ -272,10 +275,7 @@
goto error;
if (mlx5_vdpa_virtq_modify(virtq, 1))
goto error;
- virtq->enable = 1;
virtq->priv = priv;
- /* Be sure notifications are not missed during configuration. */
- claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
rte_write32(virtq->index, priv->virtq_db_addr);
/* Setup doorbell mapping. */
virtq->intr_handle.fd = vq.kickfd;
@@ -402,11 +402,56 @@
goto error;
}
priv->nr_virtqs = nr_vring;
- for (i = 0; i < nr_vring; i++)
+ for (i = 0; i < nr_vring; i++) {
+ claim_zero(rte_vhost_enable_guest_notification(priv->vid, i,
+ 1));
if (mlx5_vdpa_virtq_setup(priv, i))
goto error;
+ }
return 0;
error:
mlx5_vdpa_virtqs_release(priv);
return -1;
}
+
+int
+mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
+{
+ struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
+ int ret;
+
+ DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
+ virtq->enable ? "en" : "dis", enable ? "en" : "dis");
+ if (virtq->enable == !!enable)
+ return 0;
+ if (!priv->configured) {
+ virtq->enable = !!enable;
+ return 0;
+ }
+ if (enable) {
+ /* Configuration might have been updated - reconfigure virtq. */
+ if (virtq->virtq) {
+ ret = mlx5_vdpa_virtq_stop(priv, index);
+ if (ret)
+ DRV_LOG(WARNING, "Failed to stop virtq %d.",
+ index);
+ mlx5_vdpa_virtq_unset(virtq);
+ }
+ ret = mlx5_vdpa_virtq_setup(priv, index);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to setup virtq %d.", index);
+ return ret;
+ /* The only case virtq can stay invalid. */
+ }
+ }
+ virtq->enable = !!enable;
+ if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
+ /* Need to add received virtq to the RQT table of the TIRs. */
+ ret = mlx5_vdpa_steer_update(priv);
+ if (ret) {
+ virtq->enable = !enable;
+ return ret;
+ }
+ }
+ return 0;
+}
--
1.8.3.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array
2020-04-26 12:07 ` [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array Matan Azrad
@ 2020-04-27 7:45 ` Maxime Coquelin
0 siblings, 0 replies; 19+ messages in thread
From: Maxime Coquelin @ 2020-04-27 7:45 UTC (permalink / raw)
To: Matan Azrad, dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler
On 4/26/20 2:07 PM, Matan Azrad wrote:
> As a preparation for listening to the virtqs status before the device is
> configured, manage the virtq structures in an array instead of a list.
>
> Signed-off-by: Matan Azrad <matan@mellanox.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
> ---
> drivers/vdpa/mlx5/mlx5_vdpa.c | 43 ++++++++++++++++------------------
> drivers/vdpa/mlx5/mlx5_vdpa.h | 2 +-
> drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 43 ++++++++++++++++------------------
> drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 18 +++++++--------
> drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 46 +++++++++++++++----------------------
> 5 files changed, 68 insertions(+), 84 deletions(-)
>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recreate a virtq becoming enabled
2020-04-26 12:07 ` [dpdk-dev] [PATCH 0/3] vdpa/mlx5: recreate " Matan Azrad
` (2 preceding siblings ...)
2020-04-26 12:07 ` [dpdk-dev] [PATCH 3/3] vdpa/mlx5: recreate a virtq becoming enabled Matan Azrad
@ 2020-04-28 16:07 ` Maxime Coquelin
3 siblings, 0 replies; 19+ messages in thread
From: Maxime Coquelin @ 2020-04-28 16:07 UTC (permalink / raw)
To: Matan Azrad, dev; +Cc: Viacheslav Ovsiienko, Shahaf Shuler
On 4/26/20 2:07 PM, Matan Azrad wrote:
> Since a virtq configuration may be changed while it is in the disabled state, it is better to recreate a virtq when it becomes enabled.
> This series adds this behaviour to the mlx5 driver for vDPA.
>
> v2:
> 1. Address Maxime's comments:
> - return -1 for the out-of-range error.
> - fix the commit message spelling.
> 2. Rebase.
>
> Matan Azrad (3):
> vdpa/mlx5: manage virtqs by array
> vdpa/mlx5: separate virtq stop
> vdpa/mlx5: recreate a virtq becoming enabled
>
> drivers/vdpa/mlx5/mlx5_vdpa.c | 47 +++++++--------
> drivers/vdpa/mlx5/mlx5_vdpa.h | 54 +++++++++++++++--
> drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 57 ++++++++----------
> drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 103 ++++++++++++++++----------------
> drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 113 +++++++++++++++++++++++++++---------
> 5 files changed, 231 insertions(+), 143 deletions(-)
>
Applied to dpdk-next-virtio/master
Thanks,
Maxime
^ permalink raw reply [flat|nested] 19+ messages in thread