From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id D9E5FA0526;
	Mon, 20 Jan 2020 18:08:11 +0100 (CET)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id 8F41B1C0B3;
	Mon, 20 Jan 2020 18:04:04 +0100 (CET)
Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])
 by dpdk.org (Postfix) with ESMTP id BE1E51BFAE
 for <dev@dpdk.org>; Mon, 20 Jan 2020 18:03:14 +0100 (CET)
Received: from Internal Mail-Server by MTLPINE1 (envelope-from
 asafp@mellanox.com)
 with ESMTPS (AES256-SHA encrypted); 20 Jan 2020 19:03:13 +0200
Received: from pegasus07.mtr.labs.mlnx (pegasus07.mtr.labs.mlnx
 [10.210.16.112])
 by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 00KH3BGm024424;
 Mon, 20 Jan 2020 19:03:13 +0200
From: Matan Azrad <matan@mellanox.com>
To: dev@dpdk.org
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>,
 Thomas Monjalon <thomas@monjalon.net>
Date: Mon, 20 Jan 2020 17:03:01 +0000
Message-Id: <1579539790-3882-30-git-send-email-matan@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1579539790-3882-1-git-send-email-matan@mellanox.com>
References: <1579539790-3882-1-git-send-email-matan@mellanox.com>
Subject: [dpdk-dev] [PATCH v1 29/38] vdpa/mlx5: support queue state operation
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

Add support for the set_vring_state operation.

Using the DevX API, the virtq state can be changed as described in the PRM:
	enable - move the virtq to the ready state.
	disable - move the virtq to the suspend state.
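
In short, both transitions reduce to a single DevX virtq modify command
(a sketch distilled from mlx5_vdpa_virtq_modify() added below, error
handling omitted):

	struct mlx5_devx_virtq_attr attr = {
		.type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
		.state = state ? MLX5_VIRTQ_STATE_RDY :
				 MLX5_VIRTQ_STATE_SUSPEND,
		.queue_index = virtq->index,
	};
	mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);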

Signed-off-by: Matan Azrad <matan@mellanox.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c       | 23 ++++++++++++++++++++++-
 drivers/vdpa/mlx5/mlx5_vdpa.h       | 15 +++++++++++++++
 drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 22 ++++++++++++++++++++--
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 25 +++++++++++++++++++++----
 4 files changed, 78 insertions(+), 7 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index d6014fc..8f078e5 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -106,13 +106,34 @@
 	return 0;
 }
 
+static int
+mlx5_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	int did = rte_vhost_get_vdpa_device_id(vid);
+	struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+	struct mlx5_vdpa_virtq *virtq = NULL;
+
+	if (priv == NULL) {
+		DRV_LOG(ERR, "Invalid device id: %d.", did);
+		return -EINVAL;
+	}
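+	/* Find the virtq previously prepared for this vring index. */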
+	SLIST_FOREACH(virtq, &priv->virtq_list, next)
+		if (virtq->index == vring)
+			break;
+	if (!virtq) {
+		DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
+		return -EINVAL;
+	}
+	return mlx5_vdpa_virtq_enable(virtq, state);
+}
+
 static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
 	.get_queue_num = mlx5_vdpa_get_queue_num,
 	.get_features = mlx5_vdpa_get_vdpa_features,
 	.get_protocol_features = mlx5_vdpa_get_protocol_features,
 	.dev_conf = NULL,
 	.dev_close = NULL,
-	.set_vring_state = NULL,
+	.set_vring_state = mlx5_vdpa_set_vring_state,
 	.set_features = NULL,
 	.migration_done = NULL,
 	.get_vfio_group_fd = NULL,
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 0f91682..318f1e8 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -55,8 +55,10 @@ struct mlx5_vdpa_query_mr {
 
 struct mlx5_vdpa_virtq {
 	SLIST_ENTRY(mlx5_vdpa_virtq) next;
+	uint8_t enable;
 	uint16_t index;
 	uint16_t vq_size;
+	struct mlx5_vdpa_priv *priv;
 	struct mlx5_devx_obj *virtq;
 	struct mlx5_vdpa_cq cq;
 	struct {
@@ -198,6 +200,19 @@ int mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
 int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);
 
 /**
+ * Enable/disable the virtq.
+ *
+ * @param[in] virtq
+ *   The vdpa driver private virtq structure.
+ * @param[in] enable
+ *   Nonzero to enable, 0 to disable.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable);
+
+/**
 * Unset steering and release all its related resources - stop traffic.
  *
  * @param[in] priv
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
index b3cfebd..37b7668 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
@@ -78,7 +78,7 @@
 }
 
 #define MLX5_VDPA_DEFAULT_RQT_SIZE 512
-static int __rte_unused
+static int
 mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
 {
 	struct mlx5_vdpa_virtq *virtq;
@@ -96,7 +96,8 @@
 		return -ENOMEM;
 	}
 	SLIST_FOREACH(virtq, &priv->virtq_list, next) {
-		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
+		if (is_virtq_recvq(virtq->index, priv->nr_virtqs) &&
+		    virtq->enable) {
 			attr->rq_list[i] = virtq->virtq->id;
 			i++;
 		}
@@ -121,6 +122,23 @@
 	return ret;
 }
 
+int
+mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable)
+{
+	struct mlx5_vdpa_priv *priv = virtq->priv;
+	int ret = 0;
+
+	if (virtq->enable == !!enable)
+		return 0;
+	virtq->enable = !!enable;
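+	/* The RQT lists only enabled RX virtqs, so it must be refreshed. */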
+	if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
+		ret = mlx5_vdpa_rqt_prepare(priv);
+		if (ret)
+			virtq->enable = !enable;
+	}
+	return ret;
+}
+
 static int __rte_unused
 mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
 {
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 332913c..a294117 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -15,14 +15,14 @@
 mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
 {
 	int ret __rte_unused;
-	int i;
+	unsigned int i;
 
 	if (virtq->virtq) {
 		ret = mlx5_devx_cmd_destroy(virtq->virtq);
 		assert(!ret);
 		virtq->virtq = NULL;
 	}
-	for (i = 0; i < 3; ++i) {
+	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
 		if (virtq->umems[i].obj) {
 			ret = mlx5_glue->devx_umem_dereg(virtq->umems[i].obj);
 			assert(!ret);
@@ -65,6 +65,19 @@
 	priv->features = 0;
 }
 
+static int
+mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
+{
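+	/* RDY and SUSPEND are the PRM ready/suspend virtq states. */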
+	struct mlx5_devx_virtq_attr attr = {
+			.type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
+			.state = state ? MLX5_VIRTQ_STATE_RDY :
+					 MLX5_VIRTQ_STATE_SUSPEND,
+			.queue_index = virtq->index,
+	};
+
+	return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
+}
+
 static uint64_t
 mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
 {
@@ -91,7 +104,7 @@
 	struct mlx5_devx_virtq_attr attr = {0};
 	uint64_t gpa;
 	int ret;
-	int i;
+	unsigned int i;
 	uint16_t last_avail_idx;
 	uint16_t last_used_idx;
 
@@ -130,7 +143,7 @@
 			" need CQ and event mechanism.", index);
 	}
 	/* Setup 3 UMEMs for each virtq. */
-	for (i = 0; i < 3; ++i) {
+	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
 		virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
 							  priv->caps.umems[i].b;
 		assert(virtq->umems[i].size);
@@ -188,8 +201,12 @@
 	attr.tis_id = priv->tis->id;
 	attr.queue_index = index;
 	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->ctx, &attr);
+	virtq->priv = priv;
 	if (!virtq->virtq)
 		goto error;
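+	/* A new virtq is not ready by default - move it to the ready state. */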
+	if (mlx5_vdpa_virtq_modify(virtq, 1))
+		goto error;
+	virtq->enable = 1;
 	return 0;
 error:
 	mlx5_vdpa_virtq_unset(virtq);
-- 
1.8.3.1