DPDK patches and discussions
 help / color / mirror / Atom feed
From: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
To: <dev@dpdk.org>, <maxime.coquelin@redhat.com>,
	Chenbo Xia <chenbox@nvidia.com>
Cc: <anoobj@marvell.com>, Akhil Goyal <gakhil@marvell.com>,
	David Marchand <david.marchand@redhat.com>,
	Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
Subject: [v9 1/6] vhost: fix thread safety checks for vhost crypto data req
Date: Fri, 28 Feb 2025 19:17:08 +0530	[thread overview]
Message-ID: <7aff12586d4a2091e983e7d434bbb0d7b1957ae1.1740749809.git.gmuthukrishn@marvell.com> (raw)
In-Reply-To: <cover.1740749809.git.gmuthukrishn@marvell.com>

For thread safety checks to succeed (as in clang), the calling function
should ensure vq->iotlb_lock is locked before passing vq to a function
that has a thread safety attribute, in the vhost crypto implementation.

When vhost_crypto_data_req is local and its vq is a pointer to a
locked vq, clang does not recognise this inherited lock and
stops compilation. This patch explicitly uses vhost_virtqueue
wherever required and fulfills thread safety checks.

Signed-off-by: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
---
 lib/vhost/vhost_crypto.c | 76 ++++++++++++++++++++++------------------
 1 file changed, 41 insertions(+), 35 deletions(-)

diff --git a/lib/vhost/vhost_crypto.c b/lib/vhost/vhost_crypto.c
index 3dc41a3bd5..4c36df9cb2 100644
--- a/lib/vhost/vhost_crypto.c
+++ b/lib/vhost/vhost_crypto.c
@@ -43,8 +43,8 @@ RTE_LOG_REGISTER_SUFFIX(vhost_crypto_logtype, crypto, INFO);
 		(1ULL << VIRTIO_F_VERSION_1) |				\
 		(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
 
-#define IOVA_TO_VVA(t, r, a, l, p)					\
-	((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))
+#define IOVA_TO_VVA(t, dev, vq, a, l, p)				\
+	((t)(uintptr_t)vhost_iova_to_vva(dev, vq, a, l, p))
 
 /*
  * vhost_crypto_desc is used to copy original vring_desc to the local buffer
@@ -488,10 +488,10 @@ find_write_desc(struct vhost_crypto_desc *head, struct vhost_crypto_desc *desc,
 }
 
 static __rte_always_inline struct virtio_crypto_inhdr *
-reach_inhdr(struct vhost_crypto_data_req *vc_req,
+reach_inhdr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct vhost_crypto_desc *head,
 		uint32_t max_n_descs)
-	__rte_requires_shared_capability(&vc_req->vq->iotlb_lock)
+	__rte_requires_shared_capability(vq->iotlb_lock)
 {
 	struct virtio_crypto_inhdr *inhdr;
 	struct vhost_crypto_desc *last = head + (max_n_descs - 1);
@@ -500,8 +500,8 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req,
 	if (unlikely(dlen != sizeof(*inhdr)))
 		return NULL;
 
-	inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, last->addr,
-			&dlen, VHOST_ACCESS_WO);
+	inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, dev, vq,
+			last->addr, &dlen, VHOST_ACCESS_WO);
 	if (unlikely(!inhdr || dlen != last->len))
 		return NULL;
 
@@ -543,7 +543,8 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req,
 	void *data;
 	uint64_t dlen = cur_desc->len;
 
-	data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
+	data = IOVA_TO_VVA(void *, vc_req->dev, vc_req->vq,
+			cur_desc->addr, &dlen, perm);
 	if (unlikely(!data || dlen != cur_desc->len)) {
 		VC_LOG_ERR("Failed to map object");
 		return NULL;
@@ -553,9 +554,9 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req,
 }
 
 static __rte_always_inline uint32_t
-copy_data_from_desc(void *dst, struct vhost_crypto_data_req *vc_req,
-	struct vhost_crypto_desc *desc, uint32_t size)
-	__rte_requires_shared_capability(&vc_req->vq->iotlb_lock)
+copy_data_from_desc(void *dst, struct virtio_net *dev,
+	struct vhost_virtqueue *vq, struct vhost_crypto_desc *desc, uint32_t size)
+	__rte_requires_shared_capability(vq->iotlb_lock)
 {
 	uint64_t remain;
 	uint64_t addr;
@@ -567,7 +568,8 @@ copy_data_from_desc(void *dst, struct vhost_crypto_data_req *vc_req,
 		void *src;
 
 		len = remain;
-		src = IOVA_TO_VVA(void *, vc_req, addr, &len, VHOST_ACCESS_RO);
+		src = IOVA_TO_VVA(void *, dev, vq,
+				addr, &len, VHOST_ACCESS_RO);
 		if (unlikely(src == NULL || len == 0))
 			return 0;
 
@@ -583,10 +585,10 @@ copy_data_from_desc(void *dst, struct vhost_crypto_data_req *vc_req,
 
 
 static __rte_always_inline int
-copy_data(void *data, struct vhost_crypto_data_req *vc_req,
+copy_data(void *data, struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct vhost_crypto_desc *head, struct vhost_crypto_desc **cur_desc,
 	uint32_t size, uint32_t max_n_descs)
-	__rte_requires_shared_capability(&vc_req->vq->iotlb_lock)
+	__rte_requires_shared_capability(vq->iotlb_lock)
 {
 	struct vhost_crypto_desc *desc = *cur_desc;
 	uint32_t left = size;
@@ -594,7 +596,7 @@ copy_data(void *data, struct vhost_crypto_data_req *vc_req,
 	do {
 		uint32_t copied;
 
-		copied = copy_data_from_desc(data, vc_req, desc, left);
+		copied = copy_data_from_desc(data, dev, vq, desc, left);
 		if (copied == 0)
 			return -1;
 		left -= copied;
@@ -689,8 +691,8 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 	if (likely(desc->len > offset)) {
 		wb_data->src = src + offset;
 		dlen = desc->len;
-		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
-			&dlen, VHOST_ACCESS_RW);
+		dst = IOVA_TO_VVA(uint8_t *, vc_req->dev, vc_req->vq,
+			desc->addr, &dlen, VHOST_ACCESS_RW);
 		if (unlikely(!dst || dlen != desc->len)) {
 			VC_LOG_ERR("Failed to map descriptor");
 			goto error_exit;
@@ -731,8 +733,8 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 		}
 
 		dlen = desc->len;
-		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
-				VHOST_ACCESS_RW) + offset;
+		dst = IOVA_TO_VVA(uint8_t *, vc_req->dev, vc_req->vq,
+				desc->addr, &dlen, VHOST_ACCESS_RW) + offset;
 		if (unlikely(dst == NULL || dlen != desc->len)) {
 			VC_LOG_ERR("Failed to map descriptor");
 			goto error_exit;
@@ -804,7 +806,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 	/* prepare */
 	/* iv */
-	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
+	if (unlikely(copy_data(iv_data, vcrypto->dev, vc_req->vq, head, &desc,
 			cipher->para.iv_len, max_n_descs))) {
 		VC_LOG_ERR("Incorrect virtio descriptor");
 		ret = VIRTIO_CRYPTO_BADMSG;
@@ -835,8 +837,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		vc_req->wb_pool = vcrypto->wb_pool;
 		m_src->data_len = cipher->para.src_data_len;
 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
-				vc_req, head, &desc, cipher->para.src_data_len,
-				max_n_descs) < 0)) {
+				vcrypto->dev, vc_req->vq, head, &desc,
+				cipher->para.src_data_len, max_n_descs) < 0)) {
 			VC_LOG_ERR("Incorrect virtio descriptor");
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
@@ -960,7 +962,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 	/* prepare */
 	/* iv */
-	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
+	if (unlikely(copy_data(iv_data, vcrypto->dev, vc_req->vq, head, &desc,
 			chain->para.iv_len, max_n_descs) < 0)) {
 		VC_LOG_ERR("Incorrect virtio descriptor");
 		ret = VIRTIO_CRYPTO_BADMSG;
@@ -992,8 +994,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		vc_req->wb_pool = vcrypto->wb_pool;
 		m_src->data_len = chain->para.src_data_len;
 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
-				vc_req, head, &desc, chain->para.src_data_len,
-				max_n_descs) < 0)) {
+				vcrypto->dev, vc_req->vq, head, &desc,
+				chain->para.src_data_len, max_n_descs) < 0)) {
 			VC_LOG_ERR("Incorrect virtio descriptor");
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
@@ -1076,8 +1078,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 			goto error_exit;
 		}
 
-		if (unlikely(copy_data(digest_addr, vc_req, head, &digest_desc,
-				chain->para.hash_result_len,
+		if (unlikely(copy_data(digest_addr, vcrypto->dev, vc_req->vq, head,
+				&digest_desc, chain->para.hash_result_len,
 				max_n_descs) < 0)) {
 			VC_LOG_ERR("Incorrect virtio descriptor");
 			ret = VIRTIO_CRYPTO_BADMSG;
@@ -1131,7 +1133,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 		struct vhost_virtqueue *vq, struct rte_crypto_op *op,
 		struct vring_desc *head, struct vhost_crypto_desc *descs,
 		uint16_t desc_idx)
-	__rte_no_thread_safety_analysis /* FIXME: requires iotlb_lock? */
+	__rte_requires_shared_capability(vq->iotlb_lock)
 {
 	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
 	struct rte_cryptodev_sym_session *session;
@@ -1154,8 +1156,8 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 	}
 
 	dlen = head->len;
-	src_desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
-			&dlen, VHOST_ACCESS_RO);
+	src_desc = IOVA_TO_VVA(struct vring_desc *, vc_req->dev, vq,
+			head->addr, &dlen, VHOST_ACCESS_RO);
 	if (unlikely(!src_desc || dlen != head->len)) {
 		VC_LOG_ERR("Invalid descriptor");
 		return -1;
@@ -1175,8 +1177,8 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 			}
 			if (inhdr_desc->len != sizeof(*inhdr))
 				return -1;
-			inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *,
-					vc_req, inhdr_desc->addr, &dlen,
+			inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req->dev,
+					vq, inhdr_desc->addr, &dlen,
 					VHOST_ACCESS_WO);
 			if (unlikely(!inhdr || dlen != inhdr_desc->len))
 				return -1;
@@ -1213,7 +1215,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 		goto error_exit;
 	}
 
-	if (unlikely(copy_data(&req, vc_req, descs, &desc, sizeof(req),
+	if (unlikely(copy_data(&req, vcrypto->dev, vq, descs, &desc, sizeof(req),
 			max_n_descs) < 0)) {
 		err = VIRTIO_CRYPTO_BADMSG;
 		VC_LOG_ERR("Invalid descriptor");
@@ -1257,14 +1259,18 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 			err = VIRTIO_CRYPTO_NOTSUPP;
 			break;
 		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
-			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
+			vhost_user_iotlb_rd_lock(vc_req_out->vq);
+			err = prepare_sym_cipher_op(vcrypto, op, vc_req_out,
 					&req.u.sym_req.u.cipher, desc,
 					max_n_descs);
+			vhost_user_iotlb_rd_unlock(vc_req_out->vq);
 			break;
 		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
-			err = prepare_sym_chain_op(vcrypto, op, vc_req,
+			vhost_user_iotlb_rd_lock(vc_req_out->vq);
+			err = prepare_sym_chain_op(vcrypto, op, vc_req_out,
 					&req.u.sym_req.u.chain, desc,
 					max_n_descs);
+			vhost_user_iotlb_rd_unlock(vc_req_out->vq);
 			break;
 		}
 		if (unlikely(err != 0)) {
@@ -1283,7 +1289,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 
 error_exit:
 
-	inhdr = reach_inhdr(vc_req, descs, max_n_descs);
+	inhdr = reach_inhdr(vc_req->dev, vq, descs, max_n_descs);
 	if (likely(inhdr != NULL))
 		inhdr->status = (uint8_t)err;
 
-- 
2.25.1


  reply	other threads:[~2025-02-28 13:47 UTC|newest]

Thread overview: 121+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-12-24  7:36 [v1 00/16] crypto/virtio: vDPA and asymmetric support Gowrishankar Muthukrishnan
2024-12-24  7:36 ` [v1 01/16] vhost: include AKCIPHER algorithms in crypto_config Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 02/16] crypto/virtio: remove redundant crypto queue free Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 03/16] crypto/virtio: add asymmetric RSA support Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 04/16] test/crypto: check for RSA capability Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 05/16] test/crypto: return proper codes in create session Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 06/16] test/crypto: add asymmetric tests for virtio PMD Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 07/16] vhost: add asymmetric RSA support Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 08/16] examples/vhost_crypto: add asymmetric support Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 09/16] crypto/virtio: fix dataqueues iteration Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 10/16] crypto/virtio: refactor queue operations Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 11/16] crypto/virtio: add packed ring support Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 12/16] common/virtio: common virtio log Gowrishankar Muthukrishnan
2024-12-24  8:14   ` David Marchand
2025-01-07 10:57     ` [EXTERNAL] " Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 13/16] common/virtio: move vDPA to common directory Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 14/16] common/virtio: support cryptodev in vdev setup Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 15/16] crypto/virtio: add vhost backend to virtio_user Gowrishankar Muthukrishnan
2024-12-24  7:37 ` [v1 16/16] test/crypto: test virtio_crypto_user PMD Gowrishankar Muthukrishnan
2025-01-07 17:52 ` [v2 0/2] crypto/virtio: add RSA support Gowrishankar Muthukrishnan
2025-01-07 17:52   ` [v2 1/2] crypto/virtio: add asymmetric " Gowrishankar Muthukrishnan
2025-01-07 17:52   ` [v2 2/2] test/crypto: add asymmetric tests for virtio PMD Gowrishankar Muthukrishnan
2025-02-21 17:41   ` [v3 0/6] crypto/virtio: enhancements for RSA and vDPA Gowrishankar Muthukrishnan
2025-02-21 17:41     ` [v3 1/6] crypto/virtio: add asymmetric RSA support Gowrishankar Muthukrishnan
2025-02-21 17:41     ` [v3 2/6] crypto/virtio: refactor queue operations Gowrishankar Muthukrishnan
2025-02-21 17:41     ` [v3 3/6] crypto/virtio: add packed ring support Gowrishankar Muthukrishnan
2025-02-21 17:41     ` [v3 4/6] crypto/virtio: add vDPA backend Gowrishankar Muthukrishnan
2025-02-21 17:41     ` [v3 5/6] test/crypto: add asymmetric tests for virtio PMD Gowrishankar Muthukrishnan
2025-02-21 17:41     ` [v3 6/6] test/crypto: add tests for virtio user PMD Gowrishankar Muthukrishnan
2025-02-22  9:16     ` [v4 0/6] crypto/virtio: enhancements for RSA and vDPA Gowrishankar Muthukrishnan
2025-02-22  9:16       ` [v4 1/6] crypto/virtio: add asymmetric RSA support Gowrishankar Muthukrishnan
2025-02-22  9:16       ` [v4 2/6] crypto/virtio: refactor queue operations Gowrishankar Muthukrishnan
2025-02-22  9:16       ` [v4 3/6] crypto/virtio: add packed ring support Gowrishankar Muthukrishnan
2025-02-22  9:16       ` [v4 4/6] crypto/virtio: add vDPA backend Gowrishankar Muthukrishnan
2025-02-22  9:16       ` [v4 5/6] test/crypto: add asymmetric tests for virtio PMD Gowrishankar Muthukrishnan
2025-02-22  9:16       ` [v4 6/6] test/crypto: add tests for virtio user PMD Gowrishankar Muthukrishnan
2025-02-24  7:25       ` [v4 0/6] crypto/virtio: enhancements for RSA and vDPA Akhil Goyal
2025-02-26 18:58       ` [v5 " Gowrishankar Muthukrishnan
2025-02-26 18:58         ` [v5 1/6] crypto/virtio: add asymmetric RSA support Gowrishankar Muthukrishnan
2025-02-26 18:58         ` [v5 2/6] crypto/virtio: refactor queue operations Gowrishankar Muthukrishnan
2025-02-26 18:58         ` [v5 3/6] crypto/virtio: add packed ring support Gowrishankar Muthukrishnan
2025-02-26 18:58         ` [v5 4/6] crypto/virtio: add vDPA backend Gowrishankar Muthukrishnan
2025-02-26 18:58         ` [v5 5/6] test/crypto: add asymmetric tests for virtio PMD Gowrishankar Muthukrishnan
2025-02-26 18:58         ` [v5 6/6] test/crypto: add tests for virtio user PMD Gowrishankar Muthukrishnan
2025-01-07 18:02 ` [v2 0/2] vhost: add RSA support Gowrishankar Muthukrishnan
2025-01-07 18:02   ` [v2 1/2] vhost: add asymmetric " Gowrishankar Muthukrishnan
2025-01-29 16:07     ` Maxime Coquelin
2025-01-07 18:02   ` [v2 2/2] examples/vhost_crypto: add asymmetric support Gowrishankar Muthukrishnan
2025-01-29 16:13     ` Maxime Coquelin
2025-01-30  9:29       ` [EXTERNAL] " Gowrishankar Muthukrishnan
2025-02-21 17:30   ` [v3 0/5] vhost: add RSA support Gowrishankar Muthukrishnan
2025-02-21 17:30     ` [v3 1/5] vhost: skip crypto op fetch before vring init Gowrishankar Muthukrishnan
2025-02-21 17:30     ` [v3 2/5] vhost: update vhost_user crypto session parameters Gowrishankar Muthukrishnan
2025-02-21 17:30     ` [v3 3/5] examples/vhost_crypto: fix user callbacks Gowrishankar Muthukrishnan
2025-02-21 17:30     ` [v3 4/5] vhost: support asymmetric RSA crypto ops Gowrishankar Muthukrishnan
2025-02-21 17:30     ` [v3 5/5] examples/vhost_crypto: support asymmetric crypto Gowrishankar Muthukrishnan
2025-02-22  8:38     ` [v4 0/5] vhost: add RSA support Gowrishankar Muthukrishnan
2025-02-22  8:38       ` [v4 1/5] vhost: skip crypto op fetch before vring init Gowrishankar Muthukrishnan
2025-02-24  7:13         ` Akhil Goyal
2025-02-22  8:38       ` [v4 2/5] vhost: update vhost_user crypto session parameters Gowrishankar Muthukrishnan
2025-02-24  7:14         ` Akhil Goyal
2025-02-22  8:38       ` [v4 3/5] examples/vhost_crypto: fix user callbacks Gowrishankar Muthukrishnan
2025-02-24  7:15         ` Akhil Goyal
2025-02-22  8:38       ` [v4 4/5] vhost: support asymmetric RSA crypto ops Gowrishankar Muthukrishnan
2025-02-24  7:24         ` Akhil Goyal
2025-02-22  8:38       ` [v4 5/5] examples/vhost_crypto: support asymmetric crypto Gowrishankar Muthukrishnan
2025-02-24  7:19         ` Akhil Goyal
2025-02-24  7:28       ` [v4 0/5] vhost: add RSA support Akhil Goyal
2025-02-24 10:35       ` [v5 " Gowrishankar Muthukrishnan
2025-02-24 10:35         ` [v5 1/5] vhost: skip crypto op fetch before vring init Gowrishankar Muthukrishnan
2025-02-25 16:17           ` Maxime Coquelin
2025-02-26 17:40             ` [EXTERNAL] " Gowrishankar Muthukrishnan
2025-02-24 10:35         ` [v5 2/5] vhost: update vhost_user crypto session parameters Gowrishankar Muthukrishnan
2025-02-24 10:35         ` [v5 3/5] examples/vhost_crypto: fix user callbacks Gowrishankar Muthukrishnan
2025-02-24 10:35         ` [v5 4/5] vhost: support asymmetric RSA crypto ops Gowrishankar Muthukrishnan
2025-02-24 10:35         ` [v5 5/5] examples/vhost_crypto: support asymmetric crypto Gowrishankar Muthukrishnan
2025-02-26 18:43         ` [v6 0/5] vhost: add RSA support Gowrishankar Muthukrishnan
2025-02-26 18:43           ` [v6 1/5] vhost: skip crypto op fetch before vring init Gowrishankar Muthukrishnan
2025-02-27  9:15             ` Maxime Coquelin
2025-02-27  9:19               ` Maxime Coquelin
2025-02-27 13:15                 ` [EXTERNAL] " Gowrishankar Muthukrishnan
2025-02-27 18:07                   ` Gowrishankar Muthukrishnan
2025-02-28  8:48                     ` David Marchand
2025-02-28  9:40                       ` Maxime Coquelin
2025-02-28 13:59                         ` Gowrishankar Muthukrishnan
2025-02-28 15:16                           ` Maxime Coquelin
2025-02-28 13:53                       ` Gowrishankar Muthukrishnan
2025-02-26 18:43           ` [v6 2/5] vhost: update vhost_user crypto session parameters Gowrishankar Muthukrishnan
2025-02-26 18:43           ` [v6 3/5] examples/vhost_crypto: fix user callbacks Gowrishankar Muthukrishnan
2025-02-26 18:43           ` [v6 4/5] vhost: support asymmetric RSA crypto ops Gowrishankar Muthukrishnan
2025-02-26 18:43           ` [v6 5/5] examples/vhost_crypto: support asymmetric crypto Gowrishankar Muthukrishnan
2025-02-27 13:59           ` [v7 0/5] vhost: add RSA support Gowrishankar Muthukrishnan
2025-02-27 13:59             ` [v7 1/5] vhost: skip crypto op fetch before vring init Gowrishankar Muthukrishnan
2025-02-27 13:59             ` [v7 2/5] vhost: update vhost_user crypto session parameters Gowrishankar Muthukrishnan
2025-02-27 13:59             ` [v7 3/5] examples/vhost_crypto: fix user callbacks Gowrishankar Muthukrishnan
2025-02-27 13:59             ` [v7 4/5] vhost: support asymmetric RSA crypto ops Gowrishankar Muthukrishnan
2025-02-27 13:59             ` [v7 5/5] examples/vhost_crypto: support asymmetric crypto Gowrishankar Muthukrishnan
2025-02-27 18:15             ` [v8 0/5] vhost: add RSA support Gowrishankar Muthukrishnan
2025-02-27 18:15               ` [v8 1/5] vhost: skip crypto op fetch before vring init Gowrishankar Muthukrishnan
2025-02-27 18:15               ` [v8 2/5] vhost: update vhost_user crypto session parameters Gowrishankar Muthukrishnan
2025-02-27 18:15               ` [v8 3/5] examples/vhost_crypto: fix user callbacks Gowrishankar Muthukrishnan
2025-02-27 18:15               ` [v8 4/5] vhost: support asymmetric RSA crypto ops Gowrishankar Muthukrishnan
2025-02-27 18:15               ` [v8 5/5] examples/vhost_crypto: support asymmetric crypto Gowrishankar Muthukrishnan
2025-02-28 13:47               ` [v9 0/6] vhost: add RSA support Gowrishankar Muthukrishnan
2025-02-28 13:47                 ` Gowrishankar Muthukrishnan [this message]
2025-02-28 13:47                 ` [v9 2/6] vhost: skip crypto op fetch before vring init Gowrishankar Muthukrishnan
2025-02-28 13:47                 ` [v9 3/6] vhost: update vhost_user crypto session parameters Gowrishankar Muthukrishnan
2025-02-28 13:47                 ` [v9 4/6] examples/vhost_crypto: fix user callbacks Gowrishankar Muthukrishnan
2025-02-28 13:47                 ` [v9 5/6] vhost: support asymmetric RSA crypto ops Gowrishankar Muthukrishnan
2025-02-28 13:47                 ` [v9 6/6] examples/vhost_crypto: support asymmetric crypto Gowrishankar Muthukrishnan
2025-01-07 18:08 ` [v2 0/2] crypto/virtio: add packed ring support Gowrishankar Muthukrishnan
2025-01-07 18:08   ` [v2 1/2] crypto/virtio: refactor queue operations Gowrishankar Muthukrishnan
2025-01-07 18:08   ` [v2 2/2] crypto/virtio: add packed ring support Gowrishankar Muthukrishnan
2025-01-07 18:44 ` [v2 0/4] crypto/virtio: add vDPA backend support Gowrishankar Muthukrishnan
2025-01-07 18:44   ` [v2 1/4] common/virtio: move vDPA to common directory Gowrishankar Muthukrishnan
2025-02-06  9:40     ` Maxime Coquelin
2025-02-06 14:21       ` [EXTERNAL] " Gowrishankar Muthukrishnan
2025-01-07 18:44   ` [v2 2/4] common/virtio: support cryptodev in vdev setup Gowrishankar Muthukrishnan
2025-01-07 18:44   ` [v2 3/4] crypto/virtio: add vhost backend to virtio_user Gowrishankar Muthukrishnan
2025-02-06 13:14     ` Maxime Coquelin
2025-01-07 18:44   ` [v2 4/4] test/crypto: test virtio_crypto_user PMD Gowrishankar Muthukrishnan

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=7aff12586d4a2091e983e7d434bbb0d7b1957ae1.1740749809.git.gmuthukrishn@marvell.com \
    --to=gmuthukrishn@marvell.com \
    --cc=anoobj@marvell.com \
    --cc=chenbox@nvidia.com \
    --cc=david.marchand@redhat.com \
    --cc=dev@dpdk.org \
    --cc=gakhil@marvell.com \
    --cc=maxime.coquelin@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).