patches for DPDK stable branches
From: David Marchand <david.marchand@redhat.com>
To: dev@dpdk.org
Cc: gakhil@marvell.com, sunilprakashrao.uttarwar@amd.com,
	stable@dpdk.org, Amaranath Somalapuram <asomalap@amd.com>
Subject: [PATCH v2 3/4] crypto/ccp: fix IOVA handling
Date: Tue,  4 Oct 2022 11:51:31 +0200
Message-ID: <20221004095132.198777-4-david.marchand@redhat.com>
In-Reply-To: <20221004095132.198777-1-david.marchand@redhat.com>

Using IOVA or physical addresses is something that the user (via
--iova-mode=) or the bus code decides.

The crypto/ccp PCI driver should only use rte_mem_virt2iova.
It should not decide which translation to use based solely on the
kernel module the PCI device is bound to.

While at it, the global variable sha_ctx looks unsafe and unneeded.
Remove it.

Fixes: 09a0fd736a08 ("crypto/ccp: enable IOMMU")
Cc: stable@dpdk.org

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 105 ++++++-------------------------
 drivers/crypto/ccp/ccp_dev.c     |   9 +--
 drivers/crypto/ccp/ccp_pci.c     |  34 ----------
 drivers/crypto/ccp/ccp_pci.h     |   3 -
 drivers/crypto/ccp/rte_ccp_pmd.c |   3 -
 5 files changed, 19 insertions(+), 135 deletions(-)
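
(Note for reviewers, not part of the patch: a minimal sketch of the
translation pattern the driver now relies on. The helper name
ccp_buf_iova is hypothetical and is not taken from the driver; it only
illustrates that rte_mem_virt2iova() already follows the IOVA mode
selected by EAL, so no per-kernel-module branching is needed.)

#include <stdio.h>
#include <rte_eal.h>
#include <rte_memory.h>

/* Hypothetical helper, for illustration only. */
static rte_iova_t
ccp_buf_iova(const void *buf)
{
	/*
	 * EAL picks the IOVA mode at init time, from --iova-mode= or
	 * from the bus code; a PMD only observes the result.
	 */
	if (rte_eal_iova_mode() == RTE_IOVA_VA)
		printf("IOVA as VA: virt2iova is an identity mapping\n");

	/*
	 * rte_mem_virt2iova() honours that mode (VA in RTE_IOVA_VA,
	 * PA in RTE_IOVA_PA), which is why the iommu_mode checks
	 * removed below are unnecessary.
	 */
	return rte_mem_virt2iova(buf);
}

For instance, a user can still force physical addressing with an EAL
option such as --iova-mode=pa; the translation above keeps working
without any driver change.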

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 4bab18323b..351d8ac63e 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -33,8 +33,6 @@
 #include <openssl/err.h>
 #include <openssl/hmac.h>
 
-extern int iommu_mode;
-void *sha_ctx;
 /* SHA initial context values */
 uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
 	SHA1_H4, SHA1_H3,
@@ -748,13 +746,8 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 		CCP_LOG_ERR("Invalid CCP Engine");
 		return -ENOTSUP;
 	}
-	if (iommu_mode == 2) {
-		sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
-		sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
-	} else {
-		sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
-		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
-	}
+	sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+	sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
 	return 0;
 }
 
@@ -793,7 +786,6 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha1_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
-		rte_memcpy(sha_ctx, sess->auth.ctx, SHA_COMMON_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
 		if (sess->auth_opt) {
@@ -832,7 +824,6 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha224_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
-		rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
 		if (sess->auth_opt) {
@@ -895,7 +886,6 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha256_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
-		rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
 		if (sess->auth_opt) {
@@ -958,7 +948,6 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha384_init;
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
-		rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
 		if (sess->auth_opt) {
@@ -1023,7 +1012,6 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha512_init;
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
-		rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
 		if (sess->auth_opt) {
@@ -1173,13 +1161,8 @@ ccp_configure_session_aead(struct ccp_session *sess,
 		CCP_LOG_ERR("Unsupported aead algo");
 		return -ENOTSUP;
 	}
-	if (iommu_mode == 2) {
-		sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
-		sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
-	} else {
-		sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
-		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
-	}
+	sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+	sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
 	return 0;
 }
 
@@ -1594,14 +1577,8 @@ ccp_perform_hmac(struct rte_crypto_op *op,
 					      op->sym->auth.data.offset);
 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
-	if (iommu_mode == 2) {
-		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
-		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
-	} else {
-		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
-	}
-	dest_addr_t = dest_addr;
+	dest_addr_t = dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+	pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
 
 	/** Load PHash1 to LSB*/
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
@@ -1683,10 +1660,7 @@ ccp_perform_hmac(struct rte_crypto_op *op,
 
 	/** Load PHash2 to LSB*/
 	addr += session->auth.ctx_len;
-	if (iommu_mode == 2)
-		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
-	else
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1774,14 +1748,8 @@ ccp_perform_sha(struct rte_crypto_op *op,
 					      op->sym->auth.data.offset);
 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
-	if (iommu_mode == 2) {
-		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
-		pst.src_addr = (phys_addr_t)sha_ctx;
-	} else {
-		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
-						     session->auth.ctx);
-	}
+	pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)session->auth.ctx);
+	dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
 
 	/** Passthru sha context*/
 
@@ -1871,15 +1839,8 @@ ccp_perform_sha3_hmac(struct rte_crypto_op *op,
 		CCP_LOG_ERR("CCP MBUF append failed\n");
 		return -1;
 	}
-	if (iommu_mode == 2) {
-		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
-		ctx_paddr = (phys_addr_t)rte_mem_virt2iova(
-					session->auth.pre_compute);
-	} else {
-		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
-		ctx_paddr = (phys_addr_t)rte_mem_virt2phy(
-					session->auth.pre_compute);
-	}
+	dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+	ctx_paddr = (phys_addr_t)rte_mem_virt2iova(session->auth.pre_compute);
 	dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 	memset(desc, 0, Q_DESC_SIZE);
@@ -2017,13 +1978,8 @@ ccp_perform_sha3(struct rte_crypto_op *op,
 		CCP_LOG_ERR("CCP MBUF append failed\n");
 		return -1;
 	}
-	if (iommu_mode == 2) {
-		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
-		ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
-	} else {
-		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
-		ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
-	}
+	dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+	ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
 
 	ctx_addr = session->auth.sha3_ctx;
 
@@ -2099,13 +2055,7 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 
 		ctx_addr = session->auth.pre_compute;
 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
-		if (iommu_mode == 2)
-			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
-							(void *)ctx_addr);
-		else
-			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
-							(void *)ctx_addr);
-
+		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2143,12 +2093,7 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 	} else {
 		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
-		if (iommu_mode == 2)
-			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
-							(void *)ctx_addr);
-		else
-			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
-							(void *)ctx_addr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2342,12 +2287,7 @@ ccp_perform_3des(struct rte_crypto_op *op,
 
 		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
 			   iv, session->iv.length);
-		if (iommu_mode == 2)
-			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
-							(void *) lsb_buf);
-		else
-			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
-							(void *) lsb_buf);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *) lsb_buf);
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2370,11 +2310,7 @@ ccp_perform_3des(struct rte_crypto_op *op,
 	else
 		dest_addr = src_addr;
 
-	if (iommu_mode == 2)
-		key_addr = rte_mem_virt2iova(session->cipher.key_ccp);
-	else
-		key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
-
+	key_addr = rte_mem_virt2iova(session->cipher.key_ccp);
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 
 	memset(desc, 0, Q_DESC_SIZE);
@@ -2768,12 +2704,7 @@ process_ops_to_enqueue(struct ccp_qp *qp,
 	b_info->lsb_buf_idx = 0;
 	b_info->desccnt = 0;
 	b_info->cmd_q = cmd_q;
-	if (iommu_mode == 2)
-		b_info->lsb_buf_phys =
-			(phys_addr_t)rte_mem_virt2iova((void *)b_info->lsb_buf);
-	else
-		b_info->lsb_buf_phys =
-			(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+	b_info->lsb_buf_phys = (phys_addr_t)rte_mem_virt2iova((void *)b_info->lsb_buf);
 
 	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
 
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 410e62121e..14c54929c4 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -23,7 +23,6 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
-int iommu_mode;
 struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
 static int ccp_dev_id;
 
@@ -652,7 +651,7 @@ is_ccp_device(const char *dirname,
 static int
 ccp_probe_device(int ccp_type, struct rte_pci_device *pci_dev)
 {
-	struct ccp_device *ccp_dev = NULL;
+	struct ccp_device *ccp_dev;
 
 	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
 			      RTE_CACHE_LINE_SIZE);
@@ -683,16 +682,10 @@ ccp_probe_devices(struct rte_pci_device *pci_dev,
 	struct dirent *d;
 	DIR *dir;
 	int ret = 0;
-	int module_idx = 0;
 	uint16_t domain;
 	uint8_t bus, devid, function;
 	char dirname[PATH_MAX];
 
-	module_idx = ccp_check_pci_uio_module();
-	if (module_idx < 0)
-		return -1;
-
-	iommu_mode = module_idx;
 	TAILQ_INIT(&ccp_list);
 	dir = opendir(SYSFS_PCI_DEVICES);
 	if (dir == NULL)
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
index c941e222c7..bd1a037f76 100644
--- a/drivers/crypto/ccp/ccp_pci.c
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -11,40 +11,6 @@
 #include <rte_string_fns.h>
 
 #include "ccp_pci.h"
-#include "ccp_pmd_private.h"
-
-static const char * const uio_module_names[] = {
-	"igb_uio",
-	"uio_pci_generic",
-	"vfio_pci"
-};
-
-int
-ccp_check_pci_uio_module(void)
-{
-	FILE *fp;
-	int i;
-	char buf[BUFSIZ];
-
-	fp = fopen(PROC_MODULES, "r");
-	if (fp == NULL)
-		return -1;
-	i = 0;
-	while (uio_module_names[i] != NULL) {
-		while (fgets(buf, sizeof(buf), fp) != NULL) {
-			if (!strncmp(buf, uio_module_names[i],
-				     strlen(uio_module_names[i]))) {
-				fclose(fp);
-				return i;
-			}
-		}
-		i++;
-		rewind(fp);
-	}
-	fclose(fp);
-	CCP_LOG_DBG("Insert igb_uio or uio_pci_generic kernel module(s)");
-	return -1;/* uio not inserted */
-}
 
 /*
  * split up a pci address into its constituent parts.
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
index 6736bf8ad3..d9a8b9dcc6 100644
--- a/drivers/crypto/ccp/ccp_pci.h
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -10,9 +10,6 @@
 #include <bus_pci_driver.h>
 
 #define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
-#define PROC_MODULES "/proc/modules"
-
-int ccp_check_pci_uio_module(void);
 
 int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
 			      uint8_t *bus, uint8_t *devid, uint8_t *function);
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 7338ef0ae8..8b3a5a304b 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -22,7 +22,6 @@
 static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 uint8_t cryptodev_cnt;
-extern void *sha_ctx;
 
 struct ccp_pmd_init_params {
 	struct rte_cryptodev_pmd_init_params def_p;
@@ -213,7 +212,6 @@ cryptodev_ccp_remove(struct rte_pci_device *pci_dev)
 		return -ENODEV;
 
 	ccp_pmd_init_done = 0;
-	rte_free(sha_ctx);
 
 	RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
 			name, rte_socket_id());
@@ -300,7 +298,6 @@ cryptodev_ccp_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		.auth_opt = CCP_PMD_AUTH_OPT_CCP,
 	};
 
-	sha_ctx = (void *)rte_malloc(NULL, SHA512_DIGEST_SIZE, 64);
 	if (ccp_pmd_init_done) {
 		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
 		return -EFAULT;
-- 
2.37.3



Thread overview: 15+ messages
     [not found] <20220909150411.3702860-1-david.marchand@redhat.com>
2022-09-09 15:04 ` [PATCH 1/4] crypto/ccp: remove some printf David Marchand
2022-09-09 15:04 ` [PATCH 2/4] crypto/ccp: remove some dead code for UIO David Marchand
2022-09-09 15:04 ` [PATCH 3/4] crypto/ccp: fix IOVA handling David Marchand
2022-09-09 15:04 ` [PATCH 4/4] crypto/ccp: fix PCI probing David Marchand
     [not found] ` <20221004095132.198777-1-david.marchand@redhat.com>
2022-10-04  9:51   ` [PATCH v2 1/4] crypto/ccp: remove some printf David Marchand
2023-01-13 11:58     ` Uttarwar, Sunil Prakashrao
2023-01-30 18:42     ` [EXT] " Akhil Goyal
2022-10-04  9:51   ` [PATCH v2 2/4] crypto/ccp: remove some dead code for UIO David Marchand
2023-01-13 12:00     ` Uttarwar, Sunil Prakashrao
2022-10-04  9:51   ` David Marchand [this message]
2023-01-13 12:00     ` [PATCH v2 3/4] crypto/ccp: fix IOVA handling Uttarwar, Sunil Prakashrao
2022-10-04  9:51   ` [PATCH v2 4/4] crypto/ccp: fix PCI probing David Marchand
2023-03-02 11:43 ` [PATCH v2] " David Marchand
2023-03-06 12:05   ` Uttarwar, Sunil Prakashrao
2023-03-11 18:49     ` Akhil Goyal
