DPDK patches and discussions
* [dpdk-dev] [PATCH v2] crypto/ccp: enable IOMMU for CCP
@ 2020-01-28  8:38 asomalap
  2020-02-12  7:11 ` Akhil Goyal
                   ` (2 more replies)
  0 siblings, 3 replies; 14+ messages in thread
From: asomalap @ 2020-01-28  8:38 UTC (permalink / raw)
  To: dev; +Cc: stable

From: Amaranath Somalapuram <asomalap@amd.com>

CCP uses the vdev framework, and the vdev framework does not support IOMMU.
Add custom IOMMU support for the AMD CCP driver.
Cc: stable@dpdk.org

Signed-off-by: Amaranath Somalapuram <asomalap@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 127 ++++++++++++++++++++++++-------
 drivers/crypto/ccp/ccp_dev.c     |  54 +++----------
 drivers/crypto/ccp/ccp_pci.c     |   1 +
 drivers/crypto/ccp/rte_ccp_pmd.c |   3 +
 4 files changed, 113 insertions(+), 72 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 4256734d1..1918ae88d 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -31,8 +31,10 @@
 #include <openssl/err.h>
 #include <openssl/hmac.h>
 
+extern int iommu_mode;
+void *sha_ctx;
 /* SHA initial context values */
-static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
 	SHA1_H4, SHA1_H3,
 	SHA1_H2, SHA1_H1,
 	SHA1_H0, 0x0U,
@@ -744,8 +746,13 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 		CCP_LOG_ERR("Invalid CCP Engine");
 		return -ENOTSUP;
 	}
-	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
-	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	if (iommu_mode == 2) {
+		sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
+	} else {
+		sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	}
 	return 0;
 }
 
@@ -784,6 +791,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha1_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA_COMMON_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
 		if (sess->auth_opt) {
@@ -822,6 +830,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha224_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
 		if (sess->auth_opt) {
@@ -884,6 +893,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha256_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
 		if (sess->auth_opt) {
@@ -946,6 +956,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha384_init;
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
 		if (sess->auth_opt) {
@@ -1010,6 +1021,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha512_init;
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
 		if (sess->auth_opt) {
@@ -1159,8 +1171,13 @@ ccp_configure_session_aead(struct ccp_session *sess,
 		CCP_LOG_ERR("Unsupported aead algo");
 		return -ENOTSUP;
 	}
-	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
-	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	if (iommu_mode == 2) {
+		sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
+	} else {
+		sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	}
 	return 0;
 }
 
@@ -1575,11 +1592,16 @@ ccp_perform_hmac(struct rte_crypto_op *op,
 					      op->sym->auth.data.offset);
 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
-	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	}
 	dest_addr_t = dest_addr;
 
 	/** Load PHash1 to LSB*/
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1659,7 +1681,10 @@ ccp_perform_hmac(struct rte_crypto_op *op,
 
 	/** Load PHash2 to LSB*/
 	addr += session->auth.ctx_len;
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	if (iommu_mode == 2)
+		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+	else
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1745,15 +1770,19 @@ ccp_perform_sha(struct rte_crypto_op *op,
 
 	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
 					      op->sym->auth.data.offset);
-
 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
-	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+		pst.src_addr = (phys_addr_t)sha_ctx;
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+						     session->auth.ctx);
+	}
 
 	/** Passthru sha context*/
 
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
-						     session->auth.ctx);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1840,10 +1869,16 @@ ccp_perform_sha3_hmac(struct rte_crypto_op *op,
 		CCP_LOG_ERR("CCP MBUF append failed\n");
 		return -1;
 	}
-	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2iova(
+					session->auth.pre_compute);
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2phy(
+					session->auth.pre_compute);
+	}
 	dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
-	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
-						   *)session->auth.pre_compute);
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 	memset(desc, 0, Q_DESC_SIZE);
 
@@ -1964,7 +1999,7 @@ ccp_perform_sha3(struct rte_crypto_op *op,
 	struct ccp_session *session;
 	union ccp_function function;
 	struct ccp_desc *desc;
-	uint8_t *ctx_addr, *append_ptr;
+	uint8_t *ctx_addr = NULL, *append_ptr = NULL;
 	uint32_t tail;
 	phys_addr_t src_addr, dest_addr, ctx_paddr;
 
@@ -1980,9 +2015,15 @@ ccp_perform_sha3(struct rte_crypto_op *op,
 		CCP_LOG_ERR("CCP MBUF append failed\n");
 		return -1;
 	}
-	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+	}
+
 	ctx_addr = session->auth.sha3_ctx;
-	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
 
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 	memset(desc, 0, Q_DESC_SIZE);
@@ -2032,20 +2073,25 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 	struct ccp_passthru pst;
 	struct ccp_desc *desc;
 	uint32_t tail;
-	uint8_t *src_tb, *append_ptr, *ctx_addr;
-	phys_addr_t src_addr, dest_addr, key_addr;
+	uint8_t *src_tb, *append_ptr = 0, *ctx_addr;
+	phys_addr_t src_addr, dest_addr = 0, key_addr = 0;
 	int length, non_align_len;
 
 	session = (struct ccp_session *)get_sym_session_private_data(
 					 op->sym->session,
 					ccp_cryptodev_driver_id);
-	key_addr = rte_mem_virt2phy(session->auth.key_ccp);
 
 	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
 					      op->sym->auth.data.offset);
 	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
-	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	if (iommu_mode == 2) {
+		key_addr = rte_mem_virt2iova(session->auth.key_ccp);
+		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+	} else {
+		key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	}
 
 	function.raw = 0;
 	CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
@@ -2056,7 +2102,13 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 
 		ctx_addr = session->auth.pre_compute;
 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+							(void *)ctx_addr);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+							(void *)ctx_addr);
+
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2094,7 +2146,12 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 	} else {
 		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+							(void *)ctx_addr);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+							(void *)ctx_addr);
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2288,8 +2345,12 @@ ccp_perform_3des(struct rte_crypto_op *op,
 
 		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
 			   iv, session->iv.length);
-
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+							(void *) lsb_buf);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+							(void *) lsb_buf);
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2312,7 +2373,10 @@ ccp_perform_3des(struct rte_crypto_op *op,
 	else
 		dest_addr = src_addr;
 
-	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+	if (iommu_mode == 2)
+		key_addr = rte_mem_virt2iova(session->cipher.key_ccp);
+	else
+		key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
 
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 
@@ -2707,8 +2771,13 @@ process_ops_to_enqueue(struct ccp_qp *qp,
 	b_info->lsb_buf_idx = 0;
 	b_info->desccnt = 0;
 	b_info->cmd_q = cmd_q;
-	b_info->lsb_buf_phys =
-		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+	if (iommu_mode == 2)
+		b_info->lsb_buf_phys =
+			(phys_addr_t)rte_mem_virt2iova((void *)b_info->lsb_buf);
+	else
+		b_info->lsb_buf_phys =
+			(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+
 	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
 
 	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 80fe6a453..3a63b01e6 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -23,6 +23,7 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
+int iommu_mode;
 struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
 static int ccp_dev_id;
 
@@ -512,7 +513,7 @@ ccp_add_device(struct ccp_device *dev, int type)
 
 		CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
 	}
-	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x0);
 
 	/* Copy the private LSB mask to the public registers */
 	status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
@@ -657,9 +658,7 @@ ccp_probe_device(const char *dirname, uint16_t domain,
 	struct rte_pci_device *pci;
 	char filename[PATH_MAX];
 	unsigned long tmp;
-	int uio_fd = -1, i, uio_num;
-	char uio_devname[PATH_MAX];
-	void *map_addr;
+	int uio_fd = -1;
 
 	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
 			      RTE_CACHE_LINE_SIZE);
@@ -710,46 +709,14 @@ ccp_probe_device(const char *dirname, uint16_t domain,
 	snprintf(filename, sizeof(filename), "%s/resource", dirname);
 	if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
 		goto fail;
+	if (iommu_mode == 2)
+		pci->kdrv = RTE_KDRV_VFIO;
+	else if (iommu_mode == 0)
+		pci->kdrv = RTE_KDRV_IGB_UIO;
+	else if (iommu_mode == 1)
+		pci->kdrv = RTE_KDRV_UIO_GENERIC;
 
-	uio_num = ccp_find_uio_devname(dirname);
-	if (uio_num < 0) {
-		/*
-		 * It may take time for uio device to appear,
-		 * wait  here and try again
-		 */
-		usleep(100000);
-		uio_num = ccp_find_uio_devname(dirname);
-		if (uio_num < 0)
-			goto fail;
-	}
-	snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
-
-	uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
-	if (uio_fd < 0)
-		goto fail;
-	if (flock(uio_fd, LOCK_EX | LOCK_NB))
-		goto fail;
-
-	/* Map the PCI memory resource of device */
-	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
-
-		char devname[PATH_MAX];
-		int res_fd;
-
-		if (pci->mem_resource[i].phys_addr == 0)
-			continue;
-		snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
-		res_fd = open(devname, O_RDWR);
-		if (res_fd < 0)
-			goto fail;
-		map_addr = mmap(NULL, pci->mem_resource[i].len,
-				PROT_READ | PROT_WRITE,
-				MAP_SHARED, res_fd, 0);
-		if (map_addr == MAP_FAILED)
-			goto fail;
-
-		pci->mem_resource[i].addr = map_addr;
-	}
+	rte_pci_map_device(pci);
 
 	/* device is valid, add in list */
 	if (ccp_add_device(ccp_dev, ccp_type)) {
@@ -784,6 +751,7 @@ ccp_probe_devices(const struct rte_pci_id *ccp_id)
 	if (module_idx < 0)
 		return -1;
 
+	iommu_mode = module_idx;
 	TAILQ_INIT(&ccp_list);
 	dir = opendir(SYSFS_PCI_DEVICES);
 	if (dir == NULL)
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
index 1702a09c4..38029a908 100644
--- a/drivers/crypto/ccp/ccp_pci.c
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -15,6 +15,7 @@
 static const char * const uio_module_names[] = {
 	"igb_uio",
 	"uio_pci_generic",
+	"vfio_pci"
 };
 
 int
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 38cb1fe3d..5f8ab0618 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -23,6 +23,7 @@
 static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 uint8_t cryptodev_cnt;
+extern void *sha_ctx;
 
 struct ccp_pmd_init_params {
 	struct rte_cryptodev_pmd_init_params def_p;
@@ -303,6 +304,7 @@ cryptodev_ccp_remove(struct rte_vdev_device *dev)
 
 	ccp_pmd_init_done = 0;
 	name = rte_vdev_device_name(dev);
+	rte_free(sha_ctx);
 	if (name == NULL)
 		return -EINVAL;
 
@@ -385,6 +387,7 @@ cryptodev_ccp_probe(struct rte_vdev_device *vdev)
 	};
 	const char *input_args;
 
+	sha_ctx = (void *)rte_malloc(NULL, SHA512_DIGEST_SIZE, 64);
 	if (ccp_pmd_init_done) {
 		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
 		return -EFAULT;
-- 
2.17.1
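
A note for readers skimming the v2 diff: the many repeated hunks all apply
one pattern, namely picking the address translation that matches how the
device is attached. The sketch below condenses it; the helper name
ccp_virt2hw() is hypothetical (the patch open-codes this branch at every
call site), and the iommu_mode values follow the patch's own convention.

    #include <rte_memory.h>  /* rte_mem_virt2phy(), rte_mem_virt2iova() */

    extern int iommu_mode;   /* index of the loaded kernel module:
                              * 0 = igb_uio, 1 = uio_pci_generic,
                              * 2 = vfio-pci */

    /* Hypothetical helper equivalent to the branch the patch repeats
     * inline: with vfio-pci (IOMMU enabled) the device must be handed
     * IOVAs, while with the UIO drivers it is handed raw physical
     * addresses. */
    static inline phys_addr_t
    ccp_virt2hw(void *vaddr)
    {
            if (iommu_mode == 2)
                    return (phys_addr_t)rte_mem_virt2iova(vaddr);
            return (phys_addr_t)rte_mem_virt2phy(vaddr);
    }

With such a helper, a call site like
sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp) would read
sess->cipher.key_phys = ccp_virt2hw(sess->cipher.key_ccp). The new sha_ctx
buffer serves a related purpose: the SHA initial-value tables are static
arrays in the driver image, which is presumably not DMA-mapped under VFIO,
so the patch copies them into an rte_malloc() area the device can reach.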



* Re: [dpdk-dev] [PATCH v2] crypto/ccp: enable IOMMU for CCP
  2020-01-28  8:38 [dpdk-dev] [PATCH v2] crypto/ccp: enable IOMMU for CCP asomalap
@ 2020-02-12  7:11 ` Akhil Goyal
  2020-02-12  7:44   ` Somalapuram, Amaranath
  2020-02-12 10:31 ` Kumar, Ravi1
  2020-12-25  8:03 ` [dpdk-dev] [PATCH v3] " asomalap
  2 siblings, 1 reply; 14+ messages in thread
From: Akhil Goyal @ 2020-02-12  7:11 UTC (permalink / raw)
  To: asomalap, dev, Kumar, Ravi1; +Cc: stable


> 
> CCP uses the vdev framework, and the vdev framework does not support IOMMU.
> Add custom IOMMU support for the AMD CCP driver.
> Cc: stable@dpdk.org
> 
> Signed-off-by: Amaranath Somalapuram <asomalap@amd.com>
> ---

Is it a fix or a new feature?
Only if it is a fix should it be sent to stable, and then the patch title should include "crypto/ccp: fix ...".
If it is a feature (which is more likely the case, IMO), the title is fine, or I would change it to "crypto/ccp: enable IOMMU".

@Ravi: could you please review this patch.


* Re: [dpdk-dev] [PATCH v2] crypto/ccp: enable IOMMU for CCP
  2020-02-12  7:11 ` Akhil Goyal
@ 2020-02-12  7:44   ` Somalapuram, Amaranath
  2020-02-12 10:05     ` Akhil Goyal
  0 siblings, 1 reply; 14+ messages in thread
From: Somalapuram, Amaranath @ 2020-02-12  7:44 UTC (permalink / raw)
  To: Akhil Goyal, dev, Kumar,  Ravi1; +Cc: stable

> Is it a fix or a new feature?
It's a new feature.
> Only if it is a fix should it be sent to stable, and then the patch title
> should include "crypto/ccp: fix ...".
Ok, got it. I will keep that in mind next time.
> If it is a feature (which is more likely the case, IMO), the title is fine,
> or I would change it to "crypto/ccp: enable IOMMU".
The title "crypto/ccp: enable IOMMU" will be fine. Do let me know if I should resubmit.

Regards,
S.Amarnath

* Re: [dpdk-dev] [PATCH v2] crypto/ccp: enable IOMMU for CCP
  2020-02-12  7:44   ` Somalapuram, Amaranath
@ 2020-02-12 10:05     ` Akhil Goyal
  0 siblings, 0 replies; 14+ messages in thread
From: Akhil Goyal @ 2020-02-12 10:05 UTC (permalink / raw)
  To: Somalapuram, Amaranath, dev, Kumar, Ravi1; +Cc: stable



> 
> Is it a fix or a new feature?
> It's a new feature.
> Only if it is a fix should it be sent to stable, and then the patch title
> should include "crypto/ccp: fix ...".
> Ok, got it. I will keep that in mind next time.
> If it is a feature (which is more likely the case, IMO), the title is fine,
> or I would change it to "crypto/ccp: enable IOMMU".
> The title "crypto/ccp: enable IOMMU" will be fine. Do let me know if I
> should resubmit.

No, I would just need an ack from the PMD maintainer.


> @Ravi: could you please review this patch.


* Re: [dpdk-dev] [PATCH v2] crypto/ccp: enable IOMMU for CCP
  2020-01-28  8:38 [dpdk-dev] [PATCH v2] crypto/ccp: enable IOMMU for CCP asomalap
  2020-02-12  7:11 ` Akhil Goyal
@ 2020-02-12 10:31 ` Kumar, Ravi1
  2020-02-12 13:50   ` Akhil Goyal
  2020-12-25  8:03 ` [dpdk-dev] [PATCH v3] " asomalap
  2 siblings, 1 reply; 14+ messages in thread
From: Kumar, Ravi1 @ 2020-02-12 10:31 UTC (permalink / raw)
  To: Somalapuram, Amaranath, dev; +Cc: stable

Acked-by: Ravi Kumar <ravi1.kumar@amd.com>



* Re: [dpdk-dev] [PATCH v2] crypto/ccp: enable IOMMU for CCP
  2020-02-12 10:31 ` Kumar, Ravi1
@ 2020-02-12 13:50   ` Akhil Goyal
  2020-02-14 15:53     ` [dpdk-dev] [dpdk-stable] " Thomas Monjalon
  0 siblings, 1 reply; 14+ messages in thread
From: Akhil Goyal @ 2020-02-12 13:50 UTC (permalink / raw)
  To: Kumar, Ravi1, Somalapuram, Amaranath, dev; +Cc: stable

> Acked-by: Ravi Kumar <ravi1.kumar@amd.com>
> 
Applied to dpdk-next-crypto
Removed the stable tag as it is a feature, not a fix.

Thanks.


* Re: [dpdk-dev] [dpdk-stable] [PATCH v2] crypto/ccp: enable IOMMU for CCP
  2020-02-12 13:50   ` Akhil Goyal
@ 2020-02-14 15:53     ` Thomas Monjalon
  0 siblings, 0 replies; 14+ messages in thread
From: Thomas Monjalon @ 2020-02-14 15:53 UTC (permalink / raw)
  To: Kumar, Ravi1, Somalapuram, Amaranath; +Cc: dev, stable, Akhil Goyal

12/02/2020 14:50, Akhil Goyal:
> > Acked-by: Ravi Kumar <ravi1.kumar@amd.com>
> > 
> Applied to dpdk-next-crypto
> Removed stable tag as it is a feature not a fix.

Sorry I must drop this patch because it does not compile on i686.

Note: it is too late in the cycle to add such a feature.
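
One plausible cause of the i686 build failure (an assumption; the thread
does not name it): the v2 patch casts pointers straight to the 64-bit
phys_addr_t, e.g. pst.src_addr = (phys_addr_t)sha_ctx, a cast that is not
warning-clean on targets where pointers are 32 bits wide. The usual
portable idiom goes through uintptr_t first; ptr_to_phys() below is a
hypothetical helper illustrating it, not code from any later revision.

    #include <stdint.h>
    #include <rte_memory.h>  /* phys_addr_t */

    /* Hypothetical fix sketch: converting through uintptr_t keeps the
     * pointer-to-integer conversion well defined and warning-clean
     * whether pointers are 32 or 64 bits wide. */
    static inline phys_addr_t
    ptr_to_phys(const void *p)
    {
            return (phys_addr_t)(uintptr_t)p;
    }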




* [dpdk-dev] [PATCH v3] crypto/ccp: enable IOMMU for CCP
  2020-01-28  8:38 [dpdk-dev] [PATCH v2] crypto/ccp: enable IOMMU for CCP asomalap
  2020-02-12  7:11 ` Akhil Goyal
  2020-02-12 10:31 ` Kumar, Ravi1
@ 2020-12-25  8:03 ` asomalap
  2021-01-15 15:58   ` Akhil Goyal
  2021-05-27 13:24   ` David Marchand
  2 siblings, 2 replies; 14+ messages in thread
From: asomalap @ 2020-12-25  8:03 UTC (permalink / raw)
  To: dev; +Cc: akhil.goyal

From: Amaranath Somalapuram <Amaranath.Somalapuram@amd.com>

CCP uses the vdev framework, and the vdev framework does not support IOMMU.
Add custom IOMMU support for the AMD CCP driver.

Signed-off-by: Amaranath Somalapuram <Amaranath.Somalapuram@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 114 ++++++++++++++++++++++++-------
 drivers/crypto/ccp/ccp_dev.c     |  54 +++------------
 drivers/crypto/ccp/ccp_pci.c     |   1 +
 drivers/crypto/ccp/rte_ccp_pmd.c |   3 +
 4 files changed, 104 insertions(+), 68 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index db3fb6eff..f37d35f18 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -31,8 +31,10 @@
 #include <openssl/err.h>
 #include <openssl/hmac.h>
 
+extern int iommu_mode;
+void *sha_ctx;
 /* SHA initial context values */
-static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
 	SHA1_H4, SHA1_H3,
 	SHA1_H2, SHA1_H1,
 	SHA1_H0, 0x0U,
@@ -744,8 +746,13 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 		CCP_LOG_ERR("Invalid CCP Engine");
 		return -ENOTSUP;
 	}
-	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
-	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	if (iommu_mode == 2) {
+		sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
+	} else {
+		sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	}
 	return 0;
 }
 
@@ -784,6 +791,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha1_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA_COMMON_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
 		if (sess->auth_opt) {
@@ -822,6 +830,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha224_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
 		if (sess->auth_opt) {
@@ -884,6 +893,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha256_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
 		if (sess->auth_opt) {
@@ -946,6 +956,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha384_init;
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
 		if (sess->auth_opt) {
@@ -1010,6 +1021,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha512_init;
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
 		if (sess->auth_opt) {
@@ -1159,8 +1171,13 @@ ccp_configure_session_aead(struct ccp_session *sess,
 		CCP_LOG_ERR("Unsupported aead algo");
 		return -ENOTSUP;
 	}
-	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
-	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	if (iommu_mode == 2) {
+		sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
+	} else {
+		sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	}
 	return 0;
 }
 
@@ -1575,11 +1592,16 @@ ccp_perform_hmac(struct rte_crypto_op *op,
 					      op->sym->auth.data.offset);
 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
-	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	}
 	dest_addr_t = dest_addr;
 
 	/** Load PHash1 to LSB*/
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1659,7 +1681,10 @@ ccp_perform_hmac(struct rte_crypto_op *op,
 
 	/** Load PHash2 to LSB*/
 	addr += session->auth.ctx_len;
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	if (iommu_mode == 2)
+		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+	else
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1745,15 +1770,19 @@ ccp_perform_sha(struct rte_crypto_op *op,
 
 	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
 					      op->sym->auth.data.offset);
-
 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
-	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+		pst.src_addr = (phys_addr_t)sha_ctx;
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+						     session->auth.ctx);
+	}
 
 	/** Passthru sha context*/
 
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
-						     session->auth.ctx);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1840,10 +1869,16 @@ ccp_perform_sha3_hmac(struct rte_crypto_op *op,
 		CCP_LOG_ERR("CCP MBUF append failed\n");
 		return -1;
 	}
-	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2iova(
+					session->auth.pre_compute);
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2phy(
+					session->auth.pre_compute);
+	}
 	dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
-	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
-						   *)session->auth.pre_compute);
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 	memset(desc, 0, Q_DESC_SIZE);
 
@@ -1964,7 +1999,7 @@ ccp_perform_sha3(struct rte_crypto_op *op,
 	struct ccp_session *session;
 	union ccp_function function;
 	struct ccp_desc *desc;
-	uint8_t *ctx_addr, *append_ptr;
+	uint8_t *ctx_addr = NULL, *append_ptr = NULL;
 	uint32_t tail;
 	phys_addr_t src_addr, dest_addr, ctx_paddr;
 
@@ -1980,9 +2015,15 @@ ccp_perform_sha3(struct rte_crypto_op *op,
 		CCP_LOG_ERR("CCP MBUF append failed\n");
 		return -1;
 	}
-	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+	}
+
 	ctx_addr = session->auth.sha3_ctx;
-	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
 
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 	memset(desc, 0, Q_DESC_SIZE);
@@ -2056,7 +2097,13 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 
 		ctx_addr = session->auth.pre_compute;
 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+							(void *)ctx_addr);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+							(void *)ctx_addr);
+
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2094,7 +2141,12 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 	} else {
 		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+							(void *)ctx_addr);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+							(void *)ctx_addr);
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2288,8 +2340,12 @@ ccp_perform_3des(struct rte_crypto_op *op,
 
 		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
 			   iv, session->iv.length);
-
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+							(void *) lsb_buf);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+							(void *) lsb_buf);
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2312,7 +2368,10 @@ ccp_perform_3des(struct rte_crypto_op *op,
 	else
 		dest_addr = src_addr;
 
-	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+	if (iommu_mode == 2)
+		key_addr = rte_mem_virt2iova(session->cipher.key_ccp);
+	else
+		key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
 
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 
@@ -2707,8 +2766,13 @@ process_ops_to_enqueue(struct ccp_qp *qp,
 	b_info->lsb_buf_idx = 0;
 	b_info->desccnt = 0;
 	b_info->cmd_q = cmd_q;
-	b_info->lsb_buf_phys =
-		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+	if (iommu_mode == 2)
+		b_info->lsb_buf_phys =
+			(phys_addr_t)rte_mem_virt2iova((void *)b_info->lsb_buf);
+	else
+		b_info->lsb_buf_phys =
+			(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+
 	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
 
 	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 664ddc174..ee6882b8a 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -23,6 +23,7 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
+int iommu_mode;
 struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
 static int ccp_dev_id;
 
@@ -512,7 +513,7 @@ ccp_add_device(struct ccp_device *dev, int type)
 
 		CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
 	}
-	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x0);
 
 	/* Copy the private LSB mask to the public registers */
 	status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
@@ -657,9 +658,7 @@ ccp_probe_device(const char *dirname, uint16_t domain,
 	struct rte_pci_device *pci;
 	char filename[PATH_MAX];
 	unsigned long tmp;
-	int uio_fd = -1, i, uio_num;
-	char uio_devname[PATH_MAX];
-	void *map_addr;
+	int uio_fd = -1;
 
 	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
 			      RTE_CACHE_LINE_SIZE);
@@ -710,46 +709,14 @@ ccp_probe_device(const char *dirname, uint16_t domain,
 	snprintf(filename, sizeof(filename), "%s/resource", dirname);
 	if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
 		goto fail;
+	if (iommu_mode == 2)
+		pci->kdrv = RTE_PCI_KDRV_VFIO;
+	else if (iommu_mode == 0)
+		pci->kdrv = RTE_PCI_KDRV_IGB_UIO;
+	else if (iommu_mode == 1)
+		pci->kdrv = RTE_PCI_KDRV_UIO_GENERIC;
 
-	uio_num = ccp_find_uio_devname(dirname);
-	if (uio_num < 0) {
-		/*
-		 * It may take time for uio device to appear,
-		 * wait  here and try again
-		 */
-		usleep(100000);
-		uio_num = ccp_find_uio_devname(dirname);
-		if (uio_num < 0)
-			goto fail;
-	}
-	snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
-
-	uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
-	if (uio_fd < 0)
-		goto fail;
-	if (flock(uio_fd, LOCK_EX | LOCK_NB))
-		goto fail;
-
-	/* Map the PCI memory resource of device */
-	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
-
-		char devname[PATH_MAX];
-		int res_fd;
-
-		if (pci->mem_resource[i].phys_addr == 0)
-			continue;
-		snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
-		res_fd = open(devname, O_RDWR);
-		if (res_fd < 0)
-			goto fail;
-		map_addr = mmap(NULL, pci->mem_resource[i].len,
-				PROT_READ | PROT_WRITE,
-				MAP_SHARED, res_fd, 0);
-		if (map_addr == MAP_FAILED)
-			goto fail;
-
-		pci->mem_resource[i].addr = map_addr;
-	}
+	rte_pci_map_device(pci);
 
 	/* device is valid, add in list */
 	if (ccp_add_device(ccp_dev, ccp_type)) {
@@ -784,6 +751,7 @@ ccp_probe_devices(const struct rte_pci_id *ccp_id)
 	if (module_idx < 0)
 		return -1;
 
+	iommu_mode = module_idx;
 	TAILQ_INIT(&ccp_list);
 	dir = opendir(SYSFS_PCI_DEVICES);
 	if (dir == NULL)
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
index 1702a09c4..38029a908 100644
--- a/drivers/crypto/ccp/ccp_pci.c
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -15,6 +15,7 @@
 static const char * const uio_module_names[] = {
 	"igb_uio",
 	"uio_pci_generic",
+	"vfio_pci"
 };
 
 int
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 000b2f4fe..ba379a19f 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -22,6 +22,7 @@
 static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 uint8_t cryptodev_cnt;
+extern void *sha_ctx;
 
 struct ccp_pmd_init_params {
 	struct rte_cryptodev_pmd_init_params def_p;
@@ -305,6 +306,7 @@ cryptodev_ccp_remove(struct rte_vdev_device *dev)
 
 	ccp_pmd_init_done = 0;
 	name = rte_vdev_device_name(dev);
+	rte_free(sha_ctx);
 	if (name == NULL)
 		return -EINVAL;
 
@@ -388,6 +390,7 @@ cryptodev_ccp_probe(struct rte_vdev_device *vdev)
 	};
 	const char *input_args;
 
+	sha_ctx = (void *)rte_malloc(NULL, SHA512_DIGEST_SIZE, 64);
 	if (ccp_pmd_init_done) {
 		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
 		return -EFAULT;
-- 
2.25.1
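
Beyond rebasing, v3 differs from v2 mainly in following two API renames
visible in the hunks above: RTE_KDRV_* became RTE_PCI_KDRV_*, and
rte_pktmbuf_mtophys_offset() became rte_pktmbuf_iova_offset(). The probing
change is easiest to read condensed; ccp_map_bars() below is a hypothetical
wrapper around the new logic, with error handling elided as in the diff.

    #include <rte_bus_pci.h>  /* struct rte_pci_device, rte_pci_map_device() */

    extern int iommu_mode;    /* 0 = igb_uio, 1 = uio_pci_generic,
                               * 2 = vfio-pci */

    /* Record which kernel driver owns the device, then let the PCI bus
     * code mmap the BARs via rte_pci_map_device() in place of the
     * driver's former hand-rolled /dev/uio open + mmap loop. */
    static int
    ccp_map_bars(struct rte_pci_device *pci)
    {
            if (iommu_mode == 2)
                    pci->kdrv = RTE_PCI_KDRV_VFIO;
            else if (iommu_mode == 0)
                    pci->kdrv = RTE_PCI_KDRV_IGB_UIO;
            else
                    pci->kdrv = RTE_PCI_KDRV_UIO_GENERIC;

            return rte_pci_map_device(pci);
    }

iommu_mode itself is simply the index of whichever entry in
uio_module_names[] was found loaded, which is why appending "vfio_pci" to
that array in ccp_pci.c makes mode 2 mean VFIO.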



* Re: [dpdk-dev] [PATCH v3] crypto/ccp: enable IOMMU for CCP
  2020-12-25  8:03 ` [dpdk-dev] [PATCH v3] " asomalap
@ 2021-01-15 15:58   ` Akhil Goyal
  2021-05-27 13:24   ` David Marchand
  1 sibling, 0 replies; 14+ messages in thread
From: Akhil Goyal @ 2021-01-15 15:58 UTC (permalink / raw)
  To: asomalap, dev

> Subject: [PATCH v3] crypto/ccp: enable IOMMU for CCP
> 
> From: Amaranath Somalapuram <Amaranath.Somalapuram@amd.com>
> 
> CCP uses the vdev framework, and the vdev framework does not support IOMMU.
> Add custom IOMMU support for the AMD CCP driver.
> 
> Signed-off-by: Amaranath Somalapuram <Amaranath.Somalapuram@amd.com>
> ---
Applied to dpdk-next-crypto

Thanks.



* Re: [dpdk-dev] [PATCH v3] crypto/ccp: enable IOMMU for CCP
  2020-12-25  8:03 ` [dpdk-dev] [PATCH v3] " asomalap
  2021-01-15 15:58   ` Akhil Goyal
@ 2021-05-27 13:24   ` David Marchand
  2021-05-27 14:10     ` Thomas Monjalon
  1 sibling, 1 reply; 14+ messages in thread
From: David Marchand @ 2021-05-27 13:24 UTC (permalink / raw)
  To: Somalapuram Amaranath; +Cc: dev, Akhil Goyal, Thomas Monjalon

On Fri, Dec 25, 2020 at 9:06 AM <asomalap@amd.com> wrote:
>
> From: Amaranath Somalapuram <Amaranath.Somalapuram@amd.com>
>
> CCP uses the vdev framework, and the vdev framework doesn't support IOMMU.
> Adding custom IOMMU support for the AMD CCP driver.

I am currently looking at pci bus patches/cleanups.
I ended up looking at crypto/ccp.
This driver code contains a lot of features duplicated with the pci bus.

Why is the ccp driver not a PCI driver?


-- 
David Marchand


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [dpdk-dev] [PATCH v3] crypto/ccp: enable IOMMU for CCP
  2021-05-27 13:24   ` David Marchand
@ 2021-05-27 14:10     ` Thomas Monjalon
  2021-05-28  5:02       ` Somalapuram, Amaranath
  0 siblings, 1 reply; 14+ messages in thread
From: Thomas Monjalon @ 2021-05-27 14:10 UTC (permalink / raw)
  To: Somalapuram Amaranath, David Marchand
  Cc: dev, Akhil Goyal, Ravi1.Kumar@amd.com, keesang.song

27/05/2021 15:24, David Marchand:
> On Fri, Dec 25, 2020 at 9:06 AM <asomalap@amd.com> wrote:
> >
> > From: Amaranath Somalapuram <Amaranath.Somalapuram@amd.com>
> >
> > CCP uses the vdev framework, and the vdev framework doesn't support IOMMU.
> > Adding custom IOMMU support for the AMD CCP driver.
> 
> I am currently looking at pci bus patches/cleanups.
> I ended up looking at crypto/ccp.
> This driver code contains a lot of features duplicated with the pci bus.
> 
> Why is the ccp driver not a PCI driver?

Indeed, it looks like an abuse of vdev.
We should drop all the code duplicating the PCI bus driver.
If nothing else is done, it would mean breaking the probing
of this driver.

Adding more people to the Cc list to have a fix before it is broken,
thanks.



^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [dpdk-dev] [PATCH v3] crypto/ccp: enable IOMMU for CCP
  2021-05-27 14:10     ` Thomas Monjalon
@ 2021-05-28  5:02       ` Somalapuram, Amaranath
  2021-05-28  7:20         ` Thomas Monjalon
  0 siblings, 1 reply; 14+ messages in thread
From: Somalapuram, Amaranath @ 2021-05-28  5:02 UTC (permalink / raw)
  To: Thomas Monjalon, David Marchand
  Cc: dev, Akhil Goyal, Kumar, Ravi1, Song, Keesang

-----Original Message-----
From: Thomas Monjalon <thomas@monjalon.net> 
Sent: Thursday, May 27, 2021 7:40 PM
To: Somalapuram, Amaranath <Amaranath.Somalapuram@amd.com>; David Marchand <david.marchand@redhat.com>
Cc: dev <dev@dpdk.org>; Akhil Goyal <gakhil@marvell.com>; Kumar, Ravi1 <Ravi1.Kumar@amd.com>; Song, Keesang <Keesang.Song@amd.com>
Subject: Re: [dpdk-dev] [PATCH v3] crypto/ccp: enable IOMMU for CCP

27/05/2021 15:24, David Marchand:
> On Fri, Dec 25, 2020 at 9:06 AM <asomalap@amd.com> wrote:
> >
> > From: Amaranath Somalapuram <Amaranath.Somalapuram@amd.com>
> >
> > CCP uses the vdev framework, and the vdev framework doesn't support IOMMU.
> > Adding custom IOMMU support for the AMD CCP driver.
>
> I am currently looking at pci bus patches/cleanups.
> I ended up looking at crypto/ccp.
> This driver code contains a lot of features duplicated with the pci bus.
>
> Why is the ccp driver not a PCI driver?

Indeed, it looks like an abuse of vdev.
We should drop all the code duplicating the PCI bus driver.
If nothing else is done, it would mean breaking the probing of this driver.

Adding more people to the Cc list to have a fix before it is broken, thanks.

Enabling IOMMU for vdev was not supported in DPDK.
I can remove all the duplicated code after I test the CCP with IOMMU.
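
One note on that duplicated code: rte_mem_virt2iova() already returns the
virtual address when EAL runs in RTE_IOVA_VA mode, so a driver can ask EAL
for its IOVA mode instead of keying an iommu_mode flag off which uio/vfio
kernel module happens to be loaded. A minimal sketch, with a hypothetical
ccp_virt2iova() helper:

#include <rte_eal.h>
#include <rte_memory.h>

/* Hypothetical helper: pick the virtual-to-IOVA translation from the
 * EAL IOVA mode instead of from the loaded uio/vfio kernel module.
 * Note that rte_mem_virt2iova() already performs this check internally.
 */
static rte_iova_t
ccp_virt2iova(const void *addr)
{
	if (rte_eal_iova_mode() == RTE_IOVA_VA)
		return rte_mem_virt2iova(addr);

	return rte_mem_virt2phy(addr);
}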

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [dpdk-dev] [PATCH v3] crypto/ccp: enable IOMMU for CCP
  2021-05-28  5:02       ` Somalapuram, Amaranath
@ 2021-05-28  7:20         ` Thomas Monjalon
  2021-05-28  9:40           ` Somalapuram, Amaranath
  0 siblings, 1 reply; 14+ messages in thread
From: Thomas Monjalon @ 2021-05-28  7:20 UTC (permalink / raw)
  To: Somalapuram, Amaranath
  Cc: David Marchand, dev, Akhil Goyal, Kumar, Ravi1, Song, Keesang

28/05/2021 07:02, Somalapuram, Amaranath:
> From: Thomas Monjalon <thomas@monjalon.net>
> > 27/05/2021 15:24, David Marchand:
> > > On Fri, Dec 25, 2020 at 9:06 AM <asomalap@amd.com> wrote:
> > > > From: Amaranath Somalapuram <Amaranath.Somalapuram@amd.com>
> > > > 
> > > > CCP uses the vdev framework, and the vdev framework doesn't support IOMMU.
> > > > Adding custom IOMMU support for the AMD CCP driver.
> > > 
> > > I am currently looking at pci bus patches/cleanups.
> > > I ended up looking at crypto/ccp.
> > > This driver code contains a lot of features duplicated with the pci bus.
> > > 
> > > Why is the ccp driver not a PCI driver?
> > 
> > Indeed, it looks like an abuse of vdev.
> > We should drop all the code duplicating the PCI bus driver.
> > If nothing else is done, it would mean breaking the probing of this
> > driver.
> > 
> > Adding more people to the Cc list to have a fix before it is broken, thanks.
> 
> Enabling IOMMU for vdev was not supported in DPDK.
> I can remove all the duplicated code after I test the CCP with IOMMU.

I think you didn't get it.
It should not be a vdev.
We want to switch the driver to a true PCI device,
and remove all the code copied from the PCI bus driver.




^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [dpdk-dev] [PATCH v3] crypto/ccp: enable IOMMU for CCP
  2021-05-28  7:20         ` Thomas Monjalon
@ 2021-05-28  9:40           ` Somalapuram, Amaranath
  0 siblings, 0 replies; 14+ messages in thread
From: Somalapuram, Amaranath @ 2021-05-28  9:40 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: David Marchand, dev, Akhil Goyal, Kumar, Ravi1, Song, Keesang

-----Original Message-----
From: Thomas Monjalon <thomas@monjalon.net> 
Sent: Friday, May 28, 2021 12:51 PM
To: Somalapuram, Amaranath <Amaranath.Somalapuram@amd.com>
Cc: David Marchand <david.marchand@redhat.com>; dev <dev@dpdk.org>; Akhil Goyal <gakhil@marvell.com>; Kumar, Ravi1 <Ravi1.Kumar@amd.com>; Song, Keesang <Keesang.Song@amd.com>
Subject: Re: [dpdk-dev] [PATCH v3] crypto/ccp: enable IOMMU for CCP

28/05/2021 07:02, Somalapuram, Amaranath:
> From: Thomas Monjalon <thomas@monjalon.net>
> > 27/05/2021 15:24, David Marchand:
> > > On Fri, Dec 25, 2020 at 9:06 AM <asomalap@amd.com> wrote:
> > > > From: Amaranath Somalapuram <Amaranath.Somalapuram@amd.com>
> > > >
> > > > CCP uses the vdev framework, and the vdev framework doesn't support IOMMU.
> > > > Adding custom IOMMU support for the AMD CCP driver.
> > >
> > > I am currently looking at pci bus patches/cleanups.
> > > I ended up looking at crypto/ccp.
> > > This driver code contains a lot of features duplicated with the pci bus.
> > >
> > > Why is the ccp driver not a PCI driver?
> >
> > Indeed, it looks like an abuse of vdev.
> > We should drop all the code duplicating the PCI bus driver.
> > If nothing else is done, it would mean breaking the probing of this 
> > driver.
> >
> > Adding more people to the Cc list to have a fix before it is broken, thanks.
>
> Enabling IOMMU for vdev was not supported in DPDK.
> I can remove all the duplicated code after I test the CCP with IOMMU.

I think you didn't get it.
It should not be a vdev.
We want to switch the driver to a true PCI device, and remove all the code copied from the PCI bus driver.

We will implement CCP as a true PCI device.
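
For reference, a minimal sketch of what registering CCP as a native PCI
driver could look like, using the standard rte_pci_driver plumbing. The
callback bodies are stubs and the single device ID is only an example;
a real conversion would list every CCP device ID and move the cryptodev
and queue setup into the probe callback.

#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_pci.h>

#define AMD_PCI_VENDOR_ID 0x1022
#define AMD_PCI_CCP_EXAMPLE 0x1456	/* one CCP device ID, for illustration */

static int
ccp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	      struct rte_pci_device *pci_dev)
{
	/* BARs are already mapped by the bus (RTE_PCI_DRV_NEED_MAPPING);
	 * cryptodev registration would go here. */
	(void)pci_dev;
	return 0;
}

static int
ccp_pci_remove(struct rte_pci_device *pci_dev)
{
	(void)pci_dev;
	return 0;
}

static const struct rte_pci_id pci_id_ccp_map[] = {
	{ RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_CCP_EXAMPLE) },
	{ .vendor_id = 0, },
};

static struct rte_pci_driver ccp_pci_drv = {
	.id_table = pci_id_ccp_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = ccp_pci_probe,
	.remove = ccp_pci_remove,
};

RTE_PMD_REGISTER_PCI(crypto_ccp, ccp_pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(crypto_ccp, pci_id_ccp_map);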

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2021-05-28  9:40 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-01-28  8:38 [dpdk-dev] [PATCH v2] crypto/ccp: enable IOMMU for CCP asomalap
2020-02-12  7:11 ` Akhil Goyal
2020-02-12  7:44   ` Somalapuram, Amaranath
2020-02-12 10:05     ` Akhil Goyal
2020-02-12 10:31 ` Kumar, Ravi1
2020-02-12 13:50   ` Akhil Goyal
2020-02-14 15:53     ` [dpdk-dev] [dpdk-stable] " Thomas Monjalon
2020-12-25  8:03 ` [dpdk-dev] [PATCH v3] " asomalap
2021-01-15 15:58   ` Akhil Goyal
2021-05-27 13:24   ` David Marchand
2021-05-27 14:10     ` Thomas Monjalon
2021-05-28  5:02       ` Somalapuram, Amaranath
2021-05-28  7:20         ` Thomas Monjalon
2021-05-28  9:40           ` Somalapuram, Amaranath

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).