* [dpdk-dev] [PATCH v1 5/6] crypto/ccp: enable IOMMU for CCP
From: asomalap @ 2019-10-15 7:02 UTC
To: dev; +Cc: stable
From: Amaranath Somalapuram <asomalap@amd.com>
CCP uses the vdev framework, and the vdev framework does not support IOMMU.
Add custom IOMMU support for the AMD CCP driver.
Cc: stable@dpdk.org
Signed-off-by: Amaranath Somalapuram <asomalap@amd.com>
---
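In outline, the patch detects which kernel module is bound to the CCP device (igb_uio, uio_pci_generic or vfio-pci) and switches every DMA address lookup on that result. A condensed, illustrative sketch of the dispatch the diff below repeats (the helper name is hypothetical and not part of the patch):

#include <rte_memory.h>

/* 0 = igb_uio, 1 = uio_pci_generic, 2 = vfio-pci, mirroring the
 * iommu_mode values used throughout the diff below. */
static int iommu_mode;

static phys_addr_t
ccp_virt2hw(void *vaddr)
{
	/* With vfio-pci the device sits behind the IOMMU, so DMA uses
	 * IOVAs; with the UIO drivers raw physical addresses are used. */
	if (iommu_mode == 2)
		return (phys_addr_t)rte_mem_virt2iova(vaddr);
	return (phys_addr_t)rte_mem_virt2phy(vaddr);
}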
drivers/crypto/ccp/ccp_crypto.c | 239 ++++++++++++++++++++++++-------
drivers/crypto/ccp/ccp_dev.c | 56 ++------
drivers/crypto/ccp/ccp_dev.h | 2 +-
drivers/crypto/ccp/ccp_pci.c | 1 +
drivers/crypto/ccp/ccp_pci.h | 1 +
drivers/crypto/ccp/rte_ccp_pmd.c | 5 +-
6 files changed, 202 insertions(+), 102 deletions(-)
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 1837c8543..8862a1a84 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -31,8 +31,11 @@
#include <openssl/err.h>
#include <openssl/hmac.h>
+extern int iommu_mode;
+
/* SHA initial context values */
-static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+void *sha_ctx;
+uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
SHA1_H4, SHA1_H3,
SHA1_H2, SHA1_H1,
SHA1_H0, 0x0U,
@@ -744,8 +747,13 @@ ccp_configure_session_cipher(struct ccp_session *sess,
CCP_LOG_ERR("Invalid CCP Engine");
return -ENOTSUP;
}
- sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
- sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ if (iommu_mode == 2) {
+ sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
+ } else {
+ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ }
return 0;
}
@@ -784,6 +792,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
sess->auth.ctx = (void *)ccp_sha1_init;
sess->auth.ctx_len = CCP_SB_BYTES;
sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ rte_memcpy(sha_ctx, sess->auth.ctx, SHA_COMMON_DIGEST_SIZE);
break;
case RTE_CRYPTO_AUTH_SHA1_HMAC:
if (sess->auth_opt) {
@@ -822,6 +831,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
sess->auth.ctx = (void *)ccp_sha224_init;
sess->auth.ctx_len = CCP_SB_BYTES;
sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
break;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
if (sess->auth_opt) {
@@ -884,6 +894,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
sess->auth.ctx = (void *)ccp_sha256_init;
sess->auth.ctx_len = CCP_SB_BYTES;
sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
if (sess->auth_opt) {
@@ -946,6 +957,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
sess->auth.ctx = (void *)ccp_sha384_init;
sess->auth.ctx_len = CCP_SB_BYTES << 1;
sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+ rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
break;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
if (sess->auth_opt) {
@@ -1010,6 +1022,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
sess->auth.ctx = (void *)ccp_sha512_init;
sess->auth.ctx_len = CCP_SB_BYTES << 1;
sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+ rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
if (sess->auth_opt) {
@@ -1159,8 +1172,13 @@ ccp_configure_session_aead(struct ccp_session *sess,
CCP_LOG_ERR("Unsupported aead algo");
return -ENOTSUP;
}
- sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
- sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ if (iommu_mode == 2) {
+ sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
+ } else {
+ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ }
return 0;
}
@@ -1571,15 +1589,25 @@ ccp_perform_hmac(struct rte_crypto_op *op,
ccp_cryptodev_driver_id);
addr = session->auth.pre_compute;
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
- op->sym->auth.data.offset);
append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
session->auth.ctx_len);
- dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+ if (iommu_mode == 2) {
+ src_addr = (phys_addr_t)rte_mem_virt2iova(
+ rte_pktmbuf_mtod_offset(op->sym->m_src,
+ phys_addr_t*,
+ op->sym->auth.data.offset));
+ dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+ } else {
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ }
dest_addr_t = dest_addr;
/** Load PHash1 to LSB*/
- pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+
pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
pst.len = session->auth.ctx_len;
pst.dir = 1;
@@ -1659,7 +1687,11 @@ ccp_perform_hmac(struct rte_crypto_op *op,
/** Load PHash2 to LSB*/
addr += session->auth.ctx_len;
- pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ if (iommu_mode == 2)
+ pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+ else
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+
pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
pst.len = session->auth.ctx_len;
pst.dir = 1;
@@ -1743,17 +1775,23 @@ ccp_perform_sha(struct rte_crypto_op *op,
op->sym->session,
ccp_cryptodev_driver_id);
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
- op->sym->auth.data.offset);
-
append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
session->auth.ctx_len);
- dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
-
/** Passthru sha context*/
+ if (iommu_mode == 2) {
+ src_addr = (phys_addr_t)rte_mem_virt2iova(
+ rte_pktmbuf_mtod_offset(op->sym->m_src, void*,
+ op->sym->auth.data.offset));
+ dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+ pst.src_addr = (phys_addr_t)sha_ctx;
+ } else {
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+ session->auth.ctx);
+ }
- pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
- session->auth.ctx);
pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
pst.len = session->auth.ctx_len;
pst.dir = 1;
@@ -1832,18 +1870,30 @@ ccp_perform_sha3_hmac(struct rte_crypto_op *op,
op->sym->session,
ccp_cryptodev_driver_id);
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
- op->sym->auth.data.offset);
append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
session->auth.ctx_len);
if (!append_ptr) {
CCP_LOG_ERR("CCP MBUF append failed\n");
return -1;
}
- dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ if (iommu_mode == 2) {
+ src_addr = (phys_addr_t)rte_mem_virt2iova(
+ rte_pktmbuf_mtod_offset(op->sym->m_src,
+ phys_addr_t*,
+ op->sym->auth.data.offset));
+ dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+ ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void
+ *)session->auth.pre_compute);
+ } else {
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
+ *)session->auth.pre_compute);
+ }
+
dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
- ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
- *)session->auth.pre_compute);
+
desc = &cmd_q->qbase_desc[cmd_q->qidx];
memset(desc, 0, Q_DESC_SIZE);
@@ -1964,7 +2014,7 @@ ccp_perform_sha3(struct rte_crypto_op *op,
struct ccp_session *session;
union ccp_function function;
struct ccp_desc *desc;
- uint8_t *ctx_addr, *append_ptr;
+ uint8_t *ctx_addr = NULL, *append_ptr = NULL;
uint32_t tail;
phys_addr_t src_addr, dest_addr, ctx_paddr;
@@ -1972,18 +2022,27 @@ ccp_perform_sha3(struct rte_crypto_op *op,
op->sym->session,
ccp_cryptodev_driver_id);
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
- op->sym->auth.data.offset);
append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
session->auth.ctx_len);
if (!append_ptr) {
CCP_LOG_ERR("CCP MBUF append failed\n");
return -1;
}
- dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
- ctx_addr = session->auth.sha3_ctx;
- ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ if (iommu_mode == 2) {
+ src_addr = (phys_addr_t)rte_mem_virt2iova(
+ rte_pktmbuf_mtod_offset(op->sym->m_src,
+ phys_addr_t*,
+ op->sym->auth.data.offset));
+ dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+ ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
+ } else {
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ }
+ ctx_addr = session->auth.sha3_ctx;
desc = &cmd_q->qbase_desc[cmd_q->qidx];
memset(desc, 0, Q_DESC_SIZE);
@@ -2032,20 +2091,29 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
struct ccp_passthru pst;
struct ccp_desc *desc;
uint32_t tail;
- uint8_t *src_tb, *append_ptr, *ctx_addr;
+ uint8_t *src_tb, *append_ptr = NULL, *ctx_addr;
phys_addr_t src_addr, dest_addr, key_addr;
int length, non_align_len;
session = (struct ccp_session *)get_sym_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
- key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+ if (iommu_mode == 2) {
+ key_addr = rte_mem_virt2iova(session->auth.key_ccp);
+ src_addr = (phys_addr_t)rte_mem_virt2iova(
+ rte_pktmbuf_mtod_offset(op->sym->m_src,
+ phys_addr_t*,
+ op->sym->auth.data.offset));
+ dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+ } else {
+ key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ }
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
- op->sym->auth.data.offset);
append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
session->auth.ctx_len);
- dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
function.raw = 0;
CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
@@ -2056,7 +2124,12 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
ctx_addr = session->auth.pre_compute;
memset(ctx_addr, 0, AES_BLOCK_SIZE);
- pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ if (iommu_mode == 2)
+ pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+ (void *)ctx_addr);
+ else
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+ (void *)ctx_addr);
pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
pst.len = CCP_SB_BYTES;
pst.dir = 1;
@@ -2094,7 +2167,12 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
} else {
ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
memset(ctx_addr, 0, AES_BLOCK_SIZE);
- pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ if (iommu_mode == 2)
+ pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+ (void *)ctx_addr);
+ else
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+ (void *)ctx_addr);
pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
pst.len = CCP_SB_BYTES;
pst.dir = 1;
@@ -2221,11 +2299,24 @@ ccp_perform_aes(struct rte_crypto_op *op,
desc = &cmd_q->qbase_desc[cmd_q->qidx];
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
- op->sym->cipher.data.offset);
- if (likely(op->sym->m_dst != NULL))
- dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
- op->sym->cipher.data.offset);
+ if (iommu_mode == 2)
+ src_addr = (phys_addr_t)rte_mem_virt2iova(
+ rte_pktmbuf_mtod_offset(op->sym->m_src, void *,
+ op->sym->cipher.data.offset));
+ else
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+ if (likely(op->sym->m_dst != NULL)) {
+ if (iommu_mode == 2)
+ dest_addr = (phys_addr_t)rte_mem_virt2iova(
+ rte_pktmbuf_mtod_offset(
+ op->sym->m_dst, void *,
+ op->sym->cipher.data.offset));
+ else
+ dest_addr = rte_pktmbuf_mtophys_offset(
+ op->sym->m_dst,
+ op->sym->cipher.data.offset);
+ }
else
dest_addr = src_addr;
key_addr = session->cipher.key_phys;
@@ -2289,7 +2380,13 @@ ccp_perform_3des(struct rte_crypto_op *op,
rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
iv, session->iv.length);
- pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+ if (iommu_mode == 2)
+ pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+ (void *) lsb_buf);
+ else
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+ (void *) lsb_buf);
+
pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
pst.len = CCP_SB_BYTES;
pst.dir = 1;
@@ -2303,16 +2400,30 @@ ccp_perform_3des(struct rte_crypto_op *op,
return -ENOTSUP;
}
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
- op->sym->cipher.data.offset);
- if (unlikely(op->sym->m_dst != NULL))
- dest_addr =
- rte_pktmbuf_mtophys_offset(op->sym->m_dst,
- op->sym->cipher.data.offset);
+ if (iommu_mode == 2)
+ src_addr = (phys_addr_t)rte_mem_virt2iova(
+ rte_pktmbuf_mtod_offset(op->sym->m_src, void *,
+ op->sym->cipher.data.offset));
+ else
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+ if (unlikely(op->sym->m_dst != NULL)) {
+ if (iommu_mode == 2)
+ dest_addr = (phys_addr_t)rte_mem_virt2iova(
+ rte_pktmbuf_mtod_offset(
+ op->sym->m_dst, void *,
+ op->sym->cipher.data.offset));
+ else
+ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->cipher.data.offset);
+ }
else
dest_addr = src_addr;
- key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+ if (iommu_mode == 2)
+ key_addr = rte_mem_virt2iova(session->cipher.key_ccp);
+ else
+ key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
desc = &cmd_q->qbase_desc[cmd_q->qidx];
@@ -2385,11 +2496,23 @@ ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
key_addr = session->cipher.key_phys;
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
- op->sym->aead.data.offset);
- if (unlikely(op->sym->m_dst != NULL))
- dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
- op->sym->aead.data.offset);
+ if (iommu_mode == 2)
+ src_addr = (phys_addr_t)rte_mem_virt2iova(
+ rte_pktmbuf_mtod_offset(op->sym->m_src, void *,
+ op->sym->aead.data.offset));
+ else
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->aead.data.offset);
+ if (unlikely(op->sym->m_dst != NULL)) {
+ if (iommu_mode == 2)
+ dest_addr = (phys_addr_t)rte_mem_virt2iova(
+ rte_pktmbuf_mtod_offset(
+ op->sym->m_dst, void *,
+ op->sym->aead.data.offset));
+ else
+ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->aead.data.offset);
+ }
else
dest_addr = src_addr;
rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len);
@@ -2704,8 +2827,14 @@ process_ops_to_enqueue(struct ccp_qp *qp,
b_info->lsb_buf_idx = 0;
b_info->desccnt = 0;
b_info->cmd_q = cmd_q;
- b_info->lsb_buf_phys =
- (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+
+ if (iommu_mode == 2)
+ b_info->lsb_buf_phys =
+ (phys_addr_t)rte_mem_virt2iova((void *)b_info->lsb_buf);
+ else
+ b_info->lsb_buf_phys =
+ (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+
rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 80fe6a453..90b14fa64 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -23,6 +23,8 @@
#include "ccp_pci.h"
#include "ccp_pmd_private.h"
+int iommu_mode;
+
struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
static int ccp_dev_id;
@@ -512,7 +514,7 @@ ccp_add_device(struct ccp_device *dev, int type)
CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
}
- CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+ CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x0);
/* Copy the private LSB mask to the public registers */
status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
@@ -657,9 +659,7 @@ ccp_probe_device(const char *dirname, uint16_t domain,
struct rte_pci_device *pci;
char filename[PATH_MAX];
unsigned long tmp;
- int uio_fd = -1, i, uio_num;
- char uio_devname[PATH_MAX];
- void *map_addr;
+ int uio_fd = -1;
ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
RTE_CACHE_LINE_SIZE);
@@ -710,46 +710,14 @@ ccp_probe_device(const char *dirname, uint16_t domain,
snprintf(filename, sizeof(filename), "%s/resource", dirname);
if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
goto fail;
+ if (iommu_mode == 2)
+ pci->kdrv = RTE_KDRV_VFIO;
+ else if (iommu_mode == 0)
+ pci->kdrv = RTE_KDRV_IGB_UIO;
+ else if (iommu_mode == 1)
+ pci->kdrv = RTE_KDRV_UIO_GENERIC;
- uio_num = ccp_find_uio_devname(dirname);
- if (uio_num < 0) {
- /*
- * It may take time for uio device to appear,
- * wait here and try again
- */
- usleep(100000);
- uio_num = ccp_find_uio_devname(dirname);
- if (uio_num < 0)
- goto fail;
- }
- snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
-
- uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
- if (uio_fd < 0)
- goto fail;
- if (flock(uio_fd, LOCK_EX | LOCK_NB))
- goto fail;
-
- /* Map the PCI memory resource of device */
- for (i = 0; i < PCI_MAX_RESOURCE; i++) {
-
- char devname[PATH_MAX];
- int res_fd;
-
- if (pci->mem_resource[i].phys_addr == 0)
- continue;
- snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
- res_fd = open(devname, O_RDWR);
- if (res_fd < 0)
- goto fail;
- map_addr = mmap(NULL, pci->mem_resource[i].len,
- PROT_READ | PROT_WRITE,
- MAP_SHARED, res_fd, 0);
- if (map_addr == MAP_FAILED)
- goto fail;
-
- pci->mem_resource[i].addr = map_addr;
- }
+ rte_pci_map_device(pci);
/* device is valid, add in list */
if (ccp_add_device(ccp_dev, ccp_type)) {
@@ -783,7 +751,7 @@ ccp_probe_devices(const struct rte_pci_id *ccp_id)
module_idx = ccp_check_pci_uio_module();
if (module_idx < 0)
return -1;
-
+ iommu_mode = module_idx;
TAILQ_INIT(&ccp_list);
dir = opendir(SYSFS_PCI_DEVICES);
if (dir == NULL)
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index de3e4bcc6..f4ad9eafd 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -59,7 +59,7 @@
#define CMD_Q_RUN 0x1
#define CMD_Q_SIZE 0x1F
#define CMD_Q_SHIFT 3
-#define COMMANDS_PER_QUEUE 2048
+#define COMMANDS_PER_QUEUE 8192
#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \
CMD_Q_SIZE)
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
index 1702a09c4..38029a908 100644
--- a/drivers/crypto/ccp/ccp_pci.c
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -15,6 +15,7 @@
static const char * const uio_module_names[] = {
"igb_uio",
"uio_pci_generic",
+ "vfio_pci"
};
int
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
index 7ed3bac40..bcde1d970 100644
--- a/drivers/crypto/ccp/ccp_pci.h
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -12,6 +12,7 @@
#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
#define PROC_MODULES "/proc/modules"
+
int ccp_check_pci_uio_module(void);
int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 4810d799c..a182c6a52 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -22,6 +22,7 @@
*/
static unsigned int ccp_pmd_init_done;
uint8_t ccp_cryptodev_driver_id;
+extern void *sha_ctx;
struct ccp_pmd_init_params {
struct rte_cryptodev_pmd_init_params def_p;
@@ -279,6 +280,7 @@ cryptodev_ccp_remove(struct rte_vdev_device *dev)
ccp_pmd_init_done = 0;
name = rte_vdev_device_name(dev);
+ rte_free((void *) sha_ctx);
if (name == NULL)
return -EINVAL;
@@ -296,7 +298,6 @@ cryptodev_ccp_create(const char *name,
{
struct rte_cryptodev *dev;
struct ccp_private *internals;
- uint8_t cryptodev_cnt = 0;
if (init_params->def_p.name[0] == '\0')
strlcpy(init_params->def_p.name, name,
@@ -361,7 +362,7 @@ cryptodev_ccp_probe(struct rte_vdev_device *vdev)
.auth_opt = CCP_PMD_AUTH_OPT_CCP,
};
const char *input_args;
-
+ sha_ctx = (void *)rte_malloc(NULL, SHA512_DIGEST_SIZE, 64);
if (ccp_pmd_init_done) {
RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
return -EFAULT;
--
2.17.1
* Re: [dpdk-dev] [PATCH v1 5/6] crypto/ccp: enable IOMMU for CCP
From: Jerin Jacob @ 2019-10-15 8:16 UTC
To: asomalap; +Cc: dev, stable
On Tue, Oct 15, 2019 at 12:32 PM <asomalap@amd.com> wrote:
>
> From: Amaranath Somalapuram <asomalap@amd.com>
>
> CCP uses the vdev framework, and the vdev framework does not support IOMMU.
> Add custom IOMMU support for the AMD CCP driver.
> Cc: stable@dpdk.org
>
> + if (iommu_mode == 2)
> + pci->kdrv = RTE_KDRV_VFIO;
> + else if (iommu_mode == 0)
> + pci->kdrv = RTE_KDRV_IGB_UIO;
> + else if (iommu_mode == 1)
> + pci->kdrv = RTE_KDRV_UIO_GENERIC;
The crypto driver should not have IOMMU mode-specific handling.
I am not sure about the problem statement. If the problem is
IOMMU support for PCI-based vdev devices, then move the solution to a
common layer so that everyone can use it. If not, please share the
problem statement.
* Re: [dpdk-dev] [PATCH v1 5/6] crypto/ccp: enable IOMMU for CCP
From: Somalapuram, Amaranath @ 2019-10-15 8:35 UTC
To: Jerin Jacob; +Cc: dev, stable
Problem statement: as of now, vdev devices do not support IOMMU.
vdev devices resort to custom solutions; even software drivers like openssl use vdev.
I feel it is not advisable to put IOMMU handling in vdev.
Moving the changes to vdev would affect the rest of the vdev drivers.
That would be a big effort. Every vdev driver has its own implementation. A better design is needed to move this to vdev or a common place.
Regards,
S.Amarnath
-----Original Message-----
From: Jerin Jacob <jerinjacobk@gmail.com>
Sent: Tuesday, October 15, 2019 1:47 PM
To: Somalapuram, Amaranath <Amaranath.Somalapuram@amd.com>
Cc: dev@dpdk.org; stable@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v1 5/6] crypto/ccp: enable IOMMU for CCP
On Tue, Oct 15, 2019 at 12:32 PM <asomalap@amd.com> wrote:
>
> From: Amaranath Somalapuram <asomalap@amd.com>
>
> CCP uses the vdev framework, and the vdev framework does not support IOMMU.
> Add custom IOMMU support for the AMD CCP driver.
> Cc: stable@dpdk.org
>
> + if (iommu_mode == 2)
> + pci->kdrv = RTE_KDRV_VFIO;
> + else if (iommu_mode == 0)
> + pci->kdrv = RTE_KDRV_IGB_UIO;
> + else if (iommu_mode == 1)
> + pci->kdrv = RTE_KDRV_UIO_GENERIC;
The crypto driver should not have IOMMU mode-specific handling.
I am not sure about the problem statement. If the problem is IOMMU support for PCI-based vdev devices, then move the solution to a common layer so that everyone can use it. If not, please share the problem statement.
* Re: [dpdk-dev] [PATCH v1 5/6] crypto/ccp: enable IOMMU for CCP
From: Jerin Jacob @ 2019-10-15 9:43 UTC
To: Somalapuram, Amaranath; +Cc: dev, stable
On Tue, Oct 15, 2019 at 2:05 PM Somalapuram, Amaranath
<Amaranath.Somalapuram@amd.com> wrote:
>
> Problem statement: as of now, vdev devices do not support IOMMU.
> vdev devices resort to custom solutions; even software drivers like openssl use vdev.
I spent some time going through the driver.
# The ideal architecture for this driver would have been to introduce a
bus driver (see drivers/bus/ifpga/) that does all the PCI
probing (drivers/crypto/ccp/rte_ccp_pmd.c and
drivers/crypto/ccp/ccp_pci.c),
arranges the devices on the bus scan, and leaves a bus-agnostic
crypto driver, so that there is no PCI probing in the crypto driver.
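A rough skeleton of what such a bus could look like (illustrative only; the names and empty stubs below are placeholders, not existing code):

#include <rte_bus.h>

/* Placeholder scan/probe stubs: a real implementation would walk
 * /sys/bus/pci/devices for the AMD CCP vendor/device IDs on scan and
 * hand each matched engine to the crypto PMD on probe. */
static int
ccp_bus_scan(void)
{
	return 0;
}

static int
ccp_bus_probe(void)
{
	return 0;
}

static struct rte_bus ccp_bus = {
	.scan = ccp_bus_scan,
	.probe = ccp_bus_probe,
};

RTE_REGISTER_BUS(ccp, ccp_bus);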
# On the upside, you do not need to pass vdev= in the EAL arguments; on AMD
machines the bus can detect the crypto devices automatically and probe the
crypto driver.
# I do not think this is specific to vdev IOMMU support; if it had
been a PCI or any other bus driver, you would have similar changes. Right?
# The major portion of this patch does the following:
+ if (iommu_mode == 2)
+ pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+ (void *) lsb_buf);
+ else
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+ (void *) lsb_buf);
+
Since the following check is already present in common code, do we need the
check above? Just calling rte_mem_virt2iova() should be enough. Right?
rte_iova_t
rte_mem_virt2iova(const void *virtaddr)
{
if (rte_eal_iova_mode() == RTE_IOVA_VA)
return (uintptr_t)virtaddr;
return rte_mem_virt2phy(virtaddr);
}
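Continuing the quoted snippet, the driver-side branch could then collapse to a single unconditional call, e.g. (illustrative only):

	/* rte_mem_virt2iova() returns the VA unchanged in RTE_IOVA_VA mode and
	 * falls back to rte_mem_virt2phy() otherwise, so the iommu_mode test
	 * in the driver adds nothing. */
	pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)lsb_buf);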
> I feel it is not advisable to put IOMMU handling in vdev.
> Moving the changes to vdev would affect the rest of the vdev drivers.
> That would be a big effort. Every vdev driver has its own implementation. A better design is needed to move this to vdev or a common place.
>
> Regards,
> S.Amarnath
>
> -----Original Message-----
> From: Jerin Jacob <jerinjacobk@gmail.com>
> Sent: Tuesday, October 15, 2019 1:47 PM
> To: Somalapuram, Amaranath <Amaranath.Somalapuram@amd.com>
> Cc: dev@dpdk.org; stable@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v1 5/6] crypto/ccp: enable IOMMU for CCP
>
>
> On Tue, Oct 15, 2019 at 12:32 PM <asomalap@amd.com> wrote:
> >
> > From: Amaranath Somalapuram <asomalap@amd.com>
> >
> > CCP uses the vdev framework, and the vdev framework does not support IOMMU.
> > Add custom IOMMU support for the AMD CCP driver.
> > Cc: stable@dpdk.org
> >
> > + if (iommu_mode == 2)
> > + pci->kdrv = RTE_KDRV_VFIO;
> > + else if (iommu_mode == 0)
> > + pci->kdrv = RTE_KDRV_IGB_UIO;
> > + else if (iommu_mode == 1)
> > + pci->kdrv = RTE_KDRV_UIO_GENERIC;
>
> The crypto driver should not have IOMMU mode-specific handling.
> I am not sure about the problem statement. If the problem is IOMMU support for PCI-based vdev devices, then move the solution to a common layer so that everyone can use it. If not, please share the problem statement.
* Re: [dpdk-dev] [PATCH v1 5/6] crypto/ccp: enable IOMMU for CCP
From: Somalapuram, Amaranath @ 2019-10-22 5:13 UTC
To: Jerin Jacob; +Cc: dev, stable
Hi Jerin,
I will check how to improve this. Will resubmit this patch with your suggested changes.
Regards,
S.Amarnath
-----Original Message-----
From: Jerin Jacob <jerinjacobk@gmail.com>
Sent: Tuesday, October 15, 2019 3:14 PM
To: Somalapuram, Amaranath <Amaranath.Somalapuram@amd.com>
Cc: dev@dpdk.org; stable@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v1 5/6] crypto/ccp: enable IOMMU for CCP
On Tue, Oct 15, 2019 at 2:05 PM Somalapuram, Amaranath <Amaranath.Somalapuram@amd.com> wrote:
>
> Problem statement: as of now, vdev devices do not support IOMMU.
> vdev devices resort to custom solutions; even software drivers like openssl use vdev.
I spent some time going through the driver.
# The ideal architecture for this driver would have been to introduce a bus driver (see drivers/bus/ifpga/) that does all the PCI probing (drivers/crypto/ccp/rte_ccp_pmd.c and
drivers/crypto/ccp/ccp_pci.c),
arranges the devices on the bus scan, and leaves a bus-agnostic crypto driver, so that there is no PCI probing in the crypto driver.
# On the upside, you do not need to pass vdev= in the EAL arguments; on AMD machines the bus can detect the crypto devices automatically and probe the crypto driver.
# I do not think this is specific to vdev IOMMU support; if it had been a PCI or any other bus driver, you would have similar changes. Right?
# The major portion of this patch does the following:
+ if (iommu_mode == 2)
+ pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+ (void *) lsb_buf);
+ else
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+ (void *) lsb_buf);
+
Since the following check is already present in common code, do we need the check above? Just calling rte_mem_virt2iova() should be enough. Right?
rte_iova_t
rte_mem_virt2iova(const void *virtaddr)
{
if (rte_eal_iova_mode() == RTE_IOVA_VA)
return (uintptr_t)virtaddr;
	return rte_mem_virt2phy(virtaddr);
}
> I feel it is not advisable to put IOMMU handling in vdev.
> Moving the changes to vdev would affect the rest of the vdev drivers.
> That would be a big effort. Every vdev driver has its own implementation. A better design is needed to move this to vdev or a common place.
>
> Regards,
> S.Amarnath
>
> -----Original Message-----
> From: Jerin Jacob <jerinjacobk@gmail.com>
> Sent: Tuesday, October 15, 2019 1:47 PM
> To: Somalapuram, Amaranath <Amaranath.Somalapuram@amd.com>
> Cc: dev@dpdk.org; stable@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v1 5/6] crypto/ccp: enable IOMMU for
> CCP
>
>
> On Tue, Oct 15, 2019 at 12:32 PM <asomalap@amd.com> wrote:
> >
> > From: Amaranath Somalapuram <asomalap@amd.com>
> >
> > CCP uses the vdev framework, and the vdev framework does not support IOMMU.
> > Add custom IOMMU support for the AMD CCP driver.
> > Cc: stable@dpdk.org
> >
> > + if (iommu_mode == 2)
> > + pci->kdrv = RTE_KDRV_VFIO;
> > + else if (iommu_mode == 0)
> > + pci->kdrv = RTE_KDRV_IGB_UIO;
> > + else if (iommu_mode == 1)
> > + pci->kdrv = RTE_KDRV_UIO_GENERIC;
>
> The crypto driver should not have IOMMU mode-specific handling.
> I am not sure about the problem statement. If the problem is IOMMU
> support for PCI-based vdev devices, then move the solution to a common
> layer so that everyone can use it. If not, please share the problem
> statement.
* Re: [dpdk-dev] [PATCH v1 5/6] crypto/ccp: enable IOMMU for CCP
From: Akhil Goyal @ 2019-10-15 11:08 UTC
To: asomalap, dev; +Cc: stable
Title: crypto/ccp: enable IOMMU
>
> From: Amaranath Somalapuram <asomalap@amd.com>
>
> CCP uses the vdev framework, and the vdev framework does not support IOMMU.
> Add custom IOMMU support for the AMD CCP driver.
> Cc: stable@dpdk.org
This patch does not look like a fix. Do you really want to backport this patch?
>
> Signed-off-by: Amaranath Somalapuram <asomalap@amd.com>
> ---
* Re: [dpdk-dev] [PATCH v1 5/6] crypto/ccp: enable IOMMU for CCP
From: Somalapuram, Amaranath @ 2019-10-22 5:14 UTC
To: Akhil Goyal, dev; +Cc: stable
Hi Akhil,
I need to resubmit this patch, so please ignore this version.
-----Original Message-----
From: Akhil Goyal <akhil.goyal@nxp.com>
Sent: Tuesday, October 15, 2019 4:38 PM
To: Somalapuram, Amaranath <Amaranath.Somalapuram@amd.com>; dev@dpdk.org
Cc: stable@dpdk.org
Subject: RE: [PATCH v1 5/6] crypto/ccp: enable IOMMU for CCP
Title: crypto/ccp: enable IOMMU
>
> From: Amaranath Somalapuram <asomalap@amd.com>
>
> CCP uses the vdev framework, and the vdev framework does not support IOMMU.
> Add custom IOMMU support for the AMD CCP driver.
> Cc: stable@dpdk.org
This patch does not look like a fix. Do you really want to backport this patch?
>
> Signed-off-by: Amaranath Somalapuram <asomalap@amd.com>
> ---