DPDK patches and discussions
* [dpdk-dev] [PATCH] crypto/dpaa_sec: support PDCP offload
From: Akhil Goyal @ 2019-01-09 15:13 UTC (permalink / raw)
  To: dev; +Cc: Hemant Agrawal, pablo.de.lara.guarch

From: Hemant Agrawal <hemant.agrawal@nxp.com>

PDCP session configuration for lookaside protocol offload
and the corresponding data path support are added.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c | 268 +++++++++++++++++++++++++++++
 drivers/crypto/dpaa_sec/dpaa_sec.h | 212 +++++++++++++++++++++--
 2 files changed, 470 insertions(+), 10 deletions(-)
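
For reference, a minimal sketch of how an application could create a
PDCP control-plane session against this PMD through the rte_security
lookaside-protocol API (the device id, session mempool, key bytes and
bearer/HFN values below are illustrative placeholders, not part of this
patch):

#include <rte_cryptodev.h>
#include <rte_security.h>

static struct rte_security_session *
create_pdcp_cplane_session(uint8_t dev_id, struct rte_mempool *sess_mp,
			   uint8_t *cipher_key, uint8_t *auth_key)
{
	struct rte_security_ctx *sec_ctx =
		(struct rte_security_ctx *)rte_cryptodev_get_sec_ctx(dev_id);

	/* Control plane chains a cipher transform with an auth transform. */
	struct rte_crypto_sym_xform auth_x = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.auth = {
			.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.key = { .data = auth_key, .length = 16 },
			.digest_length = 4,
		},
	};
	struct rte_crypto_sym_xform cipher_x = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth_x,
		.cipher = {
			.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.key = { .data = cipher_key, .length = 16 },
		},
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
		.pdcp = {
			.bearer = 0x3,		/* placeholder bearer id */
			.domain = RTE_SECURITY_PDCP_MODE_CONTROL,
			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_5,
			.hfn = 0x1,
			.hfn_threshold = 0xfffff,
		},
		.crypto_xform = &cipher_x,
	};

	return rte_security_session_create(sec_ctx, &conf, sess_mp);
}

The resulting session is attached to crypto ops with
rte_security_attach_session() and enqueued as usual; the SEC hardware
then handles PDCP header and MAC-I processing on the data path.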

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index d83e74541..b5896c4f7 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -33,6 +33,7 @@
 #include <hw/desc/common.h>
 #include <hw/desc/algo.h>
 #include <hw/desc/ipsec.h>
+#include <hw/desc/pdcp.h>
 
 #include <rte_dpaa_bus.h>
 #include <dpaa_sec.h>
@@ -266,6 +267,11 @@ static inline int is_proto_ipsec(dpaa_sec_session *ses)
 	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
 }
 
+static inline int is_proto_pdcp(dpaa_sec_session *ses)
+{
+	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
+}
+
 static inline int is_encode(dpaa_sec_session *ses)
 {
 	return ses->dir == DIR_ENC;
@@ -372,6 +378,155 @@ caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
 	}
 }
 
+static int
+dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
+{
+	struct alginfo authdata = {0}, cipherdata = {0};
+	struct sec_cdb *cdb = &ses->cdb;
+	int32_t shared_desc_len = 0;
+	int err;
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+	int swap = false;
+#else
+	int swap = true;
+#endif
+
+	switch (ses->cipher_alg) {
+	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
+		break;
+	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
+		break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
+		break;
+	default:
+		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
+			      ses->cipher_alg);
+		return -1;
+	}
+
+	cipherdata.key = (size_t)ses->cipher_key.data;
+	cipherdata.keylen = ses->cipher_key.length;
+	cipherdata.key_enc_flags = 0;
+	cipherdata.key_type = RTA_DATA_IMM;
+
+	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
+		switch (ses->auth_alg) {
+		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
+			break;
+		case RTE_CRYPTO_AUTH_ZUC_EIA3:
+			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
+			break;
+		case RTE_CRYPTO_AUTH_AES_CMAC:
+			authdata.algtype = PDCP_AUTH_TYPE_AES;
+			break;
+		case RTE_CRYPTO_AUTH_NULL:
+			authdata.algtype = PDCP_AUTH_TYPE_NULL;
+			break;
+		default:
+			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
+				      ses->auth_alg);
+			return -1;
+		}
+
+		authdata.key = (size_t)ses->auth_key.data;
+		authdata.keylen = ses->auth_key.length;
+		authdata.key_enc_flags = 0;
+		authdata.key_type = RTA_DATA_IMM;
+
+		cdb->sh_desc[0] = cipherdata.keylen;
+		cdb->sh_desc[1] = authdata.keylen;
+		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+				       MIN_JOB_DESC_SIZE,
+				       (unsigned int *)cdb->sh_desc,
+				       &cdb->sh_desc[2], 2);
+
+		if (err < 0) {
+			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
+			return err;
+		}
+		if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
+			cipherdata.key = (size_t)dpaa_mem_vtop(
+						(void *)(size_t)cipherdata.key);
+			cipherdata.key_type = RTA_DATA_PTR;
+		}
+		if (!(cdb->sh_desc[2] & (1<<1)) &&  authdata.keylen) {
+			authdata.key = (size_t)dpaa_mem_vtop(
+						(void *)(size_t)authdata.key);
+			authdata.key_type = RTA_DATA_PTR;
+		}
+
+		cdb->sh_desc[0] = 0;
+		cdb->sh_desc[1] = 0;
+		cdb->sh_desc[2] = 0;
+
+		if (ses->dir == DIR_ENC)
+			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
+					cdb->sh_desc, 1, swap,
+					ses->pdcp.hfn,
+					ses->pdcp.bearer,
+					ses->pdcp.pkt_dir,
+					ses->pdcp.hfn_threshold,
+					&cipherdata, &authdata,
+					0);
+		else if (ses->dir == DIR_DEC)
+			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
+					cdb->sh_desc, 1, swap,
+					ses->pdcp.hfn,
+					ses->pdcp.bearer,
+					ses->pdcp.pkt_dir,
+					ses->pdcp.hfn_threshold,
+					&cipherdata, &authdata,
+					0);
+	} else {
+		cdb->sh_desc[0] = cipherdata.keylen;
+		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+				       MIN_JOB_DESC_SIZE,
+				       (unsigned int *)cdb->sh_desc,
+				       &cdb->sh_desc[2], 1);
+
+		if (err < 0) {
+			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
+			return err;
+		}
+		if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
+			cipherdata.key = (size_t)dpaa_mem_vtop(
+						(void *)(size_t)cipherdata.key);
+			cipherdata.key_type = RTA_DATA_PTR;
+		}
+		cdb->sh_desc[0] = 0;
+		cdb->sh_desc[1] = 0;
+		cdb->sh_desc[2] = 0;
+
+		if (ses->dir == DIR_ENC)
+			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
+					cdb->sh_desc, 1, swap,
+					ses->pdcp.sn_size,
+					ses->pdcp.hfn,
+					ses->pdcp.bearer,
+					ses->pdcp.pkt_dir,
+					ses->pdcp.hfn_threshold,
+					&cipherdata, 0);
+		else if (ses->dir == DIR_DEC)
+			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
+					cdb->sh_desc, 1, swap,
+					ses->pdcp.sn_size,
+					ses->pdcp.hfn,
+					ses->pdcp.bearer,
+					ses->pdcp.pkt_dir,
+					ses->pdcp.hfn_threshold,
+					&cipherdata, 0);
+	}
+
+	return shared_desc_len;
+}
+
 /* prepare ipsec proto command block of the session */
 static int
 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
@@ -472,6 +627,8 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 
 	if (is_proto_ipsec(ses)) {
 		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
+	} else if (is_proto_pdcp(ses)) {
+		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
 	} else if (is_cipher_only(ses)) {
 		caam_cipher_alg(ses, &alginfo_c);
 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
@@ -1545,6 +1702,8 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
 				if (is_proto_ipsec(ses)) {
 					cf = build_proto(op, ses);
+				} else if (is_proto_pdcp(ses)) {
+					cf = build_proto(op, ses);
 				} else if (is_auth_only(ses)) {
 					cf = build_auth_only(op, ses);
 				} else if (is_cipher_only(ses)) {
@@ -2105,7 +2264,112 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
 		goto out;
 	}
 
+	return 0;
+out:
+	rte_free(session->auth_key.data);
+	rte_free(session->cipher_key.data);
+	memset(session, 0, sizeof(dpaa_sec_session));
+	return -1;
+}
+
+static int
+dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
+			  struct rte_security_session_conf *conf,
+			  void *sess)
+{
+	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
+	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
+	struct rte_crypto_auth_xform *auth_xform = NULL;
+	struct rte_crypto_cipher_xform *cipher_xform = NULL;
+	dpaa_sec_session *session = (dpaa_sec_session *)sess;
+	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	memset(session, 0, sizeof(dpaa_sec_session));
+
+	/* find xfrm types */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		cipher_xform = &xform->cipher;
+		if (xform->next != NULL)
+			auth_xform = &xform->next->auth;
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		auth_xform = &xform->auth;
+		if (xform->next != NULL)
+			cipher_xform = &xform->next->cipher;
+	} else {
+		DPAA_SEC_ERR("Invalid crypto type");
+		return -EINVAL;
+	}
+
+	session->proto_alg = conf->protocol;
+	if (cipher_xform) {
+		session->cipher_key.data = rte_zmalloc(NULL,
+					       cipher_xform->key.length,
+					       RTE_CACHE_LINE_SIZE);
+		if (session->cipher_key.data == NULL &&
+				cipher_xform->key.length > 0) {
+			DPAA_SEC_ERR("No Memory for cipher key");
+			return -ENOMEM;
+		}
+		session->cipher_key.length = cipher_xform->key.length;
+		memcpy(session->cipher_key.data, cipher_xform->key.data,
+			cipher_xform->key.length);
+		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+					DIR_ENC : DIR_DEC;
+		session->cipher_alg = cipher_xform->algo;
+	} else {
+		session->cipher_key.data = NULL;
+		session->cipher_key.length = 0;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+		session->dir = DIR_ENC;
+	}
+
+	/* Auth is only applicable for control mode operation. */
+	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
+		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) {
+			DPAA_SEC_ERR(
+				"PDCP Seq Num size should be 5 bits for cmode");
+			goto out;
+		}
+		if (auth_xform) {
+			session->auth_key.data = rte_zmalloc(NULL,
+							auth_xform->key.length,
+							RTE_CACHE_LINE_SIZE);
+			if (session->auth_key.data == NULL &&
+					auth_xform->key.length > 0) {
+				DPAA_SEC_ERR("No Memory for auth key");
+				rte_free(session->cipher_key.data);
+				return -ENOMEM;
+			}
+			session->auth_key.length = auth_xform->key.length;
+			memcpy(session->auth_key.data, auth_xform->key.data,
+					auth_xform->key.length);
+			session->auth_alg = auth_xform->algo;
+		} else {
+			session->auth_key.data = NULL;
+			session->auth_key.length = 0;
+			session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+		}
+	}
+	session->pdcp.domain = pdcp_xform->domain;
+	session->pdcp.bearer = pdcp_xform->bearer;
+	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
+	session->pdcp.sn_size = pdcp_xform->sn_size;
+#ifdef ENABLE_HFN_OVERRIDE
+	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
+#endif
+	session->pdcp.hfn = pdcp_xform->hfn;
+	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
 
+	session->ctx_pool = dev_priv->ctx_pool;
+	rte_spinlock_lock(&dev_priv->lock);
+	session->inq = dpaa_sec_attach_rxq(dev_priv);
+	rte_spinlock_unlock(&dev_priv->lock);
+	if (session->inq == NULL) {
+		DPAA_SEC_ERR("unable to attach sec queue");
+		goto out;
+	}
 	return 0;
 out:
 	rte_free(session->auth_key.data);
@@ -2134,6 +2398,10 @@ dpaa_sec_security_session_create(void *dev,
 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
 				sess_private_data);
 		break;
+	case RTE_SECURITY_PROTOCOL_PDCP:
+		ret = dpaa_sec_set_pdcp_session(cdev, conf,
+				sess_private_data);
+		break;
 	case RTE_SECURITY_PROTOCOL_MACSEC:
 		return -ENOTSUP;
 	default:
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index f4b87844c..6049c1d52 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -91,6 +91,20 @@ struct sec_cdb {
 	uint32_t sh_desc[DPAA_SEC_MAX_DESC_SIZE];
 };
 
+/*!
+ * This structure is to be filled by the user as a part of
+ * dpaa_sec_proto_ctxt for the PDCP protocol.
+ */
+struct sec_pdcp_ctxt {
+	enum rte_security_pdcp_domain domain; /*!< Data/Control mode*/
+	int8_t bearer;	/*!< PDCP bearer ID */
+	int8_t pkt_dir;/*!< PDCP Frame Direction 0:UL 1:DL*/
+	int8_t hfn_ovd;/*!< Overwrite HFN per packet*/
+	uint32_t hfn;	/*!< Hyper Frame Number */
+	uint32_t hfn_threshold;	/*!< HFN Threshold for key renegotiation */
+	uint8_t sn_size;	/*!< Sequence number size, 7/12/15 */
+};
+
 typedef struct dpaa_sec_session_entry {
 	uint8_t dir;         /*!< Operation Direction */
 	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
@@ -113,15 +127,21 @@ typedef struct dpaa_sec_session_entry {
 			} auth_key;
 		};
 	};
-	struct {
-		uint16_t length;
-		uint16_t offset;
-	} iv;	/**< Initialisation vector parameters */
-	uint16_t auth_only_len; /*!< Length of data for Auth only */
-	uint32_t digest_length;
-	struct ipsec_encap_pdb encap_pdb;
-	struct ip ip4_hdr;
-	struct ipsec_decap_pdb decap_pdb;
+	union {
+		struct {
+			struct {
+				uint16_t length;
+				uint16_t offset;
+			} iv;	/**< Initialisation vector parameters */
+			uint16_t auth_only_len;
+					/*!< Length of data for Auth only */
+			uint32_t digest_length;
+			struct ipsec_decap_pdb decap_pdb;
+			struct ipsec_encap_pdb encap_pdb;
+			struct ip ip4_hdr;
+		};
+		struct sec_pdcp_ctxt pdcp;
+	};
 	struct dpaa_sec_qp *qp;
 	struct qman_fq *inq;
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
@@ -366,7 +386,7 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
 					.min = 16,
 					.max = 16,
 					.increment = 0
-				}
+				},
 			}, }
 		}, }
 	},
@@ -394,6 +414,162 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
+static const struct rte_cryptodev_capabilities dpaa_pdcp_capabilities[] = {
+	{	/* SNOW 3G (UIA2) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 4,
+					.max = 4,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* SNOW 3G (UEA2) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* ZUC (EEA3) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* ZUC (EIA3) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 4,
+					.max = 4,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
 static const struct rte_security_capability dpaa_sec_security_cap[] = {
 	{ /* IPsec Lookaside Protocol offload ESP Transport Egress */
 		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
@@ -417,6 +593,22 @@ static const struct rte_security_capability dpaa_sec_security_cap[] = {
 		},
 		.crypto_capabilities = dpaa_sec_capabilities
 	},
+	{ /* PDCP Lookaside Protocol offload Data */
+		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
+		.pdcp = {
+			.domain = RTE_SECURITY_PDCP_MODE_DATA,
+		},
+		.crypto_capabilities = dpaa_pdcp_capabilities
+	},
+	{ /* PDCP Lookaside Protocol offload Control */
+		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
+		.pdcp = {
+			.domain = RTE_SECURITY_PDCP_MODE_CONTROL,
+		},
+		.crypto_capabilities = dpaa_pdcp_capabilities
+	},
 	{
 		.action = RTE_SECURITY_ACTION_TYPE_NONE
 	}
-- 
2.17.1

* [dpdk-dev] [PATCH] crypto/dpaa_sec: support same session flows on multi cores
From: Akhil Goyal @ 2019-01-09 15:14 UTC (permalink / raw)
  To: dev; +Cc: Hemant Agrawal, pablo.de.lara.guarch, Akhil Goyal

In dpaa_sec, each session should have a separate in-queue,
and each in-queue should be mapped to an outq.
So if multiple flows of the same SA arrive on different cores
due to the NIC's RSS distribution, an enqueue from any core
will try to re-assign the inq-outq mapping, which will fail.

In this patch, a separate inq is allocated for each core and
used only from that core. The number of outqs remains the
same; the session stores a pointer to its outq for each of
the cores.

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c | 65 +++++++++++++++++++-----------
 drivers/crypto/dpaa_sec/dpaa_sec.h |  9 +++--
 2 files changed, 46 insertions(+), 28 deletions(-)
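
The essence of the change is that the session's queues are now indexed
by the executing lcore, along the lines of the following sketch
(dpaa_sec_session and MAX_DPAA_CORES come from this patch; the helper
itself is illustrative):

#include <rte_lcore.h>

/* Pick the per-core inq of a session. Each core gets its own slot, so
 * concurrent enqueues of the same SA from different cores no longer
 * race over a single inq->outq mapping. rte_lcore_id() can exceed
 * MAX_DPAA_CORES, hence the modulo.
 */
static inline struct qman_fq *
dpaa_sec_session_inq(dpaa_sec_session *ses)
{
	return ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
}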

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index b5896c4f7..6b05ac8bd 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -1683,15 +1683,18 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 				nb_ops = loop;
 				goto send_pkts;
 			}
-			if (unlikely(!ses->qp)) {
+			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
 				if (dpaa_sec_attach_sess_q(qp, ses)) {
 					frames_to_send = loop;
 					nb_ops = loop;
 					goto send_pkts;
 				}
-			} else if (unlikely(ses->qp != qp)) {
+			} else if (unlikely(ses->qp[rte_lcore_id() %
+						MAX_DPAA_CORES] != qp)) {
 				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
-					" New qp = %p\n", ses->qp, qp);
+					" New qp = %p\n",
+					ses->qp[rte_lcore_id() %
+					MAX_DPAA_CORES], qp);
 				frames_to_send = loop;
 				nb_ops = loop;
 				goto send_pkts;
@@ -1743,7 +1746,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 			}
 
 			fd = &fds[loop];
-			inq[loop] = ses->inq;
+			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
 			fd->opaque_addr = 0;
 			fd->cmd = 0;
 			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
@@ -1970,7 +1973,7 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
 {
 	int ret;
 
-	sess->qp = qp;
+	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
 	ret = dpaa_sec_prep_cdb(sess);
 	if (ret) {
 		DPAA_SEC_ERR("Unable to prepare sec cdb");
@@ -1983,7 +1986,8 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
 			return ret;
 		}
 	}
-	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
+	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
+			       dpaa_mem_vtop(&sess->cdb),
 			       qman_fq_fqid(&qp->outq));
 	if (ret)
 		DPAA_SEC_ERR("Unable to init sec queue");
@@ -1997,6 +2001,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
 {
 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
 	dpaa_sec_session *session = sess;
+	uint32_t i;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -2053,12 +2058,15 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
 	}
 	session->ctx_pool = internals->ctx_pool;
 	rte_spinlock_lock(&internals->lock);
-	session->inq = dpaa_sec_attach_rxq(internals);
-	rte_spinlock_unlock(&internals->lock);
-	if (session->inq == NULL) {
-		DPAA_SEC_ERR("unable to attach sec queue");
-		goto err1;
+	for (i = 0; i < MAX_DPAA_CORES; i++) {
+		session->inq[i] = dpaa_sec_attach_rxq(internals);
+		if (session->inq[i] == NULL) {
+			DPAA_SEC_ERR("unable to attach sec queue");
+			rte_spinlock_unlock(&internals->lock);
+			goto err1;
+		}
 	}
+	rte_spinlock_unlock(&internals->lock);
 
 	return 0;
 
@@ -2118,8 +2126,9 @@ dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
 	if (sess_priv) {
 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
 
-		if (s->inq)
-			dpaa_sec_detach_rxq(qi, s->inq);
+		if (s->inq[rte_lcore_id() % MAX_DPAA_CORES])
+			dpaa_sec_detach_rxq(qi,
+				s->inq[rte_lcore_id() % MAX_DPAA_CORES]);
 		rte_free(s->cipher_key.data);
 		rte_free(s->auth_key.data);
 		memset(s, 0, sizeof(dpaa_sec_session));
@@ -2138,6 +2147,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
 	struct rte_crypto_auth_xform *auth_xform = NULL;
 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
+	uint32_t i;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -2257,12 +2267,15 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
 		goto out;
 	session->ctx_pool = internals->ctx_pool;
 	rte_spinlock_lock(&internals->lock);
-	session->inq = dpaa_sec_attach_rxq(internals);
-	rte_spinlock_unlock(&internals->lock);
-	if (session->inq == NULL) {
-		DPAA_SEC_ERR("unable to attach sec queue");
-		goto out;
+	for (i = 0; i < MAX_DPAA_CORES; i++) {
+		session->inq[i] = dpaa_sec_attach_rxq(internals);
+		if (session->inq[i] == NULL) {
+			DPAA_SEC_ERR("unable to attach sec queue");
+			rte_spinlock_unlock(&internals->lock);
+			goto out;
+		}
 	}
+	rte_spinlock_unlock(&internals->lock);
 
 	return 0;
 out:
@@ -2283,6 +2296,7 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
 	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
+	uint32_t i;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -2364,12 +2378,15 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 
 	session->ctx_pool = dev_priv->ctx_pool;
 	rte_spinlock_lock(&dev_priv->lock);
-	session->inq = dpaa_sec_attach_rxq(dev_priv);
-	rte_spinlock_unlock(&dev_priv->lock);
-	if (session->inq == NULL) {
-		DPAA_SEC_ERR("unable to attach sec queue");
-		goto out;
+	for (i = 0; i < MAX_DPAA_CORES; i++) {
+		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
+		if (session->inq[i] == NULL) {
+			DPAA_SEC_ERR("unable to attach sec queue");
+			rte_spinlock_unlock(&dev_priv->lock);
+			goto out;
+		}
 	}
+	rte_spinlock_unlock(&dev_priv->lock);
 	return 0;
 out:
 	rte_free(session->auth_key.data);
@@ -2631,7 +2648,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 
 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
 		QMAN_FQ_FLAG_TO_DCPORTAL;
-	for (i = 0; i < internals->max_nb_sessions; i++) {
+	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
 		/* create rx qman fq for sessions*/
 		ret = qman_create_fq(0, flags, &internals->inq[i]);
 		if (unlikely(ret != 0)) {
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 6049c1d52..75c0960a9 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -10,6 +10,7 @@
 #define CRYPTODEV_NAME_DPAA_SEC_PMD	crypto_dpaa_sec
 /**< NXP DPAA - SEC PMD device name */
 
+#define MAX_DPAA_CORES		4
 #define NUM_POOL_CHANNELS	4
 #define DPAA_SEC_BURST		7
 #define DPAA_SEC_ALG_UNSUPPORT	(-1)
@@ -26,7 +27,7 @@
 #define CTX_POOL_NUM_BUFS	32000
 #define CTX_POOL_BUF_SIZE	sizeof(struct dpaa_sec_op_ctx)
 #define CTX_POOL_CACHE_SIZE	512
-#define RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS 2048
+#define RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS 1024
 
 #define DIR_ENC                 1
 #define DIR_DEC                 0
@@ -142,8 +143,8 @@ typedef struct dpaa_sec_session_entry {
 		};
 		struct sec_pdcp_ctxt pdcp;
 	};
-	struct dpaa_sec_qp *qp;
-	struct qman_fq *inq;
+	struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
+	struct qman_fq *inq[MAX_DPAA_CORES];
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
 	struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
 } dpaa_sec_session;
@@ -158,7 +159,7 @@ struct dpaa_sec_qp {
 };
 
 #define RTE_DPAA_MAX_NB_SEC_QPS 2
-#define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+#define RTE_DPAA_MAX_RX_QUEUE (MAX_DPAA_CORES * RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS)
 #define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
 
 /* internal sec queue interface */
-- 
2.17.1

* [dpdk-dev] [PATCH] crypto/dpaa2_sec: fix FLC address for physical mode
From: Akhil Goyal @ 2019-01-09 15:14 UTC (permalink / raw)
  To: dev; +Cc: Hemant Agrawal, pablo.de.lara.guarch, Akhil Goyal

The FLC address programmed into the frame descriptor must be an I/O
virtual address (IOVA), as the hardware accesses it via DMA; writing
the virtual address directly breaks physical addressing mode.

Fixes: 547a4d40e7bf ("crypto/dpaa2_sec: support out of place protocol offload")
Fixes: 0a23d4b6f4c2 ("crypto/dpaa2_sec: support protocol offload IPsec")

Cc: stable@dpdk.org

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
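
A generic sketch of the same virtual-to-IOVA pattern
(DPAA2_VADDR_TO_IOVA() is the driver-internal helper; alloc_hw_ctx()
and the use of rte_malloc_virt2iova() here are illustrative, not part
of this patch):

#include <rte_malloc.h>
#include <rte_memory.h>

/* Allocate a hardware context and report the IOVA the device must use.
 * Casting the virtual address, as the old code did, only works by
 * accident when IOVA == VA.
 */
static void *
alloc_hw_ctx(size_t len, rte_iova_t *iova)
{
	void *va = rte_zmalloc(NULL, len, RTE_CACHE_LINE_SIZE);

	if (va != NULL)
		*iova = rte_malloc_virt2iova(va); /* not (ptrdiff_t)va */
	return va;
}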

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 6095c6021..c24562b80 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -108,7 +108,7 @@ build_proto_compound_fd(dpaa2_sec_session *sess,
 	/* Configure FD as a FRAME LIST */
 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
 
 	/* Configure Output FLE with dst mbuf data  */
 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
@@ -160,7 +160,7 @@ build_proto_fd(dpaa2_sec_session *sess,
 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
 	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
 	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
-	DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
 
 	/* save physical address of mbuf */
 	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
-- 
2.17.1

* Re: [dpdk-dev] [PATCH] crypto/dpaa2_sec: fix FLC address for physical mode
From: De Lara Guarch, Pablo @ 2019-01-11  0:05 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: Hemant Agrawal



> -----Original Message-----
> From: Akhil Goyal [mailto:akhil.goyal@nxp.com]
> Sent: Wednesday, January 9, 2019 3:14 PM
> To: dev@dpdk.org
> Cc: Hemant Agrawal <hemant.agrawal@nxp.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Akhil Goyal <akhil.goyal@nxp.com>
> Subject: [PATCH] crypto/dpaa2_sec: fix FLC address for physical mode
> 
> The FLC address programmed into the frame descriptor must be an I/O
> virtual address (IOVA), as the hardware accesses it via DMA; writing
> the virtual address directly breaks physical addressing mode.
> 
> Fixes: 547a4d40e7bf ("crypto/dpaa2_sec: support out of place protocol
> offload")
> Fixes: 0a23d4b6f4c2 ("crypto/dpaa2_sec: support protocol offload IPsec")
> 
> Cc: stable@dpdk.org
> 
> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>

Applied to dpdk-next-crypto.

Thanks,
Pablo

* Re: [dpdk-dev] [PATCH] crypto/dpaa_sec: support same session flows on multi cores
From: De Lara Guarch, Pablo @ 2019-01-11  0:06 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: Hemant Agrawal



> -----Original Message-----
> From: Akhil Goyal [mailto:akhil.goyal@nxp.com]
> Sent: Wednesday, January 9, 2019 3:14 PM
> To: dev@dpdk.org
> Cc: Hemant Agrawal <hemant.agrawal@nxp.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Akhil Goyal <akhil.goyal@nxp.com>
> Subject: [PATCH] crypto/dpaa_sec: support same session flows on multi
> cores
> 
> In dpaa_sec, each session should have a separate in-queue, and each in-
> queue should be mapped to an outq.
> So if multiple flows of the same SA arrive on different cores due to the
> NIC's RSS distribution, an enqueue from any core will try to re-assign the
> inq-outq mapping, which will fail.
> 
> In this patch, a separate inq is allocated for each core and used only from
> that core. The number of outqs remains the same; the session stores a
> pointer to its outq for each of the cores.
> 
> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>

Applied to dpdk-next-crypto.

Thanks,
Pablo

* Re: [dpdk-dev] [PATCH] crypto/dpaa_sec: support PDCP offload
From: De Lara Guarch, Pablo @ 2019-01-11  0:06 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: Hemant Agrawal



> -----Original Message-----
> From: Akhil Goyal [mailto:akhil.goyal@nxp.com]
> Sent: Wednesday, January 9, 2019 3:13 PM
> To: dev@dpdk.org
> Cc: Hemant Agrawal <hemant.agrawal@nxp.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>
> Subject: [PATCH] crypto/dpaa_sec: support PDCP offload
> 
> From: Hemant Agrawal <hemant.agrawal@nxp.com>
> 
> PDCP session configuration for lookaside protocol offload and the
> corresponding data path support are added.
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> Acked-by: Akhil Goyal <akhil.goyal@nxp.com>

Applied to dpdk-next-crypto.

Thanks,
Pablo
