From: Akhil Goyal <akhil.goyal@nxp.com>
To: "dev@dpdk.org" <dev@dpdk.org>
Cc: Hemant Agrawal <hemant.agrawal@nxp.com>,
"pablo.de.lara.guarch@intel.com" <pablo.de.lara.guarch@intel.com>
Subject: [dpdk-dev] [PATCH] crypto/dpaa_sec: support PDCP offload
Date: Wed, 9 Jan 2019 15:13:16 +0000
Message-ID: <20190109150612.20803-1-akhil.goyal@nxp.com>

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Support for PDCP lookaside protocol offload is added, covering both the
session configuration and the data path.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
---
drivers/crypto/dpaa_sec/dpaa_sec.c | 268 +++++++++++++++++++++++++++++
drivers/crypto/dpaa_sec/dpaa_sec.h | 212 +++++++++++++++++++++--
2 files changed, 470 insertions(+), 10 deletions(-)
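
Not part of the patch, but for context: a minimal application-side sketch of
how this offload is expected to be exercised through the rte_security API.
The device id "dev_id", the session mempool "sess_pool", the key buffers and
the helper name are illustrative assumptions, and the call signatures follow
the rte_security API of this DPDK release (rte_security_session_create()
takes a single session mempool here); treat it as a sketch, not a reference.

#include <rte_cryptodev.h>
#include <rte_security.h>

/* PDCP control-plane session: SNOW 3G ciphering (UEA2) chained with
 * SNOW 3G integrity (UIA2), 5-bit sequence numbers, uplink direction.
 * The PMD routes this conf to dpaa_sec_set_pdcp_session() added below.
 */
static struct rte_security_session *
create_pdcp_cplane_session(uint8_t dev_id, struct rte_mempool *sess_pool,
			   uint8_t *cipher_key, uint8_t *auth_key)
{
	struct rte_security_ctx *sec_ctx = rte_cryptodev_get_sec_ctx(dev_id);

	struct rte_crypto_sym_xform auth_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
			.key = { .data = auth_key, .length = 16 },
			.digest_length = 4,
		},
	};
	struct rte_crypto_sym_xform cipher_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth_xform,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
			.key = { .data = cipher_key, .length = 16 },
		},
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
		.pdcp = {
			.domain = RTE_SECURITY_PDCP_MODE_CONTROL,
			.bearer = 0x3,
			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_5,
			.hfn = 0,
			.hfn_threshold = 0xfffff,
		},
		.crypto_xform = &cipher_xform,
	};

	return rte_security_session_create(sec_ctx, &conf, sess_pool);
}

On the data path, symmetric crypto ops are attached to the returned session
with rte_security_attach_session() and submitted through the usual
rte_cryptodev_enqueue_burst()/dequeue_burst() calls; the driver then builds
the protocol frame descriptor via build_proto(), as in the IPsec offload case.
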
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index d83e74541..b5896c4f7 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -33,6 +33,7 @@
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>
+#include <hw/desc/pdcp.h>
#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
@@ -266,6 +267,11 @@ static inline int is_proto_ipsec(dpaa_sec_session *ses)
return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}
+static inline int is_proto_pdcp(dpaa_sec_session *ses)
+{
+ return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
+}
+
static inline int is_encode(dpaa_sec_session *ses)
{
return ses->dir == DIR_ENC;
@@ -372,6 +378,155 @@ caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
}
}
+static int
+dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
+{
+ struct alginfo authdata = {0}, cipherdata = {0};
+ struct sec_cdb *cdb = &ses->cdb;
+ int32_t shared_desc_len = 0;
+ int err;
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ int swap = false;
+#else
+ int swap = true;
+#endif
+
+ switch (ses->cipher_alg) {
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
+ break;
+ default:
+ DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ ses->cipher_alg);
+ return -1;
+ }
+
+ cipherdata.key = (size_t)ses->cipher_key.data;
+ cipherdata.keylen = ses->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
+ switch (ses->auth_alg) {
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ authdata.algtype = PDCP_AUTH_TYPE_SNOW;
+ break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ authdata.algtype = PDCP_AUTH_TYPE_ZUC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ authdata.algtype = PDCP_AUTH_TYPE_AES;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ authdata.algtype = PDCP_AUTH_TYPE_NULL;
+ break;
+ default:
+ DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
+ ses->auth_alg);
+ return -1;
+ }
+
+ authdata.key = (size_t)ses->auth_key.data;
+ authdata.keylen = ses->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ cdb->sh_desc[0] = cipherdata.keylen;
+ cdb->sh_desc[1] = authdata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)cdb->sh_desc,
+ &cdb->sh_desc[2], 2);
+
+ if (err < 0) {
+ DPAA_SEC_ERR("Crypto: Incorrect key lengths");
+ return err;
+ }
+ if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
+ cipherdata.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)cipherdata.key);
+ cipherdata.key_type = RTA_DATA_PTR;
+ }
+ if (!(cdb->sh_desc[2] & (1<<1)) && authdata.keylen) {
+ authdata.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)authdata.key);
+ authdata.key_type = RTA_DATA_PTR;
+ }
+
+ cdb->sh_desc[0] = 0;
+ cdb->sh_desc[1] = 0;
+ cdb->sh_desc[2] = 0;
+
+ if (ses->dir == DIR_ENC)
+ shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
+ cdb->sh_desc, 1, swap,
+ ses->pdcp.hfn,
+ ses->pdcp.bearer,
+ ses->pdcp.pkt_dir,
+ ses->pdcp.hfn_threshold,
+ &cipherdata, &authdata,
+ 0);
+ else if (ses->dir == DIR_DEC)
+ shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
+ cdb->sh_desc, 1, swap,
+ ses->pdcp.hfn,
+ ses->pdcp.bearer,
+ ses->pdcp.pkt_dir,
+ ses->pdcp.hfn_threshold,
+ &cipherdata, &authdata,
+ 0);
+ } else {
+ cdb->sh_desc[0] = cipherdata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)cdb->sh_desc,
+ &cdb->sh_desc[2], 1);
+
+ if (err < 0) {
+ DPAA_SEC_ERR("Crypto: Incorrect key lengths");
+ return err;
+ }
+ if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
+ cipherdata.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)cipherdata.key);
+ cipherdata.key_type = RTA_DATA_PTR;
+ }
+ cdb->sh_desc[0] = 0;
+ cdb->sh_desc[1] = 0;
+ cdb->sh_desc[2] = 0;
+
+ if (ses->dir == DIR_ENC)
+ shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
+ cdb->sh_desc, 1, swap,
+ ses->pdcp.sn_size,
+ ses->pdcp.hfn,
+ ses->pdcp.bearer,
+ ses->pdcp.pkt_dir,
+ ses->pdcp.hfn_threshold,
+ &cipherdata, 0);
+ else if (ses->dir == DIR_DEC)
+ shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
+ cdb->sh_desc, 1, swap,
+ ses->pdcp.sn_size,
+ ses->pdcp.hfn,
+ ses->pdcp.bearer,
+ ses->pdcp.pkt_dir,
+ ses->pdcp.hfn_threshold,
+ &cipherdata, 0);
+ }
+
+ return shared_desc_len;
+}
+
/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
@@ -472,6 +627,8 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
if (is_proto_ipsec(ses)) {
shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
+ } else if (is_proto_pdcp(ses)) {
+ shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
} else if (is_cipher_only(ses)) {
caam_cipher_alg(ses, &alginfo_c);
if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
@@ -1545,6 +1702,8 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
if (is_proto_ipsec(ses)) {
cf = build_proto(op, ses);
+ } else if (is_proto_pdcp(ses)) {
+ cf = build_proto(op, ses);
} else if (is_auth_only(ses)) {
cf = build_auth_only(op, ses);
} else if (is_cipher_only(ses)) {
@@ -2105,7 +2264,112 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
goto out;
}
+ return 0;
+out:
+ rte_free(session->auth_key.data);
+ rte_free(session->cipher_key.data);
+ memset(session, 0, sizeof(dpaa_sec_session));
+ return -1;
+}
+
+static int
+dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
+ struct rte_crypto_sym_xform *xform = conf->crypto_xform;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ dpaa_sec_session *session = (dpaa_sec_session *)sess;
+ struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(session, 0, sizeof(dpaa_sec_session));
+
+ /* find xfrm types */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ cipher_xform = &xform->cipher;
+ if (xform->next != NULL)
+ auth_xform = &xform->next->auth;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xform = &xform->auth;
+ if (xform->next != NULL)
+ cipher_xform = &xform->next->cipher;
+ } else {
+ DPAA_SEC_ERR("Invalid crypto type");
+ return -EINVAL;
+ }
+
+ session->proto_alg = conf->protocol;
+ if (cipher_xform) {
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ DPAA_SEC_ERR("No Memory for cipher key");
+ return -ENOMEM;
+ }
+ session->cipher_key.length = cipher_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+ session->cipher_alg = cipher_xform->algo;
+ } else {
+ session->cipher_key.data = NULL;
+ session->cipher_key.length = 0;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+ session->dir = DIR_ENC;
+ }
+
+ /* Auth is only applicable for control mode operation. */
+ if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
+ if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) {
+ DPAA_SEC_ERR(
+ "PDCP Seq Num size should be 5 bits for cmode");
+ goto out;
+ }
+ if (auth_xform) {
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ DPAA_SEC_ERR("No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+ session->auth_alg = auth_xform->algo;
+ } else {
+ session->auth_key.data = NULL;
+ session->auth_key.length = 0;
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ }
+ }
+ session->pdcp.domain = pdcp_xform->domain;
+ session->pdcp.bearer = pdcp_xform->bearer;
+ session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
+ session->pdcp.sn_size = pdcp_xform->sn_size;
+#ifdef ENABLE_HFN_OVERRIDE
+ session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
+#endif
+ session->pdcp.hfn = pdcp_xform->hfn;
+ session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
+ session->ctx_pool = dev_priv->ctx_pool;
+ rte_spinlock_lock(&dev_priv->lock);
+ session->inq = dpaa_sec_attach_rxq(dev_priv);
+ rte_spinlock_unlock(&dev_priv->lock);
+ if (session->inq == NULL) {
+ DPAA_SEC_ERR("unable to attach sec queue");
+ goto out;
+ }
return 0;
out:
rte_free(session->auth_key.data);
@@ -2134,6 +2398,10 @@ dpaa_sec_security_session_create(void *dev,
ret = dpaa_sec_set_ipsec_session(cdev, conf,
sess_private_data);
break;
+ case RTE_SECURITY_PROTOCOL_PDCP:
+ ret = dpaa_sec_set_pdcp_session(cdev, conf,
+ sess_private_data);
+ break;
case RTE_SECURITY_PROTOCOL_MACSEC:
return -ENOTSUP;
default:
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index f4b87844c..6049c1d52 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -91,6 +91,20 @@ struct sec_cdb {
uint32_t sh_desc[DPAA_SEC_MAX_DESC_SIZE];
};
+/*!
+ * The structure is to be filled by user as a part of
+ * dpaa_sec_proto_ctxt for PDCP Protocol
+ */
+struct sec_pdcp_ctxt {
+ enum rte_security_pdcp_domain domain; /*!< Data/Control mode*/
+ int8_t bearer; /*!< PDCP bearer ID */
+ int8_t pkt_dir;/*!< PDCP Frame Direction 0:UL 1:DL*/
+ int8_t hfn_ovd;/*!< Overwrite HFN per packet*/
+ uint32_t hfn; /*!< Hyper Frame Number */
+ uint32_t hfn_threshold; /*!< HFN Threshold for key renegotiation */
+ uint8_t sn_size; /*!< Sequence number size, 7/12/15 */
+};
+
typedef struct dpaa_sec_session_entry {
uint8_t dir; /*!< Operation Direction */
enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
@@ -113,15 +127,21 @@ typedef struct dpaa_sec_session_entry {
} auth_key;
};
};
- struct {
- uint16_t length;
- uint16_t offset;
- } iv; /**< Initialisation vector parameters */
- uint16_t auth_only_len; /*!< Length of data for Auth only */
- uint32_t digest_length;
- struct ipsec_encap_pdb encap_pdb;
- struct ip ip4_hdr;
- struct ipsec_decap_pdb decap_pdb;
+ union {
+ struct {
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv; /**< Initialisation vector parameters */
+ uint16_t auth_only_len;
+ /*!< Length of data for Auth only */
+ uint32_t digest_length;
+ struct ipsec_decap_pdb decap_pdb;
+ struct ipsec_encap_pdb encap_pdb;
+ struct ip ip4_hdr;
+ };
+ struct sec_pdcp_ctxt pdcp;
+ };
struct dpaa_sec_qp *qp;
struct qman_fq *inq;
struct sec_cdb cdb; /**< cmd block associated with qp */
@@ -366,7 +386,7 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.min = 16,
.max = 16,
.increment = 0
- }
+ },
}, }
}, }
},
@@ -394,6 +414,162 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
+static const struct rte_cryptodev_capabilities dpaa_pdcp_capabilities[] = {
+ { /* SNOW 3G (UIA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UEA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, },
+ }, },
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
+ { /* ZUC (EEA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EIA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
static const struct rte_security_capability dpaa_sec_security_cap[] = {
{ /* IPsec Lookaside Protocol offload ESP Transport Egress */
.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
@@ -417,6 +593,22 @@ static const struct rte_security_capability dpaa_sec_security_cap[] = {
},
.crypto_capabilities = dpaa_sec_capabilities
},
+ { /* PDCP Lookaside Protocol offload Data */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_PDCP,
+ .pdcp = {
+ .domain = RTE_SECURITY_PDCP_MODE_DATA,
+ },
+ .crypto_capabilities = dpaa_pdcp_capabilities
+ },
+ { /* PDCP Lookaside Protocol offload Control */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_PDCP,
+ .pdcp = {
+ .domain = RTE_SECURITY_PDCP_MODE_CONTROL,
+ },
+ .crypto_capabilities = dpaa_pdcp_capabilities
+ },
{
.action = RTE_SECURITY_ACTION_TYPE_NONE
}
--
2.17.1
Thread overview (6+ messages):
2019-01-09 15:13 Akhil Goyal [this message]
2019-01-09 15:14 ` [dpdk-dev] [PATCH] crypto/dpaa_sec: support same session flows on multi cores Akhil Goyal
2019-01-11 0:06 ` De Lara Guarch, Pablo
2019-01-09 15:14 ` [dpdk-dev] [PATCH] crypto/dpaa2_sec: fix FLC address for physical mode Akhil Goyal
2019-01-11 0:05 ` De Lara Guarch, Pablo
2019-01-11 0:06 ` [dpdk-dev] [PATCH] crypto/dpaa_sec: support PDCP offload De Lara Guarch, Pablo