From: Konstantin Ananyev <konstantin.ananyev@intel.com>
To: dev@dpdk.org
Cc: akhil.goyal@nxp.com, pablo.de.lara.guarch@intel.com,
declan.doherty@intel.com,
Konstantin Ananyev <konstantin.ananyev@intel.com>
Subject: [dpdk-dev] [PATCH v3 1/2] crypto/aesni_mb: support CPU crypto
Date: Fri, 17 Apr 2020 16:03:48 +0100 [thread overview]
Message-ID: <20200417150349.14773-2-konstantin.ananyev@intel.com> (raw)
In-Reply-To: <20200417150349.14773-1-konstantin.ananyev@intel.com>
Add support for CPU crypto mode by introducing the required handler.
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---
doc/guides/cryptodevs/aesni_mb.rst | 3 +
doc/guides/cryptodevs/features/aesni_mb.ini | 1 +
doc/guides/rel_notes/release_20_05.rst | 4 +
.../crypto/aesni_mb/aesni_mb_pmd_private.h | 5 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 386 ++++++++++++++++--
.../crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 2 +
6 files changed, 367 insertions(+), 34 deletions(-)
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index 465d20d88..baa11fac4 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -12,6 +12,9 @@ support for utilizing Intel multi buffer library, see the white paper
The AES-NI MB PMD has current only been tested on Fedora 21 64-bit with gcc.
+The AES-NI MB PMD supports synchronous mode of operation with
+``rte_cryptodev_sym_cpu_crypto_process`` function call.
+
Features
--------
diff --git a/doc/guides/cryptodevs/features/aesni_mb.ini b/doc/guides/cryptodevs/features/aesni_mb.ini
index ee6a07460..6cc4c82e8 100644
--- a/doc/guides/cryptodevs/features/aesni_mb.ini
+++ b/doc/guides/cryptodevs/features/aesni_mb.ini
@@ -12,6 +12,7 @@ CPU AVX2 = Y
CPU AVX512 = Y
CPU AESNI = Y
OOP LB In LB Out = Y
+CPU crypto = Y
;
; Supported crypto algorithms of the 'aesni_mb' crypto driver.
diff --git a/doc/guides/rel_notes/release_20_05.rst b/doc/guides/rel_notes/release_20_05.rst
index 184967844..360e3ddb8 100644
--- a/doc/guides/rel_notes/release_20_05.rst
+++ b/doc/guides/rel_notes/release_20_05.rst
@@ -81,6 +81,10 @@ New Features
by making use of the event device capabilities. The event mode currently supports
only inline IPsec protocol offload.
+* **Updated the aesni_mb cryptodev PMD.**
+
+ Added support for the synchronous crypto burst API.
+
Removed Items
-------------
diff --git a/drivers/crypto/aesni_mb/aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/aesni_mb_pmd_private.h
index 3456693c2..ce3ea9add 100644
--- a/drivers/crypto/aesni_mb/aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/aesni_mb_pmd_private.h
@@ -275,6 +275,9 @@ aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
/** device specific operations function pointer structure */
extern struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops;
-
+extern uint32_t
+aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
+ struct rte_crypto_sym_vec *vec);
#endif /* _AESNI_MB_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 33f416745..91567f98e 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -11,6 +11,7 @@
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
+#include <rte_per_lcore.h>
#include "aesni_mb_pmd_private.h"
@@ -19,6 +20,12 @@
#define HMAC_MAX_BLOCK_SIZE 128
static uint8_t cryptodev_driver_id;
+/*
+ * Needed to support the CPU-crypto API (rte_cryptodev_sym_cpu_crypto_process),
+ * as we still use the JOB-based API even for synchronous processing.
+ */
+static RTE_DEFINE_PER_LCORE(MB_MGR *, sync_mb_mgr);
+
typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
@@ -808,6 +815,119 @@ auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
(UINT64_MAX - u_src + u_dst + 1);
}
+static inline void
+set_cpu_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_session *session,
+ union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
+ void *iv, void *aad, void *digest, void *udata)
+{
+ /* Set crypto operation */
+ job->chain_order = session->chain_order;
+
+ /* Set cipher parameters */
+ job->cipher_direction = session->cipher.direction;
+ job->cipher_mode = session->cipher.mode;
+
+ job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
+
+ /* Set authentication parameters */
+ job->hash_alg = session->auth.algo;
+ job->iv = iv;
+
+ switch (job->hash_alg) {
+ case AES_XCBC:
+ job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
+ job->u.XCBC._k2 = session->auth.xcbc.k2;
+ job->u.XCBC._k3 = session->auth.xcbc.k3;
+
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ break;
+
+ case AES_CCM:
+ job->u.CCM.aad = (uint8_t *)aad + 18;
+ job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ job->iv++;
+ break;
+
+ case AES_CMAC:
+ job->u.CMAC._key_expanded = session->auth.cmac.expkey;
+ job->u.CMAC._skey1 = session->auth.cmac.skey1;
+ job->u.CMAC._skey2 = session->auth.cmac.skey2;
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ break;
+
+ case AES_GMAC:
+ if (session->cipher.mode == GCM) {
+ job->u.GCM.aad = aad;
+ job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
+ } else {
+ /* For GMAC */
+ job->u.GCM.aad = buf;
+ job->u.GCM.aad_len_in_bytes = len;
+ job->cipher_mode = GCM;
+ }
+ job->aes_enc_key_expanded = &session->cipher.gcm_key;
+ job->aes_dec_key_expanded = &session->cipher.gcm_key;
+ break;
+
+ default:
+ job->u.HMAC._hashed_auth_key_xor_ipad =
+ session->auth.pads.inner;
+ job->u.HMAC._hashed_auth_key_xor_opad =
+ session->auth.pads.outer;
+
+ if (job->cipher_mode == DES3) {
+ job->aes_enc_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ job->aes_dec_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ } else {
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ }
+ }
+
+ /*
+ * The multi-buffer library currently only supports returning a
+ * truncated digest length as specified in the relevant IPsec RFCs.
+ */
+
+ /* Set digest location and length */
+ job->auth_tag_output = digest;
+ job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
+
+ /* Set IV parameters */
+ job->iv_len_in_bytes = session->iv.length;
+
+ /* Data Parameters */
+ job->src = buf;
+ job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
+ job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
+ job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
+ if (job->hash_alg == AES_GMAC && session->cipher.mode != GCM) {
+ job->msg_len_to_hash_in_bytes = 0;
+ job->msg_len_to_cipher_in_bytes = 0;
+ } else {
+ job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
+ sofs.ofs.auth.tail;
+ job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
+ sofs.ofs.cipher.tail;
+ }
+
+ job->user_data = udata;
+}
+
/**
* Process a crypto operation and complete a JOB_AES_HMAC job structure for
* submission to the multi buffer library for processing.
@@ -1100,6 +1220,15 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
return op;
}
+static inline void
+post_process_mb_sync_job(JOB_AES_HMAC *job)
+{
+ uint32_t *st;
+
+ st = job->user_data;
+ st[0] = (job->status == STS_COMPLETED) ? 0 : EBADMSG;
+}
+
/**
* Process a completed JOB_AES_HMAC job and keep processing jobs until
* get_completed_job return NULL
@@ -1136,6 +1265,26 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
return processed_jobs;
}
+static inline uint32_t
+handle_completed_sync_jobs(JOB_AES_HMAC *job, MB_MGR *mb_mgr)
+{
+ uint32_t i;
+
+ for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
+ post_process_mb_sync_job(job);
+
+ return i;
+}
+
+static inline uint32_t
+flush_mb_sync_mgr(MB_MGR *mb_mgr)
+{
+ JOB_AES_HMAC *job;
+
+ job = IMB_FLUSH_JOB(mb_mgr);
+ return handle_completed_sync_jobs(job, mb_mgr);
+}
+
static inline uint16_t
flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
@@ -1239,8 +1388,201 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return processed_jobs;
}
+static MB_MGR *
+alloc_init_mb_mgr(enum aesni_mb_vector_mode vector_mode)
+{
+ MB_MGR *mb_mgr = alloc_mb_mgr(0);
+ if (mb_mgr == NULL)
+ return NULL;
+
+ switch (vector_mode) {
+ case RTE_AESNI_MB_SSE:
+ init_mb_mgr_sse(mb_mgr);
+ break;
+ case RTE_AESNI_MB_AVX:
+ init_mb_mgr_avx(mb_mgr);
+ break;
+ case RTE_AESNI_MB_AVX2:
+ init_mb_mgr_avx2(mb_mgr);
+ break;
+ case RTE_AESNI_MB_AVX512:
+ init_mb_mgr_avx512(mb_mgr);
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
+ free_mb_mgr(mb_mgr);
+ return NULL;
+ }
+
+ return mb_mgr;
+}
+
+static inline void
+aesni_mb_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t err)
+{
+ uint32_t i;
+
+ for (i = 0; i != vec->num; ++i)
+ vec->status[i] = err;
+}
+
+static inline int
+check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
+{
+ /* no multi-seg support with current AESNI-MB PMD */
+ if (sgl->num != 1)
+ return ENOTSUP;
+ else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
+ return EINVAL;
+ return 0;
+}
+
+static inline JOB_AES_HMAC *
+submit_sync_job(MB_MGR *mb_mgr)
+{
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+ return IMB_SUBMIT_JOB(mb_mgr);
+#else
+ return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
+#endif
+}
+
+static inline uint32_t
+generate_sync_dgst(struct rte_crypto_sym_vec *vec,
+ const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
+{
+ uint32_t i, k;
+
+ for (i = 0, k = 0; i != vec->num; i++) {
+ if (vec->status[i] == 0) {
+ memcpy(vec->digest[i], dgst[i], len);
+ k++;
+ }
+ }
+
+ return k;
+}
+
+static inline uint32_t
+verify_sync_dgst(struct rte_crypto_sym_vec *vec,
+ const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
+{
+ uint32_t i, k;
+
+ for (i = 0, k = 0; i != vec->num; i++) {
+ if (vec->status[i] == 0) {
+ if (memcmp(vec->digest[i], dgst[i], len) != 0)
+ vec->status[i] = EBADMSG;
+ else
+ k++;
+ }
+ }
+
+ return k;
+}
+
+uint32_t
+aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
+ struct rte_crypto_sym_vec *vec)
+{
+ int32_t ret;
+ uint32_t i, j, k, len;
+ void *buf;
+ JOB_AES_HMAC *job;
+ MB_MGR *mb_mgr;
+ struct aesni_mb_private *priv;
+ struct aesni_mb_session *s;
+ uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];
+
+ s = get_sym_session_private_data(sess, dev->driver_id);
+ if (s == NULL) {
+ aesni_mb_fill_error_code(vec, EINVAL);
+ return 0;
+ }
+
+ /* get per-thread MB MGR, create one if needed */
+ mb_mgr = RTE_PER_LCORE(sync_mb_mgr);
+ if (mb_mgr == NULL) {
+
+ priv = dev->data->dev_private;
+ mb_mgr = alloc_init_mb_mgr(priv->vector_mode);
+ if (mb_mgr == NULL) {
+ aesni_mb_fill_error_code(vec, ENOMEM);
+ return 0;
+ }
+ RTE_PER_LCORE(sync_mb_mgr) = mb_mgr;
+ }
+
+ for (i = 0, j = 0, k = 0; i != vec->num; i++) {
+
+
+ ret = check_crypto_sgl(sofs, vec->sgl + i);
+ if (ret != 0) {
+ vec->status[i] = ret;
+ continue;
+ }
+
+ buf = vec->sgl[i].vec[0].base;
+ len = vec->sgl[i].vec[0].len;
+
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ if (job == NULL) {
+ k += flush_mb_sync_mgr(mb_mgr);
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ RTE_ASSERT(job != NULL);
+ }
+
+ /* Submit job for processing */
+ set_cpu_mb_job_params(job, s, sofs, buf, len,
+ vec->iv[i], vec->aad[i], tmp_dgst[i],
+ &vec->status[i]);
+ job = submit_sync_job(mb_mgr);
+ j++;
+
+ /* handle completed jobs */
+ k += handle_completed_sync_jobs(job, mb_mgr);
+ }
+
+ /* flush remaining jobs */
+ while (k != j)
+ k += flush_mb_sync_mgr(mb_mgr);
+
+ /* finish processing for successful jobs: check/update digest */
+ if (k != 0) {
+ if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
+ k = verify_sync_dgst(vec,
+ (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
+ s->auth.req_digest_len);
+ else
+ k = generate_sync_dgst(vec,
+ (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
+ s->auth.req_digest_len);
+ }
+
+ return k;
+}
+
static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
+static uint64_t
+vec_mode_to_flags(enum aesni_mb_vector_mode mode)
+{
+ switch (mode) {
+ case RTE_AESNI_MB_SSE:
+ return RTE_CRYPTODEV_FF_CPU_SSE;
+ case RTE_AESNI_MB_AVX:
+ return RTE_CRYPTODEV_FF_CPU_AVX;
+ case RTE_AESNI_MB_AVX2:
+ return RTE_CRYPTODEV_FF_CPU_AVX2;
+ case RTE_AESNI_MB_AVX512:
+ return RTE_CRYPTODEV_FF_CPU_AVX512;
+ default:
+ AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", mode);
+ return 0;
+ }
+}
+
static int
cryptodev_aesni_mb_create(const char *name,
struct rte_vdev_device *vdev,
@@ -1276,7 +1618,8 @@ cryptodev_aesni_mb_create(const char *name,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO;
/* Check CPU for support for AES instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
@@ -1284,30 +1627,12 @@ cryptodev_aesni_mb_create(const char *name,
else
AESNI_MB_LOG(WARNING, "AES instructions not supported by CPU");
- mb_mgr = alloc_mb_mgr(0);
- if (mb_mgr == NULL)
- return -ENOMEM;
+ dev->feature_flags |= vec_mode_to_flags(vector_mode);
- switch (vector_mode) {
- case RTE_AESNI_MB_SSE:
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
- init_mb_mgr_sse(mb_mgr);
- break;
- case RTE_AESNI_MB_AVX:
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
- init_mb_mgr_avx(mb_mgr);
- break;
- case RTE_AESNI_MB_AVX2:
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
- init_mb_mgr_avx2(mb_mgr);
- break;
- case RTE_AESNI_MB_AVX512:
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
- init_mb_mgr_avx512(mb_mgr);
- break;
- default:
- AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
- goto error_exit;
+ mb_mgr = alloc_init_mb_mgr(vector_mode);
+ if (mb_mgr == NULL) {
+ rte_cryptodev_pmd_destroy(dev);
+ return -ENOMEM;
}
/* Set vector instructions mode supported */
@@ -1319,16 +1644,7 @@ cryptodev_aesni_mb_create(const char *name,
AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
imb_get_version_str());
-
return 0;
-
-error_exit:
- if (mb_mgr)
- free_mb_mgr(mb_mgr);
-
- rte_cryptodev_pmd_destroy(dev);
-
- return -1;
}
static int
@@ -1377,6 +1693,10 @@ cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
internals = cryptodev->data->dev_private;
free_mb_mgr(internals->mb_mgr);
+ if (RTE_PER_LCORE(sync_mb_mgr)) {
+ free_mb_mgr(RTE_PER_LCORE(sync_mb_mgr));
+ RTE_PER_LCORE(sync_mb_mgr) = NULL;
+ }
return rte_cryptodev_pmd_destroy(cryptodev);
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 845661174..b972ab434 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -797,6 +797,8 @@ struct rte_cryptodev_ops aesni_mb_pmd_ops = {
.queue_pair_setup = aesni_mb_pmd_qp_setup,
.queue_pair_release = aesni_mb_pmd_qp_release,
+ .sym_cpu_process = aesni_mb_cpu_crypto_process_bulk,
+
.sym_session_get_size = aesni_mb_pmd_sym_session_get_size,
.sym_session_configure = aesni_mb_pmd_sym_session_configure,
.sym_session_clear = aesni_mb_pmd_sym_session_clear
--
2.17.1
next prev parent reply other threads:[~2020-04-17 15:04 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-03-16 13:21 [dpdk-dev] [PATCH 0/2] support CPU crypto for AESNI MB PMD Konstantin Ananyev
2020-03-16 13:21 ` [dpdk-dev] [PATCH 1/2] crypto/aesni_mb: support CPU crypto Konstantin Ananyev
2020-03-16 13:21 ` [dpdk-dev] [PATCH 2/2] test/crypto: add CPU crypto mode for AESNI MB Konstantin Ananyev
2020-04-01 14:18 ` [dpdk-dev] [PATCH 0/2] support CPU crypto for AESNI MB PMD Akhil Goyal
2020-04-01 15:09 ` Ananyev, Konstantin
2020-04-03 15:55 ` [dpdk-dev] [PATCH v2 " Konstantin Ananyev
2020-04-03 15:55 ` [dpdk-dev] [PATCH v2 1/2] crypto/aesni_mb: support CPU crypto Konstantin Ananyev
2020-04-15 9:08 ` De Lara Guarch, Pablo
2020-04-15 9:14 ` De Lara Guarch, Pablo
2020-04-15 9:52 ` Ananyev, Konstantin
2020-04-03 15:55 ` [dpdk-dev] [PATCH v2 2/2] test/crypto: add CPU crypto mode for AESNI MB Konstantin Ananyev
2020-04-15 9:13 ` De Lara Guarch, Pablo
2020-04-15 18:25 ` Akhil Goyal
2020-04-03 16:26 ` [dpdk-dev] [PATCH v2 0/2] support CPU crypto for AESNI MB PMD Akhil Goyal
2020-04-17 15:03 ` [dpdk-dev] [PATCH v3 " Konstantin Ananyev
2020-04-17 15:03 ` Konstantin Ananyev [this message]
2020-04-17 15:03 ` [dpdk-dev] [PATCH v3 2/2] test/crypto: add CPU crypto mode for AESNI MB Konstantin Ananyev
2020-04-17 20:57 ` [dpdk-dev] [PATCH v3 0/2] support CPU crypto for AESNI MB PMD Akhil Goyal
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200417150349.14773-2-konstantin.ananyev@intel.com \
--to=konstantin.ananyev@intel.com \
--cc=akhil.goyal@nxp.com \
--cc=declan.doherty@intel.com \
--cc=dev@dpdk.org \
--cc=pablo.de.lara.guarch@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).