From: Nagadheeraj Rottela <rnagadheeraj@marvell.com>
To: "akhil.goyal@nxp.com" <akhil.goyal@nxp.com>,
"pablo.de.lara.guarch@intel.com" <pablo.de.lara.guarch@intel.com>,
"mattias.ronnblom@ericsson.com" <mattias.ronnblom@ericsson.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>,
Srikanth Jampala <jsrikanth@marvell.com>,
Nagadheeraj Rottela <rnagadheeraj@marvell.com>
Subject: [dpdk-dev] [PATCH v4 09/11] crypto/nitrox: add cipher auth crypto chain processing
Date: Mon, 26 Aug 2019 12:49:46 +0000 [thread overview]
Message-ID: <20190826124836.21187-10-rnagadheeraj@marvell.com> (raw)
In-Reply-To: <20190826124836.21187-1-rnagadheeraj@marvell.com>
Add cipher auth crypto chain processing functionality in the symmetric
request manager.
Signed-off-by: Nagadheeraj Rottela <rnagadheeraj@marvell.com>
---
drivers/crypto/nitrox/nitrox_sym_reqmgr.c | 427 +++++++++++++++++++++++++++++-
1 file changed, 425 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/nitrox/nitrox_sym_reqmgr.c b/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
index a37b754f2..968e74fbe 100644
--- a/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
+++ b/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
@@ -10,9 +10,24 @@
#include "nitrox_sym_reqmgr.h"
#include "nitrox_logs.h"
+#define MAX_SGBUF_CNT 16
+#define MAX_SGCOMP_CNT 5
+/* SLC_STORE_INFO */
+#define MIN_UDD_LEN 16
+/* PKT_IN_HDR + SLC_STORE_INFO */
+#define FDATA_SIZE 32
+/* Base destination port for the solicited requests */
+#define SOLICIT_BASE_DPORT 256
#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
#define CMD_TIMEOUT 2
+struct gphdr {
+ uint16_t param0;
+ uint16_t param1;
+ uint16_t param2;
+ uint16_t param3;
+};
+
union pkt_instr_hdr {
uint64_t value;
struct {
@@ -105,12 +120,46 @@ struct resp_hdr {
uint64_t completion;
};
+struct nitrox_sglist {
+ uint16_t len;
+ uint16_t raz0;
+ uint32_t raz1;
+ rte_iova_t iova;
+ void *virt;
+};
+
+struct nitrox_sgcomp {
+ uint16_t len[4];
+ uint64_t iova[4];
+};
+
+struct nitrox_sgtable {
+ uint8_t map_bufs_cnt;
+ uint8_t nr_sgcomp;
+ uint16_t total_bytes;
+
+ struct nitrox_sglist sglist[MAX_SGBUF_CNT];
+ struct nitrox_sgcomp sgcomp[MAX_SGCOMP_CNT];
+};
+
+struct iv {
+ uint8_t *virt;
+ rte_iova_t iova;
+ uint16_t len;
+};
+
struct nitrox_softreq {
struct nitrox_crypto_ctx *ctx;
struct rte_crypto_op *op;
+ struct gphdr gph;
struct nps_pkt_instr instr;
struct resp_hdr resp;
+ struct nitrox_sgtable in;
+ struct nitrox_sgtable out;
+ struct iv iv;
uint64_t timeout;
+ rte_iova_t dptr;
+ rte_iova_t rptr;
rte_iova_t iova;
};
@@ -121,10 +170,383 @@ softreq_init(struct nitrox_softreq *sr, rte_iova_t iova)
sr->iova = iova;
}
+/*
+ * 64-Byte Instruction Format
+ *
+ * ----------------------
+ * | DPTR0 | 8 bytes
+ * ----------------------
+ * | PKT_IN_INSTR_HDR | 8 bytes
+ * ----------------------
+ * | PKT_IN_HDR | 16 bytes
+ * ----------------------
+ * | SLC_INFO | 16 bytes
+ * ----------------------
+ * | Front data | 16 bytes
+ * ----------------------
+ */
+static void
+create_se_instr(struct nitrox_softreq *sr, uint8_t qno)
+{
+ struct nitrox_crypto_ctx *ctx = sr->ctx;
+ rte_iova_t ctx_handle;
+
+ /* fill the packet instruction */
+ /* word 0 */
+ sr->instr.dptr0 = rte_cpu_to_be_64(sr->dptr);
+
+ /* word 1 */
+ sr->instr.ih.value = 0;
+ sr->instr.ih.s.g = 1;
+ sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
+ sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
+ sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
+ sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
+ sr->instr.ih.value = rte_cpu_to_be_64(sr->instr.ih.value);
+
+ /* word 2 */
+ sr->instr.irh.value[0] = 0;
+ sr->instr.irh.s.uddl = MIN_UDD_LEN;
+ /* context length in 64-bit words */
+ sr->instr.irh.s.ctxl = RTE_ALIGN_MUL_CEIL(sizeof(ctx->fctx), 8) / 8;
+ /* offset from solicit base port 256 */
+ sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
+ /* Invalid context cache */
+ sr->instr.irh.s.ctxc = 0x3;
+ sr->instr.irh.s.arg = ctx->req_op;
+ sr->instr.irh.s.opcode = ctx->opcode;
+ sr->instr.irh.value[0] = rte_cpu_to_be_64(sr->instr.irh.value[0]);
+
+ /* word 3 */
+ ctx_handle = ctx->iova + offsetof(struct nitrox_crypto_ctx, fctx);
+ sr->instr.irh.s.ctxp = rte_cpu_to_be_64(ctx_handle);
+
+ /* word 4 */
+ sr->instr.slc.value[0] = 0;
+ sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
+ sr->instr.slc.value[0] = rte_cpu_to_be_64(sr->instr.slc.value[0]);
+
+ /* word 5 */
+ sr->instr.slc.s.rptr = rte_cpu_to_be_64(sr->rptr);
+ /*
+ * No conversion for front data,
+ * It goes into payload
+ * put GP Header in front data
+ */
+ memcpy(&sr->instr.fdata[0], &sr->gph, sizeof(sr->instr.fdata[0]));
+ sr->instr.fdata[1] = 0;
+ /* flush the soft_req changes before posting the cmd */
+ rte_wmb();
+}
+
+static void
+softreq_copy_iv(struct nitrox_softreq *sr)
+{
+ sr->iv.virt = rte_crypto_op_ctod_offset(sr->op, uint8_t *,
+ sr->ctx->iv.offset);
+ sr->iv.iova = rte_crypto_op_ctophys_offset(sr->op, sr->ctx->iv.offset);
+ sr->iv.len = sr->ctx->iv.length;
+}
+
+static int
+extract_cipher_auth_digest(struct nitrox_softreq *sr,
+ struct nitrox_sglist *digest)
+{
+ struct rte_crypto_op *op = sr->op;
+ struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
+ op->sym->m_src;
+
+ if (sr->ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY &&
+ unlikely(!op->sym->auth.digest.data))
+ return -EINVAL;
+
+ digest->len = sr->ctx->digest_length;
+ if (op->sym->auth.digest.data) {
+ digest->iova = op->sym->auth.digest.phys_addr;
+ digest->virt = op->sym->auth.digest.data;
+
+ return 0;
+ }
+
+ if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->auth.data.offset +
+ op->sym->auth.data.length + digest->len))
+ return -EINVAL;
+
+ digest->iova = rte_pktmbuf_mtophys_offset(mdst,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+
+ return 0;
+}
+
+static void
+fill_sglist(struct nitrox_sgtable *sgtbl, uint16_t len, rte_iova_t iova,
+ void *virt)
+{
+ struct nitrox_sglist *sglist = sgtbl->sglist;
+ uint8_t cnt = sgtbl->map_bufs_cnt;
+
+ if (unlikely(!len))
+ return;
+
+ sglist[cnt].len = len;
+ sglist[cnt].iova = iova;
+ sglist[cnt].virt = virt;
+ sgtbl->total_bytes += len;
+ cnt++;
+
+ sgtbl->map_bufs_cnt = cnt;
+}
+
+static int
+create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf,
+ uint32_t off, int datalen)
+{
+ struct nitrox_sglist *sglist = sgtbl->sglist;
+ uint8_t cnt = sgtbl->map_bufs_cnt;
+ struct rte_mbuf *m;
+ int mlen;
+
+ if (unlikely(datalen <= 0))
+ return 0;
+
+ for (m = mbuf; m && off > rte_pktmbuf_data_len(m); m = m->next)
+ off -= rte_pktmbuf_data_len(m);
+
+ if (unlikely(!m))
+ return -EIO;
+
+ mlen = rte_pktmbuf_data_len(m) - off;
+ if (datalen <= mlen)
+ mlen = datalen;
+ sglist[cnt].len = mlen;
+ sglist[cnt].iova = rte_pktmbuf_mtophys_offset(m, off);
+ sglist[cnt].virt = rte_pktmbuf_mtod_offset(m, uint8_t *, off);
+ sgtbl->total_bytes += mlen;
+ cnt++;
+ datalen -= mlen;
+
+ for (m = m->next; m && datalen; m = m->next) {
+ mlen = rte_pktmbuf_data_len(m) < datalen ?
+ rte_pktmbuf_data_len(m) : datalen;
+ sglist[cnt].len = mlen;
+ sglist[cnt].iova = rte_pktmbuf_mtophys(m);
+ sglist[cnt].virt = rte_pktmbuf_mtod(m, uint8_t *);
+ sgtbl->total_bytes += mlen;
+ cnt++;
+ datalen -= mlen;
+ }
+
+ RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
+ sgtbl->map_bufs_cnt = cnt;
+
+ return 0;
+}
+
+static int
+create_cipher_auth_sglist(struct nitrox_softreq *sr,
+ struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf)
+{
+ struct rte_crypto_op *op = sr->op;
+ int auth_only_len;
+ int err;
+
+ fill_sglist(sgtbl, sr->iv.len, sr->iv.iova, sr->iv.virt);
+
+ auth_only_len = op->sym->auth.data.length - op->sym->cipher.data.length;
+ if (unlikely(auth_only_len < 0))
+ return -EINVAL;
+
+ err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->auth.data.offset,
+ auth_only_len);
+ if (unlikely(err))
+ return err;
+
+ err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->cipher.data.offset,
+ op->sym->cipher.data.length);
+ if (unlikely(err))
+ return err;
+
+ return 0;
+}
+
+static void
+create_sgcomp(struct nitrox_sgtable *sgtbl)
+{
+ int i, j, nr_sgcomp;
+ struct nitrox_sgcomp *sgcomp = sgtbl->sgcomp;
+ struct nitrox_sglist *sglist = sgtbl->sglist;
+
+ nr_sgcomp = RTE_ALIGN_MUL_CEIL(sgtbl->map_bufs_cnt, 4) / 4;
+ sgtbl->nr_sgcomp = nr_sgcomp;
+
+ for (i = 0; i < nr_sgcomp; i++, sgcomp++) {
+ for (j = 0; j < 4; j++, sglist++) {
+ sgcomp->len[j] = rte_cpu_to_be_16(sglist->len);
+ sgcomp->iova[j] = rte_cpu_to_be_64(sglist->iova);
+ }
+ }
+}
+
+static int
+create_cipher_auth_inbuf(struct nitrox_softreq *sr,
+ struct nitrox_sglist *digest)
+{
+ int err;
+ struct nitrox_crypto_ctx *ctx = sr->ctx;
+
+ err = create_cipher_auth_sglist(sr, &sr->in, sr->op->sym->m_src);
+
+ if (unlikely(err))
+ return err;
+
+ if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ fill_sglist(&sr->in, digest->len, digest->iova, digest->virt);
+
+ create_sgcomp(&sr->in);
+ sr->dptr = sr->iova + offsetof(struct nitrox_softreq, in.sgcomp);
+
+ return 0;
+}
+
+static int
+create_cipher_auth_oop_outbuf(struct nitrox_softreq *sr,
+ struct nitrox_sglist *digest)
+{
+ int err;
+ struct nitrox_crypto_ctx *ctx = sr->ctx;
+
+ err = create_cipher_auth_sglist(sr, &sr->out, sr->op->sym->m_dst);
+ if (unlikely(err))
+ return err;
+
+ if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ fill_sglist(&sr->out, digest->len, digest->iova, digest->virt);
+
+ return 0;
+}
+
+static void
+create_cipher_auth_inplace_outbuf(struct nitrox_softreq *sr,
+ struct nitrox_sglist *digest)
+{
+ int i, cnt;
+ struct nitrox_crypto_ctx *ctx = sr->ctx;
+
+ cnt = sr->out.map_bufs_cnt;
+ for (i = 0; i < sr->in.map_bufs_cnt; i++, cnt++) {
+ sr->out.sglist[cnt].len = sr->in.sglist[i].len;
+ sr->out.sglist[cnt].iova = sr->in.sglist[i].iova;
+ sr->out.sglist[cnt].virt = sr->in.sglist[i].virt;
+ }
+
+ sr->out.map_bufs_cnt = cnt;
+ if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ fill_sglist(&sr->out, digest->len, digest->iova,
+ digest->virt);
+ } else if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ sr->out.map_bufs_cnt--;
+ }
+}
+
+static int
+create_cipher_auth_outbuf(struct nitrox_softreq *sr,
+ struct nitrox_sglist *digest)
+{
+ struct rte_crypto_op *op = sr->op;
+ int cnt = 0;
+
+ sr->resp.orh = PENDING_SIG;
+ sr->out.sglist[cnt].len = sizeof(sr->resp.orh);
+ sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
+ resp.orh);
+ sr->out.sglist[cnt].virt = &sr->resp.orh;
+ cnt++;
+
+ sr->out.map_bufs_cnt = cnt;
+ if (op->sym->m_dst) {
+ int err;
+
+ err = create_cipher_auth_oop_outbuf(sr, digest);
+ if (unlikely(err))
+ return err;
+ } else {
+ create_cipher_auth_inplace_outbuf(sr, digest);
+ }
+
+ cnt = sr->out.map_bufs_cnt;
+ sr->resp.completion = PENDING_SIG;
+ sr->out.sglist[cnt].len = sizeof(sr->resp.completion);
+ sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
+ resp.completion);
+ sr->out.sglist[cnt].virt = &sr->resp.completion;
+ cnt++;
+
+ RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
+ sr->out.map_bufs_cnt = cnt;
+
+ create_sgcomp(&sr->out);
+ sr->rptr = sr->iova + offsetof(struct nitrox_softreq, out.sgcomp);
+
+ return 0;
+}
+
+static void
+create_aead_gph(uint32_t cryptlen, uint16_t ivlen, uint32_t authlen,
+ struct gphdr *gph)
+{
+ int auth_only_len;
+ union {
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint16_t iv_offset : 8;
+ uint16_t auth_offset : 8;
+#else
+ uint16_t auth_offset : 8;
+ uint16_t iv_offset : 8;
+#endif
+ };
+ uint16_t value;
+ } param3;
+
+ gph->param0 = rte_cpu_to_be_16(cryptlen);
+ gph->param1 = rte_cpu_to_be_16(authlen);
+
+ auth_only_len = authlen - cryptlen;
+ gph->param2 = rte_cpu_to_be_16(ivlen + auth_only_len);
+
+ param3.iv_offset = 0;
+ param3.auth_offset = ivlen;
+ gph->param3 = rte_cpu_to_be_16(param3.value);
+
+}
+
static int
process_cipher_auth_data(struct nitrox_softreq *sr)
{
- RTE_SET_USED(sr);
+ struct rte_crypto_op *op = sr->op;
+ int err;
+ struct nitrox_sglist digest;
+
+ softreq_copy_iv(sr);
+ err = extract_cipher_auth_digest(sr, &digest);
+ if (unlikely(err))
+ return err;
+
+ err = create_cipher_auth_inbuf(sr, &digest);
+ if (unlikely(err))
+ return err;
+
+ err = create_cipher_auth_outbuf(sr, &digest);
+ if (unlikely(err))
+ return err;
+
+ create_aead_gph(op->sym->cipher.data.length, sr->iv.len,
+ op->sym->auth.data.length, &sr->gph);
+
return 0;
}
@@ -135,6 +557,7 @@ process_softreq(struct nitrox_softreq *sr)
int err = 0;
switch (ctx->nitrox_chain) {
case NITROX_CHAIN_CIPHER_AUTH:
case NITROX_CHAIN_AUTH_CIPHER:
err = process_cipher_auth_data(sr);
+	break;
@@ -152,11 +575,11 @@ nitrox_process_se_req(uint16_t qno, struct rte_crypto_op *op,
struct nitrox_crypto_ctx *ctx,
struct nitrox_softreq *sr)
{
- RTE_SET_USED(qno);
softreq_init(sr, sr->iova);
sr->ctx = ctx;
sr->op = op;
process_softreq(sr);
+ create_se_instr(sr, qno);
sr->timeout = rte_get_timer_cycles() + CMD_TIMEOUT * rte_get_timer_hz();
return 0;
}
--
2.13.6
next prev parent reply other threads:[~2019-08-26 12:51 UTC|newest]
Thread overview: 60+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-07-17 5:29 [dpdk-dev] [PATCH 00/10] add Nitrox crypto device support Nagadheeraj Rottela
2019-07-17 5:29 ` [dpdk-dev] [PATCH 01/10] crypto/nitrox: add Nitrox build and doc skeleton Nagadheeraj Rottela
2019-07-17 5:29 ` [dpdk-dev] [PATCH 02/10] crypto/nitrox: add PCI probe and remove routines Nagadheeraj Rottela
2019-07-17 5:29 ` [dpdk-dev] [PATCH 03/10] crypto/nitrox: create Nitrox symmetric cryptodev Nagadheeraj Rottela
2019-07-17 5:29 ` [dpdk-dev] [PATCH 04/10] crypto/nitrox: add basic symmetric cryptodev operations Nagadheeraj Rottela
2019-07-17 5:29 ` [dpdk-dev] [PATCH 05/10] crypto/nitrox: add software queue management functionality Nagadheeraj Rottela
2019-07-17 5:29 ` [dpdk-dev] [PATCH 06/10] crypto/nitrox: add hardware " Nagadheeraj Rottela
2019-07-17 5:29 ` [dpdk-dev] [PATCH 07/10] crypto/nitrox: add session management operations Nagadheeraj Rottela
2019-07-17 5:29 ` [dpdk-dev] [PATCH 08/10] crypto/nitrox: add burst enqueue and dequeue operations Nagadheeraj Rottela
2019-07-17 14:16 ` Aaron Conole
2019-07-17 5:29 ` [dpdk-dev] [PATCH 09/10] crypto/nitrox: add cipher auth crypto chain processing Nagadheeraj Rottela
2019-07-17 5:29 ` [dpdk-dev] [PATCH 10/10] test/crypto: add tests for Nitrox PMD Nagadheeraj Rottela
2019-07-19 12:33 ` [dpdk-dev] [PATCH v2 00/10] add Nitrox crypto device support Nagadheeraj Rottela
2019-07-19 12:33 ` [dpdk-dev] [PATCH v2 01/10] crypto/nitrox: add Nitrox build and doc skeleton Nagadheeraj Rottela
2019-07-19 12:33 ` [dpdk-dev] [PATCH v2 02/10] crypto/nitrox: add PCI probe and remove routines Nagadheeraj Rottela
2019-07-19 12:33 ` [dpdk-dev] [PATCH v2 03/10] crypto/nitrox: create Nitrox symmetric cryptodev Nagadheeraj Rottela
2019-07-19 12:33 ` [dpdk-dev] [PATCH v2 04/10] crypto/nitrox: add basic symmetric cryptodev operations Nagadheeraj Rottela
2019-07-19 12:33 ` [dpdk-dev] [PATCH v2 05/10] crypto/nitrox: add software queue management functionality Nagadheeraj Rottela
2019-07-19 12:33 ` [dpdk-dev] [PATCH v2 06/10] crypto/nitrox: add hardware " Nagadheeraj Rottela
2019-07-19 12:33 ` [dpdk-dev] [PATCH v2 07/10] crypto/nitrox: add session management operations Nagadheeraj Rottela
2019-07-19 12:33 ` [dpdk-dev] [PATCH v2 08/10] crypto/nitrox: add burst enqueue and dequeue operations Nagadheeraj Rottela
2019-07-19 12:33 ` [dpdk-dev] [PATCH v2 09/10] crypto/nitrox: add cipher auth crypto chain processing Nagadheeraj Rottela
2019-07-19 12:33 ` [dpdk-dev] [PATCH v2 10/10] test/crypto: add tests for Nitrox PMD Nagadheeraj Rottela
2019-08-23 10:42 ` [dpdk-dev] [PATCH v3 00/11] add Nitrox crypto device support Nagadheeraj Rottela
2019-08-23 10:42 ` [dpdk-dev] [PATCH v3 01/11] crypto/nitrox: add Nitrox build and doc skeleton Nagadheeraj Rottela
2019-08-23 10:42 ` [dpdk-dev] [PATCH v3 02/11] crypto/nitrox: add PCI probe and remove routines Nagadheeraj Rottela
2019-08-23 10:42 ` [dpdk-dev] [PATCH v3 03/11] crypto/nitrox: create Nitrox symmetric cryptodev Nagadheeraj Rottela
2019-08-23 10:42 ` [dpdk-dev] [PATCH v3 04/11] crypto/nitrox: add basic symmetric cryptodev operations Nagadheeraj Rottela
2019-08-23 10:42 ` [dpdk-dev] [PATCH v3 05/11] crypto/nitrox: add software queue management functionality Nagadheeraj Rottela
2019-08-23 10:42 ` [dpdk-dev] [PATCH v3 06/11] crypto/nitrox: add hardware " Nagadheeraj Rottela
2019-08-23 10:42 ` [dpdk-dev] [PATCH v3 07/11] crypto/nitrox: add session management operations Nagadheeraj Rottela
2019-08-23 10:42 ` [dpdk-dev] [PATCH v3 08/11] crypto/nitrox: add burst enqueue and dequeue operations Nagadheeraj Rottela
2019-08-25 20:55 ` Mattias Rönnblom
2019-08-26 12:49 ` [dpdk-dev] [PATCH v4 00/11] add Nitrox crypto device support Nagadheeraj Rottela
2019-08-26 12:49 ` [dpdk-dev] [PATCH v4 01/11] crypto/nitrox: add Nitrox build and doc skeleton Nagadheeraj Rottela
2019-09-20 8:56 ` Akhil Goyal
2019-09-20 9:02 ` Akhil Goyal
2019-08-26 12:49 ` [dpdk-dev] [PATCH v4 02/11] crypto/nitrox: add PCI probe and remove routines Nagadheeraj Rottela
2019-09-20 9:15 ` Akhil Goyal
2019-08-26 12:49 ` [dpdk-dev] [PATCH v4 03/11] crypto/nitrox: create Nitrox symmetric cryptodev Nagadheeraj Rottela
2019-09-20 9:29 ` Akhil Goyal
2019-08-26 12:49 ` [dpdk-dev] [PATCH v4 04/11] crypto/nitrox: add basic symmetric cryptodev operations Nagadheeraj Rottela
2019-09-20 9:44 ` Akhil Goyal
2019-08-26 12:49 ` [dpdk-dev] [PATCH v4 05/11] crypto/nitrox: add software queue management functionality Nagadheeraj Rottela
2019-08-26 12:49 ` [dpdk-dev] [PATCH v4 06/11] crypto/nitrox: add hardware " Nagadheeraj Rottela
2019-08-26 12:49 ` [dpdk-dev] [PATCH v4 07/11] crypto/nitrox: add session management operations Nagadheeraj Rottela
2019-08-26 12:49 ` [dpdk-dev] [PATCH v4 08/11] crypto/nitrox: add burst enqueue and dequeue operations Nagadheeraj Rottela
2019-09-20 10:15 ` Akhil Goyal
2019-09-20 11:11 ` Nagadheeraj Rottela
2019-09-20 11:13 ` Akhil Goyal
2019-09-20 11:23 ` Nagadheeraj Rottela
2019-09-20 11:25 ` Akhil Goyal
2019-08-26 12:49 ` Nagadheeraj Rottela [this message]
2019-08-26 12:49 ` [dpdk-dev] [PATCH v4 10/11] test/crypto: add tests for Nitrox PMD Nagadheeraj Rottela
2019-08-26 12:49 ` [dpdk-dev] [PATCH v4 11/11] crypto/nitrox: add SHA224 and SHA256 HMAC algorithms Nagadheeraj Rottela
2019-09-20 8:49 ` Akhil Goyal
2019-09-20 10:16 ` Akhil Goyal
2019-08-23 10:42 ` [dpdk-dev] [PATCH v3 09/11] crypto/nitrox: add cipher auth crypto chain processing Nagadheeraj Rottela
2019-08-23 10:42 ` [dpdk-dev] [PATCH v3 10/11] test/crypto: add tests for Nitrox PMD Nagadheeraj Rottela
2019-08-23 10:42 ` [dpdk-dev] [PATCH v3 11/11] crypto/nitrox: add SHA224 and SHA256 HMAC algorithms Nagadheeraj Rottela
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190826124836.21187-10-rnagadheeraj@marvell.com \
--to=rnagadheeraj@marvell.com \
--cc=akhil.goyal@nxp.com \
--cc=dev@dpdk.org \
--cc=jsrikanth@marvell.com \
--cc=mattias.ronnblom@ericsson.com \
--cc=pablo.de.lara.guarch@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).