From: Fan Zhang <roy.fan.zhang@intel.com>
To: dev@dpdk.org
Cc: konstantin.ananyev@intel.com, declan.doherty@intel.com,
akhil.goyal@nxp.com, Fan Zhang <roy.fan.zhang@intel.com>
Subject: [dpdk-dev] [PATCH 08/10] ipsec: add rte_security cpu_crypto action support
Date: Fri, 6 Sep 2019 14:13:28 +0100
Message-ID: <20190906131330.40185-9-roy.fan.zhang@intel.com>
In-Reply-To: <20190906131330.40185-1-roy.fan.zhang@intel.com>
This patch updates the ipsec library to handle the newly introduced
RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO action. The ESP inbound and outbound
paths (tunnel and transport) gain synchronous data-path functions that
gather each packet into an iovec list and call
rte_security_process_cpu_crypto_bulk() directly, instead of preparing
crypto ops for an asynchronous cryptodev queue.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
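For reference, a minimal sketch of the intended usage from the application
side (illustration only, not part of the patch): the SA, the security
context and the rte_security session on the crypto PMD are assumed to be
created beforehand (e.g. as in patch 09/10), and error handling is trimmed.

#include <string.h>

#include <rte_ipsec.h>
#include <rte_mbuf.h>
#include <rte_security.h>

/*
 * Process one burst of ESP packets with the CPU_CRYPTO action.
 * "sa", "sec_ctx" and "sec_ses" are hypothetical, pre-created objects.
 */
static uint16_t
cpu_crypto_process_burst(struct rte_ipsec_sa *sa,
	struct rte_security_ctx *sec_ctx,
	struct rte_security_session *sec_ses,
	struct rte_mbuf *mb[], uint16_t num)
{
	struct rte_ipsec_session ss;

	memset(&ss, 0, sizeof(ss));
	ss.sa = sa;
	/* action type introduced in patch 01/10 */
	ss.type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
	/* session_check() in ses.c now requires both ctx and ses */
	ss.security.ctx = sec_ctx;
	ss.security.ses = sec_ses;

	/* binds ss.pkt_func.process to one of the esp_*_sync_crypto_*
	 * handlers added by this patch */
	if (rte_ipsec_session_prepare(&ss) != 0)
		return 0;

	/* no enqueue/dequeue stage: the crypto operation is executed
	 * synchronously inside process() via
	 * rte_security_process_cpu_crypto_bulk() */
	return rte_ipsec_pkt_process(&ss, mb, num);
}

In a real application rte_ipsec_session_prepare() would of course be
called once at SA setup time rather than per burst.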
lib/librte_ipsec/esp_inb.c | 174 +++++++++++++++++++++++++-
lib/librte_ipsec/esp_outb.c | 290 +++++++++++++++++++++++++++++++++++++++++++-
lib/librte_ipsec/sa.c | 53 ++++++--
lib/librte_ipsec/sa.h | 29 +++++
lib/librte_ipsec/ses.c | 4 +-
5 files changed, 539 insertions(+), 11 deletions(-)
diff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c
index 8e3ecbc64..6077dcb1e 100644
--- a/lib/librte_ipsec/esp_inb.c
+++ b/lib/librte_ipsec/esp_inb.c
@@ -105,6 +105,73 @@ inb_cop_prepare(struct rte_crypto_op *cop,
}
}
+static inline int
+inb_sync_crypto_proc_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+ const union sym_op_data *icv, uint32_t pofs, uint32_t plen,
+ struct rte_security_vec *buf, struct iovec *cur_vec,
+ void *iv, void **aad, void **digest)
+{
+ struct rte_mbuf *ms;
+ struct iovec *vec = cur_vec;
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ uint64_t *ivp;
+ uint32_t algo, left, off = 0, n_seg = 0;
+
+ ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+ pofs + sizeof(struct rte_esp_hdr));
+ algo = sa->algo_type;
+
+ switch (algo) {
+ case ALGO_TYPE_AES_GCM:
+ gcm = (struct aead_gcm_iv *)iv;
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ *aad = icv->va + sa->icv_len;
+ off = sa->ctp.cipher.offset + pofs;
+ break;
+ case ALGO_TYPE_AES_CBC:
+ case ALGO_TYPE_3DES_CBC:
+ off = sa->ctp.auth.offset + pofs;
+ break;
+ case ALGO_TYPE_AES_CTR:
+ off = sa->ctp.auth.offset + pofs;
+ ctr = (struct aesctr_cnt_blk *)iv;
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_NULL:
+ break;
+ }
+
+ *digest = icv->va;
+
+ left = plen - sa->ctp.cipher.length;
+
+ ms = mbuf_get_seg_ofs(mb, &off);
+ if (!ms)
+ return -1;
+
+ while (n_seg < RTE_LIBRTE_IP_FRAG_MAX_FRAG && left && ms) {
+ uint32_t len = RTE_MIN(left, ms->data_len - off);
+
+ vec->iov_base = rte_pktmbuf_mtod_offset(ms, void *, off);
+ vec->iov_len = len;
+
+ left -= len;
+ vec++;
+ n_seg++;
+ ms = ms->next;
+ off = 0;
+ }
+
+ if (left)
+ return -1;
+
+ buf->vec = cur_vec;
+ buf->num = n_seg;
+
+ return n_seg;
+}
+
/*
* Helper function for prepare() to deal with situation when
* ICV is spread by two segments. Tries to move ICV completely into the
@@ -512,7 +579,6 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
return k;
}
-
/*
* *process* function for tunnel packets
*/
@@ -625,6 +691,112 @@ esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
return n;
}
+/*
+ * process packets using sync crypto engine
+ */
+static uint16_t
+esp_inb_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num, uint8_t sqh_len,
+ esp_inb_process_t process)
+{
+ int32_t rc;
+ uint32_t i, k, hl, n, p;
+ struct rte_ipsec_sa *sa;
+ struct replay_sqn *rsn;
+ union sym_op_data icv;
+ uint32_t sqn[num];
+ uint32_t dr[num];
+ struct rte_security_vec buf[num];
+ struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
+ uint32_t vec_idx = 0;
+ uint8_t ivs[num][IPSEC_MAX_IV_SIZE];
+ void *iv[num];
+ void *aad[num];
+ void *digest[num];
+ int status[num];
+
+ sa = ss->sa;
+ rsn = rsn_acquire(sa);
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+ hl = mb[i]->l2_len + mb[i]->l3_len;
+ rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
+ if (rc >= 0) {
+ iv[k] = (void *)ivs[k];
+ rc = inb_sync_crypto_proc_prepare(sa, mb[i], &icv, hl,
+ rc, &buf[k], &vec[vec_idx], iv[k],
+ &aad[k], &digest[k]);
+ if (rc < 0) {
+ dr[i - k] = i;
+ continue;
+ }
+
+ vec_idx += rc;
+ k++;
+ } else
+ dr[i - k] = i;
+ }
+
+ /* copy not prepared mbufs beyond good ones */
+ if (k != num) {
+ rte_errno = EBADMSG;
+
+ if (unlikely(k == 0))
+ return 0;
+
+ move_bad_mbufs(mb, dr, num, num - k);
+ }
+
+ /* process the packets */
+ n = 0;
+ rte_security_process_cpu_crypto_bulk(ss->security.ctx,
+ ss->security.ses, buf, iv, aad, digest, status,
+ k);
+ /* move packets that failed processing to dr */
+ for (i = 0; i < k; i++) {
+ if (status[i]) {
+ dr[n++] = i;
+ rte_errno = EBADMSG;
+ }
+ }
+
+ /* move bad packets to the back */
+ if (n)
+ move_bad_mbufs(mb, dr, k, n);
+
+ /* IPsec post-processing of the decrypted packets */
+ p = process(sa, mb, sqn, dr, k - n, sqh_len);
+
+ if (p != k - n && p != 0)
+ move_bad_mbufs(mb, dr, k - n, k - n - p);
+
+ if (p != num)
+ rte_errno = EBADMSG;
+
+ return p;
+}
+
+uint16_t
+esp_inb_tun_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ struct rte_ipsec_sa *sa = ss->sa;
+
+ return esp_inb_sync_crypto_pkt_process(ss, mb, num, sa->sqh_len,
+ tun_process);
+}
+
+uint16_t
+esp_inb_trs_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ struct rte_ipsec_sa *sa = ss->sa;
+
+ return esp_inb_sync_crypto_pkt_process(ss, mb, num, sa->sqh_len,
+ trs_process);
+}
+
/*
* process group of ESP inbound tunnel packets.
*/
diff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c
index 55799a867..097cb663f 100644
--- a/lib/librte_ipsec/esp_outb.c
+++ b/lib/librte_ipsec/esp_outb.c
@@ -403,6 +403,292 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
return k;
}
+
+static inline int
+outb_sync_crypto_proc_prepare(struct rte_mbuf *m, const struct rte_ipsec_sa *sa,
+ const uint64_t ivp[IPSEC_MAX_IV_QWORD],
+ const union sym_op_data *icv, uint32_t hlen, uint32_t plen,
+ struct rte_security_vec *buf, struct iovec *cur_vec, void *iv,
+ void **aad, void **digest)
+{
+ struct rte_mbuf *ms;
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ struct iovec *vec = cur_vec;
+ uint32_t left, off = 0, n_seg = 0;
+ uint32_t algo;
+
+ algo = sa->algo_type;
+
+ switch (algo) {
+ case ALGO_TYPE_AES_GCM:
+ gcm = iv;
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ *aad = (void *)(icv->va + sa->icv_len);
+ off = sa->ctp.cipher.offset + hlen;
+ break;
+ case ALGO_TYPE_AES_CBC:
+ case ALGO_TYPE_3DES_CBC:
+ off = sa->ctp.auth.offset + hlen;
+ break;
+ case ALGO_TYPE_AES_CTR:
+ ctr = iv;
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_NULL:
+ break;
+ }
+
+ *digest = (void *)icv->va;
+
+ left = sa->ctp.cipher.length + plen;
+
+ ms = mbuf_get_seg_ofs(m, &off);
+ if (!ms)
+ return -1;
+
+ while (n_seg < RTE_LIBRTE_IP_FRAG_MAX_FRAG && left && ms) {
+ uint32_t len = RTE_MIN(left, ms->data_len - off);
+
+ vec->iov_base = rte_pktmbuf_mtod_offset(ms, void *, off);
+ vec->iov_len = len;
+
+ left -= len;
+ vec++;
+ n_seg++;
+ ms = ms->next;
+ off = 0;
+ }
+
+ if (left)
+ return -1;
+
+ buf->vec = cur_vec;
+ buf->num = n_seg;
+
+ return n_seg;
+}
+
+/**
+ * Local post-process function prototype, same as the process()
+ * prototype in rte_ipsec_sa_pkt_func.
+ */
+typedef uint16_t (*sync_crypto_post_process)(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[],
+ uint16_t num);
+static uint16_t
+esp_outb_tun_sync_crypto_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num,
+ sync_crypto_post_process post_process)
+{
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ struct rte_security_ctx *ctx;
+ struct rte_security_session *rss;
+ union sym_op_data icv;
+ struct rte_security_vec buf[num];
+ struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
+ uint32_t vec_idx = 0;
+ void *aad[num];
+ void *digest[num];
+ void *iv[num];
+ uint8_t ivs[num][IPSEC_MAX_IV_SIZE];
+ uint64_t ivp[IPSEC_MAX_IV_QWORD];
+ int status[num];
+ uint32_t dr[num];
+ uint32_t i, n, k;
+ int32_t rc;
+
+ sa = ss->sa;
+ ctx = ss->security.ctx;
+ rss = ss->security.ses;
+
+ k = 0;
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ for (i = 0; i != n; i++) {
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(ivp, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_tun_pkt_prepare(sa, sqc, ivp, mb[i], &icv,
+ sa->sqh_len);
+
+ /* success, setup crypto op */
+ if (rc >= 0) {
+ outb_pkt_xprepare(sa, sqc, &icv);
+
+ iv[k] = (void *)ivs[k];
+ rc = outb_sync_crypto_proc_prepare(mb[i], sa, ivp, &icv,
+ 0, rc, &buf[k], &vec[vec_idx], iv[k],
+ &aad[k], &digest[k]);
+ if (rc < 0) {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ continue;
+ }
+
+ vec_idx += rc;
+ k++;
+ /* failure, put packet into the death-row */
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* copy not prepared mbufs beyond good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ if (unlikely(k == 0)) {
+ rte_errno = EBADMSG;
+ return 0;
+ }
+
+ /* process the packets */
+ n = 0;
+ rte_security_process_cpu_crypto_bulk(ctx, rss, buf, iv, aad, digest,
+ status, k);
+ /* move packets that failed processing to dr */
+ for (i = 0; i < k; i++) {
+ if (status[i])
+ dr[n++] = i;
+ }
+
+ if (n)
+ move_bad_mbufs(mb, dr, k, n);
+
+ return post_process(ss, mb, k - n);
+}
+
+static uint16_t
+esp_outb_trs_sync_crypto_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num,
+ sync_crypto_post_process post_process)
+
+{
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ struct rte_security_ctx *ctx;
+ struct rte_security_session *rss;
+ union sym_op_data icv;
+ struct rte_security_vec buf[num];
+ struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
+ uint32_t vec_idx = 0;
+ void *aad[num];
+ void *digest[num];
+ uint8_t ivs[num][IPSEC_MAX_IV_SIZE];
+ void *iv[num];
+ int status[num];
+ uint64_t ivp[IPSEC_MAX_IV_QWORD];
+ uint32_t dr[num];
+ uint32_t i, n, k;
+ uint32_t l2, l3;
+ int32_t rc;
+
+ sa = ss->sa;
+ ctx = ss->security.ctx;
+ rss = ss->security.ses;
+
+ k = 0;
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ for (i = 0; i != n; i++) {
+ l2 = mb[i]->l2_len;
+ l3 = mb[i]->l3_len;
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(ivp, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_trs_pkt_prepare(sa, sqc, ivp, mb[i], l2, l3, &icv,
+ sa->sqh_len);
+
+ /* success, setup crypto op */
+ if (rc >= 0) {
+ outb_pkt_xprepare(sa, sqc, &icv);
+
+ iv[k] = (void *)ivs[k];
+
+ rc = outb_sync_crypto_proc_prepare(mb[i], sa, ivp, &icv,
+ l2 + l3, rc, &buf[k], &vec[vec_idx],
+ iv[k], &aad[k], &digest[k]);
+ if (rc < 0) {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ continue;
+ }
+
+ vec_idx += rc;
+ k++;
+ /* failure, put packet into the death-row */
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* copy not prepared mbufs beyond good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ /* process the packets */
+ n = 0;
+ rte_security_process_cpu_crypto_bulk(ctx, rss, buf, iv, aad, digest,
+ status, k);
+ /* move packets that failed processing to dr */
+ for (i = 0; i < k; i++) {
+ if (status[i])
+ dr[n++] = i;
+ }
+
+ if (n)
+ move_bad_mbufs(mb, dr, k, n);
+
+ return post_process(ss, mb, k - n);
+}
+
+uint16_t
+esp_outb_tun_sync_crypto_sqh_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_outb_tun_sync_crypto_process(ss, mb, num,
+ esp_outb_sqh_process);
+}
+
+uint16_t
+esp_outb_tun_sync_crypto_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_outb_tun_sync_crypto_process(ss, mb, num,
+ esp_outb_pkt_flag_process);
+}
+
+uint16_t
+esp_outb_trs_sync_crypto_sqh_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_outb_trs_sync_crypto_process(ss, mb, num,
+ esp_outb_sqh_process);
+}
+
+uint16_t
+esp_outb_trs_sync_crypto_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_outb_trs_sync_crypto_process(ss, mb, num,
+ esp_outb_pkt_flag_process);
+}
+
/*
* process outbound packets for SA with ESN support,
* for algorithms that require SQN.hibits to be implicitly included
@@ -410,8 +696,8 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
* In that case we have to move ICV bytes back to their proper place.
*/
uint16_t
-esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
- uint16_t num)
+esp_outb_sqh_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
{
uint32_t i, k, icv_len, *icv;
struct rte_mbuf *ml;
diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
index 23d394b46..31ffbce2c 100644
--- a/lib/librte_ipsec/sa.c
+++ b/lib/librte_ipsec/sa.c
@@ -544,9 +544,9 @@ lksd_proto_prepare(const struct rte_ipsec_session *ss,
* - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
* - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
*/
-static uint16_t
-pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
- uint16_t num)
+uint16_t
+esp_outb_pkt_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
{
uint32_t i, k;
uint32_t dr[num];
@@ -599,12 +599,48 @@ lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
pf->prepare = esp_outb_tun_prepare;
pf->process = (sa->sqh_len != 0) ?
- esp_outb_sqh_process : pkt_flag_process;
+ esp_outb_sqh_process : esp_outb_pkt_flag_process;
break;
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
pf->prepare = esp_outb_trs_prepare;
pf->process = (sa->sqh_len != 0) ?
- esp_outb_sqh_process : pkt_flag_process;
+ esp_outb_sqh_process : esp_outb_pkt_flag_process;
+ break;
+ default:
+ rc = -ENOTSUP;
+ }
+
+ return rc;
+}
+
+static int
+lksd_sync_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
+ struct rte_ipsec_sa_pkt_func *pf)
+{
+ int32_t rc;
+
+ static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+ RTE_IPSEC_SATP_MODE_MASK;
+
+ rc = 0;
+ switch (sa->type & msk) {
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->process = esp_inb_tun_sync_crypto_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->process = esp_inb_trs_sync_crypto_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->process = (sa->sqh_len != 0) ?
+ esp_outb_tun_sync_crypto_sqh_process :
+ esp_outb_tun_sync_crypto_flag_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->process = (sa->sqh_len != 0) ?
+ esp_outb_trs_sync_crypto_sqh_process :
+ esp_outb_trs_sync_crypto_flag_process;
break;
default:
rc = -ENOTSUP;
@@ -672,13 +708,16 @@ ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
RTE_IPSEC_SATP_DIR_IB)
- pf->process = pkt_flag_process;
+ pf->process = esp_outb_pkt_flag_process;
else
pf->process = inline_proto_outb_pkt_process;
break;
case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
pf->prepare = lksd_proto_prepare;
- pf->process = pkt_flag_process;
+ pf->process = esp_outb_pkt_flag_process;
+ break;
+ case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+ rc = lksd_sync_crypto_pkt_func_select(sa, pf);
break;
default:
rc = -ENOTSUP;
diff --git a/lib/librte_ipsec/sa.h b/lib/librte_ipsec/sa.h
index 51e69ad05..02c7abc60 100644
--- a/lib/librte_ipsec/sa.h
+++ b/lib/librte_ipsec/sa.h
@@ -156,6 +156,14 @@ uint16_t
inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
+uint16_t
+esp_inb_tun_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_inb_trs_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
/* outbound processing */
uint16_t
@@ -170,6 +178,10 @@ uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
uint16_t num);
+uint16_t
+esp_outb_pkt_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
@@ -182,4 +194,21 @@ uint16_t
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
+uint16_t
+esp_outb_tun_sync_crypto_sqh_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_outb_tun_sync_crypto_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_outb_trs_sync_crypto_sqh_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_outb_trs_sync_crypto_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+
#endif /* _SA_H_ */
diff --git a/lib/librte_ipsec/ses.c b/lib/librte_ipsec/ses.c
index 82c765a33..eaa8c17b7 100644
--- a/lib/librte_ipsec/ses.c
+++ b/lib/librte_ipsec/ses.c
@@ -19,7 +19,9 @@ session_check(struct rte_ipsec_session *ss)
return -EINVAL;
if ((ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
ss->type ==
- RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) &&
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+ ss->type ==
+ RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) &&
ss->security.ctx == NULL)
return -EINVAL;
}
--
2.14.5