From: Radu Nicolau <radu.nicolau@intel.com>
To: Konstantin Ananyev <konstantin.ananyev@intel.com>,
Bernard Iremonger <bernard.iremonger@intel.com>,
Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Cc: dev@dpdk.org, gakhil@marvell.com, anoobj@marvell.com,
Radu Nicolau <radu.nicolau@intel.com>,
Declan Doherty <declan.doherty@intel.com>,
Abhijit Sinha <abhijit.sinha@intel.com>,
Daniel Martin Buckley <daniel.m.buckley@intel.com>,
Fan Zhang <roy.fan.zhang@intel.com>
Subject: [dpdk-dev] [PATCH 1/2] ipsec: add transmit segmentation offload support
Date: Mon, 18 Oct 2021 15:58:23 +0100 [thread overview]
Message-ID: <20211018145824.1211074-2-radu.nicolau@intel.com> (raw)
In-Reply-To: <20211018145824.1211074-1-radu.nicolau@intel.com>
Add support for transmit segmentation offload (TSO) to the inline crypto
processing mode. The other processing modes do not support this offload
since, at a minimum, it requires the network interface to support inline
crypto for IPsec. (A usage sketch follows the '---' separator below.)
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Daniel Martin Buckley <daniel.m.buckley@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
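Note (not part of the commit message): a minimal usage sketch of the new
offload, assuming DPDK 21.11-era mbuf flag names (PKT_TX_*), an inline
crypto capable port and an already configured rte_ipsec_session. The
helper prepare_tso_mbuf() and the MSS value of 1400 are illustrative
only, not part of this patch.

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>
#include <rte_ipsec.h>

/* hypothetical helper: mark an outbound mbuf for TCP segmentation */
static void
prepare_tso_mbuf(struct rte_mbuf *m)
{
	/* header lengths of the plain (pre-encapsulation) packet */
	m->l2_len = RTE_ETHER_HDR_LEN;
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);

	/* request TSO; the NIC segments the packet after encryption is
	 * set up, so the library must reserve one SQN per future segment */
	m->ol_flags |= PKT_TX_TCP_SEG;
	m->tso_segsz = 1400;	/* illustrative MSS */
}

static uint16_t
process_burst(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint16_t i;

	for (i = 0; i != num; i++)
		prepare_tso_mbuf(mb[i]);

	/* for an inline crypto session this dispatches to
	 * inline_outb_tun_pkt_process()/inline_outb_trs_pkt_process() */
	return rte_ipsec_pkt_process(ss, mb, num);
}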
doc/guides/prog_guide/ipsec_lib.rst | 2 +
doc/guides/rel_notes/release_21_11.rst | 1 +
lib/ipsec/esp_outb.c | 131 +++++++++++++++++++------
3 files changed, 106 insertions(+), 28 deletions(-)
diff --git a/doc/guides/prog_guide/ipsec_lib.rst b/doc/guides/prog_guide/ipsec_lib.rst
index 1bafdc608c..2a262f8c51 100644
--- a/doc/guides/prog_guide/ipsec_lib.rst
+++ b/doc/guides/prog_guide/ipsec_lib.rst
@@ -315,6 +315,8 @@ Supported features
* NAT-T / UDP encapsulated ESP.
+* TSO support (only for inline crypto mode)
+
* algorithms: 3DES-CBC, AES-CBC, AES-CTR, AES-GCM, AES_CCM, CHACHA20_POLY1305,
AES_GMAC, HMAC-SHA1, NULL.
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index f6d2bc6f48..955b0bd68f 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -201,6 +201,7 @@ New Features
* Added support for NAT-T / UDP encapsulated ESP
* Added support for SA telemetry.
* Added support for setting a non default starting ESN value.
+ * Added support for TSO offload; only supported for inline crypto mode.
Removed Items
diff --git a/lib/ipsec/esp_outb.c b/lib/ipsec/esp_outb.c
index b6c72f58a4..c9fba662f2 100644
--- a/lib/ipsec/esp_outb.c
+++ b/lib/ipsec/esp_outb.c
@@ -18,7 +18,7 @@
typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
- union sym_op_data *icv, uint8_t sqh_len);
+ union sym_op_data *icv, uint8_t sqh_len, uint8_t tso);
/*
* helper function to fill crypto_sym op for cipher+auth algorithms.
@@ -139,7 +139,7 @@ outb_cop_prepare(struct rte_crypto_op *cop,
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
- union sym_op_data *icv, uint8_t sqh_len)
+ union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
{
uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
struct rte_mbuf *ml;
@@ -157,11 +157,20 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
/* number of bytes to encrypt */
clen = plen + sizeof(*espt);
- clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
+
+ /* We don't need to pad/align packet when using TSO offload */
+ if (!tso)
+ clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
+
/* pad length + esp tail */
pdlen = clen - plen;
- tlen = pdlen + sa->icv_len + sqh_len;
+
+ /* We don't append ICV length when using TSO offload */
+ if (!tso)
+ tlen = pdlen + sa->icv_len + sqh_len;
+ else
+ tlen = pdlen + sqh_len;
/* do append and prepend */
ml = rte_pktmbuf_lastseg(mb);
@@ -309,7 +318,7 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
/* try to update the packet itself */
rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
- sa->sqh_len);
+ sa->sqh_len, 0);
/* success, setup crypto op */
if (rc >= 0) {
outb_pkt_xprepare(sa, sqc, &icv);
@@ -336,7 +345,7 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
- union sym_op_data *icv, uint8_t sqh_len)
+ union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
{
uint8_t np;
uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
@@ -358,11 +367,19 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
/* number of bytes to encrypt */
clen = plen + sizeof(*espt);
- clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
+
+ /* We don't need to pad/align packet when using TSO offload */
+ if (!tso)
+ clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
/* pad length + esp tail */
pdlen = clen - plen;
- tlen = pdlen + sa->icv_len + sqh_len;
+
+ /* We don't append ICV length when using TSO offload */
+ if (!tso)
+ tlen = pdlen + sa->icv_len + sqh_len;
+ else
+ tlen = pdlen + sqh_len;
/* do append and insert */
ml = rte_pktmbuf_lastseg(mb);
@@ -452,7 +469,7 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
/* try to update the packet itself */
rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
- sa->sqh_len);
+ sa->sqh_len, 0);
/* success, setup crypto op */
if (rc >= 0) {
outb_pkt_xprepare(sa, sqc, &icv);
@@ -549,7 +566,7 @@ cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
gen_iv(ivbuf[k], sqc);
/* try to update the packet itself */
- rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);
+ rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len, 0);
/* success, proceed with preparations */
if (rc >= 0) {
@@ -668,6 +685,20 @@ inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
ss->sa->statistics.bytes += bytes;
}
+
+static inline int
+esn_outb_nb_segments(struct rte_mbuf *m)
+{
+ if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+ uint16_t pkt_l3len = m->pkt_len - m->l2_len;
+ uint16_t segments =
+ (m->tso_segsz > 0 && pkt_l3len > m->tso_segsz) ?
+ (pkt_l3len + m->tso_segsz - 1) / m->tso_segsz : 1;
+ return segments;
+ }
+ return 1; /* no TSO */
+}
+
/*
* process group of ESP outbound tunnel packets destined for
* INLINE_CRYPTO type of device.
@@ -677,29 +708,51 @@ inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num)
{
int32_t rc;
- uint32_t i, k, n;
+ uint32_t i, k, nb_segs_t, n_sqn;
uint64_t sqn;
rte_be64_t sqc;
struct rte_ipsec_sa *sa;
union sym_op_data icv;
uint64_t iv[IPSEC_MAX_IV_QWORD];
uint32_t dr[num];
+ uint16_t nb_segs[num];
sa = ss->sa;
+ nb_segs_t = 0;
+ /* Calculate number of segments */
+ for (i = 0; i != num; i++) {
+ nb_segs[i] = esn_outb_nb_segments(mb[i]);
+ nb_segs_t += nb_segs[i];
+ }
- n = num;
- sqn = esn_outb_update_sqn(sa, &n);
- if (n != num)
+ n_sqn = nb_segs_t;
+ sqn = esn_outb_update_sqn(sa, &n_sqn);
+ if (n_sqn != nb_segs_t) {
rte_errno = EOVERFLOW;
+ /* if there are segmented packets find out how many can be
+ * sent until overflow occurs
+ */
+ if (nb_segs_t > num) { /* there is at least 1 */
+ uint32_t seg_cnt = 0;
+ for (i = 0; i < num && seg_cnt < n_sqn; i++)
+ seg_cnt += nb_segs[i];
+ num = (seg_cnt <= n_sqn) ? i : i - 1;
+ } else {
+ num = n_sqn; /* no segmented packets */
+ }
+ }
k = 0;
- for (i = 0; i != n; i++) {
+ for (i = 0; i != num; i++) {
- sqc = rte_cpu_to_be_64(sqn + i);
+ sqc = rte_cpu_to_be_64(sqn);
gen_iv(iv, sqc);
+ sqn += nb_segs[i];
/* try to update the packet itself */
- rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
+ rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
+ (mb[i]->ol_flags &
+ (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) != 0);
k += (rc >= 0);
@@ -711,8 +764,8 @@ inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
}
/* copy not processed mbufs beyond good ones */
- if (k != n && k != 0)
- move_bad_mbufs(mb, dr, n, n - k);
+ if (k != num && k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
inline_outb_mbuf_prepare(ss, mb, k);
return k;
@@ -727,29 +780,51 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num)
{
int32_t rc;
- uint32_t i, k, n;
+ uint32_t i, k, nb_segs_t, n_sqn;
uint64_t sqn;
rte_be64_t sqc;
struct rte_ipsec_sa *sa;
union sym_op_data icv;
uint64_t iv[IPSEC_MAX_IV_QWORD];
uint32_t dr[num];
+ uint16_t nb_segs[num];
sa = ss->sa;
+ nb_segs_t = 0;
+ /* Calculate number of segments */
+ for (i = 0; i != num; i++) {
+ nb_segs[i] = esn_outb_nb_segments(mb[i]);
+ nb_segs_t += nb_segs[i];
+ }
- n = num;
- sqn = esn_outb_update_sqn(sa, &n);
- if (n != num)
+ n_sqn = nb_segs_t;
+ sqn = esn_outb_update_sqn(sa, &n_sqn);
+ if (n_sqn != nb_segs_t) {
rte_errno = EOVERFLOW;
+ /* if there are segmented packets find out how many can be
+ * sent until overflow occurs
+ */
+ if (nb_segs_t > num) { /* there is at least 1 */
+ uint32_t seg_cnt = 0;
+ for (i = 0; i < num && seg_cnt < n_sqn; i++)
+ seg_cnt += nb_segs[i];
+ num = (seg_cnt <= n_sqn) ? i : i - 1;
+ } else {
+ num = n_sqn; /* no segmented packets */
+ }
+ }
k = 0;
- for (i = 0; i != n; i++) {
+ for (i = 0; i != num; i++) {
- sqc = rte_cpu_to_be_64(sqn + i);
+ sqc = rte_cpu_to_be_64(sqn);
gen_iv(iv, sqc);
+ sqn += nb_segs[i];
/* try to update the packet itself */
- rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
+ rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
+ (mb[i]->ol_flags &
+ (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) != 0);
k += (rc >= 0);
@@ -761,8 +836,8 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
}
/* copy not processed mbufs beyond good ones */
- if (k != n && k != 0)
- move_bad_mbufs(mb, dr, n, n - k);
+ if (k != num && k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
inline_outb_mbuf_prepare(ss, mb, k);
return k;
--
2.25.1
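For reference (not part of the patch), a standalone sketch of the segment
accounting that esn_outb_nb_segments() introduces: one ESP sequence number
is consumed per segment the NIC will produce, computed by ceiling division
of the post-L2 length by tso_segsz. The function and values below are
illustrative.

#include <stdint.h>
#include <stdio.h>

/* mirrors the patch's accounting: ceil((pkt_len - l2_len) / tso_segsz),
 * with a floor of one segment for non-TSO or small packets */
static uint16_t
nb_segments(uint32_t pkt_len, uint32_t l2_len, uint16_t tso_segsz)
{
	uint32_t l3len = pkt_len - l2_len;

	if (tso_segsz == 0 || l3len <= tso_segsz)
		return 1;
	return (l3len + tso_segsz - 1) / tso_segsz;	/* ceiling division */
}

int
main(void)
{
	/* a 9014-byte mbuf with a 14-byte Ethernet header and MSS 1460:
	 * ceil(9000 / 1460) = 7 segments, i.e. 7 consecutive SQNs */
	printf("%u\n", nb_segments(9014, 14, 1460));
	return 0;
}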