From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: <jerinj@marvell.com>, Radu Nicolau <radu.nicolau@intel.com>,
Akhil Goyal <gakhil@marvell.com>
Cc: <dev@dpdk.org>, <anoobj@marvell.com>,
Nithin Dabilpuram <ndabilpuram@marvell.com>
Subject: [PATCH 6/7] examples/ipsec-secgw: update eth header during route lookup
Date: Tue, 22 Mar 2022 23:28:44 +0530
Message-ID: <20220322175902.363520-6-ndabilpuram@marvell.com>
In-Reply-To: <20220322175902.363520-1-ndabilpuram@marvell.com>
Update the Ethernet header during route lookup instead of doing it
much later in Tx burst preparation. The advantage of doing it at
route lookup is that no additional IP version checks based on packet
data are needed, and the packet data is already in cache since the
route lookup has just consumed it.

This is also useful for inline protocol offload cases of v4inv6 or
v6inv4 outbound tunnel operations, where the packet data carries no
information about the tunnel protocol.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 9 +-
examples/ipsec-secgw/ipsec_worker.h | 197 ++++++++++++++++++++++--------------
2 files changed, 129 insertions(+), 77 deletions(-)
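Note for reviewers: the per-packet work that moves into route lookup
looks roughly like the standalone sketch below. This is illustrative
only, showing just the IPv4 path, and is not code from the patch;
port_ethaddr[] is a hypothetical stand-in for the app's ethaddr_tbl[],
and a failed rte_pktmbuf_prepend() is left unchecked, as in the
example app itself.

#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Hypothetical per-port MAC table standing in for ethaddr_tbl[] */
struct port_addrs {
	struct rte_ether_addr src;
	struct rte_ether_addr dst;
};
extern struct port_addrs port_ethaddr[RTE_MAX_ETHPORTS];

static inline void
set_eth_hdr_v4(struct rte_mbuf *pkt, uint16_t port, uint64_t tx_offloads)
{
	struct rte_ether_hdr *ethhdr;
	struct rte_ipv4_hdr *ip;

	/* mbuf data currently starts at the IP header; prepend L2 */
	ethhdr = (struct rte_ether_hdr *)
		rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);

	/* Minimum offload data the Tx path needs */
	pkt->l2_len = RTE_ETHER_HDR_LEN;
	pkt->l3_len = sizeof(struct rte_ipv4_hdr);
	pkt->ol_flags |= RTE_MBUF_F_TX_IPV4 | tx_offloads;

	ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
	ip->hdr_checksum = 0;
	/* Compute the IPv4 checksum in SW when HW offload is absent */
	if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
		ip->hdr_checksum = rte_ipv4_cksum(ip);

	ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
	rte_ether_addr_copy(&port_ethaddr[port].src, &ethhdr->src_addr);
	rte_ether_addr_copy(&port_ethaddr[port].dst, &ethhdr->dst_addr);
}

route6_pkts() follows the same pattern minus the checksum handling,
and send_fragment_packet() strips the prepended header before
fragmenting, then applies the same update to each fragment.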
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index a04b5e8..84f6150 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -562,7 +562,8 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
process_pkts_outbound(&qconf->outbound, &traffic);
}
- route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
+ route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
}
@@ -613,7 +614,8 @@ drain_inbound_crypto_queues(const struct lcore_conf *qconf,
if (trf.ip4.num != 0) {
inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0,
&core_statistics[lcoreid].inbound.spd4);
- route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
}
/* process ipv6 packets */
@@ -647,7 +649,8 @@ drain_outbound_crypto_queues(const struct lcore_conf *qconf,
/* process ipv4 packets */
if (trf.ip4.num != 0)
- route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
/* process ipv6 packets */
if (trf.ip6.num != 0)
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 838b3f6..b183248 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -245,60 +245,6 @@ prepare_traffic(struct rte_security_ctx *ctx, struct rte_mbuf **pkts,
prepare_one_packet(ctx, pkts[i], t);
}
-static __rte_always_inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
- const struct lcore_conf *qconf)
-{
- struct ip *ip;
- struct rte_ether_hdr *ethhdr;
-
- ip = rte_pktmbuf_mtod(pkt, struct ip *);
-
- ethhdr = (struct rte_ether_hdr *)
- rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
-
- if (ip->ip_v == IPVERSION) {
- pkt->ol_flags |= qconf->outbound.ipv4_offloads;
- pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ip->ip_sum = 0;
-
- /* calculate IPv4 cksum in SW */
- if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
- ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
- } else {
- pkt->ol_flags |= qconf->outbound.ipv6_offloads;
- pkt->l3_len = sizeof(struct ip6_hdr);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- }
-
- memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
- sizeof(struct rte_ether_addr));
- memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
- sizeof(struct rte_ether_addr));
-}
-
-static __rte_always_inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
- const struct lcore_conf *qconf)
-{
- int32_t i;
- const int32_t prefetch_offset = 2;
-
- for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
- rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
- prepare_tx_pkt(pkts[i], port, qconf);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_tx_pkt(pkts[i], port, qconf);
-}
-
/* Send burst of packets on an output interface */
static __rte_always_inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
@@ -310,8 +256,6 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
queueid = qconf->tx_queue_id[port];
m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
- prepare_tx_burst(m_table, n, port, qconf);
-
ret = rte_eth_tx_burst(port, queueid, m_table, n);
core_stats_update_tx(ret);
@@ -332,8 +276,11 @@ static __rte_always_inline uint32_t
send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
uint16_t port, uint8_t proto)
{
+ struct rte_ether_hdr *ethhdr;
+ struct rte_ipv4_hdr *ip;
+ struct rte_mbuf *pkt;
struct buffer *tbl;
- uint32_t len, n;
+ uint32_t len, n, i;
int32_t rc;
tbl = qconf->tx_mbufs + port;
@@ -347,6 +294,9 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
n = RTE_DIM(tbl->m_table) - len;
+ /* Strip the ethernet header that was prepended earlier */
+ rte_pktmbuf_adj(m, RTE_ETHER_HDR_LEN);
+
if (proto == IPPROTO_IP)
rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
n, mtu_size, m->pool, qconf->frag.pool_indir);
@@ -354,13 +304,51 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
n, mtu_size, m->pool, qconf->frag.pool_indir);
- if (rc >= 0)
- len += rc;
- else
+ if (rc < 0) {
RTE_LOG(ERR, IPSEC,
"%s: failed to fragment packet with size %u, "
"error code: %d\n",
__func__, m->pkt_len, rte_errno);
+ rc = 0;
+ }
+
+ i = len;
+ len += rc;
+ for (; i < len; i++) {
+ pkt = tbl->m_table[i];
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ if (proto == IPPROTO_IP) {
+ ethhdr->ether_type =
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
+ pkt->ol_flags |= qconf->outbound.ipv4_offloads;
+
+ ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
+ ip->hdr_checksum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
+ ip->hdr_checksum = rte_ipv4_cksum(ip);
+ } else {
+ ethhdr->ether_type =
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv6_hdr);
+ pkt->ol_flags |= qconf->outbound.ipv6_offloads;
+ }
+
+ memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+ }
free_pkts(&m, 1);
return len;
@@ -379,7 +367,8 @@ send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
qconf = &lcore_conf[lcore_id];
len = qconf->tx_mbufs[port].len;
- if (m->pkt_len <= mtu_size) {
+ /* L2 header is already part of packet */
+ if (m->pkt_len - RTE_ETHER_HDR_LEN <= mtu_size) {
qconf->tx_mbufs[port].m_table[len] = m;
len++;
@@ -475,14 +464,18 @@ get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
}
static __rte_always_inline void
-route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[],
+ uint8_t nb_pkts, uint64_t tx_offloads, bool ip_cksum)
{
uint32_t hop[MAX_PKT_BURST * 2];
uint32_t dst_ip[MAX_PKT_BURST * 2];
+ struct rte_ether_hdr *ethhdr;
int32_t pkt_hop = 0;
uint16_t i, offset;
uint16_t lpm_pkts = 0;
unsigned int lcoreid = rte_lcore_id();
+ struct rte_mbuf *pkt;
+ uint16_t port;
if (nb_pkts == 0)
return;
@@ -492,12 +485,13 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ pkt = pkts[i];
+ if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
offset = offsetof(struct ip, ip_dst);
- dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
+ dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkt,
uint32_t *, offset);
dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
lpm_pkts++;
@@ -509,9 +503,10 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ pkt = pkts[i];
+ if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
+ pkt_hop = get_hop_for_offload_pkt(pkt, 0);
} else {
/* Need to use hop returned by lookup */
pkt_hop = hop[lpm_pkts++];
@@ -519,10 +514,41 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
core_statistics[lcoreid].lpm4.miss++;
- free_pkts(&pkts[i], 1);
+ free_pkts(&pkt, 1);
continue;
}
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
+
+ port = pkt_hop & 0xff;
+
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+ pkt->ol_flags |= RTE_MBUF_F_TX_IPV4;
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+ if (ip_cksum) {
+ struct rte_ipv4_hdr *ip;
+
+ pkt->ol_flags |= tx_offloads;
+
+ ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
+ ip->hdr_checksum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
+ ip->hdr_checksum = rte_ipv4_cksum(ip);
+ }
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+
+ send_single_packet(pkt, port, IPPROTO_IP);
}
}
@@ -531,11 +557,14 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
int32_t hop[MAX_PKT_BURST * 2];
uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+ struct rte_ether_hdr *ethhdr;
uint8_t *ip6_dst;
int32_t pkt_hop = 0;
uint16_t i, offset;
uint16_t lpm_pkts = 0;
unsigned int lcoreid = rte_lcore_id();
+ struct rte_mbuf *pkt;
+ uint16_t port;
if (nb_pkts == 0)
return;
@@ -545,12 +574,13 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ pkt = pkts[i];
+ if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
offset = offsetof(struct ip6_hdr, ip6_dst);
- ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
+ ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
offset);
memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
lpm_pkts++;
@@ -563,9 +593,10 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ pkt = pkts[i];
+ if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
+ pkt_hop = get_hop_for_offload_pkt(pkt, 1);
} else {
/* Need to use hop returned by lookup */
pkt_hop = hop[lpm_pkts++];
@@ -573,10 +604,28 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
if (pkt_hop == -1) {
core_statistics[lcoreid].lpm6.miss++;
- free_pkts(&pkts[i], 1);
+ free_pkts(&pkt, 1);
continue;
}
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
+
+ port = pkt_hop & 0xff;
+
+ /* Update minimum offload data */
+ pkt->ol_flags |= RTE_MBUF_F_TX_IPV6;
+ pkt->l3_len = sizeof(struct ip6_hdr);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+
+ send_single_packet(pkt, port, IPPROTO_IPV6);
}
}
--
2.8.4