From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: Ruifeng Wang <ruifeng.wang@arm.com>,
Radu Nicolau <radu.nicolau@intel.com>,
Akhil Goyal <gakhil@marvell.com>
Cc: <jerinj@marvell.com>, <dev@dpdk.org>,
Nithin Dabilpuram <ndabilpuram@marvell.com>
Subject: [PATCH v2 5/5] examples/ipsec-secgw: update ether type using tunnel info
Date: Thu, 18 Aug 2022 00:41:46 +0530 [thread overview]
Message-ID: <20220817191146.30085-5-ndabilpuram@marvell.com> (raw)
In-Reply-To: <20220817191146.30085-1-ndabilpuram@marvell.com>
Update ether type for outbound SA processing based on tunnel header
information in both NEON functions for poll mode and event mode worker
functions.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec_neon.h | 41 +++++++++++++++++++++++++------------
examples/ipsec-secgw/ipsec_worker.c | 30 +++++++++++++++++++--------
2 files changed, 49 insertions(+), 22 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec_neon.h b/examples/ipsec-secgw/ipsec_neon.h
index 3f2d0a0..9c0498b 100644
--- a/examples/ipsec-secgw/ipsec_neon.h
+++ b/examples/ipsec-secgw/ipsec_neon.h
@@ -18,12 +18,13 @@ extern xmm_t val_eth[RTE_MAX_ETHPORTS];
*/
static inline void
processx4_step3(struct rte_mbuf *pkts[FWDSTEP], uint16_t dst_port[FWDSTEP],
- uint64_t tx_offloads, bool ip_cksum, uint8_t *l_pkt)
+ uint64_t tx_offloads, bool ip_cksum, bool is_ipv4, uint8_t *l_pkt)
{
uint32x4_t te[FWDSTEP];
uint32x4_t ve[FWDSTEP];
uint32_t *p[FWDSTEP];
struct rte_mbuf *pkt;
+ uint32_t val;
uint8_t i;
for (i = 0; i < FWDSTEP; i++) {
@@ -38,7 +39,15 @@ processx4_step3(struct rte_mbuf *pkts[FWDSTEP], uint16_t dst_port[FWDSTEP],
te[i] = vld1q_u32(p[i]);
/* Update last 4 bytes */
- ve[i] = vsetq_lane_u32(vgetq_lane_u32(te[i], 3), ve[i], 3);
+ val = vgetq_lane_u32(te[i], 3);
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ val &= 0xFFFFUL << 16;
+ val |= rte_cpu_to_be_16(is_ipv4 ? RTE_ETHER_TYPE_IPV4 : RTE_ETHER_TYPE_IPV6);
+#else
+ val &= 0xFFFFUL;
+ val |= rte_cpu_to_be_16(is_ipv4 ? RTE_ETHER_TYPE_IPV4 : RTE_ETHER_TYPE_IPV6) << 16;
+#endif
+ ve[i] = vsetq_lane_u32(val, ve[i], 3);
vst1q_u32(p[i], ve[i]);
if (ip_cksum) {
@@ -64,10 +73,11 @@ processx4_step3(struct rte_mbuf *pkts[FWDSTEP], uint16_t dst_port[FWDSTEP],
*/
static inline void
process_packet(struct rte_mbuf *pkt, uint16_t *dst_port, uint64_t tx_offloads,
- bool ip_cksum, uint8_t *l_pkt)
+ bool ip_cksum, bool is_ipv4, uint8_t *l_pkt)
{
struct rte_ether_hdr *eth_hdr;
uint32x4_t te, ve;
+ uint32_t val;
/* Check if it is a large packet */
if (pkt->pkt_len - RTE_ETHER_HDR_LEN > mtu_size)
@@ -78,7 +88,15 @@ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port, uint64_t tx_offloads,
te = vld1q_u32((uint32_t *)eth_hdr);
ve = vreinterpretq_u32_s32(val_eth[dst_port[0]]);
- ve = vcopyq_laneq_u32(ve, 3, te, 3);
+ val = vgetq_lane_u32(te, 3);
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ val &= 0xFFFFUL << 16;
+ val |= rte_cpu_to_be_16(is_ipv4 ? RTE_ETHER_TYPE_IPV4 : RTE_ETHER_TYPE_IPV6);
+#else
+ val &= 0xFFFFUL;
+ val |= rte_cpu_to_be_16(is_ipv4 ? RTE_ETHER_TYPE_IPV4 : RTE_ETHER_TYPE_IPV6) << 16;
+#endif
+ ve = vsetq_lane_u32(val, ve, 3);
vst1q_u32((uint32_t *)eth_hdr, ve);
if (ip_cksum) {
@@ -223,14 +241,14 @@ send_multi_pkts(struct rte_mbuf **pkts, uint16_t dst_port[MAX_PKT_BURST],
lp = pnum;
lp[0] = 1;
- processx4_step3(pkts, dst_port, tx_offloads, ip_cksum, &l_pkt);
+ processx4_step3(pkts, dst_port, tx_offloads, ip_cksum, is_ipv4, &l_pkt);
/* dp1: <d[0], d[1], d[2], d[3], ... > */
dp1 = vld1q_u16(dst_port);
for (i = FWDSTEP; i != k; i += FWDSTEP) {
- processx4_step3(&pkts[i], &dst_port[i], tx_offloads,
- ip_cksum, &l_pkt);
+ processx4_step3(&pkts[i], &dst_port[i], tx_offloads, ip_cksum, is_ipv4,
+ &l_pkt);
/*
* dp2:
@@ -268,20 +286,17 @@ send_multi_pkts(struct rte_mbuf **pkts, uint16_t dst_port[MAX_PKT_BURST],
/* Process up to last 3 packets one by one. */
switch (nb_rx % FWDSTEP) {
case 3:
- process_packet(pkts[i], dst_port + i, tx_offloads, ip_cksum,
- &l_pkt);
+ process_packet(pkts[i], dst_port + i, tx_offloads, ip_cksum, is_ipv4, &l_pkt);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, i);
i++;
/* fallthrough */
case 2:
- process_packet(pkts[i], dst_port + i, tx_offloads, ip_cksum,
- &l_pkt);
+ process_packet(pkts[i], dst_port + i, tx_offloads, ip_cksum, is_ipv4, &l_pkt);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, i);
i++;
/* fallthrough */
case 1:
- process_packet(pkts[i], dst_port + i, tx_offloads, ip_cksum,
- &l_pkt);
+ process_packet(pkts[i], dst_port + i, tx_offloads, ip_cksum, is_ipv4, &l_pkt);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, i);
}
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 803157d..5e69450 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -53,11 +53,8 @@ process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
}
static inline void
-update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
+update_mac_addrs(struct rte_ether_hdr *ethhdr, uint16_t portid)
{
- struct rte_ether_hdr *ethhdr;
-
- ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}
@@ -374,7 +371,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
/* else, we have a matching route */
/* Update mac addresses */
- update_mac_addrs(pkt, port_id);
+ update_mac_addrs(rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *), port_id);
/* Update the event with the dest port */
ipsec_event_pre_forward(pkt, port_id);
@@ -392,6 +389,7 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
struct rte_event *ev)
{
struct rte_ipsec_session *sess;
+ struct rte_ether_hdr *ethhdr;
struct sa_ctx *sa_ctx;
struct rte_mbuf *pkt;
uint16_t port_id = 0;
@@ -430,6 +428,7 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
goto drop_pkt_and_exit;
}
+ ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
/* Check if the packet has to be bypassed */
if (sa_idx == BYPASS) {
port_id = get_route(pkt, rt, type);
@@ -467,6 +466,9 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
/* Mark the packet for Tx security offload */
pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
+ /* Update ether type */
+ ethhdr->ether_type = (IS_IP4(sa->flags) ? rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6));
/* Get the port to which this pkt need to be submitted */
port_id = sa->portid;
@@ -476,7 +478,7 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
pkt->l2_len = RTE_ETHER_HDR_LEN;
/* Update mac addresses */
- update_mac_addrs(pkt, port_id);
+ update_mac_addrs(ethhdr, port_id);
/* Update the event with the dest port */
ipsec_event_pre_forward(pkt, port_id);
@@ -494,6 +496,7 @@ ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
{
struct rte_ipsec_session *sess;
+ struct rte_ether_hdr *ethhdr;
uint32_t sa_idx, i, j = 0;
uint16_t port_id = 0;
struct rte_mbuf *pkt;
@@ -505,7 +508,8 @@ ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
port_id = route4_pkt(pkt, rt->rt4_ctx);
if (port_id != RTE_MAX_ETHPORTS) {
/* Update mac addresses */
- update_mac_addrs(pkt, port_id);
+ ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+ update_mac_addrs(ethhdr, port_id);
/* Update the event with the dest port */
ipsec_event_pre_forward(pkt, port_id);
ev_vector_attr_update(vec, pkt);
@@ -520,7 +524,8 @@ ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
port_id = route6_pkt(pkt, rt->rt6_ctx);
if (port_id != RTE_MAX_ETHPORTS) {
/* Update mac addresses */
- update_mac_addrs(pkt, port_id);
+ ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+ update_mac_addrs(ethhdr, port_id);
/* Update the event with the dest port */
ipsec_event_pre_forward(pkt, port_id);
ev_vector_attr_update(vec, pkt);
@@ -553,7 +558,14 @@ ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
port_id = sa->portid;
- update_mac_addrs(pkt, port_id);
+
+ /* Fetch outer ip type and update */
+ ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+ ethhdr->ether_type = (IS_IP4(sa->flags) ?
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6));
+ update_mac_addrs(ethhdr, port_id);
+
ipsec_event_pre_forward(pkt, port_id);
ev_vector_attr_update(vec, pkt);
vec->mbufs[j++] = pkt;
--
2.8.4
next prev parent reply other threads:[~2022-08-17 19:12 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-07-07 7:29 [PATCH 1/4] mbuf: clarify meta data needed for Outbound Inline Nithin Dabilpuram
2022-07-07 7:29 ` [PATCH 2/4] security: clarify L2 header requirement for outbound inline Nithin Dabilpuram
2022-07-07 7:29 ` [PATCH 3/4] net/cnxk: remove l2 header update for outbound inline pkts Nithin Dabilpuram
2022-07-07 7:29 ` [PATCH 4/4] app/test: update l2 header based on tunnel ip version Nithin Dabilpuram
2022-08-17 19:11 ` [PATCH v2 1/5] mbuf: clarify meta data needed for Outbound Inline Nithin Dabilpuram
2022-08-17 19:11 ` [PATCH v2 2/5] security: clarify L2 header requirement for outbound inline Nithin Dabilpuram
2022-08-17 19:11 ` [PATCH v2 3/5] net/cnxk: remove L2 header update for outbound inline pkts Nithin Dabilpuram
2022-08-17 19:11 ` [PATCH v2 4/5] app/test: update L2 header based on tunnel IP version Nithin Dabilpuram
2022-08-18 9:04 ` Akhil Goyal
2022-08-17 19:11 ` Nithin Dabilpuram [this message]
2022-08-18 8:01 ` [PATCH v2 5/5] examples/ipsec-secgw: update ether type using tunnel info Ruifeng Wang
2022-08-18 8:26 ` Akhil Goyal
2022-08-22 14:38 ` [PATCH v3 1/5] mbuf: clarify meta data needed for Outbound Inline Nithin Dabilpuram
2022-08-22 14:38 ` [PATCH v3 2/5] security: clarify L2 header requirement for outbound inline Nithin Dabilpuram
2022-09-21 18:23 ` Akhil Goyal
2022-08-22 14:38 ` [PATCH v3 3/5] net/cnxk: remove L2 header update for outbound inline pkts Nithin Dabilpuram
2022-08-22 14:38 ` [PATCH v3 4/5] app/test: update L2 header based on tunnel IP version Nithin Dabilpuram
2022-08-22 14:38 ` [PATCH v3 5/5] examples/ipsec-secgw: update ether type using tunnel info Nithin Dabilpuram
2022-08-31 1:49 ` [PATCH v3 1/5] mbuf: clarify meta data needed for Outbound Inline Nithin Kumar Dabilpuram
2022-09-21 18:21 ` Akhil Goyal
2022-09-21 18:46 ` Thomas Monjalon
2022-09-22 15:48 ` Akhil Goyal
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220817191146.30085-5-ndabilpuram@marvell.com \
--to=ndabilpuram@marvell.com \
--cc=dev@dpdk.org \
--cc=gakhil@marvell.com \
--cc=jerinj@marvell.com \
--cc=radu.nicolau@intel.com \
--cc=ruifeng.wang@arm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).