* [PATCH 2/7] examples/ipsec-secgw: use HW parsed packet type in poll mode
2022-03-22 17:58 [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
@ 2022-03-22 17:58 ` Nithin Dabilpuram
2022-04-14 15:43 ` Ananyev, Konstantin
2022-03-22 17:58 ` [PATCH 3/7] examples/ipsec-secgw: allow larger burst size for vectors Nithin Dabilpuram
` (9 subsequent siblings)
10 siblings, 1 reply; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-03-22 17:58 UTC (permalink / raw)
To: jerinj, Radu Nicolau, Akhil Goyal; +Cc: dev, anoobj, Nithin Dabilpuram
Use the HW parsed packet type when the ethdev supports the necessary
protocols. If packet type parsing is not supported, register an ethdev
Rx callback to parse the packet type in SW. This is better for
performance as it affects the fast path.
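In short, the per-port flow is roughly the following (a condensed,
hypothetical helper just to illustrate the idea; the real
check_ptype()/parse_ptype_cb() implementation is in the diff below):

    /* Hypothetical helper; not part of the patch. */
    static void
    setup_ptype_parsing(uint16_t portid, uint16_t queue)
    {
        uint32_t mask = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
                        RTE_PTYPE_TUNNEL_MASK;

        /* If the driver cannot report the needed L3/L4/tunnel ptypes,
         * fall back to SW parsing via an Rx callback so that the
         * common fast path keeps using HW parsed ptypes.
         */
        if (rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0) <= 0)
            rte_eth_add_rx_callback(portid, queue, parse_ptype_cb, NULL);
    }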
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 259 +++++++++++++++++++++++++++----------
1 file changed, 194 insertions(+), 65 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 76919e5..e8f9e90 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -374,53 +374,30 @@ print_stats_cb(__rte_unused void *param)
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
+ uint32_t ptype = pkt->packet_type;
const struct rte_ether_hdr *eth;
const struct rte_ipv4_hdr *iph4;
const struct rte_ipv6_hdr *iph6;
- const struct rte_udp_hdr *udp;
- uint16_t ip4_hdr_len;
- uint16_t nat_port;
+ uint32_t tun_type, l3_type;
+
+ tun_type = ptype & RTE_PTYPE_TUNNEL_MASK;
+ l3_type = ptype & RTE_PTYPE_L3_MASK;
eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
- if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
-
+ if (l3_type == RTE_PTYPE_L3_IPV4) {
iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
RTE_ETHER_HDR_LEN);
adjust_ipv4_pktlen(pkt, iph4, 0);
- switch (iph4->next_proto_id) {
- case IPPROTO_ESP:
+ if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- ip4_hdr_len = ((iph4->version_ihl &
- RTE_IPV4_HDR_IHL_MASK) *
- RTE_IPV4_IHL_MULTIPLIER);
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, ip4_hdr_len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
+ } else {
t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
t->ip4.pkts[(t->ip4.num)++] = pkt;
}
pkt->l2_len = 0;
pkt->l3_len = sizeof(*iph4);
- pkt->packet_type |= RTE_PTYPE_L3_IPV4;
- if (pkt->packet_type & RTE_PTYPE_L4_TCP)
- pkt->l4_len = sizeof(struct rte_tcp_hdr);
- else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
- pkt->l4_len = sizeof(struct rte_udp_hdr);
- } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ } else if (l3_type & RTE_PTYPE_L3_IPV6) {
int next_proto;
size_t l3len, ext_len;
uint8_t *p;
@@ -430,47 +407,37 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
RTE_ETHER_HDR_LEN);
adjust_ipv6_pktlen(pkt, iph6, 0);
- next_proto = iph6->proto;
-
- /* determine l3 header size up to ESP extension */
l3len = sizeof(struct ip6_hdr);
- p = rte_pktmbuf_mtod(pkt, uint8_t *);
- while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
- (next_proto = rte_ipv6_get_next_ext(p + l3len,
- next_proto, &ext_len)) >= 0)
- l3len += ext_len;
- /* drop packet when IPv6 header exceeds first segment length */
- if (unlikely(l3len > pkt->data_len)) {
- free_pkts(&pkt, 1);
- return;
- }
-
- switch (next_proto) {
- case IPPROTO_ESP:
+ if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, l3len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
+ } else {
t->ip6.data[t->ip6.num] = &iph6->proto;
t->ip6.pkts[(t->ip6.num)++] = pkt;
}
+
+ /* Determine l3 header size up to ESP extension by walking
+ * through extension headers.
+ */
+ if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
+ l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
+ p = rte_pktmbuf_mtod(pkt, uint8_t *);
+ next_proto = iph6->proto;
+ while (next_proto != IPPROTO_ESP &&
+ l3len < pkt->data_len &&
+ (next_proto = rte_ipv6_get_next_ext(p + l3len,
+ next_proto, &ext_len)) >= 0)
+ l3len += ext_len;
+
+ /* Drop pkt when IPv6 header exceeds first seg size */
+ if (unlikely(l3len > pkt->data_len)) {
+ free_pkts(&pkt, 1);
+ return;
+ }
+ }
+
pkt->l2_len = 0;
pkt->l3_len = l3len;
- pkt->packet_type |= RTE_PTYPE_L3_IPV6;
} else {
/* Unknown/Unsupported type, drop the packet */
RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
@@ -479,6 +446,11 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
return;
}
+ if (ptype & RTE_PTYPE_L4_TCP)
+ pkt->l4_len = sizeof(struct rte_tcp_hdr);
+ else if (ptype & RTE_PTYPE_L4_UDP)
+ pkt->l4_len = sizeof(struct rte_udp_hdr);
+
/* Check if the packet has been processed inline. For inline protocol
* processed packets, the metadata in the mbuf can be used to identify
* the security processing done on the packet. The metadata will be
@@ -2249,6 +2221,147 @@ cryptodevs_init(uint16_t req_queue_num)
return total_nb_qps;
}
+static int
+check_ptype(int portid)
+{
+ int l3_ipv4 = 0, l3_ipv6 = 0, l4_udp = 0, tunnel_esp = 0;
+ int i, nb_ptypes;
+ uint32_t mask;
+
+ mask = (RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
+ RTE_PTYPE_TUNNEL_MASK);
+
+ nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0);
+ if (nb_ptypes <= 0)
+ return 0;
+
+ uint32_t ptypes[nb_ptypes];
+
+ nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, ptypes, nb_ptypes);
+ for (i = 0; i < nb_ptypes; ++i) {
+ if (ptypes[i] & RTE_PTYPE_L3_IPV4)
+ l3_ipv4 = 1;
+ if (ptypes[i] & RTE_PTYPE_L3_IPV6)
+ l3_ipv6 = 1;
+ if (ptypes[i] & RTE_PTYPE_TUNNEL_ESP)
+ tunnel_esp = 1;
+ if (ptypes[i] & RTE_PTYPE_L4_UDP)
+ l4_udp = 1;
+ }
+
+ if (l3_ipv4 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
+
+ if (l3_ipv6 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
+
+ if (l4_udp == 0)
+ printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
+
+ if (tunnel_esp == 0)
+ printf("port %d cannot parse RTE_PTYPE_TUNNEL_ESP\n", portid);
+
+ if (l3_ipv4 && l3_ipv6 && l4_udp && tunnel_esp)
+ return 1;
+
+ return 0;
+
+}
+
+static inline void
+parse_ptype(struct rte_mbuf *m)
+{
+ uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+ const struct rte_ipv4_hdr *iph4;
+ const struct rte_ipv6_hdr *iph6;
+ const struct rte_ether_hdr *eth;
+ const struct rte_udp_hdr *udp;
+ uint16_t nat_port, ether_type;
+ int next_proto = 0;
+ size_t ext_len = 0;
+ const uint8_t *p;
+ uint32_t l3len;
+
+ eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+ ether_type = eth->ether_type;
+
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+ iph4 = (const struct rte_ipv4_hdr *)(eth + 1);
+ l3len = ((iph4->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER);
+
+ if (l3len == sizeof(struct rte_ipv4_hdr))
+ packet_type |= RTE_PTYPE_L3_IPV4;
+ else
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+
+ next_proto = iph4->next_proto_id;
+ p = (const uint8_t *)iph4;
+ } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ iph6 = (const struct rte_ipv6_hdr *)(eth + 1);
+ l3len = sizeof(struct ip6_hdr);
+
+ /* determine l3 header size up to ESP extension */
+ next_proto = iph6->proto;
+ p = (const uint8_t *)iph6;
+ while (next_proto != IPPROTO_ESP && l3len < m->data_len &&
+ (next_proto = rte_ipv6_get_next_ext(p + l3len,
+ next_proto, &ext_len)) >= 0)
+ l3len += ext_len;
+
+ /* Skip IPv6 header exceeds first segment length */
+ if (unlikely(l3len + RTE_ETHER_HDR_LEN > m->data_len))
+ goto exit;
+
+ if (l3len == sizeof(struct ip6_hdr))
+ packet_type |= RTE_PTYPE_L3_IPV6;
+ else
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT;
+ }
+
+ switch (next_proto) {
+ case IPPROTO_ESP:
+ packet_type |= RTE_PTYPE_TUNNEL_ESP;
+ break;
+ case IPPROTO_UDP:
+ if (app_sa_prm.udp_encap == 1) {
+ udp = (const struct rte_udp_hdr *)(p + l3len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port)
+ packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ }
+ break;
+ default:
+ break;
+ }
+exit:
+ m->packet_type = packet_type;
+}
+
+static uint16_t
+parse_ptype_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ uint16_t max_pkts __rte_unused,
+ void *user_param __rte_unused)
+{
+ uint32_t i;
+
+ if (unlikely(nb_pkts == 0))
+ return nb_pkts;
+
+ rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct ether_hdr *));
+ for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
+ struct ether_hdr *));
+ parse_ptype(pkts[i]);
+ }
+ parse_ptype(pkts[i]);
+
+ return nb_pkts;
+}
+
static void
port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
{
@@ -2260,6 +2373,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
struct lcore_conf *qconf;
struct rte_ether_addr ethaddr;
struct rte_eth_conf local_port_conf = port_conf;
+ int ptype_supported;
ret = rte_eth_dev_info_get(portid, &dev_info);
if (ret != 0)
@@ -2357,6 +2471,11 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
"err=%d, port=%d\n", ret, portid);
+ /* Check if required ptypes are supported */
+ ptype_supported = check_ptype(portid);
+ if (!ptype_supported)
+ printf("Port %d: softly parse packet type info\n", portid);
+
/* init one TX queue per lcore */
tx_queueid = 0;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
@@ -2418,6 +2537,16 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup: err=%d, "
"port=%d\n", ret, portid);
+
+ /* Register Rx callback if ptypes are not supported */
+ if (!ptype_supported &&
+ !rte_eth_add_rx_callback(portid, queue,
+ parse_ptype_cb, NULL)) {
+ printf("Failed to add rx callback: port=%d, "
+ "queue=%d\n", portid, queue);
+ }
+
+
}
}
printf("\n");
--
2.8.4
* RE: [PATCH 2/7] examples/ipsec-secgw: use HW parsed packet type in poll mode
2022-03-22 17:58 ` [PATCH 2/7] examples/ipsec-secgw: use HW parsed packet type in poll mode Nithin Dabilpuram
@ 2022-04-14 15:43 ` Ananyev, Konstantin
0 siblings, 0 replies; 37+ messages in thread
From: Ananyev, Konstantin @ 2022-04-14 15:43 UTC (permalink / raw)
To: Nithin Dabilpuram, jerinj, Nicolau, Radu, Akhil Goyal; +Cc: dev, anoobj
> Use the HW parsed packet type when the ethdev supports the necessary
> protocols. If packet type parsing is not supported, register an ethdev
> Rx callback to parse the packet type in SW. This is better for
> performance as it affects the fast path.
>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
> ---
> examples/ipsec-secgw/ipsec-secgw.c | 259 +++++++++++++++++++++++++++----------
> 1 file changed, 194 insertions(+), 65 deletions(-)
>
> diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
> index 76919e5..e8f9e90 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.c
> +++ b/examples/ipsec-secgw/ipsec-secgw.c
> @@ -374,53 +374,30 @@ print_stats_cb(__rte_unused void *param)
> static inline void
> prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
> {
> + uint32_t ptype = pkt->packet_type;
> const struct rte_ether_hdr *eth;
> const struct rte_ipv4_hdr *iph4;
> const struct rte_ipv6_hdr *iph6;
> - const struct rte_udp_hdr *udp;
> - uint16_t ip4_hdr_len;
> - uint16_t nat_port;
> + uint32_t tun_type, l3_type;
> +
> + tun_type = ptype & RTE_PTYPE_TUNNEL_MASK;
> + l3_type = ptype & RTE_PTYPE_L3_MASK;
>
> eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
> - if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
> -
> + if (l3_type == RTE_PTYPE_L3_IPV4) {
> iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
> RTE_ETHER_HDR_LEN);
> adjust_ipv4_pktlen(pkt, iph4, 0);
>
> - switch (iph4->next_proto_id) {
> - case IPPROTO_ESP:
> + if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
> t->ipsec.pkts[(t->ipsec.num)++] = pkt;
> - break;
> - case IPPROTO_UDP:
> - if (app_sa_prm.udp_encap == 1) {
> - ip4_hdr_len = ((iph4->version_ihl &
> - RTE_IPV4_HDR_IHL_MASK) *
> - RTE_IPV4_IHL_MULTIPLIER);
> - udp = rte_pktmbuf_mtod_offset(pkt,
> - struct rte_udp_hdr *, ip4_hdr_len);
> - nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
> - if (udp->src_port == nat_port ||
> - udp->dst_port == nat_port){
> - t->ipsec.pkts[(t->ipsec.num)++] = pkt;
> - pkt->packet_type |=
> - MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
> - break;
> - }
> - }
> - /* Fall through */
> - default:
> + } else {
> t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
> t->ip4.pkts[(t->ip4.num)++] = pkt;
> }
> pkt->l2_len = 0;
> pkt->l3_len = sizeof(*iph4);
> - pkt->packet_type |= RTE_PTYPE_L3_IPV4;
> - if (pkt->packet_type & RTE_PTYPE_L4_TCP)
> - pkt->l4_len = sizeof(struct rte_tcp_hdr);
> - else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
> - pkt->l4_len = sizeof(struct rte_udp_hdr);
> - } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
> + } else if (l3_type & RTE_PTYPE_L3_IPV6) {
As a nit:
RTE_ETH_IS_IPV6_HDR(l3_type)
> int next_proto;
> size_t l3len, ext_len;
> uint8_t *p;
> @@ -430,47 +407,37 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
> RTE_ETHER_HDR_LEN);
> adjust_ipv6_pktlen(pkt, iph6, 0);
>
> - next_proto = iph6->proto;
> -
> - /* determine l3 header size up to ESP extension */
> l3len = sizeof(struct ip6_hdr);
> - p = rte_pktmbuf_mtod(pkt, uint8_t *);
> - while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
> - (next_proto = rte_ipv6_get_next_ext(p + l3len,
> - next_proto, &ext_len)) >= 0)
> - l3len += ext_len;
>
> - /* drop packet when IPv6 header exceeds first segment length */
> - if (unlikely(l3len > pkt->data_len)) {
> - free_pkts(&pkt, 1);
> - return;
> - }
> -
> - switch (next_proto) {
> - case IPPROTO_ESP:
> + if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
> t->ipsec.pkts[(t->ipsec.num)++] = pkt;
> - break;
> - case IPPROTO_UDP:
> - if (app_sa_prm.udp_encap == 1) {
> - udp = rte_pktmbuf_mtod_offset(pkt,
> - struct rte_udp_hdr *, l3len);
> - nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
> - if (udp->src_port == nat_port ||
> - udp->dst_port == nat_port){
> - t->ipsec.pkts[(t->ipsec.num)++] = pkt;
> - pkt->packet_type |=
> - MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
> - break;
> - }
> - }
> - /* Fall through */
> - default:
> + } else {
> t->ip6.data[t->ip6.num] = &iph6->proto;
> t->ip6.pkts[(t->ip6.num)++] = pkt;
> }
> +
> + /* Determine l3 header size up to ESP extension by walking
> + * through extension headers.
> + */
> + if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
> + l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
> + p = rte_pktmbuf_mtod(pkt, uint8_t *);
> + next_proto = iph6->proto;
> + while (next_proto != IPPROTO_ESP &&
> + l3len < pkt->data_len &&
> + (next_proto = rte_ipv6_get_next_ext(p + l3len,
> + next_proto, &ext_len)) >= 0)
> + l3len += ext_len;
> +
> + /* Drop pkt when IPv6 header exceeds first seg size */
> + if (unlikely(l3len > pkt->data_len)) {
> + free_pkts(&pkt, 1);
> + return;
> + }
> + }
> +
> pkt->l2_len = 0;
> pkt->l3_len = l3len;
> - pkt->packet_type |= RTE_PTYPE_L3_IPV6;
> } else {
> /* Unknown/Unsupported type, drop the packet */
> RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
> @@ -479,6 +446,11 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
> return;
> }
>
> + if (ptype & RTE_PTYPE_L4_TCP)
I think it needs to be:
if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
Same for UDP.
Though it seems that it was there already since
commit a7f32947a316c4757a315239752596ca1cf1b268.
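I.e. roughly (just a sketch of the suggested check):

    if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
        pkt->l4_len = sizeof(struct rte_tcp_hdr);
    else if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
        pkt->l4_len = sizeof(struct rte_udp_hdr);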
> + pkt->l4_len = sizeof(struct rte_tcp_hdr);
> + else if (ptype & RTE_PTYPE_L4_UDP)
> + pkt->l4_len = sizeof(struct rte_udp_hdr);
> +
> /* Check if the packet has been processed inline. For inline protocol
> * processed packets, the metadata in the mbuf can be used to identify
> * the security processing done on the packet. The metadata will be
> @@ -2249,6 +2221,147 @@ cryptodevs_init(uint16_t req_queue_num)
> return total_nb_qps;
> }
>
> +static int
> +check_ptype(int portid)
> +{
> + int l3_ipv4 = 0, l3_ipv6 = 0, l4_udp = 0, tunnel_esp = 0;
> + int i, nb_ptypes;
> + uint32_t mask;
> +
> + mask = (RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
> + RTE_PTYPE_TUNNEL_MASK);
> +
> + nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0);
> + if (nb_ptypes <= 0)
> + return 0;
> +
> + uint32_t ptypes[nb_ptypes];
> +
> + nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, ptypes, nb_ptypes);
> + for (i = 0; i < nb_ptypes; ++i) {
> + if (ptypes[i] & RTE_PTYPE_L3_IPV4)
As a nit: RTE_ETH_IS_IPV4_HDR(ptypes[i])
> + l3_ipv4 = 1;
> + if (ptypes[i] & RTE_PTYPE_L3_IPV6)
As a nit: RTE_ETH_IS_IPV6_HDR(ptypes[i])
> + l3_ipv6 = 1;
> + if (ptypes[i] & RTE_PTYPE_TUNNEL_ESP)
if ((ptypes[i] & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
> + tunnel_esp = 1;
> + if (ptypes[i] & RTE_PTYPE_L4_UDP)
Same as above.
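That is, something along these lines (sketch only):

    if ((ptypes[i] & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
        tunnel_esp = 1;
    if ((ptypes[i] & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
        l4_udp = 1;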
> + l4_udp = 1;
> + }
> +
> + if (l3_ipv4 == 0)
> + printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
> +
> + if (l3_ipv6 == 0)
> + printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
> +
> + if (l4_udp == 0)
> + printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
> +
> + if (tunnel_esp == 0)
> + printf("port %d cannot parse RTE_PTYPE_TUNNEL_ESP\n", portid);
> +
> + if (l3_ipv4 && l3_ipv6 && l4_udp && tunnel_esp)
> + return 1;
> +
> + return 0;
> +
> +}
> +
> +static inline void
> +parse_ptype(struct rte_mbuf *m)
> +{
> + uint32_t packet_type = RTE_PTYPE_UNKNOWN;
> + const struct rte_ipv4_hdr *iph4;
> + const struct rte_ipv6_hdr *iph6;
> + const struct rte_ether_hdr *eth;
> + const struct rte_udp_hdr *udp;
> + uint16_t nat_port, ether_type;
> + int next_proto = 0;
> + size_t ext_len = 0;
> + const uint8_t *p;
> + uint32_t l3len;
> +
> + eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
> + ether_type = eth->ether_type;
> +
> + if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
> + iph4 = (const struct rte_ipv4_hdr *)(eth + 1);
> + l3len = ((iph4->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
> + RTE_IPV4_IHL_MULTIPLIER);
> +
> + if (l3len == sizeof(struct rte_ipv4_hdr))
> + packet_type |= RTE_PTYPE_L3_IPV4;
> + else
> + packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
> +
> + next_proto = iph4->next_proto_id;
> + p = (const uint8_t *)iph4;
> + } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
> + iph6 = (const struct rte_ipv6_hdr *)(eth + 1);
> + l3len = sizeof(struct ip6_hdr);
> +
> + /* determine l3 header size up to ESP extension */
> + next_proto = iph6->proto;
> + p = (const uint8_t *)iph6;
> + while (next_proto != IPPROTO_ESP && l3len < m->data_len &&
> + (next_proto = rte_ipv6_get_next_ext(p + l3len,
> + next_proto, &ext_len)) >= 0)
> + l3len += ext_len;
> +
> + /* Skip IPv6 header exceeds first segment length */
> + if (unlikely(l3len + RTE_ETHER_HDR_LEN > m->data_len))
> + goto exit;
> +
> + if (l3len == sizeof(struct ip6_hdr))
> + packet_type |= RTE_PTYPE_L3_IPV6;
> + else
> + packet_type |= RTE_PTYPE_L3_IPV6_EXT;
> + }
> +
> + switch (next_proto) {
> + case IPPROTO_ESP:
> + packet_type |= RTE_PTYPE_TUNNEL_ESP;
> + break;
> + case IPPROTO_UDP:
> + if (app_sa_prm.udp_encap == 1) {
> + udp = (const struct rte_udp_hdr *)(p + l3len);
> + nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
> + if (udp->src_port == nat_port ||
> + udp->dst_port == nat_port)
> + packet_type |=
> + MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
> + }
> + break;
> + default:
> + break;
> + }
> +exit:
> + m->packet_type = packet_type;
> +}
> +
> +static uint16_t
> +parse_ptype_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
> + struct rte_mbuf *pkts[], uint16_t nb_pkts,
> + uint16_t max_pkts __rte_unused,
> + void *user_param __rte_unused)
> +{
> + uint32_t i;
> +
> + if (unlikely(nb_pkts == 0))
> + return nb_pkts;
> +
> + rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct ether_hdr *));
> + for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
> + rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
> + struct ether_hdr *));
> + parse_ptype(pkts[i]);
> + }
> + parse_ptype(pkts[i]);
> +
> + return nb_pkts;
> +}
> +
> static void
> port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
> {
> @@ -2260,6 +2373,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
> struct lcore_conf *qconf;
> struct rte_ether_addr ethaddr;
> struct rte_eth_conf local_port_conf = port_conf;
> + int ptype_supported;
>
> ret = rte_eth_dev_info_get(portid, &dev_info);
> if (ret != 0)
> @@ -2357,6 +2471,11 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
> rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
> "err=%d, port=%d\n", ret, portid);
>
> + /* Check if required ptypes are supported */
> + ptype_supported = check_ptype(portid);
> + if (!ptype_supported)
> + printf("Port %d: softly parse packet type info\n", portid);
> +
> /* init one TX queue per lcore */
> tx_queueid = 0;
> for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> @@ -2418,6 +2537,16 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
> rte_exit(EXIT_FAILURE,
> "rte_eth_rx_queue_setup: err=%d, "
> "port=%d\n", ret, portid);
> +
> + /* Register Rx callback if ptypes are not supported */
> + if (!ptype_supported &&
> + !rte_eth_add_rx_callback(portid, queue,
> + parse_ptype_cb, NULL)) {
> + printf("Failed to add rx callback: port=%d, "
> + "queue=%d\n", portid, queue);
> + }
> +
> +
> }
> }
> printf("\n");
> --
> 2.8.4
* [PATCH 3/7] examples/ipsec-secgw: allow larger burst size for vectors
2022-03-22 17:58 [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
2022-03-22 17:58 ` [PATCH 2/7] examples/ipsec-secgw: use HW parsed packet type in poll mode Nithin Dabilpuram
@ 2022-03-22 17:58 ` Nithin Dabilpuram
2022-03-22 17:58 ` [PATCH 4/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
` (8 subsequent siblings)
10 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-03-22 17:58 UTC (permalink / raw)
To: jerinj, Radu Nicolau, Akhil Goyal; +Cc: dev, anoobj, Nithin Dabilpuram
Allow a larger burst size for vector event mode instead of restricting
it to 32. Also restructure the traffic type struct to have the packet
count first so that it is always in the first cacheline, and cache
align the struct. Since MAX_PKT_BURST is not used by the vector event
mode worker, define a separate macro for its burst size so that poll
mode performance is not affected.
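The layout intent can be captured by a compile-time check along these
lines (illustrative only, not part of this patch):

    #include <stddef.h>
    #include <rte_common.h>
    #include "ipsec-secgw.h"

    static inline void
    traffic_type_layout_check(void)
    {
        /* 'num' placed first keeps the hot packet counter in the first
         * cache line of the cache aligned traffic_type struct.
         */
        RTE_BUILD_BUG_ON(offsetof(struct traffic_type, num) != 0);
    }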
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 2 +-
examples/ipsec-secgw/ipsec-secgw.h | 15 ++++++++++-----
2 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index e8f9e90..7e01495 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1858,7 +1858,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
case CMD_LINE_OPT_VECTOR_SIZE_NUM:
ret = parse_decimal(optarg);
- if (ret > MAX_PKT_BURST) {
+ if (ret > MAX_PKT_BURST_VEC) {
printf("Invalid argument for \'%s\': %s\n",
CMD_LINE_OPT_VECTOR_SIZE, optarg);
print_usage(prgname);
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index 24f11ad..c347175 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -10,6 +10,11 @@
#define NB_SOCKETS 4
#define MAX_PKT_BURST 32
+#define MAX_PKT_BURST_VEC 256
+
+#define MAX_PKTS \
+ ((MAX_PKT_BURST_VEC > MAX_PKT_BURST ? \
+ MAX_PKT_BURST_VEC : MAX_PKT_BURST) * 2)
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
@@ -48,12 +53,12 @@
#define MBUF_PTYPE_TUNNEL_ESP_IN_UDP (RTE_PTYPE_TUNNEL_ESP | RTE_PTYPE_L4_UDP)
struct traffic_type {
- const uint8_t *data[MAX_PKT_BURST * 2];
- struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
- void *saptr[MAX_PKT_BURST * 2];
- uint32_t res[MAX_PKT_BURST * 2];
uint32_t num;
-};
+ struct rte_mbuf *pkts[MAX_PKTS];
+ const uint8_t *data[MAX_PKTS];
+ void *saptr[MAX_PKTS];
+ uint32_t res[MAX_PKTS];
+} __rte_cache_aligned;
struct ipsec_traffic {
struct traffic_type ipsec;
--
2.8.4
* [PATCH 4/7] examples/ipsec-secgw: move fast path helper functions
2022-03-22 17:58 [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
2022-03-22 17:58 ` [PATCH 2/7] examples/ipsec-secgw: use HW parsed packet type in poll mode Nithin Dabilpuram
2022-03-22 17:58 ` [PATCH 3/7] examples/ipsec-secgw: allow larger burst size for vectors Nithin Dabilpuram
@ 2022-03-22 17:58 ` Nithin Dabilpuram
2022-03-22 17:58 ` [PATCH 5/7] examples/ipsec-secgw: get security context from lcore conf Nithin Dabilpuram
` (7 subsequent siblings)
10 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-03-22 17:58 UTC (permalink / raw)
To: jerinj, Radu Nicolau, Akhil Goyal; +Cc: dev, anoobj, Nithin Dabilpuram
Move the fast path helper functions to a header file for easy access.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 547 +---------------------------------
examples/ipsec-secgw/ipsec-secgw.h | 4 +
examples/ipsec-secgw/ipsec.h | 34 +++
examples/ipsec-secgw/ipsec_process.c | 49 +--
examples/ipsec-secgw/ipsec_worker.h | 560 +++++++++++++++++++++++++++++++++++
5 files changed, 602 insertions(+), 592 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 7e01495..1d0ce3a 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -70,11 +70,6 @@ volatile bool force_quit;
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-/* Configure how many packets ahead to prefetch, when reading packets */
-#define PREFETCH_OFFSET 3
-
-#define MAX_RX_QUEUE_PER_LCORE 16
-
#define MAX_LCORE_PARAMS 1024
/*
@@ -191,9 +186,9 @@ static uint64_t dev_tx_offload = UINT64_MAX;
/*
* global values that determine multi-seg policy
*/
-static uint32_t frag_tbl_sz;
+uint32_t frag_tbl_sz;
static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
-static uint32_t mtu_size = RTE_ETHER_MTU;
+uint32_t mtu_size = RTE_ETHER_MTU;
static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
static uint32_t stats_interval;
@@ -205,11 +200,6 @@ struct app_sa_prm app_sa_prm = {
};
static const char *cfgfile;
-struct lcore_rx_queue {
- uint16_t port_id;
- uint8_t queue_id;
-} __rte_cache_aligned;
-
struct lcore_params {
uint16_t port_id;
uint8_t queue_id;
@@ -224,28 +214,7 @@ static uint16_t nb_lcore_params;
static struct rte_hash *cdev_map_in;
static struct rte_hash *cdev_map_out;
-struct buffer {
- uint16_t len;
- struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
-};
-
-struct lcore_conf {
- uint16_t nb_rx_queue;
- struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
- uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
- struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
- struct ipsec_ctx inbound;
- struct ipsec_ctx outbound;
- struct rt_ctx *rt4_ctx;
- struct rt_ctx *rt6_ctx;
- struct {
- struct rte_ip_frag_tbl *tbl;
- struct rte_mempool *pool_indir;
- struct rte_ip_frag_death_row dr;
- } frag;
-} __rte_cache_aligned;
-
-static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
+struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
.rxmode = {
@@ -281,32 +250,6 @@ multi_seg_required(void)
frame_buf_size || frag_tbl_sz != 0);
}
-static inline void
-adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
- uint32_t l2_len)
-{
- uint32_t plen, trim;
-
- plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
- if (plen < m->pkt_len) {
- trim = m->pkt_len - plen;
- rte_pktmbuf_trim(m, trim);
- }
-}
-
-static inline void
-adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
- uint32_t l2_len)
-{
- uint32_t plen, trim;
-
- plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
- if (plen < m->pkt_len) {
- trim = m->pkt_len - plen;
- rte_pktmbuf_trim(m, trim);
- }
-}
-
struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
@@ -371,341 +314,6 @@ print_stats_cb(__rte_unused void *param)
rte_eal_alarm_set(stats_interval * US_PER_S, print_stats_cb, NULL);
}
-static inline void
-prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
-{
- uint32_t ptype = pkt->packet_type;
- const struct rte_ether_hdr *eth;
- const struct rte_ipv4_hdr *iph4;
- const struct rte_ipv6_hdr *iph6;
- uint32_t tun_type, l3_type;
-
- tun_type = ptype & RTE_PTYPE_TUNNEL_MASK;
- l3_type = ptype & RTE_PTYPE_L3_MASK;
-
- eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
- if (l3_type == RTE_PTYPE_L3_IPV4) {
- iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
- RTE_ETHER_HDR_LEN);
- adjust_ipv4_pktlen(pkt, iph4, 0);
-
- if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- } else {
- t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
- t->ip4.pkts[(t->ip4.num)++] = pkt;
- }
- pkt->l2_len = 0;
- pkt->l3_len = sizeof(*iph4);
- } else if (l3_type & RTE_PTYPE_L3_IPV6) {
- int next_proto;
- size_t l3len, ext_len;
- uint8_t *p;
-
- /* get protocol type */
- iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
- RTE_ETHER_HDR_LEN);
- adjust_ipv6_pktlen(pkt, iph6, 0);
-
- l3len = sizeof(struct ip6_hdr);
-
- if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- } else {
- t->ip6.data[t->ip6.num] = &iph6->proto;
- t->ip6.pkts[(t->ip6.num)++] = pkt;
- }
-
- /* Determine l3 header size up to ESP extension by walking
- * through extension headers.
- */
- if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
- l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
- p = rte_pktmbuf_mtod(pkt, uint8_t *);
- next_proto = iph6->proto;
- while (next_proto != IPPROTO_ESP &&
- l3len < pkt->data_len &&
- (next_proto = rte_ipv6_get_next_ext(p + l3len,
- next_proto, &ext_len)) >= 0)
- l3len += ext_len;
-
- /* Drop pkt when IPv6 header exceeds first seg size */
- if (unlikely(l3len > pkt->data_len)) {
- free_pkts(&pkt, 1);
- return;
- }
- }
-
- pkt->l2_len = 0;
- pkt->l3_len = l3len;
- } else {
- /* Unknown/Unsupported type, drop the packet */
- RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
- rte_be_to_cpu_16(eth->ether_type));
- free_pkts(&pkt, 1);
- return;
- }
-
- if (ptype & RTE_PTYPE_L4_TCP)
- pkt->l4_len = sizeof(struct rte_tcp_hdr);
- else if (ptype & RTE_PTYPE_L4_UDP)
- pkt->l4_len = sizeof(struct rte_udp_hdr);
-
- /* Check if the packet has been processed inline. For inline protocol
- * processed packets, the metadata in the mbuf can be used to identify
- * the security processing done on the packet. The metadata will be
- * used to retrieve the application registered userdata associated
- * with the security session.
- */
-
- if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
- rte_security_dynfield_is_registered()) {
- struct ipsec_sa *sa;
- struct ipsec_mbuf_metadata *priv;
- struct rte_security_ctx *ctx = (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(
- pkt->port);
-
- /* Retrieve the userdata registered. Here, the userdata
- * registered is the SA pointer.
- */
- sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
- *rte_security_dynfield(pkt));
- if (sa == NULL) {
- /* userdata could not be retrieved */
- return;
- }
-
- /* Save SA as priv member in mbuf. This will be used in the
- * IPsec selector(SP-SA) check.
- */
-
- priv = get_priv(pkt);
- priv->sa = sa;
- }
-}
-
-static inline void
-prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
- uint16_t nb_pkts)
-{
- int32_t i;
-
- t->ipsec.num = 0;
- t->ip4.num = 0;
- t->ip6.num = 0;
-
- for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
- rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
- void *));
- prepare_one_packet(pkts[i], t);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_one_packet(pkts[i], t);
-}
-
-static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
- const struct lcore_conf *qconf)
-{
- struct ip *ip;
- struct rte_ether_hdr *ethhdr;
-
- ip = rte_pktmbuf_mtod(pkt, struct ip *);
-
- ethhdr = (struct rte_ether_hdr *)
- rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
-
- if (ip->ip_v == IPVERSION) {
- pkt->ol_flags |= qconf->outbound.ipv4_offloads;
- pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ip->ip_sum = 0;
-
- /* calculate IPv4 cksum in SW */
- if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
- ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
- } else {
- pkt->ol_flags |= qconf->outbound.ipv6_offloads;
- pkt->l3_len = sizeof(struct ip6_hdr);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- }
-
- memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
- sizeof(struct rte_ether_addr));
- memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
- sizeof(struct rte_ether_addr));
-}
-
-static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
- const struct lcore_conf *qconf)
-{
- int32_t i;
- const int32_t prefetch_offset = 2;
-
- for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
- rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
- prepare_tx_pkt(pkts[i], port, qconf);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_tx_pkt(pkts[i], port, qconf);
-}
-
-/* Send burst of packets on an output interface */
-static inline int32_t
-send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
-{
- struct rte_mbuf **m_table;
- int32_t ret;
- uint16_t queueid;
-
- queueid = qconf->tx_queue_id[port];
- m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
-
- prepare_tx_burst(m_table, n, port, qconf);
-
- ret = rte_eth_tx_burst(port, queueid, m_table, n);
-
- core_stats_update_tx(ret);
-
- if (unlikely(ret < n)) {
- do {
- free_pkts(&m_table[ret], 1);
- } while (++ret < n);
- }
-
- return 0;
-}
-
-/*
- * Helper function to fragment and queue for TX one packet.
- */
-static inline uint32_t
-send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
- uint16_t port, uint8_t proto)
-{
- struct buffer *tbl;
- uint32_t len, n;
- int32_t rc;
-
- tbl = qconf->tx_mbufs + port;
- len = tbl->len;
-
- /* free space for new fragments */
- if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
- send_burst(qconf, len, port);
- len = 0;
- }
-
- n = RTE_DIM(tbl->m_table) - len;
-
- if (proto == IPPROTO_IP)
- rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
- n, mtu_size, m->pool, qconf->frag.pool_indir);
- else
- rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
- n, mtu_size, m->pool, qconf->frag.pool_indir);
-
- if (rc >= 0)
- len += rc;
- else
- RTE_LOG(ERR, IPSEC,
- "%s: failed to fragment packet with size %u, "
- "error code: %d\n",
- __func__, m->pkt_len, rte_errno);
-
- free_pkts(&m, 1);
- return len;
-}
-
-/* Enqueue a single packet, and send burst if queue is filled */
-static inline int32_t
-send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
-{
- uint32_t lcore_id;
- uint16_t len;
- struct lcore_conf *qconf;
-
- lcore_id = rte_lcore_id();
-
- qconf = &lcore_conf[lcore_id];
- len = qconf->tx_mbufs[port].len;
-
- if (m->pkt_len <= mtu_size) {
- qconf->tx_mbufs[port].m_table[len] = m;
- len++;
-
- /* need to fragment the packet */
- } else if (frag_tbl_sz > 0)
- len = send_fragment_packet(qconf, m, port, proto);
- else
- free_pkts(&m, 1);
-
- /* enough pkts to be sent */
- if (unlikely(len == MAX_PKT_BURST)) {
- send_burst(qconf, MAX_PKT_BURST, port);
- len = 0;
- }
-
- qconf->tx_mbufs[port].len = len;
- return 0;
-}
-
-static inline void
-inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
- uint16_t lim, struct ipsec_spd_stats *stats)
-{
- struct rte_mbuf *m;
- uint32_t i, j, res, sa_idx;
-
- if (ip->num == 0 || sp == NULL)
- return;
-
- rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
- ip->num, DEFAULT_MAX_CATEGORIES);
-
- j = 0;
- for (i = 0; i < ip->num; i++) {
- m = ip->pkts[i];
- res = ip->res[i];
- if (res == BYPASS) {
- ip->pkts[j++] = m;
- stats->bypass++;
- continue;
- }
- if (res == DISCARD) {
- free_pkts(&m, 1);
- stats->discard++;
- continue;
- }
-
- /* Only check SPI match for processed IPSec packets */
- if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
- stats->discard++;
- free_pkts(&m, 1);
- continue;
- }
-
- sa_idx = res - 1;
- if (!inbound_sa_check(sa, m, sa_idx)) {
- stats->discard++;
- free_pkts(&m, 1);
- continue;
- }
- ip->pkts[j++] = m;
- stats->protect++;
- }
- ip->num = j;
-}
-
static void
split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
{
@@ -934,140 +542,6 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
}
}
-static inline int32_t
-get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
-{
- struct ipsec_mbuf_metadata *priv;
- struct ipsec_sa *sa;
-
- priv = get_priv(pkt);
-
- sa = priv->sa;
- if (unlikely(sa == NULL)) {
- RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
- goto fail;
- }
-
- if (is_ipv6)
- return sa->portid;
-
- /* else */
- return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
-
-fail:
- if (is_ipv6)
- return -1;
-
- /* else */
- return 0;
-}
-
-static inline void
-route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
-{
- uint32_t hop[MAX_PKT_BURST * 2];
- uint32_t dst_ip[MAX_PKT_BURST * 2];
- int32_t pkt_hop = 0;
- uint16_t i, offset;
- uint16_t lpm_pkts = 0;
- unsigned int lcoreid = rte_lcore_id();
-
- if (nb_pkts == 0)
- return;
-
- /* Need to do an LPM lookup for non-inline packets. Inline packets will
- * have port ID in the SA
- */
-
- for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
- /* Security offload not enabled. So an LPM lookup is
- * required to get the hop
- */
- offset = offsetof(struct ip, ip_dst);
- dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
- uint32_t *, offset);
- dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
- lpm_pkts++;
- }
- }
-
- rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
-
- lpm_pkts = 0;
-
- for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
- /* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
- } else {
- /* Need to use hop returned by lookup */
- pkt_hop = hop[lpm_pkts++];
- }
-
- if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
- core_statistics[lcoreid].lpm4.miss++;
- free_pkts(&pkts[i], 1);
- continue;
- }
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
- }
-}
-
-static inline void
-route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
-{
- int32_t hop[MAX_PKT_BURST * 2];
- uint8_t dst_ip[MAX_PKT_BURST * 2][16];
- uint8_t *ip6_dst;
- int32_t pkt_hop = 0;
- uint16_t i, offset;
- uint16_t lpm_pkts = 0;
- unsigned int lcoreid = rte_lcore_id();
-
- if (nb_pkts == 0)
- return;
-
- /* Need to do an LPM lookup for non-inline packets. Inline packets will
- * have port ID in the SA
- */
-
- for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
- /* Security offload not enabled. So an LPM lookup is
- * required to get the hop
- */
- offset = offsetof(struct ip6_hdr, ip6_dst);
- ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
- offset);
- memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
- lpm_pkts++;
- }
- }
-
- rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
- lpm_pkts);
-
- lpm_pkts = 0;
-
- for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
- /* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
- } else {
- /* Need to use hop returned by lookup */
- pkt_hop = hop[lpm_pkts++];
- }
-
- if (pkt_hop == -1) {
- core_statistics[lcoreid].lpm6.miss++;
- free_pkts(&pkts[i], 1);
- continue;
- }
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
- }
-}
-
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
uint8_t nb_pkts, uint16_t portid)
@@ -1093,21 +567,6 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
}
static inline void
-drain_tx_buffers(struct lcore_conf *qconf)
-{
- struct buffer *buf;
- uint32_t portid;
-
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- buf = &qconf->tx_mbufs[portid];
- if (buf->len == 0)
- continue;
- send_burst(qconf, buf->len, portid);
- buf->len = 0;
- }
-}
-
-static inline void
drain_crypto_buffers(struct lcore_conf *qconf)
{
uint32_t i;
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index c347175..2edf631 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -6,6 +6,7 @@
#include <stdbool.h>
+#define MAX_RX_QUEUE_PER_LCORE 16
#define NB_SOCKETS 4
@@ -141,6 +142,9 @@ extern uint32_t nb_bufs_in_pool;
extern bool per_port_pool;
+extern uint32_t mtu_size;
+extern uint32_t frag_tbl_sz;
+
static inline uint8_t
is_unprotected_port(uint16_t port_id)
{
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index ccfde8e..9a4e7ea 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -9,6 +9,7 @@
#include <rte_byteorder.h>
#include <rte_crypto.h>
+#include <rte_ip_frag.h>
#include <rte_security.h>
#include <rte_flow.h>
#include <rte_ipsec.h>
@@ -37,6 +38,11 @@
#define IP6_VERSION (6)
+#define SATP_OUT_IPV4(t) \
+ ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
+ (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
+ ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
+
struct rte_crypto_xform;
struct ipsec_xform;
struct rte_mbuf;
@@ -260,6 +266,34 @@ struct cnt_blk {
uint32_t cnt;
} __rte_packed;
+struct lcore_rx_queue {
+ uint16_t port_id;
+ uint8_t queue_id;
+} __rte_cache_aligned;
+
+struct buffer {
+ uint16_t len;
+ struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+};
+
+struct lcore_conf {
+ uint16_t nb_rx_queue;
+ struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
+ uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
+ struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
+ struct ipsec_ctx inbound;
+ struct ipsec_ctx outbound;
+ struct rt_ctx *rt4_ctx;
+ struct rt_ctx *rt6_ctx;
+ struct {
+ struct rte_ip_frag_tbl *tbl;
+ struct rte_mempool *pool_indir;
+ struct rte_ip_frag_death_row dr;
+ } frag;
+} __rte_cache_aligned;
+
+extern struct lcore_conf lcore_conf[RTE_MAX_LCORE];
+
/* Socket ctx */
extern struct socket_ctx socket_ctx[NB_SOCKETS];
diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index 285e9c7..089d89f 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -13,11 +13,7 @@
#include "ipsec.h"
#include "ipsec-secgw.h"
-
-#define SATP_OUT_IPV4(t) \
- ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
- (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
- ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
+#include "ipsec_worker.h"
/* helper routine to free bulk of crypto-ops and related packets */
static inline void
@@ -209,49 +205,6 @@ ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
}
/*
- * helper routine for inline and cpu(synchronous) processing
- * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
- * Should be removed in future.
- */
-static inline void
-prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
-{
- uint32_t j;
- struct ipsec_mbuf_metadata *priv;
-
- for (j = 0; j != cnt; j++) {
- priv = get_priv(mb[j]);
- priv->sa = sa;
- /* setup TSO related fields if TSO enabled*/
- if (priv->sa->mss) {
- uint32_t ptype = mb[j]->packet_type;
- /* only TCP is supported */
- if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
- mb[j]->tso_segsz = priv->sa->mss;
- if ((IS_TUNNEL(priv->sa->flags))) {
- mb[j]->outer_l3_len = mb[j]->l3_len;
- mb[j]->outer_l2_len = mb[j]->l2_len;
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_TUNNEL_ESP;
- if (RTE_ETH_IS_IPV4_HDR(ptype))
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_OUTER_IP_CKSUM;
- }
- mb[j]->l4_len = sizeof(struct rte_tcp_hdr);
- mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG |
- RTE_MBUF_F_TX_TCP_CKSUM);
- if (RTE_ETH_IS_IPV4_HDR(ptype))
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_OUTER_IPV4;
- else
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_OUTER_IPV6;
- }
- }
- }
-}
-
-/*
* finish processing of packets successfully decrypted by an inline processor
*/
static uint32_t
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 5d85cf1..eb966a6 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -4,8 +4,15 @@
#ifndef _IPSEC_WORKER_H_
#define _IPSEC_WORKER_H_
+#include <rte_acl.h>
+#include <rte_ethdev.h>
+#include <rte_lpm.h>
+#include <rte_lpm6.h>
+
#include "ipsec.h"
+/* Configure how many packets ahead to prefetch, when reading packets */
+#define PREFETCH_OFFSET 3
enum pkt_type {
PKT_TYPE_PLAIN_IPV4 = 1,
PKT_TYPE_IPSEC_IPV4,
@@ -38,4 +45,557 @@ void ipsec_poll_mode_worker(void);
int ipsec_launch_one_lcore(void *args);
+/*
+ * helper routine for inline and cpu(synchronous) processing
+ * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
+ * Should be removed in future.
+ */
+static inline void
+prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
+{
+ uint32_t j;
+ struct ipsec_mbuf_metadata *priv;
+
+ for (j = 0; j != cnt; j++) {
+ priv = get_priv(mb[j]);
+ priv->sa = sa;
+ /* setup TSO related fields if TSO enabled*/
+ if (priv->sa->mss) {
+ uint32_t ptype = mb[j]->packet_type;
+ /* only TCP is supported */
+ if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
+ mb[j]->tso_segsz = priv->sa->mss;
+ if ((IS_TUNNEL(priv->sa->flags))) {
+ mb[j]->outer_l3_len = mb[j]->l3_len;
+ mb[j]->outer_l2_len = mb[j]->l2_len;
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_TUNNEL_ESP;
+ if (RTE_ETH_IS_IPV4_HDR(ptype))
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM;
+ }
+ mb[j]->l4_len = sizeof(struct rte_tcp_hdr);
+ mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG |
+ RTE_MBUF_F_TX_TCP_CKSUM);
+ if (RTE_ETH_IS_IPV4_HDR(ptype))
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_OUTER_IPV4;
+ else
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_OUTER_IPV6;
+ }
+ }
+ }
+}
+
+static __rte_always_inline void
+adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
+ uint32_t l2_len)
+{
+ uint32_t plen, trim;
+
+ plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
+ if (plen < m->pkt_len) {
+ trim = m->pkt_len - plen;
+ rte_pktmbuf_trim(m, trim);
+ }
+}
+
+static __rte_always_inline void
+adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
+ uint32_t l2_len)
+{
+ uint32_t plen, trim;
+
+ plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
+ if (plen < m->pkt_len) {
+ trim = m->pkt_len - plen;
+ rte_pktmbuf_trim(m, trim);
+ }
+}
+
+static __rte_always_inline void
+prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
+{
+ uint32_t ptype = pkt->packet_type;
+ const struct rte_ether_hdr *eth;
+ const struct rte_ipv4_hdr *iph4;
+ const struct rte_ipv6_hdr *iph6;
+ uint32_t tun_type, l3_type;
+
+ tun_type = ptype & RTE_PTYPE_TUNNEL_MASK;
+ l3_type = ptype & RTE_PTYPE_L3_MASK;
+
+ eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
+ if (l3_type == RTE_PTYPE_L3_IPV4) {
+ iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
+ RTE_ETHER_HDR_LEN);
+ adjust_ipv4_pktlen(pkt, iph4, 0);
+
+ if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ } else {
+ t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
+ t->ip4.pkts[(t->ip4.num)++] = pkt;
+ }
+ pkt->l2_len = 0;
+ pkt->l3_len = sizeof(*iph4);
+ } else if (l3_type & RTE_PTYPE_L3_IPV6) {
+ int next_proto;
+ size_t l3len, ext_len;
+ uint8_t *p;
+
+ /* get protocol type */
+ iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
+ RTE_ETHER_HDR_LEN);
+ adjust_ipv6_pktlen(pkt, iph6, 0);
+
+ l3len = sizeof(struct ip6_hdr);
+
+ if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ } else {
+ t->ip6.data[t->ip6.num] = &iph6->proto;
+ t->ip6.pkts[(t->ip6.num)++] = pkt;
+ }
+
+ /* Determine l3 header size up to ESP extension by walking
+ * through extension headers.
+ */
+ if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
+ l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
+ p = rte_pktmbuf_mtod(pkt, uint8_t *);
+ next_proto = iph6->proto;
+ while (next_proto != IPPROTO_ESP &&
+ l3len < pkt->data_len &&
+ (next_proto = rte_ipv6_get_next_ext(p + l3len,
+ next_proto, &ext_len)) >= 0)
+ l3len += ext_len;
+
+ /* Drop pkt when IPv6 header exceeds first seg size */
+ if (unlikely(l3len > pkt->data_len)) {
+ free_pkts(&pkt, 1);
+ return;
+ }
+ }
+
+ pkt->l2_len = 0;
+ pkt->l3_len = l3len;
+ } else {
+ /* Unknown/Unsupported type, drop the packet */
+ RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
+ rte_be_to_cpu_16(eth->ether_type));
+ free_pkts(&pkt, 1);
+ return;
+ }
+
+ if (ptype & RTE_PTYPE_L4_TCP)
+ pkt->l4_len = sizeof(struct rte_tcp_hdr);
+ else if (ptype & RTE_PTYPE_L4_UDP)
+ pkt->l4_len = sizeof(struct rte_udp_hdr);
+
+ /* Check if the packet has been processed inline. For inline protocol
+ * processed packets, the metadata in the mbuf can be used to identify
+ * the security processing done on the packet. The metadata will be
+ * used to retrieve the application registered userdata associated
+ * with the security session.
+ */
+
+ if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
+ rte_security_dynfield_is_registered()) {
+ struct ipsec_sa *sa;
+ struct ipsec_mbuf_metadata *priv;
+ struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(
+ pkt->port);
+
+ /* Retrieve the userdata registered. Here, the userdata
+ * registered is the SA pointer.
+ */
+ sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
+ *rte_security_dynfield(pkt));
+ if (sa == NULL) {
+ /* userdata could not be retrieved */
+ return;
+ }
+
+ /* Save SA as priv member in mbuf. This will be used in the
+ * IPsec selector(SP-SA) check.
+ */
+
+ priv = get_priv(pkt);
+ priv->sa = sa;
+ }
+}
+
+static __rte_always_inline void
+prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
+ uint16_t nb_pkts)
+{
+ int32_t i;
+
+ t->ipsec.num = 0;
+ t->ip4.num = 0;
+ t->ip6.num = 0;
+
+ for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
+ void *));
+ prepare_one_packet(pkts[i], t);
+ }
+ /* Process left packets */
+ for (; i < nb_pkts; i++)
+ prepare_one_packet(pkts[i], t);
+}
+
+static __rte_always_inline void
+prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
+ const struct lcore_conf *qconf)
+{
+ struct ip *ip;
+ struct rte_ether_hdr *ethhdr;
+
+ ip = rte_pktmbuf_mtod(pkt, struct ip *);
+
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+ if (ip->ip_v == IPVERSION) {
+ pkt->ol_flags |= qconf->outbound.ipv4_offloads;
+ pkt->l3_len = sizeof(struct ip);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ ip->ip_sum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
+ ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ } else {
+ pkt->ol_flags |= qconf->outbound.ipv6_offloads;
+ pkt->l3_len = sizeof(struct ip6_hdr);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ }
+
+ memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+}
+
+static __rte_always_inline void
+prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
+ const struct lcore_conf *qconf)
+{
+ int32_t i;
+ const int32_t prefetch_offset = 2;
+
+ for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
+ rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
+ prepare_tx_pkt(pkts[i], port, qconf);
+ }
+ /* Process left packets */
+ for (; i < nb_pkts; i++)
+ prepare_tx_pkt(pkts[i], port, qconf);
+}
+
+/* Send burst of packets on an output interface */
+static __rte_always_inline int32_t
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
+{
+ struct rte_mbuf **m_table;
+ int32_t ret;
+ uint16_t queueid;
+
+ queueid = qconf->tx_queue_id[port];
+ m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
+
+ prepare_tx_burst(m_table, n, port, qconf);
+
+ ret = rte_eth_tx_burst(port, queueid, m_table, n);
+
+ core_stats_update_tx(ret);
+
+ if (unlikely(ret < n)) {
+ do {
+ free_pkts(&m_table[ret], 1);
+ } while (++ret < n);
+ }
+
+ return 0;
+}
+
+/*
+ * Helper function to fragment and queue for TX one packet.
+ */
+static __rte_always_inline uint32_t
+send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
+ uint16_t port, uint8_t proto)
+{
+ struct buffer *tbl;
+ uint32_t len, n;
+ int32_t rc;
+
+ tbl = qconf->tx_mbufs + port;
+ len = tbl->len;
+
+ /* free space for new fragments */
+ if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
+ send_burst(qconf, len, port);
+ len = 0;
+ }
+
+ n = RTE_DIM(tbl->m_table) - len;
+
+ if (proto == IPPROTO_IP)
+ rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
+ n, mtu_size, m->pool, qconf->frag.pool_indir);
+ else
+ rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
+ n, mtu_size, m->pool, qconf->frag.pool_indir);
+
+ if (rc >= 0)
+ len += rc;
+ else
+ RTE_LOG(ERR, IPSEC,
+ "%s: failed to fragment packet with size %u, "
+ "error code: %d\n",
+ __func__, m->pkt_len, rte_errno);
+
+ free_pkts(&m, 1);
+ return len;
+}
+
+/* Enqueue a single packet, and send burst if queue is filled */
+static __rte_always_inline int32_t
+send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
+{
+ uint32_t lcore_id;
+ uint16_t len;
+ struct lcore_conf *qconf;
+
+ lcore_id = rte_lcore_id();
+
+ qconf = &lcore_conf[lcore_id];
+ len = qconf->tx_mbufs[port].len;
+
+ if (m->pkt_len <= mtu_size) {
+ qconf->tx_mbufs[port].m_table[len] = m;
+ len++;
+
+ /* need to fragment the packet */
+ } else if (frag_tbl_sz > 0)
+ len = send_fragment_packet(qconf, m, port, proto);
+ else
+ free_pkts(&m, 1);
+
+ /* enough pkts to be sent */
+ if (unlikely(len == MAX_PKT_BURST)) {
+ send_burst(qconf, MAX_PKT_BURST, port);
+ len = 0;
+ }
+
+ qconf->tx_mbufs[port].len = len;
+ return 0;
+}
+
+static __rte_always_inline void
+inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
+ uint16_t lim, struct ipsec_spd_stats *stats)
+{
+ struct rte_mbuf *m;
+ uint32_t i, j, res, sa_idx;
+
+ if (ip->num == 0 || sp == NULL)
+ return;
+
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+ ip->num, DEFAULT_MAX_CATEGORIES);
+
+ j = 0;
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ res = ip->res[i];
+ if (res == BYPASS) {
+ ip->pkts[j++] = m;
+ stats->bypass++;
+ continue;
+ }
+ if (res == DISCARD) {
+ free_pkts(&m, 1);
+ stats->discard++;
+ continue;
+ }
+
+ /* Only check SPI match for processed IPSec packets */
+ if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
+ stats->discard++;
+ free_pkts(&m, 1);
+ continue;
+ }
+
+ sa_idx = res - 1;
+ if (!inbound_sa_check(sa, m, sa_idx)) {
+ stats->discard++;
+ free_pkts(&m, 1);
+ continue;
+ }
+ ip->pkts[j++] = m;
+ stats->protect++;
+ }
+ ip->num = j;
+}
+
+static __rte_always_inline int32_t
+get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
+{
+ struct ipsec_mbuf_metadata *priv;
+ struct ipsec_sa *sa;
+
+ priv = get_priv(pkt);
+
+ sa = priv->sa;
+ if (unlikely(sa == NULL)) {
+ RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
+ goto fail;
+ }
+
+ if (is_ipv6)
+ return sa->portid;
+
+ /* else */
+ return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
+
+fail:
+ if (is_ipv6)
+ return -1;
+
+ /* else */
+ return 0;
+}
+
+static __rte_always_inline void
+route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+{
+ uint32_t hop[MAX_PKT_BURST * 2];
+ uint32_t dst_ip[MAX_PKT_BURST * 2];
+ int32_t pkt_hop = 0;
+ uint16_t i, offset;
+ uint16_t lpm_pkts = 0;
+ unsigned int lcoreid = rte_lcore_id();
+
+ if (nb_pkts == 0)
+ return;
+
+ /* Need to do an LPM lookup for non-inline packets. Inline packets will
+ * have port ID in the SA
+ */
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ /* Security offload not enabled. So an LPM lookup is
+ * required to get the hop
+ */
+ offset = offsetof(struct ip, ip_dst);
+ dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
+ uint32_t *, offset);
+ dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
+ lpm_pkts++;
+ }
+ }
+
+ rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
+
+ lpm_pkts = 0;
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ /* Read hop from the SA */
+ pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
+ } else {
+ /* Need to use hop returned by lookup */
+ pkt_hop = hop[lpm_pkts++];
+ }
+
+ if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
+ core_statistics[lcoreid].lpm4.miss++;
+ free_pkts(&pkts[i], 1);
+ continue;
+ }
+ send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
+ }
+}
+
+static __rte_always_inline void
+route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+{
+ int32_t hop[MAX_PKT_BURST * 2];
+ uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+ uint8_t *ip6_dst;
+ int32_t pkt_hop = 0;
+ uint16_t i, offset;
+ uint16_t lpm_pkts = 0;
+ unsigned int lcoreid = rte_lcore_id();
+
+ if (nb_pkts == 0)
+ return;
+
+ /* Need to do an LPM lookup for non-inline packets. Inline packets will
+ * have port ID in the SA
+ */
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ /* Security offload not enabled. So an LPM lookup is
+ * required to get the hop
+ */
+ offset = offsetof(struct ip6_hdr, ip6_dst);
+ ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
+ offset);
+ memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
+ lpm_pkts++;
+ }
+ }
+
+ rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
+ lpm_pkts);
+
+ lpm_pkts = 0;
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ /* Read hop from the SA */
+ pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
+ } else {
+ /* Need to use hop returned by lookup */
+ pkt_hop = hop[lpm_pkts++];
+ }
+
+ if (pkt_hop == -1) {
+ core_statistics[lcoreid].lpm6.miss++;
+ free_pkts(&pkts[i], 1);
+ continue;
+ }
+ send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
+ }
+}
+
+static __rte_always_inline void
+drain_tx_buffers(struct lcore_conf *qconf)
+{
+ struct buffer *buf;
+ uint32_t portid;
+
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+ buf = &qconf->tx_mbufs[portid];
+ if (buf->len == 0)
+ continue;
+ send_burst(qconf, buf->len, portid);
+ buf->len = 0;
+ }
+}
+
#endif /* _IPSEC_WORKER_H_ */
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH 5/7] examples/ipsec-secgw: get security context from lcore conf
2022-03-22 17:58 [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
` (2 preceding siblings ...)
2022-03-22 17:58 ` [PATCH 4/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
@ 2022-03-22 17:58 ` Nithin Dabilpuram
2022-03-22 17:58 ` [PATCH 6/7] examples/ipsec-secgw: update eth header during route lookup Nithin Dabilpuram
` (6 subsequent siblings)
10 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-03-22 17:58 UTC (permalink / raw)
To: jerinj, Radu Nicolau, Akhil Goyal; +Cc: dev, anoobj, Nithin Dabilpuram
Store the security context pointer in the lcore Rx queue config and
get it from there in the fast path for better performance.
Currently, rte_eth_dev_get_sec_ctx(), which is meant to be a control
path API, is called on a per-packet basis, and every call to that
API checks the ethdev port status.
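The fast path change boils down to something like this (sketch only;
'rxq' here is shorthand for the lcore_rx_queue entry of the queue the
burst was received on):
/* control path: resolve the context once per Rx queue at init time */
rxq->sec_ctx = rte_eth_dev_get_sec_ctx(portid);
/* fast path: no per-packet rte_eth_dev_get_sec_ctx() call and hence
 * no per-packet port status check
 */
struct rte_security_ctx *ctx = rxq->sec_ctx;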
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 22 +++++++++++++++++++---
examples/ipsec-secgw/ipsec.h | 1 +
examples/ipsec-secgw/ipsec_worker.h | 17 +++++++----------
3 files changed, 27 insertions(+), 13 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 1d0ce3a..a04b5e8 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -544,11 +544,11 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
- uint8_t nb_pkts, uint16_t portid)
+ uint8_t nb_pkts, uint16_t portid, struct rte_security_ctx *ctx)
{
struct ipsec_traffic traffic;
- prepare_traffic(pkts, &traffic, nb_pkts);
+ prepare_traffic(ctx, pkts, &traffic, nb_pkts);
if (unlikely(single_sa)) {
if (is_unprotected_port(portid))
@@ -740,7 +740,8 @@ ipsec_poll_mode_worker(void)
if (nb_rx > 0) {
core_stats_update_rx(nb_rx);
- process_pkts(qconf, pkts, nb_rx, portid);
+ process_pkts(qconf, pkts, nb_rx, portid,
+ rxql->sec_ctx);
}
/* dequeue and process completed crypto-ops */
@@ -3060,6 +3061,21 @@ main(int32_t argc, char **argv)
flow_init();
+ /* Get security context if available and only if dynamic field is
+ * registered for fast path access.
+ */
+ if (!rte_security_dynfield_is_registered())
+ goto skip_sec_ctx;
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ for (i = 0; i < lcore_conf[lcore_id].nb_rx_queue; i++) {
+ portid = lcore_conf[lcore_id].rx_queue_list[i].port_id;
+ lcore_conf[lcore_id].rx_queue_list[i].sec_ctx =
+ rte_eth_dev_get_sec_ctx(portid);
+ }
+ }
+skip_sec_ctx:
+
check_all_ports_link_status(enabled_port_mask);
if (stats_interval > 0)
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 9a4e7ea..ecad262 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -269,6 +269,7 @@ struct cnt_blk {
struct lcore_rx_queue {
uint16_t port_id;
uint8_t queue_id;
+ struct rte_security_ctx *sec_ctx;
} __rte_cache_aligned;
struct buffer {
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index eb966a6..838b3f6 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -115,7 +115,8 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
}
static __rte_always_inline void
-prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
+prepare_one_packet(struct rte_security_ctx *ctx, struct rte_mbuf *pkt,
+ struct ipsec_traffic *t)
{
uint32_t ptype = pkt->packet_type;
const struct rte_ether_hdr *eth;
@@ -201,13 +202,9 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
* with the security session.
*/
- if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
- rte_security_dynfield_is_registered()) {
+ if (ctx && pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
struct ipsec_sa *sa;
struct ipsec_mbuf_metadata *priv;
- struct rte_security_ctx *ctx = (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(
- pkt->port);
/* Retrieve the userdata registered. Here, the userdata
* registered is the SA pointer.
@@ -229,8 +226,8 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
}
static __rte_always_inline void
-prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
- uint16_t nb_pkts)
+prepare_traffic(struct rte_security_ctx *ctx, struct rte_mbuf **pkts,
+ struct ipsec_traffic *t, uint16_t nb_pkts)
{
int32_t i;
@@ -241,11 +238,11 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
void *));
- prepare_one_packet(pkts[i], t);
+ prepare_one_packet(ctx, pkts[i], t);
}
/* Process left packets */
for (; i < nb_pkts; i++)
- prepare_one_packet(pkts[i], t);
+ prepare_one_packet(ctx, pkts[i], t);
}
static __rte_always_inline void
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH 6/7] examples/ipsec-secgw: update eth header during route lookup
2022-03-22 17:58 [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
` (3 preceding siblings ...)
2022-03-22 17:58 ` [PATCH 5/7] examples/ipsec-secgw: get security context from lcore conf Nithin Dabilpuram
@ 2022-03-22 17:58 ` Nithin Dabilpuram
2022-03-22 17:58 ` [PATCH 7/7] examples/ipsec-secgw: add poll mode worker for inline proto Nithin Dabilpuram
` (5 subsequent siblings)
10 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-03-22 17:58 UTC (permalink / raw)
To: jerinj, Radu Nicolau, Akhil Goyal; +Cc: dev, anoobj, Nithin Dabilpuram
Update the ethernet header during route lookup instead of doing it
much later while performing the Tx burst. The advantage of doing it
at route lookup is that no additional IP version checks based on
packet data are needed, and the packet data is already in cache,
since the route lookup is already consuming that data.
This is also useful for inline protocol offload cases of v4-in-v6 or
v6-in-v4 outbound tunnel operations, as the packet data will not
carry any information about the tunnel protocol.
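Roughly, route4_pkts()/route6_pkts() now do the following when a hop
is found (sketch only; the real change is in the diff below):
/* The address family is known from the route context, so ether_type
 * can be set without reading ip->ip_v from the packet.
 */
ethhdr = (struct rte_ether_hdr *)
	rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
/* ... RTE_ETHER_TYPE_IPV6 in route6_pkts() */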
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 9 +-
examples/ipsec-secgw/ipsec_worker.h | 197 ++++++++++++++++++++++--------------
2 files changed, 129 insertions(+), 77 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index a04b5e8..84f6150 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -562,7 +562,8 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
process_pkts_outbound(&qconf->outbound, &traffic);
}
- route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
+ route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
}
@@ -613,7 +614,8 @@ drain_inbound_crypto_queues(const struct lcore_conf *qconf,
if (trf.ip4.num != 0) {
inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0,
&core_statistics[lcoreid].inbound.spd4);
- route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
}
/* process ipv6 packets */
@@ -647,7 +649,8 @@ drain_outbound_crypto_queues(const struct lcore_conf *qconf,
/* process ipv4 packets */
if (trf.ip4.num != 0)
- route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
/* process ipv6 packets */
if (trf.ip6.num != 0)
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 838b3f6..b183248 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -245,60 +245,6 @@ prepare_traffic(struct rte_security_ctx *ctx, struct rte_mbuf **pkts,
prepare_one_packet(ctx, pkts[i], t);
}
-static __rte_always_inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
- const struct lcore_conf *qconf)
-{
- struct ip *ip;
- struct rte_ether_hdr *ethhdr;
-
- ip = rte_pktmbuf_mtod(pkt, struct ip *);
-
- ethhdr = (struct rte_ether_hdr *)
- rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
-
- if (ip->ip_v == IPVERSION) {
- pkt->ol_flags |= qconf->outbound.ipv4_offloads;
- pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ip->ip_sum = 0;
-
- /* calculate IPv4 cksum in SW */
- if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
- ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
- } else {
- pkt->ol_flags |= qconf->outbound.ipv6_offloads;
- pkt->l3_len = sizeof(struct ip6_hdr);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- }
-
- memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
- sizeof(struct rte_ether_addr));
- memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
- sizeof(struct rte_ether_addr));
-}
-
-static __rte_always_inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
- const struct lcore_conf *qconf)
-{
- int32_t i;
- const int32_t prefetch_offset = 2;
-
- for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
- rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
- prepare_tx_pkt(pkts[i], port, qconf);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_tx_pkt(pkts[i], port, qconf);
-}
-
/* Send burst of packets on an output interface */
static __rte_always_inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
@@ -310,8 +256,6 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
queueid = qconf->tx_queue_id[port];
m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
- prepare_tx_burst(m_table, n, port, qconf);
-
ret = rte_eth_tx_burst(port, queueid, m_table, n);
core_stats_update_tx(ret);
@@ -332,8 +276,11 @@ static __rte_always_inline uint32_t
send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
uint16_t port, uint8_t proto)
{
+ struct rte_ether_hdr *ethhdr;
+ struct rte_ipv4_hdr *ip;
+ struct rte_mbuf *pkt;
struct buffer *tbl;
- uint32_t len, n;
+ uint32_t len, n, i;
int32_t rc;
tbl = qconf->tx_mbufs + port;
@@ -347,6 +294,9 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
n = RTE_DIM(tbl->m_table) - len;
+ /* Strip the ethernet header that was prepended earlier */
+ rte_pktmbuf_adj(m, RTE_ETHER_HDR_LEN);
+
if (proto == IPPROTO_IP)
rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
n, mtu_size, m->pool, qconf->frag.pool_indir);
@@ -354,13 +304,51 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
n, mtu_size, m->pool, qconf->frag.pool_indir);
- if (rc >= 0)
- len += rc;
- else
+ if (rc < 0) {
RTE_LOG(ERR, IPSEC,
"%s: failed to fragment packet with size %u, "
"error code: %d\n",
__func__, m->pkt_len, rte_errno);
+ rc = 0;
+ }
+
+ i = len;
+ len += rc;
+ for (; i < len; i++) {
+ pkt = tbl->m_table[i];
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ if (proto == IPPROTO_IP) {
+ ethhdr->ether_type =
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
+ pkt->ol_flags |= qconf->outbound.ipv4_offloads;
+
+ ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
+ ip->hdr_checksum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
+ ip->hdr_checksum = rte_ipv4_cksum(ip);
+ } else {
+ ethhdr->ether_type =
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv6_hdr);
+ pkt->ol_flags |= qconf->outbound.ipv6_offloads;
+ }
+
+ memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+ }
free_pkts(&m, 1);
return len;
@@ -379,7 +367,8 @@ send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
qconf = &lcore_conf[lcore_id];
len = qconf->tx_mbufs[port].len;
- if (m->pkt_len <= mtu_size) {
+ /* L2 header is already part of packet */
+ if (m->pkt_len - RTE_ETHER_HDR_LEN <= mtu_size) {
qconf->tx_mbufs[port].m_table[len] = m;
len++;
@@ -475,14 +464,18 @@ get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
}
static __rte_always_inline void
-route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[],
+ uint8_t nb_pkts, uint64_t tx_offloads, bool ip_cksum)
{
uint32_t hop[MAX_PKT_BURST * 2];
uint32_t dst_ip[MAX_PKT_BURST * 2];
+ struct rte_ether_hdr *ethhdr;
int32_t pkt_hop = 0;
uint16_t i, offset;
uint16_t lpm_pkts = 0;
unsigned int lcoreid = rte_lcore_id();
+ struct rte_mbuf *pkt;
+ uint16_t port;
if (nb_pkts == 0)
return;
@@ -492,12 +485,13 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ pkt = pkts[i];
+ if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
offset = offsetof(struct ip, ip_dst);
- dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
+ dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkt,
uint32_t *, offset);
dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
lpm_pkts++;
@@ -509,9 +503,10 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ pkt = pkts[i];
+ if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
+ pkt_hop = get_hop_for_offload_pkt(pkt, 0);
} else {
/* Need to use hop returned by lookup */
pkt_hop = hop[lpm_pkts++];
@@ -519,10 +514,41 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
core_statistics[lcoreid].lpm4.miss++;
- free_pkts(&pkts[i], 1);
+ free_pkts(&pkt, 1);
continue;
}
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
+
+ port = pkt_hop & 0xff;
+
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+ pkt->ol_flags |= RTE_MBUF_F_TX_IPV4;
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+ if (ip_cksum) {
+ struct rte_ipv4_hdr *ip;
+
+ pkt->ol_flags |= tx_offloads;
+
+ ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
+ ip->hdr_checksum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
+ ip->hdr_checksum = rte_ipv4_cksum(ip);
+ }
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+
+ send_single_packet(pkt, port, IPPROTO_IP);
}
}
@@ -531,11 +557,14 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
int32_t hop[MAX_PKT_BURST * 2];
uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+ struct rte_ether_hdr *ethhdr;
uint8_t *ip6_dst;
int32_t pkt_hop = 0;
uint16_t i, offset;
uint16_t lpm_pkts = 0;
unsigned int lcoreid = rte_lcore_id();
+ struct rte_mbuf *pkt;
+ uint16_t port;
if (nb_pkts == 0)
return;
@@ -545,12 +574,13 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ pkt = pkts[i];
+ if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
offset = offsetof(struct ip6_hdr, ip6_dst);
- ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
+ ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
offset);
memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
lpm_pkts++;
@@ -563,9 +593,10 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ pkt = pkts[i];
+ if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
+ pkt_hop = get_hop_for_offload_pkt(pkt, 1);
} else {
/* Need to use hop returned by lookup */
pkt_hop = hop[lpm_pkts++];
@@ -573,10 +604,28 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
if (pkt_hop == -1) {
core_statistics[lcoreid].lpm6.miss++;
- free_pkts(&pkts[i], 1);
+ free_pkts(&pkt, 1);
continue;
}
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
+
+ port = pkt_hop & 0xff;
+
+ /* Update minimum offload data */
+ pkt->ol_flags |= RTE_MBUF_F_TX_IPV6;
+ pkt->l3_len = sizeof(struct ip6_hdr);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+
+ send_single_packet(pkt, port, IPPROTO_IPV6);
}
}
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH 7/7] examples/ipsec-secgw: add poll mode worker for inline proto
2022-03-22 17:58 [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
` (4 preceding siblings ...)
2022-03-22 17:58 ` [PATCH 6/7] examples/ipsec-secgw: update eth header during route lookup Nithin Dabilpuram
@ 2022-03-22 17:58 ` Nithin Dabilpuram
2022-04-13 6:13 ` [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Kumar Dabilpuram
` (4 subsequent siblings)
10 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-03-22 17:58 UTC (permalink / raw)
To: jerinj, Radu Nicolau, Akhil Goyal; +Cc: dev, anoobj, Nithin Dabilpuram
Add a separate worker thread for the case when all SAs are of type
inline protocol offload and librte_ipsec is enabled, in order to
make that case more optimal.
The current default worker supports all kinds of SAs, which leads to
a lot of per-packet checks and branching based on the SA type, of
which there are 5.
Also make provision for choosing different poll mode workers for
different combinations of SA types, with the default being the
existing poll mode worker that supports all kinds of SAs.
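The selection itself is a small function-pointer table indexed by the
SA type flags collected while parsing the SA rules (condensed from the
ipsec_worker.c change below):
static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
	[INL_PR_F] = ipsec_poll_mode_wrkr_inl_pr,
	[INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
};
fn = poll_mode_wrkrs[wrkr_flags];
if (fn == NULL)
	fn = ipsec_poll_mode_worker; /* default: handles all SA types */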
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 6 +-
examples/ipsec-secgw/ipsec-secgw.h | 10 +
examples/ipsec-secgw/ipsec_worker.c | 378 +++++++++++++++++++++++++++++++++++-
examples/ipsec-secgw/ipsec_worker.h | 4 +
examples/ipsec-secgw/sa.c | 9 +
5 files changed, 403 insertions(+), 4 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 84f6150..515b344 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -68,8 +68,6 @@ volatile bool force_quit;
#define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */
#define MAX_QUEUE_PAIRS 1
-#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-
#define MAX_LCORE_PARAMS 1024
/*
@@ -173,7 +171,7 @@ static uint64_t enabled_cryptodev_mask = UINT64_MAX;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
-static uint32_t single_sa;
+uint32_t single_sa;
uint32_t nb_bufs_in_pool;
/*
@@ -238,6 +236,7 @@ struct socket_ctx socket_ctx[NB_SOCKETS];
bool per_port_pool;
+uint16_t wrkr_flags;
/*
* Determine is multi-segment support required:
* - either frame buffer size is smaller then mtu
@@ -1233,6 +1232,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
single_sa = 1;
single_sa_idx = ret;
eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
+ wrkr_flags |= SS_F;
printf("Configured with single SA index %u\n",
single_sa_idx);
break;
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index 2edf631..f027360 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -135,6 +135,7 @@ extern uint32_t unprotected_port_mask;
/* Index of SA in single mode */
extern uint32_t single_sa_idx;
+extern uint32_t single_sa;
extern volatile bool force_quit;
@@ -145,6 +146,15 @@ extern bool per_port_pool;
extern uint32_t mtu_size;
extern uint32_t frag_tbl_sz;
+#define SS_F (1U << 0) /* Single SA mode */
+#define INL_PR_F (1U << 1) /* Inline Protocol */
+#define INL_CR_F (1U << 2) /* Inline Crypto */
+#define LA_PR_F (1U << 3) /* Lookaside Protocol */
+#define LA_ANY_F (1U << 4) /* Lookaside Any */
+#define MAX_F (LA_ANY_F << 1)
+
+extern uint16_t wrkr_flags;
+
static inline uint8_t
is_unprotected_port(uint16_t port_id)
{
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 8639426..2b96951 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -17,6 +17,8 @@ struct port_drv_mode_data {
struct rte_security_ctx *ctx;
};
+typedef void (*ipsec_worker_fn_t)(void);
+
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
@@ -1004,6 +1006,380 @@ ipsec_eventmode_worker(struct eh_conf *conf)
eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}
+static __rte_always_inline void
+outb_inl_pro_spd_process(struct sp_ctx *sp,
+ struct sa_ctx *sa_ctx,
+ struct traffic_type *ip,
+ struct traffic_type *match,
+ struct traffic_type *mismatch,
+ bool match_flag,
+ struct ipsec_spd_stats *stats)
+{
+ uint32_t prev_sa_idx = UINT32_MAX;
+ struct rte_mbuf *ipsec[MAX_PKT_BURST];
+ struct rte_ipsec_session *ips;
+ uint32_t i, j, j_mis, sa_idx;
+ struct ipsec_sa *sa = NULL;
+ uint32_t ipsec_num = 0;
+ struct rte_mbuf *m;
+ uint64_t satp;
+
+ if (ip->num == 0 || sp == NULL)
+ return;
+
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+ ip->num, DEFAULT_MAX_CATEGORIES);
+
+ j = match->num;
+ j_mis = mismatch->num;
+
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ sa_idx = ip->res[i] - 1;
+
+ if (unlikely(ip->res[i] == DISCARD)) {
+ free_pkts(&m, 1);
+
+ stats->discard++;
+ } else if (unlikely(ip->res[i] == BYPASS)) {
+ match->pkts[j++] = m;
+
+ stats->bypass++;
+ } else {
+ if (prev_sa_idx == UINT32_MAX) {
+ prev_sa_idx = sa_idx;
+ sa = &sa_ctx->sa[sa_idx];
+ ips = ipsec_get_primary_session(sa);
+ satp = rte_ipsec_sa_type(ips->sa);
+ }
+
+ if (sa_idx != prev_sa_idx) {
+ prep_process_group(sa, ipsec, ipsec_num);
+
+ /* Prepare packets for outbound */
+ rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
+
+ /* Copy to current tr or a different tr */
+ if (SATP_OUT_IPV4(satp) == match_flag) {
+ memcpy(&match->pkts[j], ipsec,
+ ipsec_num * sizeof(void *));
+ j += ipsec_num;
+ } else {
+ memcpy(&mismatch->pkts[j_mis], ipsec,
+ ipsec_num * sizeof(void *));
+ j_mis += ipsec_num;
+ }
+
+ /* Update to new SA */
+ sa = &sa_ctx->sa[sa_idx];
+ ips = ipsec_get_primary_session(sa);
+ satp = rte_ipsec_sa_type(ips->sa);
+ ipsec_num = 0;
+ }
+
+ ipsec[ipsec_num++] = m;
+ stats->protect++;
+ }
+ }
+
+ if (ipsec_num) {
+ prep_process_group(sa, ipsec, ipsec_num);
+
+ /* Prepare packets for outbound */
+ rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
+
+ /* Copy to current tr or a different tr */
+ if (SATP_OUT_IPV4(satp) == match_flag) {
+ memcpy(&match->pkts[j], ipsec,
+ ipsec_num * sizeof(void *));
+ j += ipsec_num;
+ } else {
+ memcpy(&mismatch->pkts[j_mis], ipsec,
+ ipsec_num * sizeof(void *));
+ j_mis += ipsec_num;
+ }
+ }
+ match->num = j;
+ mismatch->num = j_mis;
+}
+
+/* Poll mode worker when all SA's are of type inline protocol */
+void
+ipsec_poll_mode_wrkr_inl_pr(void)
+{
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
+ / US_PER_S * BURST_TX_DRAIN_US;
+ struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out;
+ struct rte_mbuf *pkts[MAX_PKT_BURST];
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
+ struct ipsec_core_statistics *stats;
+ struct rt_ctx *rt4_ctx, *rt6_ctx;
+ struct sa_ctx *sa_in, *sa_out;
+ struct traffic_type ip4, ip6;
+ struct lcore_rx_queue *rxql;
+ struct rte_mbuf **v4, **v6;
+ struct ipsec_traffic trf;
+ struct lcore_conf *qconf;
+ uint16_t v4_num, v6_num;
+ int32_t socket_id;
+ uint32_t lcore_id;
+ int32_t i, nb_rx;
+ uint16_t portid;
+ uint8_t queueid;
+
+ prev_tsc = 0;
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_conf[lcore_id];
+ rxql = qconf->rx_queue_list;
+ socket_id = rte_lcore_to_socket_id(lcore_id);
+ stats = &core_statistics[lcore_id];
+
+ rt4_ctx = socket_ctx[socket_id].rt_ip4;
+ rt6_ctx = socket_ctx[socket_id].rt_ip6;
+
+ sp4_in = socket_ctx[socket_id].sp_ip4_in;
+ sp6_in = socket_ctx[socket_id].sp_ip6_in;
+ sa_in = socket_ctx[socket_id].sa_in;
+
+ sp4_out = socket_ctx[socket_id].sp_ip4_out;
+ sp6_out = socket_ctx[socket_id].sp_ip6_out;
+ sa_out = socket_ctx[socket_id].sa_out;
+
+ qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
+
+ if (qconf->nb_rx_queue == 0) {
+ RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
+ lcore_id);
+ return;
+ }
+
+ RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->nb_rx_queue; i++) {
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ RTE_LOG(INFO, IPSEC,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ lcore_id, portid, queueid);
+ }
+
+ while (!force_quit) {
+ cur_tsc = rte_rdtsc();
+
+ /* TX queue buffer drain */
+ diff_tsc = cur_tsc - prev_tsc;
+
+ if (unlikely(diff_tsc > drain_tsc)) {
+ drain_tx_buffers(qconf);
+ prev_tsc = cur_tsc;
+ }
+
+ for (i = 0; i < qconf->nb_rx_queue; ++i) {
+ /* Read packets from RX queues */
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ nb_rx = rte_eth_rx_burst(portid, queueid,
+ pkts, MAX_PKT_BURST);
+
+ if (nb_rx <= 0)
+ continue;
+
+ core_stats_update_rx(nb_rx);
+
+ prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);
+
+ /* Drop any IPsec traffic */
+ free_pkts(trf.ipsec.pkts, trf.ipsec.num);
+
+ if (is_unprotected_port(portid)) {
+ inbound_sp_sa(sp4_in, sa_in, &trf.ip4,
+ trf.ip4.num,
+ &stats->inbound.spd4);
+
+ inbound_sp_sa(sp6_in, sa_in, &trf.ip6,
+ trf.ip6.num,
+ &stats->inbound.spd6);
+
+ v4 = trf.ip4.pkts;
+ v4_num = trf.ip4.num;
+ v6 = trf.ip6.pkts;
+ v6_num = trf.ip6.num;
+ } else {
+ ip4.num = 0;
+ ip6.num = 0;
+
+ outb_inl_pro_spd_process(sp4_out, sa_out,
+ &trf.ip4, &ip4, &ip6,
+ true,
+ &stats->outbound.spd4);
+
+ outb_inl_pro_spd_process(sp6_out, sa_out,
+ &trf.ip6, &ip6, &ip4,
+ false,
+ &stats->outbound.spd6);
+ v4 = ip4.pkts;
+ v4_num = ip4.num;
+ v6 = ip6.pkts;
+ v6_num = ip6.num;
+ }
+
+ route4_pkts(rt4_ctx, v4, v4_num, 0, false);
+ route6_pkts(rt6_ctx, v6, v6_num);
+ }
+ }
+}
+
+/* Poll mode worker when all SA's are of type inline protocol
+ * and single sa mode is enabled.
+ */
+void
+ipsec_poll_mode_wrkr_inl_pr_ss(void)
+{
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
+ / US_PER_S * BURST_TX_DRAIN_US;
+ struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
+ struct rte_ipsec_session *ips;
+ struct lcore_rx_queue *rxql;
+ struct lcore_conf *qconf;
+ struct ipsec_traffic trf;
+ struct sa_ctx *sa_out;
+ uint32_t i, nb_rx, j;
+ struct ipsec_sa *sa;
+ int32_t socket_id;
+ uint32_t lcore_id;
+ uint16_t portid;
+ uint8_t queueid;
+
+ prev_tsc = 0;
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_conf[lcore_id];
+ rxql = qconf->rx_queue_list;
+ socket_id = rte_lcore_to_socket_id(lcore_id);
+
+ /* Get SA info */
+ sa_out = socket_ctx[socket_id].sa_out;
+ sa = &sa_out->sa[single_sa_idx];
+ ips = ipsec_get_primary_session(sa);
+
+ qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
+
+ if (qconf->nb_rx_queue == 0) {
+ RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
+ lcore_id);
+ return;
+ }
+
+ RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->nb_rx_queue; i++) {
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ RTE_LOG(INFO, IPSEC,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ lcore_id, portid, queueid);
+ }
+
+ while (!force_quit) {
+ cur_tsc = rte_rdtsc();
+
+ /* TX queue buffer drain */
+ diff_tsc = cur_tsc - prev_tsc;
+
+ if (unlikely(diff_tsc > drain_tsc)) {
+ drain_tx_buffers(qconf);
+ prev_tsc = cur_tsc;
+ }
+
+ for (i = 0; i < qconf->nb_rx_queue; ++i) {
+ /* Read packets from RX queues */
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ nb_rx = rte_eth_rx_burst(portid, queueid,
+ pkts, MAX_PKT_BURST);
+
+ if (nb_rx <= 0)
+ continue;
+
+ core_stats_update_rx(nb_rx);
+
+ if (is_unprotected_port(portid)) {
+ /* Nothing much to do for inbound inline
+ * decrypted traffic.
+ */
+ for (j = 0; j < nb_rx; j++) {
+ uint32_t ptype, proto;
+
+ pkt = pkts[j];
+ ptype = pkt->packet_type &
+ RTE_PTYPE_L3_MASK;
+ if (ptype == RTE_PTYPE_L3_IPV4)
+ proto = IPPROTO_IP;
+ else
+ proto = IPPROTO_IPV6;
+
+ send_single_packet(pkt, portid, proto);
+ }
+
+ continue;
+ }
+
+ /* Prepare packets for outbound */
+ prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);
+
+ /* Drop any IPsec traffic */
+ free_pkts(trf.ipsec.pkts, trf.ipsec.num);
+
+ rte_ipsec_pkt_process(ips, trf.ip4.pkts,
+ trf.ip4.num);
+ rte_ipsec_pkt_process(ips, trf.ip6.pkts,
+ trf.ip6.num);
+ portid = sa->portid;
+
+ /* Send v4 pkts out */
+ for (j = 0; j < trf.ip4.num; j++) {
+ pkt = trf.ip4.pkts[j];
+
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+ send_single_packet(pkt, portid, IPPROTO_IP);
+ }
+
+ /* Send v6 pkts out */
+ for (j = 0; j < trf.ip6.num; j++) {
+ pkt = trf.ip6.pkts[j];
+
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+ send_single_packet(pkt, portid, IPPROTO_IPV6);
+ }
+ }
+ }
+}
+
+static void
+ipsec_poll_mode_wrkr_launch(void)
+{
+ static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
+ [INL_PR_F] = ipsec_poll_mode_wrkr_inl_pr,
+ [INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
+ };
+ ipsec_worker_fn_t fn;
+
+ if (!app_sa_prm.enable) {
+ fn = ipsec_poll_mode_worker;
+ } else {
+ fn = poll_mode_wrkrs[wrkr_flags];
+
+ /* Always default to all mode worker */
+ if (!fn)
+ fn = ipsec_poll_mode_worker;
+ }
+
+ /* Launch worker */
+ (*fn)();
+}
+
int ipsec_launch_one_lcore(void *args)
{
struct eh_conf *conf;
@@ -1012,7 +1388,7 @@ int ipsec_launch_one_lcore(void *args)
if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
/* Run in poll mode */
- ipsec_poll_mode_worker();
+ ipsec_poll_mode_wrkr_launch();
} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
/* Run in event mode */
ipsec_eventmode_worker(conf);
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index b183248..a040d94 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -13,6 +13,8 @@
/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+
enum pkt_type {
PKT_TYPE_PLAIN_IPV4 = 1,
PKT_TYPE_IPSEC_IPV4,
@@ -42,6 +44,8 @@ struct lcore_conf_ev_tx_int_port_wrkr {
} __rte_cache_aligned;
void ipsec_poll_mode_worker(void);
+void ipsec_poll_mode_wrkr_inl_pr(void);
+void ipsec_poll_mode_wrkr_inl_pr_ss(void);
int ipsec_launch_one_lcore(void *args);
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 36d890f..db3d6bb 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -936,6 +936,15 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
}
+ if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+ wrkr_flags |= INL_CR_F;
+ else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+ wrkr_flags |= INL_PR_F;
+ else if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
+ wrkr_flags |= LA_PR_F;
+ else
+ wrkr_flags |= LA_ANY_F;
+
nb_crypto_sessions++;
*ri = *ri + 1;
}
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline
2022-03-22 17:58 [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
` (5 preceding siblings ...)
2022-03-22 17:58 ` [PATCH 7/7] examples/ipsec-secgw: add poll mode worker for inline proto Nithin Dabilpuram
@ 2022-04-13 6:13 ` Nithin Kumar Dabilpuram
2022-04-14 14:07 ` Ananyev, Konstantin
` (3 subsequent siblings)
10 siblings, 0 replies; 37+ messages in thread
From: Nithin Kumar Dabilpuram @ 2022-04-13 6:13 UTC (permalink / raw)
To: jerinj, Radu Nicolau, Akhil Goyal; +Cc: dev, anoobj
Ping.
On 3/22/22 11:28 PM, Nithin Dabilpuram wrote:
> Enable Tx IPv4 checksum offload only when Tx inline crypto, lookaside
> crypto/protocol or cpu crypto is needed.
> For Tx Inline protocol offload, checksum computation
> is implicitly taken care by HW.
>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
> ---
> examples/ipsec-secgw/ipsec-secgw.c | 3 ---
> examples/ipsec-secgw/sa.c | 32 +++++++++++++++++++++++++-------
> 2 files changed, 25 insertions(+), 10 deletions(-)
>
> diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
> index 42b5081..76919e5 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.c
> +++ b/examples/ipsec-secgw/ipsec-secgw.c
> @@ -2330,9 +2330,6 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
> local_port_conf.txmode.offloads |=
> RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
>
> - if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
> - local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
> -
> printf("port %u configuring rx_offloads=0x%" PRIx64
> ", tx_offloads=0x%" PRIx64 "\n",
> portid, local_port_conf.rxmode.offloads,
> diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
> index 1839ac7..36d890f 100644
> --- a/examples/ipsec-secgw/sa.c
> +++ b/examples/ipsec-secgw/sa.c
> @@ -1785,13 +1785,31 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
> for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
> rule = &sa_out[idx_sa];
> rule_type = ipsec_get_action_type(rule);
> - if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
> - rule_type ==
> - RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
> - && rule->portid == port_id) {
> - *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
> - if (rule->mss)
> - *tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
> + switch (rule_type) {
> + case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
> + /* Checksum offload is not needed for inline protocol as
> + * all processing for Outbound IPSec packets will be
> + * implicitly taken care and for non-IPSec packets,
> + * there is no need of IPv4 Checksum offload.
> + */
> + if (rule->portid == port_id)
> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
> + break;
> + case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
> + if (rule->portid == port_id) {
> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
> + if (rule->mss)
> + *tx_offloads |=
> + RTE_ETH_TX_OFFLOAD_TCP_TSO;
> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
> + }
> + break;
> + default:
> + /* Enable IPv4 checksum offload even if one of lookaside
> + * SA's are present.
> + */
> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
> + break;
> }
> }
> return 0;
^ permalink raw reply [flat|nested] 37+ messages in thread
* RE: [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline
2022-03-22 17:58 [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
` (6 preceding siblings ...)
2022-04-13 6:13 ` [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Kumar Dabilpuram
@ 2022-04-14 14:07 ` Ananyev, Konstantin
2022-04-19 13:56 ` Nithin Kumar Dabilpuram
2022-04-21 13:31 ` [PATCH v2 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
` (2 subsequent siblings)
10 siblings, 1 reply; 37+ messages in thread
From: Ananyev, Konstantin @ 2022-04-14 14:07 UTC (permalink / raw)
To: Nithin Dabilpuram, jerinj, Nicolau, Radu, Akhil Goyal; +Cc: dev, anoobj
Hi Nithin,
> Enable Tx IPv4 checksum offload only when Tx inline crypto, lookaside
> crypto/protocol or cpu crypto is needed.
> For Tx Inline protocol offload, checksum computation
> is implicitly taken care by HW.
The thing is that right now it is not stated explicitly that
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL implies TSO support. It says that it 'might', it is not guaranteed.
At least in dpdk docs.
From https://doc.dpdk.org/guides/prog_guide/rte_security.html:
"22.1.2. Inline protocol offload
...
Egress Data path - The software will send the plain packet without any security protocol headers added to the packet. The driver will configure the security index and other requirement in tx descriptors. The hardware device will do security processing on the packet that includes adding the relevant protocol headers and encrypting the data before sending the packet out. The software should make sure that the buffer has required head room and tail room for any protocol header addition. The software may also do early fragmentation if the resultant packet is expected to cross the MTU size.
Note
The underlying device will manage state information required for egress processing. E.g. in case of IPsec, the seq number will be added to the packet, however the device shall provide indication when the sequence number is about to overflow. The underlying device may support post encryption TSO."
So, if I am not mistaken, what you suggest will change HW/PMD requirements.
AFAIK, right now only Marvell supports RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
so in theory I don't mind if you'd like to harden the requirements here.
Though such change probably needs to be properly documented and
acked by other vendors.
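If an application cannot rely on post-encryption TSO being there, it
would have to gate the request on the advertised capability instead,
e.g. (illustration only, not a concrete proposal):
struct rte_eth_dev_info dev_info;
if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
	return -1;
if (rule->mss &&
    (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO))
	*tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;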
>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
> ---
> examples/ipsec-secgw/ipsec-secgw.c | 3 ---
> examples/ipsec-secgw/sa.c | 32 +++++++++++++++++++++++++-------
> 2 files changed, 25 insertions(+), 10 deletions(-)
>
> diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
> index 42b5081..76919e5 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.c
> +++ b/examples/ipsec-secgw/ipsec-secgw.c
> @@ -2330,9 +2330,6 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
> local_port_conf.txmode.offloads |=
> RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
>
> - if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
> - local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
> -
> printf("port %u configuring rx_offloads=0x%" PRIx64
> ", tx_offloads=0x%" PRIx64 "\n",
> portid, local_port_conf.rxmode.offloads,
> diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
> index 1839ac7..36d890f 100644
> --- a/examples/ipsec-secgw/sa.c
> +++ b/examples/ipsec-secgw/sa.c
> @@ -1785,13 +1785,31 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
> for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
> rule = &sa_out[idx_sa];
> rule_type = ipsec_get_action_type(rule);
> - if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
> - rule_type ==
> - RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
> - && rule->portid == port_id) {
> - *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
> - if (rule->mss)
> - *tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
> + switch (rule_type) {
> + case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
> + /* Checksum offload is not needed for inline protocol as
> + * all processing for Outbound IPSec packets will be
> + * implicitly taken care and for non-IPSec packets,
> + * there is no need of IPv4 Checksum offload.
> + */
> + if (rule->portid == port_id)
> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
> + break;
> + case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
> + if (rule->portid == port_id) {
> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
> + if (rule->mss)
> + *tx_offloads |=
> + RTE_ETH_TX_OFFLOAD_TCP_TSO;
> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
> + }
> + break;
> + default:
> + /* Enable IPv4 checksum offload even if one of lookaside
> + * SA's are present.
> + */
> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
Shouldn't we check here that given port really supports IPV4_CKSUM offload?
> + break;
> }
> }
> return 0;
> --
> 2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline
2022-04-14 14:07 ` Ananyev, Konstantin
@ 2022-04-19 13:56 ` Nithin Kumar Dabilpuram
2022-04-20 10:42 ` Ananyev, Konstantin
0 siblings, 1 reply; 37+ messages in thread
From: Nithin Kumar Dabilpuram @ 2022-04-19 13:56 UTC (permalink / raw)
To: Ananyev, Konstantin, jerinj, Nicolau, Radu, Akhil Goyal; +Cc: dev, anoobj
Hi Konstantin,
Please see inline.
On 4/14/22 7:37 PM, Ananyev, Konstantin wrote:
> Hi Nithin,
>
>
>> Enable Tx IPv4 checksum offload only when Tx inline crypto, lookaside
>> crypto/protocol or cpu crypto is needed.
>> For Tx Inline protocol offload, checksum computation
>> is implicitly taken care by HW.
>
> The thing is that right now it is not stated explicitly that
> RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL implies TSO support. It says that it 'might', it is not guaranteed.
> At least in dpdk docs.
> From https://doc.dpdk.org/guides/prog_guide/rte_security.html:
> "22.1.2. Inline protocol offload
> ...
> Egress Data path - The software will send the plain packet without any security protocol headers added to the packet. The driver will configure the security index and other requirement in tx descriptors. The hardware device will do security processing on the packet that includes adding the relevant protocol headers and encrypting the data before sending the packet out. The software should make sure that the buffer has required head room and tail room for any protocol header addition. The software may also do early fragmentation if the resultant packet is expected to cross the MTU size.
> Note
> The underlying device will manage state information required for egress processing. E.g. in case of IPsec, the seq number will be added to the packet, however the device shall provide indication when the sequence number is about to overflow. The underlying device may support post encryption TSO."
>
> So, if I am not mistaken, what you suggest will change HW/PMD requirements.
> AFAIK, right now only Marvell supports RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
> so in theory I don't mind if you'd like to harden the requirements here.
> Though such change probably needs to be properly documented and
> acked by other vendors.
Ok, I was only thinking of IPV4 CKSUM offload without TSO and thought
that it is not needed in the case of INLINE PROTOCOL.
To maintain the behavior for TSO with INLINE_PROTO, I can set both
IPV4_CKSUM offload and TCP_TSO if TSO is requested, i.e. rule->mss is set.
We can revisit the spec for the TSO+INLINE_PROTOCOL offload combination later,
as our HW doesn't support TSO before inline IPsec processing.
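In code, something along these lines (just a sketch of the intended
v2 change):
case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
	if (rule->portid == port_id) {
		*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
		/* TSO requested on this SA: keep cksum offload too */
		if (rule->mss)
			*tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO |
					RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
	}
	break;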
>
>>
>> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
>> ---
>> examples/ipsec-secgw/ipsec-secgw.c | 3 ---
>> examples/ipsec-secgw/sa.c | 32 +++++++++++++++++++++++++-------
>> 2 files changed, 25 insertions(+), 10 deletions(-)
>>
>> diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
>> index 42b5081..76919e5 100644
>> --- a/examples/ipsec-secgw/ipsec-secgw.c
>> +++ b/examples/ipsec-secgw/ipsec-secgw.c
>> @@ -2330,9 +2330,6 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
>> local_port_conf.txmode.offloads |=
>> RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
>>
>> - if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
>> - local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
>> -
>> printf("port %u configuring rx_offloads=0x%" PRIx64
>> ", tx_offloads=0x%" PRIx64 "\n",
>> portid, local_port_conf.rxmode.offloads,
>> diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
>> index 1839ac7..36d890f 100644
>> --- a/examples/ipsec-secgw/sa.c
>> +++ b/examples/ipsec-secgw/sa.c
>> @@ -1785,13 +1785,31 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
>> for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
>> rule = &sa_out[idx_sa];
>> rule_type = ipsec_get_action_type(rule);
>> - if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
>> - rule_type ==
>> - RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
>> - && rule->portid == port_id) {
>> - *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
>> - if (rule->mss)
>> - *tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
>> + switch (rule_type) {
>> + case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
>> + /* Checksum offload is not needed for inline protocol as
>> + * all processing for Outbound IPSec packets will be
>> + * implicitly taken care and for non-IPSec packets,
>> + * there is no need of IPv4 Checksum offload.
>> + */
>> + if (rule->portid == port_id)
>> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
>> + break;
>> + case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
>> + if (rule->portid == port_id) {
>> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
>> + if (rule->mss)
>> + *tx_offloads |=
>> + RTE_ETH_TX_OFFLOAD_TCP_TSO;
>> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
>> + }
>> + break;
>> + default:
>> + /* Enable IPv4 checksum offload even if one of lookaside
>> + * SA's are present.
>> + */
>> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
>
> Shouldn't we check here that given port really supports IPV4_CKSUM offload?
It is already being checked at port_init().
>
>> + break;
>> }
>> }
>> return 0;
>> --
>> 2.8.4
>
^ permalink raw reply [flat|nested] 37+ messages in thread
* RE: [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline
2022-04-19 13:56 ` Nithin Kumar Dabilpuram
@ 2022-04-20 10:42 ` Ananyev, Konstantin
0 siblings, 0 replies; 37+ messages in thread
From: Ananyev, Konstantin @ 2022-04-20 10:42 UTC (permalink / raw)
To: Nithin Kumar Dabilpuram, jerinj, Nicolau, Radu, Akhil Goyal; +Cc: dev, anoobj
Hi Nithin,
> >> Enable Tx IPv4 checksum offload only when Tx inline crypto, lookaside
> >> crypto/protocol or cpu crypto is needed.
> >> For Tx Inline protocol offload, checksum computation
> >> is implicitly taken care by HW.
> >
> > The thing is that right now it is not stated explicitly that
> > RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL implies TSO support. It says that it 'might', it is not guaranteed.
> > At least in dpdk docs.
> > From https://doc.dpdk.org/guides/prog_guide/rte_security.html:
> > "22.1.2. Inline protocol offload
> > ...
> > Egress Data path - The software will send the plain packet without any security protocol headers added to the packet. The driver will
> configure the security index and other requirement in tx descriptors. The hardware device will do security processing on the packet that
> includes adding the relevant protocol headers and encrypting the data before sending the packet out. The software should make sure that
> the buffer has required head room and tail room for any protocol header addition. The software may also do early fragmentation if the
> resultant packet is expected to cross the MTU size.
> > Note
> > The underlying device will manage state information required for egress processing. E.g. in case of IPsec, the seq number will be added to
> the packet, however the device shall provide indication when the sequence number is about to overflow. The underlying device may support
> post encryption TSO."
> >
> > So, if I am not mistaken, what you suggest will change HW/PMD requirements.
> > AFAIK, right now only Marvell supports RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
> > so in theory I don't mind if you'd like to harden the requirements here.
> > Though such change probably needs to be properly documented and
> > acked by other vendors.
>
> Ok, I was only thinking of IPV4 CKSUM offload without TSO and thought
> that it is not needed in the case of INLINE PROTOCOL.
>
> To maintain the behavior for TSO with INLINE_PROTO, I can set both
> IPV4_CKSUM offload and TCP_TSO if TSO is requested, i.e. rule->mss is set.
> We can revisit the spec for the TSO+INLINE_PROTOCOL offload combination later,
> as our HW doesn't support TSO before inline IPsec processing.
Sounds reasonable.
>
> >
> >>
> >> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
> >> ---
> >> examples/ipsec-secgw/ipsec-secgw.c | 3 ---
> >> examples/ipsec-secgw/sa.c | 32 +++++++++++++++++++++++++-------
> >> 2 files changed, 25 insertions(+), 10 deletions(-)
> >>
> >> diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
> >> index 42b5081..76919e5 100644
> >> --- a/examples/ipsec-secgw/ipsec-secgw.c
> >> +++ b/examples/ipsec-secgw/ipsec-secgw.c
> >> @@ -2330,9 +2330,6 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
> >> local_port_conf.txmode.offloads |=
> >> RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
> >>
> >> - if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
> >> - local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
> >> -
> >> printf("port %u configuring rx_offloads=0x%" PRIx64
> >> ", tx_offloads=0x%" PRIx64 "\n",
> >> portid, local_port_conf.rxmode.offloads,
> >> diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
> >> index 1839ac7..36d890f 100644
> >> --- a/examples/ipsec-secgw/sa.c
> >> +++ b/examples/ipsec-secgw/sa.c
> >> @@ -1785,13 +1785,31 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
> >> for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
> >> rule = &sa_out[idx_sa];
> >> rule_type = ipsec_get_action_type(rule);
> >> - if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
> >> - rule_type ==
> >> - RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
> >> - && rule->portid == port_id) {
> >> - *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
> >> - if (rule->mss)
> >> - *tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
> >> + switch (rule_type) {
> >> + case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
> >> + /* Checksum offload is not needed for inline protocol as
> >> + * all processing for Outbound IPSec packets will be
> >> + * implicitly taken care and for non-IPSec packets,
> >> + * there is no need of IPv4 Checksum offload.
> >> + */
> >> + if (rule->portid == port_id)
> >> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
> >> + break;
> >> + case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
> >> + if (rule->portid == port_id) {
> >> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
> >> + if (rule->mss)
> >> + *tx_offloads |=
> >> + RTE_ETH_TX_OFFLOAD_TCP_TSO;
> >> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
> >> + }
> >> + break;
> >> + default:
> >> + /* Enable IPv4 checksum offload even if one of lookaside
> >> + * SA's are present.
> >> + */
> >> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
> >
> > Shouldn't we check here that given port really supports IPV4_CKSUM offload?
>
> It is already being checked at port_init().
The problem is that we first invoke sa_check_offloads(), which sets the required rx/tx offloads.
If in that function we just blindly OR RTE_ETH_TX_OFFLOAD_IPV4_CKSUM into req_tx_offloads
while the actual device doesn't support it, then port_init() for that device will later fail:
port_init(...)
{
....
local_port_conf.txmode.offloads |= req_tx_offloads;
....
if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
local_port_conf.txmode.offloads)
rte_exit(EXIT_FAILURE,
"Error: port %u required TX offloads: 0x%" PRIx64
", available TX offloads: 0x%" PRIx64 "\n",
portid, local_port_conf.txmode.offloads,
dev_info.tx_offload_capa);
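So the capability would have to be consulted already in
sa_check_offloads(), something like (sketch only):
struct rte_eth_dev_info dev_info;
rte_eth_dev_info_get(port_id, &dev_info);
/* only request the offload when the port can actually do it */
if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
	*tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;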
> >
> >> + break;
> >> }
> >> }
> >> return 0;
> >> --
> >> 2.8.4
> >
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v2 1/7] examples/ipsec-secgw: move fast path helper functions
2022-03-22 17:58 [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
` (7 preceding siblings ...)
2022-04-14 14:07 ` Ananyev, Konstantin
@ 2022-04-21 13:31 ` Nithin Dabilpuram
2022-04-21 13:31 ` [PATCH v2 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
` (5 more replies)
2022-04-28 15:04 ` [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
2022-04-29 20:44 ` [PATCH v4 " Nithin Dabilpuram
10 siblings, 6 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-21 13:31 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Move fast path helper functions to a header file for easy access.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
v2:
- Moved this patch from 4/7 to 1/7 so that all of the code movement is in the
first patch, without any functional change.
- In patch 1/7, handled comments from Konstantin to check for capabilities before
using Tx offloads in the lookaside (LA) case, and also to enable checksum offload
in the case of TSO + Inline Protocol.
- In patch 2/7, handled comments from Konstantin to use the RTE_ETH_IS* macros.
- In patch 2/7, used the tx_offload field and the RTE_MBUF_L2_LEN_BITS shift to
write to mbuf->tx_offload instead of bitfield access, so that the field is
cleared and there are only stores and no loads (see the sketch after this list).
- In patch 5/7, made a few fast path functions always_inline.
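For reference, the tx_offload store mentioned for patch 2/7 is along
these lines (sketch; the exact values written in the patch may differ):
/* One store sets l2_len = 0 (ether header already stripped off) and
 * l3_len = IPv4 header size, and clears the other length fields.
 */
pkt->tx_offload = (uint64_t)sizeof(struct rte_ipv4_hdr) <<
		  RTE_MBUF_L2_LEN_BITS;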
examples/ipsec-secgw/ipsec-secgw.c | 575 +---------------------------------
examples/ipsec-secgw/ipsec-secgw.h | 4 +
examples/ipsec-secgw/ipsec.h | 34 ++
examples/ipsec-secgw/ipsec_process.c | 49 +--
examples/ipsec-secgw/ipsec_worker.h | 588 +++++++++++++++++++++++++++++++++++
5 files changed, 630 insertions(+), 620 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 42b5081..959a20b 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -70,11 +70,6 @@ volatile bool force_quit;
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-/* Configure how many packets ahead to prefetch, when reading packets */
-#define PREFETCH_OFFSET 3
-
-#define MAX_RX_QUEUE_PER_LCORE 16
-
#define MAX_LCORE_PARAMS 1024
/*
@@ -191,9 +186,9 @@ static uint64_t dev_tx_offload = UINT64_MAX;
/*
* global values that determine multi-seg policy
*/
-static uint32_t frag_tbl_sz;
+uint32_t frag_tbl_sz;
static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
-static uint32_t mtu_size = RTE_ETHER_MTU;
+uint32_t mtu_size = RTE_ETHER_MTU;
static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
static uint32_t stats_interval;
@@ -205,11 +200,6 @@ struct app_sa_prm app_sa_prm = {
};
static const char *cfgfile;
-struct lcore_rx_queue {
- uint16_t port_id;
- uint8_t queue_id;
-} __rte_cache_aligned;
-
struct lcore_params {
uint16_t port_id;
uint8_t queue_id;
@@ -224,28 +214,7 @@ static uint16_t nb_lcore_params;
static struct rte_hash *cdev_map_in;
static struct rte_hash *cdev_map_out;
-struct buffer {
- uint16_t len;
- struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
-};
-
-struct lcore_conf {
- uint16_t nb_rx_queue;
- struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
- uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
- struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
- struct ipsec_ctx inbound;
- struct ipsec_ctx outbound;
- struct rt_ctx *rt4_ctx;
- struct rt_ctx *rt6_ctx;
- struct {
- struct rte_ip_frag_tbl *tbl;
- struct rte_mempool *pool_indir;
- struct rte_ip_frag_death_row dr;
- } frag;
-} __rte_cache_aligned;
-
-static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
+struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
.rxmode = {
@@ -281,32 +250,6 @@ multi_seg_required(void)
frame_buf_size || frag_tbl_sz != 0);
}
-static inline void
-adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
- uint32_t l2_len)
-{
- uint32_t plen, trim;
-
- plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
- if (plen < m->pkt_len) {
- trim = m->pkt_len - plen;
- rte_pktmbuf_trim(m, trim);
- }
-}
-
-static inline void
-adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
- uint32_t l2_len)
-{
- uint32_t plen, trim;
-
- plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
- if (plen < m->pkt_len) {
- trim = m->pkt_len - plen;
- rte_pktmbuf_trim(m, trim);
- }
-}
-
struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
@@ -371,369 +314,6 @@ print_stats_cb(__rte_unused void *param)
rte_eal_alarm_set(stats_interval * US_PER_S, print_stats_cb, NULL);
}
-static inline void
-prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
-{
- const struct rte_ether_hdr *eth;
- const struct rte_ipv4_hdr *iph4;
- const struct rte_ipv6_hdr *iph6;
- const struct rte_udp_hdr *udp;
- uint16_t ip4_hdr_len;
- uint16_t nat_port;
-
- eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
- if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
-
- iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
- RTE_ETHER_HDR_LEN);
- adjust_ipv4_pktlen(pkt, iph4, 0);
-
- switch (iph4->next_proto_id) {
- case IPPROTO_ESP:
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- ip4_hdr_len = ((iph4->version_ihl &
- RTE_IPV4_HDR_IHL_MASK) *
- RTE_IPV4_IHL_MULTIPLIER);
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, ip4_hdr_len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
- t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
- t->ip4.pkts[(t->ip4.num)++] = pkt;
- }
- pkt->l2_len = 0;
- pkt->l3_len = sizeof(*iph4);
- pkt->packet_type |= RTE_PTYPE_L3_IPV4;
- if (pkt->packet_type & RTE_PTYPE_L4_TCP)
- pkt->l4_len = sizeof(struct rte_tcp_hdr);
- else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
- pkt->l4_len = sizeof(struct rte_udp_hdr);
- } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
- int next_proto;
- size_t l3len, ext_len;
- uint8_t *p;
-
- /* get protocol type */
- iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
- RTE_ETHER_HDR_LEN);
- adjust_ipv6_pktlen(pkt, iph6, 0);
-
- next_proto = iph6->proto;
-
- /* determine l3 header size up to ESP extension */
- l3len = sizeof(struct ip6_hdr);
- p = rte_pktmbuf_mtod(pkt, uint8_t *);
- while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
- (next_proto = rte_ipv6_get_next_ext(p + l3len,
- next_proto, &ext_len)) >= 0)
- l3len += ext_len;
-
- /* drop packet when IPv6 header exceeds first segment length */
- if (unlikely(l3len > pkt->data_len)) {
- free_pkts(&pkt, 1);
- return;
- }
-
- switch (next_proto) {
- case IPPROTO_ESP:
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, l3len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
- t->ip6.data[t->ip6.num] = &iph6->proto;
- t->ip6.pkts[(t->ip6.num)++] = pkt;
- }
- pkt->l2_len = 0;
- pkt->l3_len = l3len;
- pkt->packet_type |= RTE_PTYPE_L3_IPV6;
- } else {
- /* Unknown/Unsupported type, drop the packet */
- RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
- rte_be_to_cpu_16(eth->ether_type));
- free_pkts(&pkt, 1);
- return;
- }
-
- /* Check if the packet has been processed inline. For inline protocol
- * processed packets, the metadata in the mbuf can be used to identify
- * the security processing done on the packet. The metadata will be
- * used to retrieve the application registered userdata associated
- * with the security session.
- */
-
- if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
- rte_security_dynfield_is_registered()) {
- struct ipsec_sa *sa;
- struct ipsec_mbuf_metadata *priv;
- struct rte_security_ctx *ctx = (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(
- pkt->port);
-
- /* Retrieve the userdata registered. Here, the userdata
- * registered is the SA pointer.
- */
- sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
- *rte_security_dynfield(pkt));
- if (sa == NULL) {
- /* userdata could not be retrieved */
- return;
- }
-
- /* Save SA as priv member in mbuf. This will be used in the
- * IPsec selector(SP-SA) check.
- */
-
- priv = get_priv(pkt);
- priv->sa = sa;
- }
-}
-
-static inline void
-prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
- uint16_t nb_pkts)
-{
- int32_t i;
-
- t->ipsec.num = 0;
- t->ip4.num = 0;
- t->ip6.num = 0;
-
- for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
- rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
- void *));
- prepare_one_packet(pkts[i], t);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_one_packet(pkts[i], t);
-}
-
-static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
- const struct lcore_conf *qconf)
-{
- struct ip *ip;
- struct rte_ether_hdr *ethhdr;
-
- ip = rte_pktmbuf_mtod(pkt, struct ip *);
-
- ethhdr = (struct rte_ether_hdr *)
- rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
-
- if (ip->ip_v == IPVERSION) {
- pkt->ol_flags |= qconf->outbound.ipv4_offloads;
- pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ip->ip_sum = 0;
-
- /* calculate IPv4 cksum in SW */
- if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
- ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
- } else {
- pkt->ol_flags |= qconf->outbound.ipv6_offloads;
- pkt->l3_len = sizeof(struct ip6_hdr);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- }
-
- memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
- sizeof(struct rte_ether_addr));
- memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
- sizeof(struct rte_ether_addr));
-}
-
-static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
- const struct lcore_conf *qconf)
-{
- int32_t i;
- const int32_t prefetch_offset = 2;
-
- for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
- rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
- prepare_tx_pkt(pkts[i], port, qconf);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_tx_pkt(pkts[i], port, qconf);
-}
-
-/* Send burst of packets on an output interface */
-static inline int32_t
-send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
-{
- struct rte_mbuf **m_table;
- int32_t ret;
- uint16_t queueid;
-
- queueid = qconf->tx_queue_id[port];
- m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
-
- prepare_tx_burst(m_table, n, port, qconf);
-
- ret = rte_eth_tx_burst(port, queueid, m_table, n);
-
- core_stats_update_tx(ret);
-
- if (unlikely(ret < n)) {
- do {
- free_pkts(&m_table[ret], 1);
- } while (++ret < n);
- }
-
- return 0;
-}
-
-/*
- * Helper function to fragment and queue for TX one packet.
- */
-static inline uint32_t
-send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
- uint16_t port, uint8_t proto)
-{
- struct buffer *tbl;
- uint32_t len, n;
- int32_t rc;
-
- tbl = qconf->tx_mbufs + port;
- len = tbl->len;
-
- /* free space for new fragments */
- if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
- send_burst(qconf, len, port);
- len = 0;
- }
-
- n = RTE_DIM(tbl->m_table) - len;
-
- if (proto == IPPROTO_IP)
- rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
- n, mtu_size, m->pool, qconf->frag.pool_indir);
- else
- rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
- n, mtu_size, m->pool, qconf->frag.pool_indir);
-
- if (rc >= 0)
- len += rc;
- else
- RTE_LOG(ERR, IPSEC,
- "%s: failed to fragment packet with size %u, "
- "error code: %d\n",
- __func__, m->pkt_len, rte_errno);
-
- free_pkts(&m, 1);
- return len;
-}
-
-/* Enqueue a single packet, and send burst if queue is filled */
-static inline int32_t
-send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
-{
- uint32_t lcore_id;
- uint16_t len;
- struct lcore_conf *qconf;
-
- lcore_id = rte_lcore_id();
-
- qconf = &lcore_conf[lcore_id];
- len = qconf->tx_mbufs[port].len;
-
- if (m->pkt_len <= mtu_size) {
- qconf->tx_mbufs[port].m_table[len] = m;
- len++;
-
- /* need to fragment the packet */
- } else if (frag_tbl_sz > 0)
- len = send_fragment_packet(qconf, m, port, proto);
- else
- free_pkts(&m, 1);
-
- /* enough pkts to be sent */
- if (unlikely(len == MAX_PKT_BURST)) {
- send_burst(qconf, MAX_PKT_BURST, port);
- len = 0;
- }
-
- qconf->tx_mbufs[port].len = len;
- return 0;
-}
-
-static inline void
-inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
- uint16_t lim, struct ipsec_spd_stats *stats)
-{
- struct rte_mbuf *m;
- uint32_t i, j, res, sa_idx;
-
- if (ip->num == 0 || sp == NULL)
- return;
-
- rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
- ip->num, DEFAULT_MAX_CATEGORIES);
-
- j = 0;
- for (i = 0; i < ip->num; i++) {
- m = ip->pkts[i];
- res = ip->res[i];
- if (res == BYPASS) {
- ip->pkts[j++] = m;
- stats->bypass++;
- continue;
- }
- if (res == DISCARD) {
- free_pkts(&m, 1);
- stats->discard++;
- continue;
- }
-
- /* Only check SPI match for processed IPSec packets */
- if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
- stats->discard++;
- free_pkts(&m, 1);
- continue;
- }
-
- sa_idx = res - 1;
- if (!inbound_sa_check(sa, m, sa_idx)) {
- stats->discard++;
- free_pkts(&m, 1);
- continue;
- }
- ip->pkts[j++] = m;
- stats->protect++;
- }
- ip->num = j;
-}
-
static void
split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
{
@@ -962,140 +542,6 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
}
}
-static inline int32_t
-get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
-{
- struct ipsec_mbuf_metadata *priv;
- struct ipsec_sa *sa;
-
- priv = get_priv(pkt);
-
- sa = priv->sa;
- if (unlikely(sa == NULL)) {
- RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
- goto fail;
- }
-
- if (is_ipv6)
- return sa->portid;
-
- /* else */
- return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
-
-fail:
- if (is_ipv6)
- return -1;
-
- /* else */
- return 0;
-}
-
-static inline void
-route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
-{
- uint32_t hop[MAX_PKT_BURST * 2];
- uint32_t dst_ip[MAX_PKT_BURST * 2];
- int32_t pkt_hop = 0;
- uint16_t i, offset;
- uint16_t lpm_pkts = 0;
- unsigned int lcoreid = rte_lcore_id();
-
- if (nb_pkts == 0)
- return;
-
- /* Need to do an LPM lookup for non-inline packets. Inline packets will
- * have port ID in the SA
- */
-
- for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
- /* Security offload not enabled. So an LPM lookup is
- * required to get the hop
- */
- offset = offsetof(struct ip, ip_dst);
- dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
- uint32_t *, offset);
- dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
- lpm_pkts++;
- }
- }
-
- rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
-
- lpm_pkts = 0;
-
- for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
- /* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
- } else {
- /* Need to use hop returned by lookup */
- pkt_hop = hop[lpm_pkts++];
- }
-
- if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
- core_statistics[lcoreid].lpm4.miss++;
- free_pkts(&pkts[i], 1);
- continue;
- }
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
- }
-}
-
-static inline void
-route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
-{
- int32_t hop[MAX_PKT_BURST * 2];
- uint8_t dst_ip[MAX_PKT_BURST * 2][16];
- uint8_t *ip6_dst;
- int32_t pkt_hop = 0;
- uint16_t i, offset;
- uint16_t lpm_pkts = 0;
- unsigned int lcoreid = rte_lcore_id();
-
- if (nb_pkts == 0)
- return;
-
- /* Need to do an LPM lookup for non-inline packets. Inline packets will
- * have port ID in the SA
- */
-
- for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
- /* Security offload not enabled. So an LPM lookup is
- * required to get the hop
- */
- offset = offsetof(struct ip6_hdr, ip6_dst);
- ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
- offset);
- memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
- lpm_pkts++;
- }
- }
-
- rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
- lpm_pkts);
-
- lpm_pkts = 0;
-
- for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
- /* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
- } else {
- /* Need to use hop returned by lookup */
- pkt_hop = hop[lpm_pkts++];
- }
-
- if (pkt_hop == -1) {
- core_statistics[lcoreid].lpm6.miss++;
- free_pkts(&pkts[i], 1);
- continue;
- }
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
- }
-}
-
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
uint8_t nb_pkts, uint16_t portid)
@@ -1121,21 +567,6 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
}
static inline void
-drain_tx_buffers(struct lcore_conf *qconf)
-{
- struct buffer *buf;
- uint32_t portid;
-
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- buf = &qconf->tx_mbufs[portid];
- if (buf->len == 0)
- continue;
- send_burst(qconf, buf->len, portid);
- buf->len = 0;
- }
-}
-
-static inline void
drain_crypto_buffers(struct lcore_conf *qconf)
{
uint32_t i;
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index 24f11ad..fceb835 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -6,6 +6,7 @@
#include <stdbool.h>
+#define MAX_RX_QUEUE_PER_LCORE 16
#define NB_SOCKETS 4
@@ -136,6 +137,9 @@ extern uint32_t nb_bufs_in_pool;
extern bool per_port_pool;
+extern uint32_t mtu_size;
+extern uint32_t frag_tbl_sz;
+
static inline uint8_t
is_unprotected_port(uint16_t port_id)
{
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index ccfde8e..9a4e7ea 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -9,6 +9,7 @@
#include <rte_byteorder.h>
#include <rte_crypto.h>
+#include <rte_ip_frag.h>
#include <rte_security.h>
#include <rte_flow.h>
#include <rte_ipsec.h>
@@ -37,6 +38,11 @@
#define IP6_VERSION (6)
+#define SATP_OUT_IPV4(t) \
+ ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
+ (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
+ ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
+
struct rte_crypto_xform;
struct ipsec_xform;
struct rte_mbuf;
@@ -260,6 +266,34 @@ struct cnt_blk {
uint32_t cnt;
} __rte_packed;
+struct lcore_rx_queue {
+ uint16_t port_id;
+ uint8_t queue_id;
+} __rte_cache_aligned;
+
+struct buffer {
+ uint16_t len;
+ struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+};
+
+struct lcore_conf {
+ uint16_t nb_rx_queue;
+ struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
+ uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
+ struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
+ struct ipsec_ctx inbound;
+ struct ipsec_ctx outbound;
+ struct rt_ctx *rt4_ctx;
+ struct rt_ctx *rt6_ctx;
+ struct {
+ struct rte_ip_frag_tbl *tbl;
+ struct rte_mempool *pool_indir;
+ struct rte_ip_frag_death_row dr;
+ } frag;
+} __rte_cache_aligned;
+
+extern struct lcore_conf lcore_conf[RTE_MAX_LCORE];
+
/* Socket ctx */
extern struct socket_ctx socket_ctx[NB_SOCKETS];
diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index 285e9c7..089d89f 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -13,11 +13,7 @@
#include "ipsec.h"
#include "ipsec-secgw.h"
-
-#define SATP_OUT_IPV4(t) \
- ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
- (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
- ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
+#include "ipsec_worker.h"
/* helper routine to free bulk of crypto-ops and related packets */
static inline void
@@ -209,49 +205,6 @@ ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
}
/*
- * helper routine for inline and cpu(synchronous) processing
- * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
- * Should be removed in future.
- */
-static inline void
-prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
-{
- uint32_t j;
- struct ipsec_mbuf_metadata *priv;
-
- for (j = 0; j != cnt; j++) {
- priv = get_priv(mb[j]);
- priv->sa = sa;
- /* setup TSO related fields if TSO enabled*/
- if (priv->sa->mss) {
- uint32_t ptype = mb[j]->packet_type;
- /* only TCP is supported */
- if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
- mb[j]->tso_segsz = priv->sa->mss;
- if ((IS_TUNNEL(priv->sa->flags))) {
- mb[j]->outer_l3_len = mb[j]->l3_len;
- mb[j]->outer_l2_len = mb[j]->l2_len;
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_TUNNEL_ESP;
- if (RTE_ETH_IS_IPV4_HDR(ptype))
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_OUTER_IP_CKSUM;
- }
- mb[j]->l4_len = sizeof(struct rte_tcp_hdr);
- mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG |
- RTE_MBUF_F_TX_TCP_CKSUM);
- if (RTE_ETH_IS_IPV4_HDR(ptype))
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_OUTER_IPV4;
- else
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_OUTER_IPV6;
- }
- }
- }
-}
-
-/*
* finish processing of packets successfully decrypted by an inline processor
*/
static uint32_t
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 5d85cf1..e0b0a82 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -4,8 +4,15 @@
#ifndef _IPSEC_WORKER_H_
#define _IPSEC_WORKER_H_
+#include <rte_acl.h>
+#include <rte_ethdev.h>
+#include <rte_lpm.h>
+#include <rte_lpm6.h>
+
#include "ipsec.h"
+/* Configure how many packets ahead to prefetch, when reading packets */
+#define PREFETCH_OFFSET 3
enum pkt_type {
PKT_TYPE_PLAIN_IPV4 = 1,
PKT_TYPE_IPSEC_IPV4,
@@ -38,4 +45,585 @@ void ipsec_poll_mode_worker(void);
int ipsec_launch_one_lcore(void *args);
+/*
+ * helper routine for inline and cpu(synchronous) processing
+ * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
+ * Should be removed in future.
+ */
+static inline void
+prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
+{
+ uint32_t j;
+ struct ipsec_mbuf_metadata *priv;
+
+ for (j = 0; j != cnt; j++) {
+ priv = get_priv(mb[j]);
+ priv->sa = sa;
+ /* setup TSO related fields if TSO enabled*/
+ if (priv->sa->mss) {
+ uint32_t ptype = mb[j]->packet_type;
+ /* only TCP is supported */
+ if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
+ mb[j]->tso_segsz = priv->sa->mss;
+ if ((IS_TUNNEL(priv->sa->flags))) {
+ mb[j]->outer_l3_len = mb[j]->l3_len;
+ mb[j]->outer_l2_len = mb[j]->l2_len;
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_TUNNEL_ESP;
+ if (RTE_ETH_IS_IPV4_HDR(ptype))
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM;
+ }
+ mb[j]->l4_len = sizeof(struct rte_tcp_hdr);
+ mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG |
+ RTE_MBUF_F_TX_TCP_CKSUM);
+ if (RTE_ETH_IS_IPV4_HDR(ptype))
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_OUTER_IPV4;
+ else
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_OUTER_IPV6;
+ }
+ }
+ }
+}
+
+static inline void
+adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
+ uint32_t l2_len)
+{
+ uint32_t plen, trim;
+
+ plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
+ if (plen < m->pkt_len) {
+ trim = m->pkt_len - plen;
+ rte_pktmbuf_trim(m, trim);
+ }
+}
+
+static inline void
+adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
+ uint32_t l2_len)
+{
+ uint32_t plen, trim;
+
+ plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
+ if (plen < m->pkt_len) {
+ trim = m->pkt_len - plen;
+ rte_pktmbuf_trim(m, trim);
+ }
+}
+
+static inline void
+prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
+{
+ const struct rte_ether_hdr *eth;
+ const struct rte_ipv4_hdr *iph4;
+ const struct rte_ipv6_hdr *iph6;
+ const struct rte_udp_hdr *udp;
+ uint16_t ip4_hdr_len;
+ uint16_t nat_port;
+
+ eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
+ if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+
+ iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
+ RTE_ETHER_HDR_LEN);
+ adjust_ipv4_pktlen(pkt, iph4, 0);
+
+ switch (iph4->next_proto_id) {
+ case IPPROTO_ESP:
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ break;
+ case IPPROTO_UDP:
+ if (app_sa_prm.udp_encap == 1) {
+ ip4_hdr_len = ((iph4->version_ihl &
+ RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER);
+ udp = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_udp_hdr *, ip4_hdr_len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port){
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ pkt->packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ break;
+ }
+ }
+ /* Fall through */
+ default:
+ t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
+ t->ip4.pkts[(t->ip4.num)++] = pkt;
+ }
+ pkt->l2_len = 0;
+ pkt->l3_len = sizeof(*iph4);
+ pkt->packet_type |= RTE_PTYPE_L3_IPV4;
+ if (pkt->packet_type & RTE_PTYPE_L4_TCP)
+ pkt->l4_len = sizeof(struct rte_tcp_hdr);
+ else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
+ pkt->l4_len = sizeof(struct rte_udp_hdr);
+ } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ int next_proto;
+ size_t l3len, ext_len;
+ uint8_t *p;
+
+ /* get protocol type */
+ iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
+ RTE_ETHER_HDR_LEN);
+ adjust_ipv6_pktlen(pkt, iph6, 0);
+
+ next_proto = iph6->proto;
+
+ /* determine l3 header size up to ESP extension */
+ l3len = sizeof(struct ip6_hdr);
+ p = rte_pktmbuf_mtod(pkt, uint8_t *);
+ while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
+ (next_proto = rte_ipv6_get_next_ext(p + l3len,
+ next_proto, &ext_len)) >= 0)
+ l3len += ext_len;
+
+ /* drop packet when IPv6 header exceeds first segment length */
+ if (unlikely(l3len > pkt->data_len)) {
+ free_pkts(&pkt, 1);
+ return;
+ }
+
+ switch (next_proto) {
+ case IPPROTO_ESP:
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ break;
+ case IPPROTO_UDP:
+ if (app_sa_prm.udp_encap == 1) {
+ udp = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_udp_hdr *, l3len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port){
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ pkt->packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ break;
+ }
+ }
+ /* Fall through */
+ default:
+ t->ip6.data[t->ip6.num] = &iph6->proto;
+ t->ip6.pkts[(t->ip6.num)++] = pkt;
+ }
+ pkt->l2_len = 0;
+ pkt->l3_len = l3len;
+ pkt->packet_type |= RTE_PTYPE_L3_IPV6;
+ } else {
+ /* Unknown/Unsupported type, drop the packet */
+ RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
+ rte_be_to_cpu_16(eth->ether_type));
+ free_pkts(&pkt, 1);
+ return;
+ }
+
+ /* Check if the packet has been processed inline. For inline protocol
+ * processed packets, the metadata in the mbuf can be used to identify
+ * the security processing done on the packet. The metadata will be
+ * used to retrieve the application registered userdata associated
+ * with the security session.
+ */
+
+ if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
+ rte_security_dynfield_is_registered()) {
+ struct ipsec_sa *sa;
+ struct ipsec_mbuf_metadata *priv;
+ struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(
+ pkt->port);
+
+ /* Retrieve the userdata registered. Here, the userdata
+ * registered is the SA pointer.
+ */
+ sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
+ *rte_security_dynfield(pkt));
+ if (sa == NULL) {
+ /* userdata could not be retrieved */
+ return;
+ }
+
+ /* Save SA as priv member in mbuf. This will be used in the
+ * IPsec selector(SP-SA) check.
+ */
+
+ priv = get_priv(pkt);
+ priv->sa = sa;
+ }
+}
+
+static inline void
+prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
+ uint16_t nb_pkts)
+{
+ int32_t i;
+
+ t->ipsec.num = 0;
+ t->ip4.num = 0;
+ t->ip6.num = 0;
+
+ for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
+ void *));
+ prepare_one_packet(pkts[i], t);
+ }
+ /* Process left packets */
+ for (; i < nb_pkts; i++)
+ prepare_one_packet(pkts[i], t);
+}
+
+static inline void
+prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
+ const struct lcore_conf *qconf)
+{
+ struct ip *ip;
+ struct rte_ether_hdr *ethhdr;
+
+ ip = rte_pktmbuf_mtod(pkt, struct ip *);
+
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+ if (ip->ip_v == IPVERSION) {
+ pkt->ol_flags |= qconf->outbound.ipv4_offloads;
+ pkt->l3_len = sizeof(struct ip);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ ip->ip_sum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
+ ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ } else {
+ pkt->ol_flags |= qconf->outbound.ipv6_offloads;
+ pkt->l3_len = sizeof(struct ip6_hdr);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ }
+
+ memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+}
+
+static inline void
+prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
+ const struct lcore_conf *qconf)
+{
+ int32_t i;
+ const int32_t prefetch_offset = 2;
+
+ for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
+ rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
+ prepare_tx_pkt(pkts[i], port, qconf);
+ }
+ /* Process left packets */
+ for (; i < nb_pkts; i++)
+ prepare_tx_pkt(pkts[i], port, qconf);
+}
+
+/* Send burst of packets on an output interface */
+static inline int32_t
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
+{
+ struct rte_mbuf **m_table;
+ int32_t ret;
+ uint16_t queueid;
+
+ queueid = qconf->tx_queue_id[port];
+ m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
+
+ prepare_tx_burst(m_table, n, port, qconf);
+
+ ret = rte_eth_tx_burst(port, queueid, m_table, n);
+
+ core_stats_update_tx(ret);
+
+ if (unlikely(ret < n)) {
+ do {
+ free_pkts(&m_table[ret], 1);
+ } while (++ret < n);
+ }
+
+ return 0;
+}
+
+/*
+ * Helper function to fragment and queue for TX one packet.
+ */
+static inline uint32_t
+send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
+ uint16_t port, uint8_t proto)
+{
+ struct buffer *tbl;
+ uint32_t len, n;
+ int32_t rc;
+
+ tbl = qconf->tx_mbufs + port;
+ len = tbl->len;
+
+ /* free space for new fragments */
+ if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
+ send_burst(qconf, len, port);
+ len = 0;
+ }
+
+ n = RTE_DIM(tbl->m_table) - len;
+
+ if (proto == IPPROTO_IP)
+ rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
+ n, mtu_size, m->pool, qconf->frag.pool_indir);
+ else
+ rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
+ n, mtu_size, m->pool, qconf->frag.pool_indir);
+
+ if (rc >= 0)
+ len += rc;
+ else
+ RTE_LOG(ERR, IPSEC,
+ "%s: failed to fragment packet with size %u, "
+ "error code: %d\n",
+ __func__, m->pkt_len, rte_errno);
+
+ free_pkts(&m, 1);
+ return len;
+}
+
+/* Enqueue a single packet, and send burst if queue is filled */
+static inline int32_t
+send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
+{
+ uint32_t lcore_id;
+ uint16_t len;
+ struct lcore_conf *qconf;
+
+ lcore_id = rte_lcore_id();
+
+ qconf = &lcore_conf[lcore_id];
+ len = qconf->tx_mbufs[port].len;
+
+ if (m->pkt_len <= mtu_size) {
+ qconf->tx_mbufs[port].m_table[len] = m;
+ len++;
+
+ /* need to fragment the packet */
+ } else if (frag_tbl_sz > 0)
+ len = send_fragment_packet(qconf, m, port, proto);
+ else
+ free_pkts(&m, 1);
+
+ /* enough pkts to be sent */
+ if (unlikely(len == MAX_PKT_BURST)) {
+ send_burst(qconf, MAX_PKT_BURST, port);
+ len = 0;
+ }
+
+ qconf->tx_mbufs[port].len = len;
+ return 0;
+}
+
+static inline void
+inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
+ uint16_t lim, struct ipsec_spd_stats *stats)
+{
+ struct rte_mbuf *m;
+ uint32_t i, j, res, sa_idx;
+
+ if (ip->num == 0 || sp == NULL)
+ return;
+
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+ ip->num, DEFAULT_MAX_CATEGORIES);
+
+ j = 0;
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ res = ip->res[i];
+ if (res == BYPASS) {
+ ip->pkts[j++] = m;
+ stats->bypass++;
+ continue;
+ }
+ if (res == DISCARD) {
+ free_pkts(&m, 1);
+ stats->discard++;
+ continue;
+ }
+
+ /* Only check SPI match for processed IPSec packets */
+ if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
+ stats->discard++;
+ free_pkts(&m, 1);
+ continue;
+ }
+
+ sa_idx = res - 1;
+ if (!inbound_sa_check(sa, m, sa_idx)) {
+ stats->discard++;
+ free_pkts(&m, 1);
+ continue;
+ }
+ ip->pkts[j++] = m;
+ stats->protect++;
+ }
+ ip->num = j;
+}
+
+static inline int32_t
+get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
+{
+ struct ipsec_mbuf_metadata *priv;
+ struct ipsec_sa *sa;
+
+ priv = get_priv(pkt);
+
+ sa = priv->sa;
+ if (unlikely(sa == NULL)) {
+ RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
+ goto fail;
+ }
+
+ if (is_ipv6)
+ return sa->portid;
+
+ /* else */
+ return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
+
+fail:
+ if (is_ipv6)
+ return -1;
+
+ /* else */
+ return 0;
+}
+
+static inline void
+route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+{
+ uint32_t hop[MAX_PKT_BURST * 2];
+ uint32_t dst_ip[MAX_PKT_BURST * 2];
+ int32_t pkt_hop = 0;
+ uint16_t i, offset;
+ uint16_t lpm_pkts = 0;
+ unsigned int lcoreid = rte_lcore_id();
+
+ if (nb_pkts == 0)
+ return;
+
+ /* Need to do an LPM lookup for non-inline packets. Inline packets will
+ * have port ID in the SA
+ */
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ /* Security offload not enabled. So an LPM lookup is
+ * required to get the hop
+ */
+ offset = offsetof(struct ip, ip_dst);
+ dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
+ uint32_t *, offset);
+ dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
+ lpm_pkts++;
+ }
+ }
+
+ rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
+
+ lpm_pkts = 0;
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ /* Read hop from the SA */
+ pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
+ } else {
+ /* Need to use hop returned by lookup */
+ pkt_hop = hop[lpm_pkts++];
+ }
+
+ if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
+ core_statistics[lcoreid].lpm4.miss++;
+ free_pkts(&pkts[i], 1);
+ continue;
+ }
+ send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
+ }
+}
+
+static inline void
+route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+{
+ int32_t hop[MAX_PKT_BURST * 2];
+ uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+ uint8_t *ip6_dst;
+ int32_t pkt_hop = 0;
+ uint16_t i, offset;
+ uint16_t lpm_pkts = 0;
+ unsigned int lcoreid = rte_lcore_id();
+
+ if (nb_pkts == 0)
+ return;
+
+ /* Need to do an LPM lookup for non-inline packets. Inline packets will
+ * have port ID in the SA
+ */
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ /* Security offload not enabled. So an LPM lookup is
+ * required to get the hop
+ */
+ offset = offsetof(struct ip6_hdr, ip6_dst);
+ ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
+ offset);
+ memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
+ lpm_pkts++;
+ }
+ }
+
+ rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
+ lpm_pkts);
+
+ lpm_pkts = 0;
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ /* Read hop from the SA */
+ pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
+ } else {
+ /* Need to use hop returned by lookup */
+ pkt_hop = hop[lpm_pkts++];
+ }
+
+ if (pkt_hop == -1) {
+ core_statistics[lcoreid].lpm6.miss++;
+ free_pkts(&pkts[i], 1);
+ continue;
+ }
+ send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
+ }
+}
+
+static inline void
+drain_tx_buffers(struct lcore_conf *qconf)
+{
+ struct buffer *buf;
+ uint32_t portid;
+
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+ buf = &qconf->tx_mbufs[portid];
+ if (buf->len == 0)
+ continue;
+ send_burst(qconf, buf->len, portid);
+ buf->len = 0;
+ }
+}
+
#endif /* _IPSEC_WORKER_H_ */
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v2 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline
2022-04-21 13:31 ` [PATCH v2 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
@ 2022-04-21 13:31 ` Nithin Dabilpuram
2022-04-21 13:31 ` [PATCH v2 3/7] examples/ipsec-secgw: use HW parsed packet type in poll mode Nithin Dabilpuram
` (4 subsequent siblings)
5 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-21 13:31 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Enable Tx IPv4 checksum offload only when Tx inline crypto, lookaside
crypto/protocol or cpu crypto is needed.
For Tx Inline protocol offload, checksum computation
is implicitly taken care of by HW.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 3 ---
examples/ipsec-secgw/sa.c | 46 ++++++++++++++++++++++++++++++++------
2 files changed, 39 insertions(+), 10 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 959a20b..5fe5eee 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1761,9 +1761,6 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
local_port_conf.txmode.offloads |=
RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
- if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
- local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
-
printf("port %u configuring rx_offloads=0x%" PRIx64
", tx_offloads=0x%" PRIx64 "\n",
portid, local_port_conf.rxmode.offloads,
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 1839ac7..e8f2598 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -1766,10 +1766,18 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
struct ipsec_sa *rule;
uint32_t idx_sa;
enum rte_security_session_action_type rule_type;
+ struct rte_eth_dev_info dev_info;
+ int ret;
*rx_offloads = 0;
*tx_offloads = 0;
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error during getting device (port %u) info: %s\n",
+ port_id, strerror(-ret));
+
/* Check for inbound rules that use offloads and use this port */
for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
rule = &sa_in[idx_sa];
@@ -1785,13 +1793,37 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
rule = &sa_out[idx_sa];
rule_type = ipsec_get_action_type(rule);
- if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
- rule_type ==
- RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
- && rule->portid == port_id) {
- *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
- if (rule->mss)
- *tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
+ switch (rule_type) {
+ case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+ /* Checksum offload is not needed for inline protocol as
+ * all processing for Outbound IPSec packets will be
+ * implicitly taken care and for non-IPSec packets,
+ * there is no need of IPv4 Checksum offload.
+ */
+ if (rule->portid == port_id) {
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
+ if (rule->mss)
+ *tx_offloads |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
+ }
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+ if (rule->portid == port_id) {
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
+ if (rule->mss)
+ *tx_offloads |=
+ RTE_ETH_TX_OFFLOAD_TCP_TSO;
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+ }
+ break;
+ default:
+ /* Enable IPv4 checksum offload even if one of lookaside
+ * SA's are present.
+ */
+ if (dev_info.tx_offload_capa &
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+ break;
}
}
return 0;
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v2 3/7] examples/ipsec-secgw: use HW parsed packet type in poll mode
2022-04-21 13:31 ` [PATCH v2 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
2022-04-21 13:31 ` [PATCH v2 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
@ 2022-04-21 13:31 ` Nithin Dabilpuram
2022-04-21 13:31 ` [PATCH v2 4/7] examples/ipsec-secgw: allow larger burst size for vectors Nithin Dabilpuram
` (3 subsequent siblings)
5 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-21 13:31 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Use the HW-parsed packet type when the ethdev supports the necessary protocols.
If packet type parsing is not supported, register ethdev Rx callbacks
to parse the packet type in SW. This is better for performance as it
affects the fast path.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
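As a rough usage sketch of that fallback (check_ptype() and parse_ptype_cb() are the
helpers added by this patch; nb_rx_queue is only a placeholder for the number of Rx
queues configured on the port):
/* if the port cannot report the needed ptypes, parse them in SW per Rx queue */
if (!check_ptype(portid)) {
	uint16_t queue;

	for (queue = 0; queue < nb_rx_queue; queue++) {
		if (rte_eth_add_rx_callback(portid, queue,
					    parse_ptype_cb, NULL) == NULL)
			printf("Failed to add rx callback: port=%u, queue=%u\n",
			       portid, queue);
	}
}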
---
examples/ipsec-secgw/ipsec-secgw.c | 157 ++++++++++++++++++++++++++++++++++++
examples/ipsec-secgw/ipsec_worker.h | 114 ++++++++++----------------
2 files changed, 201 insertions(+), 70 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 5fe5eee..d6a4959 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1680,6 +1680,147 @@ cryptodevs_init(uint16_t req_queue_num)
return total_nb_qps;
}
+static int
+check_ptype(int portid)
+{
+ int l3_ipv4 = 0, l3_ipv6 = 0, l4_udp = 0, tunnel_esp = 0;
+ int i, nb_ptypes;
+ uint32_t mask;
+
+ mask = (RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
+ RTE_PTYPE_TUNNEL_MASK);
+
+ nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0);
+ if (nb_ptypes <= 0)
+ return 0;
+
+ uint32_t ptypes[nb_ptypes];
+
+ nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, ptypes, nb_ptypes);
+ for (i = 0; i < nb_ptypes; ++i) {
+ if (RTE_ETH_IS_IPV4_HDR(ptypes[i]))
+ l3_ipv4 = 1;
+ if (RTE_ETH_IS_IPV6_HDR(ptypes[i]))
+ l3_ipv6 = 1;
+ if ((ptypes[i] & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
+ tunnel_esp = 1;
+ if ((ptypes[i] & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
+ l4_udp = 1;
+ }
+
+ if (l3_ipv4 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
+
+ if (l3_ipv6 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
+
+ if (l4_udp == 0)
+ printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
+
+ if (tunnel_esp == 0)
+ printf("port %d cannot parse RTE_PTYPE_TUNNEL_ESP\n", portid);
+
+ if (l3_ipv4 && l3_ipv6 && l4_udp && tunnel_esp)
+ return 1;
+
+ return 0;
+
+}
+
+static inline void
+parse_ptype(struct rte_mbuf *m)
+{
+ uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+ const struct rte_ipv4_hdr *iph4;
+ const struct rte_ipv6_hdr *iph6;
+ const struct rte_ether_hdr *eth;
+ const struct rte_udp_hdr *udp;
+ uint16_t nat_port, ether_type;
+ int next_proto = 0;
+ size_t ext_len = 0;
+ const uint8_t *p;
+ uint32_t l3len;
+
+ eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+ ether_type = eth->ether_type;
+
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+ iph4 = (const struct rte_ipv4_hdr *)(eth + 1);
+ l3len = ((iph4->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER);
+
+ if (l3len == sizeof(struct rte_ipv4_hdr))
+ packet_type |= RTE_PTYPE_L3_IPV4;
+ else
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+
+ next_proto = iph4->next_proto_id;
+ p = (const uint8_t *)iph4;
+ } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ iph6 = (const struct rte_ipv6_hdr *)(eth + 1);
+ l3len = sizeof(struct ip6_hdr);
+
+ /* determine l3 header size up to ESP extension */
+ next_proto = iph6->proto;
+ p = (const uint8_t *)iph6;
+ while (next_proto != IPPROTO_ESP && l3len < m->data_len &&
+ (next_proto = rte_ipv6_get_next_ext(p + l3len,
+ next_proto, &ext_len)) >= 0)
+ l3len += ext_len;
+
+ /* Skip IPv6 header exceeds first segment length */
+ if (unlikely(l3len + RTE_ETHER_HDR_LEN > m->data_len))
+ goto exit;
+
+ if (l3len == sizeof(struct ip6_hdr))
+ packet_type |= RTE_PTYPE_L3_IPV6;
+ else
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT;
+ }
+
+ switch (next_proto) {
+ case IPPROTO_ESP:
+ packet_type |= RTE_PTYPE_TUNNEL_ESP;
+ break;
+ case IPPROTO_UDP:
+ if (app_sa_prm.udp_encap == 1) {
+ udp = (const struct rte_udp_hdr *)(p + l3len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port)
+ packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ }
+ break;
+ default:
+ break;
+ }
+exit:
+ m->packet_type = packet_type;
+}
+
+static uint16_t
+parse_ptype_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ uint16_t max_pkts __rte_unused,
+ void *user_param __rte_unused)
+{
+ uint32_t i;
+
+ if (unlikely(nb_pkts == 0))
+ return nb_pkts;
+
+ rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct ether_hdr *));
+ for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
+ struct ether_hdr *));
+ parse_ptype(pkts[i]);
+ }
+ parse_ptype(pkts[i]);
+
+ return nb_pkts;
+}
+
static void
port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
{
@@ -1691,6 +1832,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
struct lcore_conf *qconf;
struct rte_ether_addr ethaddr;
struct rte_eth_conf local_port_conf = port_conf;
+ int ptype_supported;
ret = rte_eth_dev_info_get(portid, &dev_info);
if (ret != 0)
@@ -1788,6 +1930,11 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
"err=%d, port=%d\n", ret, portid);
+ /* Check if required ptypes are supported */
+ ptype_supported = check_ptype(portid);
+ if (!ptype_supported)
+ printf("Port %d: softly parse packet type info\n", portid);
+
/* init one TX queue per lcore */
tx_queueid = 0;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
@@ -1849,6 +1996,16 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup: err=%d, "
"port=%d\n", ret, portid);
+
+ /* Register Rx callback if ptypes are not supported */
+ if (!ptype_supported &&
+ !rte_eth_add_rx_callback(portid, queue,
+ parse_ptype_cb, NULL)) {
+ printf("Failed to add rx callback: port=%d, "
+ "queue=%d\n", portid, queue);
+ }
+
+
}
}
printf("\n");
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index e0b0a82..7397291 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -117,55 +117,33 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
+ uint32_t ptype = pkt->packet_type;
const struct rte_ether_hdr *eth;
const struct rte_ipv4_hdr *iph4;
const struct rte_ipv6_hdr *iph6;
- const struct rte_udp_hdr *udp;
- uint16_t ip4_hdr_len;
- uint16_t nat_port;
+ uint32_t tun_type, l3_type;
+ uint64_t tx_offload;
+ uint16_t l3len;
+
+ tun_type = ptype & RTE_PTYPE_TUNNEL_MASK;
+ l3_type = ptype & RTE_PTYPE_L3_MASK;
eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
- if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
-
+ if (RTE_ETH_IS_IPV4_HDR(l3_type)) {
iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
RTE_ETHER_HDR_LEN);
adjust_ipv4_pktlen(pkt, iph4, 0);
- switch (iph4->next_proto_id) {
- case IPPROTO_ESP:
+ if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- ip4_hdr_len = ((iph4->version_ihl &
- RTE_IPV4_HDR_IHL_MASK) *
- RTE_IPV4_IHL_MULTIPLIER);
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, ip4_hdr_len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
+ } else {
t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
t->ip4.pkts[(t->ip4.num)++] = pkt;
}
- pkt->l2_len = 0;
- pkt->l3_len = sizeof(*iph4);
- pkt->packet_type |= RTE_PTYPE_L3_IPV4;
- if (pkt->packet_type & RTE_PTYPE_L4_TCP)
- pkt->l4_len = sizeof(struct rte_tcp_hdr);
- else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
- pkt->l4_len = sizeof(struct rte_udp_hdr);
- } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ tx_offload = sizeof(*iph4) << RTE_MBUF_L2_LEN_BITS;
+ } else if (RTE_ETH_IS_IPV6_HDR(l3_type)) {
int next_proto;
- size_t l3len, ext_len;
+ size_t ext_len;
uint8_t *p;
/* get protocol type */
@@ -173,47 +151,35 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
RTE_ETHER_HDR_LEN);
adjust_ipv6_pktlen(pkt, iph6, 0);
- next_proto = iph6->proto;
-
- /* determine l3 header size up to ESP extension */
l3len = sizeof(struct ip6_hdr);
- p = rte_pktmbuf_mtod(pkt, uint8_t *);
- while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
- (next_proto = rte_ipv6_get_next_ext(p + l3len,
- next_proto, &ext_len)) >= 0)
- l3len += ext_len;
- /* drop packet when IPv6 header exceeds first segment length */
- if (unlikely(l3len > pkt->data_len)) {
- free_pkts(&pkt, 1);
- return;
- }
-
- switch (next_proto) {
- case IPPROTO_ESP:
+ if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, l3len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
+ } else {
t->ip6.data[t->ip6.num] = &iph6->proto;
t->ip6.pkts[(t->ip6.num)++] = pkt;
}
- pkt->l2_len = 0;
- pkt->l3_len = l3len;
- pkt->packet_type |= RTE_PTYPE_L3_IPV6;
+
+ /* Determine l3 header size up to ESP extension by walking
+ * through extension headers.
+ */
+ if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
+ l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
+ p = rte_pktmbuf_mtod(pkt, uint8_t *);
+ next_proto = iph6->proto;
+ while (next_proto != IPPROTO_ESP &&
+ l3len < pkt->data_len &&
+ (next_proto = rte_ipv6_get_next_ext(p + l3len,
+ next_proto, &ext_len)) >= 0)
+ l3len += ext_len;
+
+ /* Drop pkt when IPv6 header exceeds first seg size */
+ if (unlikely(l3len > pkt->data_len)) {
+ free_pkts(&pkt, 1);
+ return;
+ }
+ }
+ tx_offload = l3len << RTE_MBUF_L2_LEN_BITS;
} else {
/* Unknown/Unsupported type, drop the packet */
RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
@@ -222,6 +188,14 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
return;
}
+ if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
+ tx_offload |= (sizeof(struct rte_tcp_hdr) <<
+ (RTE_MBUF_L2_LEN_BITS + RTE_MBUF_L3_LEN_BITS));
+ else if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
+ tx_offload |= (sizeof(struct rte_udp_hdr) <<
+ (RTE_MBUF_L2_LEN_BITS + RTE_MBUF_L3_LEN_BITS));
+ pkt->tx_offload = tx_offload;
+
/* Check if the packet has been processed inline. For inline protocol
* processed packets, the metadata in the mbuf can be used to identify
* the security processing done on the packet. The metadata will be
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v2 4/7] examples/ipsec-secgw: allow larger burst size for vectors
2022-04-21 13:31 ` [PATCH v2 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
2022-04-21 13:31 ` [PATCH v2 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
2022-04-21 13:31 ` [PATCH v2 3/7] examples/ipsec-secgw: use HW parsed packet type in poll mode Nithin Dabilpuram
@ 2022-04-21 13:31 ` Nithin Dabilpuram
2022-04-21 13:31 ` [PATCH v2 5/7] examples/ipsec-secgw: get security context from lcore conf Nithin Dabilpuram
` (2 subsequent siblings)
5 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-21 13:31 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Allow a larger burst size for vector event mode instead of restricting
it to 32. Also restructure the traffic type struct so that the packet
count comes first and is always in the first cacheline, and cache align
the struct. Since MAX_PKT_BURST is not used by the vector event mode
worker, define a separate macro for its burst size so that poll mode
performance is not affected.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 2 +-
examples/ipsec-secgw/ipsec-secgw.h | 15 ++++++++++-----
2 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index d6a4959..88984a6 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1317,7 +1317,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
case CMD_LINE_OPT_VECTOR_SIZE_NUM:
ret = parse_decimal(optarg);
- if (ret > MAX_PKT_BURST) {
+ if (ret > MAX_PKT_BURST_VEC) {
printf("Invalid argument for \'%s\': %s\n",
CMD_LINE_OPT_VECTOR_SIZE, optarg);
print_usage(prgname);
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index fceb835..2edf631 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -11,6 +11,11 @@
#define NB_SOCKETS 4
#define MAX_PKT_BURST 32
+#define MAX_PKT_BURST_VEC 256
+
+#define MAX_PKTS \
+ ((MAX_PKT_BURST_VEC > MAX_PKT_BURST ? \
+ MAX_PKT_BURST_VEC : MAX_PKT_BURST) * 2)
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
@@ -49,12 +54,12 @@
#define MBUF_PTYPE_TUNNEL_ESP_IN_UDP (RTE_PTYPE_TUNNEL_ESP | RTE_PTYPE_L4_UDP)
struct traffic_type {
- const uint8_t *data[MAX_PKT_BURST * 2];
- struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
- void *saptr[MAX_PKT_BURST * 2];
- uint32_t res[MAX_PKT_BURST * 2];
uint32_t num;
-};
+ struct rte_mbuf *pkts[MAX_PKTS];
+ const uint8_t *data[MAX_PKTS];
+ void *saptr[MAX_PKTS];
+ uint32_t res[MAX_PKTS];
+} __rte_cache_aligned;
struct ipsec_traffic {
struct traffic_type ipsec;
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v2 5/7] examples/ipsec-secgw: get security context from lcore conf
2022-04-21 13:31 ` [PATCH v2 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
` (2 preceding siblings ...)
2022-04-21 13:31 ` [PATCH v2 4/7] examples/ipsec-secgw: allow larger burst size for vectors Nithin Dabilpuram
@ 2022-04-21 13:31 ` Nithin Dabilpuram
2022-04-21 13:31 ` [PATCH v2 6/7] examples/ipsec-secgw: update eth header during route lookup Nithin Dabilpuram
2022-04-21 13:31 ` [PATCH v2 7/7] examples/ipsec-secgw: add poll mode worker for inline proto Nithin Dabilpuram
5 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-21 13:31 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Store the security context pointer in the lcore Rx queue config and
fetch it from there in the fast path for better performance.
Currently, rte_eth_dev_get_sec_ctx(), which is meant to be a control
path API, is called on a per-packet basis, and every call to that
API checks the ethdev port status.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
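A minimal sketch of the idea (rxq stands for a struct lcore_rx_queue entry; the actual
change is in the diff below):
/* control path: resolve the security context once per Rx queue */
rxq->sec_ctx = rte_eth_dev_get_sec_ctx(rxq->port_id);

/* fast path: reuse the cached pointer, no ethdev lookup or port status check */
prepare_traffic(rxq->sec_ctx, pkts, &traffic, nb_rx);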
---
examples/ipsec-secgw/ipsec-secgw.c | 22 ++++++++++++++++++---
examples/ipsec-secgw/ipsec.h | 1 +
examples/ipsec-secgw/ipsec_worker.h | 39 +++++++++++++++++--------------------
3 files changed, 38 insertions(+), 24 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 88984a6..14b9c06 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -544,11 +544,11 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
- uint8_t nb_pkts, uint16_t portid)
+ uint8_t nb_pkts, uint16_t portid, struct rte_security_ctx *ctx)
{
struct ipsec_traffic traffic;
- prepare_traffic(pkts, &traffic, nb_pkts);
+ prepare_traffic(ctx, pkts, &traffic, nb_pkts);
if (unlikely(single_sa)) {
if (is_unprotected_port(portid))
@@ -740,7 +740,8 @@ ipsec_poll_mode_worker(void)
if (nb_rx > 0) {
core_stats_update_rx(nb_rx);
- process_pkts(qconf, pkts, nb_rx, portid);
+ process_pkts(qconf, pkts, nb_rx, portid,
+ rxql->sec_ctx);
}
/* dequeue and process completed crypto-ops */
@@ -3060,6 +3061,21 @@ main(int32_t argc, char **argv)
flow_init();
+ /* Get security context if available and only if dynamic field is
+ * registered for fast path access.
+ */
+ if (!rte_security_dynfield_is_registered())
+ goto skip_sec_ctx;
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ for (i = 0; i < lcore_conf[lcore_id].nb_rx_queue; i++) {
+ portid = lcore_conf[lcore_id].rx_queue_list[i].port_id;
+ lcore_conf[lcore_id].rx_queue_list[i].sec_ctx =
+ rte_eth_dev_get_sec_ctx(portid);
+ }
+ }
+skip_sec_ctx:
+
check_all_ports_link_status(enabled_port_mask);
if (stats_interval > 0)
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 9a4e7ea..ecad262 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -269,6 +269,7 @@ struct cnt_blk {
struct lcore_rx_queue {
uint16_t port_id;
uint8_t queue_id;
+ struct rte_security_ctx *sec_ctx;
} __rte_cache_aligned;
struct buffer {
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 7397291..b1fc364 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -88,7 +88,7 @@ prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
}
}
-static inline void
+static __rte_always_inline void
adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
uint32_t l2_len)
{
@@ -101,7 +101,7 @@ adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
}
}
-static inline void
+static __rte_always_inline void
adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
uint32_t l2_len)
{
@@ -114,8 +114,9 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
}
}
-static inline void
-prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
+static __rte_always_inline void
+prepare_one_packet(struct rte_security_ctx *ctx, struct rte_mbuf *pkt,
+ struct ipsec_traffic *t)
{
uint32_t ptype = pkt->packet_type;
const struct rte_ether_hdr *eth;
@@ -203,13 +204,9 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
* with the security session.
*/
- if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
- rte_security_dynfield_is_registered()) {
+ if (ctx && pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
struct ipsec_sa *sa;
struct ipsec_mbuf_metadata *priv;
- struct rte_security_ctx *ctx = (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(
- pkt->port);
/* Retrieve the userdata registered. Here, the userdata
* registered is the SA pointer.
@@ -230,9 +227,9 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
}
}
-static inline void
-prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
- uint16_t nb_pkts)
+static __rte_always_inline void
+prepare_traffic(struct rte_security_ctx *ctx, struct rte_mbuf **pkts,
+ struct ipsec_traffic *t, uint16_t nb_pkts)
{
int32_t i;
@@ -243,11 +240,11 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
void *));
- prepare_one_packet(pkts[i], t);
+ prepare_one_packet(ctx, pkts[i], t);
}
/* Process left packets */
for (; i < nb_pkts; i++)
- prepare_one_packet(pkts[i], t);
+ prepare_one_packet(ctx, pkts[i], t);
}
static inline void
@@ -305,7 +302,7 @@ prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
}
/* Send burst of packets on an output interface */
-static inline int32_t
+static __rte_always_inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
@@ -333,7 +330,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
/*
* Helper function to fragment and queue for TX one packet.
*/
-static inline uint32_t
+static __rte_always_inline uint32_t
send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
uint16_t port, uint8_t proto)
{
@@ -372,7 +369,7 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
}
/* Enqueue a single packet, and send burst if queue is filled */
-static inline int32_t
+static __rte_always_inline int32_t
send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
{
uint32_t lcore_id;
@@ -404,7 +401,7 @@ send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
return 0;
}
-static inline void
+static __rte_always_inline void
inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
uint16_t lim, struct ipsec_spd_stats *stats)
{
@@ -451,7 +448,7 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
ip->num = j;
}
-static inline int32_t
+static __rte_always_inline int32_t
get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
{
struct ipsec_mbuf_metadata *priv;
@@ -531,7 +528,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
}
-static inline void
+static __rte_always_inline void
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
int32_t hop[MAX_PKT_BURST * 2];
@@ -585,7 +582,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
}
-static inline void
+static __rte_always_inline void
drain_tx_buffers(struct lcore_conf *qconf)
{
struct buffer *buf;
--
2.8.4
* [PATCH v2 6/7] examples/ipsec-secgw: update eth header during route lookup
2022-04-21 13:31 ` [PATCH v2 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
` (3 preceding siblings ...)
2022-04-21 13:31 ` [PATCH v2 5/7] examples/ipsec-secgw: get security context from lcore conf Nithin Dabilpuram
@ 2022-04-21 13:31 ` Nithin Dabilpuram
2022-04-21 13:31 ` [PATCH v2 7/7] examples/ipsec-secgw: add poll mode worker for inline proto Nithin Dabilpuram
5 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-21 13:31 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Update the Ethernet header during route lookup instead of much
later while performing the Tx burst. The advantage of doing it at
route lookup is that no additional IP version checks based on
packet data are needed, and the packet data is already in cache
since the route lookup is consuming that data anyway.
This is also useful for inline protocol offload cases of v4-in-v6
or v6-in-v4 outbound tunnel operations, where the packet data
carries no information about the tunnel protocol.
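A minimal sketch of the per-packet step this enables (mirroring the
route4_pkts()/route6_pkts() changes in the diff below; the helper name
write_eth_hdr is only illustrative):
        /* Sketch: prepend and fill the Ethernet header as soon as the
         * egress port is known from the route lookup, instead of at Tx
         * burst time.
         */
        static inline void
        write_eth_hdr(struct rte_mbuf *pkt, uint16_t port, uint16_t ether_type)
        {
                struct rte_ether_hdr *ethhdr;

                ethhdr = (struct rte_ether_hdr *)
                        rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
                pkt->l2_len = RTE_ETHER_HDR_LEN;

                ethhdr->ether_type = rte_cpu_to_be_16(ether_type);
                memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
                       sizeof(struct rte_ether_addr));
                memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
                       sizeof(struct rte_ether_addr));
        }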
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 9 +-
examples/ipsec-secgw/ipsec_worker.h | 199 ++++++++++++++++++++++--------------
2 files changed, 130 insertions(+), 78 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 14b9c06..24ee6c0 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -562,7 +562,8 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
process_pkts_outbound(&qconf->outbound, &traffic);
}
- route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
+ route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
}
@@ -613,7 +614,8 @@ drain_inbound_crypto_queues(const struct lcore_conf *qconf,
if (trf.ip4.num != 0) {
inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0,
&core_statistics[lcoreid].inbound.spd4);
- route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
}
/* process ipv6 packets */
@@ -647,7 +649,8 @@ drain_outbound_crypto_queues(const struct lcore_conf *qconf,
/* process ipv4 packets */
if (trf.ip4.num != 0)
- route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
/* process ipv6 packets */
if (trf.ip6.num != 0)
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index b1fc364..7f21440 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -247,60 +247,6 @@ prepare_traffic(struct rte_security_ctx *ctx, struct rte_mbuf **pkts,
prepare_one_packet(ctx, pkts[i], t);
}
-static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
- const struct lcore_conf *qconf)
-{
- struct ip *ip;
- struct rte_ether_hdr *ethhdr;
-
- ip = rte_pktmbuf_mtod(pkt, struct ip *);
-
- ethhdr = (struct rte_ether_hdr *)
- rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
-
- if (ip->ip_v == IPVERSION) {
- pkt->ol_flags |= qconf->outbound.ipv4_offloads;
- pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ip->ip_sum = 0;
-
- /* calculate IPv4 cksum in SW */
- if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
- ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
- } else {
- pkt->ol_flags |= qconf->outbound.ipv6_offloads;
- pkt->l3_len = sizeof(struct ip6_hdr);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- }
-
- memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
- sizeof(struct rte_ether_addr));
- memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
- sizeof(struct rte_ether_addr));
-}
-
-static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
- const struct lcore_conf *qconf)
-{
- int32_t i;
- const int32_t prefetch_offset = 2;
-
- for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
- rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
- prepare_tx_pkt(pkts[i], port, qconf);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_tx_pkt(pkts[i], port, qconf);
-}
-
/* Send burst of packets on an output interface */
static __rte_always_inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
@@ -312,8 +258,6 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
queueid = qconf->tx_queue_id[port];
m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
- prepare_tx_burst(m_table, n, port, qconf);
-
ret = rte_eth_tx_burst(port, queueid, m_table, n);
core_stats_update_tx(ret);
@@ -334,8 +278,11 @@ static __rte_always_inline uint32_t
send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
uint16_t port, uint8_t proto)
{
+ struct rte_ether_hdr *ethhdr;
+ struct rte_ipv4_hdr *ip;
+ struct rte_mbuf *pkt;
struct buffer *tbl;
- uint32_t len, n;
+ uint32_t len, n, i;
int32_t rc;
tbl = qconf->tx_mbufs + port;
@@ -349,6 +296,9 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
n = RTE_DIM(tbl->m_table) - len;
+ /* Strip the ethernet header that was prepended earlier */
+ rte_pktmbuf_adj(m, RTE_ETHER_HDR_LEN);
+
if (proto == IPPROTO_IP)
rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
n, mtu_size, m->pool, qconf->frag.pool_indir);
@@ -356,13 +306,51 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
n, mtu_size, m->pool, qconf->frag.pool_indir);
- if (rc >= 0)
- len += rc;
- else
+ if (rc < 0) {
RTE_LOG(ERR, IPSEC,
"%s: failed to fragment packet with size %u, "
"error code: %d\n",
__func__, m->pkt_len, rte_errno);
+ rc = 0;
+ }
+
+ i = len;
+ len += rc;
+ for (; i < len; i++) {
+ pkt = tbl->m_table[i];
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ if (proto == IPPROTO_IP) {
+ ethhdr->ether_type =
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
+ pkt->ol_flags |= qconf->outbound.ipv4_offloads;
+
+ ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
+ ip->hdr_checksum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
+ ip->hdr_checksum = rte_ipv4_cksum(ip);
+ } else {
+ ethhdr->ether_type =
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv6_hdr);
+ pkt->ol_flags |= qconf->outbound.ipv6_offloads;
+ }
+
+ memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+ }
free_pkts(&m, 1);
return len;
@@ -381,7 +369,8 @@ send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
qconf = &lcore_conf[lcore_id];
len = qconf->tx_mbufs[port].len;
- if (m->pkt_len <= mtu_size) {
+ /* L2 header is already part of packet */
+ if (m->pkt_len - RTE_ETHER_HDR_LEN <= mtu_size) {
qconf->tx_mbufs[port].m_table[len] = m;
len++;
@@ -476,15 +465,19 @@ get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
return 0;
}
-static inline void
-route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+static __rte_always_inline void
+route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[],
+ uint8_t nb_pkts, uint64_t tx_offloads, bool ip_cksum)
{
uint32_t hop[MAX_PKT_BURST * 2];
uint32_t dst_ip[MAX_PKT_BURST * 2];
+ struct rte_ether_hdr *ethhdr;
int32_t pkt_hop = 0;
uint16_t i, offset;
uint16_t lpm_pkts = 0;
unsigned int lcoreid = rte_lcore_id();
+ struct rte_mbuf *pkt;
+ uint16_t port;
if (nb_pkts == 0)
return;
@@ -494,12 +487,13 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ pkt = pkts[i];
+ if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
offset = offsetof(struct ip, ip_dst);
- dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
+ dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkt,
uint32_t *, offset);
dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
lpm_pkts++;
@@ -511,9 +505,10 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ pkt = pkts[i];
+ if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
+ pkt_hop = get_hop_for_offload_pkt(pkt, 0);
} else {
/* Need to use hop returned by lookup */
pkt_hop = hop[lpm_pkts++];
@@ -521,10 +516,41 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
core_statistics[lcoreid].lpm4.miss++;
- free_pkts(&pkts[i], 1);
+ free_pkts(&pkt, 1);
continue;
}
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
+
+ port = pkt_hop & 0xff;
+
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+ pkt->ol_flags |= RTE_MBUF_F_TX_IPV4;
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+ if (ip_cksum) {
+ struct rte_ipv4_hdr *ip;
+
+ pkt->ol_flags |= tx_offloads;
+
+ ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
+ ip->hdr_checksum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
+ ip->hdr_checksum = rte_ipv4_cksum(ip);
+ }
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+
+ send_single_packet(pkt, port, IPPROTO_IP);
}
}
@@ -533,11 +559,14 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
int32_t hop[MAX_PKT_BURST * 2];
uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+ struct rte_ether_hdr *ethhdr;
uint8_t *ip6_dst;
int32_t pkt_hop = 0;
uint16_t i, offset;
uint16_t lpm_pkts = 0;
unsigned int lcoreid = rte_lcore_id();
+ struct rte_mbuf *pkt;
+ uint16_t port;
if (nb_pkts == 0)
return;
@@ -547,12 +576,13 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ pkt = pkts[i];
+ if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
offset = offsetof(struct ip6_hdr, ip6_dst);
- ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
+ ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
offset);
memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
lpm_pkts++;
@@ -565,9 +595,10 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ pkt = pkts[i];
+ if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
+ pkt_hop = get_hop_for_offload_pkt(pkt, 1);
} else {
/* Need to use hop returned by lookup */
pkt_hop = hop[lpm_pkts++];
@@ -575,10 +606,28 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
if (pkt_hop == -1) {
core_statistics[lcoreid].lpm6.miss++;
- free_pkts(&pkts[i], 1);
+ free_pkts(&pkt, 1);
continue;
}
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
+
+ port = pkt_hop & 0xff;
+
+ /* Update minimum offload data */
+ pkt->ol_flags |= RTE_MBUF_F_TX_IPV6;
+ pkt->l3_len = sizeof(struct ip6_hdr);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+
+ send_single_packet(pkt, port, IPPROTO_IPV6);
}
}
--
2.8.4
* [PATCH v2 7/7] examples/ipsec-secgw: add poll mode worker for inline proto
2022-04-21 13:31 ` [PATCH v2 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
` (4 preceding siblings ...)
2022-04-21 13:31 ` [PATCH v2 6/7] examples/ipsec-secgw: update eth header during route lookup Nithin Dabilpuram
@ 2022-04-21 13:31 ` Nithin Dabilpuram
5 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-21 13:31 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Add a separate worker thread for the case where all SAs are of
type inline protocol offload and librte_ipsec is enabled, to make
that case more optimal. The current default worker supports all
kinds of SAs, which leads to many per-packet checks and branches
on the SA type, of which there are five.
Also make a provision for choosing different poll mode workers for
different combinations of SA types, with the default being the
existing poll mode worker that supports all kinds of SAs.
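A minimal sketch of that selection logic, mirroring the flag-indexed
function table added to ipsec_worker.c below (wrkr_flags is accumulated
per SA while parsing the SA config; the function name
launch_poll_mode_worker is only illustrative):
        /* Sketch: pick a specialized poll mode worker based on the
         * SA-type flags seen during SA parsing; fall back to the
         * generic worker otherwise.
         */
        static void
        launch_poll_mode_worker(void)
        {
                static ipsec_worker_fn_t wrkrs[MAX_F] = {
                        [INL_PR_F] = ipsec_poll_mode_wrkr_inl_pr,
                        [INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
                };
                ipsec_worker_fn_t fn = NULL;

                if (app_sa_prm.enable)
                        fn = wrkrs[wrkr_flags];
                if (fn == NULL)
                        fn = ipsec_poll_mode_worker;

                (*fn)();
        }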
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 6 +-
examples/ipsec-secgw/ipsec-secgw.h | 10 +
examples/ipsec-secgw/ipsec_worker.c | 378 +++++++++++++++++++++++++++++++++++-
examples/ipsec-secgw/ipsec_worker.h | 4 +
examples/ipsec-secgw/sa.c | 9 +
5 files changed, 403 insertions(+), 4 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 24ee6c0..4251952 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -68,8 +68,6 @@ volatile bool force_quit;
#define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */
#define MAX_QUEUE_PAIRS 1
-#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-
#define MAX_LCORE_PARAMS 1024
/*
@@ -173,7 +171,7 @@ static uint64_t enabled_cryptodev_mask = UINT64_MAX;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
-static uint32_t single_sa;
+uint32_t single_sa;
uint32_t nb_bufs_in_pool;
/*
@@ -238,6 +236,7 @@ struct socket_ctx socket_ctx[NB_SOCKETS];
bool per_port_pool;
+uint16_t wrkr_flags;
/*
* Determine is multi-segment support required:
* - either frame buffer size is smaller then mtu
@@ -1233,6 +1232,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
single_sa = 1;
single_sa_idx = ret;
eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
+ wrkr_flags |= SS_F;
printf("Configured with single SA index %u\n",
single_sa_idx);
break;
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index 2edf631..f027360 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -135,6 +135,7 @@ extern uint32_t unprotected_port_mask;
/* Index of SA in single mode */
extern uint32_t single_sa_idx;
+extern uint32_t single_sa;
extern volatile bool force_quit;
@@ -145,6 +146,15 @@ extern bool per_port_pool;
extern uint32_t mtu_size;
extern uint32_t frag_tbl_sz;
+#define SS_F (1U << 0) /* Single SA mode */
+#define INL_PR_F (1U << 1) /* Inline Protocol */
+#define INL_CR_F (1U << 2) /* Inline Crypto */
+#define LA_PR_F (1U << 3) /* Lookaside Protocol */
+#define LA_ANY_F (1U << 4) /* Lookaside Any */
+#define MAX_F (LA_ANY_F << 1)
+
+extern uint16_t wrkr_flags;
+
static inline uint8_t
is_unprotected_port(uint16_t port_id)
{
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 8639426..2b96951 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -17,6 +17,8 @@ struct port_drv_mode_data {
struct rte_security_ctx *ctx;
};
+typedef void (*ipsec_worker_fn_t)(void);
+
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
@@ -1004,6 +1006,380 @@ ipsec_eventmode_worker(struct eh_conf *conf)
eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}
+static __rte_always_inline void
+outb_inl_pro_spd_process(struct sp_ctx *sp,
+ struct sa_ctx *sa_ctx,
+ struct traffic_type *ip,
+ struct traffic_type *match,
+ struct traffic_type *mismatch,
+ bool match_flag,
+ struct ipsec_spd_stats *stats)
+{
+ uint32_t prev_sa_idx = UINT32_MAX;
+ struct rte_mbuf *ipsec[MAX_PKT_BURST];
+ struct rte_ipsec_session *ips;
+ uint32_t i, j, j_mis, sa_idx;
+ struct ipsec_sa *sa = NULL;
+ uint32_t ipsec_num = 0;
+ struct rte_mbuf *m;
+ uint64_t satp;
+
+ if (ip->num == 0 || sp == NULL)
+ return;
+
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+ ip->num, DEFAULT_MAX_CATEGORIES);
+
+ j = match->num;
+ j_mis = mismatch->num;
+
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ sa_idx = ip->res[i] - 1;
+
+ if (unlikely(ip->res[i] == DISCARD)) {
+ free_pkts(&m, 1);
+
+ stats->discard++;
+ } else if (unlikely(ip->res[i] == BYPASS)) {
+ match->pkts[j++] = m;
+
+ stats->bypass++;
+ } else {
+ if (prev_sa_idx == UINT32_MAX) {
+ prev_sa_idx = sa_idx;
+ sa = &sa_ctx->sa[sa_idx];
+ ips = ipsec_get_primary_session(sa);
+ satp = rte_ipsec_sa_type(ips->sa);
+ }
+
+ if (sa_idx != prev_sa_idx) {
+ prep_process_group(sa, ipsec, ipsec_num);
+
+ /* Prepare packets for outbound */
+ rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
+
+ /* Copy to current tr or a different tr */
+ if (SATP_OUT_IPV4(satp) == match_flag) {
+ memcpy(&match->pkts[j], ipsec,
+ ipsec_num * sizeof(void *));
+ j += ipsec_num;
+ } else {
+ memcpy(&mismatch->pkts[j_mis], ipsec,
+ ipsec_num * sizeof(void *));
+ j_mis += ipsec_num;
+ }
+
+ /* Update to new SA */
+ sa = &sa_ctx->sa[sa_idx];
+ ips = ipsec_get_primary_session(sa);
+ satp = rte_ipsec_sa_type(ips->sa);
+ ipsec_num = 0;
+ }
+
+ ipsec[ipsec_num++] = m;
+ stats->protect++;
+ }
+ }
+
+ if (ipsec_num) {
+ prep_process_group(sa, ipsec, ipsec_num);
+
+ /* Prepare packets for outbound */
+ rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
+
+ /* Copy to current tr or a different tr */
+ if (SATP_OUT_IPV4(satp) == match_flag) {
+ memcpy(&match->pkts[j], ipsec,
+ ipsec_num * sizeof(void *));
+ j += ipsec_num;
+ } else {
+ memcpy(&mismatch->pkts[j_mis], ipsec,
+ ipsec_num * sizeof(void *));
+ j_mis += ipsec_num;
+ }
+ }
+ match->num = j;
+ mismatch->num = j_mis;
+}
+
+/* Poll mode worker when all SA's are of type inline protocol */
+void
+ipsec_poll_mode_wrkr_inl_pr(void)
+{
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
+ / US_PER_S * BURST_TX_DRAIN_US;
+ struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out;
+ struct rte_mbuf *pkts[MAX_PKT_BURST];
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
+ struct ipsec_core_statistics *stats;
+ struct rt_ctx *rt4_ctx, *rt6_ctx;
+ struct sa_ctx *sa_in, *sa_out;
+ struct traffic_type ip4, ip6;
+ struct lcore_rx_queue *rxql;
+ struct rte_mbuf **v4, **v6;
+ struct ipsec_traffic trf;
+ struct lcore_conf *qconf;
+ uint16_t v4_num, v6_num;
+ int32_t socket_id;
+ uint32_t lcore_id;
+ int32_t i, nb_rx;
+ uint16_t portid;
+ uint8_t queueid;
+
+ prev_tsc = 0;
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_conf[lcore_id];
+ rxql = qconf->rx_queue_list;
+ socket_id = rte_lcore_to_socket_id(lcore_id);
+ stats = &core_statistics[lcore_id];
+
+ rt4_ctx = socket_ctx[socket_id].rt_ip4;
+ rt6_ctx = socket_ctx[socket_id].rt_ip6;
+
+ sp4_in = socket_ctx[socket_id].sp_ip4_in;
+ sp6_in = socket_ctx[socket_id].sp_ip6_in;
+ sa_in = socket_ctx[socket_id].sa_in;
+
+ sp4_out = socket_ctx[socket_id].sp_ip4_out;
+ sp6_out = socket_ctx[socket_id].sp_ip6_out;
+ sa_out = socket_ctx[socket_id].sa_out;
+
+ qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
+
+ if (qconf->nb_rx_queue == 0) {
+ RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
+ lcore_id);
+ return;
+ }
+
+ RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->nb_rx_queue; i++) {
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ RTE_LOG(INFO, IPSEC,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ lcore_id, portid, queueid);
+ }
+
+ while (!force_quit) {
+ cur_tsc = rte_rdtsc();
+
+ /* TX queue buffer drain */
+ diff_tsc = cur_tsc - prev_tsc;
+
+ if (unlikely(diff_tsc > drain_tsc)) {
+ drain_tx_buffers(qconf);
+ prev_tsc = cur_tsc;
+ }
+
+ for (i = 0; i < qconf->nb_rx_queue; ++i) {
+ /* Read packets from RX queues */
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ nb_rx = rte_eth_rx_burst(portid, queueid,
+ pkts, MAX_PKT_BURST);
+
+ if (nb_rx <= 0)
+ continue;
+
+ core_stats_update_rx(nb_rx);
+
+ prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);
+
+ /* Drop any IPsec traffic */
+ free_pkts(trf.ipsec.pkts, trf.ipsec.num);
+
+ if (is_unprotected_port(portid)) {
+ inbound_sp_sa(sp4_in, sa_in, &trf.ip4,
+ trf.ip4.num,
+ &stats->inbound.spd4);
+
+ inbound_sp_sa(sp6_in, sa_in, &trf.ip6,
+ trf.ip6.num,
+ &stats->inbound.spd6);
+
+ v4 = trf.ip4.pkts;
+ v4_num = trf.ip4.num;
+ v6 = trf.ip6.pkts;
+ v6_num = trf.ip6.num;
+ } else {
+ ip4.num = 0;
+ ip6.num = 0;
+
+ outb_inl_pro_spd_process(sp4_out, sa_out,
+ &trf.ip4, &ip4, &ip6,
+ true,
+ &stats->outbound.spd4);
+
+ outb_inl_pro_spd_process(sp6_out, sa_out,
+ &trf.ip6, &ip6, &ip4,
+ false,
+ &stats->outbound.spd6);
+ v4 = ip4.pkts;
+ v4_num = ip4.num;
+ v6 = ip6.pkts;
+ v6_num = ip6.num;
+ }
+
+ route4_pkts(rt4_ctx, v4, v4_num, 0, false);
+ route6_pkts(rt6_ctx, v6, v6_num);
+ }
+ }
+}
+
+/* Poll mode worker when all SA's are of type inline protocol
+ * and single sa mode is enabled.
+ */
+void
+ipsec_poll_mode_wrkr_inl_pr_ss(void)
+{
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
+ / US_PER_S * BURST_TX_DRAIN_US;
+ struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
+ struct rte_ipsec_session *ips;
+ struct lcore_rx_queue *rxql;
+ struct lcore_conf *qconf;
+ struct ipsec_traffic trf;
+ struct sa_ctx *sa_out;
+ uint32_t i, nb_rx, j;
+ struct ipsec_sa *sa;
+ int32_t socket_id;
+ uint32_t lcore_id;
+ uint16_t portid;
+ uint8_t queueid;
+
+ prev_tsc = 0;
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_conf[lcore_id];
+ rxql = qconf->rx_queue_list;
+ socket_id = rte_lcore_to_socket_id(lcore_id);
+
+ /* Get SA info */
+ sa_out = socket_ctx[socket_id].sa_out;
+ sa = &sa_out->sa[single_sa_idx];
+ ips = ipsec_get_primary_session(sa);
+
+ qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
+
+ if (qconf->nb_rx_queue == 0) {
+ RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
+ lcore_id);
+ return;
+ }
+
+ RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->nb_rx_queue; i++) {
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ RTE_LOG(INFO, IPSEC,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ lcore_id, portid, queueid);
+ }
+
+ while (!force_quit) {
+ cur_tsc = rte_rdtsc();
+
+ /* TX queue buffer drain */
+ diff_tsc = cur_tsc - prev_tsc;
+
+ if (unlikely(diff_tsc > drain_tsc)) {
+ drain_tx_buffers(qconf);
+ prev_tsc = cur_tsc;
+ }
+
+ for (i = 0; i < qconf->nb_rx_queue; ++i) {
+ /* Read packets from RX queues */
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ nb_rx = rte_eth_rx_burst(portid, queueid,
+ pkts, MAX_PKT_BURST);
+
+ if (nb_rx <= 0)
+ continue;
+
+ core_stats_update_rx(nb_rx);
+
+ if (is_unprotected_port(portid)) {
+ /* Nothing much to do for inbound inline
+ * decrypted traffic.
+ */
+ for (j = 0; j < nb_rx; j++) {
+ uint32_t ptype, proto;
+
+ pkt = pkts[j];
+ ptype = pkt->packet_type &
+ RTE_PTYPE_L3_MASK;
+ if (ptype == RTE_PTYPE_L3_IPV4)
+ proto = IPPROTO_IP;
+ else
+ proto = IPPROTO_IPV6;
+
+ send_single_packet(pkt, portid, proto);
+ }
+
+ continue;
+ }
+
+ /* Prepare packets for outbound */
+ prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);
+
+ /* Drop any IPsec traffic */
+ free_pkts(trf.ipsec.pkts, trf.ipsec.num);
+
+ rte_ipsec_pkt_process(ips, trf.ip4.pkts,
+ trf.ip4.num);
+ rte_ipsec_pkt_process(ips, trf.ip6.pkts,
+ trf.ip6.num);
+ portid = sa->portid;
+
+ /* Send v4 pkts out */
+ for (j = 0; j < trf.ip4.num; j++) {
+ pkt = trf.ip4.pkts[j];
+
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+ send_single_packet(pkt, portid, IPPROTO_IP);
+ }
+
+ /* Send v6 pkts out */
+ for (j = 0; j < trf.ip6.num; j++) {
+ pkt = trf.ip6.pkts[j];
+
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+ send_single_packet(pkt, portid, IPPROTO_IPV6);
+ }
+ }
+ }
+}
+
+static void
+ipsec_poll_mode_wrkr_launch(void)
+{
+ static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
+ [INL_PR_F] = ipsec_poll_mode_wrkr_inl_pr,
+ [INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
+ };
+ ipsec_worker_fn_t fn;
+
+ if (!app_sa_prm.enable) {
+ fn = ipsec_poll_mode_worker;
+ } else {
+ fn = poll_mode_wrkrs[wrkr_flags];
+
+ /* Always default to all mode worker */
+ if (!fn)
+ fn = ipsec_poll_mode_worker;
+ }
+
+ /* Launch worker */
+ (*fn)();
+}
+
int ipsec_launch_one_lcore(void *args)
{
struct eh_conf *conf;
@@ -1012,7 +1388,7 @@ int ipsec_launch_one_lcore(void *args)
if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
/* Run in poll mode */
- ipsec_poll_mode_worker();
+ ipsec_poll_mode_wrkr_launch();
} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
/* Run in event mode */
ipsec_eventmode_worker(conf);
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 7f21440..315f3d6 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -13,6 +13,8 @@
/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+
enum pkt_type {
PKT_TYPE_PLAIN_IPV4 = 1,
PKT_TYPE_IPSEC_IPV4,
@@ -42,6 +44,8 @@ struct lcore_conf_ev_tx_int_port_wrkr {
} __rte_cache_aligned;
void ipsec_poll_mode_worker(void);
+void ipsec_poll_mode_wrkr_inl_pr(void);
+void ipsec_poll_mode_wrkr_inl_pr_ss(void);
int ipsec_launch_one_lcore(void *args);
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index e8f2598..13b9113 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -936,6 +936,15 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
}
+ if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+ wrkr_flags |= INL_CR_F;
+ else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+ wrkr_flags |= INL_PR_F;
+ else if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
+ wrkr_flags |= LA_PR_F;
+ else
+ wrkr_flags |= LA_ANY_F;
+
nb_crypto_sessions++;
*ri = *ri + 1;
}
--
2.8.4
* [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions
2022-03-22 17:58 [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
` (8 preceding siblings ...)
2022-04-21 13:31 ` [PATCH v2 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
@ 2022-04-28 15:04 ` Nithin Dabilpuram
2022-04-28 15:04 ` [PATCH v3 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
` (7 more replies)
2022-04-29 20:44 ` [PATCH v4 " Nithin Dabilpuram
10 siblings, 8 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-28 15:04 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Move fast path helper functions to a header file for easy access.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
v3:
- In patch 7/7, in the Inline Protocol single SA mode worker thread, further
reduce processing by getting the proto from the single outbound SA flags.
Keep processing as minimal as possible, as single-SA mode is only
for benchmarking drivers.
v2:
- Moved this patch from 4/7 to 1/7 so that all code movement is in the first
patch, without any functional change.
- In patch 1/7, handled comments from Konstantin to check for capabilities
before using Tx offload in case of LA and also to enable checksum offload in
case of TSO + Inline Protocol.
- In patch 2/7, handled comments from Konstantin to use the RTE_ETH_IS* macros.
- In patch 2/7, used the tx_offload field and an RTE_MBUF_L2_LEN_BITS shift to
write mbuf->tx_offload instead of bitfield access, so that the field is
cleared and there are only stores, no loads (see the sketch after these notes).
- In patch 5/7, made a few fast path functions always_inline.
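As referenced above, a minimal sketch of the single-store tx_offload write
(assuming only l2_len and l3_len need to be set; the helper name
set_l2_l3_len is only illustrative):
        /* Sketch: set l2_len and l3_len with a single store to
         * tx_offload so the remaining offload fields are cleared and
         * no read-modify-write of the bitfields is needed.
         */
        static inline void
        set_l2_l3_len(struct rte_mbuf *m, uint64_t l2_len, uint64_t l3_len)
        {
                m->tx_offload = l2_len | (l3_len << RTE_MBUF_L2_LEN_BITS);
        }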
examples/ipsec-secgw/ipsec-secgw.c | 575 +---------------------------------
examples/ipsec-secgw/ipsec-secgw.h | 4 +
examples/ipsec-secgw/ipsec.h | 34 ++
examples/ipsec-secgw/ipsec_process.c | 49 +--
examples/ipsec-secgw/ipsec_worker.h | 588 +++++++++++++++++++++++++++++++++++
5 files changed, 630 insertions(+), 620 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 42b5081..959a20b 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -70,11 +70,6 @@ volatile bool force_quit;
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-/* Configure how many packets ahead to prefetch, when reading packets */
-#define PREFETCH_OFFSET 3
-
-#define MAX_RX_QUEUE_PER_LCORE 16
-
#define MAX_LCORE_PARAMS 1024
/*
@@ -191,9 +186,9 @@ static uint64_t dev_tx_offload = UINT64_MAX;
/*
* global values that determine multi-seg policy
*/
-static uint32_t frag_tbl_sz;
+uint32_t frag_tbl_sz;
static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
-static uint32_t mtu_size = RTE_ETHER_MTU;
+uint32_t mtu_size = RTE_ETHER_MTU;
static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
static uint32_t stats_interval;
@@ -205,11 +200,6 @@ struct app_sa_prm app_sa_prm = {
};
static const char *cfgfile;
-struct lcore_rx_queue {
- uint16_t port_id;
- uint8_t queue_id;
-} __rte_cache_aligned;
-
struct lcore_params {
uint16_t port_id;
uint8_t queue_id;
@@ -224,28 +214,7 @@ static uint16_t nb_lcore_params;
static struct rte_hash *cdev_map_in;
static struct rte_hash *cdev_map_out;
-struct buffer {
- uint16_t len;
- struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
-};
-
-struct lcore_conf {
- uint16_t nb_rx_queue;
- struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
- uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
- struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
- struct ipsec_ctx inbound;
- struct ipsec_ctx outbound;
- struct rt_ctx *rt4_ctx;
- struct rt_ctx *rt6_ctx;
- struct {
- struct rte_ip_frag_tbl *tbl;
- struct rte_mempool *pool_indir;
- struct rte_ip_frag_death_row dr;
- } frag;
-} __rte_cache_aligned;
-
-static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
+struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
.rxmode = {
@@ -281,32 +250,6 @@ multi_seg_required(void)
frame_buf_size || frag_tbl_sz != 0);
}
-static inline void
-adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
- uint32_t l2_len)
-{
- uint32_t plen, trim;
-
- plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
- if (plen < m->pkt_len) {
- trim = m->pkt_len - plen;
- rte_pktmbuf_trim(m, trim);
- }
-}
-
-static inline void
-adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
- uint32_t l2_len)
-{
- uint32_t plen, trim;
-
- plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
- if (plen < m->pkt_len) {
- trim = m->pkt_len - plen;
- rte_pktmbuf_trim(m, trim);
- }
-}
-
struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
@@ -371,369 +314,6 @@ print_stats_cb(__rte_unused void *param)
rte_eal_alarm_set(stats_interval * US_PER_S, print_stats_cb, NULL);
}
-static inline void
-prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
-{
- const struct rte_ether_hdr *eth;
- const struct rte_ipv4_hdr *iph4;
- const struct rte_ipv6_hdr *iph6;
- const struct rte_udp_hdr *udp;
- uint16_t ip4_hdr_len;
- uint16_t nat_port;
-
- eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
- if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
-
- iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
- RTE_ETHER_HDR_LEN);
- adjust_ipv4_pktlen(pkt, iph4, 0);
-
- switch (iph4->next_proto_id) {
- case IPPROTO_ESP:
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- ip4_hdr_len = ((iph4->version_ihl &
- RTE_IPV4_HDR_IHL_MASK) *
- RTE_IPV4_IHL_MULTIPLIER);
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, ip4_hdr_len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
- t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
- t->ip4.pkts[(t->ip4.num)++] = pkt;
- }
- pkt->l2_len = 0;
- pkt->l3_len = sizeof(*iph4);
- pkt->packet_type |= RTE_PTYPE_L3_IPV4;
- if (pkt->packet_type & RTE_PTYPE_L4_TCP)
- pkt->l4_len = sizeof(struct rte_tcp_hdr);
- else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
- pkt->l4_len = sizeof(struct rte_udp_hdr);
- } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
- int next_proto;
- size_t l3len, ext_len;
- uint8_t *p;
-
- /* get protocol type */
- iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
- RTE_ETHER_HDR_LEN);
- adjust_ipv6_pktlen(pkt, iph6, 0);
-
- next_proto = iph6->proto;
-
- /* determine l3 header size up to ESP extension */
- l3len = sizeof(struct ip6_hdr);
- p = rte_pktmbuf_mtod(pkt, uint8_t *);
- while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
- (next_proto = rte_ipv6_get_next_ext(p + l3len,
- next_proto, &ext_len)) >= 0)
- l3len += ext_len;
-
- /* drop packet when IPv6 header exceeds first segment length */
- if (unlikely(l3len > pkt->data_len)) {
- free_pkts(&pkt, 1);
- return;
- }
-
- switch (next_proto) {
- case IPPROTO_ESP:
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, l3len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
- t->ip6.data[t->ip6.num] = &iph6->proto;
- t->ip6.pkts[(t->ip6.num)++] = pkt;
- }
- pkt->l2_len = 0;
- pkt->l3_len = l3len;
- pkt->packet_type |= RTE_PTYPE_L3_IPV6;
- } else {
- /* Unknown/Unsupported type, drop the packet */
- RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
- rte_be_to_cpu_16(eth->ether_type));
- free_pkts(&pkt, 1);
- return;
- }
-
- /* Check if the packet has been processed inline. For inline protocol
- * processed packets, the metadata in the mbuf can be used to identify
- * the security processing done on the packet. The metadata will be
- * used to retrieve the application registered userdata associated
- * with the security session.
- */
-
- if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
- rte_security_dynfield_is_registered()) {
- struct ipsec_sa *sa;
- struct ipsec_mbuf_metadata *priv;
- struct rte_security_ctx *ctx = (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(
- pkt->port);
-
- /* Retrieve the userdata registered. Here, the userdata
- * registered is the SA pointer.
- */
- sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
- *rte_security_dynfield(pkt));
- if (sa == NULL) {
- /* userdata could not be retrieved */
- return;
- }
-
- /* Save SA as priv member in mbuf. This will be used in the
- * IPsec selector(SP-SA) check.
- */
-
- priv = get_priv(pkt);
- priv->sa = sa;
- }
-}
-
-static inline void
-prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
- uint16_t nb_pkts)
-{
- int32_t i;
-
- t->ipsec.num = 0;
- t->ip4.num = 0;
- t->ip6.num = 0;
-
- for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
- rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
- void *));
- prepare_one_packet(pkts[i], t);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_one_packet(pkts[i], t);
-}
-
-static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
- const struct lcore_conf *qconf)
-{
- struct ip *ip;
- struct rte_ether_hdr *ethhdr;
-
- ip = rte_pktmbuf_mtod(pkt, struct ip *);
-
- ethhdr = (struct rte_ether_hdr *)
- rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
-
- if (ip->ip_v == IPVERSION) {
- pkt->ol_flags |= qconf->outbound.ipv4_offloads;
- pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ip->ip_sum = 0;
-
- /* calculate IPv4 cksum in SW */
- if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
- ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
- } else {
- pkt->ol_flags |= qconf->outbound.ipv6_offloads;
- pkt->l3_len = sizeof(struct ip6_hdr);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- }
-
- memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
- sizeof(struct rte_ether_addr));
- memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
- sizeof(struct rte_ether_addr));
-}
-
-static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
- const struct lcore_conf *qconf)
-{
- int32_t i;
- const int32_t prefetch_offset = 2;
-
- for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
- rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
- prepare_tx_pkt(pkts[i], port, qconf);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_tx_pkt(pkts[i], port, qconf);
-}
-
-/* Send burst of packets on an output interface */
-static inline int32_t
-send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
-{
- struct rte_mbuf **m_table;
- int32_t ret;
- uint16_t queueid;
-
- queueid = qconf->tx_queue_id[port];
- m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
-
- prepare_tx_burst(m_table, n, port, qconf);
-
- ret = rte_eth_tx_burst(port, queueid, m_table, n);
-
- core_stats_update_tx(ret);
-
- if (unlikely(ret < n)) {
- do {
- free_pkts(&m_table[ret], 1);
- } while (++ret < n);
- }
-
- return 0;
-}
-
-/*
- * Helper function to fragment and queue for TX one packet.
- */
-static inline uint32_t
-send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
- uint16_t port, uint8_t proto)
-{
- struct buffer *tbl;
- uint32_t len, n;
- int32_t rc;
-
- tbl = qconf->tx_mbufs + port;
- len = tbl->len;
-
- /* free space for new fragments */
- if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
- send_burst(qconf, len, port);
- len = 0;
- }
-
- n = RTE_DIM(tbl->m_table) - len;
-
- if (proto == IPPROTO_IP)
- rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
- n, mtu_size, m->pool, qconf->frag.pool_indir);
- else
- rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
- n, mtu_size, m->pool, qconf->frag.pool_indir);
-
- if (rc >= 0)
- len += rc;
- else
- RTE_LOG(ERR, IPSEC,
- "%s: failed to fragment packet with size %u, "
- "error code: %d\n",
- __func__, m->pkt_len, rte_errno);
-
- free_pkts(&m, 1);
- return len;
-}
-
-/* Enqueue a single packet, and send burst if queue is filled */
-static inline int32_t
-send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
-{
- uint32_t lcore_id;
- uint16_t len;
- struct lcore_conf *qconf;
-
- lcore_id = rte_lcore_id();
-
- qconf = &lcore_conf[lcore_id];
- len = qconf->tx_mbufs[port].len;
-
- if (m->pkt_len <= mtu_size) {
- qconf->tx_mbufs[port].m_table[len] = m;
- len++;
-
- /* need to fragment the packet */
- } else if (frag_tbl_sz > 0)
- len = send_fragment_packet(qconf, m, port, proto);
- else
- free_pkts(&m, 1);
-
- /* enough pkts to be sent */
- if (unlikely(len == MAX_PKT_BURST)) {
- send_burst(qconf, MAX_PKT_BURST, port);
- len = 0;
- }
-
- qconf->tx_mbufs[port].len = len;
- return 0;
-}
-
-static inline void
-inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
- uint16_t lim, struct ipsec_spd_stats *stats)
-{
- struct rte_mbuf *m;
- uint32_t i, j, res, sa_idx;
-
- if (ip->num == 0 || sp == NULL)
- return;
-
- rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
- ip->num, DEFAULT_MAX_CATEGORIES);
-
- j = 0;
- for (i = 0; i < ip->num; i++) {
- m = ip->pkts[i];
- res = ip->res[i];
- if (res == BYPASS) {
- ip->pkts[j++] = m;
- stats->bypass++;
- continue;
- }
- if (res == DISCARD) {
- free_pkts(&m, 1);
- stats->discard++;
- continue;
- }
-
- /* Only check SPI match for processed IPSec packets */
- if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
- stats->discard++;
- free_pkts(&m, 1);
- continue;
- }
-
- sa_idx = res - 1;
- if (!inbound_sa_check(sa, m, sa_idx)) {
- stats->discard++;
- free_pkts(&m, 1);
- continue;
- }
- ip->pkts[j++] = m;
- stats->protect++;
- }
- ip->num = j;
-}
-
static void
split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
{
@@ -962,140 +542,6 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
}
}
-static inline int32_t
-get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
-{
- struct ipsec_mbuf_metadata *priv;
- struct ipsec_sa *sa;
-
- priv = get_priv(pkt);
-
- sa = priv->sa;
- if (unlikely(sa == NULL)) {
- RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
- goto fail;
- }
-
- if (is_ipv6)
- return sa->portid;
-
- /* else */
- return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
-
-fail:
- if (is_ipv6)
- return -1;
-
- /* else */
- return 0;
-}
-
-static inline void
-route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
-{
- uint32_t hop[MAX_PKT_BURST * 2];
- uint32_t dst_ip[MAX_PKT_BURST * 2];
- int32_t pkt_hop = 0;
- uint16_t i, offset;
- uint16_t lpm_pkts = 0;
- unsigned int lcoreid = rte_lcore_id();
-
- if (nb_pkts == 0)
- return;
-
- /* Need to do an LPM lookup for non-inline packets. Inline packets will
- * have port ID in the SA
- */
-
- for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
- /* Security offload not enabled. So an LPM lookup is
- * required to get the hop
- */
- offset = offsetof(struct ip, ip_dst);
- dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
- uint32_t *, offset);
- dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
- lpm_pkts++;
- }
- }
-
- rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
-
- lpm_pkts = 0;
-
- for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
- /* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
- } else {
- /* Need to use hop returned by lookup */
- pkt_hop = hop[lpm_pkts++];
- }
-
- if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
- core_statistics[lcoreid].lpm4.miss++;
- free_pkts(&pkts[i], 1);
- continue;
- }
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
- }
-}
-
-static inline void
-route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
-{
- int32_t hop[MAX_PKT_BURST * 2];
- uint8_t dst_ip[MAX_PKT_BURST * 2][16];
- uint8_t *ip6_dst;
- int32_t pkt_hop = 0;
- uint16_t i, offset;
- uint16_t lpm_pkts = 0;
- unsigned int lcoreid = rte_lcore_id();
-
- if (nb_pkts == 0)
- return;
-
- /* Need to do an LPM lookup for non-inline packets. Inline packets will
- * have port ID in the SA
- */
-
- for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
- /* Security offload not enabled. So an LPM lookup is
- * required to get the hop
- */
- offset = offsetof(struct ip6_hdr, ip6_dst);
- ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
- offset);
- memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
- lpm_pkts++;
- }
- }
-
- rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
- lpm_pkts);
-
- lpm_pkts = 0;
-
- for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
- /* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
- } else {
- /* Need to use hop returned by lookup */
- pkt_hop = hop[lpm_pkts++];
- }
-
- if (pkt_hop == -1) {
- core_statistics[lcoreid].lpm6.miss++;
- free_pkts(&pkts[i], 1);
- continue;
- }
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
- }
-}
-
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
uint8_t nb_pkts, uint16_t portid)
@@ -1121,21 +567,6 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
}
static inline void
-drain_tx_buffers(struct lcore_conf *qconf)
-{
- struct buffer *buf;
- uint32_t portid;
-
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- buf = &qconf->tx_mbufs[portid];
- if (buf->len == 0)
- continue;
- send_burst(qconf, buf->len, portid);
- buf->len = 0;
- }
-}
-
-static inline void
drain_crypto_buffers(struct lcore_conf *qconf)
{
uint32_t i;
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index 24f11ad..fceb835 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -6,6 +6,7 @@
#include <stdbool.h>
+#define MAX_RX_QUEUE_PER_LCORE 16
#define NB_SOCKETS 4
@@ -136,6 +137,9 @@ extern uint32_t nb_bufs_in_pool;
extern bool per_port_pool;
+extern uint32_t mtu_size;
+extern uint32_t frag_tbl_sz;
+
static inline uint8_t
is_unprotected_port(uint16_t port_id)
{
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index ccfde8e..9a4e7ea 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -9,6 +9,7 @@
#include <rte_byteorder.h>
#include <rte_crypto.h>
+#include <rte_ip_frag.h>
#include <rte_security.h>
#include <rte_flow.h>
#include <rte_ipsec.h>
@@ -37,6 +38,11 @@
#define IP6_VERSION (6)
+#define SATP_OUT_IPV4(t) \
+ ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
+ (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
+ ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
+
struct rte_crypto_xform;
struct ipsec_xform;
struct rte_mbuf;
@@ -260,6 +266,34 @@ struct cnt_blk {
uint32_t cnt;
} __rte_packed;
+struct lcore_rx_queue {
+ uint16_t port_id;
+ uint8_t queue_id;
+} __rte_cache_aligned;
+
+struct buffer {
+ uint16_t len;
+ struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+};
+
+struct lcore_conf {
+ uint16_t nb_rx_queue;
+ struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
+ uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
+ struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
+ struct ipsec_ctx inbound;
+ struct ipsec_ctx outbound;
+ struct rt_ctx *rt4_ctx;
+ struct rt_ctx *rt6_ctx;
+ struct {
+ struct rte_ip_frag_tbl *tbl;
+ struct rte_mempool *pool_indir;
+ struct rte_ip_frag_death_row dr;
+ } frag;
+} __rte_cache_aligned;
+
+extern struct lcore_conf lcore_conf[RTE_MAX_LCORE];
+
/* Socket ctx */
extern struct socket_ctx socket_ctx[NB_SOCKETS];
diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index 285e9c7..089d89f 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -13,11 +13,7 @@
#include "ipsec.h"
#include "ipsec-secgw.h"
-
-#define SATP_OUT_IPV4(t) \
- ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
- (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
- ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
+#include "ipsec_worker.h"
/* helper routine to free bulk of crypto-ops and related packets */
static inline void
@@ -209,49 +205,6 @@ ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
}
/*
- * helper routine for inline and cpu(synchronous) processing
- * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
- * Should be removed in future.
- */
-static inline void
-prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
-{
- uint32_t j;
- struct ipsec_mbuf_metadata *priv;
-
- for (j = 0; j != cnt; j++) {
- priv = get_priv(mb[j]);
- priv->sa = sa;
- /* setup TSO related fields if TSO enabled*/
- if (priv->sa->mss) {
- uint32_t ptype = mb[j]->packet_type;
- /* only TCP is supported */
- if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
- mb[j]->tso_segsz = priv->sa->mss;
- if ((IS_TUNNEL(priv->sa->flags))) {
- mb[j]->outer_l3_len = mb[j]->l3_len;
- mb[j]->outer_l2_len = mb[j]->l2_len;
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_TUNNEL_ESP;
- if (RTE_ETH_IS_IPV4_HDR(ptype))
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_OUTER_IP_CKSUM;
- }
- mb[j]->l4_len = sizeof(struct rte_tcp_hdr);
- mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG |
- RTE_MBUF_F_TX_TCP_CKSUM);
- if (RTE_ETH_IS_IPV4_HDR(ptype))
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_OUTER_IPV4;
- else
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_OUTER_IPV6;
- }
- }
- }
-}
-
-/*
* finish processing of packets successfully decrypted by an inline processor
*/
static uint32_t
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 5d85cf1..e0b0a82 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -4,8 +4,15 @@
#ifndef _IPSEC_WORKER_H_
#define _IPSEC_WORKER_H_
+#include <rte_acl.h>
+#include <rte_ethdev.h>
+#include <rte_lpm.h>
+#include <rte_lpm6.h>
+
#include "ipsec.h"
+/* Configure how many packets ahead to prefetch, when reading packets */
+#define PREFETCH_OFFSET 3
enum pkt_type {
PKT_TYPE_PLAIN_IPV4 = 1,
PKT_TYPE_IPSEC_IPV4,
@@ -38,4 +45,585 @@ void ipsec_poll_mode_worker(void);
int ipsec_launch_one_lcore(void *args);
+/*
+ * helper routine for inline and cpu(synchronous) processing
+ * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
+ * Should be removed in future.
+ */
+static inline void
+prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
+{
+ uint32_t j;
+ struct ipsec_mbuf_metadata *priv;
+
+ for (j = 0; j != cnt; j++) {
+ priv = get_priv(mb[j]);
+ priv->sa = sa;
+ /* setup TSO related fields if TSO enabled*/
+ if (priv->sa->mss) {
+ uint32_t ptype = mb[j]->packet_type;
+ /* only TCP is supported */
+ if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
+ mb[j]->tso_segsz = priv->sa->mss;
+ if ((IS_TUNNEL(priv->sa->flags))) {
+ mb[j]->outer_l3_len = mb[j]->l3_len;
+ mb[j]->outer_l2_len = mb[j]->l2_len;
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_TUNNEL_ESP;
+ if (RTE_ETH_IS_IPV4_HDR(ptype))
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM;
+ }
+ mb[j]->l4_len = sizeof(struct rte_tcp_hdr);
+ mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG |
+ RTE_MBUF_F_TX_TCP_CKSUM);
+ if (RTE_ETH_IS_IPV4_HDR(ptype))
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_OUTER_IPV4;
+ else
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_OUTER_IPV6;
+ }
+ }
+ }
+}
+
+static inline void
+adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
+ uint32_t l2_len)
+{
+ uint32_t plen, trim;
+
+ plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
+ if (plen < m->pkt_len) {
+ trim = m->pkt_len - plen;
+ rte_pktmbuf_trim(m, trim);
+ }
+}
+
+static inline void
+adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
+ uint32_t l2_len)
+{
+ uint32_t plen, trim;
+
+ plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
+ if (plen < m->pkt_len) {
+ trim = m->pkt_len - plen;
+ rte_pktmbuf_trim(m, trim);
+ }
+}
+
+static inline void
+prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
+{
+ const struct rte_ether_hdr *eth;
+ const struct rte_ipv4_hdr *iph4;
+ const struct rte_ipv6_hdr *iph6;
+ const struct rte_udp_hdr *udp;
+ uint16_t ip4_hdr_len;
+ uint16_t nat_port;
+
+ eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
+ if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+
+ iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
+ RTE_ETHER_HDR_LEN);
+ adjust_ipv4_pktlen(pkt, iph4, 0);
+
+ switch (iph4->next_proto_id) {
+ case IPPROTO_ESP:
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ break;
+ case IPPROTO_UDP:
+ if (app_sa_prm.udp_encap == 1) {
+ ip4_hdr_len = ((iph4->version_ihl &
+ RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER);
+ udp = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_udp_hdr *, ip4_hdr_len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port){
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ pkt->packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ break;
+ }
+ }
+ /* Fall through */
+ default:
+ t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
+ t->ip4.pkts[(t->ip4.num)++] = pkt;
+ }
+ pkt->l2_len = 0;
+ pkt->l3_len = sizeof(*iph4);
+ pkt->packet_type |= RTE_PTYPE_L3_IPV4;
+ if (pkt->packet_type & RTE_PTYPE_L4_TCP)
+ pkt->l4_len = sizeof(struct rte_tcp_hdr);
+ else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
+ pkt->l4_len = sizeof(struct rte_udp_hdr);
+ } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ int next_proto;
+ size_t l3len, ext_len;
+ uint8_t *p;
+
+ /* get protocol type */
+ iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
+ RTE_ETHER_HDR_LEN);
+ adjust_ipv6_pktlen(pkt, iph6, 0);
+
+ next_proto = iph6->proto;
+
+ /* determine l3 header size up to ESP extension */
+ l3len = sizeof(struct ip6_hdr);
+ p = rte_pktmbuf_mtod(pkt, uint8_t *);
+ while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
+ (next_proto = rte_ipv6_get_next_ext(p + l3len,
+ next_proto, &ext_len)) >= 0)
+ l3len += ext_len;
+
+ /* drop packet when IPv6 header exceeds first segment length */
+ if (unlikely(l3len > pkt->data_len)) {
+ free_pkts(&pkt, 1);
+ return;
+ }
+
+ switch (next_proto) {
+ case IPPROTO_ESP:
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ break;
+ case IPPROTO_UDP:
+ if (app_sa_prm.udp_encap == 1) {
+ udp = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_udp_hdr *, l3len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port){
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ pkt->packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ break;
+ }
+ }
+ /* Fall through */
+ default:
+ t->ip6.data[t->ip6.num] = &iph6->proto;
+ t->ip6.pkts[(t->ip6.num)++] = pkt;
+ }
+ pkt->l2_len = 0;
+ pkt->l3_len = l3len;
+ pkt->packet_type |= RTE_PTYPE_L3_IPV6;
+ } else {
+ /* Unknown/Unsupported type, drop the packet */
+ RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
+ rte_be_to_cpu_16(eth->ether_type));
+ free_pkts(&pkt, 1);
+ return;
+ }
+
+ /* Check if the packet has been processed inline. For inline protocol
+ * processed packets, the metadata in the mbuf can be used to identify
+ * the security processing done on the packet. The metadata will be
+ * used to retrieve the application registered userdata associated
+ * with the security session.
+ */
+
+ if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
+ rte_security_dynfield_is_registered()) {
+ struct ipsec_sa *sa;
+ struct ipsec_mbuf_metadata *priv;
+ struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(
+ pkt->port);
+
+ /* Retrieve the userdata registered. Here, the userdata
+ * registered is the SA pointer.
+ */
+ sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
+ *rte_security_dynfield(pkt));
+ if (sa == NULL) {
+ /* userdata could not be retrieved */
+ return;
+ }
+
+ /* Save SA as priv member in mbuf. This will be used in the
+ * IPsec selector(SP-SA) check.
+ */
+
+ priv = get_priv(pkt);
+ priv->sa = sa;
+ }
+}
+
+static inline void
+prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
+ uint16_t nb_pkts)
+{
+ int32_t i;
+
+ t->ipsec.num = 0;
+ t->ip4.num = 0;
+ t->ip6.num = 0;
+
+ for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
+ void *));
+ prepare_one_packet(pkts[i], t);
+ }
+ /* Process left packets */
+ for (; i < nb_pkts; i++)
+ prepare_one_packet(pkts[i], t);
+}
+
+static inline void
+prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
+ const struct lcore_conf *qconf)
+{
+ struct ip *ip;
+ struct rte_ether_hdr *ethhdr;
+
+ ip = rte_pktmbuf_mtod(pkt, struct ip *);
+
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+ if (ip->ip_v == IPVERSION) {
+ pkt->ol_flags |= qconf->outbound.ipv4_offloads;
+ pkt->l3_len = sizeof(struct ip);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ ip->ip_sum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
+ ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ } else {
+ pkt->ol_flags |= qconf->outbound.ipv6_offloads;
+ pkt->l3_len = sizeof(struct ip6_hdr);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ }
+
+ memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+}
+
+static inline void
+prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
+ const struct lcore_conf *qconf)
+{
+ int32_t i;
+ const int32_t prefetch_offset = 2;
+
+ for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
+ rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
+ prepare_tx_pkt(pkts[i], port, qconf);
+ }
+ /* Process left packets */
+ for (; i < nb_pkts; i++)
+ prepare_tx_pkt(pkts[i], port, qconf);
+}
+
+/* Send burst of packets on an output interface */
+static inline int32_t
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
+{
+ struct rte_mbuf **m_table;
+ int32_t ret;
+ uint16_t queueid;
+
+ queueid = qconf->tx_queue_id[port];
+ m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
+
+ prepare_tx_burst(m_table, n, port, qconf);
+
+ ret = rte_eth_tx_burst(port, queueid, m_table, n);
+
+ core_stats_update_tx(ret);
+
+ if (unlikely(ret < n)) {
+ do {
+ free_pkts(&m_table[ret], 1);
+ } while (++ret < n);
+ }
+
+ return 0;
+}
+
+/*
+ * Helper function to fragment and queue for TX one packet.
+ */
+static inline uint32_t
+send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
+ uint16_t port, uint8_t proto)
+{
+ struct buffer *tbl;
+ uint32_t len, n;
+ int32_t rc;
+
+ tbl = qconf->tx_mbufs + port;
+ len = tbl->len;
+
+ /* free space for new fragments */
+ if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
+ send_burst(qconf, len, port);
+ len = 0;
+ }
+
+ n = RTE_DIM(tbl->m_table) - len;
+
+ if (proto == IPPROTO_IP)
+ rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
+ n, mtu_size, m->pool, qconf->frag.pool_indir);
+ else
+ rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
+ n, mtu_size, m->pool, qconf->frag.pool_indir);
+
+ if (rc >= 0)
+ len += rc;
+ else
+ RTE_LOG(ERR, IPSEC,
+ "%s: failed to fragment packet with size %u, "
+ "error code: %d\n",
+ __func__, m->pkt_len, rte_errno);
+
+ free_pkts(&m, 1);
+ return len;
+}
+
+/* Enqueue a single packet, and send burst if queue is filled */
+static inline int32_t
+send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
+{
+ uint32_t lcore_id;
+ uint16_t len;
+ struct lcore_conf *qconf;
+
+ lcore_id = rte_lcore_id();
+
+ qconf = &lcore_conf[lcore_id];
+ len = qconf->tx_mbufs[port].len;
+
+ if (m->pkt_len <= mtu_size) {
+ qconf->tx_mbufs[port].m_table[len] = m;
+ len++;
+
+ /* need to fragment the packet */
+ } else if (frag_tbl_sz > 0)
+ len = send_fragment_packet(qconf, m, port, proto);
+ else
+ free_pkts(&m, 1);
+
+ /* enough pkts to be sent */
+ if (unlikely(len == MAX_PKT_BURST)) {
+ send_burst(qconf, MAX_PKT_BURST, port);
+ len = 0;
+ }
+
+ qconf->tx_mbufs[port].len = len;
+ return 0;
+}
+
+static inline void
+inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
+ uint16_t lim, struct ipsec_spd_stats *stats)
+{
+ struct rte_mbuf *m;
+ uint32_t i, j, res, sa_idx;
+
+ if (ip->num == 0 || sp == NULL)
+ return;
+
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+ ip->num, DEFAULT_MAX_CATEGORIES);
+
+ j = 0;
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ res = ip->res[i];
+ if (res == BYPASS) {
+ ip->pkts[j++] = m;
+ stats->bypass++;
+ continue;
+ }
+ if (res == DISCARD) {
+ free_pkts(&m, 1);
+ stats->discard++;
+ continue;
+ }
+
+ /* Only check SPI match for processed IPSec packets */
+ if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
+ stats->discard++;
+ free_pkts(&m, 1);
+ continue;
+ }
+
+ sa_idx = res - 1;
+ if (!inbound_sa_check(sa, m, sa_idx)) {
+ stats->discard++;
+ free_pkts(&m, 1);
+ continue;
+ }
+ ip->pkts[j++] = m;
+ stats->protect++;
+ }
+ ip->num = j;
+}
+
+static inline int32_t
+get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
+{
+ struct ipsec_mbuf_metadata *priv;
+ struct ipsec_sa *sa;
+
+ priv = get_priv(pkt);
+
+ sa = priv->sa;
+ if (unlikely(sa == NULL)) {
+ RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
+ goto fail;
+ }
+
+ if (is_ipv6)
+ return sa->portid;
+
+ /* else */
+ return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
+
+fail:
+ if (is_ipv6)
+ return -1;
+
+ /* else */
+ return 0;
+}
+
+static inline void
+route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+{
+ uint32_t hop[MAX_PKT_BURST * 2];
+ uint32_t dst_ip[MAX_PKT_BURST * 2];
+ int32_t pkt_hop = 0;
+ uint16_t i, offset;
+ uint16_t lpm_pkts = 0;
+ unsigned int lcoreid = rte_lcore_id();
+
+ if (nb_pkts == 0)
+ return;
+
+ /* Need to do an LPM lookup for non-inline packets. Inline packets will
+ * have port ID in the SA
+ */
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ /* Security offload not enabled. So an LPM lookup is
+ * required to get the hop
+ */
+ offset = offsetof(struct ip, ip_dst);
+ dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
+ uint32_t *, offset);
+ dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
+ lpm_pkts++;
+ }
+ }
+
+ rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
+
+ lpm_pkts = 0;
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ /* Read hop from the SA */
+ pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
+ } else {
+ /* Need to use hop returned by lookup */
+ pkt_hop = hop[lpm_pkts++];
+ }
+
+ if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
+ core_statistics[lcoreid].lpm4.miss++;
+ free_pkts(&pkts[i], 1);
+ continue;
+ }
+ send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
+ }
+}
+
+static inline void
+route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+{
+ int32_t hop[MAX_PKT_BURST * 2];
+ uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+ uint8_t *ip6_dst;
+ int32_t pkt_hop = 0;
+ uint16_t i, offset;
+ uint16_t lpm_pkts = 0;
+ unsigned int lcoreid = rte_lcore_id();
+
+ if (nb_pkts == 0)
+ return;
+
+ /* Need to do an LPM lookup for non-inline packets. Inline packets will
+ * have port ID in the SA
+ */
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ /* Security offload not enabled. So an LPM lookup is
+ * required to get the hop
+ */
+ offset = offsetof(struct ip6_hdr, ip6_dst);
+ ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
+ offset);
+ memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
+ lpm_pkts++;
+ }
+ }
+
+ rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
+ lpm_pkts);
+
+ lpm_pkts = 0;
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ /* Read hop from the SA */
+ pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
+ } else {
+ /* Need to use hop returned by lookup */
+ pkt_hop = hop[lpm_pkts++];
+ }
+
+ if (pkt_hop == -1) {
+ core_statistics[lcoreid].lpm6.miss++;
+ free_pkts(&pkts[i], 1);
+ continue;
+ }
+ send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
+ }
+}
+
+static inline void
+drain_tx_buffers(struct lcore_conf *qconf)
+{
+ struct buffer *buf;
+ uint32_t portid;
+
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+ buf = &qconf->tx_mbufs[portid];
+ if (buf->len == 0)
+ continue;
+ send_burst(qconf, buf->len, portid);
+ buf->len = 0;
+ }
+}
+
#endif /* _IPSEC_WORKER_H_ */
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
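Side note on the fast path helpers above: route4_pkts() and
get_hop_for_offload_pkt() share one next-hop encoding, with the egress
port in the low byte and RTE_LPM_LOOKUP_SUCCESS marking a valid entry,
so a single check covers both the LPM result and the SA-provided port.
A minimal decode sketch (illustrative helper, not part of the patch):

#include <stdint.h>
#include <rte_lpm.h>

/* Decode the next-hop value used by route4_pkts(): returns -1 on a
 * route miss, otherwise writes the egress port id.
 */
static inline int
hop_to_port(uint32_t hop, uint16_t *port)
{
	if ((hop & RTE_LPM_LOOKUP_SUCCESS) == 0)
		return -1;	/* miss: caller drops the packet */

	*port = hop & 0xff;	/* egress port id in the low byte */
	return 0;
}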
* [PATCH v3 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline
2022-04-28 15:04 ` [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
@ 2022-04-28 15:04 ` Nithin Dabilpuram
2022-04-28 15:04 ` [PATCH v3 3/7] examples/ipsec-secgw: use HW parsed packet type in poll mode Nithin Dabilpuram
` (6 subsequent siblings)
7 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-28 15:04 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Enable Tx IPv4 checksum offload only when Tx inline crypto, lookaside
crypto/protocol or CPU crypto is needed.
For Tx inline protocol offload, checksum computation is implicitly
taken care of by HW.
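The per-rule decision above boils down to the following stand-alone
sketch (the helper name and parameters are illustrative, not part of
the patch):

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_security.h>

/* Accumulate the Tx offloads needed for one outbound SA rule. */
static void
add_outbound_rule_tx_offloads(uint64_t *tx_offloads,
			      enum rte_security_session_action_type type,
			      uint32_t mss, uint64_t dev_tx_capa)
{
	switch (type) {
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		/* HW does all IPsec processing; checksum offload only
		 * matters when TSO is in use.
		 */
		*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
		if (mss)
			*tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO |
					RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		/* SW still builds the outer IP header */
		*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY |
				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
		if (mss)
			*tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
		break;
	default:
		/* Lookaside/cpu crypto: offload the IPv4 checksum
		 * whenever the device supports it.
		 */
		if (dev_tx_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
			*tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
		break;
	}
}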
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 3 ---
examples/ipsec-secgw/sa.c | 46 ++++++++++++++++++++++++++++++++------
2 files changed, 39 insertions(+), 10 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 959a20b..5fe5eee 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1761,9 +1761,6 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
local_port_conf.txmode.offloads |=
RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
- if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
- local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
-
printf("port %u configuring rx_offloads=0x%" PRIx64
", tx_offloads=0x%" PRIx64 "\n",
portid, local_port_conf.rxmode.offloads,
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 1839ac7..e8f2598 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -1766,10 +1766,18 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
struct ipsec_sa *rule;
uint32_t idx_sa;
enum rte_security_session_action_type rule_type;
+ struct rte_eth_dev_info dev_info;
+ int ret;
*rx_offloads = 0;
*tx_offloads = 0;
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error during getting device (port %u) info: %s\n",
+ port_id, strerror(-ret));
+
/* Check for inbound rules that use offloads and use this port */
for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
rule = &sa_in[idx_sa];
@@ -1785,13 +1793,37 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
rule = &sa_out[idx_sa];
rule_type = ipsec_get_action_type(rule);
- if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
- rule_type ==
- RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
- && rule->portid == port_id) {
- *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
- if (rule->mss)
- *tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
+ switch (rule_type) {
+ case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+ /* Checksum offload is not needed for inline protocol as
+ * all processing for Outbound IPSec packets will be
+ * implicitly taken care and for non-IPSec packets,
+ * there is no need of IPv4 Checksum offload.
+ */
+ if (rule->portid == port_id) {
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
+ if (rule->mss)
+ *tx_offloads |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
+ }
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+ if (rule->portid == port_id) {
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
+ if (rule->mss)
+ *tx_offloads |=
+ RTE_ETH_TX_OFFLOAD_TCP_TSO;
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+ }
+ break;
+ default:
+ /* Enable IPv4 checksum offload even if one of lookaside
+ * SA's are present.
+ */
+ if (dev_info.tx_offload_capa &
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+ break;
}
}
return 0;
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v3 3/7] examples/ipsec-secgw: use HW parsed packet type in poll mode
2022-04-28 15:04 ` [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
2022-04-28 15:04 ` [PATCH v3 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
@ 2022-04-28 15:04 ` Nithin Dabilpuram
2022-04-28 15:04 ` [PATCH v3 4/7] examples/ipsec-secgw: allow larger burst size for vectors Nithin Dabilpuram
` (5 subsequent siblings)
7 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-28 15:04 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Use the HW parsed packet type when the ethdev supports the necessary
protocols. If packet type parsing is not supported, register ethdev Rx
callbacks to parse the packet type in SW. This is better for
performance as it affects the fast path.
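A minimal, self-contained sketch of the scheme (illustrative names,
not part of the patch): query the supported ptypes once at init and
register a SW parse callback per Rx queue only when the required
ptypes are missing.

#include <stdbool.h>
#include <rte_ethdev.h>
#include <rte_mbuf_ptype.h>

static bool
port_parses_needed_ptypes(uint16_t portid)
{
	uint32_t mask = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
			RTE_PTYPE_TUNNEL_MASK;
	bool ipv4 = false, ipv6 = false, udp = false, esp = false;
	int i, n;

	n = rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0);
	if (n <= 0)
		return false;

	uint32_t ptypes[n];

	n = rte_eth_dev_get_supported_ptypes(portid, mask, ptypes, n);
	for (i = 0; i < n; i++) {
		if (RTE_ETH_IS_IPV4_HDR(ptypes[i]))
			ipv4 = true;
		if (RTE_ETH_IS_IPV6_HDR(ptypes[i]))
			ipv6 = true;
		if ((ptypes[i] & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
			udp = true;
		if ((ptypes[i] & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			esp = true;
	}
	return ipv4 && ipv6 && udp && esp;
}

/* At Rx queue setup: keep HW parsing when available, otherwise hook a
 * SW parser (parse_ptype_cb in this patch) on the queue.
 */
static void
setup_ptype_parsing(uint16_t portid, uint16_t queueid, rte_rx_callback_fn cb)
{
	if (!port_parses_needed_ptypes(portid))
		(void)rte_eth_add_rx_callback(portid, queueid, cb, NULL);
}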
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 157 ++++++++++++++++++++++++++++++++++++
examples/ipsec-secgw/ipsec_worker.h | 114 ++++++++++----------------
2 files changed, 201 insertions(+), 70 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 5fe5eee..d6a4959 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1680,6 +1680,147 @@ cryptodevs_init(uint16_t req_queue_num)
return total_nb_qps;
}
+static int
+check_ptype(int portid)
+{
+ int l3_ipv4 = 0, l3_ipv6 = 0, l4_udp = 0, tunnel_esp = 0;
+ int i, nb_ptypes;
+ uint32_t mask;
+
+ mask = (RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
+ RTE_PTYPE_TUNNEL_MASK);
+
+ nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0);
+ if (nb_ptypes <= 0)
+ return 0;
+
+ uint32_t ptypes[nb_ptypes];
+
+ nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, ptypes, nb_ptypes);
+ for (i = 0; i < nb_ptypes; ++i) {
+ if (RTE_ETH_IS_IPV4_HDR(ptypes[i]))
+ l3_ipv4 = 1;
+ if (RTE_ETH_IS_IPV6_HDR(ptypes[i]))
+ l3_ipv6 = 1;
+ if ((ptypes[i] & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
+ tunnel_esp = 1;
+ if ((ptypes[i] & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
+ l4_udp = 1;
+ }
+
+ if (l3_ipv4 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
+
+ if (l3_ipv6 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
+
+ if (l4_udp == 0)
+ printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
+
+ if (tunnel_esp == 0)
+ printf("port %d cannot parse RTE_PTYPE_TUNNEL_ESP\n", portid);
+
+ if (l3_ipv4 && l3_ipv6 && l4_udp && tunnel_esp)
+ return 1;
+
+ return 0;
+
+}
+
+static inline void
+parse_ptype(struct rte_mbuf *m)
+{
+ uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+ const struct rte_ipv4_hdr *iph4;
+ const struct rte_ipv6_hdr *iph6;
+ const struct rte_ether_hdr *eth;
+ const struct rte_udp_hdr *udp;
+ uint16_t nat_port, ether_type;
+ int next_proto = 0;
+ size_t ext_len = 0;
+ const uint8_t *p;
+ uint32_t l3len;
+
+ eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+ ether_type = eth->ether_type;
+
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+ iph4 = (const struct rte_ipv4_hdr *)(eth + 1);
+ l3len = ((iph4->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER);
+
+ if (l3len == sizeof(struct rte_ipv4_hdr))
+ packet_type |= RTE_PTYPE_L3_IPV4;
+ else
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+
+ next_proto = iph4->next_proto_id;
+ p = (const uint8_t *)iph4;
+ } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ iph6 = (const struct rte_ipv6_hdr *)(eth + 1);
+ l3len = sizeof(struct ip6_hdr);
+
+ /* determine l3 header size up to ESP extension */
+ next_proto = iph6->proto;
+ p = (const uint8_t *)iph6;
+ while (next_proto != IPPROTO_ESP && l3len < m->data_len &&
+ (next_proto = rte_ipv6_get_next_ext(p + l3len,
+ next_proto, &ext_len)) >= 0)
+ l3len += ext_len;
+
+ /* Skip packet if IPv6 header exceeds first segment length */
+ if (unlikely(l3len + RTE_ETHER_HDR_LEN > m->data_len))
+ goto exit;
+
+ if (l3len == sizeof(struct ip6_hdr))
+ packet_type |= RTE_PTYPE_L3_IPV6;
+ else
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT;
+ }
+
+ switch (next_proto) {
+ case IPPROTO_ESP:
+ packet_type |= RTE_PTYPE_TUNNEL_ESP;
+ break;
+ case IPPROTO_UDP:
+ if (app_sa_prm.udp_encap == 1) {
+ udp = (const struct rte_udp_hdr *)(p + l3len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port)
+ packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ }
+ break;
+ default:
+ break;
+ }
+exit:
+ m->packet_type = packet_type;
+}
+
+static uint16_t
+parse_ptype_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ uint16_t max_pkts __rte_unused,
+ void *user_param __rte_unused)
+{
+ uint32_t i;
+
+ if (unlikely(nb_pkts == 0))
+ return nb_pkts;
+
+ rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct ether_hdr *));
+ for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
+ struct ether_hdr *));
+ parse_ptype(pkts[i]);
+ }
+ parse_ptype(pkts[i]);
+
+ return nb_pkts;
+}
+
static void
port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
{
@@ -1691,6 +1832,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
struct lcore_conf *qconf;
struct rte_ether_addr ethaddr;
struct rte_eth_conf local_port_conf = port_conf;
+ int ptype_supported;
ret = rte_eth_dev_info_get(portid, &dev_info);
if (ret != 0)
@@ -1788,6 +1930,11 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
"err=%d, port=%d\n", ret, portid);
+ /* Check if required ptypes are supported */
+ ptype_supported = check_ptype(portid);
+ if (!ptype_supported)
+ printf("Port %d: softly parse packet type info\n", portid);
+
/* init one TX queue per lcore */
tx_queueid = 0;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
@@ -1849,6 +1996,16 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup: err=%d, "
"port=%d\n", ret, portid);
+
+ /* Register Rx callback if ptypes are not supported */
+ if (!ptype_supported &&
+ !rte_eth_add_rx_callback(portid, queue,
+ parse_ptype_cb, NULL)) {
+ printf("Failed to add rx callback: port=%d, "
+ "queue=%d\n", portid, queue);
+ }
+
+
}
}
printf("\n");
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index e0b0a82..7397291 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -117,55 +117,33 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
+ uint32_t ptype = pkt->packet_type;
const struct rte_ether_hdr *eth;
const struct rte_ipv4_hdr *iph4;
const struct rte_ipv6_hdr *iph6;
- const struct rte_udp_hdr *udp;
- uint16_t ip4_hdr_len;
- uint16_t nat_port;
+ uint32_t tun_type, l3_type;
+ uint64_t tx_offload;
+ uint16_t l3len;
+
+ tun_type = ptype & RTE_PTYPE_TUNNEL_MASK;
+ l3_type = ptype & RTE_PTYPE_L3_MASK;
eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
- if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
-
+ if (RTE_ETH_IS_IPV4_HDR(l3_type)) {
iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
RTE_ETHER_HDR_LEN);
adjust_ipv4_pktlen(pkt, iph4, 0);
- switch (iph4->next_proto_id) {
- case IPPROTO_ESP:
+ if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- ip4_hdr_len = ((iph4->version_ihl &
- RTE_IPV4_HDR_IHL_MASK) *
- RTE_IPV4_IHL_MULTIPLIER);
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, ip4_hdr_len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
+ } else {
t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
t->ip4.pkts[(t->ip4.num)++] = pkt;
}
- pkt->l2_len = 0;
- pkt->l3_len = sizeof(*iph4);
- pkt->packet_type |= RTE_PTYPE_L3_IPV4;
- if (pkt->packet_type & RTE_PTYPE_L4_TCP)
- pkt->l4_len = sizeof(struct rte_tcp_hdr);
- else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
- pkt->l4_len = sizeof(struct rte_udp_hdr);
- } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ tx_offload = sizeof(*iph4) << RTE_MBUF_L2_LEN_BITS;
+ } else if (RTE_ETH_IS_IPV6_HDR(l3_type)) {
int next_proto;
- size_t l3len, ext_len;
+ size_t ext_len;
uint8_t *p;
/* get protocol type */
@@ -173,47 +151,35 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
RTE_ETHER_HDR_LEN);
adjust_ipv6_pktlen(pkt, iph6, 0);
- next_proto = iph6->proto;
-
- /* determine l3 header size up to ESP extension */
l3len = sizeof(struct ip6_hdr);
- p = rte_pktmbuf_mtod(pkt, uint8_t *);
- while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
- (next_proto = rte_ipv6_get_next_ext(p + l3len,
- next_proto, &ext_len)) >= 0)
- l3len += ext_len;
- /* drop packet when IPv6 header exceeds first segment length */
- if (unlikely(l3len > pkt->data_len)) {
- free_pkts(&pkt, 1);
- return;
- }
-
- switch (next_proto) {
- case IPPROTO_ESP:
+ if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, l3len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
+ } else {
t->ip6.data[t->ip6.num] = &iph6->proto;
t->ip6.pkts[(t->ip6.num)++] = pkt;
}
- pkt->l2_len = 0;
- pkt->l3_len = l3len;
- pkt->packet_type |= RTE_PTYPE_L3_IPV6;
+
+ /* Determine l3 header size up to ESP extension by walking
+ * through extension headers.
+ */
+ if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
+ l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
+ p = rte_pktmbuf_mtod(pkt, uint8_t *);
+ next_proto = iph6->proto;
+ while (next_proto != IPPROTO_ESP &&
+ l3len < pkt->data_len &&
+ (next_proto = rte_ipv6_get_next_ext(p + l3len,
+ next_proto, &ext_len)) >= 0)
+ l3len += ext_len;
+
+ /* Drop pkt when IPv6 header exceeds first seg size */
+ if (unlikely(l3len > pkt->data_len)) {
+ free_pkts(&pkt, 1);
+ return;
+ }
+ }
+ tx_offload = l3len << RTE_MBUF_L2_LEN_BITS;
} else {
/* Unknown/Unsupported type, drop the packet */
RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
@@ -222,6 +188,14 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
return;
}
+ if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
+ tx_offload |= (sizeof(struct rte_tcp_hdr) <<
+ (RTE_MBUF_L2_LEN_BITS + RTE_MBUF_L3_LEN_BITS));
+ else if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
+ tx_offload |= (sizeof(struct rte_udp_hdr) <<
+ (RTE_MBUF_L2_LEN_BITS + RTE_MBUF_L3_LEN_BITS));
+ pkt->tx_offload = tx_offload;
+
/* Check if the packet has been processed inline. For inline protocol
* processed packets, the metadata in the mbuf can be used to identify
* the security processing done on the packet. The metadata will be
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v3 4/7] examples/ipsec-secgw: allow larger burst size for vectors
2022-04-28 15:04 ` [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
2022-04-28 15:04 ` [PATCH v3 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
2022-04-28 15:04 ` [PATCH v3 3/7] examples/ipsec-secgw: use HW parsed packet type in poll mode Nithin Dabilpuram
@ 2022-04-28 15:04 ` Nithin Dabilpuram
2022-04-28 15:04 ` [PATCH v3 5/7] examples/ipsec-secgw: get security context from lcore conf Nithin Dabilpuram
` (4 subsequent siblings)
7 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-28 15:04 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Allow a larger burst size for vector event mode instead of restricting
it to 32. Also restructure the traffic type struct to have the packet
count first so that it is always in the first cacheline, and cache
align the struct. Since MAX_PKT_BURST is not used by the vector event
mode worker, define another macro for its burst size so that poll mode
performance is not affected.
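For illustration only (not part of the patch), the reworked layout and
the intent behind it can be written as:

#include <stddef.h>
#include <stdint.h>
#include <rte_common.h>
#include <rte_mbuf.h>

#define MAX_PKT_BURST		32
#define MAX_PKT_BURST_VEC	256
#define MAX_PKTS \
	((MAX_PKT_BURST_VEC > MAX_PKT_BURST ? \
	  MAX_PKT_BURST_VEC : MAX_PKT_BURST) * 2)

struct traffic_type {
	uint32_t num;			/* hot counter placed first */
	struct rte_mbuf *pkts[MAX_PKTS];
	const uint8_t *data[MAX_PKTS];
	void *saptr[MAX_PKTS];
	uint32_t res[MAX_PKTS];
} __rte_cache_aligned;

/* 'num' and the head of 'pkts[]' stay within the first cacheline */
_Static_assert(offsetof(struct traffic_type, pkts) < RTE_CACHE_LINE_SIZE,
	       "pkts[] must start in the first cacheline");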
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 2 +-
examples/ipsec-secgw/ipsec-secgw.h | 15 ++++++++++-----
2 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index d6a4959..88984a6 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1317,7 +1317,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
case CMD_LINE_OPT_VECTOR_SIZE_NUM:
ret = parse_decimal(optarg);
- if (ret > MAX_PKT_BURST) {
+ if (ret > MAX_PKT_BURST_VEC) {
printf("Invalid argument for \'%s\': %s\n",
CMD_LINE_OPT_VECTOR_SIZE, optarg);
print_usage(prgname);
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index fceb835..2edf631 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -11,6 +11,11 @@
#define NB_SOCKETS 4
#define MAX_PKT_BURST 32
+#define MAX_PKT_BURST_VEC 256
+
+#define MAX_PKTS \
+ ((MAX_PKT_BURST_VEC > MAX_PKT_BURST ? \
+ MAX_PKT_BURST_VEC : MAX_PKT_BURST) * 2)
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
@@ -49,12 +54,12 @@
#define MBUF_PTYPE_TUNNEL_ESP_IN_UDP (RTE_PTYPE_TUNNEL_ESP | RTE_PTYPE_L4_UDP)
struct traffic_type {
- const uint8_t *data[MAX_PKT_BURST * 2];
- struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
- void *saptr[MAX_PKT_BURST * 2];
- uint32_t res[MAX_PKT_BURST * 2];
uint32_t num;
-};
+ struct rte_mbuf *pkts[MAX_PKTS];
+ const uint8_t *data[MAX_PKTS];
+ void *saptr[MAX_PKTS];
+ uint32_t res[MAX_PKTS];
+} __rte_cache_aligned;
struct ipsec_traffic {
struct traffic_type ipsec;
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v3 5/7] examples/ipsec-secgw: get security context from lcore conf
2022-04-28 15:04 ` [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
` (2 preceding siblings ...)
2022-04-28 15:04 ` [PATCH v3 4/7] examples/ipsec-secgw: allow larger burst size for vectors Nithin Dabilpuram
@ 2022-04-28 15:04 ` Nithin Dabilpuram
2022-04-28 15:04 ` [PATCH v3 6/7] examples/ipsec-secgw: update eth header during route lookup Nithin Dabilpuram
` (3 subsequent siblings)
7 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-28 15:04 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Store the security context pointer in the lcore Rx queue config and
get it from there in the fast path for better performance.
Currently rte_eth_dev_get_sec_ctx(), which is meant to be a control
path API, is called on a per-packet basis, and every call to that API
checks the ethdev port status.
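A condensed sketch of the idea (illustrative struct and helper, not
the actual code in this patch):

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_security.h>

struct rx_queue_cfg {
	uint16_t port_id;
	uint8_t queue_id;
	struct rte_security_ctx *sec_ctx;	/* cached, may be NULL */
};

/* Resolve the security context once at configuration time instead of
 * calling rte_eth_dev_get_sec_ctx() per packet in the worker loop.
 */
static void
cache_sec_ctx(struct rx_queue_cfg *q)
{
	/* Only useful when the security dynfield is registered, i.e.
	 * inline-processed packets can actually carry metadata.
	 */
	if (rte_security_dynfield_is_registered())
		q->sec_ctx = rte_eth_dev_get_sec_ctx(q->port_id);
	else
		q->sec_ctx = NULL;
}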
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 22 ++++++++++++++++++---
examples/ipsec-secgw/ipsec.h | 1 +
examples/ipsec-secgw/ipsec_worker.h | 39 +++++++++++++++++--------------------
3 files changed, 38 insertions(+), 24 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 88984a6..14b9c06 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -544,11 +544,11 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
- uint8_t nb_pkts, uint16_t portid)
+ uint8_t nb_pkts, uint16_t portid, struct rte_security_ctx *ctx)
{
struct ipsec_traffic traffic;
- prepare_traffic(pkts, &traffic, nb_pkts);
+ prepare_traffic(ctx, pkts, &traffic, nb_pkts);
if (unlikely(single_sa)) {
if (is_unprotected_port(portid))
@@ -740,7 +740,8 @@ ipsec_poll_mode_worker(void)
if (nb_rx > 0) {
core_stats_update_rx(nb_rx);
- process_pkts(qconf, pkts, nb_rx, portid);
+ process_pkts(qconf, pkts, nb_rx, portid,
+ rxql->sec_ctx);
}
/* dequeue and process completed crypto-ops */
@@ -3060,6 +3061,21 @@ main(int32_t argc, char **argv)
flow_init();
+ /* Get security context if available and only if dynamic field is
+ * registered for fast path access.
+ */
+ if (!rte_security_dynfield_is_registered())
+ goto skip_sec_ctx;
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ for (i = 0; i < lcore_conf[lcore_id].nb_rx_queue; i++) {
+ portid = lcore_conf[lcore_id].rx_queue_list[i].port_id;
+ lcore_conf[lcore_id].rx_queue_list[i].sec_ctx =
+ rte_eth_dev_get_sec_ctx(portid);
+ }
+ }
+skip_sec_ctx:
+
check_all_ports_link_status(enabled_port_mask);
if (stats_interval > 0)
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 9a4e7ea..ecad262 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -269,6 +269,7 @@ struct cnt_blk {
struct lcore_rx_queue {
uint16_t port_id;
uint8_t queue_id;
+ struct rte_security_ctx *sec_ctx;
} __rte_cache_aligned;
struct buffer {
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 7397291..b1fc364 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -88,7 +88,7 @@ prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
}
}
-static inline void
+static __rte_always_inline void
adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
uint32_t l2_len)
{
@@ -101,7 +101,7 @@ adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
}
}
-static inline void
+static __rte_always_inline void
adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
uint32_t l2_len)
{
@@ -114,8 +114,9 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
}
}
-static inline void
-prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
+static __rte_always_inline void
+prepare_one_packet(struct rte_security_ctx *ctx, struct rte_mbuf *pkt,
+ struct ipsec_traffic *t)
{
uint32_t ptype = pkt->packet_type;
const struct rte_ether_hdr *eth;
@@ -203,13 +204,9 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
* with the security session.
*/
- if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
- rte_security_dynfield_is_registered()) {
+ if (ctx && pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
struct ipsec_sa *sa;
struct ipsec_mbuf_metadata *priv;
- struct rte_security_ctx *ctx = (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(
- pkt->port);
/* Retrieve the userdata registered. Here, the userdata
* registered is the SA pointer.
@@ -230,9 +227,9 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
}
}
-static inline void
-prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
- uint16_t nb_pkts)
+static __rte_always_inline void
+prepare_traffic(struct rte_security_ctx *ctx, struct rte_mbuf **pkts,
+ struct ipsec_traffic *t, uint16_t nb_pkts)
{
int32_t i;
@@ -243,11 +240,11 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
void *));
- prepare_one_packet(pkts[i], t);
+ prepare_one_packet(ctx, pkts[i], t);
}
/* Process left packets */
for (; i < nb_pkts; i++)
- prepare_one_packet(pkts[i], t);
+ prepare_one_packet(ctx, pkts[i], t);
}
static inline void
@@ -305,7 +302,7 @@ prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
}
/* Send burst of packets on an output interface */
-static inline int32_t
+static __rte_always_inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
@@ -333,7 +330,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
/*
* Helper function to fragment and queue for TX one packet.
*/
-static inline uint32_t
+static __rte_always_inline uint32_t
send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
uint16_t port, uint8_t proto)
{
@@ -372,7 +369,7 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
}
/* Enqueue a single packet, and send burst if queue is filled */
-static inline int32_t
+static __rte_always_inline int32_t
send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
{
uint32_t lcore_id;
@@ -404,7 +401,7 @@ send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
return 0;
}
-static inline void
+static __rte_always_inline void
inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
uint16_t lim, struct ipsec_spd_stats *stats)
{
@@ -451,7 +448,7 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
ip->num = j;
}
-static inline int32_t
+static __rte_always_inline int32_t
get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
{
struct ipsec_mbuf_metadata *priv;
@@ -531,7 +528,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
}
-static inline void
+static __rte_always_inline void
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
int32_t hop[MAX_PKT_BURST * 2];
@@ -585,7 +582,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
}
-static inline void
+static __rte_always_inline void
drain_tx_buffers(struct lcore_conf *qconf)
{
struct buffer *buf;
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v3 6/7] examples/ipsec-secgw: update eth header during route lookup
2022-04-28 15:04 ` [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
` (3 preceding siblings ...)
2022-04-28 15:04 ` [PATCH v3 5/7] examples/ipsec-secgw: get security context from lcore conf Nithin Dabilpuram
@ 2022-04-28 15:04 ` Nithin Dabilpuram
2022-04-28 15:04 ` [PATCH v3 7/7] examples/ipsec-secgw: add poll mode worker for inline proto Nithin Dabilpuram
` (2 subsequent siblings)
7 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-28 15:04 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Update the Ethernet header during route lookup instead of doing it
much later while performing the Tx burst. The advantage of doing it at
route lookup is that no additional IP version checks based on packet
data are needed, and the packet data is already in cache since the
route lookup is already consuming it.
This is also useful for inline protocol offload cases of v4-in-v6 or
v6-in-v4 outbound tunnel operations, where the packet data carries no
information about the tunnel protocol.
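A minimal sketch of the per-packet step this enables (illustrative
helper, assumes Ethernet headroom is available; not part of the
patch): the IP family is already known from which route table matched,
so the L2 header and offload flags can be written without inspecting
the packet headers again.

#include <stdbool.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

static void
finalize_l2(struct rte_mbuf *pkt, bool is_ipv4,
	    const struct rte_ether_addr *src, const struct rte_ether_addr *dst)
{
	struct rte_ether_hdr *eh;

	/* Prepend the Ethernet header; headroom is assumed to exist */
	eh = (struct rte_ether_hdr *)rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
	pkt->l2_len = RTE_ETHER_HDR_LEN;

	if (is_ipv4) {
		struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *)(eh + 1);

		eh->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		pkt->l3_len = sizeof(*ip);
		pkt->ol_flags |= RTE_MBUF_F_TX_IPV4;

		/* Compute the IPv4 checksum in SW unless HW will do it */
		if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0) {
			ip->hdr_checksum = 0;
			ip->hdr_checksum = rte_ipv4_cksum(ip);
		}
	} else {
		eh->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
		pkt->l3_len = sizeof(struct rte_ipv6_hdr);
		pkt->ol_flags |= RTE_MBUF_F_TX_IPV6;
	}

	rte_ether_addr_copy(src, &eh->src_addr);
	rte_ether_addr_copy(dst, &eh->dst_addr);
}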
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 9 +-
examples/ipsec-secgw/ipsec_worker.h | 199 ++++++++++++++++++++++--------------
2 files changed, 130 insertions(+), 78 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 14b9c06..24ee6c0 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -562,7 +562,8 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
process_pkts_outbound(&qconf->outbound, &traffic);
}
- route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
+ route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
}
@@ -613,7 +614,8 @@ drain_inbound_crypto_queues(const struct lcore_conf *qconf,
if (trf.ip4.num != 0) {
inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0,
&core_statistics[lcoreid].inbound.spd4);
- route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
}
/* process ipv6 packets */
@@ -647,7 +649,8 @@ drain_outbound_crypto_queues(const struct lcore_conf *qconf,
/* process ipv4 packets */
if (trf.ip4.num != 0)
- route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
/* process ipv6 packets */
if (trf.ip6.num != 0)
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index b1fc364..7f21440 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -247,60 +247,6 @@ prepare_traffic(struct rte_security_ctx *ctx, struct rte_mbuf **pkts,
prepare_one_packet(ctx, pkts[i], t);
}
-static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
- const struct lcore_conf *qconf)
-{
- struct ip *ip;
- struct rte_ether_hdr *ethhdr;
-
- ip = rte_pktmbuf_mtod(pkt, struct ip *);
-
- ethhdr = (struct rte_ether_hdr *)
- rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
-
- if (ip->ip_v == IPVERSION) {
- pkt->ol_flags |= qconf->outbound.ipv4_offloads;
- pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ip->ip_sum = 0;
-
- /* calculate IPv4 cksum in SW */
- if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
- ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
- } else {
- pkt->ol_flags |= qconf->outbound.ipv6_offloads;
- pkt->l3_len = sizeof(struct ip6_hdr);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- }
-
- memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
- sizeof(struct rte_ether_addr));
- memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
- sizeof(struct rte_ether_addr));
-}
-
-static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
- const struct lcore_conf *qconf)
-{
- int32_t i;
- const int32_t prefetch_offset = 2;
-
- for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
- rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
- prepare_tx_pkt(pkts[i], port, qconf);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_tx_pkt(pkts[i], port, qconf);
-}
-
/* Send burst of packets on an output interface */
static __rte_always_inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
@@ -312,8 +258,6 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
queueid = qconf->tx_queue_id[port];
m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
- prepare_tx_burst(m_table, n, port, qconf);
-
ret = rte_eth_tx_burst(port, queueid, m_table, n);
core_stats_update_tx(ret);
@@ -334,8 +278,11 @@ static __rte_always_inline uint32_t
send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
uint16_t port, uint8_t proto)
{
+ struct rte_ether_hdr *ethhdr;
+ struct rte_ipv4_hdr *ip;
+ struct rte_mbuf *pkt;
struct buffer *tbl;
- uint32_t len, n;
+ uint32_t len, n, i;
int32_t rc;
tbl = qconf->tx_mbufs + port;
@@ -349,6 +296,9 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
n = RTE_DIM(tbl->m_table) - len;
+ /* Strip the ethernet header that was prepended earlier */
+ rte_pktmbuf_adj(m, RTE_ETHER_HDR_LEN);
+
if (proto == IPPROTO_IP)
rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
n, mtu_size, m->pool, qconf->frag.pool_indir);
@@ -356,13 +306,51 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
n, mtu_size, m->pool, qconf->frag.pool_indir);
- if (rc >= 0)
- len += rc;
- else
+ if (rc < 0) {
RTE_LOG(ERR, IPSEC,
"%s: failed to fragment packet with size %u, "
"error code: %d\n",
__func__, m->pkt_len, rte_errno);
+ rc = 0;
+ }
+
+ i = len;
+ len += rc;
+ for (; i < len; i++) {
+ pkt = tbl->m_table[i];
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ if (proto == IPPROTO_IP) {
+ ethhdr->ether_type =
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
+ pkt->ol_flags |= qconf->outbound.ipv4_offloads;
+
+ ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
+ ip->hdr_checksum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
+ ip->hdr_checksum = rte_ipv4_cksum(ip);
+ } else {
+ ethhdr->ether_type =
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv6_hdr);
+ pkt->ol_flags |= qconf->outbound.ipv6_offloads;
+ }
+
+ memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+ }
free_pkts(&m, 1);
return len;
@@ -381,7 +369,8 @@ send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
qconf = &lcore_conf[lcore_id];
len = qconf->tx_mbufs[port].len;
- if (m->pkt_len <= mtu_size) {
+ /* L2 header is already part of packet */
+ if (m->pkt_len - RTE_ETHER_HDR_LEN <= mtu_size) {
qconf->tx_mbufs[port].m_table[len] = m;
len++;
@@ -476,15 +465,19 @@ get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
return 0;
}
-static inline void
-route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+static __rte_always_inline void
+route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[],
+ uint8_t nb_pkts, uint64_t tx_offloads, bool ip_cksum)
{
uint32_t hop[MAX_PKT_BURST * 2];
uint32_t dst_ip[MAX_PKT_BURST * 2];
+ struct rte_ether_hdr *ethhdr;
int32_t pkt_hop = 0;
uint16_t i, offset;
uint16_t lpm_pkts = 0;
unsigned int lcoreid = rte_lcore_id();
+ struct rte_mbuf *pkt;
+ uint16_t port;
if (nb_pkts == 0)
return;
@@ -494,12 +487,13 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ pkt = pkts[i];
+ if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
offset = offsetof(struct ip, ip_dst);
- dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
+ dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkt,
uint32_t *, offset);
dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
lpm_pkts++;
@@ -511,9 +505,10 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ pkt = pkts[i];
+ if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
+ pkt_hop = get_hop_for_offload_pkt(pkt, 0);
} else {
/* Need to use hop returned by lookup */
pkt_hop = hop[lpm_pkts++];
@@ -521,10 +516,41 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
core_statistics[lcoreid].lpm4.miss++;
- free_pkts(&pkts[i], 1);
+ free_pkts(&pkt, 1);
continue;
}
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
+
+ port = pkt_hop & 0xff;
+
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+ pkt->ol_flags |= RTE_MBUF_F_TX_IPV4;
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+ if (ip_cksum) {
+ struct rte_ipv4_hdr *ip;
+
+ pkt->ol_flags |= tx_offloads;
+
+ ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
+ ip->hdr_checksum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
+ ip->hdr_checksum = rte_ipv4_cksum(ip);
+ }
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+
+ send_single_packet(pkt, port, IPPROTO_IP);
}
}
@@ -533,11 +559,14 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
int32_t hop[MAX_PKT_BURST * 2];
uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+ struct rte_ether_hdr *ethhdr;
uint8_t *ip6_dst;
int32_t pkt_hop = 0;
uint16_t i, offset;
uint16_t lpm_pkts = 0;
unsigned int lcoreid = rte_lcore_id();
+ struct rte_mbuf *pkt;
+ uint16_t port;
if (nb_pkts == 0)
return;
@@ -547,12 +576,13 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ pkt = pkts[i];
+ if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
offset = offsetof(struct ip6_hdr, ip6_dst);
- ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
+ ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
offset);
memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
lpm_pkts++;
@@ -565,9 +595,10 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ pkt = pkts[i];
+ if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
+ pkt_hop = get_hop_for_offload_pkt(pkt, 1);
} else {
/* Need to use hop returned by lookup */
pkt_hop = hop[lpm_pkts++];
@@ -575,10 +606,28 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
if (pkt_hop == -1) {
core_statistics[lcoreid].lpm6.miss++;
- free_pkts(&pkts[i], 1);
+ free_pkts(&pkt, 1);
continue;
}
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
+
+ port = pkt_hop & 0xff;
+
+ /* Update minimum offload data */
+ pkt->ol_flags |= RTE_MBUF_F_TX_IPV6;
+ pkt->l3_len = sizeof(struct ip6_hdr);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+
+ send_single_packet(pkt, port, IPPROTO_IPV6);
}
}
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v3 7/7] examples/ipsec-secgw: add poll mode worker for inline proto
2022-04-28 15:04 ` [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
` (4 preceding siblings ...)
2022-04-28 15:04 ` [PATCH v3 6/7] examples/ipsec-secgw: update eth header during route lookup Nithin Dabilpuram
@ 2022-04-28 15:04 ` Nithin Dabilpuram
2022-04-29 10:23 ` [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Kumar Dabilpuram
2022-04-29 10:29 ` Akhil Goyal
7 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-28 15:04 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Add a separate worker thread for the case when all SAs are of type
inline protocol offload and librte_ipsec is enabled, in order to make
that case more optimal. The current default worker supports all kinds
of SAs, leading to many per-packet checks and branching based on the
SA type, which can be any of 5 types.
Also make a provision for choosing different poll mode workers for
different combinations of SA types, with the default being the
existing poll mode worker that supports all kinds of SAs.
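A minimal sketch of such a selection scheme (illustrative types and
helper, not the actual code in ipsec_worker.c):

#include <stddef.h>
#include <stdint.h>

typedef void (*ipsec_worker_fn_t)(void);

struct wrkr_choice {
	uint16_t flags;			/* exact wrkr_flags combination */
	ipsec_worker_fn_t fn;		/* specialized worker */
};

/* Pick a poll mode worker matching the SA-type flags gathered at
 * configuration time; fall back to the existing do-everything worker.
 */
static ipsec_worker_fn_t
pick_poll_worker(uint16_t flags, const struct wrkr_choice *tbl, size_t n,
		 ipsec_worker_fn_t dflt)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].flags == flags)
			return tbl[i].fn;

	return dflt;
}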
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 6 +-
examples/ipsec-secgw/ipsec-secgw.h | 10 +
examples/ipsec-secgw/ipsec_worker.c | 365 +++++++++++++++++++++++++++++++++++-
examples/ipsec-secgw/ipsec_worker.h | 4 +
examples/ipsec-secgw/sa.c | 9 +
5 files changed, 390 insertions(+), 4 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 24ee6c0..4251952 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -68,8 +68,6 @@ volatile bool force_quit;
#define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */
#define MAX_QUEUE_PAIRS 1
-#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-
#define MAX_LCORE_PARAMS 1024
/*
@@ -173,7 +171,7 @@ static uint64_t enabled_cryptodev_mask = UINT64_MAX;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
-static uint32_t single_sa;
+uint32_t single_sa;
uint32_t nb_bufs_in_pool;
/*
@@ -238,6 +236,7 @@ struct socket_ctx socket_ctx[NB_SOCKETS];
bool per_port_pool;
+uint16_t wrkr_flags;
/*
* Determine is multi-segment support required:
* - either frame buffer size is smaller then mtu
@@ -1233,6 +1232,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
single_sa = 1;
single_sa_idx = ret;
eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
+ wrkr_flags |= SS_F;
printf("Configured with single SA index %u\n",
single_sa_idx);
break;
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index 2edf631..f027360 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -135,6 +135,7 @@ extern uint32_t unprotected_port_mask;
/* Index of SA in single mode */
extern uint32_t single_sa_idx;
+extern uint32_t single_sa;
extern volatile bool force_quit;
@@ -145,6 +146,15 @@ extern bool per_port_pool;
extern uint32_t mtu_size;
extern uint32_t frag_tbl_sz;
+#define SS_F (1U << 0) /* Single SA mode */
+#define INL_PR_F (1U << 1) /* Inline Protocol */
+#define INL_CR_F (1U << 2) /* Inline Crypto */
+#define LA_PR_F (1U << 3) /* Lookaside Protocol */
+#define LA_ANY_F (1U << 4) /* Lookaside Any */
+#define MAX_F (LA_ANY_F << 1)
+
+extern uint16_t wrkr_flags;
+
static inline uint8_t
is_unprotected_port(uint16_t port_id)
{
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 8639426..65dcb51 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -17,6 +17,8 @@ struct port_drv_mode_data {
struct rte_security_ctx *ctx;
};
+typedef void (*ipsec_worker_fn_t)(void);
+
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
@@ -1004,6 +1006,367 @@ ipsec_eventmode_worker(struct eh_conf *conf)
eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}
+static __rte_always_inline void
+outb_inl_pro_spd_process(struct sp_ctx *sp,
+ struct sa_ctx *sa_ctx,
+ struct traffic_type *ip,
+ struct traffic_type *match,
+ struct traffic_type *mismatch,
+ bool match_flag,
+ struct ipsec_spd_stats *stats)
+{
+ uint32_t prev_sa_idx = UINT32_MAX;
+ struct rte_mbuf *ipsec[MAX_PKT_BURST];
+ struct rte_ipsec_session *ips;
+ uint32_t i, j, j_mis, sa_idx;
+ struct ipsec_sa *sa = NULL;
+ uint32_t ipsec_num = 0;
+ struct rte_mbuf *m;
+ uint64_t satp;
+
+ if (ip->num == 0 || sp == NULL)
+ return;
+
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+ ip->num, DEFAULT_MAX_CATEGORIES);
+
+ j = match->num;
+ j_mis = mismatch->num;
+
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ sa_idx = ip->res[i] - 1;
+
+ if (unlikely(ip->res[i] == DISCARD)) {
+ free_pkts(&m, 1);
+
+ stats->discard++;
+ } else if (unlikely(ip->res[i] == BYPASS)) {
+ match->pkts[j++] = m;
+
+ stats->bypass++;
+ } else {
+ if (prev_sa_idx == UINT32_MAX) {
+ prev_sa_idx = sa_idx;
+ sa = &sa_ctx->sa[sa_idx];
+ ips = ipsec_get_primary_session(sa);
+ satp = rte_ipsec_sa_type(ips->sa);
+ }
+
+ if (sa_idx != prev_sa_idx) {
+ prep_process_group(sa, ipsec, ipsec_num);
+
+ /* Prepare packets for outbound */
+ rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
+
+ /* Copy to current tr or a different tr */
+ if (SATP_OUT_IPV4(satp) == match_flag) {
+ memcpy(&match->pkts[j], ipsec,
+ ipsec_num * sizeof(void *));
+ j += ipsec_num;
+ } else {
+ memcpy(&mismatch->pkts[j_mis], ipsec,
+ ipsec_num * sizeof(void *));
+ j_mis += ipsec_num;
+ }
+
+ /* Update to new SA */
+ sa = &sa_ctx->sa[sa_idx];
+ ips = ipsec_get_primary_session(sa);
+ satp = rte_ipsec_sa_type(ips->sa);
+ ipsec_num = 0;
+ }
+
+ ipsec[ipsec_num++] = m;
+ stats->protect++;
+ }
+ }
+
+ if (ipsec_num) {
+ prep_process_group(sa, ipsec, ipsec_num);
+
+ /* Prepare packets for outbound */
+ rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
+
+ /* Copy to current tr or a different tr */
+ if (SATP_OUT_IPV4(satp) == match_flag) {
+ memcpy(&match->pkts[j], ipsec,
+ ipsec_num * sizeof(void *));
+ j += ipsec_num;
+ } else {
+ memcpy(&mismatch->pkts[j_mis], ipsec,
+ ipsec_num * sizeof(void *));
+ j_mis += ipsec_num;
+ }
+ }
+ match->num = j;
+ mismatch->num = j_mis;
+}
+
+/* Poll mode worker when all SA's are of type inline protocol */
+void
+ipsec_poll_mode_wrkr_inl_pr(void)
+{
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
+ / US_PER_S * BURST_TX_DRAIN_US;
+ struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out;
+ struct rte_mbuf *pkts[MAX_PKT_BURST];
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
+ struct ipsec_core_statistics *stats;
+ struct rt_ctx *rt4_ctx, *rt6_ctx;
+ struct sa_ctx *sa_in, *sa_out;
+ struct traffic_type ip4, ip6;
+ struct lcore_rx_queue *rxql;
+ struct rte_mbuf **v4, **v6;
+ struct ipsec_traffic trf;
+ struct lcore_conf *qconf;
+ uint16_t v4_num, v6_num;
+ int32_t socket_id;
+ uint32_t lcore_id;
+ int32_t i, nb_rx;
+ uint16_t portid;
+ uint8_t queueid;
+
+ prev_tsc = 0;
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_conf[lcore_id];
+ rxql = qconf->rx_queue_list;
+ socket_id = rte_lcore_to_socket_id(lcore_id);
+ stats = &core_statistics[lcore_id];
+
+ rt4_ctx = socket_ctx[socket_id].rt_ip4;
+ rt6_ctx = socket_ctx[socket_id].rt_ip6;
+
+ sp4_in = socket_ctx[socket_id].sp_ip4_in;
+ sp6_in = socket_ctx[socket_id].sp_ip6_in;
+ sa_in = socket_ctx[socket_id].sa_in;
+
+ sp4_out = socket_ctx[socket_id].sp_ip4_out;
+ sp6_out = socket_ctx[socket_id].sp_ip6_out;
+ sa_out = socket_ctx[socket_id].sa_out;
+
+ qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
+
+ if (qconf->nb_rx_queue == 0) {
+ RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
+ lcore_id);
+ return;
+ }
+
+ RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->nb_rx_queue; i++) {
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ RTE_LOG(INFO, IPSEC,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ lcore_id, portid, queueid);
+ }
+
+ while (!force_quit) {
+ cur_tsc = rte_rdtsc();
+
+ /* TX queue buffer drain */
+ diff_tsc = cur_tsc - prev_tsc;
+
+ if (unlikely(diff_tsc > drain_tsc)) {
+ drain_tx_buffers(qconf);
+ prev_tsc = cur_tsc;
+ }
+
+ for (i = 0; i < qconf->nb_rx_queue; ++i) {
+ /* Read packets from RX queues */
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ nb_rx = rte_eth_rx_burst(portid, queueid,
+ pkts, MAX_PKT_BURST);
+
+ if (nb_rx <= 0)
+ continue;
+
+ core_stats_update_rx(nb_rx);
+
+ prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);
+
+ /* Drop any IPsec traffic */
+ free_pkts(trf.ipsec.pkts, trf.ipsec.num);
+
+ if (is_unprotected_port(portid)) {
+ inbound_sp_sa(sp4_in, sa_in, &trf.ip4,
+ trf.ip4.num,
+ &stats->inbound.spd4);
+
+ inbound_sp_sa(sp6_in, sa_in, &trf.ip6,
+ trf.ip6.num,
+ &stats->inbound.spd6);
+
+ v4 = trf.ip4.pkts;
+ v4_num = trf.ip4.num;
+ v6 = trf.ip6.pkts;
+ v6_num = trf.ip6.num;
+ } else {
+ ip4.num = 0;
+ ip6.num = 0;
+
+ outb_inl_pro_spd_process(sp4_out, sa_out,
+ &trf.ip4, &ip4, &ip6,
+ true,
+ &stats->outbound.spd4);
+
+ outb_inl_pro_spd_process(sp6_out, sa_out,
+ &trf.ip6, &ip6, &ip4,
+ false,
+ &stats->outbound.spd6);
+ v4 = ip4.pkts;
+ v4_num = ip4.num;
+ v6 = ip6.pkts;
+ v6_num = ip6.num;
+ }
+
+ route4_pkts(rt4_ctx, v4, v4_num, 0, false);
+ route6_pkts(rt6_ctx, v6, v6_num);
+ }
+ }
+}
+
+/* Poll mode worker when all SAs are of type inline protocol
+ * and single SA mode is enabled.
+ */
+void
+ipsec_poll_mode_wrkr_inl_pr_ss(void)
+{
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
+ / US_PER_S * BURST_TX_DRAIN_US;
+ struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
+ uint16_t sa_out_portid, sa_out_proto;
+ struct rte_ipsec_session *ips;
+ struct lcore_rx_queue *rxql;
+ struct lcore_conf *qconf;
+ struct sa_ctx *sa_out;
+ uint32_t i, nb_rx, j;
+ struct ipsec_sa *sa;
+ int32_t socket_id;
+ uint32_t lcore_id;
+ uint16_t portid;
+ uint8_t queueid;
+
+ prev_tsc = 0;
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_conf[lcore_id];
+ rxql = qconf->rx_queue_list;
+ socket_id = rte_lcore_to_socket_id(lcore_id);
+
+ /* Get SA info */
+ sa_out = socket_ctx[socket_id].sa_out;
+ sa = &sa_out->sa[single_sa_idx];
+ ips = ipsec_get_primary_session(sa);
+ sa_out_portid = sa->portid;
+ if (sa->flags & IP6_TUNNEL)
+ sa_out_proto = IPPROTO_IPV6;
+ else
+ sa_out_proto = IPPROTO_IP;
+
+
+ qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
+
+ if (qconf->nb_rx_queue == 0) {
+ RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
+ lcore_id);
+ return;
+ }
+
+ RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->nb_rx_queue; i++) {
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ RTE_LOG(INFO, IPSEC,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ lcore_id, portid, queueid);
+ }
+
+ while (!force_quit) {
+ cur_tsc = rte_rdtsc();
+
+ /* TX queue buffer drain */
+ diff_tsc = cur_tsc - prev_tsc;
+
+ if (unlikely(diff_tsc > drain_tsc)) {
+ drain_tx_buffers(qconf);
+ prev_tsc = cur_tsc;
+ }
+
+ for (i = 0; i < qconf->nb_rx_queue; ++i) {
+ /* Read packets from RX queues */
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ nb_rx = rte_eth_rx_burst(portid, queueid,
+ pkts, MAX_PKT_BURST);
+
+ if (nb_rx <= 0)
+ continue;
+
+ core_stats_update_rx(nb_rx);
+
+ if (is_unprotected_port(portid)) {
+ /* Nothing much to do for inbound inline
+ * decrypted traffic.
+ */
+ for (j = 0; j < nb_rx; j++) {
+ uint32_t ptype, proto;
+
+ pkt = pkts[j];
+ ptype = pkt->packet_type &
+ RTE_PTYPE_L3_MASK;
+ if (ptype == RTE_PTYPE_L3_IPV4)
+ proto = IPPROTO_IP;
+ else
+ proto = IPPROTO_IPV6;
+
+ send_single_packet(pkt, portid, proto);
+ }
+
+ continue;
+ }
+
+ rte_ipsec_pkt_process(ips, pkts, nb_rx);
+
+ /* Send pkts out */
+ for (j = 0; j < nb_rx; j++) {
+ pkt = pkts[j];
+
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+ send_single_packet(pkt, sa_out_portid,
+ sa_out_proto);
+ }
+ }
+ }
+}
+
+static void
+ipsec_poll_mode_wrkr_launch(void)
+{
+ static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
+ [INL_PR_F] = ipsec_poll_mode_wrkr_inl_pr,
+ [INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
+ };
+ ipsec_worker_fn_t fn;
+
+ if (!app_sa_prm.enable) {
+ fn = ipsec_poll_mode_worker;
+ } else {
+ fn = poll_mode_wrkrs[wrkr_flags];
+
+ /* Default to the worker that handles all modes */
+ if (!fn)
+ fn = ipsec_poll_mode_worker;
+ }
+
+ /* Launch worker */
+ (*fn)();
+}
+
int ipsec_launch_one_lcore(void *args)
{
struct eh_conf *conf;
@@ -1012,7 +1375,7 @@ int ipsec_launch_one_lcore(void *args)
if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
/* Run in poll mode */
- ipsec_poll_mode_worker();
+ ipsec_poll_mode_wrkr_launch();
} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
/* Run in event mode */
ipsec_eventmode_worker(conf);
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 7f21440..315f3d6 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -13,6 +13,8 @@
/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+
enum pkt_type {
PKT_TYPE_PLAIN_IPV4 = 1,
PKT_TYPE_IPSEC_IPV4,
@@ -42,6 +44,8 @@ struct lcore_conf_ev_tx_int_port_wrkr {
} __rte_cache_aligned;
void ipsec_poll_mode_worker(void);
+void ipsec_poll_mode_wrkr_inl_pr(void);
+void ipsec_poll_mode_wrkr_inl_pr_ss(void);
int ipsec_launch_one_lcore(void *args);
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index e8f2598..13b9113 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -936,6 +936,15 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
}
+ if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+ wrkr_flags |= INL_CR_F;
+ else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+ wrkr_flags |= INL_PR_F;
+ else if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
+ wrkr_flags |= LA_PR_F;
+ else
+ wrkr_flags |= LA_ANY_F;
+
nb_crypto_sessions++;
*ri = *ri + 1;
}
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
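
The ipsec_poll_mode_wrkr_launch() helper added above picks a specialized poll-mode worker by indexing a function-pointer table with the accumulated SA-type flags, and falls back to the generic worker when no dedicated entry exists. Below is a self-contained sketch of that dispatch pattern only; the names (worker_fn_t, FLAG_*) are illustrative and are not the application's identifiers.

#include <stdio.h>

/* Illustrative flag values; the application derives its flags from the SA
 * action types parsed from the configuration. */
#define FLAG_INLINE_PROTO (1u << 0)
#define FLAG_SINGLE_SA    (1u << 1)
#define FLAG_MAX          (1u << 2)

typedef void (*worker_fn_t)(void);

static void generic_worker(void)         { printf("generic poll-mode worker\n"); }
static void inline_proto_worker(void)    { printf("inline-protocol worker\n"); }
static void inline_proto_ss_worker(void) { printf("inline-protocol single-SA worker\n"); }

/* Pick the most specialized worker registered for this flag combination,
 * defaulting to the generic worker when the table has no entry. */
static void launch_worker(unsigned int flags)
{
	static const worker_fn_t workers[FLAG_MAX] = {
		[FLAG_INLINE_PROTO]                  = inline_proto_worker,
		[FLAG_INLINE_PROTO | FLAG_SINGLE_SA] = inline_proto_ss_worker,
	};
	worker_fn_t fn = (flags < FLAG_MAX) ? workers[flags] : NULL;

	if (fn == NULL)
		fn = generic_worker;
	fn();
}

int main(void)
{
	launch_worker(FLAG_INLINE_PROTO);                  /* specialized worker */
	launch_worker(FLAG_INLINE_PROTO | FLAG_SINGLE_SA); /* single-SA fast path */
	launch_worker(0);                                  /* generic fallback */
	return 0;
}
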
* Re: [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions
2022-04-28 15:04 ` [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
` (5 preceding siblings ...)
2022-04-28 15:04 ` [PATCH v3 7/7] examples/ipsec-secgw: add poll mode worker for inline proto Nithin Dabilpuram
@ 2022-04-29 10:23 ` Nithin Kumar Dabilpuram
2022-04-29 10:29 ` Akhil Goyal
7 siblings, 0 replies; 37+ messages in thread
From: Nithin Kumar Dabilpuram @ 2022-04-29 10:23 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal,
konstantin.v.ananyev
Cc: dev, anoobj
Hi Konstantin,
Any comments on this updated series?
Thanks
Nithin
On 4/28/22 8:34 PM, Nithin Dabilpuram wrote:
> Move fast path helper functions to header file for easy access.
>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
> ---
> v3:
> - In patch 7/7, in Inline Protocol single sa mode's worker thread, further
> reduce processing by getting the proto from the Single outbound SA flags.
> Keep processing as minimal as possible as single-sa mode is only
> for benchmarking drivers.
>
> v2:
> - Moved this patch from 4/7 to 1/7 to keep all moving as first patch
> without any change in function.
> - In patch 1/7, handled comments from Konstantin to check for capabilities before
> using Tx offload in case of LA and also to enable Checksum offload in case of
> TSO+Inline Protocol
> - In patch 2/7, handled comments from Konstantin to use RTE_ETH_IS* macros and
> - In patch 2/7, used tx_offload field and RTE_MBUF_L2_LEN_BITS shift to write to
> mbuf->tx_offload instead of bitfield access so that it is cleared and
> there is only stores and no loads.
> - In patch 5/7, made few fast path functions always_inline
>
> examples/ipsec-secgw/ipsec-secgw.c | 575 +---------------------------------
> examples/ipsec-secgw/ipsec-secgw.h | 4 +
> examples/ipsec-secgw/ipsec.h | 34 ++
> examples/ipsec-secgw/ipsec_process.c | 49 +--
> examples/ipsec-secgw/ipsec_worker.h | 588 +++++++++++++++++++++++++++++++++++
> 5 files changed, 630 insertions(+), 620 deletions(-)
>
> diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
> index 42b5081..959a20b 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.c
> +++ b/examples/ipsec-secgw/ipsec-secgw.c
> @@ -70,11 +70,6 @@ volatile bool force_quit;
>
> #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
>
> -/* Configure how many packets ahead to prefetch, when reading packets */
> -#define PREFETCH_OFFSET 3
> -
> -#define MAX_RX_QUEUE_PER_LCORE 16
> -
> #define MAX_LCORE_PARAMS 1024
>
> /*
> @@ -191,9 +186,9 @@ static uint64_t dev_tx_offload = UINT64_MAX;
> /*
> * global values that determine multi-seg policy
> */
> -static uint32_t frag_tbl_sz;
> +uint32_t frag_tbl_sz;
> static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
> -static uint32_t mtu_size = RTE_ETHER_MTU;
> +uint32_t mtu_size = RTE_ETHER_MTU;
> static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
> static uint32_t stats_interval;
>
> @@ -205,11 +200,6 @@ struct app_sa_prm app_sa_prm = {
> };
> static const char *cfgfile;
>
> -struct lcore_rx_queue {
> - uint16_t port_id;
> - uint8_t queue_id;
> -} __rte_cache_aligned;
> -
> struct lcore_params {
> uint16_t port_id;
> uint8_t queue_id;
> @@ -224,28 +214,7 @@ static uint16_t nb_lcore_params;
> static struct rte_hash *cdev_map_in;
> static struct rte_hash *cdev_map_out;
>
> -struct buffer {
> - uint16_t len;
> - struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
> -};
> -
> -struct lcore_conf {
> - uint16_t nb_rx_queue;
> - struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
> - uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
> - struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
> - struct ipsec_ctx inbound;
> - struct ipsec_ctx outbound;
> - struct rt_ctx *rt4_ctx;
> - struct rt_ctx *rt6_ctx;
> - struct {
> - struct rte_ip_frag_tbl *tbl;
> - struct rte_mempool *pool_indir;
> - struct rte_ip_frag_death_row dr;
> - } frag;
> -} __rte_cache_aligned;
> -
> -static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
> +struct lcore_conf lcore_conf[RTE_MAX_LCORE];
>
> static struct rte_eth_conf port_conf = {
> .rxmode = {
> @@ -281,32 +250,6 @@ multi_seg_required(void)
> frame_buf_size || frag_tbl_sz != 0);
> }
>
> -static inline void
> -adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
> - uint32_t l2_len)
> -{
> - uint32_t plen, trim;
> -
> - plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
> - if (plen < m->pkt_len) {
> - trim = m->pkt_len - plen;
> - rte_pktmbuf_trim(m, trim);
> - }
> -}
> -
> -static inline void
> -adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
> - uint32_t l2_len)
> -{
> - uint32_t plen, trim;
> -
> - plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
> - if (plen < m->pkt_len) {
> - trim = m->pkt_len - plen;
> - rte_pktmbuf_trim(m, trim);
> - }
> -}
> -
>
> struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
>
> @@ -371,369 +314,6 @@ print_stats_cb(__rte_unused void *param)
> rte_eal_alarm_set(stats_interval * US_PER_S, print_stats_cb, NULL);
> }
>
> -static inline void
> -prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
> -{
> - const struct rte_ether_hdr *eth;
> - const struct rte_ipv4_hdr *iph4;
> - const struct rte_ipv6_hdr *iph6;
> - const struct rte_udp_hdr *udp;
> - uint16_t ip4_hdr_len;
> - uint16_t nat_port;
> -
> - eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
> - if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
> -
> - iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
> - RTE_ETHER_HDR_LEN);
> - adjust_ipv4_pktlen(pkt, iph4, 0);
> -
> - switch (iph4->next_proto_id) {
> - case IPPROTO_ESP:
> - t->ipsec.pkts[(t->ipsec.num)++] = pkt;
> - break;
> - case IPPROTO_UDP:
> - if (app_sa_prm.udp_encap == 1) {
> - ip4_hdr_len = ((iph4->version_ihl &
> - RTE_IPV4_HDR_IHL_MASK) *
> - RTE_IPV4_IHL_MULTIPLIER);
> - udp = rte_pktmbuf_mtod_offset(pkt,
> - struct rte_udp_hdr *, ip4_hdr_len);
> - nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
> - if (udp->src_port == nat_port ||
> - udp->dst_port == nat_port){
> - t->ipsec.pkts[(t->ipsec.num)++] = pkt;
> - pkt->packet_type |=
> - MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
> - break;
> - }
> - }
> - /* Fall through */
> - default:
> - t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
> - t->ip4.pkts[(t->ip4.num)++] = pkt;
> - }
> - pkt->l2_len = 0;
> - pkt->l3_len = sizeof(*iph4);
> - pkt->packet_type |= RTE_PTYPE_L3_IPV4;
> - if (pkt->packet_type & RTE_PTYPE_L4_TCP)
> - pkt->l4_len = sizeof(struct rte_tcp_hdr);
> - else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
> - pkt->l4_len = sizeof(struct rte_udp_hdr);
> - } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
> - int next_proto;
> - size_t l3len, ext_len;
> - uint8_t *p;
> -
> - /* get protocol type */
> - iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
> - RTE_ETHER_HDR_LEN);
> - adjust_ipv6_pktlen(pkt, iph6, 0);
> -
> - next_proto = iph6->proto;
> -
> - /* determine l3 header size up to ESP extension */
> - l3len = sizeof(struct ip6_hdr);
> - p = rte_pktmbuf_mtod(pkt, uint8_t *);
> - while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
> - (next_proto = rte_ipv6_get_next_ext(p + l3len,
> - next_proto, &ext_len)) >= 0)
> - l3len += ext_len;
> -
> - /* drop packet when IPv6 header exceeds first segment length */
> - if (unlikely(l3len > pkt->data_len)) {
> - free_pkts(&pkt, 1);
> - return;
> - }
> -
> - switch (next_proto) {
> - case IPPROTO_ESP:
> - t->ipsec.pkts[(t->ipsec.num)++] = pkt;
> - break;
> - case IPPROTO_UDP:
> - if (app_sa_prm.udp_encap == 1) {
> - udp = rte_pktmbuf_mtod_offset(pkt,
> - struct rte_udp_hdr *, l3len);
> - nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
> - if (udp->src_port == nat_port ||
> - udp->dst_port == nat_port){
> - t->ipsec.pkts[(t->ipsec.num)++] = pkt;
> - pkt->packet_type |=
> - MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
> - break;
> - }
> - }
> - /* Fall through */
> - default:
> - t->ip6.data[t->ip6.num] = &iph6->proto;
> - t->ip6.pkts[(t->ip6.num)++] = pkt;
> - }
> - pkt->l2_len = 0;
> - pkt->l3_len = l3len;
> - pkt->packet_type |= RTE_PTYPE_L3_IPV6;
> - } else {
> - /* Unknown/Unsupported type, drop the packet */
> - RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
> - rte_be_to_cpu_16(eth->ether_type));
> - free_pkts(&pkt, 1);
> - return;
> - }
> -
> - /* Check if the packet has been processed inline. For inline protocol
> - * processed packets, the metadata in the mbuf can be used to identify
> - * the security processing done on the packet. The metadata will be
> - * used to retrieve the application registered userdata associated
> - * with the security session.
> - */
> -
> - if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
> - rte_security_dynfield_is_registered()) {
> - struct ipsec_sa *sa;
> - struct ipsec_mbuf_metadata *priv;
> - struct rte_security_ctx *ctx = (struct rte_security_ctx *)
> - rte_eth_dev_get_sec_ctx(
> - pkt->port);
> -
> - /* Retrieve the userdata registered. Here, the userdata
> - * registered is the SA pointer.
> - */
> - sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
> - *rte_security_dynfield(pkt));
> - if (sa == NULL) {
> - /* userdata could not be retrieved */
> - return;
> - }
> -
> - /* Save SA as priv member in mbuf. This will be used in the
> - * IPsec selector(SP-SA) check.
> - */
> -
> - priv = get_priv(pkt);
> - priv->sa = sa;
> - }
> -}
> -
> -static inline void
> -prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
> - uint16_t nb_pkts)
> -{
> - int32_t i;
> -
> - t->ipsec.num = 0;
> - t->ip4.num = 0;
> - t->ip6.num = 0;
> -
> - for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
> - rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
> - void *));
> - prepare_one_packet(pkts[i], t);
> - }
> - /* Process left packets */
> - for (; i < nb_pkts; i++)
> - prepare_one_packet(pkts[i], t);
> -}
> -
> -static inline void
> -prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
> - const struct lcore_conf *qconf)
> -{
> - struct ip *ip;
> - struct rte_ether_hdr *ethhdr;
> -
> - ip = rte_pktmbuf_mtod(pkt, struct ip *);
> -
> - ethhdr = (struct rte_ether_hdr *)
> - rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
> -
> - if (ip->ip_v == IPVERSION) {
> - pkt->ol_flags |= qconf->outbound.ipv4_offloads;
> - pkt->l3_len = sizeof(struct ip);
> - pkt->l2_len = RTE_ETHER_HDR_LEN;
> -
> - ip->ip_sum = 0;
> -
> - /* calculate IPv4 cksum in SW */
> - if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
> - ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
> -
> - ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
> - } else {
> - pkt->ol_flags |= qconf->outbound.ipv6_offloads;
> - pkt->l3_len = sizeof(struct ip6_hdr);
> - pkt->l2_len = RTE_ETHER_HDR_LEN;
> -
> - ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
> - }
> -
> - memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
> - sizeof(struct rte_ether_addr));
> - memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
> - sizeof(struct rte_ether_addr));
> -}
> -
> -static inline void
> -prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
> - const struct lcore_conf *qconf)
> -{
> - int32_t i;
> - const int32_t prefetch_offset = 2;
> -
> - for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
> - rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
> - prepare_tx_pkt(pkts[i], port, qconf);
> - }
> - /* Process left packets */
> - for (; i < nb_pkts; i++)
> - prepare_tx_pkt(pkts[i], port, qconf);
> -}
> -
> -/* Send burst of packets on an output interface */
> -static inline int32_t
> -send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
> -{
> - struct rte_mbuf **m_table;
> - int32_t ret;
> - uint16_t queueid;
> -
> - queueid = qconf->tx_queue_id[port];
> - m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
> -
> - prepare_tx_burst(m_table, n, port, qconf);
> -
> - ret = rte_eth_tx_burst(port, queueid, m_table, n);
> -
> - core_stats_update_tx(ret);
> -
> - if (unlikely(ret < n)) {
> - do {
> - free_pkts(&m_table[ret], 1);
> - } while (++ret < n);
> - }
> -
> - return 0;
> -}
> -
> -/*
> - * Helper function to fragment and queue for TX one packet.
> - */
> -static inline uint32_t
> -send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
> - uint16_t port, uint8_t proto)
> -{
> - struct buffer *tbl;
> - uint32_t len, n;
> - int32_t rc;
> -
> - tbl = qconf->tx_mbufs + port;
> - len = tbl->len;
> -
> - /* free space for new fragments */
> - if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
> - send_burst(qconf, len, port);
> - len = 0;
> - }
> -
> - n = RTE_DIM(tbl->m_table) - len;
> -
> - if (proto == IPPROTO_IP)
> - rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
> - n, mtu_size, m->pool, qconf->frag.pool_indir);
> - else
> - rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
> - n, mtu_size, m->pool, qconf->frag.pool_indir);
> -
> - if (rc >= 0)
> - len += rc;
> - else
> - RTE_LOG(ERR, IPSEC,
> - "%s: failed to fragment packet with size %u, "
> - "error code: %d\n",
> - __func__, m->pkt_len, rte_errno);
> -
> - free_pkts(&m, 1);
> - return len;
> -}
> -
> -/* Enqueue a single packet, and send burst if queue is filled */
> -static inline int32_t
> -send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
> -{
> - uint32_t lcore_id;
> - uint16_t len;
> - struct lcore_conf *qconf;
> -
> - lcore_id = rte_lcore_id();
> -
> - qconf = &lcore_conf[lcore_id];
> - len = qconf->tx_mbufs[port].len;
> -
> - if (m->pkt_len <= mtu_size) {
> - qconf->tx_mbufs[port].m_table[len] = m;
> - len++;
> -
> - /* need to fragment the packet */
> - } else if (frag_tbl_sz > 0)
> - len = send_fragment_packet(qconf, m, port, proto);
> - else
> - free_pkts(&m, 1);
> -
> - /* enough pkts to be sent */
> - if (unlikely(len == MAX_PKT_BURST)) {
> - send_burst(qconf, MAX_PKT_BURST, port);
> - len = 0;
> - }
> -
> - qconf->tx_mbufs[port].len = len;
> - return 0;
> -}
> -
> -static inline void
> -inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
> - uint16_t lim, struct ipsec_spd_stats *stats)
> -{
> - struct rte_mbuf *m;
> - uint32_t i, j, res, sa_idx;
> -
> - if (ip->num == 0 || sp == NULL)
> - return;
> -
> - rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
> - ip->num, DEFAULT_MAX_CATEGORIES);
> -
> - j = 0;
> - for (i = 0; i < ip->num; i++) {
> - m = ip->pkts[i];
> - res = ip->res[i];
> - if (res == BYPASS) {
> - ip->pkts[j++] = m;
> - stats->bypass++;
> - continue;
> - }
> - if (res == DISCARD) {
> - free_pkts(&m, 1);
> - stats->discard++;
> - continue;
> - }
> -
> - /* Only check SPI match for processed IPSec packets */
> - if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
> - stats->discard++;
> - free_pkts(&m, 1);
> - continue;
> - }
> -
> - sa_idx = res - 1;
> - if (!inbound_sa_check(sa, m, sa_idx)) {
> - stats->discard++;
> - free_pkts(&m, 1);
> - continue;
> - }
> - ip->pkts[j++] = m;
> - stats->protect++;
> - }
> - ip->num = j;
> -}
> -
> static void
> split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
> {
> @@ -962,140 +542,6 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
> }
> }
>
> -static inline int32_t
> -get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
> -{
> - struct ipsec_mbuf_metadata *priv;
> - struct ipsec_sa *sa;
> -
> - priv = get_priv(pkt);
> -
> - sa = priv->sa;
> - if (unlikely(sa == NULL)) {
> - RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
> - goto fail;
> - }
> -
> - if (is_ipv6)
> - return sa->portid;
> -
> - /* else */
> - return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
> -
> -fail:
> - if (is_ipv6)
> - return -1;
> -
> - /* else */
> - return 0;
> -}
> -
> -static inline void
> -route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
> -{
> - uint32_t hop[MAX_PKT_BURST * 2];
> - uint32_t dst_ip[MAX_PKT_BURST * 2];
> - int32_t pkt_hop = 0;
> - uint16_t i, offset;
> - uint16_t lpm_pkts = 0;
> - unsigned int lcoreid = rte_lcore_id();
> -
> - if (nb_pkts == 0)
> - return;
> -
> - /* Need to do an LPM lookup for non-inline packets. Inline packets will
> - * have port ID in the SA
> - */
> -
> - for (i = 0; i < nb_pkts; i++) {
> - if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
> - /* Security offload not enabled. So an LPM lookup is
> - * required to get the hop
> - */
> - offset = offsetof(struct ip, ip_dst);
> - dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
> - uint32_t *, offset);
> - dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
> - lpm_pkts++;
> - }
> - }
> -
> - rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
> -
> - lpm_pkts = 0;
> -
> - for (i = 0; i < nb_pkts; i++) {
> - if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
> - /* Read hop from the SA */
> - pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
> - } else {
> - /* Need to use hop returned by lookup */
> - pkt_hop = hop[lpm_pkts++];
> - }
> -
> - if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
> - core_statistics[lcoreid].lpm4.miss++;
> - free_pkts(&pkts[i], 1);
> - continue;
> - }
> - send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
> - }
> -}
> -
> -static inline void
> -route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
> -{
> - int32_t hop[MAX_PKT_BURST * 2];
> - uint8_t dst_ip[MAX_PKT_BURST * 2][16];
> - uint8_t *ip6_dst;
> - int32_t pkt_hop = 0;
> - uint16_t i, offset;
> - uint16_t lpm_pkts = 0;
> - unsigned int lcoreid = rte_lcore_id();
> -
> - if (nb_pkts == 0)
> - return;
> -
> - /* Need to do an LPM lookup for non-inline packets. Inline packets will
> - * have port ID in the SA
> - */
> -
> - for (i = 0; i < nb_pkts; i++) {
> - if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
> - /* Security offload not enabled. So an LPM lookup is
> - * required to get the hop
> - */
> - offset = offsetof(struct ip6_hdr, ip6_dst);
> - ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
> - offset);
> - memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
> - lpm_pkts++;
> - }
> - }
> -
> - rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
> - lpm_pkts);
> -
> - lpm_pkts = 0;
> -
> - for (i = 0; i < nb_pkts; i++) {
> - if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
> - /* Read hop from the SA */
> - pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
> - } else {
> - /* Need to use hop returned by lookup */
> - pkt_hop = hop[lpm_pkts++];
> - }
> -
> - if (pkt_hop == -1) {
> - core_statistics[lcoreid].lpm6.miss++;
> - free_pkts(&pkts[i], 1);
> - continue;
> - }
> - send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
> - }
> -}
> -
> static inline void
> process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
> uint8_t nb_pkts, uint16_t portid)
> @@ -1121,21 +567,6 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
> }
>
> static inline void
> -drain_tx_buffers(struct lcore_conf *qconf)
> -{
> - struct buffer *buf;
> - uint32_t portid;
> -
> - for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
> - buf = &qconf->tx_mbufs[portid];
> - if (buf->len == 0)
> - continue;
> - send_burst(qconf, buf->len, portid);
> - buf->len = 0;
> - }
> -}
> -
> -static inline void
> drain_crypto_buffers(struct lcore_conf *qconf)
> {
> uint32_t i;
> diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
> index 24f11ad..fceb835 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.h
> +++ b/examples/ipsec-secgw/ipsec-secgw.h
> @@ -6,6 +6,7 @@
>
> #include <stdbool.h>
>
> +#define MAX_RX_QUEUE_PER_LCORE 16
>
> #define NB_SOCKETS 4
>
> @@ -136,6 +137,9 @@ extern uint32_t nb_bufs_in_pool;
>
> extern bool per_port_pool;
>
> +extern uint32_t mtu_size;
> +extern uint32_t frag_tbl_sz;
> +
> static inline uint8_t
> is_unprotected_port(uint16_t port_id)
> {
> diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
> index ccfde8e..9a4e7ea 100644
> --- a/examples/ipsec-secgw/ipsec.h
> +++ b/examples/ipsec-secgw/ipsec.h
> @@ -9,6 +9,7 @@
>
> #include <rte_byteorder.h>
> #include <rte_crypto.h>
> +#include <rte_ip_frag.h>
> #include <rte_security.h>
> #include <rte_flow.h>
> #include <rte_ipsec.h>
> @@ -37,6 +38,11 @@
>
> #define IP6_VERSION (6)
>
> +#define SATP_OUT_IPV4(t) \
> + ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
> + (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
> + ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
> +
> struct rte_crypto_xform;
> struct ipsec_xform;
> struct rte_mbuf;
> @@ -260,6 +266,34 @@ struct cnt_blk {
> uint32_t cnt;
> } __rte_packed;
>
> +struct lcore_rx_queue {
> + uint16_t port_id;
> + uint8_t queue_id;
> +} __rte_cache_aligned;
> +
> +struct buffer {
> + uint16_t len;
> + struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
> +};
> +
> +struct lcore_conf {
> + uint16_t nb_rx_queue;
> + struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
> + uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
> + struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
> + struct ipsec_ctx inbound;
> + struct ipsec_ctx outbound;
> + struct rt_ctx *rt4_ctx;
> + struct rt_ctx *rt6_ctx;
> + struct {
> + struct rte_ip_frag_tbl *tbl;
> + struct rte_mempool *pool_indir;
> + struct rte_ip_frag_death_row dr;
> + } frag;
> +} __rte_cache_aligned;
> +
> +extern struct lcore_conf lcore_conf[RTE_MAX_LCORE];
> +
> /* Socket ctx */
> extern struct socket_ctx socket_ctx[NB_SOCKETS];
>
> diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
> index 285e9c7..089d89f 100644
> --- a/examples/ipsec-secgw/ipsec_process.c
> +++ b/examples/ipsec-secgw/ipsec_process.c
> @@ -13,11 +13,7 @@
>
> #include "ipsec.h"
> #include "ipsec-secgw.h"
> -
> -#define SATP_OUT_IPV4(t) \
> - ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
> - (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
> - ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
> +#include "ipsec_worker.h"
>
> /* helper routine to free bulk of crypto-ops and related packets */
> static inline void
> @@ -209,49 +205,6 @@ ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
> }
>
> /*
> - * helper routine for inline and cpu(synchronous) processing
> - * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
> - * Should be removed in future.
> - */
> -static inline void
> -prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
> -{
> - uint32_t j;
> - struct ipsec_mbuf_metadata *priv;
> -
> - for (j = 0; j != cnt; j++) {
> - priv = get_priv(mb[j]);
> - priv->sa = sa;
> - /* setup TSO related fields if TSO enabled*/
> - if (priv->sa->mss) {
> - uint32_t ptype = mb[j]->packet_type;
> - /* only TCP is supported */
> - if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
> - mb[j]->tso_segsz = priv->sa->mss;
> - if ((IS_TUNNEL(priv->sa->flags))) {
> - mb[j]->outer_l3_len = mb[j]->l3_len;
> - mb[j]->outer_l2_len = mb[j]->l2_len;
> - mb[j]->ol_flags |=
> - RTE_MBUF_F_TX_TUNNEL_ESP;
> - if (RTE_ETH_IS_IPV4_HDR(ptype))
> - mb[j]->ol_flags |=
> - RTE_MBUF_F_TX_OUTER_IP_CKSUM;
> - }
> - mb[j]->l4_len = sizeof(struct rte_tcp_hdr);
> - mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG |
> - RTE_MBUF_F_TX_TCP_CKSUM);
> - if (RTE_ETH_IS_IPV4_HDR(ptype))
> - mb[j]->ol_flags |=
> - RTE_MBUF_F_TX_OUTER_IPV4;
> - else
> - mb[j]->ol_flags |=
> - RTE_MBUF_F_TX_OUTER_IPV6;
> - }
> - }
> - }
> -}
> -
> -/*
> * finish processing of packets successfully decrypted by an inline processor
> */
> static uint32_t
> diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
> index 5d85cf1..e0b0a82 100644
> --- a/examples/ipsec-secgw/ipsec_worker.h
> +++ b/examples/ipsec-secgw/ipsec_worker.h
> @@ -4,8 +4,15 @@
> #ifndef _IPSEC_WORKER_H_
> #define _IPSEC_WORKER_H_
>
> +#include <rte_acl.h>
> +#include <rte_ethdev.h>
> +#include <rte_lpm.h>
> +#include <rte_lpm6.h>
> +
> #include "ipsec.h"
>
> +/* Configure how many packets ahead to prefetch, when reading packets */
> +#define PREFETCH_OFFSET 3
> enum pkt_type {
> PKT_TYPE_PLAIN_IPV4 = 1,
> PKT_TYPE_IPSEC_IPV4,
> @@ -38,4 +45,585 @@ void ipsec_poll_mode_worker(void);
>
> int ipsec_launch_one_lcore(void *args);
>
> +/*
> + * helper routine for inline and cpu(synchronous) processing
> + * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
> + * Should be removed in future.
> + */
> +static inline void
> +prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
> +{
> + uint32_t j;
> + struct ipsec_mbuf_metadata *priv;
> +
> + for (j = 0; j != cnt; j++) {
> + priv = get_priv(mb[j]);
> + priv->sa = sa;
> + /* setup TSO related fields if TSO enabled*/
> + if (priv->sa->mss) {
> + uint32_t ptype = mb[j]->packet_type;
> + /* only TCP is supported */
> + if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
> + mb[j]->tso_segsz = priv->sa->mss;
> + if ((IS_TUNNEL(priv->sa->flags))) {
> + mb[j]->outer_l3_len = mb[j]->l3_len;
> + mb[j]->outer_l2_len = mb[j]->l2_len;
> + mb[j]->ol_flags |=
> + RTE_MBUF_F_TX_TUNNEL_ESP;
> + if (RTE_ETH_IS_IPV4_HDR(ptype))
> + mb[j]->ol_flags |=
> + RTE_MBUF_F_TX_OUTER_IP_CKSUM;
> + }
> + mb[j]->l4_len = sizeof(struct rte_tcp_hdr);
> + mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG |
> + RTE_MBUF_F_TX_TCP_CKSUM);
> + if (RTE_ETH_IS_IPV4_HDR(ptype))
> + mb[j]->ol_flags |=
> + RTE_MBUF_F_TX_OUTER_IPV4;
> + else
> + mb[j]->ol_flags |=
> + RTE_MBUF_F_TX_OUTER_IPV6;
> + }
> + }
> + }
> +}
> +
> +static inline void
> +adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
> + uint32_t l2_len)
> +{
> + uint32_t plen, trim;
> +
> + plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
> + if (plen < m->pkt_len) {
> + trim = m->pkt_len - plen;
> + rte_pktmbuf_trim(m, trim);
> + }
> +}
> +
> +static inline void
> +adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
> + uint32_t l2_len)
> +{
> + uint32_t plen, trim;
> +
> + plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
> + if (plen < m->pkt_len) {
> + trim = m->pkt_len - plen;
> + rte_pktmbuf_trim(m, trim);
> + }
> +}
> +
> +static inline void
> +prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
> +{
> + const struct rte_ether_hdr *eth;
> + const struct rte_ipv4_hdr *iph4;
> + const struct rte_ipv6_hdr *iph6;
> + const struct rte_udp_hdr *udp;
> + uint16_t ip4_hdr_len;
> + uint16_t nat_port;
> +
> + eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
> + if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
> +
> + iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
> + RTE_ETHER_HDR_LEN);
> + adjust_ipv4_pktlen(pkt, iph4, 0);
> +
> + switch (iph4->next_proto_id) {
> + case IPPROTO_ESP:
> + t->ipsec.pkts[(t->ipsec.num)++] = pkt;
> + break;
> + case IPPROTO_UDP:
> + if (app_sa_prm.udp_encap == 1) {
> + ip4_hdr_len = ((iph4->version_ihl &
> + RTE_IPV4_HDR_IHL_MASK) *
> + RTE_IPV4_IHL_MULTIPLIER);
> + udp = rte_pktmbuf_mtod_offset(pkt,
> + struct rte_udp_hdr *, ip4_hdr_len);
> + nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
> + if (udp->src_port == nat_port ||
> + udp->dst_port == nat_port){
> + t->ipsec.pkts[(t->ipsec.num)++] = pkt;
> + pkt->packet_type |=
> + MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
> + break;
> + }
> + }
> + /* Fall through */
> + default:
> + t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
> + t->ip4.pkts[(t->ip4.num)++] = pkt;
> + }
> + pkt->l2_len = 0;
> + pkt->l3_len = sizeof(*iph4);
> + pkt->packet_type |= RTE_PTYPE_L3_IPV4;
> + if (pkt->packet_type & RTE_PTYPE_L4_TCP)
> + pkt->l4_len = sizeof(struct rte_tcp_hdr);
> + else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
> + pkt->l4_len = sizeof(struct rte_udp_hdr);
> + } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
> + int next_proto;
> + size_t l3len, ext_len;
> + uint8_t *p;
> +
> + /* get protocol type */
> + iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
> + RTE_ETHER_HDR_LEN);
> + adjust_ipv6_pktlen(pkt, iph6, 0);
> +
> + next_proto = iph6->proto;
> +
> + /* determine l3 header size up to ESP extension */
> + l3len = sizeof(struct ip6_hdr);
> + p = rte_pktmbuf_mtod(pkt, uint8_t *);
> + while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
> + (next_proto = rte_ipv6_get_next_ext(p + l3len,
> + next_proto, &ext_len)) >= 0)
> + l3len += ext_len;
> +
> + /* drop packet when IPv6 header exceeds first segment length */
> + if (unlikely(l3len > pkt->data_len)) {
> + free_pkts(&pkt, 1);
> + return;
> + }
> +
> + switch (next_proto) {
> + case IPPROTO_ESP:
> + t->ipsec.pkts[(t->ipsec.num)++] = pkt;
> + break;
> + case IPPROTO_UDP:
> + if (app_sa_prm.udp_encap == 1) {
> + udp = rte_pktmbuf_mtod_offset(pkt,
> + struct rte_udp_hdr *, l3len);
> + nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
> + if (udp->src_port == nat_port ||
> + udp->dst_port == nat_port){
> + t->ipsec.pkts[(t->ipsec.num)++] = pkt;
> + pkt->packet_type |=
> + MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
> + break;
> + }
> + }
> + /* Fall through */
> + default:
> + t->ip6.data[t->ip6.num] = &iph6->proto;
> + t->ip6.pkts[(t->ip6.num)++] = pkt;
> + }
> + pkt->l2_len = 0;
> + pkt->l3_len = l3len;
> + pkt->packet_type |= RTE_PTYPE_L3_IPV6;
> + } else {
> + /* Unknown/Unsupported type, drop the packet */
> + RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
> + rte_be_to_cpu_16(eth->ether_type));
> + free_pkts(&pkt, 1);
> + return;
> + }
> +
> + /* Check if the packet has been processed inline. For inline protocol
> + * processed packets, the metadata in the mbuf can be used to identify
> + * the security processing done on the packet. The metadata will be
> + * used to retrieve the application registered userdata associated
> + * with the security session.
> + */
> +
> + if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
> + rte_security_dynfield_is_registered()) {
> + struct ipsec_sa *sa;
> + struct ipsec_mbuf_metadata *priv;
> + struct rte_security_ctx *ctx = (struct rte_security_ctx *)
> + rte_eth_dev_get_sec_ctx(
> + pkt->port);
> +
> + /* Retrieve the userdata registered. Here, the userdata
> + * registered is the SA pointer.
> + */
> + sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
> + *rte_security_dynfield(pkt));
> + if (sa == NULL) {
> + /* userdata could not be retrieved */
> + return;
> + }
> +
> + /* Save SA as priv member in mbuf. This will be used in the
> + * IPsec selector(SP-SA) check.
> + */
> +
> + priv = get_priv(pkt);
> + priv->sa = sa;
> + }
> +}
> +
> +static inline void
> +prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
> + uint16_t nb_pkts)
> +{
> + int32_t i;
> +
> + t->ipsec.num = 0;
> + t->ip4.num = 0;
> + t->ip6.num = 0;
> +
> + for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
> + rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
> + void *));
> + prepare_one_packet(pkts[i], t);
> + }
> + /* Process left packets */
> + for (; i < nb_pkts; i++)
> + prepare_one_packet(pkts[i], t);
> +}
> +
> +static inline void
> +prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
> + const struct lcore_conf *qconf)
> +{
> + struct ip *ip;
> + struct rte_ether_hdr *ethhdr;
> +
> + ip = rte_pktmbuf_mtod(pkt, struct ip *);
> +
> + ethhdr = (struct rte_ether_hdr *)
> + rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
> +
> + if (ip->ip_v == IPVERSION) {
> + pkt->ol_flags |= qconf->outbound.ipv4_offloads;
> + pkt->l3_len = sizeof(struct ip);
> + pkt->l2_len = RTE_ETHER_HDR_LEN;
> +
> + ip->ip_sum = 0;
> +
> + /* calculate IPv4 cksum in SW */
> + if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
> + ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
> +
> + ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
> + } else {
> + pkt->ol_flags |= qconf->outbound.ipv6_offloads;
> + pkt->l3_len = sizeof(struct ip6_hdr);
> + pkt->l2_len = RTE_ETHER_HDR_LEN;
> +
> + ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
> + }
> +
> + memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
> + sizeof(struct rte_ether_addr));
> + memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
> + sizeof(struct rte_ether_addr));
> +}
> +
> +static inline void
> +prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
> + const struct lcore_conf *qconf)
> +{
> + int32_t i;
> + const int32_t prefetch_offset = 2;
> +
> + for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
> + rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
> + prepare_tx_pkt(pkts[i], port, qconf);
> + }
> + /* Process left packets */
> + for (; i < nb_pkts; i++)
> + prepare_tx_pkt(pkts[i], port, qconf);
> +}
> +
> +/* Send burst of packets on an output interface */
> +static inline int32_t
> +send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
> +{
> + struct rte_mbuf **m_table;
> + int32_t ret;
> + uint16_t queueid;
> +
> + queueid = qconf->tx_queue_id[port];
> + m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
> +
> + prepare_tx_burst(m_table, n, port, qconf);
> +
> + ret = rte_eth_tx_burst(port, queueid, m_table, n);
> +
> + core_stats_update_tx(ret);
> +
> + if (unlikely(ret < n)) {
> + do {
> + free_pkts(&m_table[ret], 1);
> + } while (++ret < n);
> + }
> +
> + return 0;
> +}
> +
> +/*
> + * Helper function to fragment and queue for TX one packet.
> + */
> +static inline uint32_t
> +send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
> + uint16_t port, uint8_t proto)
> +{
> + struct buffer *tbl;
> + uint32_t len, n;
> + int32_t rc;
> +
> + tbl = qconf->tx_mbufs + port;
> + len = tbl->len;
> +
> + /* free space for new fragments */
> + if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
> + send_burst(qconf, len, port);
> + len = 0;
> + }
> +
> + n = RTE_DIM(tbl->m_table) - len;
> +
> + if (proto == IPPROTO_IP)
> + rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
> + n, mtu_size, m->pool, qconf->frag.pool_indir);
> + else
> + rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
> + n, mtu_size, m->pool, qconf->frag.pool_indir);
> +
> + if (rc >= 0)
> + len += rc;
> + else
> + RTE_LOG(ERR, IPSEC,
> + "%s: failed to fragment packet with size %u, "
> + "error code: %d\n",
> + __func__, m->pkt_len, rte_errno);
> +
> + free_pkts(&m, 1);
> + return len;
> +}
> +
> +/* Enqueue a single packet, and send burst if queue is filled */
> +static inline int32_t
> +send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
> +{
> + uint32_t lcore_id;
> + uint16_t len;
> + struct lcore_conf *qconf;
> +
> + lcore_id = rte_lcore_id();
> +
> + qconf = &lcore_conf[lcore_id];
> + len = qconf->tx_mbufs[port].len;
> +
> + if (m->pkt_len <= mtu_size) {
> + qconf->tx_mbufs[port].m_table[len] = m;
> + len++;
> +
> + /* need to fragment the packet */
> + } else if (frag_tbl_sz > 0)
> + len = send_fragment_packet(qconf, m, port, proto);
> + else
> + free_pkts(&m, 1);
> +
> + /* enough pkts to be sent */
> + if (unlikely(len == MAX_PKT_BURST)) {
> + send_burst(qconf, MAX_PKT_BURST, port);
> + len = 0;
> + }
> +
> + qconf->tx_mbufs[port].len = len;
> + return 0;
> +}
> +
> +static inline void
> +inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
> + uint16_t lim, struct ipsec_spd_stats *stats)
> +{
> + struct rte_mbuf *m;
> + uint32_t i, j, res, sa_idx;
> +
> + if (ip->num == 0 || sp == NULL)
> + return;
> +
> + rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
> + ip->num, DEFAULT_MAX_CATEGORIES);
> +
> + j = 0;
> + for (i = 0; i < ip->num; i++) {
> + m = ip->pkts[i];
> + res = ip->res[i];
> + if (res == BYPASS) {
> + ip->pkts[j++] = m;
> + stats->bypass++;
> + continue;
> + }
> + if (res == DISCARD) {
> + free_pkts(&m, 1);
> + stats->discard++;
> + continue;
> + }
> +
> + /* Only check SPI match for processed IPSec packets */
> + if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
> + stats->discard++;
> + free_pkts(&m, 1);
> + continue;
> + }
> +
> + sa_idx = res - 1;
> + if (!inbound_sa_check(sa, m, sa_idx)) {
> + stats->discard++;
> + free_pkts(&m, 1);
> + continue;
> + }
> + ip->pkts[j++] = m;
> + stats->protect++;
> + }
> + ip->num = j;
> +}
> +
> +static inline int32_t
> +get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
> +{
> + struct ipsec_mbuf_metadata *priv;
> + struct ipsec_sa *sa;
> +
> + priv = get_priv(pkt);
> +
> + sa = priv->sa;
> + if (unlikely(sa == NULL)) {
> + RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
> + goto fail;
> + }
> +
> + if (is_ipv6)
> + return sa->portid;
> +
> + /* else */
> + return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
> +
> +fail:
> + if (is_ipv6)
> + return -1;
> +
> + /* else */
> + return 0;
> +}
> +
> +static inline void
> +route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
> +{
> + uint32_t hop[MAX_PKT_BURST * 2];
> + uint32_t dst_ip[MAX_PKT_BURST * 2];
> + int32_t pkt_hop = 0;
> + uint16_t i, offset;
> + uint16_t lpm_pkts = 0;
> + unsigned int lcoreid = rte_lcore_id();
> +
> + if (nb_pkts == 0)
> + return;
> +
> + /* Need to do an LPM lookup for non-inline packets. Inline packets will
> + * have port ID in the SA
> + */
> +
> + for (i = 0; i < nb_pkts; i++) {
> + if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
> + /* Security offload not enabled. So an LPM lookup is
> + * required to get the hop
> + */
> + offset = offsetof(struct ip, ip_dst);
> + dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
> + uint32_t *, offset);
> + dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
> + lpm_pkts++;
> + }
> + }
> +
> + rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
> +
> + lpm_pkts = 0;
> +
> + for (i = 0; i < nb_pkts; i++) {
> + if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
> + /* Read hop from the SA */
> + pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
> + } else {
> + /* Need to use hop returned by lookup */
> + pkt_hop = hop[lpm_pkts++];
> + }
> +
> + if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
> + core_statistics[lcoreid].lpm4.miss++;
> + free_pkts(&pkts[i], 1);
> + continue;
> + }
> + send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
> + }
> +}
> +
> +static inline void
> +route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
> +{
> + int32_t hop[MAX_PKT_BURST * 2];
> + uint8_t dst_ip[MAX_PKT_BURST * 2][16];
> + uint8_t *ip6_dst;
> + int32_t pkt_hop = 0;
> + uint16_t i, offset;
> + uint16_t lpm_pkts = 0;
> + unsigned int lcoreid = rte_lcore_id();
> +
> + if (nb_pkts == 0)
> + return;
> +
> + /* Need to do an LPM lookup for non-inline packets. Inline packets will
> + * have port ID in the SA
> + */
> +
> + for (i = 0; i < nb_pkts; i++) {
> + if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
> + /* Security offload not enabled. So an LPM lookup is
> + * required to get the hop
> + */
> + offset = offsetof(struct ip6_hdr, ip6_dst);
> + ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
> + offset);
> + memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
> + lpm_pkts++;
> + }
> + }
> +
> + rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
> + lpm_pkts);
> +
> + lpm_pkts = 0;
> +
> + for (i = 0; i < nb_pkts; i++) {
> + if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
> + /* Read hop from the SA */
> + pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
> + } else {
> + /* Need to use hop returned by lookup */
> + pkt_hop = hop[lpm_pkts++];
> + }
> +
> + if (pkt_hop == -1) {
> + core_statistics[lcoreid].lpm6.miss++;
> + free_pkts(&pkts[i], 1);
> + continue;
> + }
> + send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
> + }
> +}
> +
> +static inline void
> +drain_tx_buffers(struct lcore_conf *qconf)
> +{
> + struct buffer *buf;
> + uint32_t portid;
> +
> + for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
> + buf = &qconf->tx_mbufs[portid];
> + if (buf->len == 0)
> + continue;
> + send_burst(qconf, buf->len, portid);
> + buf->len = 0;
> + }
> +}
> +
> #endif /* _IPSEC_WORKER_H_ */
^ permalink raw reply [flat|nested] 37+ messages in thread
* RE: [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions
2022-04-28 15:04 ` [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
` (6 preceding siblings ...)
2022-04-29 10:23 ` [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Kumar Dabilpuram
@ 2022-04-29 10:29 ` Akhil Goyal
7 siblings, 0 replies; 37+ messages in thread
From: Akhil Goyal @ 2022-04-29 10:29 UTC (permalink / raw)
To: Nithin Kumar Dabilpuram, Jerin Jacob Kollanukkaran,
konstantin.ananyev, Radu Nicolau, konstantin.v.ananyev
Cc: dev, Anoob Joseph, Nithin Kumar Dabilpuram, Gagandeep Singh,
hemant.agrawal
> Subject: [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions
>
> Move fast path helper functions to header file for easy access.
>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
> ---
Series
Acked-by: Akhil Goyal <gakhil@marvell.com>
@konstantin.ananyev@intel.com: any more comments on this series?
> v3:
> - In patch 7/7, in Inline Protocol single sa mode's worker thread, further
> reduce processing by getting the proto from the Single outbound SA flags.
> Keep processing as minimal as possible as single-sa mode is only
> for benchmarking drivers.
^ permalink raw reply [flat|nested] 37+ messages in thread
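
The v3 note quoted above refers to resolving the outbound IP protocol once from the single outbound SA's flags at worker start-up, so the per-packet loop only hands packets to send_single_packet() without parsing headers. A minimal sketch of that idea follows; the structure and the IP6_TUNNEL flag value here are illustrative stand-ins, not the application's definitions.

#include <netinet/in.h> /* IPPROTO_IP, IPPROTO_IPV6 */
#include <stdint.h>
#include <stdio.h>

#define IP6_TUNNEL (1u << 1) /* illustrative value; the real flag lives in the app's SA code */

struct example_sa {
	uint32_t flags;
};

/* Resolve the outbound protocol once, outside the per-packet loop:
 * IPv6 tunnel SAs emit IPv6 outer headers, everything else IPv4 here. */
static uint8_t
outbound_proto_from_sa(const struct example_sa *sa)
{
	return (sa->flags & IP6_TUNNEL) ? IPPROTO_IPV6 : IPPROTO_IP;
}

int main(void)
{
	struct example_sa v6_tun = { .flags = IP6_TUNNEL };
	struct example_sa v4_tun = { .flags = 0 };

	printf("IPv6 tunnel SA -> proto %d\n", outbound_proto_from_sa(&v6_tun));
	printf("IPv4 tunnel SA -> proto %d\n", outbound_proto_from_sa(&v4_tun));
	return 0;
}
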
* [PATCH v4 1/7] examples/ipsec-secgw: move fast path helper functions
2022-03-22 17:58 [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
` (9 preceding siblings ...)
2022-04-28 15:04 ` [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
@ 2022-04-29 20:44 ` Nithin Dabilpuram
2022-04-29 20:44 ` [PATCH v4 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
` (6 more replies)
10 siblings, 7 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-29 20:44 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Move fast path helper functions to header file for easy access.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
---
v4:
- Fix NULL pointer access in patch 7/7's inline protocol single sa worker
thread function when outbound SA doesn't exist.
v3:
- In patch 7/7, in Inline Protocol single sa mode's worker thread, further
reduce processing by getting the proto from the Single outbound SA flags.
Keep processing as minimal as possible as single-sa mode is only
for benchmarking drivers.
v2:
- Moved this patch from 4/7 to 1/7 to keep all moving as first patch
without any change in function.
- In patch 1/7, handled comments from Konstantin to check for capabilities before
using Tx offload in case of LA and also to enable Checksum offload in case of
TSO+Inline Protocol
- In patch 2/7, handled comments from Konstantin to use RTE_ETH_IS* macros and
- In patch 2/7, used tx_offload field and RTE_MBUF_L2_LEN_BITS shift to write to
mbuf->tx_offload instead of bitfield access, so that the field is cleared and
there are only stores and no loads (a rough sketch of this idea follows this list).
- In patch 5/7, made few fast path functions always_inline
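
On the mbuf->tx_offload point above, the idea is to compose l2_len and l3_len into one value and write the whole tx_offload field with a single store, which also clears l4_len, tso_segsz and the outer lengths and avoids a read-modify-write of the bitfields. The helper below is only an illustration, assuming the standard rte_mbuf layout where l3_len sits RTE_MBUF_L2_LEN_BITS above l2_len; it is not the exact expression used in patch 2/7.

#include <rte_mbuf.h>

/* Illustrative helper: one store sets l2_len/l3_len and zeroes the rest of
 * tx_offload (l4_len, tso_segsz, outer_l2_len, outer_l3_len). */
static inline void
set_l2_l3_len(struct rte_mbuf *m, uint64_t l2_len, uint64_t l3_len)
{
	m->tx_offload = l2_len | (l3_len << RTE_MBUF_L2_LEN_BITS);
}

/* Typical use for a plain IPv4 packet behind an Ethernet header:
 * set_l2_l3_len(pkt, RTE_ETHER_HDR_LEN, sizeof(struct rte_ipv4_hdr));
 */
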
examples/ipsec-secgw/ipsec-secgw.c | 575 +---------------------------------
examples/ipsec-secgw/ipsec-secgw.h | 4 +
examples/ipsec-secgw/ipsec.h | 34 ++
examples/ipsec-secgw/ipsec_process.c | 49 +--
examples/ipsec-secgw/ipsec_worker.h | 588 +++++++++++++++++++++++++++++++++++
5 files changed, 630 insertions(+), 620 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 42b5081..959a20b 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -70,11 +70,6 @@ volatile bool force_quit;
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-/* Configure how many packets ahead to prefetch, when reading packets */
-#define PREFETCH_OFFSET 3
-
-#define MAX_RX_QUEUE_PER_LCORE 16
-
#define MAX_LCORE_PARAMS 1024
/*
@@ -191,9 +186,9 @@ static uint64_t dev_tx_offload = UINT64_MAX;
/*
* global values that determine multi-seg policy
*/
-static uint32_t frag_tbl_sz;
+uint32_t frag_tbl_sz;
static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
-static uint32_t mtu_size = RTE_ETHER_MTU;
+uint32_t mtu_size = RTE_ETHER_MTU;
static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
static uint32_t stats_interval;
@@ -205,11 +200,6 @@ struct app_sa_prm app_sa_prm = {
};
static const char *cfgfile;
-struct lcore_rx_queue {
- uint16_t port_id;
- uint8_t queue_id;
-} __rte_cache_aligned;
-
struct lcore_params {
uint16_t port_id;
uint8_t queue_id;
@@ -224,28 +214,7 @@ static uint16_t nb_lcore_params;
static struct rte_hash *cdev_map_in;
static struct rte_hash *cdev_map_out;
-struct buffer {
- uint16_t len;
- struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
-};
-
-struct lcore_conf {
- uint16_t nb_rx_queue;
- struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
- uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
- struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
- struct ipsec_ctx inbound;
- struct ipsec_ctx outbound;
- struct rt_ctx *rt4_ctx;
- struct rt_ctx *rt6_ctx;
- struct {
- struct rte_ip_frag_tbl *tbl;
- struct rte_mempool *pool_indir;
- struct rte_ip_frag_death_row dr;
- } frag;
-} __rte_cache_aligned;
-
-static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
+struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
.rxmode = {
@@ -281,32 +250,6 @@ multi_seg_required(void)
frame_buf_size || frag_tbl_sz != 0);
}
-static inline void
-adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
- uint32_t l2_len)
-{
- uint32_t plen, trim;
-
- plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
- if (plen < m->pkt_len) {
- trim = m->pkt_len - plen;
- rte_pktmbuf_trim(m, trim);
- }
-}
-
-static inline void
-adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
- uint32_t l2_len)
-{
- uint32_t plen, trim;
-
- plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
- if (plen < m->pkt_len) {
- trim = m->pkt_len - plen;
- rte_pktmbuf_trim(m, trim);
- }
-}
-
struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
@@ -371,369 +314,6 @@ print_stats_cb(__rte_unused void *param)
rte_eal_alarm_set(stats_interval * US_PER_S, print_stats_cb, NULL);
}
-static inline void
-prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
-{
- const struct rte_ether_hdr *eth;
- const struct rte_ipv4_hdr *iph4;
- const struct rte_ipv6_hdr *iph6;
- const struct rte_udp_hdr *udp;
- uint16_t ip4_hdr_len;
- uint16_t nat_port;
-
- eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
- if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
-
- iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
- RTE_ETHER_HDR_LEN);
- adjust_ipv4_pktlen(pkt, iph4, 0);
-
- switch (iph4->next_proto_id) {
- case IPPROTO_ESP:
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- ip4_hdr_len = ((iph4->version_ihl &
- RTE_IPV4_HDR_IHL_MASK) *
- RTE_IPV4_IHL_MULTIPLIER);
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, ip4_hdr_len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
- t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
- t->ip4.pkts[(t->ip4.num)++] = pkt;
- }
- pkt->l2_len = 0;
- pkt->l3_len = sizeof(*iph4);
- pkt->packet_type |= RTE_PTYPE_L3_IPV4;
- if (pkt->packet_type & RTE_PTYPE_L4_TCP)
- pkt->l4_len = sizeof(struct rte_tcp_hdr);
- else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
- pkt->l4_len = sizeof(struct rte_udp_hdr);
- } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
- int next_proto;
- size_t l3len, ext_len;
- uint8_t *p;
-
- /* get protocol type */
- iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
- RTE_ETHER_HDR_LEN);
- adjust_ipv6_pktlen(pkt, iph6, 0);
-
- next_proto = iph6->proto;
-
- /* determine l3 header size up to ESP extension */
- l3len = sizeof(struct ip6_hdr);
- p = rte_pktmbuf_mtod(pkt, uint8_t *);
- while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
- (next_proto = rte_ipv6_get_next_ext(p + l3len,
- next_proto, &ext_len)) >= 0)
- l3len += ext_len;
-
- /* drop packet when IPv6 header exceeds first segment length */
- if (unlikely(l3len > pkt->data_len)) {
- free_pkts(&pkt, 1);
- return;
- }
-
- switch (next_proto) {
- case IPPROTO_ESP:
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, l3len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
- t->ip6.data[t->ip6.num] = &iph6->proto;
- t->ip6.pkts[(t->ip6.num)++] = pkt;
- }
- pkt->l2_len = 0;
- pkt->l3_len = l3len;
- pkt->packet_type |= RTE_PTYPE_L3_IPV6;
- } else {
- /* Unknown/Unsupported type, drop the packet */
- RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
- rte_be_to_cpu_16(eth->ether_type));
- free_pkts(&pkt, 1);
- return;
- }
-
- /* Check if the packet has been processed inline. For inline protocol
- * processed packets, the metadata in the mbuf can be used to identify
- * the security processing done on the packet. The metadata will be
- * used to retrieve the application registered userdata associated
- * with the security session.
- */
-
- if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
- rte_security_dynfield_is_registered()) {
- struct ipsec_sa *sa;
- struct ipsec_mbuf_metadata *priv;
- struct rte_security_ctx *ctx = (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(
- pkt->port);
-
- /* Retrieve the userdata registered. Here, the userdata
- * registered is the SA pointer.
- */
- sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
- *rte_security_dynfield(pkt));
- if (sa == NULL) {
- /* userdata could not be retrieved */
- return;
- }
-
- /* Save SA as priv member in mbuf. This will be used in the
- * IPsec selector(SP-SA) check.
- */
-
- priv = get_priv(pkt);
- priv->sa = sa;
- }
-}
-
-static inline void
-prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
- uint16_t nb_pkts)
-{
- int32_t i;
-
- t->ipsec.num = 0;
- t->ip4.num = 0;
- t->ip6.num = 0;
-
- for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
- rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
- void *));
- prepare_one_packet(pkts[i], t);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_one_packet(pkts[i], t);
-}
-
-static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
- const struct lcore_conf *qconf)
-{
- struct ip *ip;
- struct rte_ether_hdr *ethhdr;
-
- ip = rte_pktmbuf_mtod(pkt, struct ip *);
-
- ethhdr = (struct rte_ether_hdr *)
- rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
-
- if (ip->ip_v == IPVERSION) {
- pkt->ol_flags |= qconf->outbound.ipv4_offloads;
- pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ip->ip_sum = 0;
-
- /* calculate IPv4 cksum in SW */
- if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
- ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
- } else {
- pkt->ol_flags |= qconf->outbound.ipv6_offloads;
- pkt->l3_len = sizeof(struct ip6_hdr);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- }
-
- memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
- sizeof(struct rte_ether_addr));
- memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
- sizeof(struct rte_ether_addr));
-}
-
-static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
- const struct lcore_conf *qconf)
-{
- int32_t i;
- const int32_t prefetch_offset = 2;
-
- for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
- rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
- prepare_tx_pkt(pkts[i], port, qconf);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_tx_pkt(pkts[i], port, qconf);
-}
-
-/* Send burst of packets on an output interface */
-static inline int32_t
-send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
-{
- struct rte_mbuf **m_table;
- int32_t ret;
- uint16_t queueid;
-
- queueid = qconf->tx_queue_id[port];
- m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
-
- prepare_tx_burst(m_table, n, port, qconf);
-
- ret = rte_eth_tx_burst(port, queueid, m_table, n);
-
- core_stats_update_tx(ret);
-
- if (unlikely(ret < n)) {
- do {
- free_pkts(&m_table[ret], 1);
- } while (++ret < n);
- }
-
- return 0;
-}
-
-/*
- * Helper function to fragment and queue for TX one packet.
- */
-static inline uint32_t
-send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
- uint16_t port, uint8_t proto)
-{
- struct buffer *tbl;
- uint32_t len, n;
- int32_t rc;
-
- tbl = qconf->tx_mbufs + port;
- len = tbl->len;
-
- /* free space for new fragments */
- if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
- send_burst(qconf, len, port);
- len = 0;
- }
-
- n = RTE_DIM(tbl->m_table) - len;
-
- if (proto == IPPROTO_IP)
- rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
- n, mtu_size, m->pool, qconf->frag.pool_indir);
- else
- rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
- n, mtu_size, m->pool, qconf->frag.pool_indir);
-
- if (rc >= 0)
- len += rc;
- else
- RTE_LOG(ERR, IPSEC,
- "%s: failed to fragment packet with size %u, "
- "error code: %d\n",
- __func__, m->pkt_len, rte_errno);
-
- free_pkts(&m, 1);
- return len;
-}
-
-/* Enqueue a single packet, and send burst if queue is filled */
-static inline int32_t
-send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
-{
- uint32_t lcore_id;
- uint16_t len;
- struct lcore_conf *qconf;
-
- lcore_id = rte_lcore_id();
-
- qconf = &lcore_conf[lcore_id];
- len = qconf->tx_mbufs[port].len;
-
- if (m->pkt_len <= mtu_size) {
- qconf->tx_mbufs[port].m_table[len] = m;
- len++;
-
- /* need to fragment the packet */
- } else if (frag_tbl_sz > 0)
- len = send_fragment_packet(qconf, m, port, proto);
- else
- free_pkts(&m, 1);
-
- /* enough pkts to be sent */
- if (unlikely(len == MAX_PKT_BURST)) {
- send_burst(qconf, MAX_PKT_BURST, port);
- len = 0;
- }
-
- qconf->tx_mbufs[port].len = len;
- return 0;
-}
-
-static inline void
-inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
- uint16_t lim, struct ipsec_spd_stats *stats)
-{
- struct rte_mbuf *m;
- uint32_t i, j, res, sa_idx;
-
- if (ip->num == 0 || sp == NULL)
- return;
-
- rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
- ip->num, DEFAULT_MAX_CATEGORIES);
-
- j = 0;
- for (i = 0; i < ip->num; i++) {
- m = ip->pkts[i];
- res = ip->res[i];
- if (res == BYPASS) {
- ip->pkts[j++] = m;
- stats->bypass++;
- continue;
- }
- if (res == DISCARD) {
- free_pkts(&m, 1);
- stats->discard++;
- continue;
- }
-
- /* Only check SPI match for processed IPSec packets */
- if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
- stats->discard++;
- free_pkts(&m, 1);
- continue;
- }
-
- sa_idx = res - 1;
- if (!inbound_sa_check(sa, m, sa_idx)) {
- stats->discard++;
- free_pkts(&m, 1);
- continue;
- }
- ip->pkts[j++] = m;
- stats->protect++;
- }
- ip->num = j;
-}
-
static void
split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
{
@@ -962,140 +542,6 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
}
}
-static inline int32_t
-get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
-{
- struct ipsec_mbuf_metadata *priv;
- struct ipsec_sa *sa;
-
- priv = get_priv(pkt);
-
- sa = priv->sa;
- if (unlikely(sa == NULL)) {
- RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
- goto fail;
- }
-
- if (is_ipv6)
- return sa->portid;
-
- /* else */
- return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
-
-fail:
- if (is_ipv6)
- return -1;
-
- /* else */
- return 0;
-}
-
-static inline void
-route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
-{
- uint32_t hop[MAX_PKT_BURST * 2];
- uint32_t dst_ip[MAX_PKT_BURST * 2];
- int32_t pkt_hop = 0;
- uint16_t i, offset;
- uint16_t lpm_pkts = 0;
- unsigned int lcoreid = rte_lcore_id();
-
- if (nb_pkts == 0)
- return;
-
- /* Need to do an LPM lookup for non-inline packets. Inline packets will
- * have port ID in the SA
- */
-
- for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
- /* Security offload not enabled. So an LPM lookup is
- * required to get the hop
- */
- offset = offsetof(struct ip, ip_dst);
- dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
- uint32_t *, offset);
- dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
- lpm_pkts++;
- }
- }
-
- rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
-
- lpm_pkts = 0;
-
- for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
- /* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
- } else {
- /* Need to use hop returned by lookup */
- pkt_hop = hop[lpm_pkts++];
- }
-
- if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
- core_statistics[lcoreid].lpm4.miss++;
- free_pkts(&pkts[i], 1);
- continue;
- }
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
- }
-}
-
-static inline void
-route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
-{
- int32_t hop[MAX_PKT_BURST * 2];
- uint8_t dst_ip[MAX_PKT_BURST * 2][16];
- uint8_t *ip6_dst;
- int32_t pkt_hop = 0;
- uint16_t i, offset;
- uint16_t lpm_pkts = 0;
- unsigned int lcoreid = rte_lcore_id();
-
- if (nb_pkts == 0)
- return;
-
- /* Need to do an LPM lookup for non-inline packets. Inline packets will
- * have port ID in the SA
- */
-
- for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
- /* Security offload not enabled. So an LPM lookup is
- * required to get the hop
- */
- offset = offsetof(struct ip6_hdr, ip6_dst);
- ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
- offset);
- memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
- lpm_pkts++;
- }
- }
-
- rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
- lpm_pkts);
-
- lpm_pkts = 0;
-
- for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
- /* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
- } else {
- /* Need to use hop returned by lookup */
- pkt_hop = hop[lpm_pkts++];
- }
-
- if (pkt_hop == -1) {
- core_statistics[lcoreid].lpm6.miss++;
- free_pkts(&pkts[i], 1);
- continue;
- }
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
- }
-}
-
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
uint8_t nb_pkts, uint16_t portid)
@@ -1121,21 +567,6 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
}
static inline void
-drain_tx_buffers(struct lcore_conf *qconf)
-{
- struct buffer *buf;
- uint32_t portid;
-
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- buf = &qconf->tx_mbufs[portid];
- if (buf->len == 0)
- continue;
- send_burst(qconf, buf->len, portid);
- buf->len = 0;
- }
-}
-
-static inline void
drain_crypto_buffers(struct lcore_conf *qconf)
{
uint32_t i;
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index 24f11ad..fceb835 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -6,6 +6,7 @@
#include <stdbool.h>
+#define MAX_RX_QUEUE_PER_LCORE 16
#define NB_SOCKETS 4
@@ -136,6 +137,9 @@ extern uint32_t nb_bufs_in_pool;
extern bool per_port_pool;
+extern uint32_t mtu_size;
+extern uint32_t frag_tbl_sz;
+
static inline uint8_t
is_unprotected_port(uint16_t port_id)
{
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index ccfde8e..9a4e7ea 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -9,6 +9,7 @@
#include <rte_byteorder.h>
#include <rte_crypto.h>
+#include <rte_ip_frag.h>
#include <rte_security.h>
#include <rte_flow.h>
#include <rte_ipsec.h>
@@ -37,6 +38,11 @@
#define IP6_VERSION (6)
+#define SATP_OUT_IPV4(t) \
+ ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
+ (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
+ ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
+
struct rte_crypto_xform;
struct ipsec_xform;
struct rte_mbuf;
@@ -260,6 +266,34 @@ struct cnt_blk {
uint32_t cnt;
} __rte_packed;
+struct lcore_rx_queue {
+ uint16_t port_id;
+ uint8_t queue_id;
+} __rte_cache_aligned;
+
+struct buffer {
+ uint16_t len;
+ struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+};
+
+struct lcore_conf {
+ uint16_t nb_rx_queue;
+ struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
+ uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
+ struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
+ struct ipsec_ctx inbound;
+ struct ipsec_ctx outbound;
+ struct rt_ctx *rt4_ctx;
+ struct rt_ctx *rt6_ctx;
+ struct {
+ struct rte_ip_frag_tbl *tbl;
+ struct rte_mempool *pool_indir;
+ struct rte_ip_frag_death_row dr;
+ } frag;
+} __rte_cache_aligned;
+
+extern struct lcore_conf lcore_conf[RTE_MAX_LCORE];
+
/* Socket ctx */
extern struct socket_ctx socket_ctx[NB_SOCKETS];
diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index 285e9c7..089d89f 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -13,11 +13,7 @@
#include "ipsec.h"
#include "ipsec-secgw.h"
-
-#define SATP_OUT_IPV4(t) \
- ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
- (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
- ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
+#include "ipsec_worker.h"
/* helper routine to free bulk of crypto-ops and related packets */
static inline void
@@ -209,49 +205,6 @@ ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
}
/*
- * helper routine for inline and cpu(synchronous) processing
- * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
- * Should be removed in future.
- */
-static inline void
-prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
-{
- uint32_t j;
- struct ipsec_mbuf_metadata *priv;
-
- for (j = 0; j != cnt; j++) {
- priv = get_priv(mb[j]);
- priv->sa = sa;
- /* setup TSO related fields if TSO enabled*/
- if (priv->sa->mss) {
- uint32_t ptype = mb[j]->packet_type;
- /* only TCP is supported */
- if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
- mb[j]->tso_segsz = priv->sa->mss;
- if ((IS_TUNNEL(priv->sa->flags))) {
- mb[j]->outer_l3_len = mb[j]->l3_len;
- mb[j]->outer_l2_len = mb[j]->l2_len;
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_TUNNEL_ESP;
- if (RTE_ETH_IS_IPV4_HDR(ptype))
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_OUTER_IP_CKSUM;
- }
- mb[j]->l4_len = sizeof(struct rte_tcp_hdr);
- mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG |
- RTE_MBUF_F_TX_TCP_CKSUM);
- if (RTE_ETH_IS_IPV4_HDR(ptype))
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_OUTER_IPV4;
- else
- mb[j]->ol_flags |=
- RTE_MBUF_F_TX_OUTER_IPV6;
- }
- }
- }
-}
-
-/*
* finish processing of packets successfully decrypted by an inline processor
*/
static uint32_t
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 5d85cf1..e0b0a82 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -4,8 +4,15 @@
#ifndef _IPSEC_WORKER_H_
#define _IPSEC_WORKER_H_
+#include <rte_acl.h>
+#include <rte_ethdev.h>
+#include <rte_lpm.h>
+#include <rte_lpm6.h>
+
#include "ipsec.h"
+/* Configure how many packets ahead to prefetch, when reading packets */
+#define PREFETCH_OFFSET 3
enum pkt_type {
PKT_TYPE_PLAIN_IPV4 = 1,
PKT_TYPE_IPSEC_IPV4,
@@ -38,4 +45,585 @@ void ipsec_poll_mode_worker(void);
int ipsec_launch_one_lcore(void *args);
+/*
+ * helper routine for inline and cpu(synchronous) processing
+ * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
+ * Should be removed in future.
+ */
+static inline void
+prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
+{
+ uint32_t j;
+ struct ipsec_mbuf_metadata *priv;
+
+ for (j = 0; j != cnt; j++) {
+ priv = get_priv(mb[j]);
+ priv->sa = sa;
+ /* setup TSO related fields if TSO enabled*/
+ if (priv->sa->mss) {
+ uint32_t ptype = mb[j]->packet_type;
+ /* only TCP is supported */
+ if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
+ mb[j]->tso_segsz = priv->sa->mss;
+ if ((IS_TUNNEL(priv->sa->flags))) {
+ mb[j]->outer_l3_len = mb[j]->l3_len;
+ mb[j]->outer_l2_len = mb[j]->l2_len;
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_TUNNEL_ESP;
+ if (RTE_ETH_IS_IPV4_HDR(ptype))
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM;
+ }
+ mb[j]->l4_len = sizeof(struct rte_tcp_hdr);
+ mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG |
+ RTE_MBUF_F_TX_TCP_CKSUM);
+ if (RTE_ETH_IS_IPV4_HDR(ptype))
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_OUTER_IPV4;
+ else
+ mb[j]->ol_flags |=
+ RTE_MBUF_F_TX_OUTER_IPV6;
+ }
+ }
+ }
+}
+
+static inline void
+adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
+ uint32_t l2_len)
+{
+ uint32_t plen, trim;
+
+ plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
+ if (plen < m->pkt_len) {
+ trim = m->pkt_len - plen;
+ rte_pktmbuf_trim(m, trim);
+ }
+}
+
+static inline void
+adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
+ uint32_t l2_len)
+{
+ uint32_t plen, trim;
+
+ plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
+ if (plen < m->pkt_len) {
+ trim = m->pkt_len - plen;
+ rte_pktmbuf_trim(m, trim);
+ }
+}
+
+static inline void
+prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
+{
+ const struct rte_ether_hdr *eth;
+ const struct rte_ipv4_hdr *iph4;
+ const struct rte_ipv6_hdr *iph6;
+ const struct rte_udp_hdr *udp;
+ uint16_t ip4_hdr_len;
+ uint16_t nat_port;
+
+ eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
+ if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+
+ iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
+ RTE_ETHER_HDR_LEN);
+ adjust_ipv4_pktlen(pkt, iph4, 0);
+
+ switch (iph4->next_proto_id) {
+ case IPPROTO_ESP:
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ break;
+ case IPPROTO_UDP:
+ if (app_sa_prm.udp_encap == 1) {
+ ip4_hdr_len = ((iph4->version_ihl &
+ RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER);
+ udp = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_udp_hdr *, ip4_hdr_len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port){
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ pkt->packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ break;
+ }
+ }
+ /* Fall through */
+ default:
+ t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
+ t->ip4.pkts[(t->ip4.num)++] = pkt;
+ }
+ pkt->l2_len = 0;
+ pkt->l3_len = sizeof(*iph4);
+ pkt->packet_type |= RTE_PTYPE_L3_IPV4;
+ if (pkt->packet_type & RTE_PTYPE_L4_TCP)
+ pkt->l4_len = sizeof(struct rte_tcp_hdr);
+ else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
+ pkt->l4_len = sizeof(struct rte_udp_hdr);
+ } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ int next_proto;
+ size_t l3len, ext_len;
+ uint8_t *p;
+
+ /* get protocol type */
+ iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
+ RTE_ETHER_HDR_LEN);
+ adjust_ipv6_pktlen(pkt, iph6, 0);
+
+ next_proto = iph6->proto;
+
+ /* determine l3 header size up to ESP extension */
+ l3len = sizeof(struct ip6_hdr);
+ p = rte_pktmbuf_mtod(pkt, uint8_t *);
+ while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
+ (next_proto = rte_ipv6_get_next_ext(p + l3len,
+ next_proto, &ext_len)) >= 0)
+ l3len += ext_len;
+
+ /* drop packet when IPv6 header exceeds first segment length */
+ if (unlikely(l3len > pkt->data_len)) {
+ free_pkts(&pkt, 1);
+ return;
+ }
+
+ switch (next_proto) {
+ case IPPROTO_ESP:
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ break;
+ case IPPROTO_UDP:
+ if (app_sa_prm.udp_encap == 1) {
+ udp = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_udp_hdr *, l3len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port){
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ pkt->packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ break;
+ }
+ }
+ /* Fall through */
+ default:
+ t->ip6.data[t->ip6.num] = &iph6->proto;
+ t->ip6.pkts[(t->ip6.num)++] = pkt;
+ }
+ pkt->l2_len = 0;
+ pkt->l3_len = l3len;
+ pkt->packet_type |= RTE_PTYPE_L3_IPV6;
+ } else {
+ /* Unknown/Unsupported type, drop the packet */
+ RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
+ rte_be_to_cpu_16(eth->ether_type));
+ free_pkts(&pkt, 1);
+ return;
+ }
+
+ /* Check if the packet has been processed inline. For inline protocol
+ * processed packets, the metadata in the mbuf can be used to identify
+ * the security processing done on the packet. The metadata will be
+ * used to retrieve the application registered userdata associated
+ * with the security session.
+ */
+
+ if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
+ rte_security_dynfield_is_registered()) {
+ struct ipsec_sa *sa;
+ struct ipsec_mbuf_metadata *priv;
+ struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(
+ pkt->port);
+
+ /* Retrieve the userdata registered. Here, the userdata
+ * registered is the SA pointer.
+ */
+ sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
+ *rte_security_dynfield(pkt));
+ if (sa == NULL) {
+ /* userdata could not be retrieved */
+ return;
+ }
+
+ /* Save SA as priv member in mbuf. This will be used in the
+ * IPsec selector(SP-SA) check.
+ */
+
+ priv = get_priv(pkt);
+ priv->sa = sa;
+ }
+}
+
+static inline void
+prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
+ uint16_t nb_pkts)
+{
+ int32_t i;
+
+ t->ipsec.num = 0;
+ t->ip4.num = 0;
+ t->ip6.num = 0;
+
+ for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
+ void *));
+ prepare_one_packet(pkts[i], t);
+ }
+ /* Process left packets */
+ for (; i < nb_pkts; i++)
+ prepare_one_packet(pkts[i], t);
+}
+
+static inline void
+prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
+ const struct lcore_conf *qconf)
+{
+ struct ip *ip;
+ struct rte_ether_hdr *ethhdr;
+
+ ip = rte_pktmbuf_mtod(pkt, struct ip *);
+
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+ if (ip->ip_v == IPVERSION) {
+ pkt->ol_flags |= qconf->outbound.ipv4_offloads;
+ pkt->l3_len = sizeof(struct ip);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ ip->ip_sum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
+ ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ } else {
+ pkt->ol_flags |= qconf->outbound.ipv6_offloads;
+ pkt->l3_len = sizeof(struct ip6_hdr);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ }
+
+ memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+}
+
+static inline void
+prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
+ const struct lcore_conf *qconf)
+{
+ int32_t i;
+ const int32_t prefetch_offset = 2;
+
+ for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
+ rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
+ prepare_tx_pkt(pkts[i], port, qconf);
+ }
+ /* Process left packets */
+ for (; i < nb_pkts; i++)
+ prepare_tx_pkt(pkts[i], port, qconf);
+}
+
+/* Send burst of packets on an output interface */
+static inline int32_t
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
+{
+ struct rte_mbuf **m_table;
+ int32_t ret;
+ uint16_t queueid;
+
+ queueid = qconf->tx_queue_id[port];
+ m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
+
+ prepare_tx_burst(m_table, n, port, qconf);
+
+ ret = rte_eth_tx_burst(port, queueid, m_table, n);
+
+ core_stats_update_tx(ret);
+
+ if (unlikely(ret < n)) {
+ do {
+ free_pkts(&m_table[ret], 1);
+ } while (++ret < n);
+ }
+
+ return 0;
+}
+
+/*
+ * Helper function to fragment and queue for TX one packet.
+ */
+static inline uint32_t
+send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
+ uint16_t port, uint8_t proto)
+{
+ struct buffer *tbl;
+ uint32_t len, n;
+ int32_t rc;
+
+ tbl = qconf->tx_mbufs + port;
+ len = tbl->len;
+
+ /* free space for new fragments */
+ if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
+ send_burst(qconf, len, port);
+ len = 0;
+ }
+
+ n = RTE_DIM(tbl->m_table) - len;
+
+ if (proto == IPPROTO_IP)
+ rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
+ n, mtu_size, m->pool, qconf->frag.pool_indir);
+ else
+ rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
+ n, mtu_size, m->pool, qconf->frag.pool_indir);
+
+ if (rc >= 0)
+ len += rc;
+ else
+ RTE_LOG(ERR, IPSEC,
+ "%s: failed to fragment packet with size %u, "
+ "error code: %d\n",
+ __func__, m->pkt_len, rte_errno);
+
+ free_pkts(&m, 1);
+ return len;
+}
+
+/* Enqueue a single packet, and send burst if queue is filled */
+static inline int32_t
+send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
+{
+ uint32_t lcore_id;
+ uint16_t len;
+ struct lcore_conf *qconf;
+
+ lcore_id = rte_lcore_id();
+
+ qconf = &lcore_conf[lcore_id];
+ len = qconf->tx_mbufs[port].len;
+
+ if (m->pkt_len <= mtu_size) {
+ qconf->tx_mbufs[port].m_table[len] = m;
+ len++;
+
+ /* need to fragment the packet */
+ } else if (frag_tbl_sz > 0)
+ len = send_fragment_packet(qconf, m, port, proto);
+ else
+ free_pkts(&m, 1);
+
+ /* enough pkts to be sent */
+ if (unlikely(len == MAX_PKT_BURST)) {
+ send_burst(qconf, MAX_PKT_BURST, port);
+ len = 0;
+ }
+
+ qconf->tx_mbufs[port].len = len;
+ return 0;
+}
+
+static inline void
+inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
+ uint16_t lim, struct ipsec_spd_stats *stats)
+{
+ struct rte_mbuf *m;
+ uint32_t i, j, res, sa_idx;
+
+ if (ip->num == 0 || sp == NULL)
+ return;
+
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+ ip->num, DEFAULT_MAX_CATEGORIES);
+
+ j = 0;
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ res = ip->res[i];
+ if (res == BYPASS) {
+ ip->pkts[j++] = m;
+ stats->bypass++;
+ continue;
+ }
+ if (res == DISCARD) {
+ free_pkts(&m, 1);
+ stats->discard++;
+ continue;
+ }
+
+ /* Only check SPI match for processed IPSec packets */
+ if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
+ stats->discard++;
+ free_pkts(&m, 1);
+ continue;
+ }
+
+ sa_idx = res - 1;
+ if (!inbound_sa_check(sa, m, sa_idx)) {
+ stats->discard++;
+ free_pkts(&m, 1);
+ continue;
+ }
+ ip->pkts[j++] = m;
+ stats->protect++;
+ }
+ ip->num = j;
+}
+
+static inline int32_t
+get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
+{
+ struct ipsec_mbuf_metadata *priv;
+ struct ipsec_sa *sa;
+
+ priv = get_priv(pkt);
+
+ sa = priv->sa;
+ if (unlikely(sa == NULL)) {
+ RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
+ goto fail;
+ }
+
+ if (is_ipv6)
+ return sa->portid;
+
+ /* else */
+ return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
+
+fail:
+ if (is_ipv6)
+ return -1;
+
+ /* else */
+ return 0;
+}
+
+static inline void
+route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+{
+ uint32_t hop[MAX_PKT_BURST * 2];
+ uint32_t dst_ip[MAX_PKT_BURST * 2];
+ int32_t pkt_hop = 0;
+ uint16_t i, offset;
+ uint16_t lpm_pkts = 0;
+ unsigned int lcoreid = rte_lcore_id();
+
+ if (nb_pkts == 0)
+ return;
+
+ /* Need to do an LPM lookup for non-inline packets. Inline packets will
+ * have port ID in the SA
+ */
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ /* Security offload not enabled. So an LPM lookup is
+ * required to get the hop
+ */
+ offset = offsetof(struct ip, ip_dst);
+ dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
+ uint32_t *, offset);
+ dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
+ lpm_pkts++;
+ }
+ }
+
+ rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
+
+ lpm_pkts = 0;
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ /* Read hop from the SA */
+ pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
+ } else {
+ /* Need to use hop returned by lookup */
+ pkt_hop = hop[lpm_pkts++];
+ }
+
+ if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
+ core_statistics[lcoreid].lpm4.miss++;
+ free_pkts(&pkts[i], 1);
+ continue;
+ }
+ send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
+ }
+}
+
+static inline void
+route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+{
+ int32_t hop[MAX_PKT_BURST * 2];
+ uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+ uint8_t *ip6_dst;
+ int32_t pkt_hop = 0;
+ uint16_t i, offset;
+ uint16_t lpm_pkts = 0;
+ unsigned int lcoreid = rte_lcore_id();
+
+ if (nb_pkts == 0)
+ return;
+
+ /* Need to do an LPM lookup for non-inline packets. Inline packets will
+ * have port ID in the SA
+ */
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ /* Security offload not enabled. So an LPM lookup is
+ * required to get the hop
+ */
+ offset = offsetof(struct ip6_hdr, ip6_dst);
+ ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
+ offset);
+ memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
+ lpm_pkts++;
+ }
+ }
+
+ rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
+ lpm_pkts);
+
+ lpm_pkts = 0;
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ /* Read hop from the SA */
+ pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
+ } else {
+ /* Need to use hop returned by lookup */
+ pkt_hop = hop[lpm_pkts++];
+ }
+
+ if (pkt_hop == -1) {
+ core_statistics[lcoreid].lpm6.miss++;
+ free_pkts(&pkts[i], 1);
+ continue;
+ }
+ send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
+ }
+}
+
+static inline void
+drain_tx_buffers(struct lcore_conf *qconf)
+{
+ struct buffer *buf;
+ uint32_t portid;
+
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+ buf = &qconf->tx_mbufs[portid];
+ if (buf->len == 0)
+ continue;
+ send_burst(qconf, buf->len, portid);
+ buf->len = 0;
+ }
+}
+
#endif /* _IPSEC_WORKER_H_ */
--
2.8.4
* [PATCH v4 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline
2022-04-29 20:44 ` [PATCH v4 " Nithin Dabilpuram
@ 2022-04-29 20:44 ` Nithin Dabilpuram
2022-05-01 17:10 ` Konstantin Ananyev
2022-04-29 20:44 ` [PATCH v4 3/7] examples/ipsec-secgw: use HW parsed packet type in poll mode Nithin Dabilpuram
` (5 subsequent siblings)
6 siblings, 1 reply; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-29 20:44 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Enable Tx IPv4 checksum offload only when Tx inline crypto, lookaside
crypto/protocol or cpu crypto is needed.
For Tx inline protocol offload, checksum computation
is implicitly taken care of by HW.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 3 ---
examples/ipsec-secgw/sa.c | 46 ++++++++++++++++++++++++++++++++------
2 files changed, 39 insertions(+), 10 deletions(-)
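Since the IPv4 checksum offload is no longer turned on unconditionally, the Tx path relies on a SW fallback whenever the per-mbuf offload flag is absent (the example's prepare_tx_pkt() already does this). Below is a minimal standalone sketch of that fallback; finalize_ipv4_cksum() is an illustrative name, not part of the patch, and it assumes l2_len has been set on the mbuf.

#include <rte_ip.h>
#include <rte_mbuf.h>

static inline void
finalize_ipv4_cksum(struct rte_mbuf *m)
{
	struct rte_ipv4_hdr *ip;

	/* Locate the IPv4 header right after the L2 header */
	ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, m->l2_len);
	ip->hdr_checksum = 0;

	/* No HW checksum offload requested for this mbuf: compute it in SW */
	if ((m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
		ip->hdr_checksum = rte_ipv4_cksum(ip);
}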
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 959a20b..5fe5eee 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1761,9 +1761,6 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
local_port_conf.txmode.offloads |=
RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
- if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
- local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
-
printf("port %u configuring rx_offloads=0x%" PRIx64
", tx_offloads=0x%" PRIx64 "\n",
portid, local_port_conf.rxmode.offloads,
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 1839ac7..e8f2598 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -1766,10 +1766,18 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
struct ipsec_sa *rule;
uint32_t idx_sa;
enum rte_security_session_action_type rule_type;
+ struct rte_eth_dev_info dev_info;
+ int ret;
*rx_offloads = 0;
*tx_offloads = 0;
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error during getting device (port %u) info: %s\n",
+ port_id, strerror(-ret));
+
/* Check for inbound rules that use offloads and use this port */
for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
rule = &sa_in[idx_sa];
@@ -1785,13 +1793,37 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
rule = &sa_out[idx_sa];
rule_type = ipsec_get_action_type(rule);
- if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
- rule_type ==
- RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
- && rule->portid == port_id) {
- *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
- if (rule->mss)
- *tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
+ switch (rule_type) {
+ case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+ /* Checksum offload is not needed for inline protocol as
+ * all processing for Outbound IPSec packets will be
+ * implicitly taken care and for non-IPSec packets,
+ * there is no need of IPv4 Checksum offload.
+ */
+ if (rule->portid == port_id) {
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
+ if (rule->mss)
+ *tx_offloads |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
+ }
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+ if (rule->portid == port_id) {
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
+ if (rule->mss)
+ *tx_offloads |=
+ RTE_ETH_TX_OFFLOAD_TCP_TSO;
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+ }
+ break;
+ default:
+ /* Enable IPv4 checksum offload even if one of lookaside
+ * SA's are present.
+ */
+ if (dev_info.tx_offload_capa &
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+ break;
}
}
return 0;
--
2.8.4
* Re: [PATCH v4 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline
2022-04-29 20:44 ` [PATCH v4 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
@ 2022-05-01 17:10 ` Konstantin Ananyev
0 siblings, 0 replies; 37+ messages in thread
From: Konstantin Ananyev @ 2022-05-01 17:10 UTC (permalink / raw)
To: Nithin Dabilpuram, jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj
29/04/2022 21:44, Nithin Dabilpuram writes:
> Enable Tx IPv4 checksum offload only when Tx inline crypto, lookaside
> crypto/protocol or cpu crypto is needed.
> For Tx Inline protocol offload, checksum computation
> is implicitly taken care by HW.
>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
> Acked-by: Akhil Goyal <gakhil@marvell.com>
> ---
> examples/ipsec-secgw/ipsec-secgw.c | 3 ---
> examples/ipsec-secgw/sa.c | 46 ++++++++++++++++++++++++++++++++------
> 2 files changed, 39 insertions(+), 10 deletions(-)
>
> diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
> index 959a20b..5fe5eee 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.c
> +++ b/examples/ipsec-secgw/ipsec-secgw.c
> @@ -1761,9 +1761,6 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
> local_port_conf.txmode.offloads |=
> RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
>
> - if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
> - local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
> -
> printf("port %u configuring rx_offloads=0x%" PRIx64
> ", tx_offloads=0x%" PRIx64 "\n",
> portid, local_port_conf.rxmode.offloads,
> diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
> index 1839ac7..e8f2598 100644
> --- a/examples/ipsec-secgw/sa.c
> +++ b/examples/ipsec-secgw/sa.c
> @@ -1766,10 +1766,18 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
> struct ipsec_sa *rule;
> uint32_t idx_sa;
> enum rte_security_session_action_type rule_type;
> + struct rte_eth_dev_info dev_info;
> + int ret;
>
> *rx_offloads = 0;
> *tx_offloads = 0;
>
> + ret = rte_eth_dev_info_get(port_id, &dev_info);
> + if (ret != 0)
> + rte_exit(EXIT_FAILURE,
> + "Error during getting device (port %u) info: %s\n",
> + port_id, strerror(-ret));
> +
> /* Check for inbound rules that use offloads and use this port */
> for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
> rule = &sa_in[idx_sa];
> @@ -1785,13 +1793,37 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
> for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
> rule = &sa_out[idx_sa];
> rule_type = ipsec_get_action_type(rule);
> - if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
> - rule_type ==
> - RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
> - && rule->portid == port_id) {
> - *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
> - if (rule->mss)
> - *tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
> + switch (rule_type) {
> + case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
> + /* Checksum offload is not needed for inline protocol as
> + * all processing for Outbound IPSec packets will be
> + * implicitly taken care and for non-IPSec packets,
> + * there is no need of IPv4 Checksum offload.
> + */
> + if (rule->portid == port_id) {
> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
> + if (rule->mss)
> + *tx_offloads |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
> + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
> + }
> + break;
> + case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
> + if (rule->portid == port_id) {
> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
> + if (rule->mss)
> + *tx_offloads |=
> + RTE_ETH_TX_OFFLOAD_TCP_TSO;
> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
> + }
> + break;
> + default:
> + /* Enable IPv4 checksum offload even if one of lookaside
> + * SA's are present.
> + */
> + if (dev_info.tx_offload_capa &
> + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
> + *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
> + break;
> }
> }
> return 0;
Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
* [PATCH v4 3/7] examples/ipsec-secgw: use HW parsed packet type in poll mode
2022-04-29 20:44 ` [PATCH v4 " Nithin Dabilpuram
2022-04-29 20:44 ` [PATCH v4 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
@ 2022-04-29 20:44 ` Nithin Dabilpuram
2022-04-29 20:44 ` [PATCH v4 4/7] examples/ipsec-secgw: allow larger burst size for vectors Nithin Dabilpuram
` (4 subsequent siblings)
6 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-29 20:44 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Use HW parsed packet type when ethdev supports necessary protocols.
If packet type parsing is not supported, then register ethdev Rx callbacks
to parse the packet type in SW. This is better for performance as it
affects the fast path.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 157 ++++++++++++++++++++++++++++++++++++
examples/ipsec-secgw/ipsec_worker.h | 114 ++++++++++----------------
2 files changed, 201 insertions(+), 70 deletions(-)
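For context, a simplified usage sketch of the two ethdev calls the patch builds on. setup_ptype_parsing() is an illustrative name and the check here is deliberately coarse; the real check_ptype() in the diff below inspects each reported ptype for IPv4/IPv6/UDP/ESP support before deciding.

#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_mbuf.h>

/* Returns 1 if the port reports HW packet type classification for the
 * needed layers, otherwise attaches 'cb' (e.g. parse_ptype_cb from the
 * patch below) to every Rx queue so packet_type is filled in SW. */
static int
setup_ptype_parsing(uint16_t port_id, uint16_t nb_rx_queues,
		    rte_rx_callback_fn cb)
{
	uint32_t mask = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
			RTE_PTYPE_TUNNEL_MASK;
	uint16_t q;

	/* A NULL buffer just asks how many ptypes the port reports */
	if (rte_eth_dev_get_supported_ptypes(port_id, mask, NULL, 0) > 0)
		return 1;

	/* Fall back to SW parsing on every Rx queue of this port */
	for (q = 0; q < nb_rx_queues; q++)
		if (rte_eth_add_rx_callback(port_id, q, cb, NULL) == NULL)
			RTE_LOG(ERR, EAL, "Failed to add Rx callback\n");

	return 0;
}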
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 5fe5eee..d6a4959 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1680,6 +1680,147 @@ cryptodevs_init(uint16_t req_queue_num)
return total_nb_qps;
}
+static int
+check_ptype(int portid)
+{
+ int l3_ipv4 = 0, l3_ipv6 = 0, l4_udp = 0, tunnel_esp = 0;
+ int i, nb_ptypes;
+ uint32_t mask;
+
+ mask = (RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
+ RTE_PTYPE_TUNNEL_MASK);
+
+ nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0);
+ if (nb_ptypes <= 0)
+ return 0;
+
+ uint32_t ptypes[nb_ptypes];
+
+ nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, ptypes, nb_ptypes);
+ for (i = 0; i < nb_ptypes; ++i) {
+ if (RTE_ETH_IS_IPV4_HDR(ptypes[i]))
+ l3_ipv4 = 1;
+ if (RTE_ETH_IS_IPV6_HDR(ptypes[i]))
+ l3_ipv6 = 1;
+ if ((ptypes[i] & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
+ tunnel_esp = 1;
+ if ((ptypes[i] & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
+ l4_udp = 1;
+ }
+
+ if (l3_ipv4 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
+
+ if (l3_ipv6 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
+
+ if (l4_udp == 0)
+ printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
+
+ if (tunnel_esp == 0)
+ printf("port %d cannot parse RTE_PTYPE_TUNNEL_ESP\n", portid);
+
+ if (l3_ipv4 && l3_ipv6 && l4_udp && tunnel_esp)
+ return 1;
+
+ return 0;
+
+}
+
+static inline void
+parse_ptype(struct rte_mbuf *m)
+{
+ uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+ const struct rte_ipv4_hdr *iph4;
+ const struct rte_ipv6_hdr *iph6;
+ const struct rte_ether_hdr *eth;
+ const struct rte_udp_hdr *udp;
+ uint16_t nat_port, ether_type;
+ int next_proto = 0;
+ size_t ext_len = 0;
+ const uint8_t *p;
+ uint32_t l3len;
+
+ eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+ ether_type = eth->ether_type;
+
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+ iph4 = (const struct rte_ipv4_hdr *)(eth + 1);
+ l3len = ((iph4->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER);
+
+ if (l3len == sizeof(struct rte_ipv4_hdr))
+ packet_type |= RTE_PTYPE_L3_IPV4;
+ else
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+
+ next_proto = iph4->next_proto_id;
+ p = (const uint8_t *)iph4;
+ } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ iph6 = (const struct rte_ipv6_hdr *)(eth + 1);
+ l3len = sizeof(struct ip6_hdr);
+
+ /* determine l3 header size up to ESP extension */
+ next_proto = iph6->proto;
+ p = (const uint8_t *)iph6;
+ while (next_proto != IPPROTO_ESP && l3len < m->data_len &&
+ (next_proto = rte_ipv6_get_next_ext(p + l3len,
+ next_proto, &ext_len)) >= 0)
+ l3len += ext_len;
+
+ /* Skip IPv6 header exceeds first segment length */
+ if (unlikely(l3len + RTE_ETHER_HDR_LEN > m->data_len))
+ goto exit;
+
+ if (l3len == sizeof(struct ip6_hdr))
+ packet_type |= RTE_PTYPE_L3_IPV6;
+ else
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT;
+ }
+
+ switch (next_proto) {
+ case IPPROTO_ESP:
+ packet_type |= RTE_PTYPE_TUNNEL_ESP;
+ break;
+ case IPPROTO_UDP:
+ if (app_sa_prm.udp_encap == 1) {
+ udp = (const struct rte_udp_hdr *)(p + l3len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port)
+ packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ }
+ break;
+ default:
+ break;
+ }
+exit:
+ m->packet_type = packet_type;
+}
+
+static uint16_t
+parse_ptype_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ uint16_t max_pkts __rte_unused,
+ void *user_param __rte_unused)
+{
+ uint32_t i;
+
+ if (unlikely(nb_pkts == 0))
+ return nb_pkts;
+
+ rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct ether_hdr *));
+ for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
+ struct ether_hdr *));
+ parse_ptype(pkts[i]);
+ }
+ parse_ptype(pkts[i]);
+
+ return nb_pkts;
+}
+
static void
port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
{
@@ -1691,6 +1832,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
struct lcore_conf *qconf;
struct rte_ether_addr ethaddr;
struct rte_eth_conf local_port_conf = port_conf;
+ int ptype_supported;
ret = rte_eth_dev_info_get(portid, &dev_info);
if (ret != 0)
@@ -1788,6 +1930,11 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
"err=%d, port=%d\n", ret, portid);
+ /* Check if required ptypes are supported */
+ ptype_supported = check_ptype(portid);
+ if (!ptype_supported)
+ printf("Port %d: softly parse packet type info\n", portid);
+
/* init one TX queue per lcore */
tx_queueid = 0;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
@@ -1849,6 +1996,16 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup: err=%d, "
"port=%d\n", ret, portid);
+
+ /* Register Rx callback if ptypes are not supported */
+ if (!ptype_supported &&
+ !rte_eth_add_rx_callback(portid, queue,
+ parse_ptype_cb, NULL)) {
+ printf("Failed to add rx callback: port=%d, "
+ "queue=%d\n", portid, queue);
+ }
+
+
}
}
printf("\n");
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index e0b0a82..7397291 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -117,55 +117,33 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
+ uint32_t ptype = pkt->packet_type;
const struct rte_ether_hdr *eth;
const struct rte_ipv4_hdr *iph4;
const struct rte_ipv6_hdr *iph6;
- const struct rte_udp_hdr *udp;
- uint16_t ip4_hdr_len;
- uint16_t nat_port;
+ uint32_t tun_type, l3_type;
+ uint64_t tx_offload;
+ uint16_t l3len;
+
+ tun_type = ptype & RTE_PTYPE_TUNNEL_MASK;
+ l3_type = ptype & RTE_PTYPE_L3_MASK;
eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
- if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
-
+ if (RTE_ETH_IS_IPV4_HDR(l3_type)) {
iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
RTE_ETHER_HDR_LEN);
adjust_ipv4_pktlen(pkt, iph4, 0);
- switch (iph4->next_proto_id) {
- case IPPROTO_ESP:
+ if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- ip4_hdr_len = ((iph4->version_ihl &
- RTE_IPV4_HDR_IHL_MASK) *
- RTE_IPV4_IHL_MULTIPLIER);
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, ip4_hdr_len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
+ } else {
t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
t->ip4.pkts[(t->ip4.num)++] = pkt;
}
- pkt->l2_len = 0;
- pkt->l3_len = sizeof(*iph4);
- pkt->packet_type |= RTE_PTYPE_L3_IPV4;
- if (pkt->packet_type & RTE_PTYPE_L4_TCP)
- pkt->l4_len = sizeof(struct rte_tcp_hdr);
- else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
- pkt->l4_len = sizeof(struct rte_udp_hdr);
- } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ tx_offload = sizeof(*iph4) << RTE_MBUF_L2_LEN_BITS;
+ } else if (RTE_ETH_IS_IPV6_HDR(l3_type)) {
int next_proto;
- size_t l3len, ext_len;
+ size_t ext_len;
uint8_t *p;
/* get protocol type */
@@ -173,47 +151,35 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
RTE_ETHER_HDR_LEN);
adjust_ipv6_pktlen(pkt, iph6, 0);
- next_proto = iph6->proto;
-
- /* determine l3 header size up to ESP extension */
l3len = sizeof(struct ip6_hdr);
- p = rte_pktmbuf_mtod(pkt, uint8_t *);
- while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
- (next_proto = rte_ipv6_get_next_ext(p + l3len,
- next_proto, &ext_len)) >= 0)
- l3len += ext_len;
- /* drop packet when IPv6 header exceeds first segment length */
- if (unlikely(l3len > pkt->data_len)) {
- free_pkts(&pkt, 1);
- return;
- }
-
- switch (next_proto) {
- case IPPROTO_ESP:
+ if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, l3len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
+ } else {
t->ip6.data[t->ip6.num] = &iph6->proto;
t->ip6.pkts[(t->ip6.num)++] = pkt;
}
- pkt->l2_len = 0;
- pkt->l3_len = l3len;
- pkt->packet_type |= RTE_PTYPE_L3_IPV6;
+
+ /* Determine l3 header size up to ESP extension by walking
+ * through extension headers.
+ */
+ if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
+ l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
+ p = rte_pktmbuf_mtod(pkt, uint8_t *);
+ next_proto = iph6->proto;
+ while (next_proto != IPPROTO_ESP &&
+ l3len < pkt->data_len &&
+ (next_proto = rte_ipv6_get_next_ext(p + l3len,
+ next_proto, &ext_len)) >= 0)
+ l3len += ext_len;
+
+ /* Drop pkt when IPv6 header exceeds first seg size */
+ if (unlikely(l3len > pkt->data_len)) {
+ free_pkts(&pkt, 1);
+ return;
+ }
+ }
+ tx_offload = l3len << RTE_MBUF_L2_LEN_BITS;
} else {
/* Unknown/Unsupported type, drop the packet */
RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
@@ -222,6 +188,14 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
return;
}
+ if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
+ tx_offload |= (sizeof(struct rte_tcp_hdr) <<
+ (RTE_MBUF_L2_LEN_BITS + RTE_MBUF_L3_LEN_BITS));
+ else if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
+ tx_offload |= (sizeof(struct rte_udp_hdr) <<
+ (RTE_MBUF_L2_LEN_BITS + RTE_MBUF_L3_LEN_BITS));
+ pkt->tx_offload = tx_offload;
+
/* Check if the packet has been processed inline. For inline protocol
* processed packets, the metadata in the mbuf can be used to identify
* the security processing done on the packet. The metadata will be
--
2.8.4
* [PATCH v4 4/7] examples/ipsec-secgw: allow larger burst size for vectors
2022-04-29 20:44 ` [PATCH v4 " Nithin Dabilpuram
2022-04-29 20:44 ` [PATCH v4 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
2022-04-29 20:44 ` [PATCH v4 3/7] examples/ipsec-secgw: use HW parsed packet type in poll mode Nithin Dabilpuram
@ 2022-04-29 20:44 ` Nithin Dabilpuram
2022-04-29 20:44 ` [PATCH v4 5/7] examples/ipsec-secgw: get security context from lcore conf Nithin Dabilpuram
` (3 subsequent siblings)
6 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-29 20:44 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Allow a larger burst size for vector event mode instead of restricting
it to 32. Also restructure the traffic type struct to have the packet
count first so that it is always in the first cacheline, and cache
align the struct. Since MAX_PKT_BURST is not used by the vector event
mode worker, define a separate macro for its burst size so that poll
mode performance is not affected.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 2 +-
examples/ipsec-secgw/ipsec-secgw.h | 15 ++++++++++-----
2 files changed, 11 insertions(+), 6 deletions(-)
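To make the sizing concrete, the new macros evaluate as shown below. The constants are copied from the diff that follows; the assert is just a worked check of the arithmetic, not part of the patch.

#include <assert.h>

#define MAX_PKT_BURST		32
#define MAX_PKT_BURST_VEC	256
#define MAX_PKTS \
	((MAX_PKT_BURST_VEC > MAX_PKT_BURST ? \
	  MAX_PKT_BURST_VEC : MAX_PKT_BURST) * 2)

/* 2 * max(256, 32) = 512 entries per traffic_type array */
static_assert(MAX_PKTS == 512, "MAX_PKTS sizing");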
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index d6a4959..88984a6 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1317,7 +1317,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
case CMD_LINE_OPT_VECTOR_SIZE_NUM:
ret = parse_decimal(optarg);
- if (ret > MAX_PKT_BURST) {
+ if (ret > MAX_PKT_BURST_VEC) {
printf("Invalid argument for \'%s\': %s\n",
CMD_LINE_OPT_VECTOR_SIZE, optarg);
print_usage(prgname);
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index fceb835..2edf631 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -11,6 +11,11 @@
#define NB_SOCKETS 4
#define MAX_PKT_BURST 32
+#define MAX_PKT_BURST_VEC 256
+
+#define MAX_PKTS \
+ ((MAX_PKT_BURST_VEC > MAX_PKT_BURST ? \
+ MAX_PKT_BURST_VEC : MAX_PKT_BURST) * 2)
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
@@ -49,12 +54,12 @@
#define MBUF_PTYPE_TUNNEL_ESP_IN_UDP (RTE_PTYPE_TUNNEL_ESP | RTE_PTYPE_L4_UDP)
struct traffic_type {
- const uint8_t *data[MAX_PKT_BURST * 2];
- struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
- void *saptr[MAX_PKT_BURST * 2];
- uint32_t res[MAX_PKT_BURST * 2];
uint32_t num;
-};
+ struct rte_mbuf *pkts[MAX_PKTS];
+ const uint8_t *data[MAX_PKTS];
+ void *saptr[MAX_PKTS];
+ uint32_t res[MAX_PKTS];
+} __rte_cache_aligned;
struct ipsec_traffic {
struct traffic_type ipsec;
--
2.8.4
* [PATCH v4 5/7] examples/ipsec-secgw: get security context from lcore conf
2022-04-29 20:44 ` [PATCH v4 " Nithin Dabilpuram
` (2 preceding siblings ...)
2022-04-29 20:44 ` [PATCH v4 4/7] examples/ipsec-secgw: allow larger burst size for vectors Nithin Dabilpuram
@ 2022-04-29 20:44 ` Nithin Dabilpuram
2022-04-29 20:44 ` [PATCH v4 6/7] examples/ipsec-secgw: update eth header during route lookup Nithin Dabilpuram
` (2 subsequent siblings)
6 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-29 20:44 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Store the security context pointer in the lcore Rx queue config and
get it from there in the fast path for better performance.
Currently rte_eth_dev_get_sec_ctx(), which is meant to be a control
path API, is called on a per-packet basis. For every call to that
API, the ethdev port status is checked.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 22 ++++++++++++++++++---
examples/ipsec-secgw/ipsec.h | 1 +
examples/ipsec-secgw/ipsec_worker.h | 39 +++++++++++++++++--------------------
3 files changed, 38 insertions(+), 24 deletions(-)
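The gist of the change, condensed into one init-time helper: resolve the security context once per Rx queue instead of per packet. cache_rx_queue_sec_ctx() is an illustrative name and is not in the patch; it assumes the lcore_conf/lcore_rx_queue layout this series adds to ipsec.h.

#include <rte_ethdev.h>
#include <rte_security.h>

#include "ipsec.h"	/* struct lcore_conf, as extended by this patch */

static void
cache_rx_queue_sec_ctx(struct lcore_conf *conf)
{
	uint16_t i, portid;

	/* Without the security dynfield there is no fast-path use for
	 * the context, so leave the per-queue pointers NULL. */
	if (!rte_security_dynfield_is_registered())
		return;

	for (i = 0; i < conf->nb_rx_queue; i++) {
		portid = conf->rx_queue_list[i].port_id;
		conf->rx_queue_list[i].sec_ctx =
			rte_eth_dev_get_sec_ctx(portid);
	}
}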
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 88984a6..14b9c06 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -544,11 +544,11 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
- uint8_t nb_pkts, uint16_t portid)
+ uint8_t nb_pkts, uint16_t portid, struct rte_security_ctx *ctx)
{
struct ipsec_traffic traffic;
- prepare_traffic(pkts, &traffic, nb_pkts);
+ prepare_traffic(ctx, pkts, &traffic, nb_pkts);
if (unlikely(single_sa)) {
if (is_unprotected_port(portid))
@@ -740,7 +740,8 @@ ipsec_poll_mode_worker(void)
if (nb_rx > 0) {
core_stats_update_rx(nb_rx);
- process_pkts(qconf, pkts, nb_rx, portid);
+ process_pkts(qconf, pkts, nb_rx, portid,
+ rxql->sec_ctx);
}
/* dequeue and process completed crypto-ops */
@@ -3060,6 +3061,21 @@ main(int32_t argc, char **argv)
flow_init();
+ /* Get security context if available and only if dynamic field is
+ * registered for fast path access.
+ */
+ if (!rte_security_dynfield_is_registered())
+ goto skip_sec_ctx;
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ for (i = 0; i < lcore_conf[lcore_id].nb_rx_queue; i++) {
+ portid = lcore_conf[lcore_id].rx_queue_list[i].port_id;
+ lcore_conf[lcore_id].rx_queue_list[i].sec_ctx =
+ rte_eth_dev_get_sec_ctx(portid);
+ }
+ }
+skip_sec_ctx:
+
check_all_ports_link_status(enabled_port_mask);
if (stats_interval > 0)
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 9a4e7ea..ecad262 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -269,6 +269,7 @@ struct cnt_blk {
struct lcore_rx_queue {
uint16_t port_id;
uint8_t queue_id;
+ struct rte_security_ctx *sec_ctx;
} __rte_cache_aligned;
struct buffer {
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 7397291..b1fc364 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -88,7 +88,7 @@ prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
}
}
-static inline void
+static __rte_always_inline void
adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
uint32_t l2_len)
{
@@ -101,7 +101,7 @@ adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
}
}
-static inline void
+static __rte_always_inline void
adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
uint32_t l2_len)
{
@@ -114,8 +114,9 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
}
}
-static inline void
-prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
+static __rte_always_inline void
+prepare_one_packet(struct rte_security_ctx *ctx, struct rte_mbuf *pkt,
+ struct ipsec_traffic *t)
{
uint32_t ptype = pkt->packet_type;
const struct rte_ether_hdr *eth;
@@ -203,13 +204,9 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
* with the security session.
*/
- if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
- rte_security_dynfield_is_registered()) {
+ if (ctx && pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
struct ipsec_sa *sa;
struct ipsec_mbuf_metadata *priv;
- struct rte_security_ctx *ctx = (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(
- pkt->port);
/* Retrieve the userdata registered. Here, the userdata
* registered is the SA pointer.
@@ -230,9 +227,9 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
}
}
-static inline void
-prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
- uint16_t nb_pkts)
+static __rte_always_inline void
+prepare_traffic(struct rte_security_ctx *ctx, struct rte_mbuf **pkts,
+ struct ipsec_traffic *t, uint16_t nb_pkts)
{
int32_t i;
@@ -243,11 +240,11 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
void *));
- prepare_one_packet(pkts[i], t);
+ prepare_one_packet(ctx, pkts[i], t);
}
/* Process left packets */
for (; i < nb_pkts; i++)
- prepare_one_packet(pkts[i], t);
+ prepare_one_packet(ctx, pkts[i], t);
}
static inline void
@@ -305,7 +302,7 @@ prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
}
/* Send burst of packets on an output interface */
-static inline int32_t
+static __rte_always_inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
@@ -333,7 +330,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
/*
* Helper function to fragment and queue for TX one packet.
*/
-static inline uint32_t
+static __rte_always_inline uint32_t
send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
uint16_t port, uint8_t proto)
{
@@ -372,7 +369,7 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
}
/* Enqueue a single packet, and send burst if queue is filled */
-static inline int32_t
+static __rte_always_inline int32_t
send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
{
uint32_t lcore_id;
@@ -404,7 +401,7 @@ send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
return 0;
}
-static inline void
+static __rte_always_inline void
inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
uint16_t lim, struct ipsec_spd_stats *stats)
{
@@ -451,7 +448,7 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
ip->num = j;
}
-static inline int32_t
+static __rte_always_inline int32_t
get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
{
struct ipsec_mbuf_metadata *priv;
@@ -531,7 +528,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
}
-static inline void
+static __rte_always_inline void
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
int32_t hop[MAX_PKT_BURST * 2];
@@ -585,7 +582,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
}
-static inline void
+static __rte_always_inline void
drain_tx_buffers(struct lcore_conf *qconf)
{
struct buffer *buf;
--
2.8.4
* [PATCH v4 6/7] examples/ipsec-secgw: update eth header during route lookup
2022-04-29 20:44 ` [PATCH v4 " Nithin Dabilpuram
` (3 preceding siblings ...)
2022-04-29 20:44 ` [PATCH v4 5/7] examples/ipsec-secgw: get security context from lcore conf Nithin Dabilpuram
@ 2022-04-29 20:44 ` Nithin Dabilpuram
2022-04-29 20:44 ` [PATCH v4 7/7] examples/ipsec-secgw: add poll mode worker for inline proto Nithin Dabilpuram
2022-05-11 19:34 ` [PATCH v4 1/7] examples/ipsec-secgw: move fast path helper functions Akhil Goyal
6 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-29 20:44 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Update the ethernet header during route lookup instead of much
later while performing the Tx burst. The advantage of doing it at
route lookup is that no additional IP version checks based on
packet data are needed, and the packet data is already in cache
since the route lookup is consuming that data anyway.
This is also useful for inline protocol offload cases of v4inv6
or v6inv4 outbound tunnel operations, where the packet data alone
carries no information about the tunnel protocol.
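As a rough illustration (a condensed, hypothetical helper based on
the route4_pkts() hunk below; ethaddr_tbl and the offload flags are
the example app's existing data, the helper name is made up), the
per-packet work moved to route lookup amounts to:

#include <string.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Sketch only: fill in the L2 header and minimum offload data for an
 * IPv4 packet once its output port is known from the route lookup.
 */
static inline void
set_ipv4_eth_hdr(struct rte_mbuf *pkt, uint16_t port, uint64_t tx_offloads)
{
	struct rte_ether_hdr *ethhdr;
	struct rte_ipv4_hdr *ip;

	pkt->l2_len = RTE_ETHER_HDR_LEN;
	pkt->l3_len = sizeof(struct rte_ipv4_hdr);
	pkt->ol_flags |= RTE_MBUF_F_TX_IPV4 | tx_offloads;

	/* Prepend the ethernet header while the packet is still in cache */
	ethhdr = (struct rte_ether_hdr *)
		rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
	ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
	       sizeof(struct rte_ether_addr));
	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
	       sizeof(struct rte_ether_addr));

	/* Calculate the IPv4 cksum in SW only when HW offload is absent */
	ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
	ip->hdr_checksum = 0;
	if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
		ip->hdr_checksum = rte_ipv4_cksum(ip);
}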
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 9 +-
examples/ipsec-secgw/ipsec_worker.h | 199 ++++++++++++++++++++++--------------
2 files changed, 130 insertions(+), 78 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 14b9c06..24ee6c0 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -562,7 +562,8 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
process_pkts_outbound(&qconf->outbound, &traffic);
}
- route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
+ route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
}
@@ -613,7 +614,8 @@ drain_inbound_crypto_queues(const struct lcore_conf *qconf,
if (trf.ip4.num != 0) {
inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0,
&core_statistics[lcoreid].inbound.spd4);
- route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
}
/* process ipv6 packets */
@@ -647,7 +649,8 @@ drain_outbound_crypto_queues(const struct lcore_conf *qconf,
/* process ipv4 packets */
if (trf.ip4.num != 0)
- route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
+ qconf->outbound.ipv4_offloads, true);
/* process ipv6 packets */
if (trf.ip6.num != 0)
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index b1fc364..7f21440 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -247,60 +247,6 @@ prepare_traffic(struct rte_security_ctx *ctx, struct rte_mbuf **pkts,
prepare_one_packet(ctx, pkts[i], t);
}
-static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
- const struct lcore_conf *qconf)
-{
- struct ip *ip;
- struct rte_ether_hdr *ethhdr;
-
- ip = rte_pktmbuf_mtod(pkt, struct ip *);
-
- ethhdr = (struct rte_ether_hdr *)
- rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
-
- if (ip->ip_v == IPVERSION) {
- pkt->ol_flags |= qconf->outbound.ipv4_offloads;
- pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ip->ip_sum = 0;
-
- /* calculate IPv4 cksum in SW */
- if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
- ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
- } else {
- pkt->ol_flags |= qconf->outbound.ipv6_offloads;
- pkt->l3_len = sizeof(struct ip6_hdr);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- }
-
- memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
- sizeof(struct rte_ether_addr));
- memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
- sizeof(struct rte_ether_addr));
-}
-
-static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
- const struct lcore_conf *qconf)
-{
- int32_t i;
- const int32_t prefetch_offset = 2;
-
- for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
- rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
- prepare_tx_pkt(pkts[i], port, qconf);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_tx_pkt(pkts[i], port, qconf);
-}
-
/* Send burst of packets on an output interface */
static __rte_always_inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
@@ -312,8 +258,6 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
queueid = qconf->tx_queue_id[port];
m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
- prepare_tx_burst(m_table, n, port, qconf);
-
ret = rte_eth_tx_burst(port, queueid, m_table, n);
core_stats_update_tx(ret);
@@ -334,8 +278,11 @@ static __rte_always_inline uint32_t
send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
uint16_t port, uint8_t proto)
{
+ struct rte_ether_hdr *ethhdr;
+ struct rte_ipv4_hdr *ip;
+ struct rte_mbuf *pkt;
struct buffer *tbl;
- uint32_t len, n;
+ uint32_t len, n, i;
int32_t rc;
tbl = qconf->tx_mbufs + port;
@@ -349,6 +296,9 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
n = RTE_DIM(tbl->m_table) - len;
+ /* Strip the ethernet header that was prepended earlier */
+ rte_pktmbuf_adj(m, RTE_ETHER_HDR_LEN);
+
if (proto == IPPROTO_IP)
rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
n, mtu_size, m->pool, qconf->frag.pool_indir);
@@ -356,13 +306,51 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
n, mtu_size, m->pool, qconf->frag.pool_indir);
- if (rc >= 0)
- len += rc;
- else
+ if (rc < 0) {
RTE_LOG(ERR, IPSEC,
"%s: failed to fragment packet with size %u, "
"error code: %d\n",
__func__, m->pkt_len, rte_errno);
+ rc = 0;
+ }
+
+ i = len;
+ len += rc;
+ for (; i < len; i++) {
+ pkt = tbl->m_table[i];
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ if (proto == IPPROTO_IP) {
+ ethhdr->ether_type =
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
+ pkt->ol_flags |= qconf->outbound.ipv4_offloads;
+
+ ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
+ ip->hdr_checksum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
+ ip->hdr_checksum = rte_ipv4_cksum(ip);
+ } else {
+ ethhdr->ether_type =
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv6_hdr);
+ pkt->ol_flags |= qconf->outbound.ipv6_offloads;
+ }
+
+ memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+ }
free_pkts(&m, 1);
return len;
@@ -381,7 +369,8 @@ send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
qconf = &lcore_conf[lcore_id];
len = qconf->tx_mbufs[port].len;
- if (m->pkt_len <= mtu_size) {
+ /* L2 header is already part of packet */
+ if (m->pkt_len - RTE_ETHER_HDR_LEN <= mtu_size) {
qconf->tx_mbufs[port].m_table[len] = m;
len++;
@@ -476,15 +465,19 @@ get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
return 0;
}
-static inline void
-route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+static __rte_always_inline void
+route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[],
+ uint8_t nb_pkts, uint64_t tx_offloads, bool ip_cksum)
{
uint32_t hop[MAX_PKT_BURST * 2];
uint32_t dst_ip[MAX_PKT_BURST * 2];
+ struct rte_ether_hdr *ethhdr;
int32_t pkt_hop = 0;
uint16_t i, offset;
uint16_t lpm_pkts = 0;
unsigned int lcoreid = rte_lcore_id();
+ struct rte_mbuf *pkt;
+ uint16_t port;
if (nb_pkts == 0)
return;
@@ -494,12 +487,13 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ pkt = pkts[i];
+ if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
offset = offsetof(struct ip, ip_dst);
- dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
+ dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkt,
uint32_t *, offset);
dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
lpm_pkts++;
@@ -511,9 +505,10 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ pkt = pkts[i];
+ if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
+ pkt_hop = get_hop_for_offload_pkt(pkt, 0);
} else {
/* Need to use hop returned by lookup */
pkt_hop = hop[lpm_pkts++];
@@ -521,10 +516,41 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
core_statistics[lcoreid].lpm4.miss++;
- free_pkts(&pkts[i], 1);
+ free_pkts(&pkt, 1);
continue;
}
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
+
+ port = pkt_hop & 0xff;
+
+ /* Update minimum offload data */
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+ pkt->ol_flags |= RTE_MBUF_F_TX_IPV4;
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+ if (ip_cksum) {
+ struct rte_ipv4_hdr *ip;
+
+ pkt->ol_flags |= tx_offloads;
+
+ ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
+ ip->hdr_checksum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
+ ip->hdr_checksum = rte_ipv4_cksum(ip);
+ }
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+
+ send_single_packet(pkt, port, IPPROTO_IP);
}
}
@@ -533,11 +559,14 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
int32_t hop[MAX_PKT_BURST * 2];
uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+ struct rte_ether_hdr *ethhdr;
uint8_t *ip6_dst;
int32_t pkt_hop = 0;
uint16_t i, offset;
uint16_t lpm_pkts = 0;
unsigned int lcoreid = rte_lcore_id();
+ struct rte_mbuf *pkt;
+ uint16_t port;
if (nb_pkts == 0)
return;
@@ -547,12 +576,13 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+ pkt = pkts[i];
+ if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
offset = offsetof(struct ip6_hdr, ip6_dst);
- ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
+ ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
offset);
memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
lpm_pkts++;
@@ -565,9 +595,10 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+ pkt = pkts[i];
+ if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
+ pkt_hop = get_hop_for_offload_pkt(pkt, 1);
} else {
/* Need to use hop returned by lookup */
pkt_hop = hop[lpm_pkts++];
@@ -575,10 +606,28 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
if (pkt_hop == -1) {
core_statistics[lcoreid].lpm6.miss++;
- free_pkts(&pkts[i], 1);
+ free_pkts(&pkt, 1);
continue;
}
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
+
+ port = pkt_hop & 0xff;
+
+ /* Update minimum offload data */
+ pkt->ol_flags |= RTE_MBUF_F_TX_IPV6;
+ pkt->l3_len = sizeof(struct ip6_hdr);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ /* Update Ethernet header */
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
+ sizeof(struct rte_ether_addr));
+ memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
+ sizeof(struct rte_ether_addr));
+
+ send_single_packet(pkt, port, IPPROTO_IPV6);
}
}
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v4 7/7] examples/ipsec-secgw: add poll mode worker for inline proto
2022-04-29 20:44 ` [PATCH v4 " Nithin Dabilpuram
` (4 preceding siblings ...)
2022-04-29 20:44 ` [PATCH v4 6/7] examples/ipsec-secgw: update eth header during route lookup Nithin Dabilpuram
@ 2022-04-29 20:44 ` Nithin Dabilpuram
2022-05-11 19:34 ` [PATCH v4 1/7] examples/ipsec-secgw: move fast path helper functions Akhil Goyal
6 siblings, 0 replies; 37+ messages in thread
From: Nithin Dabilpuram @ 2022-04-29 20:44 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Radu Nicolau, Akhil Goyal
Cc: dev, anoobj, Nithin Dabilpuram
Add a separate worker thread for the case where all SAs are of
type inline protocol offload and librte_ipsec is enabled, to make
that case more optimal. The current default worker supports every
kind of SA, which leads to many per-packet checks and branches on
the SA type, of which there are five.
Also make a provision for choosing different poll mode workers for
different combinations of SA types, with the default being the
existing poll mode worker that supports all SA types.
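In short, worker selection becomes a table lookup keyed by the OR of
per-SA type flags collected while parsing the SA rules (flag and
function names as in the patch; the launcher below is a condensed
sketch of the ipsec_worker.c hunk, not the exact code):

/* Sketch: pick a specialised poll mode worker from wrkr_flags,
 * falling back to the generic worker for unsupported combinations
 * or when librte_ipsec is not enabled.
 */
typedef void (*ipsec_worker_fn_t)(void);

static void
poll_mode_wrkr_launch_sketch(void)
{
	static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
		[INL_PR_F] = ipsec_poll_mode_wrkr_inl_pr,
		[INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
	};
	ipsec_worker_fn_t fn = NULL;

	if (app_sa_prm.enable)
		fn = poll_mode_wrkrs[wrkr_flags];

	if (fn == NULL)
		fn = ipsec_poll_mode_worker;

	(*fn)();
}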
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 6 +-
examples/ipsec-secgw/ipsec-secgw.h | 10 +
examples/ipsec-secgw/ipsec_worker.c | 372 +++++++++++++++++++++++++++++++++++-
examples/ipsec-secgw/ipsec_worker.h | 4 +
examples/ipsec-secgw/sa.c | 9 +
5 files changed, 397 insertions(+), 4 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 24ee6c0..4251952 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -68,8 +68,6 @@ volatile bool force_quit;
#define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */
#define MAX_QUEUE_PAIRS 1
-#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-
#define MAX_LCORE_PARAMS 1024
/*
@@ -173,7 +171,7 @@ static uint64_t enabled_cryptodev_mask = UINT64_MAX;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
-static uint32_t single_sa;
+uint32_t single_sa;
uint32_t nb_bufs_in_pool;
/*
@@ -238,6 +236,7 @@ struct socket_ctx socket_ctx[NB_SOCKETS];
bool per_port_pool;
+uint16_t wrkr_flags;
/*
* Determine is multi-segment support required:
* - either frame buffer size is smaller then mtu
@@ -1233,6 +1232,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
single_sa = 1;
single_sa_idx = ret;
eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
+ wrkr_flags |= SS_F;
printf("Configured with single SA index %u\n",
single_sa_idx);
break;
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index 2edf631..f027360 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -135,6 +135,7 @@ extern uint32_t unprotected_port_mask;
/* Index of SA in single mode */
extern uint32_t single_sa_idx;
+extern uint32_t single_sa;
extern volatile bool force_quit;
@@ -145,6 +146,15 @@ extern bool per_port_pool;
extern uint32_t mtu_size;
extern uint32_t frag_tbl_sz;
+#define SS_F (1U << 0) /* Single SA mode */
+#define INL_PR_F (1U << 1) /* Inline Protocol */
+#define INL_CR_F (1U << 2) /* Inline Crypto */
+#define LA_PR_F (1U << 3) /* Lookaside Protocol */
+#define LA_ANY_F (1U << 4) /* Lookaside Any */
+#define MAX_F (LA_ANY_F << 1)
+
+extern uint16_t wrkr_flags;
+
static inline uint8_t
is_unprotected_port(uint16_t port_id)
{
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 8639426..fbb2334 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -17,6 +17,8 @@ struct port_drv_mode_data {
struct rte_security_ctx *ctx;
};
+typedef void (*ipsec_worker_fn_t)(void);
+
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
@@ -1004,6 +1006,374 @@ ipsec_eventmode_worker(struct eh_conf *conf)
eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}
+static __rte_always_inline void
+outb_inl_pro_spd_process(struct sp_ctx *sp,
+ struct sa_ctx *sa_ctx,
+ struct traffic_type *ip,
+ struct traffic_type *match,
+ struct traffic_type *mismatch,
+ bool match_flag,
+ struct ipsec_spd_stats *stats)
+{
+ uint32_t prev_sa_idx = UINT32_MAX;
+ struct rte_mbuf *ipsec[MAX_PKT_BURST];
+ struct rte_ipsec_session *ips;
+ uint32_t i, j, j_mis, sa_idx;
+ struct ipsec_sa *sa = NULL;
+ uint32_t ipsec_num = 0;
+ struct rte_mbuf *m;
+ uint64_t satp;
+
+ if (ip->num == 0 || sp == NULL)
+ return;
+
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+ ip->num, DEFAULT_MAX_CATEGORIES);
+
+ j = match->num;
+ j_mis = mismatch->num;
+
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ sa_idx = ip->res[i] - 1;
+
+ if (unlikely(ip->res[i] == DISCARD)) {
+ free_pkts(&m, 1);
+
+ stats->discard++;
+ } else if (unlikely(ip->res[i] == BYPASS)) {
+ match->pkts[j++] = m;
+
+ stats->bypass++;
+ } else {
+ if (prev_sa_idx == UINT32_MAX) {
+ prev_sa_idx = sa_idx;
+ sa = &sa_ctx->sa[sa_idx];
+ ips = ipsec_get_primary_session(sa);
+ satp = rte_ipsec_sa_type(ips->sa);
+ }
+
+ if (sa_idx != prev_sa_idx) {
+ prep_process_group(sa, ipsec, ipsec_num);
+
+ /* Prepare packets for outbound */
+ rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
+
+ /* Copy to current tr or a different tr */
+ if (SATP_OUT_IPV4(satp) == match_flag) {
+ memcpy(&match->pkts[j], ipsec,
+ ipsec_num * sizeof(void *));
+ j += ipsec_num;
+ } else {
+ memcpy(&mismatch->pkts[j_mis], ipsec,
+ ipsec_num * sizeof(void *));
+ j_mis += ipsec_num;
+ }
+
+ /* Update to new SA */
+ sa = &sa_ctx->sa[sa_idx];
+ ips = ipsec_get_primary_session(sa);
+ satp = rte_ipsec_sa_type(ips->sa);
+ ipsec_num = 0;
+ }
+
+ ipsec[ipsec_num++] = m;
+ stats->protect++;
+ }
+ }
+
+ if (ipsec_num) {
+ prep_process_group(sa, ipsec, ipsec_num);
+
+ /* Prepare packets for outbound */
+ rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
+
+ /* Copy to current tr or a different tr */
+ if (SATP_OUT_IPV4(satp) == match_flag) {
+ memcpy(&match->pkts[j], ipsec,
+ ipsec_num * sizeof(void *));
+ j += ipsec_num;
+ } else {
+ memcpy(&mismatch->pkts[j_mis], ipsec,
+ ipsec_num * sizeof(void *));
+ j_mis += ipsec_num;
+ }
+ }
+ match->num = j;
+ mismatch->num = j_mis;
+}
+
+/* Poll mode worker when all SA's are of type inline protocol */
+void
+ipsec_poll_mode_wrkr_inl_pr(void)
+{
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
+ / US_PER_S * BURST_TX_DRAIN_US;
+ struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out;
+ struct rte_mbuf *pkts[MAX_PKT_BURST];
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
+ struct ipsec_core_statistics *stats;
+ struct rt_ctx *rt4_ctx, *rt6_ctx;
+ struct sa_ctx *sa_in, *sa_out;
+ struct traffic_type ip4, ip6;
+ struct lcore_rx_queue *rxql;
+ struct rte_mbuf **v4, **v6;
+ struct ipsec_traffic trf;
+ struct lcore_conf *qconf;
+ uint16_t v4_num, v6_num;
+ int32_t socket_id;
+ uint32_t lcore_id;
+ int32_t i, nb_rx;
+ uint16_t portid;
+ uint8_t queueid;
+
+ prev_tsc = 0;
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_conf[lcore_id];
+ rxql = qconf->rx_queue_list;
+ socket_id = rte_lcore_to_socket_id(lcore_id);
+ stats = &core_statistics[lcore_id];
+
+ rt4_ctx = socket_ctx[socket_id].rt_ip4;
+ rt6_ctx = socket_ctx[socket_id].rt_ip6;
+
+ sp4_in = socket_ctx[socket_id].sp_ip4_in;
+ sp6_in = socket_ctx[socket_id].sp_ip6_in;
+ sa_in = socket_ctx[socket_id].sa_in;
+
+ sp4_out = socket_ctx[socket_id].sp_ip4_out;
+ sp6_out = socket_ctx[socket_id].sp_ip6_out;
+ sa_out = socket_ctx[socket_id].sa_out;
+
+ qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
+
+ if (qconf->nb_rx_queue == 0) {
+ RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
+ lcore_id);
+ return;
+ }
+
+ RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->nb_rx_queue; i++) {
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ RTE_LOG(INFO, IPSEC,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ lcore_id, portid, queueid);
+ }
+
+ while (!force_quit) {
+ cur_tsc = rte_rdtsc();
+
+ /* TX queue buffer drain */
+ diff_tsc = cur_tsc - prev_tsc;
+
+ if (unlikely(diff_tsc > drain_tsc)) {
+ drain_tx_buffers(qconf);
+ prev_tsc = cur_tsc;
+ }
+
+ for (i = 0; i < qconf->nb_rx_queue; ++i) {
+ /* Read packets from RX queues */
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ nb_rx = rte_eth_rx_burst(portid, queueid,
+ pkts, MAX_PKT_BURST);
+
+ if (nb_rx <= 0)
+ continue;
+
+ core_stats_update_rx(nb_rx);
+
+ prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);
+
+ /* Drop any IPsec traffic */
+ free_pkts(trf.ipsec.pkts, trf.ipsec.num);
+
+ if (is_unprotected_port(portid)) {
+ inbound_sp_sa(sp4_in, sa_in, &trf.ip4,
+ trf.ip4.num,
+ &stats->inbound.spd4);
+
+ inbound_sp_sa(sp6_in, sa_in, &trf.ip6,
+ trf.ip6.num,
+ &stats->inbound.spd6);
+
+ v4 = trf.ip4.pkts;
+ v4_num = trf.ip4.num;
+ v6 = trf.ip6.pkts;
+ v6_num = trf.ip6.num;
+ } else {
+ ip4.num = 0;
+ ip6.num = 0;
+
+ outb_inl_pro_spd_process(sp4_out, sa_out,
+ &trf.ip4, &ip4, &ip6,
+ true,
+ &stats->outbound.spd4);
+
+ outb_inl_pro_spd_process(sp6_out, sa_out,
+ &trf.ip6, &ip6, &ip4,
+ false,
+ &stats->outbound.spd6);
+ v4 = ip4.pkts;
+ v4_num = ip4.num;
+ v6 = ip6.pkts;
+ v6_num = ip6.num;
+ }
+
+ route4_pkts(rt4_ctx, v4, v4_num, 0, false);
+ route6_pkts(rt6_ctx, v6, v6_num);
+ }
+ }
+}
+
+/* Poll mode worker when all SA's are of type inline protocol
+ * and single sa mode is enabled.
+ */
+void
+ipsec_poll_mode_wrkr_inl_pr_ss(void)
+{
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
+ / US_PER_S * BURST_TX_DRAIN_US;
+ uint16_t sa_out_portid = 0, sa_out_proto = 0;
+ struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
+ struct rte_ipsec_session *ips = NULL;
+ struct lcore_rx_queue *rxql;
+ struct ipsec_sa *sa = NULL;
+ struct lcore_conf *qconf;
+ struct sa_ctx *sa_out;
+ uint32_t i, nb_rx, j;
+ int32_t socket_id;
+ uint32_t lcore_id;
+ uint16_t portid;
+ uint8_t queueid;
+
+ prev_tsc = 0;
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_conf[lcore_id];
+ rxql = qconf->rx_queue_list;
+ socket_id = rte_lcore_to_socket_id(lcore_id);
+
+ /* Get SA info */
+ sa_out = socket_ctx[socket_id].sa_out;
+ if (sa_out && single_sa_idx < sa_out->nb_sa) {
+ sa = &sa_out->sa[single_sa_idx];
+ ips = ipsec_get_primary_session(sa);
+ sa_out_portid = sa->portid;
+ if (sa->flags & IP6_TUNNEL)
+ sa_out_proto = IPPROTO_IPV6;
+ else
+ sa_out_proto = IPPROTO_IP;
+ }
+
+ qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
+
+ if (qconf->nb_rx_queue == 0) {
+ RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
+ lcore_id);
+ return;
+ }
+
+ RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->nb_rx_queue; i++) {
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ RTE_LOG(INFO, IPSEC,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ lcore_id, portid, queueid);
+ }
+
+ while (!force_quit) {
+ cur_tsc = rte_rdtsc();
+
+ /* TX queue buffer drain */
+ diff_tsc = cur_tsc - prev_tsc;
+
+ if (unlikely(diff_tsc > drain_tsc)) {
+ drain_tx_buffers(qconf);
+ prev_tsc = cur_tsc;
+ }
+
+ for (i = 0; i < qconf->nb_rx_queue; ++i) {
+ /* Read packets from RX queues */
+ portid = rxql[i].port_id;
+ queueid = rxql[i].queue_id;
+ nb_rx = rte_eth_rx_burst(portid, queueid,
+ pkts, MAX_PKT_BURST);
+
+ if (nb_rx <= 0)
+ continue;
+
+ core_stats_update_rx(nb_rx);
+
+ if (is_unprotected_port(portid)) {
+ /* Nothing much to do for inbound inline
+ * decrypted traffic.
+ */
+ for (j = 0; j < nb_rx; j++) {
+ uint32_t ptype, proto;
+
+ pkt = pkts[j];
+ ptype = pkt->packet_type &
+ RTE_PTYPE_L3_MASK;
+ if (ptype == RTE_PTYPE_L3_IPV4)
+ proto = IPPROTO_IP;
+ else
+ proto = IPPROTO_IPV6;
+
+ send_single_packet(pkt, portid, proto);
+ }
+
+ continue;
+ }
+
+ /* Free packets if there are no outbound sessions */
+ if (unlikely(!ips)) {
+ rte_pktmbuf_free_bulk(pkts, nb_rx);
+ continue;
+ }
+
+ rte_ipsec_pkt_process(ips, pkts, nb_rx);
+
+ /* Send pkts out */
+ for (j = 0; j < nb_rx; j++) {
+ pkt = pkts[j];
+
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+ send_single_packet(pkt, sa_out_portid,
+ sa_out_proto);
+ }
+ }
+ }
+}
+
+static void
+ipsec_poll_mode_wrkr_launch(void)
+{
+ static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
+ [INL_PR_F] = ipsec_poll_mode_wrkr_inl_pr,
+ [INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
+ };
+ ipsec_worker_fn_t fn;
+
+ if (!app_sa_prm.enable) {
+ fn = ipsec_poll_mode_worker;
+ } else {
+ fn = poll_mode_wrkrs[wrkr_flags];
+
+ /* Always default to all mode worker */
+ if (!fn)
+ fn = ipsec_poll_mode_worker;
+ }
+
+ /* Launch worker */
+ (*fn)();
+}
+
int ipsec_launch_one_lcore(void *args)
{
struct eh_conf *conf;
@@ -1012,7 +1382,7 @@ int ipsec_launch_one_lcore(void *args)
if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
/* Run in poll mode */
- ipsec_poll_mode_worker();
+ ipsec_poll_mode_wrkr_launch();
} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
/* Run in event mode */
ipsec_eventmode_worker(conf);
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 7f21440..315f3d6 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -13,6 +13,8 @@
/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+
enum pkt_type {
PKT_TYPE_PLAIN_IPV4 = 1,
PKT_TYPE_IPSEC_IPV4,
@@ -42,6 +44,8 @@ struct lcore_conf_ev_tx_int_port_wrkr {
} __rte_cache_aligned;
void ipsec_poll_mode_worker(void);
+void ipsec_poll_mode_wrkr_inl_pr(void);
+void ipsec_poll_mode_wrkr_inl_pr_ss(void);
int ipsec_launch_one_lcore(void *args);
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index e8f2598..13b9113 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -936,6 +936,15 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
}
+ if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+ wrkr_flags |= INL_CR_F;
+ else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+ wrkr_flags |= INL_PR_F;
+ else if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
+ wrkr_flags |= LA_PR_F;
+ else
+ wrkr_flags |= LA_ANY_F;
+
nb_crypto_sessions++;
*ri = *ri + 1;
}
--
2.8.4
^ permalink raw reply [flat|nested] 37+ messages in thread
* RE: [PATCH v4 1/7] examples/ipsec-secgw: move fast path helper functions
2022-04-29 20:44 ` [PATCH v4 " Nithin Dabilpuram
` (5 preceding siblings ...)
2022-04-29 20:44 ` [PATCH v4 7/7] examples/ipsec-secgw: add poll mode worker for inline proto Nithin Dabilpuram
@ 2022-05-11 19:34 ` Akhil Goyal
6 siblings, 0 replies; 37+ messages in thread
From: Akhil Goyal @ 2022-05-11 19:34 UTC (permalink / raw)
To: Nithin Kumar Dabilpuram, Jerin Jacob Kollanukkaran,
konstantin.ananyev, Radu Nicolau
Cc: dev, Anoob Joseph, Nithin Kumar Dabilpuram
> Subject: [PATCH v4 1/7] examples/ipsec-secgw: move fast path helper functions
>
> Move fast path helper functions to header file for easy access.
>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
> Acked-by: Akhil Goyal <gakhil@marvell.com>
> ---
> v4:
> - Fix NULL pointer access in patch 7/7's inline protocol single sa worker
> thread function when outbound SA doesn't exist.
Series Applied to dpdk-next-crypto
Thanks.
^ permalink raw reply [flat|nested] 37+ messages in thread