From: Nithin Dabilpuram
To: , , Radu Nicolau, Akhil Goyal
CC: , , Nithin Dabilpuram
Subject: [PATCH v3 3/7] examples/ipsec-secgw: use HW parsed packet type in poll mode
Date: Thu, 28 Apr 2022 20:34:55 +0530
Message-ID: <20220428150459.23950-3-ndabilpuram@marvell.com>
X-Mailer: git-send-email 2.8.4
In-Reply-To: <20220428150459.23950-1-ndabilpuram@marvell.com>
References: <20220322175902.363520-1-ndabilpuram@marvell.com>
 <20220428150459.23950-1-ndabilpuram@marvell.com>

Use the HW parsed packet type when the ethdev supports the necessary
protocols. If the required packet types are not supported, register
ethdev Rx callbacks to parse the packet type in SW. This is better for
performance, as packet type parsing is on the fast path.
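For reviewers unfamiliar with the pattern, the idea is: ask the ethdev
which packet types it can classify in HW and, only if the required types
are missing, hook a SW parser in as an Rx callback. Below is a minimal
standalone sketch of that flow, for illustration only; the helper names
sw_parse_ptype()/sw_ptype_cb()/setup_ptype_parsing() are made up here,
the real code is in the patch that follows.

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Placeholder for a SW classifier that fills m->packet_type. */
static void
sw_parse_ptype(struct rte_mbuf *m)
{
	m->packet_type = RTE_PTYPE_UNKNOWN; /* a real parser inspects headers */
}

/* Rx callback: classify each burst in SW when HW cannot. */
static uint16_t
sw_ptype_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
	    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	uint16_t i;

	RTE_SET_USED(port);
	RTE_SET_USED(queue);
	RTE_SET_USED(max_pkts);
	RTE_SET_USED(user_param);
	for (i = 0; i < nb_pkts; i++)
		sw_parse_ptype(pkts[i]);
	return nb_pkts;
}

/* Use HW ptype info when ESP tunnel recognition is offered; otherwise
 * fall back to the SW callback on every Rx queue of the port.
 */
static int
setup_ptype_parsing(uint16_t portid, uint16_t nb_rx_queues)
{
	uint32_t mask = RTE_PTYPE_L3_MASK | RTE_PTYPE_TUNNEL_MASK;
	int i, n, hw_esp = 0;
	uint16_t q;

	n = rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0);
	if (n > 0) {
		uint32_t ptypes[n];

		n = rte_eth_dev_get_supported_ptypes(portid, mask, ptypes, n);
		for (i = 0; i < n; i++)
			if ((ptypes[i] & RTE_PTYPE_TUNNEL_MASK) ==
			    RTE_PTYPE_TUNNEL_ESP)
				hw_esp = 1;
	}
	if (hw_esp)
		return 0; /* HW already fills mbuf->packet_type */

	for (q = 0; q < nb_rx_queues; q++)
		if (rte_eth_add_rx_callback(portid, q, sw_ptype_cb, NULL) == NULL)
			return -1;
	return 0;
}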
Signed-off-by: Nithin Dabilpuram
---
 examples/ipsec-secgw/ipsec-secgw.c  | 157 ++++++++++++++++++++++++++++++++++++
 examples/ipsec-secgw/ipsec_worker.h | 114 ++++++++++----------------
 2 files changed, 201 insertions(+), 70 deletions(-)

diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 5fe5eee..d6a4959 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1680,6 +1680,147 @@ cryptodevs_init(uint16_t req_queue_num)
 	return total_nb_qps;
 }
 
+static int
+check_ptype(int portid)
+{
+	int l3_ipv4 = 0, l3_ipv6 = 0, l4_udp = 0, tunnel_esp = 0;
+	int i, nb_ptypes;
+	uint32_t mask;
+
+	mask = (RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
+		RTE_PTYPE_TUNNEL_MASK);
+
+	nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0);
+	if (nb_ptypes <= 0)
+		return 0;
+
+	uint32_t ptypes[nb_ptypes];
+
+	nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, ptypes, nb_ptypes);
+	for (i = 0; i < nb_ptypes; ++i) {
+		if (RTE_ETH_IS_IPV4_HDR(ptypes[i]))
+			l3_ipv4 = 1;
+		if (RTE_ETH_IS_IPV6_HDR(ptypes[i]))
+			l3_ipv6 = 1;
+		if ((ptypes[i] & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
+			tunnel_esp = 1;
+		if ((ptypes[i] & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
+			l4_udp = 1;
+	}
+
+	if (l3_ipv4 == 0)
+		printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
+
+	if (l3_ipv6 == 0)
+		printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
+
+	if (l4_udp == 0)
+		printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
+
+	if (tunnel_esp == 0)
+		printf("port %d cannot parse RTE_PTYPE_TUNNEL_ESP\n", portid);
+
+	if (l3_ipv4 && l3_ipv6 && l4_udp && tunnel_esp)
+		return 1;
+
+	return 0;
+
+}
+
+static inline void
+parse_ptype(struct rte_mbuf *m)
+{
+	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+	const struct rte_ipv4_hdr *iph4;
+	const struct rte_ipv6_hdr *iph6;
+	const struct rte_ether_hdr *eth;
+	const struct rte_udp_hdr *udp;
+	uint16_t nat_port, ether_type;
+	int next_proto = 0;
+	size_t ext_len = 0;
+	const uint8_t *p;
+	uint32_t l3len;
+
+	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+	ether_type = eth->ether_type;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		iph4 = (const struct rte_ipv4_hdr *)(eth + 1);
+		l3len = ((iph4->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+			       RTE_IPV4_IHL_MULTIPLIER);
+
+		if (l3len == sizeof(struct rte_ipv4_hdr))
+			packet_type |= RTE_PTYPE_L3_IPV4;
+		else
+			packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+
+		next_proto = iph4->next_proto_id;
+		p = (const uint8_t *)iph4;
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		iph6 = (const struct rte_ipv6_hdr *)(eth + 1);
+		l3len = sizeof(struct ip6_hdr);
+
+		/* determine l3 header size up to ESP extension */
+		next_proto = iph6->proto;
+		p = (const uint8_t *)iph6;
+		while (next_proto != IPPROTO_ESP && l3len < m->data_len &&
+		       (next_proto = rte_ipv6_get_next_ext(p + l3len,
+						next_proto, &ext_len)) >= 0)
+			l3len += ext_len;
+
+		/* Skip IPv6 header exceeds first segment length */
+		if (unlikely(l3len + RTE_ETHER_HDR_LEN > m->data_len))
+			goto exit;
+
+		if (l3len == sizeof(struct ip6_hdr))
+			packet_type |= RTE_PTYPE_L3_IPV6;
+		else
+			packet_type |= RTE_PTYPE_L3_IPV6_EXT;
+	}
+
+	switch (next_proto) {
+	case IPPROTO_ESP:
+		packet_type |= RTE_PTYPE_TUNNEL_ESP;
+		break;
+	case IPPROTO_UDP:
+		if (app_sa_prm.udp_encap == 1) {
+			udp = (const struct rte_udp_hdr *)(p + l3len);
+			nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+			if (udp->src_port == nat_port ||
+			    udp->dst_port == nat_port)
+				packet_type |=
+					MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+		}
+		break;
+	default:
+		break;
+	}
+exit:
+	m->packet_type = packet_type;
+}
+
+static uint16_t
+parse_ptype_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
+	       struct rte_mbuf *pkts[], uint16_t nb_pkts,
+	       uint16_t max_pkts __rte_unused,
+	       void *user_param __rte_unused)
+{
+	uint32_t i;
+
+	if (unlikely(nb_pkts == 0))
+		return nb_pkts;
+
+	rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct ether_hdr *));
+	for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
+		rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
+				struct ether_hdr *));
+		parse_ptype(pkts[i]);
+	}
+	parse_ptype(pkts[i]);
+
+	return nb_pkts;
+}
+
 static void
 port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 {
@@ -1691,6 +1832,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 	struct lcore_conf *qconf;
 	struct rte_ether_addr ethaddr;
 	struct rte_eth_conf local_port_conf = port_conf;
+	int ptype_supported;
 
 	ret = rte_eth_dev_info_get(portid, &dev_info);
 	if (ret != 0)
@@ -1788,6 +1930,11 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 		rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
 				"err=%d, port=%d\n", ret, portid);
 
+	/* Check if required ptypes are supported */
+	ptype_supported = check_ptype(portid);
+	if (!ptype_supported)
+		printf("Port %d: softly parse packet type info\n", portid);
+
 	/* init one TX queue per lcore */
 	tx_queueid = 0;
 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
@@ -1849,6 +1996,16 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 			rte_exit(EXIT_FAILURE,
 				"rte_eth_rx_queue_setup: err=%d, "
 				"port=%d\n", ret, portid);
+
+			/* Register Rx callback if ptypes are not supported */
+			if (!ptype_supported &&
+			    !rte_eth_add_rx_callback(portid, queue,
+						     parse_ptype_cb, NULL)) {
+				printf("Failed to add rx callback: port=%d, "
+				       "queue=%d\n", portid, queue);
+			}
+
+
 		}
 	}
 	printf("\n");
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index e0b0a82..7397291 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -117,55 +117,33 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
 static inline void
 prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 {
+	uint32_t ptype = pkt->packet_type;
 	const struct rte_ether_hdr *eth;
 	const struct rte_ipv4_hdr *iph4;
 	const struct rte_ipv6_hdr *iph6;
-	const struct rte_udp_hdr *udp;
-	uint16_t ip4_hdr_len;
-	uint16_t nat_port;
+	uint32_t tun_type, l3_type;
+	uint64_t tx_offload;
+	uint16_t l3len;
+
+	tun_type = ptype & RTE_PTYPE_TUNNEL_MASK;
+	l3_type = ptype & RTE_PTYPE_L3_MASK;
 
 	eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
-	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
-
+	if (RTE_ETH_IS_IPV4_HDR(l3_type)) {
 		iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
 			RTE_ETHER_HDR_LEN);
 		adjust_ipv4_pktlen(pkt, iph4, 0);
 
-		switch (iph4->next_proto_id) {
-		case IPPROTO_ESP:
+		if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
 			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
-			break;
-		case IPPROTO_UDP:
-			if (app_sa_prm.udp_encap == 1) {
-				ip4_hdr_len = ((iph4->version_ihl &
-					RTE_IPV4_HDR_IHL_MASK) *
-					RTE_IPV4_IHL_MULTIPLIER);
-				udp = rte_pktmbuf_mtod_offset(pkt,
-					struct rte_udp_hdr *, ip4_hdr_len);
-				nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
-				if (udp->src_port == nat_port ||
-				    udp->dst_port == nat_port){
-					t->ipsec.pkts[(t->ipsec.num)++] = pkt;
-					pkt->packet_type |=
-						MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
-					break;
-				}
-			}
-			/* Fall through */
-		default:
+		} else {
 			t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
 			t->ip4.pkts[(t->ip4.num)++] = pkt;
 		}
-		pkt->l2_len = 0;
-		pkt->l3_len = sizeof(*iph4);
-		pkt->packet_type |= RTE_PTYPE_L3_IPV4;
-		if (pkt->packet_type & RTE_PTYPE_L4_TCP)
-			pkt->l4_len = sizeof(struct rte_tcp_hdr);
-		else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
-			pkt->l4_len = sizeof(struct rte_udp_hdr);
-	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		tx_offload = sizeof(*iph4) << RTE_MBUF_L2_LEN_BITS;
+	} else if (RTE_ETH_IS_IPV6_HDR(l3_type)) {
 		int next_proto;
-		size_t l3len, ext_len;
+		size_t ext_len;
 		uint8_t *p;
 
 		/* get protocol type */
@@ -173,47 +151,35 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 			RTE_ETHER_HDR_LEN);
 		adjust_ipv6_pktlen(pkt, iph6, 0);
 
-		next_proto = iph6->proto;
-
-		/* determine l3 header size up to ESP extension */
 		l3len = sizeof(struct ip6_hdr);
-		p = rte_pktmbuf_mtod(pkt, uint8_t *);
-		while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
-			(next_proto = rte_ipv6_get_next_ext(p + l3len,
-						next_proto, &ext_len)) >= 0)
-			l3len += ext_len;
 
-		/* drop packet when IPv6 header exceeds first segment length */
-		if (unlikely(l3len > pkt->data_len)) {
-			free_pkts(&pkt, 1);
-			return;
-		}
-
-		switch (next_proto) {
-		case IPPROTO_ESP:
+		if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
 			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
-			break;
-		case IPPROTO_UDP:
-			if (app_sa_prm.udp_encap == 1) {
-				udp = rte_pktmbuf_mtod_offset(pkt,
-					struct rte_udp_hdr *, l3len);
-				nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
-				if (udp->src_port == nat_port ||
-				    udp->dst_port == nat_port){
-					t->ipsec.pkts[(t->ipsec.num)++] = pkt;
-					pkt->packet_type |=
-						MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
-					break;
-				}
-			}
-			/* Fall through */
-		default:
+		} else {
 			t->ip6.data[t->ip6.num] = &iph6->proto;
 			t->ip6.pkts[(t->ip6.num)++] = pkt;
 		}
-		pkt->l2_len = 0;
-		pkt->l3_len = l3len;
-		pkt->packet_type |= RTE_PTYPE_L3_IPV6;
+
+		/* Determine l3 header size up to ESP extension by walking
+		 * through extension headers.
+		 */
+		if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
+		    l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
+			p = rte_pktmbuf_mtod(pkt, uint8_t *);
+			next_proto = iph6->proto;
+			while (next_proto != IPPROTO_ESP &&
+			       l3len < pkt->data_len &&
+			       (next_proto = rte_ipv6_get_next_ext(p + l3len,
+						next_proto, &ext_len)) >= 0)
+				l3len += ext_len;
+
+			/* Drop pkt when IPv6 header exceeds first seg size */
+			if (unlikely(l3len > pkt->data_len)) {
+				free_pkts(&pkt, 1);
+				return;
+			}
+		}
+		tx_offload = l3len << RTE_MBUF_L2_LEN_BITS;
 	} else {
 		/* Unknown/Unsupported type, drop the packet */
 		RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
@@ -222,6 +188,14 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 		return;
 	}
 
+	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
+		tx_offload |= (sizeof(struct rte_tcp_hdr) <<
+			       (RTE_MBUF_L2_LEN_BITS + RTE_MBUF_L3_LEN_BITS));
+	else if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
+		tx_offload |= (sizeof(struct rte_udp_hdr) <<
+			       (RTE_MBUF_L2_LEN_BITS + RTE_MBUF_L3_LEN_BITS));
+	pkt->tx_offload = tx_offload;
+
 	/* Check if the packet has been processed inline. For inline protocol
 	 * processed packets, the metadata in the mbuf can be used to identify
 	 * the security processing done on the packet. The metadata will be
-- 
2.8.4
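A note on the tx_offload packing used in prepare_one_packet() above:
mbuf->tx_offload is a union over the l2_len/l3_len/l4_len bit-fields, so
the single 64-bit store built from shifted lengths replaces the separate
field writes removed by this patch (on the usual little-endian layout,
l2_len sits in the lowest RTE_MBUF_L2_LEN_BITS bits). A small sketch of
the equivalence; the helper name and variables here are placeholders,
not from the patch:

#include <rte_mbuf.h>

static void
set_lens_packed(struct rte_mbuf *pkt, uint16_t l3_len, uint16_t l4_len)
{
	uint64_t tx_offload;

	/* l2_len is left at 0 because the Ethernet header was already
	 * stripped with rte_pktmbuf_adj() before this point.
	 */
	tx_offload  = (uint64_t)l3_len << RTE_MBUF_L2_LEN_BITS;
	tx_offload |= (uint64_t)l4_len <<
		      (RTE_MBUF_L2_LEN_BITS + RTE_MBUF_L3_LEN_BITS);
	pkt->tx_offload = tx_offload;

	/* Equivalent, field by field:
	 *	pkt->l2_len = 0;
	 *	pkt->l3_len = l3_len;
	 *	pkt->l4_len = l4_len;
	 * The packed form is one plain store instead of three bit-field
	 * updates on the fast path.
	 */
}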