From: Radu Nicolau <radu.nicolau@intel.com>
To: Radu Nicolau, Akhil Goyal
Cc: dev@dpdk.org, declan.doherty@intel.com, hemant.agrawal@oss.nxp.com
Date: Wed, 15 Sep 2021 14:45:22 +0100
Message-Id: <20210915134522.1311843-10-radu.nicolau@intel.com>
In-Reply-To: <20210915134522.1311843-1-radu.nicolau@intel.com>
References: <20210903112257.303961-1-radu.nicolau@intel.com> <20210915134522.1311843-1-radu.nicolau@intel.com>
Subject: [dpdk-dev] [PATCH v2 9/9] examples/ipsec-secgw: add support for inline crypto UDP encapsulation

Enable UDP encapsulation for both transport and tunnel modes for the
inline crypto offload path.
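For context, the sketch below summarises the rte_security session change this
patch relies on: marking the session for ESP-in-UDP (RFC 3948) encapsulation
and setting the encapsulation ports. It mirrors the create_inline_session()
hunk in the diff; the helper function itself and its name are illustrative
only and are not part of the patch.

#include <rte_byteorder.h>
#include <rte_security.h>

/*
 * Illustrative helper (not in the patch): request UDP encapsulation of ESP
 * on an IPsec security session and set the ports carried in the UDP header.
 * Field accesses match the create_inline_session() change in the diff below;
 * the sample application's parser defaults the destination port to 4500.
 */
static inline void
ipsec_sess_conf_set_udp_encap(struct rte_security_session_conf *conf,
		uint16_t sport, uint16_t dport)
{
	conf->ipsec.options.udp_encap = 1;
	/* ports are passed in network byte order, as in the diff below */
	conf->ipsec.udp.sport = rte_cpu_to_be_16(sport);
	conf->ipsec.udp.dport = rte_cpu_to_be_16(dport);
}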
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 examples/ipsec-secgw/ipsec.c |  34 ++++++++--
 examples/ipsec-secgw/ipsec.h |   7 +-
 examples/ipsec-secgw/sa.c    | 123 ++++++++++++++++++++++++++++-------
 3 files changed, 136 insertions(+), 28 deletions(-)

diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index 868089ad3e..edc0b21478 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -222,6 +222,13 @@ create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
 		}
 	}
 
+	if (sa->udp_encap) {
+		sess_conf.ipsec.options.udp_encap = 1;
+
+		sess_conf.ipsec.udp.sport = htons(sa->udp.sport);
+		sess_conf.ipsec.udp.dport = htons(sa->udp.dport);
+	}
+
 	if (sa->esn > 0) {
 		sess_conf.ipsec.options.esn = 1;
 		sess_conf.ipsec.esn.value = sa->esn;
@@ -295,12 +302,31 @@ create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
 			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
 		}
 
-		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
-		sa->pattern[2].spec = &sa->esp_spec;
-		sa->pattern[2].mask = &rte_flow_item_esp_mask;
 		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
 
-		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+		if (sa->udp_encap) {
+
+			sa->udp_spec.hdr.dst_port =
+					rte_cpu_to_be_16(sa->udp.dport);
+			sa->udp_spec.hdr.src_port =
+					rte_cpu_to_be_16(sa->udp.sport);
+
+			sa->pattern[2].mask = &rte_flow_item_udp_mask;
+			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
+			sa->pattern[2].spec = &sa->udp_spec;
+
+			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_ESP;
+			sa->pattern[3].spec = &sa->esp_spec;
+			sa->pattern[3].mask = &rte_flow_item_esp_mask;
+
+			sa->pattern[4].type = RTE_FLOW_ITEM_TYPE_END;
+		} else {
+			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
+			sa->pattern[2].spec = &sa->esp_spec;
+			sa->pattern[2].mask = &rte_flow_item_esp_mask;
+
+			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+		}
 
 		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
 		sa->action[0].conf = ips->security.ses;
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 3ec3e55170..5fa4e62f37 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -128,6 +128,10 @@ struct ipsec_sa {
 
 	struct ip_addr src;
 	struct ip_addr dst;
+	struct {
+		uint16_t sport;
+		uint16_t dport;
+	} udp;
 	uint8_t cipher_key[MAX_KEY_SIZE];
 	uint16_t cipher_key_len;
 	uint8_t auth_key[MAX_KEY_SIZE];
@@ -145,7 +149,7 @@ struct ipsec_sa {
 	uint8_t fdir_qid;
 	uint8_t fdir_flag;
 
-#define MAX_RTE_FLOW_PATTERN (4)
+#define MAX_RTE_FLOW_PATTERN (5)
 #define MAX_RTE_FLOW_ACTIONS (3)
 	struct rte_flow_item pattern[MAX_RTE_FLOW_PATTERN];
 	struct rte_flow_action action[MAX_RTE_FLOW_ACTIONS];
@@ -154,6 +158,7 @@ struct ipsec_sa {
 		struct rte_flow_item_ipv4 ipv4_spec;
 		struct rte_flow_item_ipv6 ipv6_spec;
 	};
+	struct rte_flow_item_udp udp_spec;
 	struct rte_flow_item_esp esp_spec;
 	struct rte_flow *flow;
 	struct rte_security_session_conf sess_conf;
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index bd58edebc9..847ac37b81 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <rte_udp.h>
 #include
 #include
 #include
@@ -882,6 +883,11 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 			app_sa_prm.udp_encap = 1;
 			udp_encap_p = 1;
 			break;
+		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+			rule->udp_encap = 1;
+			rule->udp.sport = 0;
+			rule->udp.dport = 4500;
+			break;
 		default:
 			APP_CHECK(0, status,
 				"UDP encapsulation not supported for "
@@ -969,6 +975,8 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
 	}
 
 	printf("mode:");
+	if (sa->udp_encap)
+		printf("UDP encapsulated ");
 
 	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
 	case IP4_TUNNEL:
@@ -1428,9 +1436,21 @@ fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
 	prm->ipsec_xform.replay_win_sz = app_prm->window_size;
 }
 
+struct udp_ipv4_tunnel {
+	struct rte_ipv4_hdr v4;
+	struct rte_udp_hdr udp;
+} __rte_packed;
+
+struct udp_ipv6_tunnel {
+	struct rte_ipv6_hdr v6;
+	struct rte_udp_hdr udp;
+} __rte_packed;
+
 static int
 fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
-	const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
+	const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6,
+	const struct udp_ipv4_tunnel *udp_ipv4,
+	const struct udp_ipv6_tunnel *udp_ipv6)
 {
 	int32_t rc;
 
@@ -1454,6 +1474,7 @@ fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
 	prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
 		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
 		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
+	prm->ipsec_xform.options.udp_encap = ss->udp_encap;
 	prm->ipsec_xform.options.ecn = 1;
 	prm->ipsec_xform.options.copy_dscp = 1;
 
@@ -1471,16 +1492,31 @@ fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
 		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
 		prm->tun.hdr_l3_len = sizeof(*v4);
 		prm->tun.hdr_l3_off = 0;
-		prm->tun.hdr_len = sizeof(*v4);
 		prm->tun.next_proto = rc;
-		prm->tun.hdr = v4;
+		if (ss->udp_encap) {
+			prm->tun.hdr_len = sizeof(*udp_ipv4);
+			prm->tun.hdr = udp_ipv4;
+
+		} else {
+			prm->tun.hdr_len = sizeof(*v4);
+			prm->tun.hdr = v4;
+		}
+
 	} else if (IS_IP6_TUNNEL(ss->flags)) {
 		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
 		prm->tun.hdr_l3_len = sizeof(*v6);
 		prm->tun.hdr_l3_off = 0;
-		prm->tun.hdr_len = sizeof(*v6);
 		prm->tun.next_proto = rc;
-		prm->tun.hdr = v6;
+		if (ss->udp_encap) {
+
+			prm->tun.hdr_len = sizeof(*udp_ipv6);
+			prm->tun.hdr = udp_ipv6;
+
+		} else {
+			prm->tun.hdr_len = sizeof(*v6);
+			prm->tun.hdr = v6;
+		}
+
 	} else {
 		/* transport mode */
 		prm->trs.proto = rc;
@@ -1519,25 +1555,66 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
 	int rc;
 	struct rte_ipsec_sa_prm prm;
 	struct rte_ipsec_session *ips;
-	struct rte_ipv4_hdr v4 = {
-		.version_ihl = IPVERSION << 4 |
-			sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
-		.time_to_live = IPDEFTTL,
-		.next_proto_id = IPPROTO_ESP,
-		.src_addr = lsa->src.ip.ip4,
-		.dst_addr = lsa->dst.ip.ip4,
-	};
-	struct rte_ipv6_hdr v6 = {
-		.vtc_flow = htonl(IP6_VERSION << 28),
-		.proto = IPPROTO_ESP,
-	};
-
-	if (IS_IP6_TUNNEL(lsa->flags)) {
-		memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
-		memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
+	struct rte_ipv4_hdr v4;
+	struct rte_ipv6_hdr v6;
+	struct udp_ipv4_tunnel udp_ipv4;
+	struct udp_ipv6_tunnel udp_ipv6;
+
+
+	if (IS_TUNNEL(lsa->flags) && (lsa->udp_encap)) {
+		if (IS_IP4(lsa->flags)) {
+
+			udp_ipv4.v4.version_ihl = IPVERSION << 4 | sizeof(v4) /
+					RTE_IPV4_IHL_MULTIPLIER;
+			udp_ipv4.v4.time_to_live = IPDEFTTL;
+			udp_ipv4.v4.next_proto_id = IPPROTO_UDP;
+			udp_ipv4.v4.src_addr = lsa->src.ip.ip4;
+			udp_ipv4.v4.dst_addr = lsa->dst.ip.ip4;
+
+			udp_ipv4.udp.src_port =
+					rte_cpu_to_be_16(lsa->udp.sport);
+			udp_ipv4.udp.dst_port =
+					rte_cpu_to_be_16(lsa->udp.dport);
+
+		} else if (IS_IP6(lsa->flags)) {
+
+			udp_ipv6.v6.vtc_flow = htonl(IP6_VERSION << 28),
+			udp_ipv6.v6.proto = IPPROTO_UDP,
+			memcpy(udp_ipv6.v6.src_addr, lsa->src.ip.ip6.ip6_b,
+				sizeof(udp_ipv6.v6.src_addr));
+			memcpy(udp_ipv6.v6.dst_addr, lsa->dst.ip.ip6.ip6_b,
+				sizeof(udp_ipv6.v6.dst_addr));
+
+			udp_ipv6.udp.src_port =
+					rte_cpu_to_be_16(lsa->udp.sport);
+			udp_ipv6.udp.dst_port =
+					rte_cpu_to_be_16(lsa->udp.dport);
+		}
+
+	} else if (IS_TUNNEL(lsa->flags)) {
+
+		if (IS_IP4(lsa->flags)) {
+			v4.version_ihl = IPVERSION << 4 | sizeof(v4) /
+					RTE_IPV4_IHL_MULTIPLIER;
+			v4.time_to_live = IPDEFTTL;
+			v4.next_proto_id = IPPROTO_ESP;
+			v4.src_addr = lsa->src.ip.ip4;
+			v4.dst_addr = lsa->dst.ip.ip4;
+
+		} else if (IS_IP6(lsa->flags)) {
+
+			v6.vtc_flow = htonl(IP6_VERSION << 28),
+			v6.proto = IPPROTO_ESP,
+			memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b,
+				sizeof(v6.src_addr));
+			memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b,
+				sizeof(v6.dst_addr));
+
+		}
+	}
 
-	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
+	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6, &udp_ipv4, &udp_ipv6);
 	if (rc == 0)
 		rc = rte_ipsec_sa_init(sa, &prm, sa_size);
 	if (rc < 0)
@@ -1575,7 +1652,7 @@ ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket)
 
 	/* determine SA size */
 	idx = 0;
-	fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
+	fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL, NULL, NULL);
 	sz = rte_ipsec_sa_size(&prm);
 	if (sz < 0) {
 		RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
-- 
2.25.1
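
Usage note (illustrative, not part of the patch): with this change an
inline-crypto SA in the sample application's rule file can request UDP
encapsulation. A hypothetical outbound rule could look like

  sa out 5 aead_algo aes-128-gcm aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 udp-encap type inline-crypto-offload port_id 0

where the algorithm, key, addresses and port_id are placeholders and the
udp-encap keyword is the one introduced earlier in this series; for the
inline crypto case the parser then defaults the encapsulation ports to
0/4500, as set in the parse_sa_tokens() hunk above.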