Subject: [dpdk-dev] [PATCH 11/14] examples/ipsec-secgw: add app processing code
From: Anoob Joseph
To: Akhil Goyal, Radu Nicolau, Thomas Monjalon
Cc: Lukasz Bartosik, Jerin Jacob, Narayana Prasad, Ankur Dwivedi, Anoob Joseph, Archana Muniganti, Tejasree Kondoj, Vamsi Attunuru, Konstantin Ananyev
Date: Sun, 8 Dec 2019 18:00:46 +0530
Message-ID: <1575808249-31135-12-git-send-email-anoobj@marvell.com>
In-Reply-To: <1575808249-31135-1-git-send-email-anoobj@marvell.com>

From: Lukasz Bartosik

Add IPsec application processing code for event mode.
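At a high level, the inbound event worker added here classifies each packet
(plain vs. ESP, IPv4 vs. IPv6), runs the inbound security policy (ACL) lookup,
resolves the egress port via LPM/LPM6, rewrites the Ethernet addresses and tags
the event for the Tx adapter. The condensed sketch below is illustrative only
and not compilable on its own: inbound_event_sketch() is a made-up name, the
helper functions are the ones in ipsec_worker.c (most of them added by this
patch), and the SA pointer/SPI validation done in process_ipsec_ev_inbound()
is omitted for brevity.

	static int
	inbound_event_sketch(struct ipsec_ctx *ctx, struct route_table *rt,
			     struct rte_event *ev)
	{
		struct rte_mbuf *pkt = ev->mbuf;
		uint32_t sa_idx = 0;
		uint16_t port_id;
		uint8_t *nlp;
		enum pkt_type type;

		/* 1. Classify; nlp is set to the IP next-protocol field */
		type = process_ipsec_get_pkt_type(pkt, &nlp);

		/* 2. Inbound SP (ACL) lookup; sa_idx == 0 means BYPASS */
		switch (type) {
		case PKT_TYPE_PLAIN_IPV4:
			if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0)
				goto drop;
			break;
		case PKT_TYPE_PLAIN_IPV6:
			if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0)
				goto drop;
			break;
		default:
			/* other packet types are not handled in this mode */
			goto drop;
		}

		/* 3. Route lookup (LPM/LPM6) and L2 header rewrite */
		port_id = get_route(pkt, rt, type);
		if (port_id == RTE_MAX_ETHPORTS)
			goto drop;
		update_mac_addrs(pkt, port_id);

		/* 4. Set Tx adapter metadata so the event is sent out port_id */
		ipsec_event_pre_forward(pkt, port_id);
		return 1;

	drop:
		rte_pktmbuf_free(pkt);
		ev->mbuf = NULL;
		return 0;
	}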
Signed-off-by: Anoob Joseph Signed-off-by: Lukasz Bartosik --- examples/ipsec-secgw/ipsec-secgw.c | 124 ++++++------------ examples/ipsec-secgw/ipsec-secgw.h | 81 ++++++++++++ examples/ipsec-secgw/ipsec.h | 37 +++--- examples/ipsec-secgw/ipsec_worker.c | 242 ++++++++++++++++++++++++++++++++++-- examples/ipsec-secgw/ipsec_worker.h | 39 ++++++ examples/ipsec-secgw/sa.c | 11 -- 6 files changed, 409 insertions(+), 125 deletions(-) create mode 100644 examples/ipsec-secgw/ipsec-secgw.h create mode 100644 examples/ipsec-secgw/ipsec_worker.h diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index c5d95b9..2e7d4d8 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -50,12 +50,11 @@ #include "event_helper.h" #include "ipsec.h" +#include "ipsec_worker.h" #include "parser.h" volatile bool force_quit; -#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 - #define MAX_JUMBO_PKT_LEN 9600 #define MEMPOOL_CACHE_SIZE 256 @@ -70,8 +69,6 @@ volatile bool force_quit; #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ -#define NB_SOCKETS 4 - /* Configure how many packets ahead to prefetch, when reading packets */ #define PREFETCH_OFFSET 3 @@ -79,8 +76,6 @@ volatile bool force_quit; #define MAX_LCORE_PARAMS 1024 -#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid)) - /* * Configurable number of RX/TX ring descriptors */ @@ -89,29 +84,6 @@ volatile bool force_quit; static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT; static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; -#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN -#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ - (((uint64_t)((a) & 0xff) << 56) | \ - ((uint64_t)((b) & 0xff) << 48) | \ - ((uint64_t)((c) & 0xff) << 40) | \ - ((uint64_t)((d) & 0xff) << 32) | \ - ((uint64_t)((e) & 0xff) << 24) | \ - ((uint64_t)((f) & 0xff) << 16) | \ - ((uint64_t)((g) & 0xff) << 8) | \ - ((uint64_t)(h) & 0xff)) -#else -#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ - (((uint64_t)((h) & 0xff) << 56) | \ - ((uint64_t)((g) & 0xff) << 48) | \ - ((uint64_t)((f) & 0xff) << 40) | \ - ((uint64_t)((e) & 0xff) << 32) | \ - ((uint64_t)((d) & 0xff) << 24) | \ - ((uint64_t)((c) & 0xff) << 16) | \ - ((uint64_t)((b) & 0xff) << 8) | \ - ((uint64_t)(a) & 0xff)) -#endif -#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0)) - #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \ (addr)->addr_bytes[0], (addr)->addr_bytes[1], \ (addr)->addr_bytes[2], (addr)->addr_bytes[3], \ @@ -123,18 +95,6 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; #define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) -/* port/source ethernet addr and destination ethernet addr */ -struct ethaddr_info { - uint64_t src, dst; -}; - -struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) }, - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } -}; - struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS]; #define CMD_LINE_OPT_CONFIG "config" @@ -192,10 +152,16 @@ static const struct option lgopts[] = { {NULL, 0, 0, 0} }; +struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } +}; + /* mask of enabled ports */ static uint32_t 
enabled_port_mask; static uint64_t enabled_cryptodev_mask = UINT64_MAX; -static uint32_t unprotected_port_mask; static int32_t promiscuous_on = 1; static int32_t numa_on = 1; /**< NUMA is enabled by default. */ static uint32_t nb_lcores; @@ -283,8 +249,6 @@ static struct rte_eth_conf port_conf = { }, }; -static struct socket_ctx socket_ctx[NB_SOCKETS]; - /* * Determine is multi-segment support required: * - either frame buffer size is smaller then mtu @@ -2828,47 +2792,10 @@ main(int32_t argc, char **argv) sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads); port_init(portid, req_rx_offloads, req_tx_offloads); - /* Create default ipsec flow for the ethernet device */ - ret = create_default_ipsec_flow(portid, req_rx_offloads); - if (ret) - printf("Cannot create default flow, err=%d, port=%d\n", - ret, portid); } cryptodevs_init(); - /* start ports */ - RTE_ETH_FOREACH_DEV(portid) { - if ((enabled_port_mask & (1 << portid)) == 0) - continue; - - /* - * Start device - * note: device must be started before a flow rule - * can be installed. - */ - ret = rte_eth_dev_start(portid); - if (ret < 0) - rte_exit(EXIT_FAILURE, "rte_eth_dev_start: " - "err=%d, port=%d\n", ret, portid); - /* - * If enabled, put device in promiscuous mode. - * This allows IO forwarding mode to forward packets - * to itself through 2 cross-connected ports of the - * target machine. - */ - if (promiscuous_on) { - ret = rte_eth_promiscuous_enable(portid); - if (ret != 0) - rte_exit(EXIT_FAILURE, - "rte_eth_promiscuous_enable: err=%s, port=%d\n", - rte_strerror(-ret), portid); - } - - rte_eth_dev_callback_register(portid, - RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL); - } - /* fragment reassemble is enabled */ if (frag_tbl_sz != 0) { ret = reassemble_init(); @@ -2889,8 +2816,6 @@ main(int32_t argc, char **argv) } } - check_all_ports_link_status(enabled_port_mask); - /* * Set the enabled port mask in helper config for use by helper * sub-system. This will be used while intializing devices using @@ -2903,6 +2828,39 @@ main(int32_t argc, char **argv) if (ret < 0) rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret); + /* Create default ipsec flow for each port and start each port */ + RTE_ETH_FOREACH_DEV(portid) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + + ret = create_default_ipsec_flow(portid, req_rx_offloads); + if (ret) + printf("create_default_ipsec_flow failed, err=%d, " + "port=%d\n", ret, portid); + /* + * Start device + * note: device must be started before a flow rule + * can be installed. + */ + ret = rte_eth_dev_start(portid); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_dev_start: " + "err=%d, port=%d\n", ret, portid); + /* + * If enabled, put device in promiscuous mode. + * This allows IO forwarding mode to forward packets + * to itself through 2 cross-connected ports of the + * target machine. 
+ */ + if (promiscuous_on) + rte_eth_promiscuous_enable(portid); + + rte_eth_dev_callback_register(portid, + RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL); + } + + check_all_ports_link_status(enabled_port_mask); + /* launch per-lcore init on every lcore */ rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER); diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h new file mode 100644 index 0000000..67e1193 --- /dev/null +++ b/examples/ipsec-secgw/ipsec-secgw.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ +#ifndef _IPSEC_SECGW_H_ +#define _IPSEC_SECGW_H_ + +#include + +#define MAX_PKT_BURST 32 + +#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 + +#define NB_SOCKETS 4 + +#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid)) + +#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN +#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ + (((uint64_t)((a) & 0xff) << 56) | \ + ((uint64_t)((b) & 0xff) << 48) | \ + ((uint64_t)((c) & 0xff) << 40) | \ + ((uint64_t)((d) & 0xff) << 32) | \ + ((uint64_t)((e) & 0xff) << 24) | \ + ((uint64_t)((f) & 0xff) << 16) | \ + ((uint64_t)((g) & 0xff) << 8) | \ + ((uint64_t)(h) & 0xff)) +#else +#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ + (((uint64_t)((h) & 0xff) << 56) | \ + ((uint64_t)((g) & 0xff) << 48) | \ + ((uint64_t)((f) & 0xff) << 40) | \ + ((uint64_t)((e) & 0xff) << 32) | \ + ((uint64_t)((d) & 0xff) << 24) | \ + ((uint64_t)((c) & 0xff) << 16) | \ + ((uint64_t)((b) & 0xff) << 8) | \ + ((uint64_t)(a) & 0xff)) +#endif + +#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0)) + +struct traffic_type { + const uint8_t *data[MAX_PKT_BURST * 2]; + struct rte_mbuf *pkts[MAX_PKT_BURST * 2]; + void *saptr[MAX_PKT_BURST * 2]; + uint32_t res[MAX_PKT_BURST * 2]; + uint32_t num; +}; + +struct ipsec_traffic { + struct traffic_type ipsec; + struct traffic_type ip4; + struct traffic_type ip6; +}; + +/* Fields optimized for devices without burst */ +struct traffic_type_nb { + const uint8_t *data; + struct rte_mbuf *pkt; + uint32_t res; + uint32_t num; +}; + +struct ipsec_traffic_nb { + struct traffic_type_nb ipsec; + struct traffic_type_nb ip4; + struct traffic_type_nb ip6; +}; + +/* port/source ethernet addr and destination ethernet addr */ +struct ethaddr_info { + uint64_t src, dst; +}; + +struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS]; + +/* TODO: All var definitions need to be part of a .c file */ + +/* Port mask to identify the unprotected ports */ +uint32_t unprotected_port_mask; + +#endif /* _IPSEC_SECGW_H_ */ diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h index 0b9fc04..0c5ee8a 100644 --- a/examples/ipsec-secgw/ipsec.h +++ b/examples/ipsec-secgw/ipsec.h @@ -13,11 +13,11 @@ #include #include -#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 +#include "ipsec-secgw.h" + #define RTE_LOGTYPE_IPSEC_ESP RTE_LOGTYPE_USER2 #define RTE_LOGTYPE_IPSEC_IPIP RTE_LOGTYPE_USER3 -#define MAX_PKT_BURST 32 #define MAX_INFLIGHT 128 #define MAX_QP_PER_LCORE 256 @@ -153,6 +153,17 @@ struct ipsec_sa { struct rte_security_session_conf sess_conf; } __rte_cache_aligned; +struct sa_ctx { + void *satbl; /* pointer to array of rte_ipsec_sa objects*/ + struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES]; + union { + struct { + struct rte_crypto_sym_xform a; + struct rte_crypto_sym_xform b; + }; + } xf[IPSEC_SA_MAX_ENTRIES]; +}; + struct ipsec_mbuf_metadata { struct ipsec_sa *sa; struct rte_crypto_op cop; @@ -233,26 +244,8 @@ struct cnt_blk { 
uint32_t cnt; } __attribute__((packed)); -struct traffic_type { - const uint8_t *data[MAX_PKT_BURST * 2]; - struct rte_mbuf *pkts[MAX_PKT_BURST * 2]; - void *saptr[MAX_PKT_BURST * 2]; - uint32_t res[MAX_PKT_BURST * 2]; - uint32_t num; -}; - -struct ipsec_traffic { - struct traffic_type ipsec; - struct traffic_type ip4; - struct traffic_type ip6; -}; - - -void -ipsec_poll_mode_worker(void); - -int -ipsec_launch_one_lcore(void *args); +/* Socket ctx */ +struct socket_ctx socket_ctx[NB_SOCKETS]; uint16_t ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c index fce274a..2af9475 100644 --- a/examples/ipsec-secgw/ipsec_worker.c +++ b/examples/ipsec-secgw/ipsec_worker.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -29,12 +30,51 @@ #include #include #include +#include +#include #include "ipsec.h" +#include "ipsec_worker.h" #include "event_helper.h" extern volatile bool force_quit; +static inline enum pkt_type +process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp) +{ + struct rte_ether_hdr *eth; + + eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); + if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { + *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN + + offsetof(struct ip, ip_p)); + if (**nlp == IPPROTO_ESP) + return PKT_TYPE_IPSEC_IPV4; + else + return PKT_TYPE_PLAIN_IPV4; + } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { + *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN + + offsetof(struct ip6_hdr, ip6_nxt)); + if (**nlp == IPPROTO_ESP) + return PKT_TYPE_IPSEC_IPV6; + else + return PKT_TYPE_PLAIN_IPV6; + } + + /* Unknown/Unsupported type */ + return PKT_TYPE_INVALID; +} + +static inline void +update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid) +{ + struct rte_ether_hdr *ethhdr; + + ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); + memcpy(ðhdr->s_addr, ðaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN); + memcpy(ðhdr->d_addr, ðaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN); +} + static inline void ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id) { @@ -45,6 +85,177 @@ ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id) rte_event_eth_tx_adapter_txq_set(m, 0); } +static inline int +check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx) +{ + uint32_t res; + + if (unlikely(sp == NULL)) + return 0; + + rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1, + DEFAULT_MAX_CATEGORIES); + + if (unlikely(res == 0)) { + /* No match */ + return 0; + } + + if (res == DISCARD) + return 0; + else if (res == BYPASS) { + *sa_idx = 0; + return 1; + } + + *sa_idx = SPI2IDX(res); + if (*sa_idx < IPSEC_SA_MAX_ENTRIES) + return 1; + + /* Invalid SA IDX */ + return 0; +} + +static inline uint16_t +route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx) +{ + uint32_t dst_ip; + uint16_t offset; + uint32_t hop; + int ret; + + offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst); + dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset); + dst_ip = rte_be_to_cpu_32(dst_ip); + + ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop); + + if (ret == 0) { + /* We have a hit */ + return hop; + } + + /* else */ + return RTE_MAX_ETHPORTS; +} + +/* TODO: To be tested */ +static inline uint16_t +route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx) +{ + uint8_t dst_ip[16]; + uint8_t *ip6_dst; + uint16_t offset; + uint32_t hop; + int ret; + + offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst); + 
ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset); + memcpy(&dst_ip[0], ip6_dst, 16); + + ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop); + + if (ret == 0) { + /* We have a hit */ + return hop; + } + + /* else */ + return RTE_MAX_ETHPORTS; +} + +static inline uint16_t +get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type) +{ + if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4) + return route4_pkt(pkt, rt->rt4_ctx); + else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6) + return route6_pkt(pkt, rt->rt6_ctx); + + return RTE_MAX_ETHPORTS; +} + +static inline int +process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, + struct rte_event *ev) +{ + struct ipsec_sa *sa = NULL; + struct rte_mbuf *pkt; + uint16_t port_id = 0; + enum pkt_type type; + uint32_t sa_idx; + uint8_t *nlp; + + /* Get pkt from event */ + pkt = ev->mbuf; + + /* Check the packet type */ + type = process_ipsec_get_pkt_type(pkt, &nlp); + + switch (type) { + case PKT_TYPE_PLAIN_IPV4: + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) + sa = (struct ipsec_sa *) pkt->udata64; + + /* Check if we have a match */ + if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) { + /* No valid match */ + goto drop_pkt_and_exit; + } + break; + + case PKT_TYPE_PLAIN_IPV6: + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) + sa = (struct ipsec_sa *) pkt->udata64; + + /* Check if we have a match */ + if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) { + /* No valid match */ + goto drop_pkt_and_exit; + } + break; + + default: + RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type); + goto drop_pkt_and_exit; + } + + /* Check if the packet has to be bypassed */ + if (sa_idx == 0) + goto route_and_send_pkt; + + /* Else the packet has to be protected with SA */ + + /* If the packet was IPsec processed, then SA pointer should be set */ + if (sa == NULL) + goto drop_pkt_and_exit; + + /* SPI on the packet should match with the one in SA */ + if (unlikely(sa->spi != sa_idx)) + goto drop_pkt_and_exit; + +route_and_send_pkt: + port_id = get_route(pkt, rt, type); + if (unlikely(port_id == RTE_MAX_ETHPORTS)) { + /* no match */ + goto drop_pkt_and_exit; + } + /* else, we have a matching route */ + + /* Update mac addresses */ + update_mac_addrs(pkt, port_id); + + /* Update the event with the dest port */ + ipsec_event_pre_forward(pkt, port_id); + return 1; + +drop_pkt_and_exit: + RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n"); + rte_pktmbuf_free(pkt); + ev->mbuf = NULL; + return 0; +} + /* * Event mode exposes various operating modes depending on the * capabilities of the event device and the operating mode @@ -134,11 +345,11 @@ static void ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links, uint8_t nb_links) { + struct lcore_conf_ev_tx_int_port_wrkr lconf; unsigned int nb_rx = 0; - unsigned int port_id; - struct rte_mbuf *pkt; struct rte_event ev; uint32_t lcore_id; + int32_t socket_id; /* Check if we have links registered for this lcore */ if (nb_links == 0) { @@ -151,6 +362,21 @@ ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links, /* Get core ID */ lcore_id = rte_lcore_id(); + /* Get socket ID */ + socket_id = rte_lcore_to_socket_id(lcore_id); + + /* Save routing table */ + lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4; + lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6; + lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in; + lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in; + lconf.inbound.sa_ctx = 
socket_ctx[socket_id].sa_in; + lconf.inbound.session_pool = socket_ctx[socket_id].session_pool; + lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out; + lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out; + lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out; + lconf.outbound.session_pool = socket_ctx[socket_id].session_pool; + RTE_LOG(INFO, IPSEC, "Launching event mode worker (non-burst - Tx internal port - " "app mode - inbound) on lcore %d\n", lcore_id); @@ -175,13 +401,11 @@ ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links, if (nb_rx == 0) continue; - port_id = ev.queue_id; - pkt = ev.mbuf; - - rte_prefetch0(rte_pktmbuf_mtod(pkt, void *)); - - /* Process packet */ - ipsec_event_pre_forward(pkt, port_id); + if (process_ipsec_ev_inbound(&lconf.inbound, + &lconf.rt, &ev) != 1) { + /* The pkt has been dropped */ + continue; + } /* * Since tx internal port is available, events can be diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h new file mode 100644 index 0000000..fd18a2e --- /dev/null +++ b/examples/ipsec-secgw/ipsec_worker.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ +#ifndef _IPSEC_WORKER_H_ +#define _IPSEC_WORKER_H_ + +#include "ipsec.h" + +enum pkt_type { + PKT_TYPE_PLAIN_IPV4 = 1, + PKT_TYPE_IPSEC_IPV4, + PKT_TYPE_PLAIN_IPV6, + PKT_TYPE_IPSEC_IPV6, + PKT_TYPE_INVALID +}; + +struct route_table { + struct rt_ctx *rt4_ctx; + struct rt_ctx *rt6_ctx; +}; + +/* + * Conf required by event mode worker with tx internal port + */ +struct lcore_conf_ev_tx_int_port_wrkr { + struct ipsec_ctx inbound; + struct ipsec_ctx outbound; + struct route_table rt; +} __rte_cache_aligned; + +/* TODO + * + * Move this function to ipsec_worker.c + */ +void ipsec_poll_mode_worker(void); + +int ipsec_launch_one_lcore(void *args); + +#endif /* _IPSEC_WORKER_H_ */ diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c index 7f046e3..9e17ba0 100644 --- a/examples/ipsec-secgw/sa.c +++ b/examples/ipsec-secgw/sa.c @@ -772,17 +772,6 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound) printf("\n"); } -struct sa_ctx { - void *satbl; /* pointer to array of rte_ipsec_sa objects*/ - struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES]; - union { - struct { - struct rte_crypto_sym_xform a; - struct rte_crypto_sym_xform b; - }; - } xf[IPSEC_SA_MAX_ENTRIES]; -}; - static struct sa_ctx * sa_create(const char *name, int32_t socket_id) { -- 2.7.4
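Two follow-ups are already flagged by the TODOs in the new headers, and the
sketch below shows one way they could be addressed; it is not part of this
patch. UNPROTECTED_PORT() is moved verbatim from ipsec-secgw.c and still
expands `portid` rather than its `port` parameter, so it only compiles where a
local variable named portid happens to be in scope. Likewise, ethaddr_tbl,
unprotected_port_mask and socket_ctx are defined (not just declared) in the
headers; as the TODO notes, they would eventually need extern declarations
there with a single definition in a .c file.

	/* ipsec-secgw.h (sketch): declarations only; macro uses its parameter */
	#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << (port)))

	extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
	extern uint32_t unprotected_port_mask;

	/* ipsec.h (sketch) */
	extern struct socket_ctx socket_ctx[NB_SOCKETS];

	/* ipsec-secgw.c (sketch): keep the single definitions here */
	struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
		/* initializers as in the patch */
	};
	uint32_t unprotected_port_mask;
	struct socket_ctx socket_ctx[NB_SOCKETS];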