From: Anoob Joseph
To: Akhil Goyal, Radu Nicolau, Thomas Monjalon
Cc: Lukasz Bartosik, Jerin Jacob, Narayana Prasad, Ankur Dwivedi,
 Anoob Joseph, Archana Muniganti, Tejasree Kondoj, Vamsi Attunuru,
 Konstantin Ananyev
Date: Mon, 20 Jan 2020 19:15:15 +0530
Message-ID: <1579527918-360-10-git-send-email-anoobj@marvell.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1579527918-360-1-git-send-email-anoobj@marvell.com>
References: <1575808249-31135-1-git-send-email-anoobj@marvell.com>
 <1579527918-360-1-git-send-email-anoobj@marvell.com>
Subject: [dpdk-dev] [PATCH v2 09/12] examples/ipsec-secgw: add eventmode to ipsec-secgw
List-Id: DPDK patches and discussions

From: Lukasz Bartosik

Add eventmode support to ipsec-secgw. With the aid of the event helper,
configure and use the eventmode capabilities.
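An illustrative invocation in event mode (the EAL options and the rest of
the application's options are setup-specific placeholders; only the two
options added by this patch are spelled out):

    ./ipsec-secgw <EAL options> -- <app options> --transfer-mode event \
        --schedule-type parallel

When --transfer-mode is not given the application keeps the default poll
mode; --schedule-type defaults to "ordered" and is accepted only together
with --transfer-mode event.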
Signed-off-by: Anoob Joseph
Signed-off-by: Lukasz Bartosik
---
 examples/ipsec-secgw/event_helper.c |   4 +-
 examples/ipsec-secgw/event_helper.h |  14 ++
 examples/ipsec-secgw/ipsec-secgw.c  | 341 +++++++++++++++++++++++++++++++++++-
 examples/ipsec-secgw/ipsec.h        |  11 ++
 examples/ipsec-secgw/sa.c           |  11 --
 5 files changed, 365 insertions(+), 16 deletions(-)

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 9719ab4..54a98c9 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -966,6 +966,8 @@ eh_find_worker(uint32_t lcore_id, struct eh_conf *conf,
         else
                 curr_conf.cap.burst = EH_RX_TYPE_NON_BURST;

+        curr_conf.cap.ipsec_mode = conf->ipsec_mode;
+
         /* Parse the passed list and see if we have matching capabilities */

         /* Initialize the pointer used to traverse the list */
@@ -1625,7 +1627,7 @@ eh_launch_worker(struct eh_conf *conf, struct eh_app_worker_params *app_wrkr,
         }

         /* Get eventmode conf */
-        em_conf = (struct eventmode_conf *)(conf->mode_params);
+        em_conf = conf->mode_params;

         /* Get core ID */
         lcore_id = rte_lcore_id();
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index 15a7bd6..cf5d346 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -74,6 +74,14 @@ enum eh_tx_types {
         EH_TX_TYPE_NO_INTERNAL_PORT
 };

+/**
+ * Event mode ipsec mode types
+ */
+enum eh_ipsec_mode_types {
+        EH_IPSEC_MODE_TYPE_APP = 0,
+        EH_IPSEC_MODE_TYPE_DRIVER
+};
+
 /* Event dev params */
 struct eventdev_params {
         uint8_t eventdev_id;
@@ -183,6 +191,10 @@ struct eh_conf {
          */
         void *mode_params;
                 /**< Mode specific parameters */
+
+        /** Application specific params */
+        enum eh_ipsec_mode_types ipsec_mode;
+                /**< Mode of ipsec run */
 };

 /* Workers registered by the application */
@@ -194,6 +206,8 @@ struct eh_app_worker_params {
                         /**< Specify status of rx type burst */
                         uint64_t tx_internal_port : 1;
                         /**< Specify whether tx internal port is available */
+                        uint64_t ipsec_mode : 1;
+                        /**< Specify ipsec processing level */
                 };
                 uint64_t u64;
         } cap;
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index d5e8fe5..f1cc3fb 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2016 Intel Corporation
  */

+#include
 #include
 #include
 #include
@@ -14,6 +15,7 @@
 #include
 #include
 #include
+#include
 #include
 #include

@@ -41,12 +43,17 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include

+#include "event_helper.h"
 #include "ipsec.h"
 #include "parser.h"

+volatile bool force_quit;
+
 #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1

 #define MAX_JUMBO_PKT_LEN  9600
@@ -133,12 +140,20 @@ struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS];
 #define CMD_LINE_OPT_CONFIG            "config"
 #define CMD_LINE_OPT_SINGLE_SA         "single-sa"
 #define CMD_LINE_OPT_CRYPTODEV_MASK    "cryptodev_mask"
+#define CMD_LINE_OPT_TRANSFER_MODE     "transfer-mode"
+#define CMD_LINE_OPT_SCHEDULE_TYPE     "schedule-type"
 #define CMD_LINE_OPT_RX_OFFLOAD        "rxoffload"
 #define CMD_LINE_OPT_TX_OFFLOAD        "txoffload"
 #define CMD_LINE_OPT_REASSEMBLE        "reassemble"
 #define CMD_LINE_OPT_MTU               "mtu"
 #define CMD_LINE_OPT_FRAG_TTL          "frag-ttl"

+#define CMD_LINE_ARG_EVENT     "event"
+#define CMD_LINE_ARG_POLL      "poll"
+#define CMD_LINE_ARG_ORDERED   "ordered"
+#define CMD_LINE_ARG_ATOMIC    "atomic"
+#define CMD_LINE_ARG_PARALLEL  "parallel"
+
 enum {
         /* long options mapped to a short option */

@@ -149,6 +164,8 @@ enum {
         CMD_LINE_OPT_CONFIG_NUM,
         CMD_LINE_OPT_SINGLE_SA_NUM,
         CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
+        CMD_LINE_OPT_TRANSFER_MODE_NUM,
+        CMD_LINE_OPT_SCHEDULE_TYPE_NUM,
         CMD_LINE_OPT_RX_OFFLOAD_NUM,
         CMD_LINE_OPT_TX_OFFLOAD_NUM,
         CMD_LINE_OPT_REASSEMBLE_NUM,
@@ -160,6 +177,8 @@ static const struct option lgopts[] = {
         {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
         {CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
         {CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
+        {CMD_LINE_OPT_TRANSFER_MODE, 1, 0, CMD_LINE_OPT_TRANSFER_MODE_NUM},
+        {CMD_LINE_OPT_SCHEDULE_TYPE, 1, 0, CMD_LINE_OPT_SCHEDULE_TYPE_NUM},
         {CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
         {CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
         {CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
@@ -177,6 +196,7 @@ static int32_t numa_on = 1; /**< NUMA is enabled by default. */
 static uint32_t nb_lcores;
 static uint32_t single_sa;
 static uint32_t single_sa_idx;
+static uint32_t schedule_type;

 /*
  * RX/TX HW offload capabilities to enable/use on ethernet ports.
@@ -1185,7 +1205,7 @@ main_loop(__attribute__((unused)) void *dummy)
 }

 static int32_t
-check_params(void)
+check_params(struct eh_conf *eh_conf)
 {
         uint8_t lcore;
         uint16_t portid;
@@ -1220,6 +1240,14 @@ check_params(void)
                         return -1;
                 }
         }
+
+        if (eh_conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
+                if (schedule_type) {
+                        printf("error: option --schedule-type applies only to event mode\n");
+                        return -1;
+                }
+        }
+
         return 0;
 }

@@ -1277,6 +1305,8 @@ print_usage(const char *prgname)
                 " --config (port,queue,lcore)[,(port,queue,lcore)]"
                 " [--single-sa SAIDX]"
                 " [--cryptodev_mask MASK]"
+                " [--transfer-mode MODE]"
+                " [--schedule-type TYPE]"
                 " [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
                 " [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
                 " [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
@@ -1298,6 +1328,14 @@ print_usage(const char *prgname)
                 "     bypassing the SP\n"
                 " --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
                 "     devices to configure\n"
+                " --transfer-mode MODE\n"
+                "     \"poll\"  : Packet transfer via polling (default)\n"
+                "     \"event\" : Packet transfer via event device\n"
+                " --schedule-type TYPE queue schedule type, used only when\n"
+                "     transfer mode is set to event\n"
+                "     \"ordered\"  : Ordered (default)\n"
+                "     \"atomic\"   : Atomic\n"
+                "     \"parallel\" : Parallel\n"
                 " --" CMD_LINE_OPT_RX_OFFLOAD
                 ": bitmask of the RX HW offload capabilities to enable/use\n"
                 "    (DEV_RX_OFFLOAD_*)\n"
@@ -1432,8 +1470,45 @@ print_app_sa_prm(const struct app_sa_prm *prm)
         printf("Frag TTL: %" PRIu64 " ns\n", frag_ttl_ns);
 }

+static int
+parse_transfer_mode(struct eh_conf *conf, const char *optarg)
+{
+        if (!strcmp(CMD_LINE_ARG_POLL, optarg))
+                conf->mode = EH_PKT_TRANSFER_MODE_POLL;
+        else if (!strcmp(CMD_LINE_ARG_EVENT, optarg))
+                conf->mode = EH_PKT_TRANSFER_MODE_EVENT;
+        else {
+                printf("Unsupported packet transfer mode\n");
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+static int
+parse_schedule_type(struct eh_conf *conf, const char *optarg)
+{
+        struct eventmode_conf *em_conf = NULL;
+
+        /* Get eventmode conf */
+        em_conf = conf->mode_params;
+
+        if (!strcmp(CMD_LINE_ARG_ORDERED, optarg))
+                em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
+        else if (!strcmp(CMD_LINE_ARG_ATOMIC, optarg))
+                em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ATOMIC;
+        else if (!strcmp(CMD_LINE_ARG_PARALLEL, optarg))
+                em_conf->ext_params.sched_type = RTE_SCHED_TYPE_PARALLEL;
+        else {
+                printf("Unsupported queue schedule type\n");
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
 static int32_t
-parse_args(int32_t argc, char **argv)
+parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
 {
         int opt;
         int64_t ret;
@@ -1522,6 +1597,7 @@ parse_args(int32_t argc, char **argv)
                         /* else */
                         single_sa = 1;
                         single_sa_idx = ret;
+                        eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
                         printf("Configured with single SA index %u\n",
                                         single_sa_idx);
                         break;
@@ -1536,6 +1612,26 @@ parse_args(int32_t argc, char **argv)
                         /* else */
                         enabled_cryptodev_mask = ret;
                         break;
+
+                case CMD_LINE_OPT_TRANSFER_MODE_NUM:
+                        ret = parse_transfer_mode(eh_conf, optarg);
+                        if (ret < 0) {
+                                printf("Invalid packet transfer mode\n");
+                                print_usage(prgname);
+                                return -1;
+                        }
+                        break;
+
+                case CMD_LINE_OPT_SCHEDULE_TYPE_NUM:
+                        ret = parse_schedule_type(eh_conf, optarg);
+                        if (ret < 0) {
+                                printf("Invalid queue schedule type\n");
+                                print_usage(prgname);
+                                return -1;
+                        }
+                        schedule_type = 1;
+                        break;
+
                 case CMD_LINE_OPT_RX_OFFLOAD_NUM:
                         ret = parse_mask(optarg, &dev_rx_offload);
                         if (ret != 0) {
@@ -2450,16 +2546,176 @@ create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
                         port_id);
 }

+static struct eh_conf *
+eh_conf_init(void)
+{
+        struct eventmode_conf *em_conf = NULL;
+        struct eh_conf *conf = NULL;
+        unsigned int eth_core_id;
+        uint32_t nb_bytes;
+        void *mem = NULL;
+
+        /* Allocate memory for config */
+        conf = calloc(1, sizeof(struct eh_conf));
+        if (conf == NULL) {
+                printf("Failed to allocate memory for eventmode helper conf");
+                goto err;
+        }
+
+        /* Set default conf */
+
+        /* Packet transfer mode: poll */
+        conf->mode = EH_PKT_TRANSFER_MODE_POLL;
+        conf->ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
+
+        /* Keep all ethernet ports enabled by default */
+        conf->eth_portmask = -1;
+
+        /* Allocate memory for event mode params */
+        conf->mode_params = calloc(1, sizeof(struct eventmode_conf));
+        if (conf->mode_params == NULL) {
+                printf("Failed to allocate memory for event mode params");
+                goto err;
+        }
+
+        /* Get eventmode conf */
+        em_conf = conf->mode_params;
+
+        /* Allocate and initialize bitmap for eth cores */
+        nb_bytes = rte_bitmap_get_memory_footprint(RTE_MAX_LCORE);
+        if (!nb_bytes) {
+                printf("Failed to get bitmap footprint");
+                goto err;
+        }
+
+        mem = rte_zmalloc("event-helper-ethcore-bitmap", nb_bytes,
+                          RTE_CACHE_LINE_SIZE);
+        if (!mem) {
+                printf("Failed to allocate memory for eth cores bitmap\n");
+                goto err;
+        }
+
+        em_conf->eth_core_mask = rte_bitmap_init(RTE_MAX_LCORE, mem, nb_bytes);
+        if (!em_conf->eth_core_mask) {
+                printf("Failed to initialize bitmap");
+                goto err;
+        }
+
+        /* Schedule type: ordered */
+        em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
+
+        /* Set two cores as eth cores for Rx & Tx */
+
+        /* Use first core other than master core as Rx core */
+        eth_core_id = rte_get_next_lcore(0,     /* curr core */
+                                         1,     /* skip master core */
+                                         0      /* wrap */);
+
+        rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);
+
+        /* Use next core as Tx core */
+        eth_core_id = rte_get_next_lcore(eth_core_id,   /* curr core */
+                                         1,             /* skip master core */
+                                         0              /* wrap */);
+
+        rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);
+
+        return conf;
+err:
+        rte_free(mem);
+        free(em_conf);
+        free(conf);
+        return NULL;
+}
+
+static void
+eh_conf_uninit(struct eh_conf *conf)
+{
+        struct eventmode_conf *em_conf = NULL;
+
+        /* Get eventmode conf */
+        em_conf = conf->mode_params;
+
+        /* Free eventmode configuration memory */
+        rte_free(em_conf->eth_core_mask);
+        free(em_conf);
+        free(conf);
+}
+
+static void
+signal_handler(int signum)
+{
+        if (signum == SIGINT || signum == SIGTERM) {
+                printf("\n\nSignal %d received, preparing to exit...\n",
+                        signum);
+                force_quit = true;
+        }
+}
+
+static void
+inline_sessions_free(struct sa_ctx *sa_ctx)
+{
+        struct rte_ipsec_session *ips;
+        struct ipsec_sa *sa;
+        int32_t i, ret;
+
+        for (i = 0; i < IPSEC_SA_MAX_ENTRIES; i++) {
+
+                sa = &sa_ctx->sa[i];
+                if (!sa->spi)
+                        continue;
+
+                ips = ipsec_get_primary_session(sa);
+                if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
+                    ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+                        continue;
+
+                ret = rte_security_session_destroy(
+                                rte_eth_dev_get_sec_ctx(sa->portid),
+                                ips->security.ses);
+                if (ret)
+                        RTE_LOG(ERR, IPSEC, "Failed to destroy security "
+                                "session type %d, spi %d\n",
+                                ips->type, sa->spi);
+        }
+}
+
+static void
+ev_mode_sess_verify(struct sa_ctx *sa_ctx)
+{
+        struct rte_ipsec_session *ips;
+        struct ipsec_sa *sa;
+        int32_t i;
+
+        if (!sa_ctx)
+                return;
+
+        for (i = 0; i < IPSEC_SA_MAX_ENTRIES; i++) {
+
+                sa = &sa_ctx->sa[i];
+                if (!sa->spi)
+                        continue;
+
+                ips = ipsec_get_primary_session(sa);
+                if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+                        rte_exit(EXIT_FAILURE, "Event mode supports only "
+                                 "inline protocol sessions\n");
+        }
+
+}
+
 int32_t
 main(int32_t argc, char **argv)
 {
         int32_t ret;
         uint32_t lcore_id;
+        uint32_t cdev_id;
         uint32_t i;
         uint8_t socket_id;
         uint16_t portid;
         uint64_t req_rx_offloads[RTE_MAX_ETHPORTS];
         uint64_t req_tx_offloads[RTE_MAX_ETHPORTS];
+        struct eh_conf *eh_conf = NULL;
         size_t sess_sz;

         /* init EAL */
@@ -2469,8 +2725,17 @@ main(int32_t argc, char **argv)
         argc -= ret;
         argv += ret;

+        force_quit = false;
+        signal(SIGINT, signal_handler);
+        signal(SIGTERM, signal_handler);
+
+        /* initialize event helper configuration */
+        eh_conf = eh_conf_init();
+        if (eh_conf == NULL)
+                rte_exit(EXIT_FAILURE, "Failed to init event helper config");
+
         /* parse application arguments (after the EAL ones) */
-        ret = parse_args(argc, argv);
+        ret = parse_args(argc, argv, eh_conf);
         if (ret < 0)
                 rte_exit(EXIT_FAILURE, "Invalid parameters\n");
@@ -2487,7 +2752,7 @@ main(int32_t argc, char **argv)
                 rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
                                 unprotected_port_mask);

-        if (check_params() < 0)
+        if (check_params(eh_conf) < 0)
                 rte_exit(EXIT_FAILURE, "check_params failed\n");

         ret = init_lcore_rx_queues();
@@ -2529,6 +2794,18 @@ main(int32_t argc, char **argv)

         cryptodevs_init();

+        /*
+         * Set the enabled port mask in helper config for use by helper
+         * sub-system. This will be used while initializing devices using
+         * helper sub-system.
+         */
+        eh_conf->eth_portmask = enabled_port_mask;
+
+        /* Initialize eventmode components */
+        ret = eh_devs_init(eh_conf);
+        if (ret < 0)
+                rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret);
+
         /* start ports */
         RTE_ETH_FOREACH_DEV(portid) {
                 if ((enabled_port_mask & (1 << portid)) == 0)
@@ -2576,6 +2853,18 @@ main(int32_t argc, char **argv)
                         sp4_init(&socket_ctx[socket_id], socket_id);
                         sp6_init(&socket_ctx[socket_id], socket_id);
                         rt_init(&socket_ctx[socket_id], socket_id);
+
+                        /*
+                         * Event mode currently supports only inline protocol
+                         * sessions. If there are other types of sessions
+                         * configured then exit with error.
+                         */
+                        if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
+                                ev_mode_sess_verify(
+                                                socket_ctx[socket_id].sa_in);
+                                ev_mode_sess_verify(
+                                                socket_ctx[socket_id].sa_out);
+                        }
                 }
         }

@@ -2583,10 +2872,54 @@ main(int32_t argc, char **argv)
         /* launch per-lcore init on every lcore */
         rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
+
         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
                 if (rte_eal_wait_lcore(lcore_id) < 0)
                         return -1;
         }

+        /* Uninitialize eventmode components */
+        ret = eh_devs_uninit(eh_conf);
+        if (ret < 0)
+                rte_exit(EXIT_FAILURE, "eh_devs_uninit failed, err=%d\n", ret);
+
+        /* Free eventmode configuration memory */
+        eh_conf_uninit(eh_conf);
+
+        /* Destroy inline inbound and outbound sessions */
+        for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
+                socket_id = rte_socket_id_by_idx(i);
+                inline_sessions_free(socket_ctx[socket_id].sa_in);
+                inline_sessions_free(socket_ctx[socket_id].sa_out);
+        }
+
+        for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
+                printf("Closing cryptodev %d...", cdev_id);
+                rte_cryptodev_stop(cdev_id);
+                rte_cryptodev_close(cdev_id);
+                printf(" Done\n");
+        }
+
+        RTE_ETH_FOREACH_DEV(portid) {
+                if ((enabled_port_mask & (1 << portid)) == 0)
+                        continue;
+
+                printf("Closing port %d...", portid);
+                if (flow_info_tbl[portid].rx_def_flow) {
+                        struct rte_flow_error err;
+
+                        ret = rte_flow_destroy(portid,
+                                flow_info_tbl[portid].rx_def_flow, &err);
+                        if (ret)
+                                RTE_LOG(ERR, IPSEC, "Failed to destroy flow "
+                                        " for port %u, err msg: %s\n", portid,
+                                        err.message);
+                }
+                rte_eth_dev_stop(portid);
+                rte_eth_dev_close(portid);
+                printf(" Done\n");
+        }
+        printf("Bye...\n");
+
         return 0;
 }
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 28ff07d..0539aec 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -153,6 +153,17 @@ struct ipsec_sa {
         struct rte_security_session_conf sess_conf;
 } __rte_cache_aligned;

+struct sa_ctx {
+        void *satbl; /* pointer to array of rte_ipsec_sa objects*/
+        struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
+        union {
+                struct {
+                        struct rte_crypto_sym_xform a;
+                        struct rte_crypto_sym_xform b;
+                };
+        } xf[IPSEC_SA_MAX_ENTRIES];
+};
+
 struct ipsec_mbuf_metadata {
         struct ipsec_sa *sa;
         struct rte_crypto_op cop;
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index c75a5a1..2ec3e17 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -781,17 +781,6 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
         printf("\n");
 }

-struct sa_ctx {
-        void *satbl; /* pointer to array of rte_ipsec_sa objects*/
-        struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
-        union {
-                struct {
-                        struct rte_crypto_sym_xform a;
-                        struct rte_crypto_sym_xform b;
-                };
-        } xf[IPSEC_SA_MAX_ENTRIES];
-};
-
 static struct sa_ctx *
 sa_create(const char *name, int32_t socket_id)
 {
--
2.7.4