From: Volodymyr Fialko <vfialko@marvell.com>
To: dev@dpdk.org, Akhil Goyal
Cc: Volodymyr Fialko
Subject: [PATCH] app/test: add event inline security tests
Date: Mon, 30 May 2022 15:39:06 +0200
Message-ID: <20220530133906.1139270-1-vfialko@marvell.com>

Add the ability to run the inline security tests through the event API
(rte_event_eth_tx_adapter_enqueue/rte_event_dequeue_burst). The new
test command, event_inline_ipsec_autotest, runs the same list of test
cases as inline_ipsec_autotest, but the packets travel through the
eventdev instead of the plain ethdev rx/tx burst API.
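For context, this is the shape of the event-mode datapath the tests use,
reduced to a minimal sketch (dev_id/ev_port and the sketch_* helper names
are illustrative placeholders; the eventdev, event port and both adapters
are assumed to be set up already, as the testsuite setup below does):

  #include <rte_eventdev.h>
  #include <rte_event_eth_tx_adapter.h>
  #include <rte_mbuf.h>

  /* TX: wrap an mbuf in an event and hand it to the tx adapter,
   * which forwards it to the ethdev for inline IPsec processing. */
  static uint16_t
  sketch_tx_one(uint8_t dev_id, uint8_t ev_port, struct rte_mbuf *m)
  {
  	struct rte_event ev = {0};

  	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
  	ev.mbuf = m;
  	return rte_event_eth_tx_adapter_enqueue(dev_id, ev_port, &ev, 1, 0);
  }

  /* RX: the rx adapter injects received packets back as ETHDEV events. */
  static struct rte_mbuf *
  sketch_rx_one(uint8_t dev_id, uint8_t ev_port)
  {
  	struct rte_event ev;

  	if (rte_event_dequeue_burst(dev_id, ev_port, &ev, 1, 0) == 0)
  		return NULL;
  	if (ev.event_type != RTE_EVENT_TYPE_ETHDEV)
  		return NULL;
  	return ev.mbuf;
  }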
Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 app/test/test_security_inline_proto.c | 390 +++++++++++++++++++++++++-
 1 file changed, 375 insertions(+), 15 deletions(-)

diff --git a/app/test/test_security_inline_proto.c b/app/test/test_security_inline_proto.c
index 82d27550f4..1dce722f24 100644
--- a/app/test/test_security_inline_proto.c
+++ b/app/test/test_security_inline_proto.c
@@ -7,6 +7,9 @@
 #include
 #include
+#include
+#include
+#include
 #include
 #include
@@ -21,6 +24,12 @@ test_inline_ipsec(void)
 	return TEST_SKIPPED;
 }

+static int
+test_event_inline_ipsec(void)
+{
+	printf("Event inline ipsec not supported on Windows, skipping test\n");
+	return TEST_SKIPPED;
+}
+
 #else

 #define NB_ETHPORTS_USED 1
@@ -93,7 +102,12 @@ static struct rte_eth_txconf tx_conf = {
 	.tx_rs_thresh = 32, /* Use PMD default values */
 };

-uint16_t port_id;
+static uint16_t port_id;
+static uint8_t eventdev_id;
+static uint8_t rx_adapter_id;
+static uint8_t tx_adapter_id;
+
+static bool event_mode_enabled;

 static uint64_t link_mbps;
@@ -885,6 +899,51 @@ test_ipsec_with_reassembly(struct reassembly_vector *vector,
 	return ret;
 }

+static int
+event_tx_burst(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct rte_event ev = {0};
+	int i, nb_sent = 0;
+
+	/* Convert packets to events */
+	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
+	for (i = 0; i < nb_pkts; i++) {
+		ev.mbuf = tx_pkts[i];
+		nb_sent += rte_event_eth_tx_adapter_enqueue(
+				eventdev_id, port_id, &ev, 1, 0);
+	}
+
+	return nb_sent;
+}
+
+static int
+event_rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts_to_rx)
+{
+	int nb_ev, nb_rx = 0, j = 0;
+	const int ms_per_pkt = 3;
+	struct rte_event ev;
+
+	do {
+		nb_ev = rte_event_dequeue_burst(eventdev_id, port_id,
+				&ev, 1, 0);
+
+		if (nb_ev == 0) {
+			rte_delay_ms(1);
+			continue;
+		}
+
+		/* Get packet from event */
+		if (ev.event_type != RTE_EVENT_TYPE_ETHDEV) {
+			printf("Unsupported event type: %i\n",
+				ev.event_type);
+			continue;
+		}
+		rx_pkts[nb_rx++] = ev.mbuf;
+	} while (j++ < (nb_pkts_to_rx * ms_per_pkt) && nb_rx < nb_pkts_to_rx);
+
+	return nb_rx;
+}
+
 static int
 test_ipsec_inline_proto_process(struct ipsec_test_data *td,
 		struct ipsec_test_data *res_d,
@@ -958,9 +1017,13 @@ test_ipsec_inline_proto_process(struct ipsec_test_data *td,
 		}
 	}
 	/* Send packet to ethdev for inline IPsec processing. */
-	nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);
+	if (event_mode_enabled)
+		nb_sent = event_tx_burst(tx_pkts_burst, nb_pkts);
+	else
+		nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);
+
 	if (nb_sent != nb_pkts) {
-		printf("\nUnable to TX %d packets", nb_pkts);
+		printf("\nUnable to TX %d packets, sent: %i", nb_pkts, nb_sent);
 		for ( ; nb_sent < nb_pkts; nb_sent++)
 			rte_pktmbuf_free(tx_pkts_burst[nb_sent]);
 		ret = TEST_FAILED;
@@ -970,17 +1033,22 @@ test_ipsec_inline_proto_process(struct ipsec_test_data *td,
 	rte_pause();

 	/* Receive back packet on loopback interface. */
-	do {
-		rte_delay_ms(1);
-		nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
-				nb_sent - nb_rx);
-		if (nb_rx >= nb_sent)
-			break;
-	} while (j++ < 5 || nb_rx == 0);
+	if (event_mode_enabled)
+		nb_rx = event_rx_burst(rx_pkts_burst, nb_sent);
+	else
+		do {
+			rte_delay_ms(1);
+			nb_rx += rte_eth_rx_burst(port_id, 0,
+					&rx_pkts_burst[nb_rx],
+					nb_sent - nb_rx);
+			if (nb_rx >= nb_sent)
+				break;
+		} while (j++ < 5 || nb_rx == 0);

 	if (nb_rx != nb_sent) {
-		printf("\nUnable to RX all %d packets", nb_sent);
-		while (--nb_rx)
+		printf("\nUnable to RX all %d packets, received(%i)",
+				nb_sent, nb_rx);
+		while (--nb_rx >= 0)
 			rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
 		ret = TEST_FAILED;
 		goto out;
@@ -1380,6 +1448,289 @@ inline_ipsec_testsuite_teardown(void)
 			printf("rte_eth_dev_reset: err=%s, port=%u\n",
 				rte_strerror(-ret), port_id);
 	}
+	rte_free(tx_pkts_burst);
+	rte_free(rx_pkts_burst);
+}
+
+static int
+event_inline_ipsec_testsuite_setup(void)
+{
+	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
+	struct rte_event_dev_info evdev_default_conf = {0};
+	struct rte_event_dev_config eventdev_conf = {0};
+	struct rte_event_queue_conf eventq_conf = {0};
+	struct rte_event_port_conf ev_port_conf = {0};
+	const uint16_t nb_txd = 1024, nb_rxd = 1024;
+	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
+	uint8_t ev_queue_id = 0, tx_queue_id = 0;
+	int nb_eventqueue = 1, nb_eventport = 1;
+	const int all_queues = -1;
+	uint32_t caps = 0;
+	uint16_t nb_ports;
+	int ret;
+
+	printf("Start event inline IPsec test.\n");
+
+	nb_ports = rte_eth_dev_count_avail();
+	if (nb_ports == 0) {
+		printf("Test requires: 1 port, available: 0\n");
+		return TEST_SKIPPED;
+	}
+
+	init_mempools(NB_MBUF);
+
+	if (tx_pkts_burst == NULL) {
+		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
+				MAX_TRAFFIC_BURST,
+				sizeof(void *),
+				RTE_CACHE_LINE_SIZE);
+		if (!tx_pkts_burst)
+			return -1;
+
+		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
+				MAX_TRAFFIC_BURST,
+				sizeof(void *),
+				RTE_CACHE_LINE_SIZE);
+		if (!rx_pkts_burst)
+			return -1;
+	}
+
+	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
+
+	/* Configuring port 0 for the test is enough */
+	port_id = 0;
+	/* Port configure */
+	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
+			nb_tx_queue, &port_conf);
+	if (ret < 0) {
+		printf("Cannot configure device: err=%d, port=%d\n",
+			ret, port_id);
+		return ret;
+	}
+
+	/* Tx queue setup */
+	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
+			SOCKET_ID_ANY, &tx_conf);
+	if (ret < 0) {
+		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
+			ret, port_id);
+		return ret;
+	}
+
+	/* Rx queue setup */
+	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
+			&rx_conf, mbufpool);
+	if (ret < 0) {
+		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
+			ret, port_id);
+		return ret;
+	}
+
+	/* Setup eventdev */
+	eventdev_id = 0;
+	rx_adapter_id = 0;
+	tx_adapter_id = 0;
+
+	/* Get default conf of eventdev */
+	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
+	if (ret < 0) {
+		printf("Error in getting event device info[devID:%d]\n",
+			eventdev_id);
+		return ret;
+	}
+
+	/* Get Tx adapter capabilities */
+	ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, port_id, &caps);
+	if (ret < 0) {
+		printf("Failed to get event device %d eth tx adapter"
+			" capabilities for port %d\n",
+			eventdev_id, port_id);
+		return ret;
+	}
+	if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
+		tx_queue_id = nb_eventqueue++;
+
+	eventdev_conf.nb_events_limit =
+			evdev_default_conf.max_num_events;
+	eventdev_conf.nb_event_queue_flows =
+			evdev_default_conf.max_event_queue_flows;
+	eventdev_conf.nb_event_port_dequeue_depth =
+			evdev_default_conf.max_event_port_dequeue_depth;
+	eventdev_conf.nb_event_port_enqueue_depth =
+			evdev_default_conf.max_event_port_enqueue_depth;
+
+	eventdev_conf.nb_event_queues = nb_eventqueue;
+	eventdev_conf.nb_event_ports = nb_eventport;
+
+	/* Configure event device */
+	ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
+	if (ret < 0) {
+		printf("Error in configuring event device\n");
+		return ret;
+	}
+
+	/* Configure event queue */
+	eventq_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
+	eventq_conf.nb_atomic_flows = 1024;
+	eventq_conf.nb_atomic_order_sequences = 1024;
+
+	/* Setup the queue */
+	ret = rte_event_queue_setup(eventdev_id, ev_queue_id, &eventq_conf);
+	if (ret < 0) {
+		printf("Failed to setup event queue %d\n", ret);
+		return ret;
+	}
+
+	/* Configure event port */
+	ret = rte_event_port_setup(eventdev_id, port_id, NULL);
+	if (ret < 0) {
+		printf("Failed to setup event port %d\n", ret);
+		return ret;
+	}
+
+	/* Make event queue - event port link */
+	ret = rte_event_port_link(eventdev_id, port_id, NULL, NULL, 1);
+	if (ret < 0) {
+		printf("Failed to link event port %d\n", ret);
+		return ret;
+	}
+
+	/* Setup port conf */
+	ev_port_conf.new_event_threshold = 1200;
+	ev_port_conf.dequeue_depth =
+			evdev_default_conf.max_event_port_dequeue_depth;
+	ev_port_conf.enqueue_depth =
+			evdev_default_conf.max_event_port_enqueue_depth;
+
+	/* Create Rx adapter */
+	ret = rte_event_eth_rx_adapter_create(rx_adapter_id, eventdev_id,
+			&ev_port_conf);
+	if (ret < 0) {
+		printf("Failed to create rx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Setup queue conf */
+	queue_conf.ev.queue_id = ev_queue_id;
+	queue_conf.ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
+	queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;
+
+	/* Add queue to the adapter */
+	ret = rte_event_eth_rx_adapter_queue_add(rx_adapter_id, port_id,
+			all_queues, &queue_conf);
+	if (ret < 0) {
+		printf("Failed to add eth queue to rx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Start rx adapter */
+	ret = rte_event_eth_rx_adapter_start(rx_adapter_id);
+	if (ret < 0) {
+		printf("Failed to start rx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Create tx adapter */
+	ret = rte_event_eth_tx_adapter_create(tx_adapter_id, eventdev_id,
+			&ev_port_conf);
+	if (ret < 0) {
+		printf("Failed to create tx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Add queue to the adapter */
+	ret = rte_event_eth_tx_adapter_queue_add(tx_adapter_id, port_id,
+			all_queues);
+	if (ret < 0) {
+		printf("Failed to add eth queue to tx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Setup Tx queue & port */
+	if (tx_queue_id) {
+		/* Setup the queue */
+		ret = rte_event_queue_setup(eventdev_id, tx_queue_id,
+				&eventq_conf);
+		if (ret < 0) {
+			printf("Failed to setup tx event queue %d\n", ret);
+			return ret;
+		}
+		/* Link Tx event queue to Tx port */
+		ret = rte_event_port_link(eventdev_id, port_id,
+				&tx_queue_id, NULL, 1);
+		if (ret != 1) {
+			printf("Failed to link event queue to port\n");
+			return ret;
+		}
+	}
+
+	/* Start tx adapter */
+	ret = rte_event_eth_tx_adapter_start(tx_adapter_id);
+	if (ret < 0) {
+		printf("Failed to start tx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Start eventdev */
+	ret = rte_event_dev_start(eventdev_id);
+	if (ret < 0) {
+		printf("Failed to start event device %d\n", ret);
+		return ret;
+	}
+
+	event_mode_enabled = true;
+	test_ipsec_alg_list_populate();
+
+	return 0;
+}
+
+static void
+event_inline_ipsec_testsuite_teardown(void)
+{
+	uint16_t portid;
+	int ret;
+
+	event_mode_enabled = false;
+
+	/* Stop and release rx adapter */
+	ret = rte_event_eth_rx_adapter_stop(rx_adapter_id);
+	if (ret < 0)
+		printf("Failed to stop rx adapter %d\n", ret);
+	ret = rte_event_eth_rx_adapter_queue_del(rx_adapter_id, port_id, -1);
+	if (ret < 0)
+		printf("Failed to remove rx adapter queues %d\n", ret);
+	ret = rte_event_eth_rx_adapter_free(rx_adapter_id);
+	if (ret < 0)
+		printf("Failed to free rx adapter %d\n", ret);
+
+	/* Stop and release tx adapter */
+	ret = rte_event_eth_tx_adapter_stop(tx_adapter_id);
+	if (ret < 0)
+		printf("Failed to stop tx adapter %d\n", ret);
+	ret = rte_event_eth_tx_adapter_queue_del(tx_adapter_id, port_id, -1);
+	if (ret < 0)
+		printf("Failed to remove tx adapter queues %d\n", ret);
+	ret = rte_event_eth_tx_adapter_free(tx_adapter_id);
+	if (ret < 0)
+		printf("Failed to free tx adapter %d\n", ret);
+
+	/* Stop and release event devices */
+	rte_event_dev_stop(eventdev_id);
+	ret = rte_event_dev_close(eventdev_id);
+	if (ret < 0)
+		printf("Failed to close event dev %d, %d\n", eventdev_id, ret);
+
+	/* Port tear down */
+	RTE_ETH_FOREACH_DEV(portid) {
+		ret = rte_eth_dev_reset(portid);
+		if (ret != 0)
+			printf("rte_eth_dev_reset: err=%s, port=%u\n",
+				rte_strerror(-ret), portid);
+	}
+
+	rte_free(tx_pkts_burst);
+	rte_free(rx_pkts_burst);
 }

 static int
@@ -1920,7 +2271,7 @@ test_ipsec_inline_pkt_replay(const void *test_data, const uint64_t esn[],
 	flags.antireplay = true;

 	for (i = 0; i < nb_pkts; i++) {
-		memcpy(&td_outb[i], test_data, sizeof(td_outb));
+		memcpy(&td_outb[i], test_data, sizeof(td_outb[0]));
 		td_outb[i].ipsec_xform.options.iv_gen_disable = 1;
 		td_outb[i].ipsec_xform.replay_win_sz = winsz;
 		td_outb[i].ipsec_xform.options.esn = esn_en;
@@ -2054,8 +2405,6 @@ test_ipsec_inline_proto_pkt_esn_antireplay4096(const void *test_data)

 static struct unit_test_suite inline_ipsec_testsuite = {
 	.suite_name = "Inline IPsec Ethernet Device Unit Test Suite",
-	.setup = inline_ipsec_testsuite_setup,
-	.teardown = inline_ipsec_testsuite_teardown,
 	.unit_test_cases = {
 		TEST_CASE_NAMED_WITH_DATA(
 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
@@ -2374,9 +2723,20 @@ static struct unit_test_suite inline_ipsec_testsuite = {
 static int
 test_inline_ipsec(void)
 {
+	inline_ipsec_testsuite.setup = inline_ipsec_testsuite_setup;
+	inline_ipsec_testsuite.teardown = inline_ipsec_testsuite_teardown;
+	return unit_test_suite_runner(&inline_ipsec_testsuite);
+}
+
+static int
+test_event_inline_ipsec(void)
+{
+	inline_ipsec_testsuite.setup = event_inline_ipsec_testsuite_setup;
+	inline_ipsec_testsuite.teardown = event_inline_ipsec_testsuite_teardown;
 	return unit_test_suite_runner(&inline_ipsec_testsuite);
 }

 #endif /* !RTE_EXEC_ENV_WINDOWS */

 REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec);
+REGISTER_TEST_COMMAND(event_inline_ipsec_autotest, test_event_inline_ipsec);
--
2.25.1
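A note for reviewers on the one non-obvious step in the setup above:
whether the tx adapter needs its own event queue depends on the PMD's
capabilities. A condensed sketch of that branch (identifiers as in the
patch, error handling elided):

  /* Without RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT the tx adapter
   * is serviced through a dedicated event queue, which must be created
   * and linked to the event port; with the internal-port capability the
   * adapter consumes events passed to rte_event_eth_tx_adapter_enqueue()
   * directly, and no extra queue is needed. */
  uint32_t caps = 0;

  rte_event_eth_tx_adapter_caps_get(eventdev_id, port_id, &caps);
  if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
  	tx_queue_id = nb_eventqueue++;	/* reserve an extra event queue */

  /* ... after rte_event_dev_configure() and adapter creation ... */
  if (tx_queue_id) {
  	rte_event_queue_setup(eventdev_id, tx_queue_id, &eventq_conf);
  	rte_event_port_link(eventdev_id, port_id, &tx_queue_id, NULL, 1);
  }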
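For reference, the new suite is run from the dpdk-test application in the
same way as the existing one (EAL arguments are platform specific; the
ethdev must support inline IPsec and an eventdev with rx/tx adapter
support must be available):

  RTE>> event_inline_ipsec_autotest

The existing inline_ipsec_autotest continues to exercise the plain
rte_eth_tx_burst()/rte_eth_rx_burst() path.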