From mboxrd@z Thu Jan 1 00:00:00 1970
From: Anoob Joseph <anoobj@marvell.com>
To: Akhil Goyal, Radu Nicolau
Cc: Anoob Joseph, Thomas Monjalon, Jerin Jacob, Narayana Prasad, dev@dpdk.org, Lukasz Bartosik
Subject: [dpdk-dev] [RFC PATCH 04/13] examples/ipsec-secgw: add Tx adapter support
Date: Wed, 9 Oct 2019 20:40:07 +0530
Message-ID: <1570633816-4706-5-git-send-email-anoobj@marvell.com>
In-Reply-To: <1570633816-4706-1-git-send-email-anoobj@marvell.com>
References: <1570633816-4706-1-git-send-email-anoobj@marvell.com>

Add Tx adapter support. The event helper init routine will initialize
the Tx adapter according to the configuration. If no Tx adapter config
is present, it will generate a default config.
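The expected usage from the application side is roughly as below. This
is an illustrative sketch only, not part of this patch: building the
eh_conf, running the worker loop and the example_event_mode_setup()
name are assumptions made for the example.

  #include <stdint.h>

  #include "event_helper.h"

  /*
   * Illustrative sketch (not part of this patch): set up event mode
   * devices and fetch the event queue used to submit packets for Tx.
   */
  static int
  example_event_mode_setup(struct eh_conf *conf, uint8_t eventdev_id,
          uint8_t *tx_ev_queue)
  {
      /* Initialize event devs, Rx adapters and (with this patch) Tx adapters */
      if (eh_devs_init(conf) != 0)
          return -1;

      /*
       * Event queue to which workers submit packets for Tx when the
       * event device has no internal port.
       */
      *tx_ev_queue = eh_get_tx_queue(conf, eventdev_id);

      /* ... launch workers; call eh_devs_uninit(conf) on teardown ... */
      return 0;
  }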
Signed-off-by: Anoob Joseph
Signed-off-by: Lukasz Bartosik
---
 examples/ipsec-secgw/event_helper.c | 375 +++++++++++++++++++++++++++++++++++-
 examples/ipsec-secgw/event_helper.h |  58 ++++++
 2 files changed, 424 insertions(+), 9 deletions(-)

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index b5417a0..7745fbb 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -2,31 +2,58 @@
  * Copyright (C) 2019 Marvell International Ltd.
  */
 
 #include
+#include
 #include
 #include
 
 #include "event_helper.h"
 
+static int
+eh_get_enabled_cores(struct rte_bitmap *eth_core_mask)
+{
+	int i;
+	int count = 0;
+
+	RTE_LCORE_FOREACH(i) {
+		/* Check if this core is enabled in core_mask */
+		if (rte_bitmap_get(eth_core_mask, i)) {
+			/* We have an enabled core */
+			count++;
+		}
+	}
+	return count;
+}
+
 static inline unsigned int
-eh_get_next_rx_core(struct eventmode_conf *em_conf,
-		unsigned int prev_core)
+eh_get_next_eth_core(struct eventmode_conf *em_conf)
 {
 	unsigned int next_core;
+	static unsigned int prev_core = -1;
+
+	/*
+	 * Make sure we have at least one eth core running, else the following
+	 * logic would lead to an infinite loop.
+	 */
+	if (eh_get_enabled_cores(em_conf->eth_core_mask) == 0) {
+		EH_LOG_ERR("No enabled eth core found");
+		return RTE_MAX_LCORE;
+	}
 
 get_next_core:
 	/* Get the next core */
-	next_core = rte_get_next_lcore(prev_core, 0, 0);
+	next_core = rte_get_next_lcore(prev_core, 0, 1);
 
 	/* Check if we have reached max lcores */
 	if (next_core == RTE_MAX_LCORE)
 		return next_core;
 
-	/* Only some cores would be marked as rx cores. Skip others */
-	if (!(rte_bitmap_get(em_conf->eth_core_mask, next_core))) {
-		prev_core = next_core;
+	/* Update prev_core */
+	prev_core = next_core;
+
+	/* Only some cores would be marked as eth cores. Skip others */
+	if (!(rte_bitmap_get(em_conf->eth_core_mask, next_core)))
 		goto get_next_core;
-	}
 
 	return next_core;
 }
@@ -45,7 +72,7 @@ eh_get_next_active_core(struct eventmode_conf *em_conf,
 	if (next_core == RTE_MAX_LCORE)
 		return next_core;
 
-	/* Some cores would be reserved as rx cores. Skip them */
+	/* Some cores would be reserved as eth cores. Skip them */
 	if (rte_bitmap_get(em_conf->eth_core_mask, next_core)) {
 		prev_core = next_core;
 		goto get_next_core;
@@ -54,6 +81,23 @@ eh_get_next_active_core(struct eventmode_conf *em_conf,
 	return next_core;
 }
 
+static struct eventdev_params *
+eh_get_eventdev_params(struct eventmode_conf *em_conf,
+		uint8_t eventdev_id)
+{
+	int i;
+
+	for (i = 0; i < em_conf->nb_eventdev; i++) {
+		if (em_conf->eventdev_config[i].eventdev_id == eventdev_id)
+			break;
+	}
+
+	/* No match */
+	if (i == em_conf->nb_eventdev)
+		return NULL;
+
+	return &(em_conf->eventdev_config[i]);
+}
 static int
 eh_validate_user_params(struct eventmode_conf *em_conf)
 {
@@ -224,7 +268,7 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
 	/* Set adapter conf */
 	adapter->eventdev_id = eventdev_id;
 	adapter->adapter_id = adapter_id;
-	adapter->rx_core_id = eh_get_next_rx_core(em_conf, -1);
+	adapter->rx_core_id = eh_get_next_eth_core(em_conf);
 
 	/*
 	 * All queues of one eth device (port) will be mapped to one event
@@ -271,6 +315,100 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
 }
 
 static int
+eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
+{
+	int nb_eth_dev;
+	int eventdev_id;
+	int adapter_id;
+	int i;
+	int conn_id;
+	struct eventdev_params *eventdev_config;
+	struct tx_adapter_conf *tx_adapter;
+	struct tx_adapter_connection_info *conn;
+
+	/*
+	 * Create one Tx adapter with all eth queues mapped to event queues
+	 * 1:1.
+	 */
+
+	if (em_conf->nb_eventdev == 0) {
+		EH_LOG_ERR("No event devs registered");
+		return -EINVAL;
+	}
+
+	/* Get the number of eth devs */
+	nb_eth_dev = rte_eth_dev_count_avail();
+
+	/* Use the first event dev */
+	eventdev_config = &(em_conf->eventdev_config[0]);
+
+	/* Get eventdev ID */
+	eventdev_id = eventdev_config->eventdev_id;
+	adapter_id = 0;
+
+	/* Get adapter conf */
+	tx_adapter = &(em_conf->tx_adapter[adapter_id]);
+
+	/* Set adapter conf */
+	tx_adapter->eventdev_id = eventdev_id;
+	tx_adapter->adapter_id = adapter_id;
+
+	/* TODO: Tx core is required only when internal port is not present */
+
+	tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);
+
+	/*
+	 * The application needs one event queue per adapter for submitting
+	 * packets for Tx. Reserve the last available queue and decrement
+	 * the total number of available event queues accordingly.
+	 */
+
+	/* Queue numbers start at 0 */
+	tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;
+
+	/* Update the number of event queues available in eventdev */
+	eventdev_config->nb_eventqueue--;
+
+	/*
+	 * All Tx queues of the eth device (port) will be mapped to the event
+	 * device.
+	 */
+
+	/* Set defaults for connections */
+
+	/*
+	 * One eth device (port) would be one connection. All Tx queues of
+	 * the device would be mapped to the Tx adapter.
+	 */
+
+	for (i = 0; i < nb_eth_dev; i++) {
+
+		/* Use only the ports enabled */
+		if ((em_conf->eth_portmask & (1 << i)) == 0)
+			continue;
+
+		/* Get the connection id */
+		conn_id = tx_adapter->nb_connections;
+
+		/* Get the connection */
+		conn = &(tx_adapter->conn[conn_id]);
+
+		/* Add ethdev to connections */
+		conn->ethdev_id = i;
+
+		/* Add all eth tx queues to adapter */
+		conn->ethdev_tx_qid = -1;
+
+		/* Update number of connections */
+		tx_adapter->nb_connections++;
+	}
+
+	/* We have set up one adapter */
+	em_conf->nb_tx_adapter = 1;
+	return 0;
+}
+
+static int
 eh_validate_conf(struct eventmode_conf *em_conf)
 {
 	int ret;
@@ -310,6 +448,16 @@ eh_validate_conf(struct eventmode_conf *em_conf)
 		return ret;
 	}
 
+	/*
+	 * Check if Tx adapters are specified. Else generate a default conf
+	 * with one Tx adapter.
+	 */
+	if (em_conf->nb_tx_adapter == 0) {
+		ret = eh_set_default_conf_tx_adapter(em_conf);
+		if (ret != 0)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -579,6 +727,147 @@ eh_initialize_rx_adapter(struct eventmode_conf *em_conf)
 	return 0;
 }
 
+static int
+eh_tx_adapter_configure(struct eventmode_conf *em_conf,
+		struct tx_adapter_conf *adapter)
+{
+	int ret, j;
+	uint8_t tx_port_id = 0;
+	uint8_t eventdev_id;
+	uint32_t service_id;
+	struct rte_event_port_conf port_conf = {0};
+	struct rte_event_dev_info evdev_default_conf = {0};
+	struct tx_adapter_connection_info *conn;
+	struct eventdev_params *eventdev_config;
+
+	/* Get event dev ID */
+	eventdev_id = adapter->eventdev_id;
+
+	/* Get event device conf */
+	eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);
+
+	/* Create Tx adapter */
+
+	/* Get default configuration of event dev */
+	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
+	if (ret < 0) {
+		EH_LOG_ERR("Error in getting event device info[devID:%d]",
+			eventdev_id);
+		return ret;
+	}
+
+	/* Setup port conf */
+	port_conf.new_event_threshold =
+			evdev_default_conf.max_num_events;
+	port_conf.dequeue_depth =
+			evdev_default_conf.max_event_port_dequeue_depth;
+	port_conf.enqueue_depth =
+			evdev_default_conf.max_event_port_enqueue_depth;
+
+	/* Create Tx adapter */
+	ret = rte_event_eth_tx_adapter_create(adapter->adapter_id,
+			adapter->eventdev_id,
+			&port_conf);
+	if (ret < 0) {
+		EH_LOG_ERR("Error in Tx adapter creation");
+		return ret;
+	}
+
+	/* Setup various connections in the adapter */
+	for (j = 0; j < adapter->nb_connections; j++) {
+
+		/* Get connection */
+		conn = &(adapter->conn[j]);
+
+		/* Add queue to the adapter */
+		ret = rte_event_eth_tx_adapter_queue_add(
+				adapter->adapter_id,
+				conn->ethdev_id,
+				conn->ethdev_tx_qid);
+		if (ret < 0) {
+			EH_LOG_ERR("Error in adding eth queue in Tx adapter");
+			return ret;
+		}
+	}
+
+	/* Get event port used by the adapter */
+	ret = rte_event_eth_tx_adapter_event_port_get(
+			adapter->adapter_id,
+			&tx_port_id);
+	if (ret) {
+		EH_LOG_ERR("Failed to get Tx adapter port ID");
+		return ret;
+	}
+
+	/*
+	 * TODO: the event queue for the Tx adapter is required only if the
+	 * INTERNAL PORT is not present.
+	 */
+
+	/*
+	 * The Tx event queue is reserved for the Tx adapter. Need to unlink
+	 * this queue from all other ports.
+	 *
+	 */
+	for (j = 0; j < eventdev_config->nb_eventport; j++) {
+		rte_event_port_unlink(eventdev_id, j,
+				&(adapter->tx_ev_queue), 1);
+	}
+
+	ret = rte_event_port_link(
+			eventdev_id,
+			tx_port_id,
+			&(adapter->tx_ev_queue),
+			NULL, 1);
+	if (ret != 1) {
+		EH_LOG_ERR("Failed to link event queue to port");
+		return ret;
+	}
+
+	/* Get the service ID used by Tx adapter */
+	ret = rte_event_eth_tx_adapter_service_id_get(adapter->adapter_id,
+			&service_id);
+	if (ret != -ESRCH && ret != 0) {
+		EH_LOG_ERR("Error getting service ID used by adapter");
+		return ret;
+	}
+
+	/*
+	 * TODO:
+	 * The Tx core will invoke the service when required. The runstate
+	 * check is not required.
+	 *
+	 */
+	rte_service_set_runstate_mapped_check(service_id, 0);
+
+	/* Start adapter */
+	ret = rte_event_eth_tx_adapter_start(adapter->adapter_id);
+	if (ret) {
+		EH_LOG_ERR("Error in starting Tx adapter");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+eh_initialize_tx_adapter(struct eventmode_conf *em_conf)
+{
+	int i, ret;
+	struct tx_adapter_conf *adapter;
+
+	/* Configure Tx adapters */
+	for (i = 0; i < em_conf->nb_tx_adapter; i++) {
+		adapter = &(em_conf->tx_adapter[i]);
+		ret = eh_tx_adapter_configure(em_conf, adapter);
+		if (ret < 0) {
+			EH_LOG_ERR("Tx adapter configuration failed");
+			return ret;
+		}
+	}
+	return 0;
+}
+
 int32_t
 eh_devs_init(struct eh_conf *mode_conf)
 {
@@ -631,6 +920,11 @@ eh_devs_init(struct eh_conf *mode_conf)
 	if (ret != 0)
 		return ret;
 
+	/* Setup Tx adapter */
+	ret = eh_initialize_tx_adapter(em_conf);
+	if (ret != 0)
+		return ret;
+
 	/* Start eth devices after setting up adapter */
 	RTE_ETH_FOREACH_DEV(portid) {
 
@@ -713,5 +1007,68 @@ eh_devs_uninit(struct eh_conf *mode_conf)
 		}
 	}
 
+	/* Stop and release tx adapters */
+	for (i = 0; i < em_conf->nb_tx_adapter; i++) {
+
+		id = em_conf->tx_adapter[i].adapter_id;
+		ret = rte_event_eth_tx_adapter_stop(id);
+		if (ret < 0) {
+			EH_LOG_ERR("Error stopping tx adapter %d", id);
+			return ret;
+		}
+
+		for (j = 0; j < em_conf->tx_adapter[i].nb_connections; j++) {
+
+			ret = rte_event_eth_tx_adapter_queue_del(id,
+				em_conf->tx_adapter[i].conn[j].ethdev_id, -1);
+			if (ret < 0) {
+				EH_LOG_ERR(
+					"Error deleting tx adapter queues %d",
+					id);
+				return ret;
+			}
+		}
+
+		ret = rte_event_eth_tx_adapter_free(id);
+		if (ret < 0) {
+			EH_LOG_ERR("Error freeing tx adapter %d", id);
+			return ret;
+		}
+	}
+
 	return 0;
 }
+
+uint8_t
+eh_get_tx_queue(struct eh_conf *mode_conf, uint8_t eventdev_id)
+{
+	struct eventdev_params *eventdev_config;
+	struct eventmode_conf *em_conf;
+
+	if (mode_conf == NULL) {
+		EH_LOG_ERR("Invalid conf");
+		return -EINVAL;
+	}
+
+	if (mode_conf->mode_params == NULL) {
+		EH_LOG_ERR("Invalid mode params");
+		return -EINVAL;
+	}
+
+	/* Get eventmode conf */
+	em_conf = (struct eventmode_conf *)(mode_conf->mode_params);
+
+	/* Get event device conf */
+	eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);
+
+	if (eventdev_config == NULL) {
+		EH_LOG_ERR("Error reading eventdev conf");
+		return -EINVAL;
+	}
+
+	/*
+	 * The last queue would be reserved to be used as atomic queue for the
+	 * last stage (eth packet tx stage)
+	 */
+	return eventdev_config->nb_eventqueue - 1;
+}
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index 4233b42..01e2aca 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -10,6 +10,16 @@ extern "C" {
 
 #include
 
+/**
+ * Flag to indicate that the event device used by all adapters is the same
+ */
+#define EH_TX_EV_LINK_COMMON_EVENT_DEV		(1 << 0)
+
+/**
+ * Flag to indicate that the event queue used by all adapters is the same
+ */
+#define EH_TX_EV_LINK_COMMON_EVENT_QUEUE	(1 << 1)
+
 #define RTE_LOGTYPE_EH RTE_LOGTYPE_USER4
 
 #define EH_LOG_ERR(...) \
@@ -23,9 +33,15 @@ extern "C" {
 /* Max Rx adapters supported */
 #define EVENT_MODE_MAX_RX_ADAPTERS RTE_EVENT_MAX_DEVS
 
+/* Max Tx adapters supported */
+#define EVENT_MODE_MAX_TX_ADAPTERS RTE_EVENT_MAX_DEVS
+
 /* Max Rx adapter connections */
 #define EVENT_MODE_MAX_CONNECTIONS_PER_ADAPTER 16
 
+/* Max Tx adapter connections */
+#define EVENT_MODE_MAX_CONNECTIONS_PER_TX_ADAPTER 16
+
 /* Max event queues supported per event device */
 #define EVENT_MODE_MAX_EVENT_QUEUES_PER_DEV RTE_EVENT_MAX_QUEUES_PER_DEV
 
@@ -33,6 +49,9 @@ extern "C" {
 #define EVENT_MODE_MAX_LCORE_LINKS \
 	(EVENT_MODE_MAX_EVENT_DEVS * EVENT_MODE_MAX_EVENT_QUEUES_PER_DEV)
 
+/* Max adapters that one Tx core can handle */
+#define EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE EVENT_MODE_MAX_TX_ADAPTERS
+
 /**
  * Packet transfer mode of the application
  */
@@ -80,6 +99,23 @@ struct rx_adapter_conf {
 			conn[EVENT_MODE_MAX_CONNECTIONS_PER_ADAPTER];
 };
 
+/* Tx adapter connection info */
+struct tx_adapter_connection_info {
+	uint8_t ethdev_id;
+	int32_t ethdev_tx_qid;
+};
+
+/* Tx adapter conf */
+struct tx_adapter_conf {
+	int32_t eventdev_id;
+	int32_t adapter_id;
+	uint32_t tx_core_id;
+	uint8_t nb_connections;
+	struct tx_adapter_connection_info
+			conn[EVENT_MODE_MAX_CONNECTIONS_PER_TX_ADAPTER];
+	uint8_t tx_ev_queue;
+};
+
 /* Eventmode conf data */
 struct eventmode_conf {
 	int nb_eventdev;
@@ -90,6 +126,10 @@ struct eventmode_conf {
 	/**< No of Rx adapters */
 	struct rx_adapter_conf rx_adapter[EVENT_MODE_MAX_RX_ADAPTERS];
 	/**< Rx adapter conf */
+	uint8_t nb_tx_adapter;
+	/**< No of Tx adapters */
+	struct tx_adapter_conf tx_adapter[EVENT_MODE_MAX_TX_ADAPTERS];
+	/**< Tx adapter conf */
 	uint8_t nb_link;
 	/**< No of links */
 	struct eh_event_link_info
@@ -170,6 +210,24 @@ eh_devs_init(struct eh_conf *mode_conf);
 int32_t
 eh_devs_uninit(struct eh_conf *mode_conf);
 
+/**
+ * Get eventdev Tx queue
+ *
+ * If the application uses an event device which does not support internal
+ * port, then it needs to submit events to a Tx queue before transmission.
+ * This Tx queue will be created internally by the eventmode helper, and the
+ * application will need its queue ID when it runs the execution loop.
+ *
+ * @param mode_conf
+ *   Configuration of the mode in which app is doing packet handling
+ * @param eventdev_id
+ *   Event device ID
+ * @return
+ *   Tx queue ID
+ */
+uint8_t
+eh_get_tx_queue(struct eh_conf *mode_conf, uint8_t eventdev_id);
+
 #ifdef __cplusplus
 }
 #endif
-- 
2.7.4