From: Volodymyr Fialko <vfialko@marvell.com>
To: <dev@dpdk.org>, Radu Nicolau, Akhil Goyal
Cc: Volodymyr Fialko
Subject: [PATCH v3 2/6] examples/ipsec-secgw: add queue for event crypto adapter
Date: Mon, 10 Oct 2022 18:56:25 +0200
Message-ID: <20221010165629.17744-3-vfialko@marvell.com>
In-Reply-To: <20221010165629.17744-1-vfialko@marvell.com>
References: <20221010123102.3962719-1-vfialko@marvell.com>
 <20221010165629.17744-1-vfialko@marvell.com>

Add separate event queue for event crypto adapter processing, to
resolve queue contention between new and already processed events.
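For context, a minimal sketch (not part of the patch) of what the reserved
crypto-adapter queue amounts to: one extra event queue id is set up through
the standard eventdev API with a higher priority than the Rx queues, so
crypto completion events are scheduled ahead of fresh traffic. The helper
name and queue ids below are hypothetical; only rte_event_queue_setup() and
the RTE_* constants are real DPDK API, and the schedule type in the actual
patch follows the user-configured type rather than the placeholder used here.

	#include <rte_eventdev.h>

	/* Hypothetical helper: configure the reserved crypto adapter queue. */
	static int
	setup_crypto_adapter_queue(uint8_t eventdev_id, uint8_t cpt_queue_id)
	{
		struct rte_event_queue_conf conf = {
			/* Higher priority than the Rx queues (NORMAL), so
			 * crypto completion events are scheduled first. */
			.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
			/* Placeholder; the patch uses the configured type. */
			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
			.nb_atomic_flows = 1024,
		};

		return rte_event_queue_setup(eventdev_id, cpt_queue_id, &conf);
	}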
Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 examples/ipsec-secgw/event_helper.c | 95 +++++++++++++++++++++--------
 examples/ipsec-secgw/event_helper.h |  2 +
 2 files changed, 71 insertions(+), 26 deletions(-)

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 30a1f253c8..90c5d716ff 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -19,6 +19,8 @@
 #define DEFAULT_VECTOR_SIZE	16
 #define DEFAULT_VECTOR_TMO	102400
 
+#define INVALID_EV_QUEUE_ID	-1
+
 static volatile bool eth_core_running;
 
 static int
@@ -153,11 +155,10 @@ eh_dev_has_burst_mode(uint8_t dev_id)
 }
 
 static int
-eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
+eh_set_nb_eventdev(struct eventmode_conf *em_conf)
 {
-	int lcore_count, nb_eventdev, nb_eth_dev, ret;
 	struct eventdev_params *eventdev_config;
-	struct rte_event_dev_info dev_info;
+	int nb_eventdev;
 
 	/* Get the number of event devices */
 	nb_eventdev = rte_event_dev_count();
@@ -172,6 +173,23 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 		return -EINVAL;
 	}
 
+	/* Set event dev id */
+	eventdev_config = &(em_conf->eventdev_config[0]);
+	eventdev_config->eventdev_id = 0;
+
+	/* Update the number of event devices */
+	em_conf->nb_eventdev = 1;
+
+	return 0;
+}
+
+static int
+eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
+{
+	int lcore_count, nb_eth_dev, ret;
+	struct eventdev_params *eventdev_config;
+	struct rte_event_dev_info dev_info;
+
 	/* Get the number of eth devs */
 	nb_eth_dev = rte_eth_dev_count_avail();
 	if (nb_eth_dev == 0) {
@@ -199,15 +217,30 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 	eventdev_config = &(em_conf->eventdev_config[0]);
 
 	/* Save number of queues & ports available */
-	eventdev_config->eventdev_id = 0;
-	eventdev_config->nb_eventqueue = dev_info.max_event_queues;
+	eventdev_config->nb_eventqueue = nb_eth_dev;
 	eventdev_config->nb_eventport = dev_info.max_event_ports;
 	eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
 
-	/* Check if there are more queues than required */
-	if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
-		/* One queue is reserved for Tx */
-		eventdev_config->nb_eventqueue = nb_eth_dev + 1;
+	/* One queue is reserved for Tx */
+	eventdev_config->tx_queue_id = INVALID_EV_QUEUE_ID;
+	if (!eventdev_config->all_internal_ports) {
+		if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) {
+			EH_LOG_ERR("Not enough event queues available");
+			return -EINVAL;
+		}
+		eventdev_config->tx_queue_id =
+			eventdev_config->nb_eventqueue++;
+	}
+
+	/* One queue is reserved for event crypto adapter */
+	eventdev_config->ev_cpt_queue_id = INVALID_EV_QUEUE_ID;
+	if (em_conf->enable_event_crypto_adapter) {
+		if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) {
+			EH_LOG_ERR("Not enough event queues available");
+			return -EINVAL;
+		}
+		eventdev_config->ev_cpt_queue_id =
+			eventdev_config->nb_eventqueue++;
 	}
 
 	/* Check if there are more ports than required */
@@ -216,9 +249,6 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 		eventdev_config->nb_eventport = lcore_count;
 	}
 
-	/* Update the number of event devices */
-	em_conf->nb_eventdev++;
-
 	return 0;
 }
 
@@ -247,15 +277,10 @@ eh_do_capability_check(struct eventmode_conf *em_conf)
 
 	/*
 	 * If Rx & Tx internal ports are supported by all event devices then
-	 * eth cores won't be required. Override the eth core mask requested
-	 * and decrement number of event queues by one as it won't be needed
-	 * for Tx.
+	 * eth cores won't be required. Override the eth core mask requested.
 	 */
-	if (all_internal_ports) {
+	if (all_internal_ports)
 		rte_bitmap_reset(em_conf->eth_core_mask);
-		for (i = 0; i < em_conf->nb_eventdev; i++)
-			em_conf->eventdev_config[i].nb_eventqueue--;
-	}
 }
 
 static int
@@ -372,6 +397,10 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
 		eventdev_config->nb_eventqueue :
 		eventdev_config->nb_eventqueue - 1;
 
+	/* Reserve one queue for event crypto adapter */
+	if (em_conf->enable_event_crypto_adapter)
+		nb_eventqueue--;
+
 	/*
 	 * Map all queues of eth device (port) to an event queue. If there
 	 * are more event queues than eth ports then create 1:1 mapping.
@@ -543,14 +572,18 @@ eh_validate_conf(struct eventmode_conf *em_conf)
 	 * and initialize the config with all ports & queues available
 	 */
 	if (em_conf->nb_eventdev == 0) {
+		ret = eh_set_nb_eventdev(em_conf);
+		if (ret != 0)
+			return ret;
+		eh_do_capability_check(em_conf);
 		ret = eh_set_default_conf_eventdev(em_conf);
 		if (ret != 0)
 			return ret;
+	} else {
+		/* Perform capability check for the selected event devices */
+		eh_do_capability_check(em_conf);
 	}
 
-	/* Perform capability check for the selected event devices */
-	eh_do_capability_check(em_conf);
-
 	/*
 	 * Check if links are specified. Else generate a default config for
 	 * the event ports used.
@@ -596,8 +629,8 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 	uint8_t *queue = NULL;
 	uint8_t eventdev_id;
 	int nb_eventqueue;
-	uint8_t i, j;
-	int ret;
+	int ret, j;
+	uint8_t i;
 
 	for (i = 0; i < nb_eventdev; i++) {
 
@@ -659,14 +692,24 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 			 * stage if event device does not have internal
 			 * ports. This will be an atomic queue.
 			 */
-			if (!eventdev_config->all_internal_ports &&
-			    j == nb_eventqueue-1) {
+			if (j == eventdev_config->tx_queue_id) {
 				eventq_conf.schedule_type =
 					RTE_SCHED_TYPE_ATOMIC;
 			} else {
 				eventq_conf.schedule_type =
 					em_conf->ext_params.sched_type;
 			}
+			/*
+			 * Give event crypto device's queue higher priority than Rx queues. This
+			 * will allow crypto events to be processed with highest priority.
+			 */
+			if (j == eventdev_config->ev_cpt_queue_id) {
+				eventq_conf.priority =
+					RTE_EVENT_DEV_PRIORITY_HIGHEST;
+			} else {
+				eventq_conf.priority =
+					RTE_EVENT_DEV_PRIORITY_NORMAL;
+			}
 
 			/* Set max atomic flows to 1024 */
 			eventq_conf.nb_atomic_flows = 1024;
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index 4b26dc8fc2..af5cfcf794 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -88,6 +88,8 @@ struct eventdev_params {
 	uint8_t nb_eventport;
 	uint8_t ev_queue_mode;
 	uint8_t all_internal_ports;
+	int tx_queue_id;
+	int ev_cpt_queue_id;
 };
 
 /**
-- 
2.25.1