From:
To: , Abhinandan Gujjar , "Jay Jayatheerthan"
CC: , Pavan Nikhilesh
Date: Wed, 6 Oct 2021 12:20:03 +0530
Message-ID: <20211006065012.16508-6-pbhagavatula@marvell.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20211006065012.16508-1-pbhagavatula@marvell.com>
References: <20211003082710.8398-1-pbhagavatula@marvell.com>
	<20211006065012.16508-1-pbhagavatula@marvell.com>
Subject: [dpdk-dev] [PATCH v3 06/14] eventdev: use new API for inline functions

From: Pavan Nikhilesh

Use new driver interface for the fastpath enqueue/dequeue inline
functions.
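Below is a minimal usage sketch, not part of this patch, of how an
application calls the reworked inline fastpath functions; the
worker_loop() helper and the prior device/port/queue setup are assumed:

	#include <rte_eventdev.h>

	static void
	worker_loop(uint8_t dev_id, uint8_t port_id)
	{
		struct rte_event ev;

		while (1) {
			/* Port pointer is now resolved via rte_event_fp_ops[dev_id]. */
			if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) == 0)
				continue;

			/* ... process the event ... */

			/* Hand the event back to the scheduler. */
			ev.op = RTE_EVENT_OP_FORWARD;
			while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) == 0)
				;
		}
	}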
Signed-off-by: Pavan Nikhilesh
Acked-by: Jay Jayatheerthan
---
 lib/eventdev/rte_event_crypto_adapter.h | 15 +++++---
 lib/eventdev/rte_event_eth_tx_adapter.h | 15 ++++----
 lib/eventdev/rte_eventdev.h             | 46 +++++++++++++++----------
 3 files changed, 47 insertions(+), 29 deletions(-)

diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
index 431d05b6ed..eb82818d05 100644
--- a/lib/eventdev/rte_event_crypto_adapter.h
+++ b/lib/eventdev/rte_event_crypto_adapter.h
@@ -568,12 +568,19 @@ rte_event_crypto_adapter_enqueue(uint8_t dev_id,
 			struct rte_event ev[],
 			uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
+		rte_errno = EINVAL;
+		return 0;
+	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -581,7 +588,7 @@ rte_event_crypto_adapter_enqueue(uint8_t dev_id,
 	rte_eventdev_trace_crypto_adapter_enqueue(dev_id, port_id, ev,
 		nb_events);
 
-	return dev->ca_enqueue(dev->data->ports[port_id], ev, nb_events);
+	return fp_ops->ca_enqueue(port, ev, nb_events);
 }
 
 #ifdef __cplusplus
diff --git a/lib/eventdev/rte_event_eth_tx_adapter.h b/lib/eventdev/rte_event_eth_tx_adapter.h
index 8c59547165..3908c2ded5 100644
--- a/lib/eventdev/rte_event_eth_tx_adapter.h
+++ b/lib/eventdev/rte_event_eth_tx_adapter.h
@@ -355,16 +355,19 @@ rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
 			uint16_t nb_events,
 			const uint8_t flags)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
-	    !rte_eventdevs[dev_id].attached) {
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -372,11 +375,9 @@ rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
 	rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
 		nb_events, flags);
 	if (flags)
-		return dev->txa_enqueue_same_dest(dev->data->ports[port_id],
-						  ev, nb_events);
+		return fp_ops->txa_enqueue_same_dest(port, ev, nb_events);
 	else
-		return dev->txa_enqueue(dev->data->ports[port_id], ev,
-					nb_events);
+		return fp_ops->txa_enqueue(port, ev, nb_events);
 }
 
 /**
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 1b11d4576d..31fa9ac4b8 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1747,15 +1747,19 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 			  const struct rte_event ev[], uint16_t nb_events,
 			  const event_enqueue_burst_t fn)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -1766,9 +1770,9 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 	 * requests nb_events as const one
 	 */
 	if (nb_events == 1)
-		return (*dev->enqueue)(dev->data->ports[port_id], ev);
+		return (fp_ops->enqueue)(port, ev);
 	else
-		return fn(dev->data->ports[port_id], ev, nb_events);
+		return fn(port, ev, nb_events);
 }
 
 /**
@@ -1818,10 +1822,11 @@ static inline uint16_t
 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 			const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_burst);
+			fp_ops->enqueue_burst);
 }
 
 /**
@@ -1869,10 +1874,11 @@ static inline uint16_t
 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
 			const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_new_burst);
+			fp_ops->enqueue_new_burst);
 }
 
 /**
@@ -1920,10 +1926,11 @@ static inline uint16_t
 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
 			const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_forward_burst);
+			fp_ops->enqueue_forward_burst);
 }
 
 /**
@@ -1996,15 +2003,19 @@ static inline uint16_t
 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 			uint16_t nb_events, uint64_t timeout_ticks)
 {
-	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -2015,11 +2026,10 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 	 * requests nb_events as const one
 	 */
 	if (nb_events == 1)
-		return (*dev->dequeue)(dev->data->ports[port_id], ev,
-				       timeout_ticks);
+		return (fp_ops->dequeue)(port, ev, timeout_ticks);
 	else
-		return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
-					     nb_events, timeout_ticks);
+		return (fp_ops->dequeue_burst)(port, ev, nb_events,
+				timeout_ticks);
 }
 
 #ifdef __cplusplus
-- 
2.17.1