From: Konstantin Ananyev <konstantin.ananyev@intel.com>
To: dev@dpdk.org
Cc: xiaoyun.li@intel.com, anoobj@marvell.com, jerinj@marvell.com,
 ndabilpuram@marvell.com, adwivedi@marvell.com,
 shepard.siegel@atomicrules.com, ed.czeck@atomicrules.com,
 john.miller@atomicrules.com, irusskikh@marvell.com,
 ajit.khaparde@broadcom.com, somnath.kotur@broadcom.com,
 rahul.lakkireddy@chelsio.com, hemant.agrawal@nxp.com,
 sachin.saxena@oss.nxp.com, haiyue.wang@intel.com, johndale@cisco.com,
 hyonkim@cisco.com, qi.z.zhang@intel.com, xiao.w.wang@intel.com,
 humin29@huawei.com, yisen.zhuang@huawei.com, oulijun@huawei.com,
 beilei.xing@intel.com, jingjing.wu@intel.com, qiming.yang@intel.com,
 matan@nvidia.com, viacheslavo@nvidia.com, sthemmin@microsoft.com,
 longli@microsoft.com, heinrich.kuhn@corigine.com, kirankumark@marvell.com,
 andrew.rybchenko@oktetlabs.ru, mczekaj@marvell.com, jiawenwu@trustnetic.com,
 jianwang@trustnetic.com, maxime.coquelin@redhat.com, chenbo.xia@intel.com,
 thomas@monjalon.net, ferruh.yigit@intel.com, mdr@ashroe.eu,
 jay.jayatheerthan@intel.com,
 Konstantin Ananyev <konstantin.ananyev@intel.com>
Date: Fri, 1 Oct 2021 15:02:52 +0100
Message-Id: <20211001140255.5726-5-konstantin.ananyev@intel.com>
X-Mailer: git-send-email 2.18.0
In-Reply-To: <20211001140255.5726-1-konstantin.ananyev@intel.com>
References: <20210922140953.19388-1-konstantin.ananyev@intel.com>
 <20211001140255.5726-1-konstantin.ananyev@intel.com>
Subject: [dpdk-dev] [PATCH v3 4/7] ethdev: make burst functions use new flat array

Rework 'fast' burst functions to use rte_eth_fp_ops[].
While this is an API/ABI breakage, the change is intended to be
transparent for both users (no changes in user applications are
required) and PMD developers (no changes in PMDs are required).
One thing to note: with these changes, RX/TX callback invocation
costs one extra function call. That might cause an insignificant
slowdown on code paths where RX/TX callbacks are heavily used.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
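Note, not part of the diff below: a minimal sketch of an application-side
burst loop, illustrating the claim above that user code needs no changes.
The same rte_eth_rx_burst()/rte_eth_tx_burst() calls simply dispatch
through the flat rte_eth_fp_ops[] array now. BURST_SIZE and the port and
queue ids are illustrative values, not anything mandated by this patch.

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	#define BURST_SIZE 32	/* illustrative burst size */

	/* Receive up to BURST_SIZE packets from one port/queue and
	 * forward them to another; identical before and after this patch. */
	static void
	forward_burst(uint16_t rx_port, uint16_t tx_port, uint16_t queue_id)
	{
		struct rte_mbuf *pkts[BURST_SIZE];
		uint16_t nb_rx, nb_tx, i;

		nb_rx = rte_eth_rx_burst(rx_port, queue_id, pkts, BURST_SIZE);
		if (nb_rx == 0)
			return;

		nb_tx = rte_eth_tx_burst(tx_port, queue_id, pkts, nb_rx);

		/* free whatever the TX queue did not accept */
		for (i = nb_tx; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]);
	}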
 lib/ethdev/ethdev_private.c |  31 +++++
 lib/ethdev/rte_ethdev.h     | 244 ++++++++++++++++++++++++++----------
 lib/ethdev/version.map      |   5 +
 3 files changed, 212 insertions(+), 68 deletions(-)

diff --git a/lib/ethdev/ethdev_private.c b/lib/ethdev/ethdev_private.c
index 3eeda6e9f9..27d29b2ac6 100644
--- a/lib/ethdev/ethdev_private.c
+++ b/lib/ethdev/ethdev_private.c
@@ -226,3 +226,34 @@ eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo,
 	fpo->txq.data = dev->data->tx_queues;
 	fpo->txq.clbk = (void **)(uintptr_t)dev->pre_tx_burst_cbs;
 }
+
+uint16_t
+__rte_eth_rx_epilog(uint16_t port_id, uint16_t queue_id,
+	struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
+	void *opaque)
+{
+	const struct rte_eth_rxtx_callback *cb = opaque;
+
+	while (cb != NULL) {
+		nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
+				nb_pkts, cb->param);
+		cb = cb->next;
+	}
+
+	return nb_rx;
+}
+
+uint16_t
+__rte_eth_tx_prolog(uint16_t port_id, uint16_t queue_id,
+	struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque)
+{
+	const struct rte_eth_rxtx_callback *cb = opaque;
+
+	while (cb != NULL) {
+		nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
+				cb->param);
+		cb = cb->next;
+	}
+
+	return nb_pkts;
+}
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 9642b7c00f..20ac5ba88d 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -4904,6 +4904,34 @@ int rte_eth_representor_info_get(uint16_t port_id,
 
 #include <rte_ethdev_core.h>
 
+/**
+ * @internal
+ * Helper routine for eth driver rx_burst API.
+ * Should be called on exit from the PMD's rte_eth_rx_burst implementation.
+ * Does necessary post-processing: invokes RX callbacks, if any.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param queue_id
+ *   The index of the receive queue from which to retrieve input packets.
+ * @param rx_pkts
+ *   The address of an array of pointers to *rte_mbuf* structures that
+ *   have been retrieved from the device.
+ * @param nb_rx
+ *   The number of packets that were retrieved from the device.
+ * @param nb_pkts
+ *   The number of elements in the *rx_pkts* array.
+ * @param opaque
+ *   Opaque pointer of RX queue callback related data.
+ *
+ * @return
+ *   The number of packets effectively supplied to the *rx_pkts* array.
+ */
+__rte_experimental
+uint16_t __rte_eth_rx_epilog(uint16_t port_id, uint16_t queue_id,
+	struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
+	void *opaque);
+
 /**
  *
  * Retrieve a burst of input packets from a receive queue of an Ethernet
@@ -4995,23 +5023,37 @@ static inline uint16_t
 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
 		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
 {
-	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 	uint16_t nb_rx;
+	struct rte_eth_fp_ops *p;
+	void *cb, *qd;
+
+#ifdef RTE_ETHDEV_DEBUG_RX
+	if (port_id >= RTE_MAX_ETHPORTS ||
+			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+		RTE_ETHDEV_LOG(ERR,
+			"Invalid port_id=%u or queue_id=%u\n",
+			port_id, queue_id);
+		return 0;
+	}
+#endif
+
+	/* fetch pointer to queue data */
+	p = &rte_eth_fp_ops[port_id];
+	qd = p->rxq.data[queue_id];
 
 #ifdef RTE_ETHDEV_DEBUG_RX
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
 
-	if (queue_id >= dev->data->nb_rx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
+	if (qd == NULL) {
+		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u for port_id=%u\n",
+			queue_id, port_id);
 		return 0;
 	}
 #endif
-	nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
-				     rx_pkts, nb_pkts);
+
+	nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
 
 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
-	struct rte_eth_rxtx_callback *cb;
-
 	/* __ATOMIC_RELEASE memory order was used when the
 	 * call back was inserted into the list.
@@ -5019,16 +5061,10 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
 	 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
 	 * not required.
 	 */
-	cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
-				__ATOMIC_RELAXED);
-
-	if (unlikely(cb != NULL)) {
-		do {
-			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
-					nb_pkts, cb->param);
-			cb = cb->next;
-		} while (cb != NULL);
-	}
+	cb = __atomic_load_n((void **)&p->rxq.clbk[queue_id], __ATOMIC_RELAXED);
+	if (unlikely(cb != NULL))
+		nb_rx = __rte_eth_rx_epilog(port_id, queue_id, rx_pkts, nb_rx,
+				nb_pkts, cb);
 #endif
 
 	rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
@@ -5051,16 +5087,27 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
 static inline int
 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
 {
-	struct rte_eth_dev *dev;
+	struct rte_eth_fp_ops *p;
+	void *qd;
+
+	if (port_id >= RTE_MAX_ETHPORTS ||
+			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+		RTE_ETHDEV_LOG(ERR,
+			"Invalid port_id=%u or queue_id=%u\n",
+			port_id, queue_id);
+		return -EINVAL;
+	}
+
+	/* fetch pointer to queue data */
+	p = &rte_eth_fp_ops[port_id];
+	qd = p->rxq.data[queue_id];
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-	dev = &rte_eth_devices[port_id];
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_queue_count, -ENOTSUP);
-	if (queue_id >= dev->data->nb_rx_queues ||
-	    dev->data->rx_queues[queue_id] == NULL)
+	RTE_FUNC_PTR_OR_ERR_RET(*p->rx_queue_count, -ENOTSUP);
+	if (qd == NULL)
 		return -EINVAL;
 
-	return (int)(*dev->rx_queue_count)(dev->data->rx_queues[queue_id]);
+	return (int)(*p->rx_queue_count)(qd);
 }
 
 /**
@@ -5133,21 +5180,30 @@ static inline int
 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
 	uint16_t offset)
 {
-	struct rte_eth_dev *dev;
-	void *rxq;
+	struct rte_eth_fp_ops *p;
+	void *qd;
 
 #ifdef RTE_ETHDEV_DEBUG_RX
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	if (port_id >= RTE_MAX_ETHPORTS ||
+			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+		RTE_ETHDEV_LOG(ERR,
+			"Invalid port_id=%u or queue_id=%u\n",
+			port_id, queue_id);
+		return -EINVAL;
+	}
 #endif
-	dev = &rte_eth_devices[port_id];
+
+	/* fetch pointer to queue data */
+	p = &rte_eth_fp_ops[port_id];
+	qd = p->rxq.data[queue_id];
+
 #ifdef RTE_ETHDEV_DEBUG_RX
-	if (queue_id >= dev->data->nb_rx_queues)
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	if (qd == NULL)
 		return -ENODEV;
 #endif
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_status, -ENOTSUP);
-	rxq = dev->data->rx_queues[queue_id];
-
-	return (*dev->rx_descriptor_status)(rxq, offset);
+	RTE_FUNC_PTR_OR_ERR_RET(*p->rx_descriptor_status, -ENOTSUP);
+	return (*p->rx_descriptor_status)(qd, offset);
 }
 
 /**@{@name Tx hardware descriptor states
@@ -5194,23 +5250,55 @@ rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
 	uint16_t queue_id, uint16_t offset)
 {
-	struct rte_eth_dev *dev;
-	void *txq;
+	struct rte_eth_fp_ops *p;
+	void *qd;
 
 #ifdef RTE_ETHDEV_DEBUG_TX
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	if (port_id >= RTE_MAX_ETHPORTS ||
+			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+		RTE_ETHDEV_LOG(ERR,
+			"Invalid port_id=%u or queue_id=%u\n",
+			port_id, queue_id);
+		return -EINVAL;
+	}
 #endif
-	dev = &rte_eth_devices[port_id];
+
+	/* fetch pointer to queue data */
+	p = &rte_eth_fp_ops[port_id];
+	qd = p->txq.data[queue_id];
+
 #ifdef RTE_ETHDEV_DEBUG_TX
-	if (queue_id >= dev->data->nb_tx_queues)
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	if (qd == NULL)
 		return -ENODEV;
 #endif
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_descriptor_status, -ENOTSUP);
-	txq = dev->data->tx_queues[queue_id];
-
-	return (*dev->tx_descriptor_status)(txq, offset);
+	RTE_FUNC_PTR_OR_ERR_RET(*p->tx_descriptor_status, -ENOTSUP);
+	return (*p->tx_descriptor_status)(qd, offset);
 }
 
+/**
+ * @internal
+ * Helper routine for eth driver tx_burst API.
+ * Should be called before entering the PMD's rte_eth_tx_burst
+ * implementation.
+ * Does necessary pre-processing: invokes TX callbacks, if any.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param queue_id
+ *   The index of the transmit queue through which output packets must be
+ *   sent.
+ * @param tx_pkts
+ *   The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
+ *   which contain the output packets.
+ * @param nb_pkts
+ *   The maximum number of packets to transmit.
+ * @param opaque
+ *   Opaque pointer of TX queue callback related data.
+ *
+ * @return
+ *   The number of output packets to transmit.
+ */
+__rte_experimental
+uint16_t __rte_eth_tx_prolog(uint16_t port_id, uint16_t queue_id,
+	struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
+
 /**
  * Send a burst of output packets on a transmit queue of an Ethernet device.
  *
@@ -5281,20 +5369,34 @@ static inline uint16_t
 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
 		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	struct rte_eth_fp_ops *p;
+	void *cb, *qd;
+
+#ifdef RTE_ETHDEV_DEBUG_TX
+	if (port_id >= RTE_MAX_ETHPORTS ||
+			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+		RTE_ETHDEV_LOG(ERR,
+			"Invalid port_id=%u or queue_id=%u\n",
+			port_id, queue_id);
+		return 0;
+	}
+#endif
+
+	/* fetch pointer to queue data */
+	p = &rte_eth_fp_ops[port_id];
+	qd = p->txq.data[queue_id];
 
 #ifdef RTE_ETHDEV_DEBUG_TX
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
 
-	if (queue_id >= dev->data->nb_tx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
+	if (qd == NULL) {
+		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u for port_id=%u\n",
+			queue_id, port_id);
 		return 0;
 	}
 #endif
 
 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
-	struct rte_eth_rxtx_callback *cb;
-
 	/* __ATOMIC_RELEASE memory order was used when the
 	 * call back was inserted into the list.
@@ -5302,21 +5404,16 @@ rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
 	 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
 	 * not required.
 	 */
-	cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
-				__ATOMIC_RELAXED);
-
-	if (unlikely(cb != NULL)) {
-		do {
-			nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
-					cb->param);
-			cb = cb->next;
-		} while (cb != NULL);
-	}
+	cb = __atomic_load_n((void **)&p->txq.clbk[queue_id], __ATOMIC_RELAXED);
+	if (unlikely(cb != NULL))
+		nb_pkts = __rte_eth_tx_prolog(port_id, queue_id, tx_pkts,
+				nb_pkts, cb);
 #endif
 
-	rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts,
-		nb_pkts);
-	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
+	nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
+
+	rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
+	return nb_pkts;
 }
 
 /**
@@ -5379,31 +5476,42 @@ static inline uint16_t
 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
 		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	struct rte_eth_dev *dev;
+	struct rte_eth_fp_ops *p;
+	void *qd;
 
 #ifdef RTE_ETHDEV_DEBUG_TX
-	if (!rte_eth_dev_is_valid_port(port_id)) {
-		RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
+	if (port_id >= RTE_MAX_ETHPORTS ||
+			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+		RTE_ETHDEV_LOG(ERR,
+			"Invalid port_id=%u or queue_id=%u\n",
+			port_id, queue_id);
 		rte_errno = ENODEV;
 		return 0;
 	}
 #endif
 
-	dev = &rte_eth_devices[port_id];
+	/* fetch pointer to queue data */
+	p = &rte_eth_fp_ops[port_id];
+	qd = p->txq.data[queue_id];
 
 #ifdef RTE_ETHDEV_DEBUG_TX
-	if (queue_id >= dev->data->nb_tx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
+	if (!rte_eth_dev_is_valid_port(port_id)) {
+		RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
+		rte_errno = ENODEV;
+		return 0;
+	}
+	if (qd == NULL) {
+		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u for port_id=%u\n",
+			queue_id, port_id);
 		rte_errno = EINVAL;
 		return 0;
 	}
 #endif
 
-	if (!dev->tx_pkt_prepare)
+	if (!p->tx_pkt_prepare)
 		return nb_pkts;
 
-	return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
-			tx_pkts, nb_pkts);
+	return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
 }
 
 #else
diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map
index 904bce6ea1..3a1278b448 100644
--- a/lib/ethdev/version.map
+++ b/lib/ethdev/version.map
@@ -247,11 +247,16 @@ EXPERIMENTAL {
 	rte_mtr_meter_policy_delete;
 	rte_mtr_meter_policy_update;
 	rte_mtr_meter_policy_validate;
+
+	# added in 21.11
+	__rte_eth_rx_epilog;
+	__rte_eth_tx_prolog;
 };
 
 INTERNAL {
 	global:
 
+	rte_eth_fp_ops;
 	rte_eth_dev_allocate;
 	rte_eth_dev_allocated;
 	rte_eth_dev_attach_secondary;
-- 
2.26.3
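
Note, not part of the patch: a minimal sketch of a user RX callback, for
reference on the callback path mentioned in the commit message. With this
change such a callback is invoked from __rte_eth_rx_epilog() rather than
inline in rte_eth_rx_burst(), i.e. behind one extra function call. The
callback name and the counter below are hypothetical.

	#include <rte_ethdev.h>

	static uint64_t rx_count;	/* hypothetical packet counter */

	/* Matches rte_rx_callback_fn; counts received packets and passes
	 * the burst through unchanged. */
	static uint16_t
	count_rx_cb(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf *pkts[], uint16_t nb_rx,
		uint16_t max_pkts, void *user_param)
	{
		RTE_SET_USED(port_id);
		RTE_SET_USED(queue_id);
		RTE_SET_USED(pkts);
		RTE_SET_USED(max_pkts);

		*(uint64_t *)user_param += nb_rx;
		return nb_rx;
	}

	/* Registration, e.g. after queue setup and before
	 * rte_eth_dev_start():
	 *
	 *	rte_eth_add_rx_callback(port_id, queue_id,
	 *		count_rx_cb, &rx_count);
	 */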