From: Jiawen Wu
To: dev@dpdk.org
Cc: jiawenwu
Date: Mon, 5 Oct 2020 20:09:10 +0800
Message-Id: <20201005120910.189343-57-jiawenwu@trustnetic.com>
X-Mailer: git-send-email 2.18.4
In-Reply-To: <20201005120910.189343-1-jiawenwu@trustnetic.com>
References: <20201005120910.189343-1-jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v2 56/56] net/txgbe: add Rx and Tx descriptor status

From: jiawenwu

Support checking the status of Rx and Tx descriptors. Besides the
rx/tx_descriptor_status dev_ops, this also adds the rx_queue_count,
rx_descriptor_done and tx_done_cleanup operations.
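
These dev_ops are not called directly; an application reaches them through
the generic ethdev wrappers. A minimal sketch of the Rx side (the helper
name and its parameters are illustrative only, not part of this patch):

    #include <rte_ethdev.h>

    /* Return non-zero when the Rx descriptor 'offset' entries ahead of
     * the driver's current position has been filled by hardware, i.e. a
     * received packet is waiting there.
     */
    static inline int
    rxq_pkt_pending(uint16_t port_id, uint16_t queue_id, uint16_t offset)
    {
            return rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
                            RTE_ETH_RX_DESC_DONE;
    }

rte_eth_tx_descriptor_status() is the Tx-side counterpart; both wrappers
return -ENOTSUP on PMDs that do not provide these callbacks.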
Signed-off-by: jiawenwu
---
 doc/guides/nics/features/txgbe.ini |   2 +
 drivers/net/txgbe/txgbe_ethdev.c   |   5 +
 drivers/net/txgbe/txgbe_ethdev.h   |   8 ++
 drivers/net/txgbe/txgbe_rxtx.c     | 182 +++++++++++++++++++++++++++++
 drivers/net/txgbe/txgbe_rxtx.h     |   1 +
 5 files changed, 198 insertions(+)

diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 1684bcc7e..7c457fede 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -37,6 +37,8 @@ Inner L3 checksum    = P
 Inner L4 checksum    = P
 Packet type parsing  = Y
 Timesync             = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
 Basic stats          = Y
 Extended stats       = Y
 Stats per queue      = Y
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 15d2bc07c..d127a03bf 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -4211,6 +4211,10 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
 	.rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
 	.rx_queue_release           = txgbe_dev_rx_queue_release,
+	.rx_queue_count             = txgbe_dev_rx_queue_count,
+	.rx_descriptor_done         = txgbe_dev_rx_descriptor_done,
+	.rx_descriptor_status       = txgbe_dev_rx_descriptor_status,
+	.tx_descriptor_status       = txgbe_dev_tx_descriptor_status,
 	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
 	.tx_queue_release           = txgbe_dev_tx_queue_release,
 	.dev_led_on                 = txgbe_dev_led_on,
@@ -4247,6 +4251,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.timesync_adjust_time       = txgbe_timesync_adjust_time,
 	.timesync_read_time         = txgbe_timesync_read_time,
 	.timesync_write_time        = txgbe_timesync_write_time,
+	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
 };
 
 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 66ba8db39..2f4e9a81d 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -218,6 +218,14 @@ int txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		uint16_t nb_tx_desc, unsigned int socket_id,
 		const struct rte_eth_txconf *tx_conf);
 
+uint32_t txgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
+		uint16_t rx_queue_id);
+
+int txgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
 int txgbe_dev_rx_init(struct rte_eth_dev *dev);
 
 void txgbe_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 12fc49165..2dbd43b46 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -1962,6 +1962,97 @@ txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
 	}
 }
 
+static int
+txgbe_tx_done_cleanup_full(struct txgbe_tx_queue *txq, uint32_t free_cnt)
+{
+	struct txgbe_tx_entry *swr_ring = txq->sw_ring;
+	uint16_t i, tx_last, tx_id;
+	uint16_t nb_tx_free_last;
+	uint16_t nb_tx_to_clean;
+	uint32_t pkt_cnt;
+
+	/* Start freeing mbufs from the entry after tx_tail */
+	tx_last = txq->tx_tail;
+	tx_id = swr_ring[tx_last].next_id;
+
+	if (txq->nb_tx_free == 0 && txgbe_xmit_cleanup(txq))
+		return 0;
+
+	nb_tx_to_clean = txq->nb_tx_free;
+	nb_tx_free_last = txq->nb_tx_free;
+	if (!free_cnt)
+		free_cnt = txq->nb_tx_desc;
+
+	/* Loop through swr_ring to count the number of
+	 * freeable mbufs and packets.
+	 */
+	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+		for (i = 0; i < nb_tx_to_clean &&
+			pkt_cnt < free_cnt &&
+			tx_id != tx_last; i++) {
+			if (swr_ring[tx_id].mbuf != NULL) {
+				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+				swr_ring[tx_id].mbuf = NULL;
+
+				/*
+				 * last segment in the packet,
+				 * increment packet count
+				 */
+				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+			}
+
+			tx_id = swr_ring[tx_id].next_id;
+		}
+
+		if (pkt_cnt < free_cnt) {
+			if (txgbe_xmit_cleanup(txq))
+				break;
+
+			nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+			nb_tx_free_last = txq->nb_tx_free;
+		}
+	}
+
+	return (int)pkt_cnt;
+}
+
+static int
+txgbe_tx_done_cleanup_simple(struct txgbe_tx_queue *txq,
+			uint32_t free_cnt)
+{
+	int i, n, cnt;
+
+	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+		free_cnt = txq->nb_tx_desc;
+
+	cnt = free_cnt - free_cnt % txq->tx_free_thresh;
+
+	for (i = 0; i < cnt; i += n) {
+		if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
+			break;
+
+		n = txgbe_tx_free_bufs(txq);
+
+		if (n == 0)
+			break;
+	}
+
+	return i;
+}
+
+int
+txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
+{
+	struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
+	if (txq->offloads == 0 &&
+			txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) {
+
+		return txgbe_tx_done_cleanup_simple(txq, free_cnt);
+	}
+
+	return txgbe_tx_done_cleanup_full(txq, free_cnt);
+}
+
 static void __rte_cold
 txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
 {
@@ -2546,6 +2637,97 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	return 0;
 }
 
+uint32_t
+txgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define TXGBE_RXQ_SCAN_INTERVAL 4
+	volatile struct txgbe_rx_desc *rxdp;
+	struct txgbe_rx_queue *rxq;
+	uint32_t desc = 0;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+
+	while ((desc < rxq->nb_rx_desc) &&
+		(rxdp->qw1.lo.status &
+			rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))) {
+		desc += TXGBE_RXQ_SCAN_INTERVAL;
+		rxdp += TXGBE_RXQ_SCAN_INTERVAL;
+		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+			rxdp = &(rxq->rx_ring[rxq->rx_tail +
+				desc - rxq->nb_rx_desc]);
+	}
+
+	return desc;
+}
+
+int
+txgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+	volatile struct txgbe_rx_desc *rxdp;
+	struct txgbe_rx_queue *rxq = rx_queue;
+	uint32_t desc;
+
+	if (unlikely(offset >= rxq->nb_rx_desc))
+		return 0;
+	desc = rxq->rx_tail + offset;
+	if (desc >= rxq->nb_rx_desc)
+		desc -= rxq->nb_rx_desc;
+
+	rxdp = &rxq->rx_ring[desc];
+	return !!(rxdp->qw1.lo.status &
+			rte_cpu_to_le_32(TXGBE_RXD_STAT_DD));
+}
+
+int
+txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+	struct txgbe_rx_queue *rxq = rx_queue;
+	volatile uint32_t *status;
+	uint32_t nb_hold, desc;
+
+	if (unlikely(offset >= rxq->nb_rx_desc))
+		return -EINVAL;
+
+	nb_hold = rxq->nb_rx_hold;
+	if (offset >= rxq->nb_rx_desc - nb_hold)
+		return RTE_ETH_RX_DESC_UNAVAIL;
+
+	desc = rxq->rx_tail + offset;
+	if (desc >= rxq->nb_rx_desc)
+		desc -= rxq->nb_rx_desc;
+
+	status = &rxq->rx_ring[desc].qw1.lo.status;
+	if (*status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))
+		return RTE_ETH_RX_DESC_DONE;
+
+	return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+	struct txgbe_tx_queue *txq = tx_queue;
+	volatile uint32_t *status;
+	uint32_t desc;
+
+	if (unlikely(offset >= txq->nb_tx_desc))
+		return -EINVAL;
+
+	desc = txq->tx_tail + offset;
+	if (desc >= txq->nb_tx_desc) {
+		desc -= txq->nb_tx_desc;
+		if (desc >= txq->nb_tx_desc)
+			desc -= txq->nb_tx_desc;
+	}
+
+	status = &txq->tx_ring[desc].dw3;
+	if (*status & rte_cpu_to_le_32(TXGBE_TXD_DD))
+		return RTE_ETH_TX_DESC_DONE;
+
+	return RTE_ETH_TX_DESC_FULL;
+}
+
 void __rte_cold
 txgbe_dev_clear_queues(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index 958ca2e97..f773357a3 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -402,6 +402,7 @@ struct txgbe_txq_ops {
 void txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq);
 
 void txgbe_set_rx_function(struct rte_eth_dev *dev);
+int txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);
 
 uint64_t txgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 uint64_t txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
-- 
2.18.4
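
A usage note on the new tx_done_cleanup path: as txgbe_dev_tx_done_cleanup()
above shows, a free_cnt of 0 asks the driver to clean as many transmitted
packets as possible. Applications reach it through the generic
rte_eth_tx_done_cleanup() wrapper; a minimal sketch (the helper name and
the idea of calling it under mempool pressure are illustrative, not part
of this patch):

    #include <rte_ethdev.h>

    /* Free the mbufs of up to 'budget' already-transmitted packets so
     * the mempool is not exhausted under sustained Tx load.  Returns the
     * number of packets cleaned, or a negative errno (-ENOTSUP when the
     * PMD does not provide the tx_done_cleanup callback).
     */
    static int
    reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id, uint32_t budget)
    {
            return rte_eth_tx_done_cleanup(port_id, queue_id, budget);
    }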