DPDK patches and discussions
From: Ajit Khaparde <ajit.khaparde@broadcom.com>
To: dev@dpdk.org
Cc: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Subject: [PATCH 06/13] net/bnxt: free and account a bad Tx mbuf
Date: Fri, 25 Oct 2024 10:57:31 -0700
Message-ID: <20241025175738.99564-7-ajit.khaparde@broadcom.com>
In-Reply-To: <20241025175738.99564-1-ajit.khaparde@broadcom.com>

Currently, when the PMD gets a bad Tx mbuf from the application, it
does not free it; it depends on the application to do so, but in most
cases the application has no way of knowing this.

Instead, the Tx burst function now frees the mbuf and increments the
oerrors counter to indicate that the PMD encountered a bad mbuf
during transmit.

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
---
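A note for reviewers (illustrative only, not part of this patch): the
per-queue tx_mbuf_drop counter added below is folded into the standard
ethdev oerrors statistic, so an application can observe these drops with
the existing stats API. A minimal sketch, assuming port_id identifies
the port in question:

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    /* Illustration only: report Tx mbufs the PMD freed as invalid. */
    static void check_tx_drops(uint16_t port_id)
    {
            struct rte_eth_stats stats;

            if (rte_eth_stats_get(port_id, &stats) == 0 && stats.oerrors != 0)
                    printf("port %u: %" PRIu64 " Tx errors (includes dropped bad mbufs)\n",
                           port_id, stats.oerrors);
    }

rte_eth_stats_reset() clears the new per-queue counter as well.
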
 drivers/net/bnxt/bnxt_stats.c |  7 ++++
 drivers/net/bnxt/bnxt_txq.h   |  1 +
 drivers/net/bnxt/bnxt_txr.c   | 64 +++++++++++++++++++++++++----------
 3 files changed, 54 insertions(+), 18 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 5e59afe79f..ccd28f19b3 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -746,6 +746,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
 			return rc;
 
 		bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, false);
+		bnxt_stats->oerrors += rte_atomic64_read(&txq->tx_mbuf_drop);
 	}
 
 	return rc;
@@ -792,6 +793,12 @@ int bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
 		rxq->rx_mbuf_alloc_fail = 0;
 	}
 
+	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+		struct bnxt_tx_queue *txq = bp->tx_queues[i];
+
+		rte_atomic64_clear(&txq->tx_mbuf_drop);
+	}
+
 	bnxt_clear_prev_stat(bp);
 
 	return ret;
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index 9e54985c4c..44a672a401 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -34,6 +34,7 @@ struct bnxt_tx_queue {
 	const struct rte_memzone *mz;
 	struct rte_mbuf **free;
 	uint64_t offloads;
+	rte_atomic64_t          tx_mbuf_drop;
 };
 
 void bnxt_free_txq_stats(struct bnxt_tx_queue *txq);
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 4e9e377d5b..e961fed9b5 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -228,7 +228,7 @@ static int bnxt_invalid_mbuf(struct rte_mbuf *mbuf)
 	return 0;
 }
 
-static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
+static int bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 				struct bnxt_tx_queue *txq,
 				uint16_t *coal_pkts,
 				struct tx_bd_long **last_txbd)
@@ -251,27 +251,37 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 		TX_BD_LONG_FLAGS_LHINT_LT2K,
 		TX_BD_LONG_FLAGS_LHINT_LT2K
 	};
+	int rc = 0;
 
-	if (unlikely(is_bnxt_in_error(txq->bp)))
-		return -EIO;
+	if (unlikely(is_bnxt_in_error(txq->bp))) {
+		rc = -EIO;
+		goto ret;
+	}
 
-	if (unlikely(bnxt_invalid_mbuf(tx_pkt)))
-		return -EINVAL;
+	if (unlikely(bnxt_invalid_mbuf(tx_pkt))) {
+		rc = -EINVAL;
+		goto drop;
+	}
 
-	if (unlikely(bnxt_invalid_nb_segs(tx_pkt)))
-		return -EINVAL;
+	if (unlikely(bnxt_invalid_nb_segs(tx_pkt))) {
+		rc = -EINVAL;
+		goto drop;
+	}
 
 	long_bd = bnxt_xmit_need_long_bd(tx_pkt, txq);
 	nr_bds = long_bd + tx_pkt->nb_segs;
 
-	if (unlikely(bnxt_tx_avail(txq) < nr_bds))
-		return -ENOMEM;
+	if (unlikely(bnxt_tx_avail(txq) < nr_bds)) {
+		rc = -ENOMEM;
+		goto ret;
+	}
 
 	/* Check if number of Tx descriptors is above HW limit */
 	if (unlikely(nr_bds > BNXT_MAX_TSO_SEGS)) {
 		PMD_DRV_LOG_LINE(ERR,
 			    "Num descriptors %d exceeds HW limit", nr_bds);
-		return -ENOSPC;
+		rc = -EINVAL;
+		goto drop;
 	}
 
 	/* If packet length is less than minimum packet size, pad it */
@@ -283,7 +293,8 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 			PMD_DRV_LOG_LINE(ERR,
 				    "Failed to pad mbuf by %d bytes",
 				    pad);
-			return -ENOMEM;
+			rc = -ENOMEM;
+			goto ret;
 		}
 
 		/* Note: data_len, pkt len are updated in rte_pktmbuf_append */
@@ -291,8 +302,10 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 	}
 
 	/* Check non zero data_len */
-	if (unlikely(bnxt_zero_data_len_tso_segsz(tx_pkt, true, false)))
-		return -EIO;
+	if (unlikely(bnxt_zero_data_len_tso_segsz(tx_pkt, true, false))) {
+		rc = -EINVAL;
+		goto drop;
+	}
 
 	if (unlikely(txq->bp->ptp_cfg != NULL && txq->bp->ptp_all_rx_tstamp == 1))
 		pkt_needs_ts = bnxt_check_pkt_needs_ts(tx_pkt);
@@ -381,8 +394,10 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 			 */
 			txbd1->kid_or_ts_low_hdr_size = hdr_size >> 1;
 			txbd1->kid_or_ts_high_mss = tx_pkt->tso_segsz;
-			if (unlikely(bnxt_zero_data_len_tso_segsz(tx_pkt, false, true)))
-				return -EIO;
+			if (unlikely(bnxt_zero_data_len_tso_segsz(tx_pkt, false, true))) {
+				rc = -EINVAL;
+				goto drop;
+			}
 
 		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) ==
 			   PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
@@ -456,8 +471,10 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 	m_seg = tx_pkt->next;
 	while (m_seg) {
 		/* Check non zero data_len */
-		if (unlikely(bnxt_zero_data_len_tso_segsz(m_seg, true, false)))
-			return -EIO;
+		if (unlikely(bnxt_zero_data_len_tso_segsz(m_seg, true, false))) {
+			rc = -EINVAL;
+			goto drop;
+		}
 		txr->tx_raw_prod = RING_NEXT(txr->tx_raw_prod);
 
 		prod = RING_IDX(ring, txr->tx_raw_prod);
@@ -477,6 +494,10 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 	txr->tx_raw_prod = RING_NEXT(txr->tx_raw_prod);
 
 	return 0;
+drop:
+	rte_pktmbuf_free(tx_pkt);
+ret:
+	return rc;
 }
 
 /*
@@ -644,6 +665,7 @@ uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t coal_pkts = 0;
 	struct bnxt_tx_queue *txq = tx_queue;
 	struct tx_bd_long *last_txbd = NULL;
+	uint8_t dropped = 0;
 
 	/* Handle TX completions */
 	bnxt_handle_tx_cp(txq);
@@ -660,8 +682,13 @@ uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq,
 				     &coal_pkts, &last_txbd);
 
-		if (unlikely(rc))
+		if (unlikely(rc)) {
+			if (rc == -EINVAL) {
+				rte_atomic64_inc(&txq->tx_mbuf_drop);
+				dropped++;
+			}
 			break;
+		}
 	}
 
 	if (likely(nb_tx_pkts)) {
@@ -670,6 +697,7 @@ uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		bnxt_db_write(&txq->tx_ring->tx_db, txq->tx_ring->tx_raw_prod);
 	}
 
+	nb_tx_pkts += dropped;
 	return nb_tx_pkts;
 }
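
A note on the burst return value (illustrative, not part of the patch):
since the PMD now frees a bad mbuf itself, that packet is also counted
in the value returned by the Tx burst function (nb_tx_pkts += dropped
above). A typical application transmit loop treats any packet not
reported as sent as still owned by the application; a rough sketch,
assuming port_id, queue_id, pkts and nb_pkts are already set up:

    uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

    while (sent < nb_pkts) {
            /* The unsent tail is still owned by the application and may
             * be retried or freed. Counting dropped mbufs as sent keeps
             * the application from touching mbufs the PMD already freed.
             */
            sent += rte_eth_tx_burst(port_id, queue_id,
                                     pkts + sent, nb_pkts - sent);
    }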
 
-- 
2.39.5 (Apple Git-154)


