patches for DPDK stable branches
* [PATCH 19.11] net/cxgbe: fix Tx queue stuck with mbuf chain coalescing
@ 2022-07-16 13:03 Rahul Lakkireddy
  2022-08-03  9:52 ` Christian Ehrhardt
  0 siblings, 1 reply; 2+ messages in thread
From: Rahul Lakkireddy @ 2022-07-16 13:03 UTC (permalink / raw)
  To: stable

[ upstream commit 151e828f6427667faf3fdfaa00d14a65c7f57cd6 ]

When trying to coalesce chained mbufs on the Tx side, it is
possible to get stuck during a queue wrap around. When coalescing
the mbuf chain fails, the Tx path returns EBUSY; when the same
packet is retried, coalescing fails again and the loop repeats.
Fix by pushing the packet through the normal Tx path instead.
Also use FW_ETH_TX_PKTS_WR to handle chained mbufs, so that the
firmware can optimize them.

Fixes: 6c2809628cd5 ("net/cxgbe: improve latency for slow traffic")
Cc: stable@dpdk.org

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
---
 drivers/net/cxgbe/sge.c | 38 +++++++++++++++++++++++---------------
 1 file changed, 23 insertions(+), 15 deletions(-)
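
Below is a minimal, self-contained sketch (not driver code) of the
return-code contract this patch gives should_tx_packet_coalesce() and
its caller t4_eth_xmit(): a positive value means the packet can be
coalesced, zero means fall back to the normal Tx path (e.g. when the
work request would wrap the queue), and a negative value (-EBUSY)
means give up for now and retry later. struct txq_sketch,
should_coalesce_sketch() and the values in main() are hypothetical
stand-ins, not the driver's real types.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant fields of struct sge_txq. */
struct txq_sketch {
	unsigned int pidx;   /* producer index */
	unsigned int size;   /* ring size in descriptors */
	int avail;           /* free descriptors (credits) */
};

/* Mirrors the patched wraps_around(): true if placing ndesc
 * descriptors at pidx would run past the end of the ring. */
static bool wraps_around(const struct txq_sketch *q, int ndesc)
{
	return (q->pidx + ndesc) > q->size;
}

/* Sketch of the new should_tx_packet_coalesce() contract:
 *   > 0   coalesce the packet
 *   == 0  cannot coalesce (queue wrap); the caller now falls back to
 *         the normal Tx path instead of returning -EBUSY and looping
 *         on the same mbuf
 *   < 0   out of credits; the caller propagates -EBUSY so the
 *         application retries later
 */
static int should_coalesce_sketch(const struct txq_sketch *q, int ndesc)
{
	if (wraps_around(q, ndesc))
		return 0;
	if (q->avail - ndesc < 0)
		return -EBUSY;
	return 1;
}

int main(void)
{
	struct txq_sketch q = { .pidx = 1020, .size = 1024, .avail = 64 };
	int ret = should_coalesce_sketch(&q, 8); /* 8 descriptors wrap */

	if (ret > 0)
		printf("coalesce\n");
	else if (ret == 0)
		printf("fall back to normal Tx path\n"); /* printed here */
	else
		printf("-EBUSY, retry later\n");
	return 0;
}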

diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 61ee218be..f3ff576cf 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -793,9 +793,9 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
 
 #define MAX_COALESCE_LEN 64000
 
-static inline int wraps_around(struct sge_txq *q, int ndesc)
+static inline bool wraps_around(struct sge_txq *q, int ndesc)
 {
-	return (q->pidx + ndesc) > q->size ? 1 : 0;
+	return (q->pidx + ndesc) > q->size ? true : false;
 }
 
 static void tx_timer_cb(void *data)
@@ -846,7 +846,6 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
 
 	/* fill the pkts WR header */
 	wr = (void *)&q->desc[q->pidx];
-	wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
 	vmwr = (void *)&q->desc[q->pidx];
 
 	wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
@@ -856,8 +855,11 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
 	wr->npkt = q->coalesce.idx;
 	wr->r3 = 0;
 	if (is_pf4(adap)) {
-		wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
 		wr->type = q->coalesce.type;
+		if (likely(wr->type != 0))
+			wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+		else
+			wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
 	} else {
 		wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
 		vmwr->r4 = 0;
@@ -936,13 +938,16 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
 		ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8);
 		credits = txq_avail(q) - ndesc;
 
+		if (unlikely(wraps_around(q, ndesc)))
+			return 0;
+
 		/* If we are wrapping or this is last mbuf then, send the
 		 * already coalesced mbufs and let the non-coalesce pass
 		 * handle the mbuf.
 		 */
-		if (unlikely(credits < 0 || wraps_around(q, ndesc))) {
+		if (unlikely(credits < 0)) {
 			ship_tx_pkt_coalesce_wr(adap, txq);
-			return 0;
+			return -EBUSY;
 		}
 
 		/* If the max coalesce len or the max WR len is reached
@@ -966,8 +971,12 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
 	ndesc = flits_to_desc(q->coalesce.flits + flits);
 	credits = txq_avail(q) - ndesc;
 
-	if (unlikely(credits < 0 || wraps_around(q, ndesc)))
+	if (unlikely(wraps_around(q, ndesc)))
 		return 0;
+
+	if (unlikely(credits < 0))
+		return -EBUSY;
+
 	q->coalesce.flits += wr_size / sizeof(__be64);
 	q->coalesce.type = type;
 	q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
@@ -1110,7 +1119,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
 	unsigned int flits, ndesc, cflits;
 	int l3hdr_len, l4hdr_len, eth_xtra_len;
 	int len, last_desc;
-	int credits;
+	int should_coal, credits;
 	u32 wr_mid;
 	u64 cntrl, *end;
 	bool v6;
@@ -1141,9 +1150,9 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
 	/* align the end of coalesce WR to a 512 byte boundary */
 	txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
 
-	if (!((m->ol_flags & PKT_TX_TCP_SEG) ||
-			m->pkt_len > RTE_ETHER_MAX_LEN)) {
-		if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
+	if ((m->ol_flags & PKT_TX_TCP_SEG) == 0) {
+		should_coal = should_tx_packet_coalesce(txq, mbuf, &cflits, adap);
+		if (should_coal > 0) {
 			if (unlikely(map_mbuf(mbuf, addr) < 0)) {
 				dev_warn(adap, "%s: mapping err for coalesce\n",
 					 __func__);
@@ -1152,8 +1161,8 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
 			}
 			return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
 						     pi, addr, nb_pkts);
-		} else {
-			return -EBUSY;
+		} else if (should_coal < 0) {
+			return should_coal;
 		}
 	}
 
@@ -1200,8 +1209,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
 		end = (u64 *)vmwr + flits;
 	}
 
-	len = 0;
-	len += sizeof(*cpl);
+	len = sizeof(*cpl);
 
 	/* Coalescing skipped and we send through normal path */
 	if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
-- 
2.27.0



* Re: [PATCH 19.11] net/cxgbe: fix Tx queue stuck with mbuf chain coalescing
  2022-07-16 13:03 [PATCH 19.11] net/cxgbe: fix Tx queue stuck with mbuf chain coalescing Rahul Lakkireddy
@ 2022-08-03  9:52 ` Christian Ehrhardt
  0 siblings, 0 replies; 2+ messages in thread
From: Christian Ehrhardt @ 2022-08-03  9:52 UTC (permalink / raw)
  To: Rahul Lakkireddy; +Cc: stable

On Sat, Jul 16, 2022 at 3:04 PM Rahul Lakkireddy
<rahul.lakkireddy@chelsio.com> wrote:
>
> [ upstream commit 151e828f6427667faf3fdfaa00d14a65c7f57cd6 ]

Thank you,
your patch was in time, but sadly a few fell through the cracks on my side.
(and many more just arrived too late).

Your patch is applied to the WIP branch now, but currently testing of
-rc1 is going on which I do not want to disrupt.

If we need an -rc2 anyway, or generally have the time to do an -rc2
without too much disruption, it will be in 19.11.13; otherwise it is
already queued for 19.11.14.

-- 
Christian Ehrhardt
Senior Staff Engineer, Ubuntu Server
Canonical Ltd
