From: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
To: dev@dpdk.org
Cc: stable@dpdk.org
Subject: [PATCH 2/5] net/cxgbe: fix Tx queue stuck with mbuf chain coalescing
Date: Tue, 19 Apr 2022 03:54:19 +0530
Message-Id: <10a95ba2134dc97ebdd42e48fb0cb4a042ccdcce.1650297776.git.rahul.lakkireddy@chelsio.com>
X-Mailer: git-send-email 2.23.0
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

When trying to coalesce mbufs with a chain on the Tx side, it is
possible to get stuck during a queue wrap around. When coalescing
the mbuf chain fails, the Tx path returns EBUSY, but when the same
packet is retried, it fails to coalesce again and the loop repeats.
Fix by pushing such packets through the normal Tx path instead.
Also use FW_ETH_TX_PKTS_WR for mbufs with a chain, so the firmware
can optimize their handling.

Fixes: 6c2809628cd5 ("net/cxgbe: improve latency for slow traffic")
Cc: stable@dpdk.org

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
---
 drivers/net/cxgbe/sge.c | 38 +++++++++++++++++++++++---------------
 1 file changed, 23 insertions(+), 15 deletions(-)
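
Not part of the patch, just a note for reviewers: the standalone sketch
below models the new return-code contract of should_tx_packet_coalesce()
introduced by this change. wraps_around(), pidx and size mirror sge.c;
struct txq_model, the avail field and should_coalesce() are made up for
illustration. The point is the split: 0 now means "cannot coalesce at
this ring position, fall through to the normal Tx path", while -EBUSY
is reserved for the transient out-of-credits case that is safe to retry.

/* Standalone model, not driver code. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's struct sge_txq. */
struct txq_model {
	unsigned int pidx;	/* producer index into the descriptor ring */
	unsigned int size;	/* total descriptors in the ring */
	unsigned int avail;	/* free descriptors (credits) */
};

/* Same check as the driver's wraps_around(): writing ndesc descriptors
 * starting at pidx would run past the end of the ring. */
static bool wraps_around(const struct txq_model *q, unsigned int ndesc)
{
	return (q->pidx + ndesc) > q->size;
}

/* Simplified decision modeled on the fixed should_tx_packet_coalesce():
 *   > 0    -> coalesce this packet
 *   0      -> do not coalesce; caller uses the normal Tx path
 *   -EBUSY -> out of credits; caller may retry the same packet later
 */
static int should_coalesce(const struct txq_model *q, unsigned int ndesc)
{
	if (wraps_around(q, ndesc))
		return 0;	/* deterministic: retrying would never help */

	if (q->avail < ndesc)
		return -EBUSY;	/* transient: credits are freed over time */

	return 1;
}

int main(void)
{
	/* Ring nearly at its end: a 4-descriptor request would wrap. */
	struct txq_model q = { .pidx = 1022, .size = 1024, .avail = 512 };
	int rc = should_coalesce(&q, 4);

	printf("rc=%d -> %s\n", rc,
	       rc > 0 ? "coalesce" : rc == 0 ? "normal Tx path" : "retry");
	return 0;
}

With the old code, a 0 from should_tx_packet_coalesce() made
t4_eth_xmit() return -EBUSY, so a chained mbuf that hit the wrap point
was retried against the same ring state forever; now only the negative
(genuinely retryable) case propagates to the caller.
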
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 5c176004f9..566cd48406 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -789,9 +789,9 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
 
 #define MAX_COALESCE_LEN 64000
 
-static inline int wraps_around(struct sge_txq *q, int ndesc)
+static inline bool wraps_around(struct sge_txq *q, int ndesc)
 {
-	return (q->pidx + ndesc) > q->size ? 1 : 0;
+	return (q->pidx + ndesc) > q->size ? true : false;
 }
 
 static void tx_timer_cb(void *data)
@@ -842,7 +842,6 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
 
 	/* fill the pkts WR header */
 	wr = (void *)&q->desc[q->pidx];
-	wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
 	vmwr = (void *)&q->desc[q->pidx];
 
 	wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
@@ -852,8 +851,11 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
 	wr->npkt = q->coalesce.idx;
 	wr->r3 = 0;
 	if (is_pf4(adap)) {
-		wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
 		wr->type = q->coalesce.type;
+		if (likely(wr->type != 0))
+			wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+		else
+			wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
 	} else {
 		wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
 		vmwr->r4 = 0;
@@ -932,13 +934,16 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
 	ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8);
 	credits = txq_avail(q) - ndesc;
 
+	if (unlikely(wraps_around(q, ndesc)))
+		return 0;
+
 	/* If we are wrapping or this is last mbuf then, send the
 	 * already coalesced mbufs and let the non-coalesce pass
 	 * handle the mbuf.
 	 */
-	if (unlikely(credits < 0 || wraps_around(q, ndesc))) {
+	if (unlikely(credits < 0)) {
 		ship_tx_pkt_coalesce_wr(adap, txq);
-		return 0;
+		return -EBUSY;
 	}
 
 	/* If the max coalesce len or the max WR len is reached
@@ -962,8 +967,12 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
 	ndesc = flits_to_desc(q->coalesce.flits + flits);
 	credits = txq_avail(q) - ndesc;
 
-	if (unlikely(credits < 0 || wraps_around(q, ndesc)))
+	if (unlikely(wraps_around(q, ndesc)))
 		return 0;
+
+	if (unlikely(credits < 0))
+		return -EBUSY;
+
 	q->coalesce.flits += wr_size / sizeof(__be64);
 	q->coalesce.type = type;
 	q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
@@ -1106,7 +1115,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
 	unsigned int flits, ndesc, cflits;
 	int l3hdr_len, l4hdr_len, eth_xtra_len;
 	int len, last_desc;
-	int credits;
+	int should_coal, credits;
 	u32 wr_mid;
 	u64 cntrl, *end;
 	bool v6;
@@ -1138,9 +1147,9 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
 	/* align the end of coalesce WR to a 512 byte boundary */
 	txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
 
-	if (!((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) ||
-	      m->pkt_len > RTE_ETHER_MAX_LEN)) {
-		if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
+	if ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0) {
+		should_coal = should_tx_packet_coalesce(txq, mbuf, &cflits, adap);
+		if (should_coal > 0) {
 			if (unlikely(map_mbuf(mbuf, addr) < 0)) {
 				dev_warn(adap, "%s: mapping err for coalesce\n",
 					 __func__);
@@ -1149,8 +1158,8 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
 			}
 			return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
 						     pi, addr, nb_pkts);
-		} else {
-			return -EBUSY;
+		} else if (should_coal < 0) {
+			return should_coal;
 		}
 	}
 
@@ -1197,8 +1206,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
 		end = (u64 *)vmwr + flits;
 	}
 
-	len = 0;
-	len += sizeof(*cpl);
+	len = sizeof(*cpl);
 
 	/* Coalescing skipped and we send through normal path */
 	if (!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
-- 
2.27.0