DPDK patches and discussions
From: Rasesh Mody <rasesh.mody@cavium.com>
To: <dev@dpdk.org>
Cc: Harish Patil <harish.patil@cavium.com>, <Dept-EngDPDKDev@cavium.com>
Subject: [dpdk-dev] [PATCH v2 5/5] net/qede: refactor Tx routine
Date: Wed, 7 Jun 2017 00:42:22 -0700
Message-ID: <1496821342-6832-6-git-send-email-rasesh.mody@cavium.com>
In-Reply-To: <1495960654-352-1-git-send-email-rasesh.mody@cavium.com>

From: Harish Patil <harish.patil@cavium.com>

Refactor the Tx routine so that all Tx BD updates are grouped together.
Based on the Tx offloads requested, the Tx bitfields are accumulated in
temporary variables and the Tx BDs are written once at the end. This
also reduces the number of if checks, and makes it easier to
accommodate new Tx offload operations in the future.
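
In short, the routine now has the following shape (a condensed sketch
using the names from the patch below; the other offloads and the BD2/BD3
handling are elided):

	/* Accumulate all offload decisions in locals first ... */
	uint16_t bd1_bf = 0;
	uint8_t bd1_bd_flags_bf = 0;

	bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
	if (tx_ol_flags & PKT_TX_IP_CKSUM)
		bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
	if (tx_ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
		bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
	/* ... every other offload ORs its bits in the same way ... */

	/* ... then each BD is written exactly once at the end. */
	bd1->data.bitfields = bd1_bf;
	bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;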

Signed-off-by: Harish Patil <harish.patil@cavium.com>
---
 drivers/net/qede/qede_rxtx.c |  210 ++++++++++++++++++++++--------------------
 1 file changed, 108 insertions(+), 102 deletions(-)

diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 82aa607..1633b91 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1512,12 +1512,23 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 	uint8_t nbds;
 	bool ipv6_ext_flg;
 	bool lso_flg;
-	bool tunn_flg;
+	__rte_unused bool tunn_flg;
 	struct eth_tx_1st_bd *bd1;
 	struct eth_tx_2nd_bd *bd2;
 	struct eth_tx_3rd_bd *bd3;
 	uint64_t tx_ol_flags;
 	uint16_t hdr_size;
+	/* BD1 */
+	uint16_t bd1_bf;
+	uint8_t bd1_bd_flags_bf;
+	uint16_t vlan;
+	/* BD2 */
+	uint16_t bd2_bf1;
+	uint16_t bd2_bf2;
+	/* BD3 */
+	uint16_t mss;
+	uint16_t bd3_bf;
+
 
 	if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
 		PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
@@ -1533,10 +1544,17 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 		tunn_flg = false;
 		lso_flg = false;
 		nbds = 0;
+		vlan = 0;
 		bd1 = NULL;
 		bd2 = NULL;
 		bd3 = NULL;
 		hdr_size = 0;
+		bd1_bf = 0;
+		bd1_bd_flags_bf = 0;
+		bd2_bf1 = 0;
+		bd2_bf2 = 0;
+		mss = 0;
+		bd3_bf = 0;
 
 		mbuf = *tx_pkts++;
 		assert(mbuf);
@@ -1546,149 +1564,137 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 			break;
 
 		tx_ol_flags = mbuf->ol_flags;
+		bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
 
 #define RTE_ETH_IS_IPV6_HDR_EXT(ptype) ((ptype) & RTE_PTYPE_L3_IPV6_EXT)
-		if (RTE_ETH_IS_IPV6_HDR_EXT(mbuf->packet_type))
+		if (RTE_ETH_IS_IPV6_HDR_EXT(mbuf->packet_type)) {
 			ipv6_ext_flg = true;
-
-		if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type))
-			tunn_flg = true;
-
-		if (tx_ol_flags & PKT_TX_TCP_SEG)
-			lso_flg = true;
-
-		if (lso_flg) {
-			if (unlikely(txq->nb_tx_avail <
-						ETH_TX_MIN_BDS_PER_LSO_PKT))
-				break;
-		} else {
-			if (unlikely(txq->nb_tx_avail <
-					ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
-				break;
-		}
-
-		if (tunn_flg && ipv6_ext_flg) {
-			if (unlikely(txq->nb_tx_avail <
-				ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
-				break;
-		}
-		if (ipv6_ext_flg) {
 			if (unlikely(txq->nb_tx_avail <
 					ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT))
 				break;
 		}
 
-		/* Fill the entry in the SW ring and the BDs in the FW ring */
-		idx = TX_PROD(txq);
-		txq->sw_tx_ring[idx].mbuf = mbuf;
-
-		/* BD1 */
-		bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
-		memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
-		nbds++;
-
-		bd1->data.bd_flags.bitfields |=
-			1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
-		/* FW 8.10.x specific change */
-		if (!lso_flg) {
-			bd1->data.bitfields |=
-			(mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
-				<< ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
-			/* Map MBUF linear data for DMA and set in the BD1 */
-			QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
-					     mbuf->data_len);
-		} else {
-			/* For LSO, packet header and payload must reside on
-			 * buffers pointed by different BDs. Using BD1 for HDR
-			 * and BD2 onwards for data.
-			 */
-			hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
-			QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
-					     hdr_size);
-		}
-
-		if (tunn_flg) {
+		if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type)) {
+			if (ipv6_ext_flg) {
+				if (unlikely(txq->nb_tx_avail <
+				    ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
+					break;
+			}
+			tunn_flg = true;
 			/* First indicate its a tunnel pkt */
-			bd1->data.bitfields |=
-				ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
-				ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
-
+			bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
+				  ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
 			/* Legacy FW had flipped behavior in regard to this bit
 			 * i.e. it needed to set to prevent FW from touching
 			 * encapsulated packets when it didn't need to.
 			 */
-			if (unlikely(txq->is_legacy))
-				bd1->data.bitfields ^=
-					1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
-
+			if (unlikely(txq->is_legacy)) {
+				bd1_bf ^= 1 <<
+					ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+			}
 			/* Outer IP checksum offload */
 			if (tx_ol_flags & PKT_TX_OUTER_IP_CKSUM) {
-				bd1->data.bd_flags.bitfields |=
+				bd1_bd_flags_bf |=
 					ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
 					ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
 			}
-
 			/* Outer UDP checksum offload */
-			bd1->data.bd_flags.bitfields |=
+			bd1_bd_flags_bf |=
 				ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
 				ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
 		}
 
+		if (tx_ol_flags & PKT_TX_TCP_SEG) {
+			lso_flg = true;
+			if (unlikely(txq->nb_tx_avail <
+						ETH_TX_MIN_BDS_PER_LSO_PKT))
+				break;
+			/* For LSO, packet header and payload must reside on
+			 * buffers pointed by different BDs. Using BD1 for HDR
+			 * and BD2 onwards for data.
+			 */
+			hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+			bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
+			bd1_bd_flags_bf |=
+					1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+			/* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
+			bd1_bd_flags_bf |=
+					1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+			mss = rte_cpu_to_le_16(mbuf->tso_segsz);
+			/* Using one header BD */
+			bd3_bf |= rte_cpu_to_le_16(1 <<
+					ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+		} else {
+			if (unlikely(txq->nb_tx_avail <
+					ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
+				break;
+			bd1_bf |=
+			       (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
+				<< ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+		}
+
 		/* Descriptor based VLAN insertion */
 		if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
-			bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
-			bd1->data.bd_flags.bitfields |=
+			vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
+			bd1_bd_flags_bf |=
 			    1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
 		}
 
-		if (lso_flg)
-			bd1->data.bd_flags.bitfields |=
-				1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
-
 		/* Offload the IP checksum in the hardware */
-		if ((lso_flg) || (tx_ol_flags & PKT_TX_IP_CKSUM))
-			bd1->data.bd_flags.bitfields |=
-			    1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+		if (tx_ol_flags & PKT_TX_IP_CKSUM)
+			bd1_bd_flags_bf |=
+				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
 
 		/* L4 checksum offload (tcp or udp) */
-		if ((lso_flg) || (tx_ol_flags & (PKT_TX_TCP_CKSUM |
-						PKT_TX_UDP_CKSUM)))
-			/* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
-			bd1->data.bd_flags.bitfields |=
-			    1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+		if (tx_ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
+			bd1_bd_flags_bf |=
+				1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+		if (ipv6_ext_flg) {
+			/* TBD: check pseudo csum iff tx_prepare not called? */
+			bd2_bf1 |= ETH_L4_PSEUDO_CSUM_ZERO_LENGTH <<
+				ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
+		}
+
+		/* Fill the entry in the SW ring and the BDs in the FW ring */
+		idx = TX_PROD(txq);
+		txq->sw_tx_ring[idx].mbuf = mbuf;
+
+		/* BD1 */
+		bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
+		memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
+		nbds++;
+
+		/* Map MBUF linear data for DMA and set in the BD1 */
+		QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+					mbuf->data_len);
+		bd1->data.bitfields = bd1_bf;
+		bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
+		bd1->data.vlan = vlan;
 
-		/* BD2 */
 		if (lso_flg || ipv6_ext_flg) {
 			bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
 							(&txq->tx_pbl);
 			memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
 			nbds++;
-			QEDE_BD_SET_ADDR_LEN(bd2,
-					    (hdr_size +
-					    rte_mbuf_data_dma_addr(mbuf)),
-					    mbuf->data_len - hdr_size);
-			/* TBD: check pseudo csum iff tx_prepare not called? */
-			if (ipv6_ext_flg) {
-				bd2->data.bitfields1 |=
-				ETH_L4_PSEUDO_CSUM_ZERO_LENGTH <<
-				ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
-			}
-		}
 
-		/* BD3 */
-		if (lso_flg || ipv6_ext_flg) {
+			/* BD1 */
+			QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+					     hdr_size);
+			/* BD2 */
+			QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
+					     rte_mbuf_data_dma_addr(mbuf)),
+					     mbuf->data_len - hdr_size);
+			bd2->data.bitfields1 = bd2_bf1;
+			bd2->data.bitfields2 = bd2_bf2;
+
+			/* BD3 */
 			bd3 = (struct eth_tx_3rd_bd *)ecore_chain_produce
 							(&txq->tx_pbl);
 			memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
 			nbds++;
-			if (lso_flg) {
-				bd3->data.lso_mss =
-					rte_cpu_to_le_16(mbuf->tso_segsz);
-				/* Using one header BD */
-				bd3->data.bitfields |=
-					rte_cpu_to_le_16(1 <<
-					ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
-			}
+			bd3->data.bitfields = bd3_bf;
+			bd3->data.lso_mss = mss;
 		}
 
 		/* Handle fragmented MBUF */
-- 
1.7.10.3
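
A side note on the LSO path above: for TSO, the packet headers and the
payload must live in separate BDs, so after the common BD1 setup the
LSO/IPv6-ext branch re-points BD1 at just the headers and maps the rest
of the first segment in BD2. A minimal sketch of that split, assuming a
single contiguous (non-fragmented) mbuf:

	hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;

	/* BD1 maps only the L2+L3+L4 headers ... */
	QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf), hdr_size);
	/* ... BD2 maps the remaining payload of the first segment. */
	QEDE_BD_SET_ADDR_LEN(bd2, rte_mbuf_data_dma_addr(mbuf) + hdr_size,
			     mbuf->data_len - hdr_size);

Chained (multi-segment) mbufs are picked up by the fragmented-mbuf
handling further down the routine.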

