DPDK patches and discussions
* [dpdk-dev] [PATCH 1/2] net/qede: enhance receive data path CPU utilization
@ 2020-01-02 17:59 Shahed Shaikh
  2020-01-02 17:59 ` [dpdk-dev] [PATCH 2/2] net/qede: enhance transmit " Shahed Shaikh
  2020-01-14  6:11 ` [dpdk-dev] [PATCH 1/2] net/qede: enhance receive " Jerin Jacob
  0 siblings, 2 replies; 3+ messages in thread
From: Shahed Shaikh @ 2020-01-02 17:59 UTC (permalink / raw)
  To: dev; +Cc: rmody, jerinj, GR-Everest-DPDK-Dev

Use a lightweight receive packet handler for non-LRO, non-scattered
packets to improve CPU utilization in the receive data path.
We measured a ~18% improvement in CPU cycles with this handler.

Signed-off-by: Shahed Shaikh <shshaikh@marvell.com>
---
 drivers/net/qede/qede_ethdev.c |  36 ++++--
 drivers/net/qede/qede_rxtx.c   | 222 +++++++++++++++++++++++++++++++++
 drivers/net/qede/qede_rxtx.h   |   4 +-
 3 files changed, 253 insertions(+), 9 deletions(-)
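
Note (not part of the patch): which Rx handler the PMD installs depends only on
the port's LRO and scattered-Rx state, as set up by qede_assign_rxtx_handlers()
in the diff below. A minimal application-side sketch of a configuration that
keeps both off, so qede_recv_pkts_regular() gets selected, might look like the
following; the function name, queue sizes, and single-queue layout are
hypothetical, while the rte_ethdev calls are the standard ones.

#include <rte_ethdev.h>

/* Sketch: configure port 'port_id' with no LRO and no scattered Rx.
 * With a sufficiently large mbuf data room, dev->data->lro and
 * dev->data->scattered_rx stay clear, so the qede PMD assigns the
 * lightweight Rx handler. Mempool creation and startup are omitted.
 */
static int setup_fast_rx(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	/* No DEV_RX_OFFLOAD_TCP_LRO, no DEV_RX_OFFLOAD_SCATTER. */
	conf.rxmode.offloads = 0;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mb_pool);
	if (ret < 0)
		return ret;

	return rte_eth_tx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id), NULL);
}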

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 8064735db..47e90096a 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -267,6 +267,29 @@ qede_interrupt_handler(void *param)
 		DP_ERR(edev, "rte_intr_ack failed\n");
 }
 
+static void
+qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
+{
+	struct qede_dev *qdev = dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	if (ECORE_IS_CMT(edev)) {
+		dev->rx_pkt_burst = qede_recv_pkts_cmt;
+		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
+		return;
+	}
+
+	if (dev->data->lro || dev->data->scattered_rx) {
+		DP_INFO(edev, "Assigning qede_recv_pkts\n");
+		dev->rx_pkt_burst = qede_recv_pkts;
+	} else {
+		DP_INFO(edev, "Assigning qede_recv_pkts_regular\n");
+		dev->rx_pkt_burst = qede_recv_pkts_regular;
+	}
+
+	dev->tx_pkt_burst = qede_xmit_pkts;
+}
+
 static void
 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
 {
@@ -1064,6 +1087,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	/* Start/resume traffic */
 	qede_fastpath_start(edev);
 
+	qede_assign_rxtx_handlers(eth_dev);
 	DP_INFO(edev, "Device started\n");
 
 	return 0;
@@ -1951,6 +1975,7 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 	};
 
 	if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
+	    eth_dev->rx_pkt_burst == qede_recv_pkts_regular ||
 	    eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)
 		return ptypes;
 
@@ -2242,7 +2267,9 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* update max frame size */
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
+
 	/* Reassign back */
+	qede_assign_rxtx_handlers(dev);
 	if (ECORE_IS_CMT(edev)) {
 		dev->rx_pkt_burst = qede_recv_pkts_cmt;
 		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
@@ -2447,14 +2474,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
 		QEDE_PMD_DRV_VER_STR_SIZE);
 
-	if (ECORE_IS_CMT(edev)) {
-		eth_dev->rx_pkt_burst = qede_recv_pkts_cmt;
-		eth_dev->tx_pkt_burst = qede_xmit_pkts_cmt;
-	} else {
-		eth_dev->rx_pkt_burst = qede_recv_pkts;
-		eth_dev->tx_pkt_burst = qede_xmit_pkts;
-	}
-
+	qede_assign_rxtx_handlers(eth_dev);
 	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
 
 	/* For CMT mode device do periodic polling for slowpath events.
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index dbb74fc64..3b486a0a4 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1518,6 +1518,228 @@ print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
 }
 #endif
 
+uint16_t
+qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
+	register struct rte_mbuf *rx_mb = NULL;
+	struct qede_rx_queue *rxq = p_rxq;
+	struct qede_dev *qdev = rxq->qdev;
+	struct ecore_dev *edev = &qdev->edev;
+	union eth_rx_cqe *cqe;
+	uint64_t ol_flags;
+	enum eth_rx_cqe_type cqe_type;
+	int rss_enable = qdev->rss_enable;
+	int rx_alloc_count = 0;
+	uint32_t packet_type;
+	uint32_t rss_hash;
+	uint16_t vlan_tci, port_id;
+	uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index, num_rx_bds;
+	uint16_t rx_pkt = 0;
+	uint16_t pkt_len = 0;
+	uint16_t len; /* Length of first BD */
+	uint16_t preload_idx;
+	uint16_t parse_flag;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+	uint8_t bitfield_val;
+#endif
+	uint8_t offset, flags, bd_num;
+
+
+	/* Allocate buffers that we used in previous loop */
+	if (rxq->rx_alloc_count) {
+		if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
+			     rxq->rx_alloc_count))) {
+			struct rte_eth_dev *dev;
+
+			PMD_RX_LOG(ERR, rxq,
+				   "New buffer allocation failed,"
+				   "dropping incoming packet\n");
+			dev = &rte_eth_devices[rxq->port_id];
+			dev->data->rx_mbuf_alloc_failed +=
+							rxq->rx_alloc_count;
+			rxq->rx_alloc_errors += rxq->rx_alloc_count;
+			return 0;
+		}
+		qede_update_rx_prod(qdev, rxq);
+		rxq->rx_alloc_count = 0;
+	}
+
+	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+	rte_rmb();
+
+	if (hw_comp_cons == sw_comp_cons)
+		return 0;
+
+	num_rx_bds =  NUM_RX_BDS(rxq);
+	port_id = rxq->port_id;
+
+	while (sw_comp_cons != hw_comp_cons) {
+		ol_flags = 0;
+		packet_type = RTE_PTYPE_UNKNOWN;
+		vlan_tci = 0;
+		rss_hash = 0;
+
+		/* Get the CQE from the completion ring */
+		cqe =
+		    (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+		cqe_type = cqe->fast_path_regular.type;
+		PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
+
+		if (likely(cqe_type == ETH_RX_CQE_TYPE_REGULAR)) {
+			fp_cqe = &cqe->fast_path_regular;
+		} else {
+			if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
+				PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
+				ecore_eth_cqe_completion
+					(&edev->hwfns[rxq->queue_id %
+						      edev->num_hwfns],
+					 (struct eth_slow_path_rx_cqe *)cqe);
+			}
+			goto next_cqe;
+		}
+
+		/* Get the data from the SW ring */
+		sw_rx_index = rxq->sw_rx_cons & num_rx_bds;
+		rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+		assert(rx_mb != NULL);
+
+		parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
+		offset = fp_cqe->placement_offset;
+		len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
+		pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
+		vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+		rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
+		bd_num = fp_cqe->bd_num;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+		bitfield_val = fp_cqe->bitfields;
+#endif
+
+		if (unlikely(qede_tunn_exist(parse_flag))) {
+			PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
+			if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
+				PMD_RX_LOG(ERR, rxq,
+					    "L4 csum failed, flags = 0x%x\n",
+					    parse_flag);
+				rxq->rx_hw_errors++;
+				ol_flags |= PKT_RX_L4_CKSUM_BAD;
+			} else {
+				ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+			}
+
+			if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
+				PMD_RX_LOG(ERR, rxq,
+					"Outer L3 csum failed, flags = 0x%x\n",
+					parse_flag);
+				rxq->rx_hw_errors++;
+				ol_flags |= PKT_RX_EIP_CKSUM_BAD;
+			} else {
+				ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+			}
+
+			flags = fp_cqe->tunnel_pars_flags.flags;
+
+			/* Tunnel_type */
+			packet_type =
+				qede_rx_cqe_to_tunn_pkt_type(flags);
+
+			/* Inner header */
+			packet_type |=
+			      qede_rx_cqe_to_pkt_type_inner(parse_flag);
+
+			/* Outer L3/L4 types are not available in the CQE.
+			 * Need to add offset to parse correctly.
+			 */
+			rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
+			packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
+		} else {
+			packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
+		}
+
+		/* Common handling for non-tunnel packets and for inner
+		 * headers in the case of tunnel.
+		 */
+		if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
+			PMD_RX_LOG(ERR, rxq,
+				    "L4 csum failed, flags = 0x%x\n",
+				    parse_flag);
+			rxq->rx_hw_errors++;
+			ol_flags |= PKT_RX_L4_CKSUM_BAD;
+		} else {
+			ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+		}
+		if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
+			PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
+				   parse_flag);
+			rxq->rx_hw_errors++;
+			ol_flags |= PKT_RX_IP_CKSUM_BAD;
+		} else {
+			ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+		}
+
+		if (unlikely(CQE_HAS_VLAN(parse_flag) ||
+			     CQE_HAS_OUTER_VLAN(parse_flag))) {
+			/* Note: FW doesn't indicate Q-in-Q packet */
+			ol_flags |= PKT_RX_VLAN;
+			if (qdev->vlan_strip_flg) {
+				ol_flags |= PKT_RX_VLAN_STRIPPED;
+				rx_mb->vlan_tci = vlan_tci;
+			}
+		}
+
+		if (rss_enable) {
+			ol_flags |= PKT_RX_RSS_HASH;
+			rx_mb->hash.rss = rss_hash;
+		}
+
+		rx_alloc_count++;
+		qede_rx_bd_ring_consume(rxq);
+
+		/* Prefetch next mbuf while processing current one. */
+		preload_idx = rxq->sw_rx_cons & num_rx_bds;
+		rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+
+		/* Update rest of the MBUF fields */
+		rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
+		rx_mb->port = port_id;
+		rx_mb->ol_flags = ol_flags;
+		rx_mb->data_len = len;
+		rx_mb->packet_type = packet_type;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+		print_rx_bd_info(rx_mb, rxq, bitfield_val);
+#endif
+		rx_mb->nb_segs = bd_num;
+		rx_mb->pkt_len = pkt_len;
+
+		rx_pkts[rx_pkt] = rx_mb;
+		rx_pkt++;
+
+next_cqe:
+		ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
+		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+		if (rx_pkt == nb_pkts) {
+			PMD_RX_LOG(DEBUG, rxq,
+				   "Budget reached nb_pkts=%u received=%u",
+				   rx_pkt, nb_pkts);
+			break;
+		}
+	}
+
+	/* Request number of buffers to be allocated in next loop */
+	rxq->rx_alloc_count = rx_alloc_count;
+
+	rxq->rcv_pkts += rx_pkt;
+	rxq->rx_segs += rx_pkt;
+	PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
+
+	return rx_pkt;
+}
+
 uint16_t
 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index 75cc930fd..a4c634e88 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -283,7 +283,9 @@ uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts);
 uint16_t qede_recv_pkts_cmt(void *p_rxq, struct rte_mbuf **rx_pkts,
 			    uint16_t nb_pkts);
-
+uint16_t
+qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts,
+		       uint16_t nb_pkts);
 uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
 			      struct rte_mbuf **pkts,
 			      uint16_t nb_pkts);
-- 
2.17.1
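
Aside (illustration only, not part of the patch): besides dropping the LRO and
scatter handling, the handler above defers Rx buffer replenishment. Instead of
allocating one mbuf per consumed BD inside the hot loop, it counts consumed
descriptors in rx_alloc_count and does a single bulk allocation plus one
producer update at the start of the next poll. A generic sketch of that
deferral pattern follows; it uses rte_pktmbuf_alloc_bulk() rather than the
driver's internal qede_alloc_rx_bulk_mbufs(), and the ring/doorbell details
are reduced to a hypothetical stub.

#include <rte_mbuf.h>

/* Hypothetical per-queue state mirroring the idea in qede_recv_pkts_regular():
 * 'pending_refill' plays the role of rxq->rx_alloc_count and is assumed to
 * never exceed the ring size (512 here).
 */
struct rx_refill_ctx {
	struct rte_mempool *pool;
	struct rte_mbuf *ring[512];
	uint16_t prod;
	uint16_t pending_refill;
};

/* Called at the top of each poll: one bulk allocation and one producer
 * update cover everything consumed during the previous poll.
 */
static int refill_deferred(struct rx_refill_ctx *ctx)
{
	struct rte_mbuf *bufs[512];
	uint16_t i, n = ctx->pending_refill;

	if (n == 0)
		return 0;

	/* Bulk allocation amortizes mempool overhead across 'n' buffers. */
	if (rte_pktmbuf_alloc_bulk(ctx->pool, bufs, n) != 0)
		return -1;	/* caller skips this poll, like the PMD does */

	for (i = 0; i < n; i++)
		ctx->ring[ctx->prod++ & 511] = bufs[i];

	/* A single producer/doorbell write would go here (hardware specific). */
	ctx->pending_refill = 0;
	return 0;
}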



* [dpdk-dev] [PATCH 2/2] net/qede: enhance transmit data path CPU utilization
  2020-01-02 17:59 [dpdk-dev] [PATCH 1/2] net/qede: enhance receive data path CPU utilization Shahed Shaikh
@ 2020-01-02 17:59 ` Shahed Shaikh
  2020-01-14  6:11 ` [dpdk-dev] [PATCH 1/2] net/qede: enhance receive " Jerin Jacob
  1 sibling, 0 replies; 3+ messages in thread
From: Shahed Shaikh @ 2020-01-02 17:59 UTC (permalink / raw)
  To: dev; +Cc: rmody, jerinj, GR-Everest-DPDK-Dev

Use a lightweight transmit handler that covers the non-offloaded Tx data path.
This gives a CPU utilization improvement of ~8%.

Signed-off-by: Shahed Shaikh <shshaikh@marvell.com>
---
 drivers/net/qede/qede_ethdev.c |  15 +++-
 drivers/net/qede/qede_rxtx.c   | 125 +++++++++++++++++++++++++++++++++
 drivers/net/qede/qede_rxtx.h   |   2 +
 3 files changed, 141 insertions(+), 1 deletion(-)
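
Note (not part of the patch): as with the Rx side, the Tx handler choice is
driven purely by the port's configured Tx offloads. Enabling TSO, VLAN insert,
or outer-IPv4 checksum keeps the full qede_xmit_pkts(); anything else selects
the new qede_xmit_pkts_regular(), which still programs plain IP/L4 checksum
offload per packet. A hypothetical application-side sketch follows; the flag
and struct names are standard rte_ethdev/rte_mbuf ones, and the l2_len/l3_len
settings follow general Tx checksum-offload convention rather than anything
qede-specific.

#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Tx offload set that, per qede_assign_rxtx_handlers() in the diff below,
 * selects qede_xmit_pkts_regular(): no TSO, no VLAN insert, no outer IPv4
 * checksum. Plain IPv4/TCP/UDP checksums stay available.
 */
static void set_lightweight_txmode(struct rte_eth_conf *conf)
{
	conf->txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
				DEV_TX_OFFLOAD_TCP_CKSUM |
				DEV_TX_OFFLOAD_UDP_CKSUM;
}

/* Per packet, the lightweight path honours these mbuf flags (see the
 * PKT_TX_IP_CKSUM / PKT_TX_TCP_CKSUM handling in the handler below).
 */
static void request_csum_offload(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
}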

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 47e90096a..055f046e2 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -270,8 +270,10 @@ qede_interrupt_handler(void *param)
 static void
 qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
 {
+	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 	struct qede_dev *qdev = dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
+	bool use_tx_offload = false;
 
 	if (ECORE_IS_CMT(edev)) {
 		dev->rx_pkt_burst = qede_recv_pkts_cmt;
@@ -287,7 +289,18 @@ qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
 		dev->rx_pkt_burst = qede_recv_pkts_regular;
 	}
 
-	dev->tx_pkt_burst = qede_xmit_pkts;
+	use_tx_offload = !!(tx_offloads &
+			    (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+			     DEV_TX_OFFLOAD_TCP_TSO | /* tso */
+			     DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+
+	if (use_tx_offload) {
+		DP_INFO(edev, "Assigning qede_xmit_pkts\n");
+		dev->tx_pkt_burst = qede_xmit_pkts;
+	} else {
+		DP_INFO(edev, "Assigning qede_xmit_pkts_regular\n");
+		dev->tx_pkt_burst = qede_xmit_pkts_regular;
+	}
 }
 
 static void
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 3b486a0a4..985e49f1c 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -2234,6 +2234,131 @@ qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
 }
 #endif
 
+uint16_t
+qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct qede_tx_queue *txq = p_txq;
+	struct qede_dev *qdev = txq->qdev;
+	struct ecore_dev *edev = &qdev->edev;
+	struct eth_tx_1st_bd *bd1;
+	struct eth_tx_2nd_bd *bd2;
+	struct eth_tx_3rd_bd *bd3;
+	struct rte_mbuf *m_seg = NULL;
+	struct rte_mbuf *mbuf;
+	struct qede_tx_entry *sw_tx_ring;
+	uint16_t nb_tx_pkts;
+	uint16_t bd_prod;
+	uint16_t idx;
+	uint16_t nb_frags = 0;
+	uint16_t nb_pkt_sent = 0;
+	uint8_t nbds;
+	uint64_t tx_ol_flags;
+	/* BD1 */
+	uint16_t bd1_bf;
+	uint8_t bd1_bd_flags_bf;
+
+	if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
+		PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
+			   nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
+		qede_process_tx_compl(edev, txq);
+	}
+
+	nb_tx_pkts  = nb_pkts;
+	bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
+	sw_tx_ring = txq->sw_tx_ring;
+
+	while (nb_tx_pkts--) {
+		/* Init flags/values */
+		nbds = 0;
+		bd1 = NULL;
+		bd2 = NULL;
+		bd3 = NULL;
+		bd1_bf = 0;
+		bd1_bd_flags_bf = 0;
+		nb_frags = 0;
+
+		mbuf = *tx_pkts++;
+		assert(mbuf);
+
+
+		/* Check minimum TX BDS availability against available BDs */
+		if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
+			break;
+
+		tx_ol_flags = mbuf->ol_flags;
+		bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+
+		if (unlikely(txq->nb_tx_avail <
+				ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
+			break;
+		bd1_bf |=
+		       (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
+			<< ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+
+		/* Offload the IP checksum in the hardware */
+		if (tx_ol_flags & PKT_TX_IP_CKSUM)
+			bd1_bd_flags_bf |=
+				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+
+		/* L4 checksum offload (tcp or udp) */
+		if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
+		    (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM)))
+			bd1_bd_flags_bf |=
+				1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+		/* Fill the entry in the SW ring and the BDs in the FW ring */
+		idx = TX_PROD(txq);
+		sw_tx_ring[idx].mbuf = mbuf;
+
+		/* BD1 */
+		bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
+		memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
+		nbds++;
+
+		/* Map MBUF linear data for DMA and set in the BD1 */
+		QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
+				     mbuf->data_len);
+		bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
+		bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
+
+		/* Handle fragmented MBUF */
+		if (unlikely(mbuf->nb_segs > 1)) {
+			m_seg = mbuf->next;
+
+			/* Encode scatter gather buffer descriptors */
+			nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3,
+						     nbds - 1);
+		}
+
+		bd1->data.nbds = nbds + nb_frags;
+
+		txq->nb_tx_avail -= bd1->data.nbds;
+		txq->sw_tx_prod++;
+		bd_prod =
+		    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+		print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
+#endif
+		nb_pkt_sent++;
+		txq->xmit_pkts++;
+	}
+
+	/* Write value of prod idx into bd_prod */
+	txq->tx_db.data.bd_prod = bd_prod;
+	rte_wmb();
+	rte_compiler_barrier();
+	DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
+	rte_wmb();
+
+	/* Check again for Tx completions */
+	qede_process_tx_compl(edev, txq);
+
+	PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
+		   nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
+
+	return nb_pkt_sent;
+}
+
 uint16_t
 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index a4c634e88..d7ff870b2 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -275,6 +275,8 @@ uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts);
 uint16_t qede_xmit_pkts_cmt(void *p_txq, struct rte_mbuf **tx_pkts,
 			    uint16_t nb_pkts);
+uint16_t qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts,
+				uint16_t nb_pkts);
 
 uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
 			     uint16_t nb_pkts);
-- 
2.17.1
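
For completeness, a minimal (hypothetical) transmit loop showing how an
application drives either handler through the generic burst API; a partial
send is retried by advancing the array, which matches the handler above
returning nb_pkt_sent < nb_pkts when BDs run short. The retry policy is an
assumption for illustration, not something the patch prescribes.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch: push a batch of mbufs out of queue 0 of 'port_id'. Whatever
 * tx_pkt_burst the PMD installed (qede_xmit_pkts or qede_xmit_pkts_regular)
 * runs underneath rte_eth_tx_burst().
 */
static void send_batch(uint16_t port_id, struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t sent = 0;

	while (sent < n) {
		uint16_t ret = rte_eth_tx_burst(port_id, 0,
						pkts + sent, n - sent);
		if (ret == 0) {
			/* Ring full and no completions yet; a real application
			 * would back off or drop instead of spinning forever.
			 */
			continue;
		}
		sent += ret;
	}
}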



* Re: [dpdk-dev] [PATCH 1/2] net/qede: enhance receive data path CPU utilization
  2020-01-02 17:59 [dpdk-dev] [PATCH 1/2] net/qede: enhance receive data path CPU utilization Shahed Shaikh
  2020-01-02 17:59 ` [dpdk-dev] [PATCH 2/2] net/qede: enhance transmit " Shahed Shaikh
@ 2020-01-14  6:11 ` Jerin Jacob
  1 sibling, 0 replies; 3+ messages in thread
From: Jerin Jacob @ 2020-01-14  6:11 UTC (permalink / raw)
  To: Shahed Shaikh; +Cc: dpdk-dev, Rasesh Mody, Jerin Jacob, GR-Everest-DPDK-Dev

On Thu, Jan 2, 2020 at 11:29 PM Shahed Shaikh <shshaikh@marvell.com> wrote:
>
> Use a lightweight receive packet handler for non-LRO, non-scattered
> packets to improve CPU utilization in the receive data path.
> We measured a ~18% improvement in CPU cycles with this handler.
>
> Signed-off-by: Shahed Shaikh <shshaikh@marvell.com>

Series applied to dpdk-next-net-mrvl/master. Thanks.

> ---
>  drivers/net/qede/qede_ethdev.c |  36 ++++--
>  drivers/net/qede/qede_rxtx.c   | 222 +++++++++++++++++++++++++++++++++
>  drivers/net/qede/qede_rxtx.h   |   4 +-
>  3 files changed, 253 insertions(+), 9 deletions(-)

