DPDK patches and discussions
From: Marvin Liu <yong.liu@intel.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com, zhihong.wang@intel.com
Cc: dev@dpdk.org, Marvin Liu <yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH 2/2] net/virtio: use indirect ring in packed datapath
Date: Mon, 28 Sep 2020 16:20:52 +0800
Message-ID: <20200928082052.61872-2-yong.liu@intel.com>
In-Reply-To: <20200928082052.61872-1-yong.liu@intel.com>

Like the split ring, the packed ring now uses indirect ring elements when
an mbuf being enqueued needs multiple descriptors. Each multi-segment
packet therefore takes only one slot in the main descriptor ring.
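
For illustration only, below is a minimal self-contained sketch of the
slot accounting this patch changes. It is not driver code; the helper
name slots_needed() and the example values are invented for the sketch.
With an indirect descriptor the whole mbuf chain is described in a
separate table and consumes a single main-ring slot; otherwise each
segment takes one slot, plus one more for a standalone virtio-net header
when the header cannot be pushed into the first segment.

  #include <stdbool.h>
  #include <stdio.h>

  /* Mirrors: slots = use_indirect ? 1 : (nb_segs + !can_push) */
  static int
  slots_needed(int nb_segs, bool can_push, bool use_indirect)
  {
          if (use_indirect)
                  return 1;               /* chain goes via indirect table */
          return nb_segs + !can_push;     /* extra slot for separate header */
  }

  int
  main(void)
  {
          /* 4-segment packet, header cannot be pushed into the first segment */
          printf("default : %d slots\n", slots_needed(4, false, false)); /* 5 */
          printf("indirect: %d slots\n", slots_needed(4, false, true));  /* 1 */
          return 0;
  }

Note that the indirect table itself still holds one entry per segment
plus one for the header, which is why the indirect path is only taken
when nb_segs < VIRTIO_MAX_TX_INDIRECT.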

Signed-off-by: Marvin Liu <yong.liu@intel.com>

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index f915b8a2c..b3b1586a7 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1756,7 +1756,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
-		int can_push = 0, slots, need;
+		int can_push = 0, use_indirect = 0, slots, need;
 
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
@@ -1768,12 +1768,15 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
 			   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
 			can_push = 1;
-
+		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
+			use_indirect = 1;
 		/* How many main ring entries are needed to this Tx?
+		 * indirect   => 1
 		 * any_layout => number of segments
 		 * default    => number of segments + 1
 		 */
-		slots = txm->nb_segs + !can_push;
+		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
 		need = slots - vq->vq_free_cnt;
 
 		/* Positive value indicates it need free vring descriptors */
@@ -1791,7 +1794,8 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 		if (can_push)
 			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
 		else
-			virtqueue_enqueue_xmit_packed(txvq, txm, slots, 0,
+			virtqueue_enqueue_xmit_packed(txvq, txm, slots,
+						      use_indirect, 0,
 						      in_order);
 
 		virtio_update_packet_stats(&txvq->stats, txm);
diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.c b/drivers/net/virtio/virtio_rxtx_packed_avx.c
index 6a8214725..ce035b574 100644
--- a/drivers/net/virtio/virtio_rxtx_packed_avx.c
+++ b/drivers/net/virtio/virtio_rxtx_packed_avx.c
@@ -207,19 +207,26 @@ virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
 	struct virtqueue *vq = txvq->vq;
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
-	uint16_t slots, can_push;
+	uint16_t slots, can_push = 0, use_indirect = 0;
 	int16_t need;
 
+	/* optimize ring usage */
+	if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+	      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+	    rte_mbuf_refcnt_read(txm) == 1 &&
+	    RTE_MBUF_DIRECT(txm) &&
+	    txm->nb_segs == 1 &&
+	    rte_pktmbuf_headroom(txm) >= hdr_size)
+		can_push = 1;
+	else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+		 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
+		use_indirect = 1;
 	/* How many main ring entries are needed to this Tx?
+	 * indirect   => 1
 	 * any_layout => number of segments
 	 * default    => number of segments + 1
 	 */
-	can_push = rte_mbuf_refcnt_read(txm) == 1 &&
-		   RTE_MBUF_DIRECT(txm) &&
-		   txm->nb_segs == 1 &&
-		   rte_pktmbuf_headroom(txm) >= hdr_size;
-
-	slots = txm->nb_segs + !can_push;
+	slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
 	need = slots - vq->vq_free_cnt;
 
 	/* Positive value indicates it need free vring descriptors */
@@ -234,7 +241,8 @@ virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
 	}
 
 	/* Enqueue Packet buffers */
-	virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push, 1);
+	virtqueue_enqueue_xmit_packed(txvq, txm, slots, use_indirect,
+				can_push, 1);
 
 	txvq->stats.bytes += txm->pkt_len;
 	return 0;
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 7d910a0a1..753dfb85c 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -686,7 +686,8 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
 
 static inline void
 virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
-			      uint16_t needed, int can_push, int in_order)
+			      uint16_t needed, int use_indirect, int can_push,
+			      int in_order)
 {
 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
 	struct vq_desc_extra *dxp;
@@ -722,6 +723,25 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		/* if offload disabled, it is not zeroed below, do it now */
 		if (!vq->hw->has_tx_offload)
 			virtqueue_clear_net_hdr(hdr);
+	} else if (use_indirect) {
+		/* setup tx ring slot to point to indirect
+		 * descriptor list stored in reserved region.
+		 *
+		 * the first slot in indirect ring is already preset
+		 * to point to the header in reserved region
+		 */
+		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
+			RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
+		start_dp[idx].len   = (cookie->nb_segs + 1) *
+			sizeof(struct vring_packed_desc);
+		/* reset flags for indirect desc */
+		head_flags = VRING_DESC_F_INDIRECT;
+		head_flags |= vq->vq_packed.cached_flags;
+		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+
+		/* loop below will fill in rest of the indirect elements */
+		start_dp = txr[idx].tx_packed_indir;
+		idx = 1;
 	} else {
 		/* setup first tx ring slot to point to header
 		 * stored in reserved region.
@@ -767,6 +787,15 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 
 	start_dp[prev].id = id;
 
+	if (use_indirect) {
+		idx = head_idx;
+		if (++idx >= vq->vq_nentries) {
+			idx -= vq->vq_nentries;
+			vq->vq_packed.cached_flags ^=
+				VRING_PACKED_DESC_F_AVAIL_USED;
+		}
+	}
+
 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
 	vq->vq_avail_idx = idx;
 
-- 
2.17.1


