DPDK patches and discussions
From: Jens Freimann <jfreimann@redhat.com>
To: dev@dpdk.org
Cc: tiwei.bie@intel.com, yliu@fridaylinux.org,
	maxime.coquelin@redhat.com, mst@redhat.com, jens@freimann.org
Subject: [dpdk-dev] [PATCH v4 07/20] net/virtio: implement transmit path for packed queues
Date: Thu, 19 Apr 2018 09:07:38 +0200	[thread overview]
Message-ID: <20180419070751.8933-8-jfreimann@redhat.com> (raw)
In-Reply-To: <20180419070751.8933-1-jfreimann@redhat.com>

Implement the transmit path for devices that support Virtio 1.1
packed virtqueues.

Add the Virtio 1.1 feature bit and enable the code that adds
buffers to the vring and marks descriptors as available.

This is based on a patch by Yuanhan Liu.
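
For reference, availability in a packed ring is signalled per
descriptor: each descriptor carries an AVAIL and a USED flag, and the
driver keeps a wrap counter that toggles whenever the avail index
wraps around the ring. A minimal sketch of the marking step is below;
the struct layout, names and bit positions are illustrative only
(following the Virtio 1.1 draft), not the exact definitions this
series adds:

  #include <stdint.h>

  #define DESC_F_AVAIL (1 << 7)   /* bit 7 in the 1.1 draft */
  #define DESC_F_USED  (1 << 15)  /* bit 15 in the 1.1 draft */

  struct desc_packed_sketch {
          uint64_t addr;
          uint32_t len;
          uint16_t index;         /* buffer id */
          uint16_t flags;
  };

  /*
   * Mark a descriptor available: AVAIL mirrors the driver's wrap
   * counter, USED is its complement.  After the driver wraps, the
   * flags it writes flip, so the device can tell fresh descriptors
   * from ones it has already consumed.
   */
  static inline void
  set_desc_avail_sketch(struct desc_packed_sketch *desc, int wrap_counter)
  {
          uint16_t flags = desc->flags & ~(DESC_F_AVAIL | DESC_F_USED);

          if (wrap_counter)
                  flags |= DESC_F_AVAIL;
          else
                  flags |= DESC_F_USED;
          desc->flags = flags;
  }

The transmit path below captures the counter when it claims the head
slot and only flips the head's flags after a write barrier, so the
device never sees a partially written chain.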

Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c |   8 ++-
 drivers/net/virtio/virtio_ethdev.h |   2 +
 drivers/net/virtio/virtio_rxtx.c   | 104 ++++++++++++++++++++++++++++++++++++-
 3 files changed, 112 insertions(+), 2 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 0c9540b89..c5c2a268b 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -383,6 +383,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	vq->hw = hw;
 	vq->vq_queue_index = vtpci_queue_idx;
 	vq->vq_nentries = vq_size;
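+	/* Packed queues start with the avail wrap counter set to 1. */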
+	if (vtpci_packed_queue(hw))
+		vq->vq_ring.avail_wrap_counter = 1;
 
 	/*
 	 * Reserve a memzone for vring elements
@@ -1329,7 +1331,11 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 		eth_dev->rx_pkt_burst = &virtio_recv_pkts;
 	}
 
-	if (hw->use_simple_tx) {
+	if (vtpci_packed_queue(hw)) {
+		PMD_INIT_LOG(INFO, "virtio: using virtio 1.1 Tx path on port %u",
+			eth_dev->data->port_id);
+		eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
+	} else if (hw->use_simple_tx) {
 		PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
 			eth_dev->data->port_id);
 		eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index bb40064ea..5420d7648 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -85,6 +85,8 @@ uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
+uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
 
 uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index a8aa87b32..b749babf3 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -38,6 +38,103 @@
 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
 #endif
 
+
+/* Cleanup from completed transmits. */
+static void
+virtio_xmit_cleanup_packed(struct virtqueue *vq)
+{
+	uint16_t idx;
+	uint16_t size = vq->vq_nentries;
+	struct vring_desc_packed *desc = vq->vq_ring.desc_packed;
+	struct vq_desc_extra *dxp;
+
+	idx = vq->vq_used_cons_idx;
+	while (desc_is_used(&desc[idx]) &&
+	       vq->vq_free_cnt < size) {
+		dxp = &vq->vq_descx[idx];
+		vq->vq_free_cnt += dxp->ndescs;
+		/* Advance the consumed index past this chain, wrapping at
+		 * the end of the ring. */
+		vq->vq_used_cons_idx = (vq->vq_used_cons_idx + dxp->ndescs) % size;
+		idx = vq->vq_used_cons_idx;
+	}
+}
+
+uint16_t
+virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+		     uint16_t nb_pkts)
+{
+	struct virtnet_tx *txvq = tx_queue;
+	struct virtqueue *vq = txvq->vq;
+	uint16_t i;
+	struct vring_desc_packed *desc = vq->vq_ring.desc_packed;
+	uint16_t idx;
+	struct vq_desc_extra *dxp;
+
+	if (unlikely(nb_pkts < 1))
+		return nb_pkts;
+
+	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+
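+	/* Reclaim completed chains when free descriptors run below the
+	 * threshold. */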
+	if (likely(vq->vq_free_cnt < vq->vq_free_thresh))
+		virtio_xmit_cleanup_packed(vq);
+
+	for (i = 0; i < nb_pkts; i++) {
+		struct rte_mbuf *txm = tx_pkts[i];
+		struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+		uint16_t head_idx;
+		int wrap_counter;
+		int descs_used;
+
+		if (unlikely(txm->nb_segs + 1 > vq->vq_free_cnt)) {
+			virtio_xmit_cleanup_packed(vq);
+
+			if (unlikely(txm->nb_segs + 1 > vq->vq_free_cnt)) {
+				PMD_TX_LOG(ERR,
+					   "No free tx descriptors to transmit");
+				break;
+			}
+		}
+
+		txvq->stats.bytes += txm->pkt_len;
+
+		vq->vq_free_cnt -= txm->nb_segs + 1;
+
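+		/* Remember the wrap counter for the head descriptor; the
+		 * avail index may wrap while the rest of the chain is
+		 * built, toggling the queue's counter. */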
+		wrap_counter = vq->vq_ring.avail_wrap_counter;
+		idx = update_pq_avail_index(vq);
+		head_idx = idx;
+
+		dxp = &vq->vq_descx[idx];
+		if (dxp->cookie != NULL)
+			rte_pktmbuf_free(dxp->cookie);
+		dxp->cookie = txm;
+
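+		/* The head descriptor points at the virtio-net header for
+		 * this packet in the per-queue header memzone. */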
+		desc[idx].addr  = txvq->virtio_net_hdr_mem +
+				  RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+		desc[idx].len   = vq->hw->vtnet_hdr_size;
+		desc[idx].flags = VRING_DESC_F_NEXT;
+		descs_used = 1;
+
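+		/* Add one descriptor per mbuf segment; each stores the
+		 * head index to identify the chain it belongs to. */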
+		do {
+			idx = update_pq_avail_index(vq);
+			desc[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(txm, vq);
+			desc[idx].len   = txm->data_len;
+			desc[idx].flags = VRING_DESC_F_NEXT;
+			desc[idx].index = head_idx;
+			descs_used++;
+		} while ((txm = txm->next) != NULL);
+
+		desc[idx].flags &= ~VRING_DESC_F_NEXT;
+
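+		/* Write barrier: the device must not see the head as
+		 * available before the chain contents are visible. */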
+		rte_smp_wmb();
+		_set_desc_avail(&desc[head_idx], wrap_counter);
+		vq->vq_descx[head_idx].ndescs = descs_used;
+	}
+
+	txvq->stats.packets += i;
+	txvq->stats.errors  += nb_pkts - i;
+
+	return i;
+}
+
 int
 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
 {
@@ -547,6 +644,10 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (vtpci_packed_queue(hw)) {
+		vq->vq_ring.avail_wrap_counter = 1;
+	}
+
 	if (hw->use_simple_tx) {
 		for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
 			vq->vq_ring.avail->ring[desc_idx] =
@@ -567,7 +668,8 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
 			vq->vq_ring.avail->ring[desc_idx] = desc_idx;
 	}
 
-	VIRTQUEUE_DUMP(vq);
+	if (!vtpci_packed_queue(hw))
+		VIRTQUEUE_DUMP(vq);
 
 	return 0;
 }
-- 
2.14.3

Thread overview: 22+ messages
2018-04-19  7:07 [dpdk-dev] [PATCH v4 00/20] implement packed virtqueues Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 01/20] net/virtio: vring init for packed queues Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 02/20] net/virtio: add virtio 1.1 defines Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 03/20] net/virtio: add packed virtqueue helpers Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 04/20] net/virtio: flush packed receive virtqueues Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 05/20] net/virtio: dump packed virtqueue data Jens Freimann
2018-04-25  4:13   ` Wang, Xiao W
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 06/20] net/virtio-user: add option to use packed queues Jens Freimann
2018-04-19  7:07 ` Jens Freimann [this message]
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 08/20] net/virtio: implement receive path for " Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 09/20] net/virtio: add virtio send command packed queue support Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 10/20] net/virtio: add support for mergeable buffers with packed virtqueues Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 11/20] net/virtio: add support for event suppression Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 12/20] vhost: add virtio packed virtqueue defines Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 13/20] vhost: add helpers for packed virtqueues Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 14/20] vhost: vring address setup for packed queues Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 15/20] vhost: dequeue " Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 16/20] vhost: packed queue enqueue path Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 17/20] vhost: add support for mergeable buffers with packed virtqueues Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 18/20] vhost: add event suppression for packed queues Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 19/20] net/virtio: by default disable packed virtqueues Jens Freimann
2018-04-19  7:07 ` [dpdk-dev] [PATCH v4 20/20] vhost: " Jens Freimann
