DPDK patches and discussions
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: tiwei.bie@intel.com, zhihong.wang@intel.com, amorenoz@redhat.com,
	xiao.w.wang@intel.com, dev@dpdk.org, jfreimann@redhat.com
Cc: stable@dpdk.org, Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH 03/15] net/virtio: move control path functions to virtqueue file
Date: Thu, 29 Aug 2019 09:59:48 +0200
Message-ID: <20190829080000.20806-4-maxime.coquelin@redhat.com>
In-Reply-To: <20190829080000.20806-1-maxime.coquelin@redhat.com>

The virtio-vdpa driver needs to implement the control path,
so move the related functions to the virtqueue file so that
they can be shared by both the virtio PMD and the virtio-vdpa
driver.
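
For reviewers less familiar with the PMD side, here is a minimal usage
sketch of the now-shared helper, modeled on the existing
promiscuous-mode path in virtio_ethdev.c. It is illustrative only and
not part of the patch: the wrapper name is hypothetical, and the
control-header constants (VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC,
VIRTIO_NET_OK) and the struct virtio_pmd_ctrl layout are assumed to be
the ones already provided by the PMD headers; virtio_send_command() is
declared in virtqueue.h after this patch.

/*
 * Illustrative sketch only: how a caller (e.g. the upcoming
 * virtio-vdpa driver) could use the shared control-queue helper.
 * Assumes it is built inside drivers/net/virtio so the PMD headers
 * are on the include path.
 */
#include <string.h>

#include "virtio_logs.h"
#include "virtqueue.h"

static int
example_enable_promisc(struct virtio_hw *hw)
{
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	memset(&ctrl, 0, sizeof(ctrl));

	/* Control header: class + command, as defined by the virtio spec. */
	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;

	/* Single one-byte argument: non-zero enables promiscuous mode. */
	ctrl.data[0] = 1;
	dlen[0] = 1;

	/*
	 * One data buffer (pkt_num = 1). The helper chains the header,
	 * data and ACK descriptors itself and returns the device status.
	 */
	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret != VIRTIO_NET_OK) {
		PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
		return -1;
	}

	return 0;
}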

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c | 252 ----------------------------
 drivers/net/virtio/virtqueue.c     | 255 +++++++++++++++++++++++++++++
 drivers/net/virtio/virtqueue.h     |   5 +
 3 files changed, 260 insertions(+), 252 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index f96588b9d..3682ee318 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -140,226 +140,6 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
 
 struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
 
-static struct virtio_pmd_ctrl *
-virtio_send_command_packed(struct virtnet_ctl *cvq,
-			   struct virtio_pmd_ctrl *ctrl,
-			   int *dlen, int pkt_num)
-{
-	struct virtqueue *vq = cvq->vq;
-	int head;
-	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
-	struct virtio_pmd_ctrl *result;
-	uint16_t flags;
-	int sum = 0;
-	int nb_descs = 0;
-	int k;
-
-	/*
-	 * Format is enforced in qemu code:
-	 * One TX packet for header;
-	 * At least one TX packet per argument;
-	 * One RX packet for ACK.
-	 */
-	head = vq->vq_avail_idx;
-	flags = vq->vq_packed.cached_flags;
-	desc[head].addr = cvq->virtio_net_hdr_mem;
-	desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
-	vq->vq_free_cnt--;
-	nb_descs++;
-	if (++vq->vq_avail_idx >= vq->vq_nentries) {
-		vq->vq_avail_idx -= vq->vq_nentries;
-		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
-	}
-
-	for (k = 0; k < pkt_num; k++) {
-		desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
-			+ sizeof(struct virtio_net_ctrl_hdr)
-			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
-		desc[vq->vq_avail_idx].len = dlen[k];
-		desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
-			vq->vq_packed.cached_flags;
-		sum += dlen[k];
-		vq->vq_free_cnt--;
-		nb_descs++;
-		if (++vq->vq_avail_idx >= vq->vq_nentries) {
-			vq->vq_avail_idx -= vq->vq_nentries;
-			vq->vq_packed.cached_flags ^=
-				VRING_PACKED_DESC_F_AVAIL_USED;
-		}
-	}
-
-	desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
-		+ sizeof(struct virtio_net_ctrl_hdr);
-	desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
-	desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
-		vq->vq_packed.cached_flags;
-	vq->vq_free_cnt--;
-	nb_descs++;
-	if (++vq->vq_avail_idx >= vq->vq_nentries) {
-		vq->vq_avail_idx -= vq->vq_nentries;
-		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
-	}
-
-	virtio_wmb(vq->hw->weak_barriers);
-	desc[head].flags = VRING_DESC_F_NEXT | flags;
-
-	virtio_wmb(vq->hw->weak_barriers);
-	virtqueue_notify(vq);
-
-	/* wait for used descriptors in virtqueue */
-	while (!desc_is_used(&desc[head], vq))
-		usleep(100);
-
-	virtio_rmb(vq->hw->weak_barriers);
-
-	/* now get used descriptors */
-	vq->vq_free_cnt += nb_descs;
-	vq->vq_used_cons_idx += nb_descs;
-	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
-		vq->vq_used_cons_idx -= vq->vq_nentries;
-		vq->vq_packed.used_wrap_counter ^= 1;
-	}
-
-	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
-			"vq->vq_avail_idx=%d\n"
-			"vq->vq_used_cons_idx=%d\n"
-			"vq->vq_packed.cached_flags=0x%x\n"
-			"vq->vq_packed.used_wrap_counter=%d\n",
-			vq->vq_free_cnt,
-			vq->vq_avail_idx,
-			vq->vq_used_cons_idx,
-			vq->vq_packed.cached_flags,
-			vq->vq_packed.used_wrap_counter);
-
-	result = cvq->virtio_net_hdr_mz->addr;
-	return result;
-}
-
-static struct virtio_pmd_ctrl *
-virtio_send_command_split(struct virtnet_ctl *cvq,
-			  struct virtio_pmd_ctrl *ctrl,
-			  int *dlen, int pkt_num)
-{
-	struct virtio_pmd_ctrl *result;
-	struct virtqueue *vq = cvq->vq;
-	uint32_t head, i;
-	int k, sum = 0;
-
-	head = vq->vq_desc_head_idx;
-
-	/*
-	 * Format is enforced in qemu code:
-	 * One TX packet for header;
-	 * At least one TX packet per argument;
-	 * One RX packet for ACK.
-	 */
-	vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
-	vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
-	vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
-	vq->vq_free_cnt--;
-	i = vq->vq_split.ring.desc[head].next;
-
-	for (k = 0; k < pkt_num; k++) {
-		vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
-		vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
-			+ sizeof(struct virtio_net_ctrl_hdr)
-			+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
-		vq->vq_split.ring.desc[i].len = dlen[k];
-		sum += dlen[k];
-		vq->vq_free_cnt--;
-		i = vq->vq_split.ring.desc[i].next;
-	}
-
-	vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
-	vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
-			+ sizeof(struct virtio_net_ctrl_hdr);
-	vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
-	vq->vq_free_cnt--;
-
-	vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;
-
-	vq_update_avail_ring(vq, head);
-	vq_update_avail_idx(vq);
-
-	PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
-
-	virtqueue_notify(vq);
-
-	rte_rmb();
-	while (VIRTQUEUE_NUSED(vq) == 0) {
-		rte_rmb();
-		usleep(100);
-	}
-
-	while (VIRTQUEUE_NUSED(vq)) {
-		uint32_t idx, desc_idx, used_idx;
-		struct vring_used_elem *uep;
-
-		used_idx = (uint32_t)(vq->vq_used_cons_idx
-				& (vq->vq_nentries - 1));
-		uep = &vq->vq_split.ring.used->ring[used_idx];
-		idx = (uint32_t) uep->id;
-		desc_idx = idx;
-
-		while (vq->vq_split.ring.desc[desc_idx].flags &
-				VRING_DESC_F_NEXT) {
-			desc_idx = vq->vq_split.ring.desc[desc_idx].next;
-			vq->vq_free_cnt++;
-		}
-
-		vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
-		vq->vq_desc_head_idx = idx;
-
-		vq->vq_used_cons_idx++;
-		vq->vq_free_cnt++;
-	}
-
-	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
-			vq->vq_free_cnt, vq->vq_desc_head_idx);
-
-	result = cvq->virtio_net_hdr_mz->addr;
-	return result;
-}
-
-static int
-virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
-		    int *dlen, int pkt_num)
-{
-	virtio_net_ctrl_ack status = ~0;
-	struct virtio_pmd_ctrl *result;
-	struct virtqueue *vq;
-
-	ctrl->status = status;
-
-	if (!cvq || !cvq->vq) {
-		PMD_INIT_LOG(ERR, "Control queue is not supported.");
-		return -1;
-	}
-
-	rte_spinlock_lock(&cvq->lock);
-	vq = cvq->vq;
-
-	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
-		"vq->hw->cvq = %p vq = %p",
-		vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
-
-	if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
-		rte_spinlock_unlock(&cvq->lock);
-		return -1;
-	}
-
-	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
-		sizeof(struct virtio_pmd_ctrl));
-
-	if (vtpci_packed_queue(vq->hw))
-		result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
-	else
-		result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);
-
-	rte_spinlock_unlock(&cvq->lock);
-	return result->status;
-}
-
 static int
 virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
 {
@@ -401,38 +181,6 @@ virtio_get_nr_vq(struct virtio_hw *hw)
 	return nr_vq;
 }
 
-static void
-virtio_init_vring(struct virtqueue *vq)
-{
-	int size = vq->vq_nentries;
-	uint8_t *ring_mem = vq->vq_ring_virt_mem;
-
-	PMD_INIT_FUNC_TRACE();
-
-	memset(ring_mem, 0, vq->vq_ring_size);
-
-	vq->vq_used_cons_idx = 0;
-	vq->vq_desc_head_idx = 0;
-	vq->vq_avail_idx = 0;
-	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
-	vq->vq_free_cnt = vq->vq_nentries;
-	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
-	if (vtpci_packed_queue(vq->hw)) {
-		vring_init_packed(&vq->vq_packed.ring, ring_mem,
-				  VIRTIO_PCI_VRING_ALIGN, size);
-		vring_desc_init_packed(vq, size);
-	} else {
-		struct vring *vr = &vq->vq_split.ring;
-
-		vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
-		vring_desc_init_split(vr->desc, size);
-	}
-	/*
-	 * Disable device(host) interrupting guest
-	 */
-	virtqueue_disable_intr(vq);
-}
-
 static int
 virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 {
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 5ff1e3587..db630e07c 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2010-2015 Intel Corporation
  */
 #include <stdint.h>
+#include <unistd.h>
 
 #include <rte_mbuf.h>
 
@@ -141,3 +142,257 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
 	else
 		virtqueue_rxvq_flush_split(vq);
 }
+
+static struct virtio_pmd_ctrl *
+virtio_send_command_packed(struct virtnet_ctl *cvq,
+			   struct virtio_pmd_ctrl *ctrl,
+			   int *dlen, int pkt_num)
+{
+	struct virtqueue *vq = cvq->vq;
+	int head;
+	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
+	struct virtio_pmd_ctrl *result;
+	uint16_t flags;
+	int sum = 0;
+	int nb_descs = 0;
+	int k;
+
+	/*
+	 * Format is enforced in qemu code:
+	 * One TX packet for header;
+	 * At least one TX packet per argument;
+	 * One RX packet for ACK.
+	 */
+	head = vq->vq_avail_idx;
+	flags = vq->vq_packed.cached_flags;
+	desc[head].addr = cvq->virtio_net_hdr_mem;
+	desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+	vq->vq_free_cnt--;
+	nb_descs++;
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
+	}
+
+	for (k = 0; k < pkt_num; k++) {
+		desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+			+ sizeof(struct virtio_net_ctrl_hdr)
+			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
+		desc[vq->vq_avail_idx].len = dlen[k];
+		desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
+			vq->vq_packed.cached_flags;
+		sum += dlen[k];
+		vq->vq_free_cnt--;
+		nb_descs++;
+		if (++vq->vq_avail_idx >= vq->vq_nentries) {
+			vq->vq_avail_idx -= vq->vq_nentries;
+			vq->vq_packed.cached_flags ^=
+				VRING_PACKED_DESC_F_AVAIL_USED;
+		}
+	}
+
+	desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+		+ sizeof(struct virtio_net_ctrl_hdr);
+	desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
+	desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
+		vq->vq_packed.cached_flags;
+	vq->vq_free_cnt--;
+	nb_descs++;
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
+	}
+
+	virtio_wmb(vq->hw->weak_barriers);
+	desc[head].flags = VRING_DESC_F_NEXT | flags;
+
+	virtio_wmb(vq->hw->weak_barriers);
+	virtqueue_notify(vq);
+
+	/* wait for used descriptors in virtqueue */
+	while (!desc_is_used(&desc[head], vq))
+		usleep(100);
+
+	virtio_rmb(vq->hw->weak_barriers);
+
+	/* now get used descriptors */
+	vq->vq_free_cnt += nb_descs;
+	vq->vq_used_cons_idx += nb_descs;
+	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+		vq->vq_used_cons_idx -= vq->vq_nentries;
+		vq->vq_packed.used_wrap_counter ^= 1;
+	}
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
+			"vq->vq_avail_idx=%d\n"
+			"vq->vq_used_cons_idx=%d\n"
+			"vq->vq_packed.cached_flags=0x%x\n"
+			"vq->vq_packed.used_wrap_counter=%d\n",
+			vq->vq_free_cnt,
+			vq->vq_avail_idx,
+			vq->vq_used_cons_idx,
+			vq->vq_packed.cached_flags,
+			vq->vq_packed.used_wrap_counter);
+
+	result = cvq->virtio_net_hdr_mz->addr;
+	return result;
+}
+
+static struct virtio_pmd_ctrl *
+virtio_send_command_split(struct virtnet_ctl *cvq,
+			  struct virtio_pmd_ctrl *ctrl,
+			  int *dlen, int pkt_num)
+{
+	struct virtio_pmd_ctrl *result;
+	struct virtqueue *vq = cvq->vq;
+	uint32_t head, i;
+	int k, sum = 0;
+
+	head = vq->vq_desc_head_idx;
+
+	/*
+	 * Format is enforced in qemu code:
+	 * One TX packet for header;
+	 * At least one TX packet per argument;
+	 * One RX packet for ACK.
+	 */
+	vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
+	vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
+	vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+	vq->vq_free_cnt--;
+	i = vq->vq_split.ring.desc[head].next;
+
+	for (k = 0; k < pkt_num; k++) {
+		vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
+		vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+			+ sizeof(struct virtio_net_ctrl_hdr)
+			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
+		vq->vq_split.ring.desc[i].len = dlen[k];
+		sum += dlen[k];
+		vq->vq_free_cnt--;
+		i = vq->vq_split.ring.desc[i].next;
+	}
+
+	vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
+	vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+			+ sizeof(struct virtio_net_ctrl_hdr);
+	vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
+	vq->vq_free_cnt--;
+
+	vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;
+
+	vq_update_avail_ring(vq, head);
+	vq_update_avail_idx(vq);
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
+
+	virtqueue_notify(vq);
+
+	rte_rmb();
+	while (VIRTQUEUE_NUSED(vq) == 0) {
+		rte_rmb();
+		usleep(100);
+	}
+
+	while (VIRTQUEUE_NUSED(vq)) {
+		uint32_t idx, desc_idx, used_idx;
+		struct vring_used_elem *uep;
+
+		used_idx = (uint32_t)(vq->vq_used_cons_idx
+				& (vq->vq_nentries - 1));
+		uep = &vq->vq_split.ring.used->ring[used_idx];
+		idx = (uint32_t)uep->id;
+		desc_idx = idx;
+
+		while (vq->vq_split.ring.desc[desc_idx].flags &
+				VRING_DESC_F_NEXT) {
+			desc_idx = vq->vq_split.ring.desc[desc_idx].next;
+			vq->vq_free_cnt++;
+		}
+
+		vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+		vq->vq_desc_head_idx = idx;
+
+		vq->vq_used_cons_idx++;
+		vq->vq_free_cnt++;
+	}
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
+			vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+	result = cvq->virtio_net_hdr_mz->addr;
+	return result;
+}
+
+int
+virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+		    int *dlen, int pkt_num)
+{
+	virtio_net_ctrl_ack status = ~0;
+	struct virtio_pmd_ctrl *result;
+	struct virtqueue *vq;
+
+	ctrl->status = status;
+
+	if (!cvq || !cvq->vq) {
+		PMD_INIT_LOG(ERR, "Control queue is not supported.");
+		return -1;
+	}
+
+	rte_spinlock_lock(&cvq->lock);
+	vq = cvq->vq;
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
+		"vq->hw->cvq = %p vq = %p",
+		vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
+
+	if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
+		rte_spinlock_unlock(&cvq->lock);
+		return -1;
+	}
+
+	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
+		sizeof(struct virtio_pmd_ctrl));
+
+	if (vtpci_packed_queue(vq->hw))
+		result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
+	else
+		result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);
+
+	rte_spinlock_unlock(&cvq->lock);
+	return result->status;
+}
+
+void
+virtio_init_vring(struct virtqueue *vq)
+{
+	int size = vq->vq_nentries;
+	uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+	PMD_INIT_FUNC_TRACE();
+
+	memset(ring_mem, 0, vq->vq_ring_size);
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+	if (vq->vq_descx)
+		memset(vq->vq_descx, 0,
+			sizeof(struct vq_desc_extra) * vq->vq_nentries);
+	if (vtpci_packed_queue(vq->hw)) {
+		vring_init_packed(&vq->vq_packed.ring, ring_mem,
+				  VIRTIO_PCI_VRING_ALIGN, size);
+		vring_desc_init_packed(vq, size);
+	} else {
+		struct vring *vr = &vq->vq_split.ring;
+
+		vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
+		vring_desc_init_split(vr->desc, size);
+	}
+	/*
+	 * Disable device(host) interrupting guest
+	 */
+	virtqueue_disable_intr(vq);
+}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index c6dd4a347..4d8d069c7 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -480,6 +480,11 @@ virtqueue_notify(struct virtqueue *vq)
 	VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
 }
 
+int virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+		    int *dlen, int pkt_num);
+
+void virtio_init_vring(struct virtqueue *vq);
+
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTQUEUE_DUMP(vq) do { \
 	uint16_t used_idx, nused; \
-- 
2.21.0


Thread overview: 51+ messages
2019-08-29  7:59 [dpdk-dev] [PATCH 00/15] Introduce Virtio vDPA driver Maxime Coquelin
2019-08-29  7:59 ` [dpdk-dev] [PATCH 01/15] vhost: remove vhost kernel header inclusion Maxime Coquelin
2019-09-02  6:03   ` Tiwei Bie
2019-09-03  7:24     ` Maxime Coquelin
2019-08-29  7:59 ` [dpdk-dev] [PATCH 02/15] vhost: configure vDPA as soon as the device is ready Maxime Coquelin
2019-09-02  8:34   ` Ye Xiaolong
2019-09-02  9:02     ` Wang, Xiao W
2019-09-03  7:34       ` Maxime Coquelin
2019-09-03 10:58         ` Wang, Xiao W
2019-08-29  7:59 ` Maxime Coquelin [this message]
2019-09-02  6:05   ` [dpdk-dev] [PATCH 03/15] net/virtio: move control path functions to virtqueue file Tiwei Bie
2019-08-29  7:59 ` [dpdk-dev] [PATCH 04/15] net/virtio: add virtio PCI subsystem device ID declaration Maxime Coquelin
2019-09-02  6:14   ` Tiwei Bie
2019-09-03  7:25     ` Maxime Coquelin
2019-08-29  7:59 ` [dpdk-dev] [PATCH 05/15] net/virtio: save notify bar ID in virtio HW struct Maxime Coquelin
2019-09-02  6:17   ` Tiwei Bie
2019-08-29  7:59 ` [dpdk-dev] [PATCH 06/15] net/virtio: add skeleton for virtio vDPA driver Maxime Coquelin
2019-09-02  6:27   ` Tiwei Bie
2019-09-03  7:25     ` Maxime Coquelin
2019-08-29  7:59 ` [dpdk-dev] [PATCH 07/15] net/virtio: add vDPA ops to get number of queue Maxime Coquelin
2019-09-02  6:32   ` Tiwei Bie
2019-08-29  7:59 ` [dpdk-dev] [PATCH 08/15] net/virtio: add virtio vDPA op to get features Maxime Coquelin
2019-09-02  6:43   ` Tiwei Bie
2019-09-03  7:27     ` Maxime Coquelin
2019-08-29  7:59 ` [dpdk-dev] [PATCH 09/15] net/virtio: add virtio vDPA op to get protocol features Maxime Coquelin
2019-09-02  6:46   ` Tiwei Bie
2019-08-29  7:59 ` [dpdk-dev] [PATCH 10/15] net/virtio: add vDPA op to configure and start the device Maxime Coquelin
2019-09-03  5:30   ` Tiwei Bie
2019-09-03  7:40     ` Maxime Coquelin
2019-09-03  8:49       ` Tiwei Bie
2019-09-04  4:06         ` Jason Wang
2019-09-04  6:56           ` Maxime Coquelin
2019-09-05  2:57             ` Tiwei Bie
2019-08-29  7:59 ` [dpdk-dev] [PATCH 11/15] net/virtio: add vDPA op to stop and close " Maxime Coquelin
2019-09-02  7:07   ` Tiwei Bie
2019-09-03  7:30     ` Maxime Coquelin
2019-08-29  7:59 ` [dpdk-dev] [PATCH 12/15] net/virtio: add vDPA op to set features Maxime Coquelin
2019-08-29  7:59 ` [dpdk-dev] [PATCH 13/15] net/virtio: add vDPA ops to get VFIO FDs Maxime Coquelin
2019-09-03  4:47   ` Tiwei Bie
2019-08-29  7:59 ` [dpdk-dev] [PATCH 14/15] net/virtio: add vDPA op to get notification area Maxime Coquelin
2019-09-03  5:02   ` Tiwei Bie
2019-09-03  7:36     ` Maxime Coquelin
2019-09-03  8:40       ` Tiwei Bie
2019-08-29  8:00 ` [dpdk-dev] [PATCH 15/15] doc: add documentation for Virtio vDPA driver Maxime Coquelin
2019-09-09 11:55 ` [dpdk-dev] [PATCH 00/15] Introduce " Shahaf Shuler
2019-09-10  7:46   ` Maxime Coquelin
2019-09-10 13:44     ` Shahaf Shuler
2019-09-10 13:56       ` Maxime Coquelin
2019-09-11  5:15         ` Shahaf Shuler
2019-09-11  7:15           ` Maxime Coquelin
2019-10-24  6:32 ` Maxime Coquelin
