automatic DPDK test reports
Subject: |WARNING| pw111179-111183 [PATCH] [v8, 5/5] examples/vhost: support async dequeue data path
From: dpdklab
Date: 2022-05-16 11:20 UTC
To: test-report
Cc: dpdk-test-reports


Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/111179

_apply patch failure_

Submitter: Ding, Xuan <xuan.ding@intel.com>
Date: Monday, May 16 2022 11:10:41 
Applied on: CommitID:c0c305ee9e0e7c9feca6412266a778f330d20c19
Apply patch set 111179-111183 failed:

Checking patch doc/guides/prog_guide/vhost_lib.rst...
Hunk #1 succeeded at 276 (offset -6 lines).
Checking patch doc/guides/rel_notes/release_22_07.rst...
error: while searching for:
  Added an API which can get the number of inflight packets in
  vhost async data path without using lock.

Removed Items
-------------


error: patch failed: doc/guides/rel_notes/release_22_07.rst:70
Checking patch lib/vhost/rte_vhost_async.h...
Hunk #1 succeeded at 187 (offset -17 lines).
Checking patch lib/vhost/version.map...
error: while searching for:

	# added in 22.07
	rte_vhost_async_get_inflight_thread_unsafe;

};

INTERNAL {

error: patch failed: lib/vhost/version.map:90
Checking patch lib/vhost/virtio_net.c...
Applied patch doc/guides/prog_guide/vhost_lib.rst cleanly.
Applying patch doc/guides/rel_notes/release_22_07.rst with 1 reject...
Rejected hunk #1.
Applied patch lib/vhost/rte_vhost_async.h cleanly.
Applying patch lib/vhost/version.map with 1 reject...
Rejected hunk #1.
Applied patch lib/vhost/virtio_net.c cleanly.
diff a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst	(rejected hunks)
@@ -70,6 +70,11 @@ New Features
   Added an API which can get the number of inflight packets in
   vhost async data path without using lock.
 
+* **Added vhost async dequeue API to receive pkts from guest.**
+
+  Added vhost async dequeue API which can leverage DMA devices to
+  accelerate receiving pkts from guest.
+
 Removed Items
 -------------
 
diff a/lib/vhost/version.map b/lib/vhost/version.map	(rejected hunks)
@@ -90,7 +90,7 @@ EXPERIMENTAL {
 
 	# added in 22.07
 	rte_vhost_async_get_inflight_thread_unsafe;
-
+	rte_vhost_async_try_dequeue_burst;
 };
 
 INTERNAL {
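
For reference, the rejected version.map hunk only appends the new experimental
symbol after rte_vhost_async_get_inflight_thread_unsafe. Resolved by hand
against the hunk above, the end of the EXPERIMENTAL section would read as
follows (the neighbouring symbols in the actual base tree may differ):

  EXPERIMENTAL {
  	# (earlier symbols elided)

  	# added in 22.07
  	rte_vhost_async_get_inflight_thread_unsafe;
  	rte_vhost_async_try_dequeue_burst;
  };

The reject itself is a context mismatch: the lines the patch expects at
lib/vhost/version.map:90 no longer match commit c0c305ee9e0e, most likely
because other 22.07 symbols were merged in the meantime.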
Checking patch doc/guides/sample_app_ug/vhost.rst...
Checking patch examples/vhost/main.c...
error: while searching for:
	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;

	if (builtin_net_driver) {
		ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
	} else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
		uint16_t enqueue_fail = 0;
		int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;

		complete_async_pkts(vdev);
		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit, dma_id, 0);

		enqueue_fail = nr_xmit - ret;
		if (enqueue_fail)
			free_pkts(&m[ret], nr_xmit - ret);
	} else {
		ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
						m, nr_xmit);
	}

	if (enable_stats) {
		__atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,

error: patch failed: examples/vhost/main.c:1029
Hunk #10 succeeded at 1087 (offset 18 lines).
Hunk #11 succeeded at 1359 (offset 18 lines).
error: while searching for:
		}
	}

	if (builtin_net_driver) {
		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
						pkts, rx_count);
	} else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
		uint16_t enqueue_fail = 0;
		int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;

		complete_async_pkts(vdev);
		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
					VIRTIO_RXQ, pkts, rx_count, dma_id, 0);

		enqueue_fail = rx_count - enqueue_count;
		if (enqueue_fail)
			free_pkts(&pkts[enqueue_count], enqueue_fail);

	} else {
		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
						pkts, rx_count);
	}

	if (enable_stats) {
		__atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,

error: patch failed: examples/vhost/main.c:1355
Hunk #13 succeeded at 1443 (offset 36 lines).
Hunk #14 succeeded at 1475 (offset 36 lines).
Hunk #15 succeeded at 1555 (offset 36 lines).
error: while searching for:
		vdev->vid);

	if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
		uint16_t n_pkt = 0;
		int pkts_inflight;
		int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
		pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, VIRTIO_RXQ);
		struct rte_mbuf *m_cpl[pkts_inflight];

		while (pkts_inflight) {
			n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
						m_cpl, pkts_inflight, dma_id, 0);
			free_pkts(m_cpl, n_pkt);
			pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid,
										VIRTIO_RXQ);
		}

		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
		dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
	}

	rte_free(vdev);
}

/*
 * A new device is added to a data core. First the device is added to the main linked list
 * and then allocated to a specific data core.

error: patch failed: examples/vhost/main.c:1535
Hunk #17 succeeded at 1661 (offset -19 lines).
Hunk #18 succeeded at 1685 (offset -19 lines).
Hunk #19 succeeded at 1727 (offset -19 lines).
error: while searching for:
	if (queue_id != VIRTIO_RXQ)
		return 0;

	if (dma_bind[vid].dmas[queue_id].async_enabled) {
		if (!enable) {
			uint16_t n_pkt = 0;
			int pkts_inflight;
			pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, queue_id);
			int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
			struct rte_mbuf *m_cpl[pkts_inflight];

			while (pkts_inflight) {
				n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
							m_cpl, pkts_inflight, dma_id, 0);
				free_pkts(m_cpl, n_pkt);
				pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid,
											queue_id);
			}
		}
	}

	return 0;

error: patch failed: examples/vhost/main.c:1647
Hunk #21 succeeded at 1982 (offset -9 lines).
Checking patch examples/vhost/main.h...
Hunk #1 succeeded at 62 (offset 1 line).
Hunk #2 succeeded at 101 (offset 1 line).
Hunk #3 succeeded at 112 (offset 1 line).
Checking patch examples/vhost/virtio_net.c...
Applied patch doc/guides/sample_app_ug/vhost.rst cleanly.
Applying patch examples/vhost/main.c with 4 rejects...
Hunk #1 applied cleanly.
Hunk #2 applied cleanly.
Hunk #3 applied cleanly.
Hunk #4 applied cleanly.
Hunk #5 applied cleanly.
Hunk #6 applied cleanly.
Hunk #7 applied cleanly.
Hunk #8 applied cleanly.
Rejected hunk #9.
Hunk #10 applied cleanly.
Hunk #11 applied cleanly.
Rejected hunk #12.
Hunk #13 applied cleanly.
Hunk #14 applied cleanly.
Hunk #15 applied cleanly.
Rejected hunk #16.
Hunk #17 applied cleanly.
Hunk #18 applied cleanly.
Hunk #19 applied cleanly.
Rejected hunk #20.
Hunk #21 applied cleanly.
Applied patch examples/vhost/main.h cleanly.
Applied patch examples/vhost/virtio_net.c cleanly.
diff a/examples/vhost/main.c b/examples/vhost/main.c	(rejected hunks)
@@ -1029,22 +1060,7 @@ drain_vhost(struct vhost_dev *vdev)
 	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
 	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
 
-	if (builtin_net_driver) {
-		ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
-	} else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
-		uint16_t enqueue_fail = 0;
-		int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
-
-		complete_async_pkts(vdev);
-		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit, dma_id, 0);
-
-		enqueue_fail = nr_xmit - ret;
-		if (enqueue_fail)
-			free_pkts(&m[ret], nr_xmit - ret);
-	} else {
-		ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
-						m, nr_xmit);
-	}
+	ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev, VIRTIO_RXQ, m, nr_xmit);
 
 	if (enable_stats) {
 		__atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
@@ -1355,25 +1397,8 @@ drain_eth_rx(struct vhost_dev *vdev)
 		}
 	}
 
-	if (builtin_net_driver) {
-		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
-						pkts, rx_count);
-	} else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
-		uint16_t enqueue_fail = 0;
-		int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
-
-		complete_async_pkts(vdev);
-		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
-					VIRTIO_RXQ, pkts, rx_count, dma_id, 0);
-
-		enqueue_fail = rx_count - enqueue_count;
-		if (enqueue_fail)
-			free_pkts(&pkts[enqueue_count], enqueue_fail);
-
-	} else {
-		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
-						pkts, rx_count);
-	}
+	enqueue_count = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
+					VIRTIO_RXQ, pkts, rx_count);
 
 	if (enable_stats) {
 		__atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
@@ -1535,27 +1596,79 @@ destroy_device(int vid)
 		vdev->vid);
 
 	if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
-		uint16_t n_pkt = 0;
-		int pkts_inflight;
-		int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
-		pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, VIRTIO_RXQ);
-		struct rte_mbuf *m_cpl[pkts_inflight];
-
-		while (pkts_inflight) {
-			n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
-						m_cpl, pkts_inflight, dma_id, 0);
-			free_pkts(m_cpl, n_pkt);
-			pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid,
-										VIRTIO_RXQ);
-		}
-
+		vhost_clear_queue_thread_unsafe(vdev, VIRTIO_RXQ);
 		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
 		dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
 	}
 
+	if (dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled) {
+		vhost_clear_queue_thread_unsafe(vdev, VIRTIO_TXQ);
+		rte_vhost_async_channel_unregister(vid, VIRTIO_TXQ);
+		dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled = false;
+	}
+
 	rte_free(vdev);
 }
 
+static inline int
+get_socketid_by_vid(int vid)
+{
+	int i;
+	char ifname[PATH_MAX];
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	for (i = 0; i < nb_sockets; i++) {
+		char *file = socket_files + i * PATH_MAX;
+		if (strcmp(file, ifname) == 0)
+			return i;
+	}
+
+	return -1;
+}
+
+static int
+init_vhost_queue_ops(int vid)
+{
+	if (builtin_net_driver) {
+		vdev_queue_ops[vid].enqueue_pkt_burst = builtin_enqueue_pkts;
+		vdev_queue_ops[vid].dequeue_pkt_burst = builtin_dequeue_pkts;
+	} else {
+		if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].async_enabled)
+			vdev_queue_ops[vid].enqueue_pkt_burst = async_enqueue_pkts;
+		else
+			vdev_queue_ops[vid].enqueue_pkt_burst = sync_enqueue_pkts;
+
+		if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].async_enabled)
+			vdev_queue_ops[vid].dequeue_pkt_burst = async_dequeue_pkts;
+		else
+			vdev_queue_ops[vid].dequeue_pkt_burst = sync_dequeue_pkts;
+	}
+
+	return 0;
+}
+
+static inline int
+vhost_async_channel_register(int vid)
+{
+	int rx_ret = 0, tx_ret = 0;
+
+	if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].dev_id != INVALID_DMA_ID) {
+		rx_ret = rte_vhost_async_channel_register(vid, VIRTIO_RXQ);
+		if (rx_ret == 0)
+			dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].async_enabled = true;
+	}
+
+	if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].dev_id != INVALID_DMA_ID) {
+		tx_ret = rte_vhost_async_channel_register(vid, VIRTIO_TXQ);
+		if (tx_ret == 0)
+			dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].async_enabled = true;
+	}
+
+	return rx_ret | tx_ret;
+}
+
+
+
 /*
  * A new device is added to a data core. First the device is added to the main linked list
  * and then allocated to a specific data core.
@@ -1647,22 +1764,9 @@ vring_state_changed(int vid, uint16_t queue_id, int enable)
 	if (queue_id != VIRTIO_RXQ)
 		return 0;
 
-	if (dma_bind[vid].dmas[queue_id].async_enabled) {
-		if (!enable) {
-			uint16_t n_pkt = 0;
-			int pkts_inflight;
-			pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, queue_id);
-			int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
-			struct rte_mbuf *m_cpl[pkts_inflight];
-
-			while (pkts_inflight) {
-				n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
-							m_cpl, pkts_inflight, dma_id, 0);
-				free_pkts(m_cpl, n_pkt);
-				pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid,
-											queue_id);
-			}
-		}
+	if (dma_bind[vid2socketid[vid]].dmas[queue_id].async_enabled) {
+		if (!enable)
+			vhost_clear_queue_thread_unsafe(vdev, queue_id);
 	}
 
 	return 0;
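
Two patterns recur in the rejected main.c hunks. The first two replace the
open-coded builtin/async/sync branching in drain_vhost() and drain_eth_rx()
with a single indirect call through a per-device ops table, which
init_vhost_queue_ops() fills once per device. The table itself comes from the
series' main.h changes (applied cleanly above, so not quoted); the sketch
below is an assumption reconstructed from the vdev_queue_ops[...] call sites,
and the exact names, signatures and array bound may differ:

  /* Sketch, not the series' literal code: per-vid dispatch table
   * inferred from the call sites in the hunks above. */
  struct vhost_queue_ops {
  	uint16_t (*enqueue_pkt_burst)(struct vhost_dev *vdev,
  			uint16_t queue_id,
  			struct rte_mbuf **pkts, uint32_t count);
  	uint16_t (*dequeue_pkt_burst)(struct vhost_dev *vdev,
  			uint16_t queue_id, struct rte_mempool *mbuf_pool,
  			struct rte_mbuf **pkts, uint16_t count);
  };
  /* Array bound is an assumption; the example defines its own limit. */
  struct vhost_queue_ops vdev_queue_ops[MAX_VHOST_DEVICE];

The last two hunks collapse the duplicated drain loops in destroy_device()
and vring_state_changed() into vhost_clear_queue_thread_unsafe(). Its
definition is not part of the quoted rejects; a minimal sketch, reconstructed
from the removed lines above and from the vid2socketid indexing used elsewhere
in this hunk, could look like this (the series' actual helper may differ):

  static void
  vhost_clear_queue_thread_unsafe(struct vhost_dev *vdev, uint16_t queue_id)
  {
  	uint16_t n_pkt = 0;
  	int pkts_inflight;
  	int vid = vdev->vid;
  	/* vid2socketid[] maps a vhost device ID back to its socket slot. */
  	int16_t dma_id = dma_bind[vid2socketid[vid]].dmas[queue_id].dev_id;

  	pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, queue_id);
  	struct rte_mbuf *m_cpl[pkts_inflight];

  	/* Drain until the async channel reports no in-flight packets,
  	 * freeing each completed burst. */
  	while (pkts_inflight) {
  		n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
  					m_cpl, pkts_inflight, dma_id, 0);
  		free_pkts(m_cpl, n_pkt);
  		pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid,
  					queue_id);
  	}
  }

Either way, all four rejects look like context drift against commit
c0c305ee9e0e rather than semantic conflicts; rebasing the series on the
current base should resolve them.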

https://lab.dpdk.org/results/dashboard/patchsets/22130/

UNH-IOL DPDK Community Lab
