DPDK patches and discussions
* [dpdk-dev] [PATCH RFC] examples/vhost: remove the callbacks in app
@ 2021-08-31 12:59 Sunil Pai G
  0 siblings, 0 replies; only message in thread
From: Sunil Pai G @ 2021-08-31 12:59 UTC (permalink / raw)
  To: dev; +Cc: harry.van.haaren, Jiayu.Hu, maxime.coquelin, sunil.pai.g

Since the vhost library now performs the dmadev offload itself,
the DMA transfer and completion callbacks are no longer needed in
the application.
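
With the callbacks gone, the app refers to a DMA engine by its dmadev
id when calling the async API. A minimal sketch of the new call shape
as used below (the signatures come from the dependent series, not from
a released DPDK API):

	struct rte_vhost_async_config config = {0};

	/* registration no longer takes rte_vhost_async_channel_ops */
	config.features = RTE_VHOST_ASYNC_INORDER;
	config.async_threshold = 256;
	rte_vhost_async_channel_register(vid, VIRTIO_RXQ, config);

	/* enqueue and completion paths take the dmadev id directly */
	n_enq = rte_vhost_submit_enqueue_burst(vid, VIRTIO_RXQ, pkts,
			rx_count, m_cpu_cpl, &cpu_cpl_nr, dmadev_id);
	n_done = rte_vhost_poll_enqueue_completed(vid, VIRTIO_RXQ, p_cpl,
			MAX_PKT_BURST, dmadev_id);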

Note to the reader:
-------------------
The intent of this patch is to explore different possible approaches
to the async implementation.
Please consider this patch for discussion only; it is not intended
for merge/upstream.

This patch is dependent on the series:
https://patches.dpdk.org/project/dpdk/list/?series=18407.
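
With that series in place the library drives the engine itself, so the
per-app packet tracker and doorbell logic deleted below become library
internals. Roughly (illustrative only; the copy/submit/completed names
are my assumption of the dmadev API in that series):

	/* per segment: enqueue a mem-to-mem copy on vchan 0 */
	rte_dmadev_copy(dmadev_id, 0, src_iova, dst_iova, len, 0);
	/* ring the doorbell once per burst */
	rte_dmadev_submit(dmadev_id, 0);
	/* reap completions later to mark packets done */
	nr = rte_dmadev_completed(dmadev_id, 0, max_cpls, &last_idx, &has_error);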

Signed-off-by: Sunil Pai G <sunil.pai.g@intel.com>
---
 examples/vhost/ioat.c | 150 +++++++-----------------------------------
 examples/vhost/ioat.h |  39 +----------
 examples/vhost/main.c |  27 ++++----
 3 files changed, 39 insertions(+), 177 deletions(-)
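
For reviewers skimming the diff: open_ioat() becomes open_dmadev(), and
per-engine bring-up reduces to the generic dmadev sequence (a condensed
sketch of the code in this patch, error handling trimmed):

	dev_id = rte_dmadev_get_dev_id(name);    /* look up by PCI name */
	rte_dmadev_info_get(dev_id, &dev_info);  /* needs MEM_TO_MEM + OPS_COPY */
	dev_conf.nb_vchans = 1;
	dev_conf.enable_silent = false;
	rte_dmadev_configure(dev_id, &dev_conf);
	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
	vchan_conf.nb_desc = DMADEV_RING_SIZE;
	rte_dmadev_vchan_setup(dev_id, 0, &vchan_conf);
	rte_dmadev_start(dev_id);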

diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 457f8171f0..1f0c35b338 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -3,27 +3,16 @@
  */
 
 #include <sys/uio.h>
-#ifdef RTE_RAW_IOAT
-#include <rte_rawdev.h>
-#include <rte_ioat_rawdev.h>
+
 
 #include "ioat.h"
 #include "main.h"
+#include <rte_dmadev.h>
 
-struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];
-
-struct packet_tracker {
-	unsigned short size_track[MAX_ENQUEUED_SIZE];
-	unsigned short next_read;
-	unsigned short next_write;
-	unsigned short last_remain;
-	unsigned short ioat_space;
-};
-
-struct packet_tracker cb_tracker[MAX_VHOST_DEVICE];
+struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE] = {0};
 
 int
-open_ioat(const char *value)
+open_dmadev(const char *value)
 {
 	struct dma_for_vhost *dma_info = dma_bind;
 	char *input = strndup(value, strlen(value) + 1);
@@ -31,8 +20,10 @@ open_ioat(const char *value)
 	char *ptrs[2];
 	char *start, *end, *substr;
 	int64_t vid, vring_id;
-	struct rte_ioat_rawdev_config config;
-	struct rte_rawdev_info info = { .dev_private = &config };
+	struct rte_dmadev_info dev_info = {0};
+	struct rte_dmadev_conf dev_conf = {0};
+	struct rte_dmadev_vchan_conf vchan_conf = {0};
+	uint64_t capab = RTE_DMADEV_CAPA_MEM_TO_MEM | RTE_DMADEV_CAPA_OPS_COPY;
 	char name[32];
 	int dev_id;
 	int ret = 0;
@@ -91,29 +82,34 @@ open_ioat(const char *value)
 
 		rte_pci_device_name(&(dma_info + vid)->dmas[vring_id].addr,
 				name, sizeof(name));
-		dev_id = rte_rawdev_get_dev_id(name);
+		dev_id = rte_dmadev_get_dev_id(name);
 		if (dev_id == (uint16_t)(-ENODEV) ||
 		dev_id == (uint16_t)(-EINVAL)) {
 			ret = -1;
 			goto out;
 		}
 
-		if (rte_rawdev_info_get(dev_id, &info, sizeof(config)) < 0 ||
-		strstr(info.driver_name, "ioat") == NULL) {
+		if (rte_dmadev_info_get(dev_id, &dev_info) != 0 ||
+				(dev_info.dev_capa & capab) != capab ||
+				dev_info.max_vchans < 1) {
+			ret = -1;
+			goto out;
+		}
+
+		dev_conf.nb_vchans = 1;
+		dev_conf.enable_silent = false;
+		if (rte_dmadev_configure(dev_id, &dev_conf)) {
 			ret = -1;
 			goto out;
 		}
 
+		vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+		vchan_conf.nb_desc = DMADEV_RING_SIZE;
+		ret = rte_dmadev_vchan_setup(dev_id, 0, &vchan_conf);
+
 		(dma_info + vid)->dmas[vring_id].dev_id = dev_id;
 		(dma_info + vid)->dmas[vring_id].is_valid = true;
-		config.ring_size = IOAT_RING_SIZE;
-		config.hdls_disable = true;
-		if (rte_rawdev_configure(dev_id, &info, sizeof(config)) < 0) {
-			ret = -1;
-			goto out;
-		}
-		rte_rawdev_start(dev_id);
-		cb_tracker[dev_id].ioat_space = IOAT_RING_SIZE - 1;
+		rte_dmadev_start(dev_id);
 		dma_info->nr++;
 		i++;
 	}
@@ -122,101 +118,3 @@ open_ioat(const char *value)
 	return ret;
 }
 
-int32_t
-ioat_transfer_data_cb(int vid, uint16_t queue_id,
-		struct rte_vhost_async_desc *descs,
-		struct rte_vhost_async_status *opaque_data, uint16_t count)
-{
-	uint32_t i_desc;
-	uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
-	struct rte_vhost_iov_iter *src = NULL;
-	struct rte_vhost_iov_iter *dst = NULL;
-	unsigned long i_seg;
-	unsigned short mask = MAX_ENQUEUED_SIZE - 1;
-	unsigned short write = cb_tracker[dev_id].next_write;
-
-	if (!opaque_data) {
-		for (i_desc = 0; i_desc < count; i_desc++) {
-			src = descs[i_desc].src;
-			dst = descs[i_desc].dst;
-			i_seg = 0;
-			if (cb_tracker[dev_id].ioat_space < src->nr_segs)
-				break;
-			while (i_seg < src->nr_segs) {
-				rte_ioat_enqueue_copy(dev_id,
-					(uintptr_t)(src->iov[i_seg].iov_base)
-						+ src->offset,
-					(uintptr_t)(dst->iov[i_seg].iov_base)
-						+ dst->offset,
-					src->iov[i_seg].iov_len,
-					0,
-					0);
-				i_seg++;
-			}
-			write &= mask;
-			cb_tracker[dev_id].size_track[write] = src->nr_segs;
-			cb_tracker[dev_id].ioat_space -= src->nr_segs;
-			write++;
-		}
-	} else {
-		/* Opaque data is not supported */
-		return -1;
-	}
-	/* ring the doorbell */
-	rte_ioat_perform_ops(dev_id);
-	cb_tracker[dev_id].next_write = write;
-	return i_desc;
-}
-
-int32_t
-ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
-		struct rte_vhost_async_status *opaque_data,
-		uint16_t max_packets)
-{
-	if (!opaque_data) {
-		uintptr_t dump[255];
-		int n_seg;
-		unsigned short read, write;
-		unsigned short nb_packet = 0;
-		unsigned short mask = MAX_ENQUEUED_SIZE - 1;
-		unsigned short i;
-
-		uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2
-				+ VIRTIO_RXQ].dev_id;
-		n_seg = rte_ioat_completed_ops(dev_id, 255, NULL, NULL, dump, dump);
-		if (n_seg < 0) {
-			RTE_LOG(ERR,
-				VHOST_DATA,
-				"fail to poll completed buf on IOAT device %u",
-				dev_id);
-			return 0;
-		}
-		if (n_seg == 0)
-			return 0;
-
-		cb_tracker[dev_id].ioat_space += n_seg;
-		n_seg += cb_tracker[dev_id].last_remain;
-
-		read = cb_tracker[dev_id].next_read;
-		write = cb_tracker[dev_id].next_write;
-		for (i = 0; i < max_packets; i++) {
-			read &= mask;
-			if (read == write)
-				break;
-			if (n_seg >= cb_tracker[dev_id].size_track[read]) {
-				n_seg -= cb_tracker[dev_id].size_track[read];
-				read++;
-				nb_packet++;
-			} else {
-				break;
-			}
-		}
-		cb_tracker[dev_id].next_read = read;
-		cb_tracker[dev_id].last_remain = n_seg;
-		return nb_packet;
-	}
-	/* Opaque data is not supported */
-	return -1;
-}
-
-#endif /* RTE_RAW_IOAT */
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 62e163c585..415e33d5a8 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -10,7 +10,7 @@
 #include <rte_vhost_async.h>
 
 #define MAX_VHOST_DEVICE 1024
-#define IOAT_RING_SIZE 4096
+#define DMADEV_RING_SIZE 4096
 #define MAX_ENQUEUED_SIZE 4096
 
 struct dma_info {
@@ -24,40 +24,5 @@ struct dma_for_vhost {
 	uint16_t nr;
 };
 
-#ifdef RTE_RAW_IOAT
-int open_ioat(const char *value);
-
-int32_t
-ioat_transfer_data_cb(int vid, uint16_t queue_id,
-		struct rte_vhost_async_desc *descs,
-		struct rte_vhost_async_status *opaque_data, uint16_t count);
-
-int32_t
-ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
-		struct rte_vhost_async_status *opaque_data,
-		uint16_t max_packets);
-#else
-static int open_ioat(const char *value __rte_unused)
-{
-	return -1;
-}
-
-static int32_t
-ioat_transfer_data_cb(int vid __rte_unused, uint16_t queue_id __rte_unused,
-		struct rte_vhost_async_desc *descs __rte_unused,
-		struct rte_vhost_async_status *opaque_data __rte_unused,
-		uint16_t count __rte_unused)
-{
-	return -1;
-}
-
-static int32_t
-ioat_check_completed_copies_cb(int vid __rte_unused,
-		uint16_t queue_id __rte_unused,
-		struct rte_vhost_async_status *opaque_data __rte_unused,
-		uint16_t max_packets __rte_unused)
-{
-	return -1;
-}
-#endif
+int open_dmadev(const char *value);
 #endif /* _IOAT_H_ */
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index bc3d71c898..6d59d26534 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -96,6 +96,7 @@ static int builtin_net_driver;
 static int async_vhost_driver;
 
 static char *dma_type;
+extern struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];
 
 /* Specify timeout (in useconds) between retries on RX. */
 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
@@ -200,7 +201,7 @@ static inline int
 open_dma(const char *value)
 {
 	if (dma_type != NULL && strncmp(dma_type, "ioat", 4) == 0)
-		return open_ioat(value);
+		return open_dmadev(value);
 
 	return -1;
 }
@@ -848,9 +849,10 @@ complete_async_pkts(struct vhost_dev *vdev)
 {
 	struct rte_mbuf *p_cpl[MAX_PKT_BURST];
 	uint16_t complete_count;
+	int dmadev_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
 
 	complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
-					VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
+					VIRTIO_RXQ, p_cpl, MAX_PKT_BURST, dmadev_id);
 	if (complete_count) {
 		free_pkts(p_cpl, complete_count);
 		__atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
@@ -887,6 +889,7 @@ drain_vhost(struct vhost_dev *vdev)
 	uint32_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid;
 	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
 	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
+	int dmadev_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
 
 	if (builtin_net_driver) {
 		ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
@@ -897,7 +900,7 @@ drain_vhost(struct vhost_dev *vdev)
 
 		complete_async_pkts(vdev);
 		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
-					m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
+					m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr, dmadev_id);
 		__atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr, __ATOMIC_SEQ_CST);
 
 		if (cpu_cpl_nr)
@@ -1193,7 +1196,7 @@ drain_eth_rx(struct vhost_dev *vdev)
 {
 	uint16_t rx_count, enqueue_count;
 	struct rte_mbuf *pkts[MAX_PKT_BURST];
-
+	int dmadev_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
 	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
 				    pkts, MAX_PKT_BURST);
 
@@ -1229,7 +1232,7 @@ drain_eth_rx(struct vhost_dev *vdev)
 		complete_async_pkts(vdev);
 		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
 					VIRTIO_RXQ, pkts, rx_count,
-					m_cpu_cpl, &cpu_cpl_nr);
+					m_cpu_cpl, &cpu_cpl_nr, dmadev_id);
 		__atomic_add_fetch(&vdev->pkts_inflight, enqueue_count - cpu_cpl_nr,
 					__ATOMIC_SEQ_CST);
 
@@ -1360,7 +1363,7 @@ destroy_device(int vid)
 	struct vhost_dev *vdev = NULL;
 	int lcore;
 	uint16_t i;
-
+	int dmadev_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
 	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
 		if (vdev->vid == vid)
 			break;
@@ -1410,7 +1413,7 @@ destroy_device(int vid)
 
 		while (vdev->pkts_inflight) {
 			n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
-						m_cpl, vdev->pkts_inflight);
+						m_cpl, vdev->pkts_inflight, dmadev_id);
 			free_pkts(m_cpl, n_pkt);
 			__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
 		}
@@ -1487,18 +1490,13 @@ new_device(int vid)
 
 	if (async_vhost_driver) {
 		struct rte_vhost_async_config config = {0};
-		struct rte_vhost_async_channel_ops channel_ops;
 
 		if (dma_type != NULL && strncmp(dma_type, "ioat", 4) == 0) {
-			channel_ops.transfer_data = ioat_transfer_data_cb;
-			channel_ops.check_completed_copies =
-				ioat_check_completed_copies_cb;
 
 			config.features = RTE_VHOST_ASYNC_INORDER;
 			config.async_threshold = 256;
 
-			return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
-				config, &channel_ops);
+			return rte_vhost_async_channel_register(vid, VIRTIO_RXQ, config);
 		}
 	}
 
@@ -1509,6 +1507,7 @@ static int
 vring_state_changed(int vid, uint16_t queue_id, int enable)
 {
 	struct vhost_dev *vdev = NULL;
+	int dmadev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
 
 	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
 		if (vdev->vid == vid)
@@ -1527,7 +1526,7 @@ vring_state_changed(int vid, uint16_t queue_id, int enable)
 
 			while (vdev->pkts_inflight) {
 				n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
-							m_cpl, vdev->pkts_inflight);
+							m_cpl, vdev->pkts_inflight, dmadev_id);
 				free_pkts(m_cpl, n_pkt);
 				__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
 			}
-- 
2.25.1

