From: Ferruh Yigit <ferruh.yigit@intel.com>
To: dev@dpdk.org
Cc: Tetsuya Mukawa <mukawa@igel.co.jp>,
	Yuanhan Liu <yuanhan.liu@linux.intel.com>,
	Ferruh Yigit <ferruh.yigit@intel.com>
Subject: [dpdk-dev] [PATCH v2] vhost: add support for dynamic vhost PMD creation
Date: Wed, 18 May 2016 18:10:22 +0100	[thread overview]
Message-ID: <1463591422-25408-1-git-send-email-ferruh.yigit@intel.com> (raw)
In-Reply-To: <20160509213124.GK5641@yliu-dev.sh.intel.com>

Add the rte_eth_from_vhost() API so that applications can create vhost
PMD ports dynamically at run time.
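
A minimal usage sketch from an application's point of view, assuming the
EAL is already initialized; the port name "eth_vhost0", the socket path
and the mbuf pool parameters below are illustrative only and not part of
this patch:

    #include <stdlib.h>
    #include <rte_mbuf.h>
    #include <rte_lcore.h>
    #include <rte_debug.h>
    #include <rte_eth_vhost.h>

    char iface[] = "/tmp/sock0";    /* vhost-user socket to create */
    struct rte_mempool *mb_pool;
    int port_id;

    /* mbuf pool backing the rx queues of the new port */
    mb_pool = rte_pktmbuf_pool_create("vhost_mb_pool", 8192, 256, 0,
            RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
    if (mb_pool == NULL)
        rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");

    /* create the vhost ethdev at run time from the application */
    port_id = rte_eth_from_vhost("eth_vhost0", iface, rte_socket_id(),
            mb_pool, NULL);
    if (port_id < 0)
        rte_exit(EXIT_FAILURE, "cannot create vhost port\n");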

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>

---

v2:
* drop rte_ prefix from non-public function
* re-use eth_rx_queue_setup/eth_tx_queue_setup
* pass vdev options as parameter to API (see the usage sketch below)
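
An illustrative sketch of the string-based form added in this revision,
reusing mb_pool from the sketch in the commit message above; the socket
path and queue count are made up for the example. Values given in the
params string take precedence over the direct arguments:

    char iface[] = "/tmp/sock0";    /* overridden by the params string */
    int port_id;

    /* two rx/tx queue pairs, socket path taken from "iface=..." */
    port_id = rte_eth_from_vhost("eth_vhost1", iface, rte_socket_id(),
            mb_pool, "iface=/tmp/sock1,queues=2");
    if (port_id < 0)
        rte_exit(EXIT_FAILURE, "cannot create vhost port\n");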
---
 drivers/net/vhost/rte_eth_vhost.c           | 130 ++++++++++++++++++++++++++++
 drivers/net/vhost/rte_eth_vhost.h           |  26 ++++++
 drivers/net/vhost/rte_pmd_vhost_version.map |   7 ++
 3 files changed, 163 insertions(+)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 310cbef..8019eb1 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -796,6 +796,79 @@ error:
 	return -1;
 }
 
+static int
+eth_from_vhost_create(const char *name, char *iface_name, uint16_t nb_queues,
+		const unsigned int numa_node, struct rte_mempool *mb_pool)
+{
+	struct rte_eth_dev_data *data = NULL;
+	struct pmd_internal *internal = NULL;
+	struct rte_eth_dev *dev = NULL;
+	struct internal_list *list;
+	int port_id;
+	int ret;
+	int i;
+
+	port_id = eth_dev_vhost_create(name, iface_name, nb_queues, numa_node);
+	if (port_id < 0)
+		return -1;
+
+	dev = &rte_eth_devices[port_id];
+	data = dev->data;
+
+	data->rx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_queues,
+			0, numa_node);
+	if (data->rx_queues == NULL)
+		goto error;
+
+	data->tx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_queues,
+			0, numa_node);
+	if (data->tx_queues == NULL)
+		goto error;
+
+	for (i = 0; i < nb_queues; i++) {
+		ret = eth_rx_queue_setup(dev, i, 0, numa_node, NULL, mb_pool);
+		if (ret < 0)
+			goto error;
+	}
+
+	for (i = 0; i < nb_queues; i++) {
+		ret = eth_tx_queue_setup(dev, i, 0, numa_node, NULL);
+		if (ret < 0)
+			goto error;
+	}
+
+	return port_id;
+
+error:
+	internal = data->dev_private;
+	list = find_internal_resource(internal->iface_name);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	if (internal)
+		free(internal->dev_name);
+	free(vring_states[port_id]);
+	free(data->mac_addrs);
+	rte_eth_dev_release_port(dev);
+	if (data->rx_queues) {
+		for (i = 0; i < nb_queues; i++)
+			free(data->rx_queues[i]);
+		rte_free(data->rx_queues);
+	}
+	if (data->tx_queues) {
+		for (i = 0; i < nb_queues; i++)
+			free(data->tx_queues[i]);
+		rte_free(data->tx_queues);
+	}
+	rte_free(internal);
+	rte_free(list);
+	rte_free(data);
+
+	return -1;
+}
+
 static inline int
 open_iface(const char *key __rte_unused, const char *value, void *extra_args)
 {
@@ -827,6 +900,63 @@ open_queues(const char *key __rte_unused, const char *value, void *extra_args)
 	return 0;
 }
 
+int
+rte_eth_from_vhost(const char *name, char *iface_name_arg,
+		const unsigned int numa_node, struct rte_mempool *mb_pool,
+		const char *params)
+{
+	char *iface_name = iface_name_arg;
+	struct rte_kvargs *kvlist = NULL;
+	uint16_t queues = 1;
+	int port_id;
+	int ret;
+
+	if (!name || !mb_pool)
+		return -1;
+
+	if (params) {
+		kvlist = rte_kvargs_parse(params, valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
+			ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
+						 &open_iface, &iface_name);
+			if (ret < 0) {
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
+			ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
+						 &open_queues, &queues);
+			if (ret < 0) {
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+		}
+	}
+
+	if (!iface_name || !queues)
+		return -1;
+
+	port_id = eth_from_vhost_create(name, iface_name, queues, numa_node,
+			mb_pool);
+	if (port_id < 0)
+		return port_id;
+
+	ret = rte_vhost_driver_register(iface_name);
+	if (ret < 0)
+		return ret;
+
+	ret = vhost_driver_session_start();
+	if (ret < 0)
+		return ret;
+
+	return port_id;
+}
+
 static int
 rte_pmd_vhost_devinit(const char *name, const char *params)
 {
diff --git a/drivers/net/vhost/rte_eth_vhost.h b/drivers/net/vhost/rte_eth_vhost.h
index ff5d877..480dac8 100644
--- a/drivers/net/vhost/rte_eth_vhost.h
+++ b/drivers/net/vhost/rte_eth_vhost.h
@@ -102,6 +102,32 @@ struct rte_eth_vhost_queue_event {
 int rte_eth_vhost_get_queue_event(uint8_t port_id,
 		struct rte_eth_vhost_queue_event *event);
 
+/**
+ * Create a new ethdev from a vhost device
+ *
+ * @param name
+ *    Name to be given to the new ethdev
+ * @param iface_name
+ *    Specifies a path to connect to a QEMU virtio-net device
+ * @param numa_node
+ *    The numa node on which the memory for this port is to be allocated
+ * @param mb_pool
+ *    Memory pool used by the created ethdev
+ * @param params
+ *    Optional argument list, supported arguments:
+ *       iface : iface_name,
+ *       queues: number of rx/tx queues [defaults to 1]
+ *    These values override the corresponding API parameters when both are given.
+ *    Example: params="iface=/tmp/sock0,queues=1"
+ *
+ * @return
+ *  - On success, the port_id of the created ethdev.
+ *  - On failure, a negative value.
+ */
+int rte_eth_from_vhost(const char *name, char *iface_name_arg,
+		const unsigned int numa_node, struct rte_mempool *mb_pool,
+		const char *params);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/net/vhost/rte_pmd_vhost_version.map b/drivers/net/vhost/rte_pmd_vhost_version.map
index 65bf3a8..bb2fe29 100644
--- a/drivers/net/vhost/rte_pmd_vhost_version.map
+++ b/drivers/net/vhost/rte_pmd_vhost_version.map
@@ -8,3 +8,10 @@ DPDK_16.04 {
 
 	local: *;
 };
+
+DPDK_16.07 {
+	global:
+
+	rte_eth_from_vhost;
+
+} DPDK_16.04;
-- 
2.5.5


Thread overview: 17+ messages
2016-05-05 18:11 [dpdk-dev] [PATCH] " Ferruh Yigit
2016-05-09 21:31 ` Yuanhan Liu
2016-05-10 17:11   ` Ferruh Yigit
2016-05-18 17:10   ` Ferruh Yigit [this message]
2016-05-19  8:33     ` [dpdk-dev] [PATCH v2] " Thomas Monjalon
2016-05-19 16:28       ` Ferruh Yigit
2016-05-19 16:44         ` Thomas Monjalon
2016-05-20  1:59           ` Yuanhan Liu
2016-05-20 10:37           ` Bruce Richardson
2016-05-20 12:03             ` Thomas Monjalon
2016-05-23 13:24             ` Yuanhan Liu
2016-05-23 17:06               ` Ferruh Yigit
2016-05-24  5:11                 ` Yuanhan Liu
2016-05-24  9:42                   ` Bruce Richardson
2016-05-25  4:41                     ` Yuanhan Liu
2016-05-25 11:54                       ` Thomas Monjalon
2016-05-26  7:58                         ` Yuanhan Liu
