DPDK patches and discussions
From: Ouyang Changchun <changchun.ouyang@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 2/6] lib_vhost: Support multiple queues in virtio dev
Date: Thu, 21 May 2015 15:49:37 +0800	[thread overview]
Message-ID: <1432194581-15301-3-git-send-email-changchun.ouyang@intel.com> (raw)
In-Reply-To: <1432194581-15301-1-git-send-email-changchun.ouyang@intel.com>

Each virtio device can have multiple queues (RX/TX queue pairs), say 2
or 4, at most 8. Enabling this feature allows the virtio device/port on
the guest to use a different vCPU to receive/transmit packets for each
queue.

In multiple-queue mode, virtio device readiness means that all queues of
the virtio device are ready; likewise, cleaning up/destroying a virtio
device requires clearing all queues belonging to it.
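
For illustration, a minimal application-side sketch of the intended
usage follows (rte_vhost_q_num_set() is added by this patch; the app_*
function names, the burst size and the loopback behaviour are
placeholders, not part of the library):

#include <stdlib.h>
#include <rte_debug.h>
#include <rte_mbuf.h>
#include <rte_virtio_net.h>

#define APP_BURST_SZ 32

/* Called once at startup, before any vhost device is registered. */
static void
app_configure_vhost_mq(void)
{
	/* Fails if the number exceeds VIRTIO_MAX_VIRTQUEUES. */
	if (rte_vhost_q_num_set(4) < 0)
		rte_exit(EXIT_FAILURE, "invalid vhost queue number\n");
}

/* Per-lcore data path for one queue pair 'q' of device 'dev'. */
static void
app_switch_one_pair(struct virtio_net *dev, uint32_t q,
	struct rte_mempool *mbuf_pool)
{
	struct rte_mbuf *pkts[APP_BURST_SZ];
	uint16_t nb_rx;

	/* Virtqueue ids follow q * VIRTIO_QNUM + VIRTIO_RXQ/TXQ. */
	uint16_t rxq_id = q * VIRTIO_QNUM + VIRTIO_RXQ;
	uint16_t txq_id = q * VIRTIO_QNUM + VIRTIO_TXQ;

	/* Drain packets the guest transmitted on this queue pair... */
	nb_rx = rte_vhost_dequeue_burst(dev, txq_id, mbuf_pool,
			pkts, APP_BURST_SZ);
	/* ...and, for this sketch, loop them straight back to the guest. */
	if (nb_rx) {
		rte_vhost_enqueue_burst(dev, rxq_id, pkts, nb_rx);
		/* The enqueue copies into the vring; the application
		 * still owns and must free its mbufs. */
		while (nb_rx)
			rte_pktmbuf_free(pkts[--nb_rx]);
	}
}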

Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
---
 lib/librte_vhost/rte_virtio_net.h             |  15 ++-
 lib/librte_vhost/vhost_rxtx.c                 |  32 ++++---
 lib/librte_vhost/vhost_user/vhost-net-user.c  |   4 +-
 lib/librte_vhost/vhost_user/virtio-net-user.c |  97 +++++++++++++++----
 lib/librte_vhost/vhost_user/virtio-net-user.h |   2 +
 lib/librte_vhost/virtio-net.c                 | 132 +++++++++++++++++---------
 6 files changed, 201 insertions(+), 81 deletions(-)

diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index 5d38185..3e82bef 100644
--- a/lib/librte_vhost/rte_virtio_net.h
+++ b/lib/librte_vhost/rte_virtio_net.h
@@ -59,6 +59,10 @@ struct rte_mbuf;
 /* Backend value set by guest. */
 #define VIRTIO_DEV_STOPPED -1
 
+/**
+ * Maximum number of virtqueue pairs per device.
+ */
+#define VIRTIO_MAX_VIRTQUEUES 8
 
 /* Enum for virtqueue management. */
 enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
@@ -96,13 +100,14 @@ struct vhost_virtqueue {
  * Device structure contains all configuration information relating to the device.
  */
 struct virtio_net {
-	struct vhost_virtqueue	*virtqueue[VIRTIO_QNUM];	/**< Contains all virtqueue information. */
+	struct vhost_virtqueue	*virtqueue[VIRTIO_QNUM * VIRTIO_MAX_VIRTQUEUES]; /**< Contains all virtqueue information. */
 	struct virtio_memory	*mem;		/**< QEMU memory and memory region information. */
 	uint64_t		features;	/**< Negotiated feature set. */
 	uint64_t		device_fh;	/**< device identifier. */
 	uint32_t		flags;		/**< Device flags. Only used to check if device is running on data core. */
 #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
 	char			ifname[IF_NAME_SZ];	/**< Name of the tap device or socket path. */
+	uint32_t		num_virt_queues;	/**< Number of RX/TX virtqueue pairs. */
 	void			*priv;		/**< private context */
 } __rte_cache_aligned;
 
@@ -220,4 +225,12 @@ uint16_t rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
 uint16_t rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
 
+/**
+ * This function sets the queue number for one vhost device.
+ * @param q_number
+ *  The queue number for one vhost device.
+ * @return
+ *  0 on success, -1 if q_number exceeds the maximum (VIRTIO_MAX_VIRTQUEUES).
+ */
+int rte_vhost_q_num_set(uint32_t q_number);
 #endif /* _VIRTIO_NET_H_ */
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 4809d32..19f9518 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -67,12 +67,12 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 	uint8_t success = 0;
 
 	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
-	if (unlikely(queue_id != VIRTIO_RXQ)) {
-		LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
-		return 0;
+	if (unlikely(queue_id >= VIRTIO_QNUM * dev->num_virt_queues)) {
+		LOG_DEBUG(VHOST_DATA, "queue id: %d invalid.\n", queue_id);
+		return -1;
 	}
 
-	vq = dev->virtqueue[VIRTIO_RXQ];
+	vq = dev->virtqueue[queue_id];
 	count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
 
 	/*
@@ -188,8 +188,9 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 }
 
 static inline uint32_t __attribute__((always_inline))
-copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
-	uint16_t res_end_idx, struct rte_mbuf *pkt)
+copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t queue_id,
+	uint16_t res_base_idx, uint16_t res_end_idx,
+	struct rte_mbuf *pkt)
 {
 	uint32_t vec_idx = 0;
 	uint32_t entry_success = 0;
@@ -217,9 +218,9 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
 	 * Convert from gpa to vva
 	 * (guest physical addr -> vhost virtual addr)
 	 */
-	vq = dev->virtqueue[VIRTIO_RXQ];
+	vq = dev->virtqueue[queue_id];
 	vb_addr =
 		gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
 	vb_hdr_addr = vb_addr;
 
 	/* Prefetch buffer address. */
@@ -407,11 +408,12 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 
 	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
 		dev->device_fh);
-	if (unlikely(queue_id != VIRTIO_RXQ)) {
-		LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+	if (unlikely(queue_id >= VIRTIO_QNUM * dev->num_virt_queues)) {
+		LOG_DEBUG(VHOST_DATA, "queue id: %d invalid.\n", queue_id);
+		return -1;
 	}
 
-	vq = dev->virtqueue[VIRTIO_RXQ];
+	vq = dev->virtqueue[queue_id];
 	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
 
 	if (count == 0)
@@ -493,7 +495,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 
 		res_end_idx = res_cur_idx;
 
-		entry_success = copy_from_mbuf_to_vring(dev, res_base_idx,
+		entry_success = copy_from_mbuf_to_vring(dev, queue_id, res_base_idx,
 			res_end_idx, pkts[pkt_idx]);
 
 		rte_compiler_barrier();
@@ -543,12 +545,12 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
 	uint16_t free_entries, entry_success = 0;
 	uint16_t avail_idx;
 
-	if (unlikely(queue_id != VIRTIO_TXQ)) {
-		LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
-		return 0;
+	if (unlikely(queue_id >= VIRTIO_QNUM * dev->num_virt_queues)) {
+		LOG_DEBUG(VHOST_DATA, "queue id:%d invalid.\n", queue_id);
+		return -1;
 	}
 
-	vq = dev->virtqueue[VIRTIO_TXQ];
+	vq = dev->virtqueue[queue_id];
 	avail_idx =  *((volatile uint16_t *)&vq->avail->idx);
 
 	/* If there are no available buffers then return. */
diff --git a/lib/librte_vhost/vhost_user/vhost-net-user.c b/lib/librte_vhost/vhost_user/vhost-net-user.c
index 31f1215..b66a653 100644
--- a/lib/librte_vhost/vhost_user/vhost-net-user.c
+++ b/lib/librte_vhost/vhost_user/vhost-net-user.c
@@ -378,7 +378,9 @@ vserver_message_handler(int connfd, void *dat, int *remove)
 		ops->set_owner(ctx);
 		break;
 	case VHOST_USER_RESET_OWNER:
-		ops->reset_owner(ctx);
+		RTE_LOG(INFO, VHOST_CONFIG,
+			"(%"PRIu64") VHOST_NET_RESET_OWNER\n", ctx.fh);
+		user_reset_owner(ctx, &msg.payload.state);
 		break;
 
 	case VHOST_USER_SET_MEM_TABLE:
diff --git a/lib/librte_vhost/vhost_user/virtio-net-user.c b/lib/librte_vhost/vhost_user/virtio-net-user.c
index c1ffc38..bdb2d40 100644
--- a/lib/librte_vhost/vhost_user/virtio-net-user.c
+++ b/lib/librte_vhost/vhost_user/virtio-net-user.c
@@ -209,22 +209,56 @@ static int
 virtio_is_ready(struct virtio_net *dev)
 {
 	struct vhost_virtqueue *rvq, *tvq;
+	uint32_t q_idx;
 
 	/* mq support in future.*/
-	rvq = dev->virtqueue[VIRTIO_RXQ];
-	tvq = dev->virtqueue[VIRTIO_TXQ];
-	if (rvq && tvq && rvq->desc && tvq->desc &&
-		(rvq->kickfd != (eventfd_t)-1) &&
-		(rvq->callfd != (eventfd_t)-1) &&
-		(tvq->kickfd != (eventfd_t)-1) &&
-		(tvq->callfd != (eventfd_t)-1)) {
-		RTE_LOG(INFO, VHOST_CONFIG,
-			"virtio is now ready for processing.\n");
-		return 1;
+	for (q_idx = 0; q_idx < dev->num_virt_queues; q_idx++) {
+		uint32_t virt_rx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_RXQ;
+		uint32_t virt_tx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_TXQ;
+
+		rvq = dev->virtqueue[virt_rx_q_idx];
+		tvq = dev->virtqueue[virt_tx_q_idx];
+		if ((rvq == NULL) || (tvq == NULL) ||
+			(rvq->desc == NULL) || (tvq->desc == NULL) ||
+			(rvq->kickfd == (eventfd_t)-1) ||
+			(rvq->callfd == (eventfd_t)-1) ||
+			(tvq->kickfd == (eventfd_t)-1) ||
+			(tvq->callfd == (eventfd_t)-1)) {
+			RTE_LOG(INFO, VHOST_CONFIG,
+				"virtio isn't ready for processing.\n");
+			return 0;
+		}
 	}
 	RTE_LOG(INFO, VHOST_CONFIG,
-		"virtio isn't ready for processing.\n");
-	return 0;
+		"virtio is now ready for processing.\n");
+	return 1;
+}
+
+static int
+virtio_is_ready_for_reset(struct virtio_net *dev)
+{
+	struct vhost_virtqueue *rvq, *tvq;
+	uint32_t q_idx;
+
+	/* Every queue pair must have had its kick fd cleared before reset. */
+	for (q_idx = 0; q_idx < dev->num_virt_queues; q_idx++) {
+		uint32_t virt_rx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_RXQ;
+		uint32_t virt_tx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_TXQ;
+
+		rvq = dev->virtqueue[virt_rx_q_idx];
+		tvq = dev->virtqueue[virt_tx_q_idx];
+		if ((rvq == NULL) || (tvq == NULL) ||
+			(rvq->kickfd != (eventfd_t)-1) ||
+			(tvq->kickfd != (eventfd_t)-1)) {
+			RTE_LOG(INFO, VHOST_CONFIG,
+				"virtio isn't ready for reset.\n");
+			return 0;
+		}
+	}
+
+	RTE_LOG(INFO, VHOST_CONFIG,
+		"virtio is now ready for reset.\n");
+	return 1;
 }
 
 void
@@ -290,15 +324,42 @@ user_get_vring_base(struct vhost_device_ctx ctx,
 	 * sent and only sent in vhost_vring_stop.
 	 * TODO: cleanup the vring, it isn't usable since here.
 	 */
-	if (((int)dev->virtqueue[VIRTIO_RXQ]->kickfd) >= 0) {
-		close(dev->virtqueue[VIRTIO_RXQ]->kickfd);
-		dev->virtqueue[VIRTIO_RXQ]->kickfd = (eventfd_t)-1;
+	if (((int)dev->virtqueue[state->index]->kickfd) >= 0) {
+		close(dev->virtqueue[state->index]->kickfd);
+		dev->virtqueue[state->index]->kickfd = (eventfd_t)-1;
 	}
-	if (((int)dev->virtqueue[VIRTIO_TXQ]->kickfd) >= 0) {
-		close(dev->virtqueue[VIRTIO_TXQ]->kickfd);
-		dev->virtqueue[VIRTIO_TXQ]->kickfd = (eventfd_t)-1;
+
+	return 0;
+}
+
+/*
+ * When virtio is stopped, QEMU sends us the RESET_OWNER message.
+ */
+int
+user_reset_owner(struct vhost_device_ctx ctx,
+	struct vhost_vring_state *state)
+{
+	struct virtio_net *dev = get_device(ctx);
+
+	/* We have to stop the queue (virtio) if it is running. */
+	if (dev->flags & VIRTIO_DEV_RUNNING)
+		notify_ops->destroy_device(dev);
+
+	RTE_LOG(INFO, VHOST_CONFIG,
+		"reset owner --- state idx:%d state num:%d\n", state->index, state->num);
+	/*
+	 * Based on the current qemu vhost-user implementation, this message
+	 * is sent if and only if vhost_net_stop_one is invoked.
+	 * TODO: clean up the vring; it isn't usable from this point on.
+	 */
+	if (((int)dev->virtqueue[state->index]->kickfd) >= 0) {
+		close(dev->virtqueue[state->index]->kickfd);
+		dev->virtqueue[state->index]->kickfd = (eventfd_t)-1;
 	}
 
+	if (virtio_is_ready_for_reset(dev))
+		ops->reset_owner(ctx);
+
 	return 0;
 }
 
diff --git a/lib/librte_vhost/vhost_user/virtio-net-user.h b/lib/librte_vhost/vhost_user/virtio-net-user.h
index df24860..2429836 100644
--- a/lib/librte_vhost/vhost_user/virtio-net-user.h
+++ b/lib/librte_vhost/vhost_user/virtio-net-user.h
@@ -46,4 +46,6 @@ void user_set_vring_kick(struct vhost_device_ctx, struct VhostUserMsg *);
 int user_get_vring_base(struct vhost_device_ctx, struct vhost_vring_state *);
 
 void user_destroy_device(struct vhost_device_ctx);
+
+int user_reset_owner(struct vhost_device_ctx ctx, struct vhost_vring_state *state);
 #endif
diff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c
index 4672e67..680f1b8 100644
--- a/lib/librte_vhost/virtio-net.c
+++ b/lib/librte_vhost/virtio-net.c
@@ -66,9 +66,11 @@ static struct virtio_net_config_ll *ll_root;
 /* Features supported by this lib. */
 #define VHOST_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
 				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
-				(1ULL << VIRTIO_NET_F_CTRL_RX))
+				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
+				(1ULL << VIRTIO_NET_F_MQ))
 static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
 
+static uint32_t q_num = 1;
 
 /*
  * Converts QEMU virtual address to Vhost virtual address. This function is
@@ -177,6 +179,8 @@ add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
 static void
 cleanup_device(struct virtio_net *dev)
 {
+	uint32_t q_idx;
+
 	/* Unmap QEMU memory file if mapped. */
 	if (dev->mem) {
 		munmap((void *)(uintptr_t)dev->mem->mapped_address,
@@ -185,14 +189,18 @@ cleanup_device(struct virtio_net *dev)
 	}
 
 	/* Close any event notifiers opened by device. */
-	if ((int)dev->virtqueue[VIRTIO_RXQ]->callfd >= 0)
-		close((int)dev->virtqueue[VIRTIO_RXQ]->callfd);
-	if ((int)dev->virtqueue[VIRTIO_RXQ]->kickfd >= 0)
-		close((int)dev->virtqueue[VIRTIO_RXQ]->kickfd);
-	if ((int)dev->virtqueue[VIRTIO_TXQ]->callfd >= 0)
-		close((int)dev->virtqueue[VIRTIO_TXQ]->callfd);
-	if ((int)dev->virtqueue[VIRTIO_TXQ]->kickfd >= 0)
-		close((int)dev->virtqueue[VIRTIO_TXQ]->kickfd);
+	for (q_idx = 0; q_idx < dev->num_virt_queues; q_idx++) {
+		uint32_t virt_rx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_RXQ;
+		uint32_t virt_tx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_TXQ;
+		if ((int)dev->virtqueue[virt_rx_q_idx]->callfd >= 0)
+			close((int)dev->virtqueue[virt_rx_q_idx]->callfd);
+		if ((int)dev->virtqueue[virt_rx_q_idx]->kickfd >= 0)
+			close((int)dev->virtqueue[virt_rx_q_idx]->kickfd);
+		if ((int)dev->virtqueue[virt_tx_q_idx]->callfd >= 0)
+			close((int)dev->virtqueue[virt_tx_q_idx]->callfd);
+		if ((int)dev->virtqueue[virt_tx_q_idx]->kickfd >= 0)
+			close((int)dev->virtqueue[virt_tx_q_idx]->kickfd);
+	}
 }
 
 /*
@@ -201,7 +209,10 @@ cleanup_device(struct virtio_net *dev)
 static void
 free_device(struct virtio_net_config_ll *ll_dev)
 {
-	/* Free any malloc'd memory */
+	/*
+	 * Free any malloc'd memory. Only one free per direction is needed,
+	 * even in the multi-queue case, as the virtqueues are malloc'd once.
+	 */
 	free(ll_dev->dev.virtqueue[VIRTIO_RXQ]);
 	free(ll_dev->dev.virtqueue[VIRTIO_TXQ]);
 	free(ll_dev);
@@ -240,9 +251,10 @@ rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
  *  Initialise all variables in device structure.
  */
 static void
-init_device(struct virtio_net *dev)
+init_device(struct virtio_net *dev, uint8_t reset_owner)
 {
 	uint64_t vq_offset;
+	uint32_t q_idx;
 
 	/*
 	 * Virtqueues have already been malloced so
@@ -251,19 +263,27 @@ init_device(struct virtio_net *dev)
 	vq_offset = offsetof(struct virtio_net, mem);
 
 	/* Set everything to 0. */
-	memset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
-		(sizeof(struct virtio_net) - (size_t)vq_offset));
-	memset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));
-	memset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));
-
-	dev->virtqueue[VIRTIO_RXQ]->kickfd = (eventfd_t)-1;
-	dev->virtqueue[VIRTIO_RXQ]->callfd = (eventfd_t)-1;
-	dev->virtqueue[VIRTIO_TXQ]->kickfd = (eventfd_t)-1;
-	dev->virtqueue[VIRTIO_TXQ]->callfd = (eventfd_t)-1;
-
-	/* Backends are set to -1 indicating an inactive device. */
-	dev->virtqueue[VIRTIO_RXQ]->backend = VIRTIO_DEV_STOPPED;
-	dev->virtqueue[VIRTIO_TXQ]->backend = VIRTIO_DEV_STOPPED;
+	if (!reset_owner)
+		memset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
+			(sizeof(struct virtio_net) - (size_t)vq_offset));
+
+	dev->num_virt_queues = q_num;
+
+	for (q_idx = 0; q_idx < dev->num_virt_queues; q_idx++) {
+		uint32_t virt_rx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_RXQ;
+		uint32_t virt_tx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_TXQ;
+		memset(dev->virtqueue[virt_rx_q_idx], 0, sizeof(struct vhost_virtqueue));
+		memset(dev->virtqueue[virt_tx_q_idx], 0, sizeof(struct vhost_virtqueue));
+
+		dev->virtqueue[virt_rx_q_idx]->kickfd = (eventfd_t)-1;
+		dev->virtqueue[virt_rx_q_idx]->callfd = (eventfd_t)-1;
+		dev->virtqueue[virt_tx_q_idx]->kickfd = (eventfd_t)-1;
+		dev->virtqueue[virt_tx_q_idx]->callfd = (eventfd_t)-1;
+
+		/* Backends are set to -1 indicating an inactive device. */
+		dev->virtqueue[virt_rx_q_idx]->backend = VIRTIO_DEV_STOPPED;
+		dev->virtqueue[virt_tx_q_idx]->backend = VIRTIO_DEV_STOPPED;
+	}
 }
 
 /*
@@ -276,6 +296,7 @@ new_device(struct vhost_device_ctx ctx)
 {
 	struct virtio_net_config_ll *new_ll_dev;
 	struct vhost_virtqueue *virtqueue_rx, *virtqueue_tx;
+	uint32_t q_idx;
 
 	/* Setup device and virtqueues. */
 	new_ll_dev = malloc(sizeof(struct virtio_net_config_ll));
@@ -286,7 +307,7 @@ new_device(struct vhost_device_ctx ctx)
 		return -1;
 	}
 
-	virtqueue_rx = malloc(sizeof(struct vhost_virtqueue));
+	virtqueue_rx = malloc(sizeof(struct vhost_virtqueue) * q_num);
 	if (virtqueue_rx == NULL) {
 		free(new_ll_dev);
 		RTE_LOG(ERR, VHOST_CONFIG,
@@ -295,7 +316,7 @@ new_device(struct vhost_device_ctx ctx)
 		return -1;
 	}
 
-	virtqueue_tx = malloc(sizeof(struct vhost_virtqueue));
+	virtqueue_tx = malloc(sizeof(struct vhost_virtqueue) * q_num);
 	if (virtqueue_tx == NULL) {
 		free(virtqueue_rx);
 		free(new_ll_dev);
@@ -305,11 +326,16 @@ new_device(struct vhost_device_ctx ctx)
 		return -1;
 	}
 
-	new_ll_dev->dev.virtqueue[VIRTIO_RXQ] = virtqueue_rx;
-	new_ll_dev->dev.virtqueue[VIRTIO_TXQ] = virtqueue_tx;
+	memset(new_ll_dev->dev.virtqueue, 0, sizeof(new_ll_dev->dev.virtqueue));
+	for (q_idx = 0; q_idx < q_num; q_idx++) {
+		uint32_t virt_rx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_RXQ;
+		uint32_t virt_tx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_TXQ;
+		new_ll_dev->dev.virtqueue[virt_rx_q_idx] = virtqueue_rx + q_idx;
+		new_ll_dev->dev.virtqueue[virt_tx_q_idx] = virtqueue_tx + q_idx;
+	}
 
 	/* Initialise device and virtqueues. */
-	init_device(&new_ll_dev->dev);
+	init_device(&new_ll_dev->dev, 0);
 
 	new_ll_dev->next = NULL;
 
@@ -398,7 +424,7 @@ reset_owner(struct vhost_device_ctx ctx)
 	ll_dev = get_config_ll_entry(ctx);
 
 	cleanup_device(&ll_dev->dev);
-	init_device(&ll_dev->dev);
+	init_device(&ll_dev->dev, 1);
 
 	return 0;
 }
@@ -429,6 +455,7 @@ static int
 set_features(struct vhost_device_ctx ctx, uint64_t *pu)
 {
 	struct virtio_net *dev;
+	uint32_t q_idx;
 
 	dev = get_device(ctx);
 	if (dev == NULL)
@@ -440,22 +467,26 @@ set_features(struct vhost_device_ctx ctx, uint64_t *pu)
 	dev->features = *pu;
 
 	/* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */
-	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
-		LOG_DEBUG(VHOST_CONFIG,
-			"(%"PRIu64") Mergeable RX buffers enabled\n",
-			dev->device_fh);
-		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
-			sizeof(struct virtio_net_hdr_mrg_rxbuf);
-		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
-			sizeof(struct virtio_net_hdr_mrg_rxbuf);
-	} else {
-		LOG_DEBUG(VHOST_CONFIG,
-			"(%"PRIu64") Mergeable RX buffers disabled\n",
-			dev->device_fh);
-		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
-			sizeof(struct virtio_net_hdr);
-		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
-			sizeof(struct virtio_net_hdr);
+	for (q_idx = 0; q_idx < dev->num_virt_queues; q_idx++) {
+		uint32_t virt_rx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_RXQ;
+		uint32_t virt_tx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_TXQ;
+		if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
+			LOG_DEBUG(VHOST_CONFIG,
+				"(%"PRIu64") Mergeable RX buffers enabled\n",
+				dev->device_fh);
+			dev->virtqueue[virt_rx_q_idx]->vhost_hlen =
+				sizeof(struct virtio_net_hdr_mrg_rxbuf);
+			dev->virtqueue[virt_tx_q_idx]->vhost_hlen =
+				sizeof(struct virtio_net_hdr_mrg_rxbuf);
+		} else {
+			LOG_DEBUG(VHOST_CONFIG,
+				"(%"PRIu64") Mergeable RX buffers disabled\n",
+				dev->device_fh);
+			dev->virtqueue[virt_rx_q_idx]->vhost_hlen =
+				sizeof(struct virtio_net_hdr);
+			dev->virtqueue[virt_tx_q_idx]->vhost_hlen =
+				sizeof(struct virtio_net_hdr);
+		}
 	}
 	return 0;
 }
@@ -736,6 +767,15 @@ int rte_vhost_feature_enable(uint64_t feature_mask)
 	return -1;
 }
 
+int rte_vhost_q_num_set(uint32_t q_number)
+{
+	if (q_number > VIRTIO_MAX_VIRTQUEUES)
+		return -1;
+
+	q_num = q_number;
+	return 0;
+}
+
 /*
  * Register ops so that we can add/remove device to data core.
  */
-- 
1.8.4.2


Thread overview: 65+ messages
2015-05-21  7:49 [dpdk-dev] [PATCH 0/6] Support multiple queues in vhost Ouyang Changchun
2015-05-21  7:49 ` [dpdk-dev] [PATCH 1/6] ixgbe: Support VMDq RSS in non-SRIOV environment Ouyang Changchun
2015-08-24 10:41   ` Qiu, Michael
2015-08-25  0:38     ` Ouyang, Changchun
2015-05-21  7:49 ` Ouyang Changchun [this message]
2015-06-03  2:47   ` [dpdk-dev] [PATCH 2/6] lib_vhost: Support multiple queues in virtio dev Xie, Huawei
2015-05-21  7:49 ` [dpdk-dev] [PATCH 3/6] lib_vhost: Set memory layout for multiple queues mode Ouyang Changchun
2015-06-02  3:33   ` Xie, Huawei
2015-05-21  7:49 ` [dpdk-dev] [PATCH 4/6] vhost: Add new command line option: rxq Ouyang Changchun
2015-05-22  1:39   ` Thomas F Herbert
2015-05-22  6:05     ` Ouyang, Changchun
2015-05-22 12:51       ` Thomas F Herbert
2015-05-23  1:25         ` Ouyang, Changchun
2015-05-26  7:21           ` Ouyang, Changchun
2015-05-21  7:49 ` [dpdk-dev] [PATCH 5/6] vhost: Support multiple queues Ouyang Changchun
2015-05-21  7:49 ` [dpdk-dev] [PATCH 6/6] virtio: Resolve for control queue Ouyang Changchun
2015-05-22  1:13 ` [dpdk-dev] [PATCH 0/6] Support multiple queues in vhost Thomas F Herbert
2015-05-22  6:08   ` Ouyang, Changchun
2015-06-10  5:52 ` [dpdk-dev] [PATCH v2 0/7] " Ouyang Changchun
2015-06-10  5:52   ` [dpdk-dev] [PATCH v2 1/7] ixgbe: Support VMDq RSS in non-SRIOV environment Ouyang Changchun
2015-06-10  5:52   ` [dpdk-dev] [PATCH v2 2/7] lib_vhost: Support multiple queues in virtio dev Ouyang Changchun
2015-06-11  9:54     ` Panu Matilainen
2015-06-10  5:52   ` [dpdk-dev] [PATCH v2 3/7] lib_vhost: Set memory layout for multiple queues mode Ouyang Changchun
2015-06-10  5:52   ` [dpdk-dev] [PATCH v2 4/7] vhost: Add new command line option: rxq Ouyang Changchun
2015-06-10  5:52   ` [dpdk-dev] [PATCH v2 5/7] vhost: Support multiple queues Ouyang Changchun
2015-06-10  5:52   ` [dpdk-dev] [PATCH v2 6/7] virtio: Resolve for control queue Ouyang Changchun
2015-06-10  5:52   ` [dpdk-dev] [PATCH v2 7/7] vhost: Add per queue stats info Ouyang Changchun
2015-06-15  7:56   ` [dpdk-dev] [PATCH v3 0/9] Support multiple queues in vhost Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 1/9] ixgbe: Support VMDq RSS in non-SRIOV environment Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 2/9] lib_vhost: Support multiple queues in virtio dev Ouyang Changchun
2015-06-18 13:16       ` Flavio Leitner
2015-06-19  1:06         ` Ouyang, Changchun
2015-06-18 13:34       ` Flavio Leitner
2015-06-19  1:17         ` Ouyang, Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 3/9] lib_vhost: Set memory layout for multiple queues mode Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 4/9] lib_vhost: Check the virtqueue address's validity Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 5/9] vhost: Add new command line option: rxq Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 6/9] vhost: Support multiple queues Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 7/9] virtio: Resolve for control queue Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 8/9] vhost: Add per queue stats info Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 9/9] doc: Update doc for vhost multiple queues Ouyang Changchun
2015-08-12  8:02     ` [dpdk-dev] [PATCH v4 00/12] Support multiple queues in vhost Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 01/12] ixgbe: support VMDq RSS in non-SRIOV environment Ouyang Changchun
2015-08-12  8:22         ` Vincent JARDIN
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 02/12] vhost: support multiple queues in virtio dev Ouyang Changchun
2015-08-13 12:52         ` Flavio Leitner
2015-08-14  2:29           ` Ouyang, Changchun
2015-08-14 12:16             ` Flavio Leitner
2015-08-19  3:52         ` Yuanhan Liu
2015-08-19  5:54           ` Ouyang, Changchun
2015-08-19  6:28             ` Yuanhan Liu
2015-08-19  6:39               ` Yuanhan Liu
2015-09-03  2:27         ` Tetsuya Mukawa
2015-09-06  2:25           ` Ouyang, Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 03/12] vhost: update version map file Ouyang Changchun
2015-08-12  8:24         ` Panu Matilainen
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 04/12] vhost: set memory layout for multiple queues mode Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 05/12] vhost: check the virtqueue address's validity Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 06/12] vhost: support protocol feature Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 07/12] vhost: add new command line option: rxq Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 08/12] vhost: support multiple queues Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 09/12] virtio: resolve for control queue Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 10/12] vhost: add per queue stats info Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 11/12] vhost: alloc core to virtq Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 12/12] doc: update doc for vhost multiple queues Ouyang Changchun
