DPDK patches and discussions
From: Ouyang Changchun <changchun.ouyang@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 5/7] vhost: Support multiple queues
Date: Wed, 10 Jun 2015 13:52:27 +0800	[thread overview]
Message-ID: <1433915549-18571-6-git-send-email-changchun.ouyang@intel.com> (raw)
In-Reply-To: <1433915549-18571-1-git-send-email-changchun.ouyang@intel.com>

The vhost sample leverages VMDq+RSS in HW to receive packets and distribute them
into different queues in the pool according to their 5-tuple.

It also enables multiple-queue mode in the vhost/virtio layer.

The number of HW queues in each pool is exactly the same as the queue number in
the virtio device, e.g. with rxq = 4 there are 4 HW queues in each VMDq pool and
4 queues in each virtio device/port, mapped one to one:

=========================================
==================|   |==================|
       vport0     |   |      vport1      |
---  ---  ---  ---|   |---  ---  ---  ---|
q0 | q1 | q2 | q3 |   |q0 | q1 | q2 | q3 |
/\= =/\= =/\= =/\=|   |/\= =/\= =/\= =/\=|
||   ||   ||   ||      ||   ||   ||   ||
||   ||   ||   ||      ||   ||   ||   ||
||= =||= =||= =||=|   =||== ||== ||== ||=|
q0 | q1 | q2 | q3 |   |q0 | q1 | q2 | q3 |

------------------|   |------------------|
     VMDq pool0   |   |    VMDq pool1    |
==================|   |==================|
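
The port configuration this mapping implies can be sketched roughly as below
(illustrative only, not part of this patch): VMDq is combined with RSS so that
each pool exposes rxq hardware queues. Field and macro names come from the DPDK
ethdev API; the values actually used by the sample's port setup code may differ.

    /* Sketch: VMDq pools with RSS hashing inside each pool. */
    static const struct rte_eth_conf vmdq_rss_conf = {
        .rxmode = {
            .mq_mode = ETH_MQ_RX_VMDQ_RSS,
        },
        .rx_adv_conf = {
            .vmdq_rx_conf = {
                /* Example value: one pool per vhost device. */
                .nb_queue_pools = ETH_8_POOLS,
            },
            .rss_conf = {
                /* Hash flows so they spread across the rxq queues
                 * of each pool. */
                .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
            },
        },
    };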

On the RX side, it first polls each queue of the pool, gets the packets from it,
and enqueues them into the corresponding queue of the virtio device/port.
On the TX side, it dequeues packets from each queue of the virtio device/port and
sends them to either the physical port or another virtio device according to
their destination MAC address.
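
In condensed form, the per-queue data path looks roughly like the loop below.
This is a simplified sketch assembled from the calls touched by this patch; the
real code in switch_worker() additionally handles RX retry, statistics and MAC
learning. Virtio queues are laid out as RX/TX pairs, hence the VIRTIO_QNUM
stride when indexing them.

    for (i = 0; i < rxq; i++) {
        /* Guest RX: HW queue i of the VMDq pool -> virtio RX queue i. */
        rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q + i,
                pkts_burst, MAX_PKT_BURST);
        if (rx_count) {
            rte_vhost_enqueue_burst(dev, VIRTIO_RXQ + i * VIRTIO_QNUM,
                    pkts_burst, rx_count);
            /* The enqueue copies into the guest, so the mbufs can be freed. */
            while (rx_count)
                rte_pktmbuf_free(pkts_burst[--rx_count]);
        }

        /* Guest TX: virtio TX queue i -> physical port or local virtio device. */
        tx_count = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ + i * VIRTIO_QNUM,
                mbuf_pool, pkts_burst, MAX_PKT_BURST);
        while (tx_count)
            virtio_tx_route(vdev, pkts_burst[--tx_count],
                    (uint16_t)dev->device_fh, i);
    }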

Changes in v2:
  - check that the queue number per VMDq pool matches the queue pair number per vhost device
  - remove the unnecessary call to the q_num_set API
  - fix checkpatch errors

Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
---
 examples/vhost/main.c | 132 ++++++++++++++++++++++++++++++--------------------
 1 file changed, 79 insertions(+), 53 deletions(-)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 09ed0ca..76b6ae7 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1002,8 +1002,9 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
 
 	/* Enable stripping of the vlan tag as we handle routing. */
 	if (vlan_strip)
-		rte_eth_dev_set_vlan_strip_on_queue(ports[0],
-			(uint16_t)vdev->vmdq_rx_q, 1);
+		for (i = 0; i < (int)rxq; i++)
+			rte_eth_dev_set_vlan_strip_on_queue(ports[0],
+				(uint16_t)(vdev->vmdq_rx_q + i), 1);
 
 	/* Set device as ready for RX. */
 	vdev->ready = DEVICE_RX;
@@ -1018,7 +1019,7 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
 static inline void
 unlink_vmdq(struct vhost_dev *vdev)
 {
-	unsigned i = 0;
+	unsigned i = 0, j = 0;
 	unsigned rx_count;
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 
@@ -1031,15 +1032,19 @@ unlink_vmdq(struct vhost_dev *vdev)
 		vdev->vlan_tag = 0;
 
 		/*Clear out the receive buffers*/
-		rx_count = rte_eth_rx_burst(ports[0],
-					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
+		for (i = 0; i < rxq; i++) {
+			rx_count = rte_eth_rx_burst(ports[0],
+					(uint16_t)vdev->vmdq_rx_q + i,
+					pkts_burst, MAX_PKT_BURST);
 
-		while (rx_count) {
-			for (i = 0; i < rx_count; i++)
-				rte_pktmbuf_free(pkts_burst[i]);
+			while (rx_count) {
+				for (j = 0; j < rx_count; j++)
+					rte_pktmbuf_free(pkts_burst[j]);
 
-			rx_count = rte_eth_rx_burst(ports[0],
-					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
+				rx_count = rte_eth_rx_burst(ports[0],
+					(uint16_t)vdev->vmdq_rx_q + i,
+					pkts_burst, MAX_PKT_BURST);
+			}
 		}
 
 		vdev->ready = DEVICE_MAC_LEARNING;
@@ -1051,7 +1056,7 @@ unlink_vmdq(struct vhost_dev *vdev)
  * the packet on that devices RX queue. If not then return.
  */
 static inline int __attribute__((always_inline))
-virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
+virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m, uint32_t q_idx)
 {
 	struct virtio_net_data_ll *dev_ll;
 	struct ether_hdr *pkt_hdr;
@@ -1066,7 +1071,7 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
 
 	while (dev_ll != NULL) {
 		if ((dev_ll->vdev->ready == DEVICE_RX) && ether_addr_cmp(&(pkt_hdr->d_addr),
-				          &dev_ll->vdev->mac_address)) {
+					&dev_ll->vdev->mac_address)) {
 
 			/* Drop the packet if the TX packet is destined for the TX device. */
 			if (dev_ll->vdev->dev->device_fh == dev->device_fh) {
@@ -1084,7 +1089,9 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
 				LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", tdev->device_fh);
 			} else {
 				/*send the packet to the local virtio device*/
-				ret = rte_vhost_enqueue_burst(tdev, VIRTIO_RXQ, &m, 1);
+				ret = rte_vhost_enqueue_burst(tdev,
+					VIRTIO_RXQ + q_idx * VIRTIO_QNUM,
+					&m, 1);
 				if (enable_stats) {
 					rte_atomic64_add(
 					&dev_statistics[tdev->device_fh].rx_total_atomic,
@@ -1161,7 +1168,8 @@ find_local_dest(struct virtio_net *dev, struct rte_mbuf *m,
  * or the physical port.
  */
 static inline void __attribute__((always_inline))
-virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
+virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m,
+		uint16_t vlan_tag, uint32_t q_idx)
 {
 	struct mbuf_table *tx_q;
 	struct rte_mbuf **m_table;
@@ -1171,7 +1179,8 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
 	struct ether_hdr *nh;
 
 	/*check if destination is local VM*/
-	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
+	if ((vm2vm_mode == VM2VM_SOFTWARE) &&
+		(virtio_tx_local(vdev, m, q_idx) == 0)) {
 		rte_pktmbuf_free(m);
 		return;
 	}
@@ -1335,49 +1344,60 @@ switch_worker(__attribute__((unused)) void *arg)
 			}
 			if (likely(vdev->ready == DEVICE_RX)) {
 				/*Handle guest RX*/
-				rx_count = rte_eth_rx_burst(ports[0],
-					vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
+				for (i = 0; i < rxq; i++) {
+					rx_count = rte_eth_rx_burst(ports[0],
+						vdev->vmdq_rx_q + i, pkts_burst, MAX_PKT_BURST);
 
-				if (rx_count) {
-					/*
-					* Retry is enabled and the queue is full then we wait and retry to avoid packet loss
-					* Here MAX_PKT_BURST must be less than virtio queue size
-					*/
-					if (enable_retry && unlikely(rx_count > rte_vring_available_entries(dev, VIRTIO_RXQ))) {
-						for (retry = 0; retry < burst_rx_retry_num; retry++) {
-							rte_delay_us(burst_rx_delay_time);
-							if (rx_count <= rte_vring_available_entries(dev, VIRTIO_RXQ))
-								break;
+					if (rx_count) {
+						/*
+						* Retry is enabled and the queue is full then we wait and retry to avoid packet loss
+						* Here MAX_PKT_BURST must be less than virtio queue size
+						*/
+						if (enable_retry && unlikely(rx_count > rte_vring_available_entries(dev,
+											VIRTIO_RXQ + i * VIRTIO_QNUM))) {
+							for (retry = 0; retry < burst_rx_retry_num; retry++) {
+								rte_delay_us(burst_rx_delay_time);
+								if (rx_count <= rte_vring_available_entries(dev,
+											VIRTIO_RXQ + i * VIRTIO_QNUM))
+									break;
+							}
+						}
+						ret_count = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ + i * VIRTIO_QNUM,
+											pkts_burst, rx_count);
+						if (enable_stats) {
+							rte_atomic64_add(
+							&dev_statistics[dev_ll->vdev->dev->device_fh].rx_total_atomic,
+							rx_count);
+							rte_atomic64_add(
+							&dev_statistics[dev_ll->vdev->dev->device_fh].rx_atomic, ret_count);
+						}
+						while (likely(rx_count)) {
+							rx_count--;
+							rte_pktmbuf_free(pkts_burst[rx_count]);
 						}
 					}
-					ret_count = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_burst, rx_count);
-					if (enable_stats) {
-						rte_atomic64_add(
-						&dev_statistics[dev_ll->vdev->dev->device_fh].rx_total_atomic,
-						rx_count);
-						rte_atomic64_add(
-						&dev_statistics[dev_ll->vdev->dev->device_fh].rx_atomic, ret_count);
-					}
-					while (likely(rx_count)) {
-						rx_count--;
-						rte_pktmbuf_free(pkts_burst[rx_count]);
-					}
-
 				}
 			}
 
 			if (likely(!vdev->remove)) {
 				/* Handle guest TX*/
-				tx_count = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool, pkts_burst, MAX_PKT_BURST);
-				/* If this is the first received packet we need to learn the MAC and setup VMDQ */
-				if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
-					if (vdev->remove || (link_vmdq(vdev, pkts_burst[0]) == -1)) {
-						while (tx_count)
-							rte_pktmbuf_free(pkts_burst[--tx_count]);
+				for (i = 0; i < rxq; i++) {
+					tx_count = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ + i * 2,
+							mbuf_pool, pkts_burst, MAX_PKT_BURST);
+					/*
+					 * If this is the first received packet we need to learn
+					 * the MAC and setup VMDQ
+					 */
+					if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
+						if (vdev->remove || (link_vmdq(vdev, pkts_burst[0]) == -1)) {
+							while (tx_count)
+								rte_pktmbuf_free(pkts_burst[--tx_count]);
+						}
 					}
+					while (tx_count)
+						virtio_tx_route(vdev, pkts_burst[--tx_count],
+								(uint16_t)dev->device_fh, i);
 				}
-				while (tx_count)
-					virtio_tx_route(vdev, pkts_burst[--tx_count], (uint16_t)dev->device_fh);
 			}
 
 			/*move to the next device in the list*/
@@ -2636,6 +2656,13 @@ new_device (struct virtio_net *dev)
 	struct vhost_dev *vdev;
 	uint32_t regionidx;
 
+	if ((rxq > 1) && (dev->num_virt_queues != rxq)) {
+		RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") queue num in VMDq pool:"
+			"%d != queue pair num in vhost dev:%d\n",
+			dev->device_fh, rxq, dev->num_virt_queues);
+		return -1;
+	}
+
 	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
 	if (vdev == NULL) {
 		RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Couldn't allocate memory for vhost dev\n",
@@ -2681,12 +2708,12 @@ new_device (struct virtio_net *dev)
 		}
 	}
 
-
 	/* Add device to main ll */
 	ll_dev = get_data_ll_free_entry(&ll_root_free);
 	if (ll_dev == NULL) {
-		RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
-			"of %d devices per core has been reached\n",
+		RTE_LOG(INFO, VHOST_DATA,
+			"(%"PRIu64") No free entry found in linked list."
+			"Device limit of %d devices per core has been reached\n",
 			dev->device_fh, num_devices);
 		if (vdev->regions_hpa)
 			rte_free(vdev->regions_hpa);
@@ -2695,8 +2722,7 @@ new_device (struct virtio_net *dev)
 	}
 	ll_dev->vdev = vdev;
 	add_data_ll_entry(&ll_root_used, ll_dev);
-	vdev->vmdq_rx_q
-		= dev->device_fh * queues_per_pool + vmdq_queue_base;
+	vdev->vmdq_rx_q	= dev->device_fh * rxq + vmdq_queue_base;
 
 	if (zero_copy) {
 		uint32_t index = vdev->vmdq_rx_q;
-- 
1.8.4.2

Thread overview: 65+ messages
2015-05-21  7:49 [dpdk-dev] [PATCH 0/6] Support multiple queues in vhost Ouyang Changchun
2015-05-21  7:49 ` [dpdk-dev] [PATCH 1/6] ixgbe: Support VMDq RSS in non-SRIOV environment Ouyang Changchun
2015-08-24 10:41   ` Qiu, Michael
2015-08-25  0:38     ` Ouyang, Changchun
2015-05-21  7:49 ` [dpdk-dev] [PATCH 2/6] lib_vhost: Support multiple queues in virtio dev Ouyang Changchun
2015-06-03  2:47   ` Xie, Huawei
2015-05-21  7:49 ` [dpdk-dev] [PATCH 3/6] lib_vhost: Set memory layout for multiple queues mode Ouyang Changchun
2015-06-02  3:33   ` Xie, Huawei
2015-05-21  7:49 ` [dpdk-dev] [PATCH 4/6] vhost: Add new command line option: rxq Ouyang Changchun
2015-05-22  1:39   ` Thomas F Herbert
2015-05-22  6:05     ` Ouyang, Changchun
2015-05-22 12:51       ` Thomas F Herbert
2015-05-23  1:25         ` Ouyang, Changchun
2015-05-26  7:21           ` Ouyang, Changchun
2015-05-21  7:49 ` [dpdk-dev] [PATCH 5/6] vhost: Support multiple queues Ouyang Changchun
2015-05-21  7:49 ` [dpdk-dev] [PATCH 6/6] virtio: Resolve for control queue Ouyang Changchun
2015-05-22  1:13 ` [dpdk-dev] [PATCH 0/6] Support multiple queues in vhost Thomas F Herbert
2015-05-22  6:08   ` Ouyang, Changchun
2015-06-10  5:52 ` [dpdk-dev] [PATCH v2 0/7] " Ouyang Changchun
2015-06-10  5:52   ` [dpdk-dev] [PATCH v2 1/7] ixgbe: Support VMDq RSS in non-SRIOV environment Ouyang Changchun
2015-06-10  5:52   ` [dpdk-dev] [PATCH v2 2/7] lib_vhost: Support multiple queues in virtio dev Ouyang Changchun
2015-06-11  9:54     ` Panu Matilainen
2015-06-10  5:52   ` [dpdk-dev] [PATCH v2 3/7] lib_vhost: Set memory layout for multiple queues mode Ouyang Changchun
2015-06-10  5:52   ` [dpdk-dev] [PATCH v2 4/7] vhost: Add new command line option: rxq Ouyang Changchun
2015-06-10  5:52   ` Ouyang Changchun [this message]
2015-06-10  5:52   ` [dpdk-dev] [PATCH v2 6/7] virtio: Resolve for control queue Ouyang Changchun
2015-06-10  5:52   ` [dpdk-dev] [PATCH v2 7/7] vhost: Add per queue stats info Ouyang Changchun
2015-06-15  7:56   ` [dpdk-dev] [PATCH v3 0/9] Support multiple queues in vhost Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 1/9] ixgbe: Support VMDq RSS in non-SRIOV environment Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 2/9] lib_vhost: Support multiple queues in virtio dev Ouyang Changchun
2015-06-18 13:16       ` Flavio Leitner
2015-06-19  1:06         ` Ouyang, Changchun
2015-06-18 13:34       ` Flavio Leitner
2015-06-19  1:17         ` Ouyang, Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 3/9] lib_vhost: Set memory layout for multiple queues mode Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 4/9] lib_vhost: Check the virtqueue address's validity Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 5/9] vhost: Add new command line option: rxq Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 6/9] vhost: Support multiple queues Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 7/9] virtio: Resolve for control queue Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 8/9] vhost: Add per queue stats info Ouyang Changchun
2015-06-15  7:56     ` [dpdk-dev] [PATCH v3 9/9] doc: Update doc for vhost multiple queues Ouyang Changchun
2015-08-12  8:02     ` [dpdk-dev] [PATCH v4 00/12] Support multiple queues in vhost Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 01/12] ixgbe: support VMDq RSS in non-SRIOV environment Ouyang Changchun
2015-08-12  8:22         ` Vincent JARDIN
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 02/12] vhost: support multiple queues in virtio dev Ouyang Changchun
2015-08-13 12:52         ` Flavio Leitner
2015-08-14  2:29           ` Ouyang, Changchun
2015-08-14 12:16             ` Flavio Leitner
2015-08-19  3:52         ` Yuanhan Liu
2015-08-19  5:54           ` Ouyang, Changchun
2015-08-19  6:28             ` Yuanhan Liu
2015-08-19  6:39               ` Yuanhan Liu
2015-09-03  2:27         ` Tetsuya Mukawa
2015-09-06  2:25           ` Ouyang, Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 03/12] vhost: update version map file Ouyang Changchun
2015-08-12  8:24         ` Panu Matilainen
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 04/12] vhost: set memory layout for multiple queues mode Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 05/12] vhost: check the virtqueue address's validity Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 06/12] vhost: support protocol feature Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 07/12] vhost: add new command line option: rxq Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 08/12] vhost: support multiple queues Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 09/12] virtio: resolve for control queue Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 10/12] vhost: add per queue stats info Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 11/12] vhost: alloc core to virtq Ouyang Changchun
2015-08-12  8:02       ` [dpdk-dev] [PATCH v4 12/12] doc: update doc for vhost multiple queues Ouyang Changchun
