DPDK patches and discussions
From: Huawei Xie <huawei.xie@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 03/14] add back two hpa region functions
Date: Mon, 20 Oct 2014 12:38:15 +0800
Message-ID: <1413779906-28113-4-git-send-email-huawei.xie@intel.com>
In-Reply-To: <1413779906-28113-1-git-send-email-huawei.xie@intel.com>

The check_hpa_regions and fill_hpa_memory_regions functions and the hpa memory region data structure are added back. They will be used by the zero copy logic, which will be implemented in the vhost example.
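
For context, a rough sketch of how a zero copy setup path could wire these helpers together is shown below. The setup_hpa_regions wrapper, the calloc()/free() allocation and the error handling are illustrative assumptions only; just check_hpa_regions, fill_hpa_memory_regions and struct virtio_memory_regions_hpa come from this patch.

/*
 * Hypothetical setup helper (not part of this patch): count the
 * physically contiguous sub-regions, allocate the table and fill it.
 * struct virtio_memory is the guest memory map already provided by the
 * vhost library; calloc()/free() come from <stdlib.h>.
 */
static struct virtio_memory_regions_hpa *
setup_hpa_regions(struct virtio_memory *mem, uint32_t *nregions_hpa)
{
	struct virtio_memory_regions_hpa *regions_hpa;
	uint32_t idx, n = mem->nregions;

	/* Every break in the host physical address adds one more sub-region. */
	for (idx = 0; idx < mem->nregions; idx++)
		n += check_hpa_regions(
			mem->regions[idx].guest_phys_address +
			mem->regions[idx].address_offset,
			mem->regions[idx].memory_size);

	regions_hpa = calloc(n, sizeof(*regions_hpa));
	if (regions_hpa == NULL)
		return NULL;

	if (fill_hpa_memory_regions(regions_hpa, mem) != n) {
		/* Inconsistent table: give up on zero copy for this device. */
		free(regions_hpa);
		return NULL;
	}

	*nregions_hpa = n;
	return regions_hpa;
}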

Signed-off-by: Huawei Xie <huawei.xie@intel.com>
---
 examples/vhost/main.c | 147 ++++++++++++++++++++++++++++++++++++++++++++++++++
 examples/vhost/main.h |  15 ++++++
 2 files changed, 162 insertions(+)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 11d5d17..169a5f3 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -2481,6 +2481,153 @@ destroy_device (volatile struct virtio_net *dev)
 }
 
 /*
+ * Count how many extra physically contiguous sub-regions one region,
+ * whose vhost virtual address range is contiguous, has to be split into.
+ * The region starts at vva_start and spans 'size' bytes.
+ */
+static uint32_t
+check_hpa_regions(uint64_t vva_start, uint64_t size)
+{
+	uint32_t i, nregions = 0, page_size = getpagesize();
+	uint64_t cur_phys_addr = 0, next_phys_addr = 0;
+	if (vva_start % page_size) {
+		LOG_DEBUG(VHOST_CONFIG,
+			"in check_hpa_regions: vva start(%p) mod page_size(%d) "
+			"has remainder\n",
+			(void *)(uintptr_t)vva_start, page_size);
+		return 0;
+	}
+	if (size % page_size) {
+		LOG_DEBUG(VHOST_CONFIG,
+			"in check_hpa_regions: "
+			"size(%"PRIu64") mod page_size(%d) has remainder\n",
+			size, page_size);
+		return 0;
+	}
+	for (i = 0; i < size - page_size; i = i + page_size) {
+		cur_phys_addr
+			= rte_mem_virt2phy((void *)(uintptr_t)(vva_start + i));
+		next_phys_addr = rte_mem_virt2phy(
+			(void *)(uintptr_t)(vva_start + i + page_size));
+		if ((cur_phys_addr + page_size) != next_phys_addr) {
+			++nregions;
+			LOG_DEBUG(VHOST_CONFIG,
+				"in check_hpa_regions: hva addr:(%p) is not "
+				"contiguous with hva addr:(%p), diff:%d\n",
+				(void *)(uintptr_t)(vva_start + (uint64_t)i),
+				(void *)(uintptr_t)(vva_start + (uint64_t)i
+				+ page_size), page_size);
+			LOG_DEBUG(VHOST_CONFIG,
+				"in check_hpa_regions: hpa addr:(%p) is not "
+				"contiguous with hpa addr:(%p), "
+				"diff:(%"PRIu64")\n",
+				(void *)(uintptr_t)cur_phys_addr,
+				(void *)(uintptr_t)next_phys_addr,
+				(next_phys_addr-cur_phys_addr));
+		}
+	}
+	return nregions;
+}
+
+/*
+ * Divide each region whose vhost virtual addresses are contiguous into
+ * sub-regions within which the host physical addresses are also
+ * contiguous, and fill the offset (relative to the GPA), size and other
+ * information of each sub-region into the mem_region_hpa array.
+ */
+static uint32_t
+fill_hpa_memory_regions(struct virtio_memory_regions_hpa *mem_region_hpa, struct virtio_memory *virtio_memory)
+{
+	uint32_t regionidx, regionidx_hpa = 0, i, k, page_size = getpagesize();
+	uint64_t cur_phys_addr = 0, next_phys_addr = 0, vva_start;
+
+	if (mem_region_hpa == NULL)
+		return 0;
+
+	for (regionidx = 0; regionidx < virtio_memory->nregions; regionidx++) {
+		vva_start = virtio_memory->regions[regionidx].guest_phys_address +
+			virtio_memory->regions[regionidx].address_offset;
+		mem_region_hpa[regionidx_hpa].guest_phys_address
+			= virtio_memory->regions[regionidx].guest_phys_address;
+		mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
+			rte_mem_virt2phy((void *)(uintptr_t)(vva_start)) -
+			mem_region_hpa[regionidx_hpa].guest_phys_address;
+		LOG_DEBUG(VHOST_CONFIG,
+			"in fill_hpa_regions: guest phys addr start[%d]:(%p)\n",
+			regionidx_hpa,
+			(void *)(uintptr_t)
+			(mem_region_hpa[regionidx_hpa].guest_phys_address));
+		LOG_DEBUG(VHOST_CONFIG,
+			"in fill_hpa_regions: host  phys addr start[%d]:(%p)\n",
+			regionidx_hpa,
+			(void *)(uintptr_t)
+			(mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
+		for (i = 0, k = 0;
+			i < virtio_memory->regions[regionidx].memory_size -
+				page_size;
+			i += page_size) {
+			cur_phys_addr = rte_mem_virt2phy(
+					(void *)(uintptr_t)(vva_start + i));
+			next_phys_addr = rte_mem_virt2phy(
+					(void *)(uintptr_t)(vva_start +
+					i + page_size));
+			if ((cur_phys_addr + page_size) != next_phys_addr) {
+				mem_region_hpa[regionidx_hpa].guest_phys_address_end =
+					mem_region_hpa[regionidx_hpa].guest_phys_address +
+					k + page_size;
+				mem_region_hpa[regionidx_hpa].memory_size
+					= k + page_size;
+				LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest "
+					"phys addr end  [%d]:(%p)\n",
+					regionidx_hpa,
+					(void *)(uintptr_t)
+					(mem_region_hpa[regionidx_hpa].guest_phys_address_end));
+				LOG_DEBUG(VHOST_CONFIG,
+					"in fill_hpa_regions: guest phys addr "
+					"size [%d]:(%p)\n",
+					regionidx_hpa,
+					(void *)(uintptr_t)
+					(mem_region_hpa[regionidx_hpa].memory_size));
+				mem_region_hpa[regionidx_hpa + 1].guest_phys_address
+					= mem_region_hpa[regionidx_hpa].guest_phys_address_end;
+				++regionidx_hpa;
+				mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
+					next_phys_addr -
+					mem_region_hpa[regionidx_hpa].guest_phys_address;
+				LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest"
+					" phys addr start[%d]:(%p)\n",
+					regionidx_hpa,
+					(void *)(uintptr_t)
+					(mem_region_hpa[regionidx_hpa].guest_phys_address));
+				LOG_DEBUG(VHOST_CONFIG,
+					"in fill_hpa_regions: host  phys addr "
+					"start[%d]:(%p)\n",
+					regionidx_hpa,
+					(void *)(uintptr_t)
+					(mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
+				k = 0;
+			} else {
+				k += page_size;
+			}
+		}
+		mem_region_hpa[regionidx_hpa].guest_phys_address_end
+			= mem_region_hpa[regionidx_hpa].guest_phys_address
+			+ k + page_size;
+		mem_region_hpa[regionidx_hpa].memory_size = k + page_size;
+		LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr end  "
+			"[%d]:(%p)\n", regionidx_hpa,
+			(void *)(uintptr_t)
+			(mem_region_hpa[regionidx_hpa].guest_phys_address_end));
+		LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr size "
+			"[%d]:(%p)\n", regionidx_hpa,
+			(void *)(uintptr_t)
+			(mem_region_hpa[regionidx_hpa].memory_size));
+		++regionidx_hpa;
+	}
+	return regionidx_hpa;
+}
+
+/*
  * A new device is added to a data core. First the device is added to the main linked list
  * and the allocated to a specific data core.
  */
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index c15d938..ebb0b5b 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -57,6 +57,21 @@
 #define RTE_LOGTYPE_VHOST_DATA   RTE_LOGTYPE_USER2
 #define RTE_LOGTYPE_VHOST_PORT   RTE_LOGTYPE_USER3
 
+/**
+ * Information relating to memory regions including offsets to
+ * addresses in host physical space.
+ */
+struct virtio_memory_regions_hpa {
+	/** Base guest physical address of region. */
+	uint64_t    guest_phys_address;
+	/** End guest physical address of region. */
+	uint64_t    guest_phys_address_end;
+	/** Size of region. */
+	uint64_t    memory_size;
+	/** Offset of region for gpa to hpa translation. */
+	uint64_t    host_phys_addr_offset;
+};
+
 /*
  * Device linked list structure for data path.
  */
-- 
1.8.1.4
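
As an aside, a minimal sketch of how the data path could later translate a guest physical address (GPA) into a host physical address (HPA) using the filled table; the gpa_to_hpa name and the linear search are assumptions for illustration, not part of this patch:

/* Hypothetical helper: translate a GPA to an HPA by finding the
 * sub-region that covers it and applying the recorded offset. */
static uint64_t
gpa_to_hpa(struct virtio_memory_regions_hpa *regions_hpa,
	uint32_t nregions_hpa, uint64_t guest_pa)
{
	uint32_t i;

	for (i = 0; i < nregions_hpa; i++) {
		if (guest_pa >= regions_hpa[i].guest_phys_address &&
		    guest_pa < regions_hpa[i].guest_phys_address_end)
			return guest_pa +
				regions_hpa[i].host_phys_addr_offset;
	}
	return 0; /* GPA not covered by any sub-region */
}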

Thread overview: 22+ messages
2014-10-20  4:38 [dpdk-dev] [PATCH 00/14] new vhost example Huawei Xie
2014-10-20  4:38 ` [dpdk-dev] [PATCH 01/14] copy old " Huawei Xie
2014-10-20  4:38 ` [dpdk-dev] [PATCH 02/14] remove virtio_dev_rx, virtio_dev_merge_rx, copy_from_mbuf_to_ring, virtio_dev_tx, virtio_dev_merge_tx Huawei Xie
2014-10-20  4:38 ` Huawei Xie [this message]
2014-10-20  4:38 ` [dpdk-dev] [PATCH 04/14] dev->vdev Huawei Xie
2014-10-20  4:38 ` [dpdk-dev] [PATCH 05/14] hpa region Huawei Xie
2014-10-20  4:38 ` [dpdk-dev] [PATCH 06/14] enqueue_burst, dequeue_burst, mac learning Huawei Xie
2014-10-20  4:38 ` [dpdk-dev] [PATCH 07/14] patch virtio_tx_route Huawei Xie
2014-10-20  4:38 ` [dpdk-dev] [PATCH 08/14] remove gpa_to_vva, base_index Huawei Xie
2014-10-23  9:43   ` Thomas Monjalon
2014-10-20  4:38 ` [dpdk-dev] [PATCH 09/14] other APIs Huawei Xie
2014-10-23  9:44   ` Thomas Monjalon
2014-10-20  4:38 ` [dpdk-dev] [PATCH 10/14] vmdq_rx_q Huawei Xie
2014-10-23  9:45   ` Thomas Monjalon
2014-10-20  4:38 ` [dpdk-dev] [PATCH 11/14] minor fixes Huawei Xie
2014-10-20  4:38 ` [dpdk-dev] [PATCH 12/14] add branch hint Huawei Xie
2014-10-20  4:38 ` [dpdk-dev] [PATCH 13/14] INC_VEC Huawei Xie
2014-10-20  4:38 ` [dpdk-dev] [PATCH 14/14] Makefile Huawei Xie
2014-10-23  9:48   ` Thomas Monjalon
2014-10-20  5:21 ` [dpdk-dev] [PATCH 00/14] new vhost example Ouyang, Changchun
2014-10-23 11:10 ` Thomas Monjalon
2014-10-23 16:17   ` Xie, Huawei
