DPDK patches and discussions
From: Huawei Xie <huawei.xie@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v6 05/25] lib/librte_vhost: remove zero copy memory region generation logic
Date: Thu,  9 Oct 2014 02:54:39 +0800	[thread overview]
Message-ID: <1412794499-4332-6-git-send-email-huawei.xie@intel.com> (raw)
In-Reply-To: <1412794499-4332-1-git-send-email-huawei.xie@intel.com>

The zero copy feature is not generic at the moment, as it is closely coupled
with the NIC, so it is not included in the vhost library in this version.
The gpa (guest physical address) to hpa (host physical address) mapping
region generation logic is therefore removed.
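
For reference, here is a minimal sketch (not part of this patch) of how a
regions_hpa table of the shape removed below is typically consulted to
translate a guest physical address into a host physical address; the
gpa_to_hpa helper name and its signature are illustrative only:

#include <stdint.h>

/*
 * Mirrors the virtio_memory_regions_hpa layout removed by this patch:
 * each entry covers one physically contiguous sub-region and records the
 * offset to add to a GPA in that range to obtain the corresponding HPA.
 */
struct virtio_memory_regions_hpa {
	uint64_t guest_phys_address;      /* Base guest physical address. */
	uint64_t guest_phys_address_end;  /* End guest physical address. */
	uint64_t memory_size;             /* Size of the sub-region. */
	uint64_t host_phys_addr_offset;   /* GPA-to-HPA translation offset. */
};

/*
 * Illustrative lookup: scan the sub-region table and translate guest_pa
 * to a host physical address; return 0 if the address is not covered.
 */
static uint64_t
gpa_to_hpa(const struct virtio_memory_regions_hpa *regions,
	uint32_t nregions, uint64_t guest_pa)
{
	uint32_t i;

	for (i = 0; i < nregions; i++) {
		if (guest_pa >= regions[i].guest_phys_address &&
		    guest_pa < regions[i].guest_phys_address_end)
			return guest_pa + regions[i].host_phys_addr_offset;
	}
	return 0;
}

With this patch applied, an application that still needs zero copy would
have to build and consult such a table itself rather than rely on the
vhost library.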

Signed-off-by: Huawei Xie <huawei.xie@intel.com>
---
 lib/librte_vhost/rte_virtio_net.h |  19 ----
 lib/librte_vhost/virtio-net.c     | 177 --------------------------------------
 2 files changed, 196 deletions(-)

diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index 4d43f2c..f3509aa 100644
--- a/lib/librte_vhost/rte_virtio_net.h
+++ b/lib/librte_vhost/rte_virtio_net.h
@@ -40,7 +40,6 @@
 /* Backend value set by guest. */
 #define VIRTIO_DEV_STOPPED -1
 
-#define PAGE_SIZE   4096
 
 /* Enum for virtqueue management. */
 enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
@@ -108,20 +107,6 @@ struct virtio_memory_regions {
 	uint64_t	address_offset;			/* Offset of region for address translation. */
 };
 
-/*
- * Information relating to memory regions including offsets to
- * addresses in host physical space.
- */
-struct virtio_memory_regions_hpa {
-	/* Base guest physical address of region. */
-	uint64_t	guest_phys_address;
-	/* End guest physical address of region. */
-	uint64_t	guest_phys_address_end;
-	/* Size of region. */
-	uint64_t	memory_size;
-	/* Offset of region for gpa to hpa translation. */
-	uint64_t	host_phys_addr_offset;
-};
 
 /*
  * Memory structure includes region and mapping information.
@@ -131,10 +116,6 @@ struct virtio_memory {
 	uint64_t			mapped_address;			/* Mapped address of memory file base in our applications memory space. */
 	uint64_t			mapped_size;			/* Total size of memory file. */
 	uint32_t			nregions;				/* Number of memory regions. */
-	 /* Number of memory regions for gpa to hpa translation. */
-	uint32_t			nregions_hpa;
-	/* Memory region information for gpa to hpa translation. */
-	struct virtio_memory_regions_hpa  *regions_hpa;
 	/* Memory region information. */
 	struct virtio_memory_regions      regions[0];
 };
diff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c
index 4089584..4f11b28 100644
--- a/lib/librte_vhost/virtio-net.c
+++ b/lib/librte_vhost/virtio-net.c
@@ -344,8 +344,6 @@ cleanup_device(struct virtio_net *dev)
 	/* Unmap QEMU memory file if mapped. */
 	if (dev->mem) {
 		munmap((void*)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);
-		if (dev->mem->regions_hpa)
-			free(dev->mem->regions_hpa);
 		free(dev->mem);
 	}
 
@@ -577,153 +575,6 @@ set_features(struct vhost_device_ctx ctx, uint64_t *pu)
 	return 0;
 }
 
-/*
- * Calculate the region count of physical continous regions for one particular
- * region of whose vhost virtual address is continous. The particular region
- * start from vva_start, with size of 'size' in argument.
- */
-static uint32_t check_hpa_regions(uint64_t vva_start, uint64_t size)
-{
-	uint32_t i, nregions = 0, page_size = PAGE_SIZE;
-	uint64_t cur_phys_addr = 0, next_phys_addr = 0;
-	if (vva_start % page_size) {
-		LOG_DEBUG(VHOST_CONFIG,
-			"in check_countinous: vva start(%p) mod page_size(%d) "
-			"has remainder\n",
-			(void *)(uintptr_t)vva_start, page_size);
-		return 0;
-	}
-	if (size % page_size) {
-		LOG_DEBUG(VHOST_CONFIG,
-			"in check_countinous: "
-			"size((%"PRIu64")) mod page_size(%d) has remainder\n",
-			size, page_size);
-		return 0;
-	}
-	for (i = 0; i < size - page_size; i = i + page_size) {
-		cur_phys_addr
-			= rte_mem_virt2phy((void *)(uintptr_t)(vva_start + i));
-		next_phys_addr = rte_mem_virt2phy(
-			(void *)(uintptr_t)(vva_start + i + page_size));
-		if ((cur_phys_addr + page_size) != next_phys_addr) {
-			++nregions;
-			LOG_DEBUG(VHOST_CONFIG,
-				"in check_continuous: hva addr:(%p) is not "
-				"continuous with hva addr:(%p), diff:%d\n",
-				(void *)(uintptr_t)(vva_start + (uint64_t)i),
-				(void *)(uintptr_t)(vva_start + (uint64_t)i
-				+ page_size), page_size);
-			LOG_DEBUG(VHOST_CONFIG,
-				"in check_continuous: hpa addr:(%p) is not "
-				"continuous with hpa addr:(%p), "
-				"diff:(%"PRIu64")\n",
-				(void *)(uintptr_t)cur_phys_addr,
-				(void *)(uintptr_t)next_phys_addr,
-				(next_phys_addr-cur_phys_addr));
-		}
-	}
-	return nregions;
-}
-
-/*
- * Divide each region whose vhost virtual address is continous into a few
- * sub-regions, make sure the physical address within each sub-region are
- * continous. And fill offset(to GPA) and size etc. information of each
- * sub-region into regions_hpa.
- */
-static uint32_t fill_hpa_memory_regions(void *memory)
-{
-	uint32_t regionidx, regionidx_hpa = 0, i, k, page_size = PAGE_SIZE;
-	uint64_t cur_phys_addr = 0, next_phys_addr = 0, vva_start;
-	struct virtio_memory *virtio_memory = (struct virtio_memory *)memory;
-	struct virtio_memory_regions_hpa *mem_region_hpa
-		= virtio_memory->regions_hpa;
-
-	if (mem_region_hpa == NULL)
-		return 0;
-
-	for (regionidx = 0; regionidx < virtio_memory->nregions; regionidx++) {
-		vva_start = virtio_memory->regions[regionidx].guest_phys_address
-			+ virtio_memory->regions[regionidx].address_offset;
-		mem_region_hpa[regionidx_hpa].guest_phys_address
-			= virtio_memory->regions[regionidx].guest_phys_address;
-		mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
-			rte_mem_virt2phy((void *)(uintptr_t)(vva_start))
-			- mem_region_hpa[regionidx_hpa].guest_phys_address;
-		LOG_DEBUG(VHOST_CONFIG,
-			"in fill_hpa_regions: guest phys addr start[%d]:(%p)\n",
-			regionidx_hpa,
-			(void *)(uintptr_t)
-			(mem_region_hpa[regionidx_hpa].guest_phys_address));
-		LOG_DEBUG(VHOST_CONFIG,
-			"in fill_hpa_regions: host  phys addr start[%d]:(%p)\n",
-			regionidx_hpa,
-			(void *)(uintptr_t)
-			(mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
-		for (i = 0, k = 0;
-			i < virtio_memory->regions[regionidx].memory_size
-				- page_size;
-			i += page_size) {
-			cur_phys_addr = rte_mem_virt2phy(
-					(void *)(uintptr_t)(vva_start + i));
-			next_phys_addr = rte_mem_virt2phy(
-					(void *)(uintptr_t)(vva_start
-					+ i + page_size));
-			if ((cur_phys_addr + page_size) != next_phys_addr) {
-				mem_region_hpa[regionidx_hpa].guest_phys_address_end =
-					mem_region_hpa[regionidx_hpa].guest_phys_address
-					+ k + page_size;
-				mem_region_hpa[regionidx_hpa].memory_size
-					= k + page_size;
-				LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest "
-					"phys addr end  [%d]:(%p)\n",
-					regionidx_hpa,
-					(void *)(uintptr_t)
-					(mem_region_hpa[regionidx_hpa].guest_phys_address_end));
-				LOG_DEBUG(VHOST_CONFIG,
-					"in fill_hpa_regions: guest phys addr "
-					"size [%d]:(%p)\n",
-					regionidx_hpa,
-					(void *)(uintptr_t)
-					(mem_region_hpa[regionidx_hpa].memory_size));
-				mem_region_hpa[regionidx_hpa + 1].guest_phys_address
-					= mem_region_hpa[regionidx_hpa].guest_phys_address_end;
-				++regionidx_hpa;
-				mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
-					next_phys_addr
-					- mem_region_hpa[regionidx_hpa].guest_phys_address;
-				LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest"
-					" phys addr start[%d]:(%p)\n",
-					regionidx_hpa,
-					(void *)(uintptr_t)
-					(mem_region_hpa[regionidx_hpa].guest_phys_address));
-				LOG_DEBUG(VHOST_CONFIG,
-					"in fill_hpa_regions: host  phys addr "
-					"start[%d]:(%p)\n",
-					regionidx_hpa,
-					(void *)(uintptr_t)
-					(mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
-				k = 0;
-			} else {
-				k += page_size;
-			}
-		}
-		mem_region_hpa[regionidx_hpa].guest_phys_address_end
-			= mem_region_hpa[regionidx_hpa].guest_phys_address
-			+ k + page_size;
-		mem_region_hpa[regionidx_hpa].memory_size = k + page_size;
-		LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr end  "
-			"[%d]:(%p)\n", regionidx_hpa,
-			(void *)(uintptr_t)
-			(mem_region_hpa[regionidx_hpa].guest_phys_address_end));
-		LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr size "
-			"[%d]:(%p)\n", regionidx_hpa,
-			(void *)(uintptr_t)
-			(mem_region_hpa[regionidx_hpa].memory_size));
-		++regionidx_hpa;
-	}
-	return regionidx_hpa;
-}
 
 /*
  * Called from CUSE IOCTL: VHOST_SET_MEM_TABLE
@@ -817,7 +668,6 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_
 		}
 	}
 	mem->nregions = valid_regions;
-	mem->nregions_hpa = mem->nregions;
 	dev->mem = mem;
 
 	/*
@@ -828,34 +678,7 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_
 		dev->mem->regions[regionidx].address_offset = dev->mem->regions[regionidx].userspace_address - dev->mem->base_address
 			+ dev->mem->mapped_address - dev->mem->regions[regionidx].guest_phys_address;
 
-		dev->mem->nregions_hpa
-			+= check_hpa_regions(
-				dev->mem->regions[regionidx].guest_phys_address
-				+ dev->mem->regions[regionidx].address_offset,
-				dev->mem->regions[regionidx].memory_size);
-	}
-	if (dev->mem->regions_hpa != NULL) {
-		free(dev->mem->regions_hpa);
-		dev->mem->regions_hpa = NULL;
-	}
-
-	dev->mem->regions_hpa = (struct virtio_memory_regions_hpa *) calloc(1,
-		(sizeof(struct virtio_memory_regions_hpa)
-		* dev->mem->nregions_hpa));
-	if (dev->mem->regions_hpa == NULL) {
-		RTE_LOG(ERR, VHOST_CONFIG,
-			"(%"PRIu64") Failed to allocate memory for "
-			"dev->mem->regions_hpa.\n", dev->device_fh);
-		return -1;
 	}
-	if (fill_hpa_memory_regions(
-		(void *)dev->mem) != dev->mem->nregions_hpa) {
-		RTE_LOG(ERR, VHOST_CONFIG,
-			"in set_mem_table: hpa memory regions number mismatch: "
-			"[%d]\n", dev->mem->nregions_hpa);
-		return -1;
-	}
-
 	return 0;
 }
 
-- 
1.8.1.4

Thread overview: 30+ messages
2014-10-08 18:54 [dpdk-dev] [PATCH v6 00/25] user space vhost library Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 01/25] lib/librte_vhost: move src files from examples/vhost to lib/librte_vhost Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 02/25] lib/librte_vhost: rename main.c to vhost_rxtx.c and virtio-net.h to rte_virtio_net.h Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 03/25] lib/librte_vhost: clean unused codes in vhost_rxtx.c Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 04/25] lib/librte_vhost: remove mac learning, VMDQ, mac/vlan and other switching related logic Huawei Xie
2014-10-08 18:54 ` Huawei Xie [this message]
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 06/25] lib/librte_vhost: remove retry logic Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 07/25] lib/librte_vhost: patch virtio_dev_merge_tx to return packets to upper layer Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 08/25] lib/librte_vhost: calculate mbuf size Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 09/25] lib/librte_vhost: add queue_id parameter to vhost rx/tx functions Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 10/25] lib/librte_vhost: define PACKET_BURST Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 11/25] lib/librte_vhost: rte_vhost_en/dequeue_burst API Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 12/25] lib/librte_vhost: move virtio_net_config_ll structure to virtio_net.c Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 13/25] lib/librte_vhost: remove index parameter Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 14/25] lib/librte_vhost: call get_virtio_net_callbacks to get internal ops in register_cuse_device Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 15/25] lib/librte_vhost: rte_vhost_driver_register and rte_vhost_session_start API Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 16/25] lib/librte_vhost: rename init_virtio_net to rte_vhost_callback_register Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 17/25] lib/librte_vhost: vhost APIs Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 18/25] lib/librte_vhost: add debug print Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 19/25] lib/librte_vhost: VHOST SUPPORTED FEATURES Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 20/25] lib/librte_vhost: header file cleanups Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 21/25] lib/librte_vhost: static variable fixes Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 22/25] lib/librte_vhost: add priv context field to virtio_net structure Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 23/25] lib/librte_vhost: coding style fixes Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 24/25] lib/librte_vhost: add TODO/FIXME for identified issues Huawei Xie
2014-10-08 18:54 ` [dpdk-dev] [PATCH v6 25/25] lib/librte_vhost: add vhost support in Makefile Huawei Xie
2014-10-09 18:36 ` [dpdk-dev] [PATCH v6 00/25] user space vhost library Xie, Huawei
2014-10-11  8:31 ` Ouyang, Changchun
2014-10-13 19:51 ` Thomas Monjalon
2014-10-13 20:53   ` Xie, Huawei
