From mboxrd@z Thu Jan 1 00:00:00 1970
From: Huawei Xie <huawei.xie@intel.com>
To: dev@dpdk.org
Date: Mon, 20 Oct 2014 12:38:15 +0800
Message-Id: <1413779906-28113-4-git-send-email-huawei.xie@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1413779906-28113-1-git-send-email-huawei.xie@intel.com>
References: <1413779906-28113-1-git-send-email-huawei.xie@intel.com>
Subject: [dpdk-dev] [PATCH 03/14] add back two hpa region functions

check_hpa_regions, fill_hpa_memory_regions and the hpa memory region data
structure are added back. The zero copy logic will be implemented in the
vhost example.

Signed-off-by: Huawei Xie <huawei.xie@intel.com>
---
 examples/vhost/main.c | 147 ++++++++++++++++++++++++++++++++++++++++++++++++++
 examples/vhost/main.h |  15 ++++++
 2 files changed, 162 insertions(+)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 11d5d17..169a5f3 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -2481,6 +2481,153 @@ destroy_device (volatile struct virtio_net *dev)
 }
 
 /*
+ * Calculate the number of physically contiguous sub-regions within one
+ * particular region whose vhost virtual address space is contiguous.
+ * The region starts at vva_start and spans 'size' bytes.
+ */
+static uint32_t
+check_hpa_regions(uint64_t vva_start, uint64_t size)
+{
+	uint32_t i, nregions = 0, page_size = getpagesize();
+	uint64_t cur_phys_addr = 0, next_phys_addr = 0;
+	if (vva_start % page_size) {
+		LOG_DEBUG(VHOST_CONFIG,
+			"in check_continuous: vva start(%p) mod page_size(%d) "
+			"has remainder\n",
+			(void *)(uintptr_t)vva_start, page_size);
+		return 0;
+	}
+	if (size % page_size) {
+		LOG_DEBUG(VHOST_CONFIG,
+			"in check_continuous: "
+			"size((%"PRIu64")) mod page_size(%d) has remainder\n",
+			size, page_size);
+		return 0;
+	}
+	for (i = 0; i < size - page_size; i = i + page_size) {
+		cur_phys_addr
+			= rte_mem_virt2phy((void *)(uintptr_t)(vva_start + i));
+		next_phys_addr = rte_mem_virt2phy(
+			(void *)(uintptr_t)(vva_start + i + page_size));
+		if ((cur_phys_addr + page_size) != next_phys_addr) {
+			++nregions;
+			LOG_DEBUG(VHOST_CONFIG,
+				"in check_continuous: hva addr:(%p) is not "
+				"continuous with hva addr:(%p), diff:%d\n",
+				(void *)(uintptr_t)(vva_start + (uint64_t)i),
+				(void *)(uintptr_t)(vva_start + (uint64_t)i
+				+ page_size), page_size);
+			LOG_DEBUG(VHOST_CONFIG,
+				"in check_continuous: hpa addr:(%p) is not "
+				"continuous with hpa addr:(%p), "
+				"diff:(%"PRIu64")\n",
+				(void *)(uintptr_t)cur_phys_addr,
+				(void *)(uintptr_t)next_phys_addr,
+				(next_phys_addr - cur_phys_addr));
+		}
+	}
+	return nregions;
+}
+
+/*
+ * Divide each region whose vhost virtual address space is contiguous into
+ * sub-regions within which the host physical addresses are contiguous, and
+ * fill the offset (relative to the GPA), size and other information of each
+ * sub-region into regions_hpa.
+ */
+static uint32_t
+fill_hpa_memory_regions(struct virtio_memory_regions_hpa *mem_region_hpa, struct virtio_memory *virtio_memory)
+{
+	uint32_t regionidx, regionidx_hpa = 0, i, k, page_size = getpagesize();
+	uint64_t cur_phys_addr = 0, next_phys_addr = 0, vva_start;
+
+	if (mem_region_hpa == NULL)
+		return 0;
+
+	for (regionidx = 0; regionidx < virtio_memory->nregions; regionidx++) {
+		vva_start = virtio_memory->regions[regionidx].guest_phys_address +
+			virtio_memory->regions[regionidx].address_offset;
+		mem_region_hpa[regionidx_hpa].guest_phys_address
+			= virtio_memory->regions[regionidx].guest_phys_address;
+		mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
+			rte_mem_virt2phy((void *)(uintptr_t)(vva_start)) -
+			mem_region_hpa[regionidx_hpa].guest_phys_address;
+		LOG_DEBUG(VHOST_CONFIG,
+			"in fill_hpa_regions: guest phys addr start[%d]:(%p)\n",
+			regionidx_hpa,
+			(void *)(uintptr_t)
+			(mem_region_hpa[regionidx_hpa].guest_phys_address));
+		LOG_DEBUG(VHOST_CONFIG,
+			"in fill_hpa_regions: host phys addr start[%d]:(%p)\n",
+			regionidx_hpa,
+			(void *)(uintptr_t)
+			(mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
+		for (i = 0, k = 0;
+			i < virtio_memory->regions[regionidx].memory_size -
+				page_size;
+			i += page_size) {
+			cur_phys_addr = rte_mem_virt2phy(
+					(void *)(uintptr_t)(vva_start + i));
+			next_phys_addr = rte_mem_virt2phy(
+					(void *)(uintptr_t)(vva_start +
+					i + page_size));
+			if ((cur_phys_addr + page_size) != next_phys_addr) {
+				mem_region_hpa[regionidx_hpa].guest_phys_address_end =
+					mem_region_hpa[regionidx_hpa].guest_phys_address +
+					k + page_size;
+				mem_region_hpa[regionidx_hpa].memory_size
+					= k + page_size;
+				LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest "
+					"phys addr end [%d]:(%p)\n",
+					regionidx_hpa,
+					(void *)(uintptr_t)
+					(mem_region_hpa[regionidx_hpa].guest_phys_address_end));
+				LOG_DEBUG(VHOST_CONFIG,
+					"in fill_hpa_regions: guest phys addr "
+					"size [%d]:(%p)\n",
+					regionidx_hpa,
+					(void *)(uintptr_t)
+					(mem_region_hpa[regionidx_hpa].memory_size));
+				mem_region_hpa[regionidx_hpa + 1].guest_phys_address
+					= mem_region_hpa[regionidx_hpa].guest_phys_address_end;
+				++regionidx_hpa;
+				mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
+					next_phys_addr -
+					mem_region_hpa[regionidx_hpa].guest_phys_address;
+				LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest"
+					" phys addr start[%d]:(%p)\n",
+					regionidx_hpa,
+					(void *)(uintptr_t)
+					(mem_region_hpa[regionidx_hpa].guest_phys_address));
+				LOG_DEBUG(VHOST_CONFIG,
+					"in fill_hpa_regions: host phys addr "
+					"start[%d]:(%p)\n",
+					regionidx_hpa,
+					(void *)(uintptr_t)
+					(mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
+				k = 0;
+			} else {
+				k += page_size;
+			}
+		}
+		mem_region_hpa[regionidx_hpa].guest_phys_address_end
+			= mem_region_hpa[regionidx_hpa].guest_phys_address
+			+ k + page_size;
+		mem_region_hpa[regionidx_hpa].memory_size = k + page_size;
+		LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr end "
+			"[%d]:(%p)\n", regionidx_hpa,
+			(void *)(uintptr_t)
+			(mem_region_hpa[regionidx_hpa].guest_phys_address_end));
+		LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr size "
+			"[%d]:(%p)\n", regionidx_hpa,
+			(void *)(uintptr_t)
+			(mem_region_hpa[regionidx_hpa].memory_size));
+		++regionidx_hpa;
+	}
+	return regionidx_hpa;
+}
+
+/*
  * A new device is added to a data core. First the device is added to the main linked list
  * and the allocated to a specific data core.
  */
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index c15d938..ebb0b5b 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -57,6 +57,21 @@
 #define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
 #define RTE_LOGTYPE_VHOST_PORT RTE_LOGTYPE_USER3
 
+/**
+ * Information relating to memory regions including offsets to
+ * addresses in host physical space.
+ */
+struct virtio_memory_regions_hpa {
+	/**< Base guest physical address of region. */
+	uint64_t guest_phys_address;
+	/**< End guest physical address of region. */
+	uint64_t guest_phys_address_end;
+	/**< Size of region. */
+	uint64_t memory_size;
+	/**< Offset of region for gpa to hpa translation. */
+	uint64_t host_phys_addr_offset;
+};
+
 /*
  * Device linked list structure for data path.
  */
-- 
1.8.1.4
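
For context, below is a minimal, self-contained sketch (not part of this patch) of how a table built by fill_hpa_memory_regions() is meant to be consumed on the zero copy path: each sub-region carries an offset that turns a guest physical address into a host physical address. The struct mirrors the one added to main.h; the gpa_to_hpa() helper and the hard-coded regions are hypothetical illustrations only.

#include <inttypes.h>
#include <stdio.h>

/* Mirrors the structure added to examples/vhost/main.h by this patch. */
struct virtio_memory_regions_hpa {
	uint64_t guest_phys_address;		/* Base guest physical address of region. */
	uint64_t guest_phys_address_end;	/* End guest physical address of region. */
	uint64_t memory_size;			/* Size of region. */
	uint64_t host_phys_addr_offset;		/* Offset of region for gpa to hpa translation. */
};

/*
 * Hypothetical lookup: translate a guest physical address into a host
 * physical address by scanning the sub-region table. Returns 0 when the
 * address is not covered by any sub-region.
 */
static uint64_t
gpa_to_hpa(const struct virtio_memory_regions_hpa *regions, uint32_t nregions,
	uint64_t guest_pa)
{
	uint32_t i;

	for (i = 0; i < nregions; i++) {
		if (guest_pa >= regions[i].guest_phys_address &&
			guest_pa < regions[i].guest_phys_address_end)
			return guest_pa + regions[i].host_phys_addr_offset;
	}
	return 0;
}

int
main(void)
{
	/* Two fabricated sub-regions, as fill_hpa_memory_regions() might
	 * produce them for one guest region split once at a 2MB boundary. */
	const struct virtio_memory_regions_hpa regions[2] = {
		{ 0x000000, 0x200000, 0x200000, 0x7f0000000000ULL },
		{ 0x200000, 0x400000, 0x200000, 0x7f0000800000ULL },
	};

	printf("hpa = 0x%" PRIx64 "\n", gpa_to_hpa(regions, 2, 0x300000));
	return 0;
}

A linear scan is sufficient here because the fill function typically produces only a handful of sub-regions per guest memory region, and the table is built once per device.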