From mboxrd@z Thu Jan  1 00:00:00 1970
From: Yuanhan Liu
To: dev@dpdk.org
Cc: Maxime Coquelin, Harris James R, Liu Changpeng, Yuanhan Liu
Date: Sat,  1 Apr 2017 15:22:43 +0800
Message-Id: <1491031380-1499-6-git-send-email-yuanhan.liu@linux.intel.com>
X-Mailer: git-send-email 1.9.0
In-Reply-To: <1491031380-1499-1-git-send-email-yuanhan.liu@linux.intel.com>
References: <1490705142-893-1-git-send-email-yuanhan.liu@linux.intel.com>
 <1491031380-1499-1-git-send-email-yuanhan.liu@linux.intel.com>
Subject: [dpdk-dev] [PATCH v4 05/22] vhost: export guest memory regions
List-Id: DPDK patches and discussions

Some vhost-user drivers may need this info to set up their own page
tables for GPA (guest physical address) to HPA (host physical address)
translation. SPDK (Storage Performance Development Kit) is one example.

Besides, by exporting this memory info, we could also export
gpa_to_vva() as an inline function, which helps performance.
Otherwise, it would have to be referenced indirectly through a "vid".

Signed-off-by: Yuanhan Liu
Reviewed-by: Maxime Coquelin
---
v2: add API comments
---
 lib/librte_vhost/rte_vhost_version.map |  1 +
 lib/librte_vhost/rte_virtio_net.h      | 38 ++++++++++++++++++++++++++++++++++
 lib/librte_vhost/vhost.c               | 23 ++++++++++++++++++++
 lib/librte_vhost/vhost.h               | 28 ++-----------------------
 lib/librte_vhost/vhost_user.c          | 12 +++++------
 5 files changed, 70 insertions(+), 32 deletions(-)

diff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map
index 1150017..664a5f3 100644
--- a/lib/librte_vhost/rte_vhost_version.map
+++ b/lib/librte_vhost/rte_vhost_version.map
@@ -35,6 +35,7 @@ DPDK_17.05 {
 	rte_vhost_driver_enable_features;
 	rte_vhost_driver_get_features;
 	rte_vhost_driver_set_features;
+	rte_vhost_get_mem_table;
 	rte_vhost_get_mtu;
 
 } DPDK_16.07;
diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index 8c8e67e..8c1c172 100644
--- a/lib/librte_vhost/rte_virtio_net.h
+++ b/lib/librte_vhost/rte_virtio_net.h
@@ -59,6 +59,28 @@ enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
 
 /**
+ * Information relating to memory regions, including offsets to
+ * addresses in QEMU's memory file.
+ */
+struct rte_vhost_mem_region {
+	uint64_t guest_phys_addr;
+	uint64_t guest_user_addr;
+	uint64_t host_user_addr;
+	uint64_t size;
+	void	 *mmap_addr;
+	uint64_t mmap_size;
+	int fd;
+};
+
+/**
+ * Memory structure includes region and mapping information.
+ */
+struct rte_vhost_memory {
+	uint32_t nregions;
+	struct rte_vhost_mem_region regions[0];
+};
+
+/**
  * Device and vring operations.
  */
 struct virtio_net_device_ops {
@@ -246,4 +268,20 @@ uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
 uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
 
+/**
+ * Get guest mem table: a list of memory regions.
+ *
+ * An rte_vhost_memory object will be allocated internally, to hold the
+ * guest memory regions. The application should free it in the
+ * destroy_device() callback.
+ *
+ * @param vid
+ *  vhost device ID
+ * @param mem
+ *  To store the returned mem regions
+ * @return
+ *  0 on success, -1 on failure
+ */
+int rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
+
 #endif /* _VIRTIO_NET_H_ */
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 7d7bb3c..2b41652 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -359,6 +359,29 @@ struct virtio_net *
 	return 0;
 }
 
+int
+rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
+{
+	struct virtio_net *dev;
+	struct rte_vhost_memory *m;
+	size_t size;
+
+	dev = get_device(vid);
+	if (!dev)
+		return -1;
+
+	size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
+	m = malloc(sizeof(struct rte_vhost_memory) + size);
+	if (!m)
+		return -1;
+
+	m->nregions = dev->mem->nregions;
+	memcpy(m->regions, dev->mem->regions, size);
+	*mem = m;
+
+	return 0;
+}
+
 uint16_t
 rte_vhost_avail_entries(int vid, uint16_t queue_id)
 {
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 6186216..6d1986a 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -166,7 +166,7 @@ struct guest_page {
  */
 struct virtio_net {
 	/* Frontend (QEMU) memory and memory region information */
-	struct virtio_memory	*mem;
+	struct rte_vhost_memory	*mem;
 	uint64_t		features;
 	uint64_t		protocol_features;
 	int			vid;
@@ -192,30 +192,6 @@ struct virtio_net {
 	struct guest_page       *guest_pages;
 } __rte_cache_aligned;
 
-/**
- * Information relating to memory regions including offsets to
- * addresses in QEMUs memory file.
- */
-struct virtio_memory_region {
-	uint64_t guest_phys_addr;
-	uint64_t guest_user_addr;
-	uint64_t host_user_addr;
-	uint64_t size;
-	void	 *mmap_addr;
-	uint64_t mmap_size;
-	int fd;
-};
-
-
-/**
- * Memory structure includes region and mapping information.
- */
-struct virtio_memory {
-	uint32_t nregions;
-	struct virtio_memory_region regions[0];
-};
-
-
 /* Macros for printing using RTE_LOG */
 #define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
 #define RTE_LOGTYPE_VHOST_DATA   RTE_LOGTYPE_USER1
@@ -255,7 +231,7 @@ struct virtio_memory {
 static inline uint64_t __attribute__((always_inline))
 gpa_to_vva(struct virtio_net *dev, uint64_t gpa)
 {
-	struct virtio_memory_region *reg;
+	struct rte_vhost_mem_region *reg;
 	uint32_t i;
 
 	for (i = 0; i < dev->mem->nregions; i++) {
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 7fe07bc..30cf8f8 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -92,7 +92,7 @@
 free_mem_region(struct virtio_net *dev)
 {
 	uint32_t i;
-	struct virtio_memory_region *reg;
+	struct rte_vhost_mem_region *reg;
 
 	if (!dev || !dev->mem)
 		return;
@@ -310,7 +310,7 @@
 static uint64_t
 qva_to_vva(struct virtio_net *dev, uint64_t qva)
 {
-	struct virtio_memory_region *reg;
+	struct rte_vhost_mem_region *reg;
 	uint32_t i;
 
 	/* Find the region where the address lives. */
@@ -438,7 +438,7 @@
 }
 
 static void
-add_guest_pages(struct virtio_net *dev, struct virtio_memory_region *reg,
+add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
 		uint64_t page_size)
 {
 	uint64_t reg_size = reg->size;
@@ -498,7 +498,7 @@
 vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 {
 	struct VhostUserMemory memory = pmsg->payload.memory;
-	struct virtio_memory_region *reg;
+	struct rte_vhost_mem_region *reg;
 	void *mmap_addr;
 	uint64_t mmap_size;
 	uint64_t mmap_offset;
@@ -525,8 +525,8 @@
 			sizeof(struct guest_page));
 	}
 
-	dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct virtio_memory) +
-		sizeof(struct virtio_memory_region) * memory.nregions, 0);
+	dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
+		sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
 	if (dev->mem == NULL) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"(%d) failed to allocate memory for dev->mem\n",
-- 
1.9.0
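
For readers wondering how a backend is meant to consume the new API, below is a minimal sketch of a vhost-user driver (an SPDK-like storage backend, for instance) that snapshots the guest memory table in its new_device() callback and frees it in destroy_device(), as the API comment above requires. Only rte_vhost_get_mem_table(), struct rte_vhost_memory, struct rte_vhost_mem_region and struct virtio_net_device_ops come from this patch's tree; the example_* names, the single static pointer, and the printf-based dump are illustrative assumptions, not part of the patch.

```c
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_virtio_net.h>

/* One device only, for brevity; a real backend would keep this per vid. */
static struct rte_vhost_memory *guest_mem;

static int
example_new_device(int vid)
{
	uint32_t i;

	/* Snapshot the guest memory table; the copy is owned by us. */
	if (rte_vhost_get_mem_table(vid, &guest_mem) < 0)
		return -1;

	for (i = 0; i < guest_mem->nregions; i++) {
		struct rte_vhost_mem_region *reg = &guest_mem->regions[i];

		/* Base of the GPA -> host-virtual mapping for this region. */
		printf("region %u: GPA 0x%" PRIx64 " -> VVA 0x%" PRIx64
		       ", size 0x%" PRIx64 "\n",
		       i, reg->guest_phys_addr, reg->host_user_addr,
		       reg->size);
	}

	return 0;
}

static void
example_destroy_device(int vid __attribute__((unused)))
{
	/* The table was malloc()'d by rte_vhost_get_mem_table(). */
	free(guest_mem);
	guest_mem = NULL;
}

static const struct virtio_net_device_ops example_ops = {
	.new_device     = example_new_device,
	.destroy_device = example_destroy_device,
};
```

Once example_ops is registered with the vhost library's callback registration API, each region's guest_phys_addr/host_user_addr pair gives the driver everything it needs to build its own GPA-to-HPA tables without going back through the "vid", which is the performance point made in the commit message.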