Date: Thu, 26 Sep 2019 15:39:05 +0800
From: Tiwei Bie
To: Jin Yu
Cc: dev@dpdk.org, changpeng.liu@intel.com, maxime.coquelin@redhat.com,
	zhihong.wang@intel.com, Lin Li, Xun Ni, Yu Zhang
Message-ID: <20190926073905.GA12440@___>
References: <20190917145234.16951>
	<20190920120102.29828-1-jin.yu@intel.com>
	<20190920120102.29828-5-jin.yu@intel.com>
In-Reply-To: <20190920120102.29828-5-jin.yu@intel.com>
Subject: Re: [dpdk-dev] [PATCH v7 04/10] vhost: add two new messages to
	support a shared buffer

On Fri, Sep 20, 2019 at 08:00:56PM +0800, Jin Yu wrote:
> This patch introduces two new messages VHOST_USER_GET_INFLIGHT_FD
> and VHOST_USER_SET_INFLIGHT_FD to support transferring a shared
> buffer between qemu and backend.
> 
> Signed-off-by: Lin Li
> Signed-off-by: Xun Ni
> Signed-off-by: Yu Zhang
> Signed-off-by: Jin Yu
> ---
>  lib/librte_vhost/vhost.h      |   7 +
>  lib/librte_vhost/vhost_user.c | 260 +++++++++++++++++++++++++++++++++-
>  lib/librte_vhost/vhost_user.h |   4 +-
>  3 files changed, 269 insertions(+), 2 deletions(-)
> 
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index 884befa85..d67ba849a 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -286,6 +286,12 @@ struct guest_page {
>  	uint64_t size;
>  };
>  
> +struct inflight_mem_info {
> +	int		fd;
> +	void		*addr;
> +	uint64_t	size;
> +};
> +
>  /**
>   * Device structure contains all configuration information relating
>   * to the device.
> @@ -303,6 +309,7 @@ struct virtio_net {
>  	uint32_t		nr_vring;
>  	int			dequeue_zero_copy;
>  	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
> +	struct inflight_mem_info *inflight_info;
>  #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
>  	char ifname[IF_NAME_SZ];
>  	uint64_t		log_size;
> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
> index c9e29ece8..66582a30f 100644
> --- a/lib/librte_vhost/vhost_user.c
> +++ b/lib/librte_vhost/vhost_user.c
> @@ -37,6 +37,10 @@
>  #ifdef RTE_LIBRTE_VHOST_POSTCOPY
>  #include <linux/userfaultfd.h>
>  #endif
> +#ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
> +#include <linux/memfd.h>
> +#define MEMFD_SUPPORTED
> +#endif
>  
>  #include <rte_common.h>
>  #include <rte_malloc.h>
> @@ -49,6 +53,15 @@
>  #define VIRTIO_MIN_MTU 68
>  #define VIRTIO_MAX_MTU 65535
> +#define INFLIGHT_ALIGNMENT	64
> +#define INFLIGHT_VERSION	0xabcd

Why is the version 0xabcd? Shouldn't it be 1?

https://git.qemu.org/?p=qemu.git;a=blob;f=docs/interop/vhost-user.rst;h=7827b710aa0a223f6f184b229f4b5ced6420b638;hb=HEAD#l551
https://git.qemu.org/?p=qemu.git;a=blob;f=docs/interop/vhost-user.rst;h=7827b710aa0a223f6f184b229f4b5ced6420b638;hb=HEAD#l658

> +#define VIRTQUEUE_MAX_SIZE	1024
> +
> +#define CLOEXEC	0x0001U
> +
> +#define ALIGN_DOWN(n, m) ((n) / (m) * (m))
> +#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
> +
>  static const char *vhost_message_str[VHOST_USER_MAX] = {
>  	[VHOST_USER_NONE] = "VHOST_USER_NONE",
>  	[VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
> @@ -78,6 +91,8 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
>  	[VHOST_USER_POSTCOPY_ADVISE] = "VHOST_USER_POSTCOPY_ADVISE",
>  	[VHOST_USER_POSTCOPY_LISTEN] = "VHOST_USER_POSTCOPY_LISTEN",
>  	[VHOST_USER_POSTCOPY_END] = "VHOST_USER_POSTCOPY_END",
> +	[VHOST_USER_GET_INFLIGHT_FD] = "VHOST_USER_GET_INFLIGHT_FD",
> +	[VHOST_USER_SET_INFLIGHT_FD] = "VHOST_USER_SET_INFLIGHT_FD",
>  };
>  
>  static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg);
> @@ -160,6 +175,21 @@ vhost_backend_cleanup(struct virtio_net *dev)
>  		dev->log_addr = 0;
>  	}
>  
> +	if (dev->inflight_info->addr) {
> +		munmap(dev->inflight_info->addr, dev->inflight_info->size);
> +		dev->inflight_info->addr = NULL;
> +	}
> +
> +	if (dev->inflight_info->fd > 0) {
> +		close(dev->inflight_info->fd);
> +		dev->inflight_info->fd = -1;
> +	}
> +
> +	if (dev->inflight_info) {
> +		free(dev->inflight_info);
> +		dev->inflight_info = NULL;
> +	}

You should check that dev->inflight_info isn't NULL before
dereferencing it.

> +
>  	if (dev->slave_req_fd >= 0) {
>  		close(dev->slave_req_fd);
>  		dev->slave_req_fd = -1;
> @@ -1165,6 +1195,233 @@ virtio_is_ready(struct virtio_net *dev)
>  	return 1;
>  }
>  
> +static int
> +mem_create(const char *name, unsigned int flags)
> +{
> +#ifdef MEMFD_SUPPORTED
> +	return memfd_create(name, flags);
> +#else
> +	RTE_LOG(ERR, VHOST_CONFIG,
> +		"doesn't support memfd--name:%s and flag:%x\n",
> +		name, flags);

You probably don't want to always print an error when memfd isn't
available.

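Something along these lines (just a rough, untested sketch) would keep
the mkstemp() fallback path quiet, e.g. by logging at DEBUG level
instead of ERR:

static int
mem_create(const char *name, unsigned int flags)
{
#ifdef MEMFD_SUPPORTED
	return memfd_create(name, flags);
#else
	/*
	 * memfd isn't available, but the caller falls back to mkstemp(),
	 * so don't report this as an error on every call.
	 */
	RTE_LOG(DEBUG, VHOST_CONFIG,
		"memfd not supported, name: %s, flags: %x\n",
		name, flags);
	return -1;
#endif
}
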
> +	return -1;
> +#endif
> +}
> +
> +static void *
> +inflight_mem_alloc(const char *name, size_t size, int *fd)
> +{
> +	void *ptr;
> +	int mfd = -1;
> +	char fname[20] = "/tmp/memfd-XXXXXX";
> +
> +	*fd = -1;
> +	mfd = mem_create(name, CLOEXEC);
> +	if (mfd != -1) {
> +		if (ftruncate(mfd, size) == -1) {
> +			RTE_LOG(ERR, VHOST_CONFIG,
> +				"ftruncate fail for alloc inflight buffer\n");
> +			close(mfd);
> +			return NULL;
> +		}
> +	} else {
> +		mfd = mkstemp(fname);
> +		unlink(fname);
> +
> +		if (mfd == -1) {
> +			RTE_LOG(ERR, VHOST_CONFIG,
> +				"mkstemp fail for alloc inflight buffer\n");
> +			return NULL;
> +		}
> +
> +		if (ftruncate(mfd, size) == -1) {
> +			RTE_LOG(ERR, VHOST_CONFIG,
> +				"ftruncate fail for alloc inflight buffer\n");
> +			close(mfd);
> +			return NULL;
> +		}
> +	}
> +
> +	ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0);
> +	if (ptr == MAP_FAILED) {
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"mmap fail for alloc inflight buffer\n");
> +		close(mfd);
> +		return NULL;
> +	}
> +
> +	*fd = mfd;
> +	return ptr;
> +}
> +
> +static uint32_t
> +get_pervq_shm_size_split(uint16_t queue_size)
> +{
> +	return ALIGN_UP(sizeof(struct rte_vhost_inflight_desc_split) *
> +		queue_size + sizeof(uint64_t) + sizeof(uint16_t) * 4,
> +		INFLIGHT_ALIGNMENT);
> +}
> +
> +static uint32_t
> +get_pervq_shm_size_packed(uint16_t queue_size)
> +{
> +	return ALIGN_UP(sizeof(struct rte_vhost_inflight_desc_packed) *
> +		queue_size + sizeof(uint64_t) + sizeof(uint16_t) * 6 +
> +		sizeof(uint8_t) * 9, INFLIGHT_ALIGNMENT);
> +}
> +
> +static int
> +vhost_user_get_inflight_fd(struct virtio_net **pdev,
> +			   VhostUserMsg *msg,
> +			   int main_fd __rte_unused)
> +{
> +	int fd, i, j;
> +	uint64_t pervq_inflight_size, mmap_size;
> +	void *addr;
> +	uint16_t num_queues, queue_size;
> +	struct virtio_net *dev = *pdev;
> +	struct rte_vhost_inflight_info_packed *inflight_packed = NULL;
> +
> +	if (msg->size != sizeof(msg->payload.inflight)) {
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"Invalid get_inflight_fd message size is %d",
> +			msg->size);
> +		return RTE_VHOST_MSG_RESULT_ERR;
> +	}
> +
> +	dev->inflight_info = calloc(1, sizeof(struct inflight_mem_info));

You need to check whether dev->inflight_info has already been
allocated; otherwise the previously allocated memory may leak.

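Something like this (untested sketch) would avoid leaking a previous
allocation, i.e. the same pattern vhost_user_set_inflight_fd() below
already uses:

	/* Only allocate the inflight area once; keep any existing one. */
	if (dev->inflight_info == NULL) {
		dev->inflight_info = calloc(1,
				sizeof(struct inflight_mem_info));
		if (dev->inflight_info == NULL) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"failed to alloc dev inflight area\n");
			return RTE_VHOST_MSG_RESULT_ERR;
		}
	}
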
> +	if (!dev->inflight_info) {
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"Failed to alloc dev inflight area");
> +		return RTE_VHOST_MSG_RESULT_ERR;
> +	}
> +
> +	num_queues = msg->payload.inflight.num_queues;
> +	queue_size = msg->payload.inflight.queue_size;
> +
> +	RTE_LOG(INFO, VHOST_CONFIG, "get_inflight_fd num_queues: %u\n",
> +		msg->payload.inflight.num_queues);
> +	RTE_LOG(INFO, VHOST_CONFIG, "get_inflight_fd queue_size: %u\n",
> +		msg->payload.inflight.queue_size);
> +
> +	if (vq_is_packed(dev))
> +		pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
> +	else
> +		pervq_inflight_size = get_pervq_shm_size_split(queue_size);
> +
> +	mmap_size = num_queues * pervq_inflight_size;
> +	addr = inflight_mem_alloc("vhost-inflight", mmap_size, &fd);
> +	if (!addr) {
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"Failed to alloc vhost inflight area");
> +		msg->payload.inflight.mmap_size = 0;
> +		return RTE_VHOST_MSG_RESULT_ERR;
> +	}
> +	memset(addr, 0, mmap_size);
> +
> +	dev->inflight_info->addr = addr;
> +	dev->inflight_info->size = msg->payload.inflight.mmap_size = mmap_size;
> +	dev->inflight_info->fd = msg->fds[0] = fd;
> +	msg->payload.inflight.mmap_offset = 0;
> +	msg->fd_num = 1;
> +
> +	if (vq_is_packed(dev)) {
> +		for (i = 0; i < num_queues; i++) {
> +			inflight_packed =
> +				(struct rte_vhost_inflight_info_packed *)addr;
> +			inflight_packed->used_wrap_counter = 1;
> +			inflight_packed->old_used_wrap_counter = 1;
> +			for (j = 0; j < queue_size; j++)
> +				inflight_packed->desc[j].next = j + 1;
> +			addr = (void *)((char *)addr + pervq_inflight_size);
> +		}
> +	}
> +
> +	RTE_LOG(INFO, VHOST_CONFIG,
> +		"send inflight mmap_size: %"PRIu64"\n",
> +		msg->payload.inflight.mmap_size);
> +	RTE_LOG(INFO, VHOST_CONFIG,
> +		"send inflight mmap_offset: %"PRIu64"\n",
> +		msg->payload.inflight.mmap_offset);
> +	RTE_LOG(INFO, VHOST_CONFIG,
> +		"send inflight fd: %d\n", msg->fds[0]);
> +
> +	return RTE_VHOST_MSG_RESULT_REPLY;
> +}
> +
> +static int
> +vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
> +			   int main_fd __rte_unused)
> +{
> +	int fd;
> +	uint64_t mmap_size, mmap_offset;
> +	uint16_t num_queues, queue_size;
> +	uint32_t pervq_inflight_size;
> +	void *addr;
> +	struct virtio_net *dev = *pdev;
> +
> +	fd = msg->fds[0];
> +	if (msg->size != sizeof(msg->payload.inflight) || fd < 0) {
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"Invalid set_inflight_fd message size is %d,fd is %d\n",
> +			msg->size, fd);
> +		return RTE_VHOST_MSG_RESULT_ERR;
> +	}
> +
> +	mmap_size = msg->payload.inflight.mmap_size;
> +	mmap_offset = msg->payload.inflight.mmap_offset;
> +	num_queues = msg->payload.inflight.num_queues;
> +	queue_size = msg->payload.inflight.queue_size;
> +
> +	if (vq_is_packed(dev))
> +		pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
> +	else
> +		pervq_inflight_size = get_pervq_shm_size_split(queue_size);
> +
> +	RTE_LOG(INFO, VHOST_CONFIG,
> +		"set_inflight_fd mmap_size: %"PRIu64"\n", mmap_size);
> +	RTE_LOG(INFO, VHOST_CONFIG,
> +		"set_inflight_fd mmap_offset: %"PRIu64"\n", mmap_offset);
> +	RTE_LOG(INFO, VHOST_CONFIG,
> +		"set_inflight_fd num_queues: %u\n", num_queues);
> +	RTE_LOG(INFO, VHOST_CONFIG,
> +		"set_inflight_fd queue_size: %u\n", queue_size);
> +	RTE_LOG(INFO, VHOST_CONFIG,
> +		"set_inflight_fd fd: %d\n", fd);
> +	RTE_LOG(INFO, VHOST_CONFIG,
> +		"set_inflight_fd pervq_inflight_size: %d\n",
> +		pervq_inflight_size);
> +
> +	if (!dev->inflight_info) {
> +		dev->inflight_info = calloc(1,
> +				sizeof(struct inflight_mem_info));
> +		if (dev->inflight_info == NULL) {
> +			RTE_LOG(ERR, VHOST_CONFIG,
> +				"Failed to alloc dev inflight area");
> +			return RTE_VHOST_MSG_RESULT_ERR;
> +		}
> +	}
> +
> +	if (dev->inflight_info->addr)
> +		munmap(dev->inflight_info->addr, dev->inflight_info->size);

dev->inflight_info->addr won't be reset to NULL when the mmap() below
fails.

> +
> +	addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
> +		    fd, mmap_offset);
> +	if (addr == MAP_FAILED) {
> +		RTE_LOG(ERR, VHOST_CONFIG, "failed to mmap share memory.\n");
> +		return RTE_VHOST_MSG_RESULT_ERR;
> +	}
> +
> +	if (dev->inflight_info->fd)
> +		close(dev->inflight_info->fd);
> +
> +	dev->inflight_info->fd = fd;
> +	dev->inflight_info->addr = addr;
> +	dev->inflight_info->size = mmap_size;
> +
> +	return RTE_VHOST_MSG_RESULT_OK;
> +}
> +
>  static int
>  vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
>  			int main_fd __rte_unused)
> @@ -1762,9 +2019,10 @@ static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = {
>  	[VHOST_USER_POSTCOPY_ADVISE] = vhost_user_set_postcopy_advise,
>  	[VHOST_USER_POSTCOPY_LISTEN] = vhost_user_set_postcopy_listen,
>  	[VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end,
> +	[VHOST_USER_GET_INFLIGHT_FD] = vhost_user_get_inflight_fd,
> +	[VHOST_USER_SET_INFLIGHT_FD] = vhost_user_set_inflight_fd,
>  };
>  
> -
>  /* return bytes# of read on success or negative val on failure. */
>  static int
>  read_vhost_message(int sockfd, struct VhostUserMsg *msg)
> diff --git a/lib/librte_vhost/vhost_user.h b/lib/librte_vhost/vhost_user.h
> index 17a1d7bca..6563f7315 100644
> --- a/lib/librte_vhost/vhost_user.h
> +++ b/lib/librte_vhost/vhost_user.h
> @@ -54,7 +54,9 @@ typedef enum VhostUserRequest {
>  	VHOST_USER_POSTCOPY_ADVISE = 28,
>  	VHOST_USER_POSTCOPY_LISTEN = 29,
>  	VHOST_USER_POSTCOPY_END = 30,
> -	VHOST_USER_MAX = 31
> +	VHOST_USER_GET_INFLIGHT_FD = 31,
> +	VHOST_USER_SET_INFLIGHT_FD = 32,
> +	VHOST_USER_MAX = 33
>  } VhostUserRequest;
>  
>  typedef enum VhostUserSlaveRequest {
> -- 
> 2.17.2
> 