From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, david.marchand@redhat.com, chenbo.xia@intel.com
Date: Wed, 16 Jun 2021 16:59:12 +0200
Subject: Re: [dpdk-dev] [PATCH v2 4/6] vhost: improve NUMA reallocation
In-Reply-To: <20210615084241.139097-5-maxime.coquelin@redhat.com>
References: <20210615084241.139097-1-maxime.coquelin@redhat.com> <20210615084241.139097-5-maxime.coquelin@redhat.com>

On 6/15/21 10:42 AM, Maxime Coquelin wrote:
> This patch improves the numa_realloc() function by making use
> of rte_realloc_socket(), which takes care of the memory copy
> and freeing of the old data.
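
(For readers not familiar with it: rte_realloc_socket() follows realloc()
semantics with an extra NUMA node argument. On failure it returns NULL and
leaves the original block untouched; on success the old contents have
already been copied and the old block freed if the allocation moved, so the
caller only has to store back the returned pointer. A minimal sketch of the
pattern, with a hypothetical 'struct foo' and 'move_to_node' for
illustration only:

	/* needs <rte_malloc.h>; 'struct foo' is a made-up payload type */
	static struct foo *
	move_to_node(struct foo *obj, int node)
	{
		struct foo *tmp;

		tmp = rte_realloc_socket(obj, sizeof(*tmp), 0, node);
		if (tmp == NULL)
			return obj;	/* failure: old block is untouched, keep it */
		return tmp;	/* success: contents copied, old block freed if it moved */
	}

This is why the patch can drop the manual memcpy()/rte_free() pairs.)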
> 
> Suggested-by: David Marchand <david.marchand@redhat.com>
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  lib/vhost/vhost_user.c | 177 +++++++++++++++++------------------------
>  1 file changed, 73 insertions(+), 104 deletions(-)
> 
> diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> index 0e9e26ebe0..b298312db6 100644
> --- a/lib/vhost/vhost_user.c
> +++ b/lib/vhost/vhost_user.c
> @@ -480,139 +480,108 @@ vhost_user_set_vring_num(struct virtio_net **pdev,
>  static struct virtio_net*
>  numa_realloc(struct virtio_net *dev, int index)
>  {
> -	int oldnode, newnode;
> +	int node;
>  	struct virtio_net *old_dev;
> -	struct vhost_virtqueue *old_vq, *vq;
> -	struct vring_used_elem *new_shadow_used_split;
> -	struct vring_used_elem_packed *new_shadow_used_packed;
> -	struct batch_copy_elem *new_batch_copy_elems;
> +	struct vhost_virtqueue *vq;
> +	struct batch_copy_elem *bce;
> +	struct guest_page *gp;
> +	struct rte_vhost_memory *mem;
> +	size_t mem_size;
>  	int ret;
> 
>  	if (dev->flags & VIRTIO_DEV_RUNNING)
>  		return dev;
> 
>  	old_dev = dev;
> -	vq = old_vq = dev->virtqueue[index];
> -
> -	ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
> -			MPOL_F_NODE | MPOL_F_ADDR);
> +	vq = dev->virtqueue[index];
> 
> -	/* check if we need to reallocate vq */
> -	ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
> -			MPOL_F_NODE | MPOL_F_ADDR);
> +	ret = get_mempolicy(&node, NULL, 0, vq->desc, MPOL_F_NODE | MPOL_F_ADDR);
>  	if (ret) {
> -		VHOST_LOG_CONFIG(ERR,
> -			"Unable to get vq numa information.\n");
> +		VHOST_LOG_CONFIG(ERR, "Unable to get virtqueue %d numa information.\n", index);
>  		return dev;
>  	}
> -	if (oldnode != newnode) {
> -		VHOST_LOG_CONFIG(INFO,
> -			"reallocate vq from %d to %d node\n", oldnode, newnode);
> -		vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
> -		if (!vq)
> -			return dev;
> 
> -		memcpy(vq, old_vq, sizeof(*vq));
> +	vq = rte_realloc_socket(vq, sizeof(*vq), 0, node);
> +	if (!vq) {
> +		VHOST_LOG_CONFIG(ERR, "Failed to realloc virtqueue %d on node %d\n",
> +				index, node);
> +		return dev;
> +	}
> 
> -		if (vq_is_packed(dev)) {
> -			new_shadow_used_packed = rte_malloc_socket(NULL,
> -					vq->size *
> -					sizeof(struct vring_used_elem_packed),
> -					RTE_CACHE_LINE_SIZE,
> -					newnode);
> -			if (new_shadow_used_packed) {
> -				rte_free(vq->shadow_used_packed);
> -				vq->shadow_used_packed = new_shadow_used_packed;
> -			}
> -		} else {
> -			new_shadow_used_split = rte_malloc_socket(NULL,
> -					vq->size *
> -					sizeof(struct vring_used_elem),
> -					RTE_CACHE_LINE_SIZE,
> -					newnode);
> -			if (new_shadow_used_split) {
> -				rte_free(vq->shadow_used_split);
> -				vq->shadow_used_split = new_shadow_used_split;
> -			}
> -		}
> +	if (vq != dev->virtqueue[index]) {
> +		VHOST_LOG_CONFIG(INFO, "reallocated virtqueue on node %d\n", node);
> +		dev->virtqueue[index] = vq;
> +		vhost_user_iotlb_init(dev, index);
> +	}
> 
> -		new_batch_copy_elems = rte_malloc_socket(NULL,
> -				vq->size * sizeof(struct batch_copy_elem),
> -				RTE_CACHE_LINE_SIZE,
> -				newnode);
> -		if (new_batch_copy_elems) {
> -			rte_free(vq->batch_copy_elems);
> -			vq->batch_copy_elems = new_batch_copy_elems;
> +	if (vq_is_packed(dev)) {
> +		struct vring_used_elem_packed *sup;
> +
> +		sup = rte_realloc_socket(vq->shadow_used_packed, vq->size * sizeof(*sup),
> +				RTE_CACHE_LINE_SIZE, node);
> +		if (!sup) {
> +			VHOST_LOG_CONFIG(ERR, "Failed to realloc shadow packed on node %d\n", node);
> +			return dev;
>  		}
> +		vq->shadow_used_packed = sup;
> 
> -		if (vq->log_cache) {
> -			struct log_cache_entry *log_cache;
> +	} else {
> +		struct vring_used_elem *sus;
> 
> -			log_cache = rte_realloc_socket(vq->log_cache,
> -					sizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR,
> -					0, newnode);
> -			if (log_cache)
> -				vq->log_cache = log_cache;
> +		sus = rte_realloc_socket(vq->shadow_used_split, vq->size * sizeof(*sus),
> +				RTE_CACHE_LINE_SIZE, node);
> +		if (!sus) {
> +			VHOST_LOG_CONFIG(ERR, "Failed to realloc shadow split on node %d\n", node);
> +			return dev;
>  		}
> -
> -		rte_free(old_vq);
> +		vq->shadow_used_split = sus;
>  	}
> 
> -	/* check if we need to reallocate dev */
> -	ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
> -			MPOL_F_NODE | MPOL_F_ADDR);
> -	if (ret) {
> -		VHOST_LOG_CONFIG(ERR,
> -			"Unable to get dev numa information.\n");
> -		goto out;
> +	bce = rte_realloc_socket(vq->batch_copy_elems, vq->size * sizeof(*bce),
> +			RTE_CACHE_LINE_SIZE, node);
> +	if (!bce) {
> +		VHOST_LOG_CONFIG(ERR, "Failed to realloc batch copy elem on node %d\n", node);
> +		return dev;
>  	}
> -	if (oldnode != newnode) {
> -		struct rte_vhost_memory *old_mem;
> -		struct guest_page *old_gp;
> -		ssize_t mem_size, gp_size;
> +	vq->batch_copy_elems = bce;
> 
> -		VHOST_LOG_CONFIG(INFO,
> -			"reallocate dev from %d to %d node\n",
> -			oldnode, newnode);
> -		dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
> -		if (!dev) {
> -			dev = old_dev;
> -			goto out;
> -		}
> -
> -		memcpy(dev, old_dev, sizeof(*dev));
> -		rte_free(old_dev);
> +	if (vq->log_cache) {
> +		struct log_cache_entry *lc;
> 
> -		mem_size = sizeof(struct rte_vhost_memory) +
> -			sizeof(struct rte_vhost_mem_region) * dev->mem->nregions;
> -		old_mem = dev->mem;
> -		dev->mem = rte_malloc_socket(NULL, mem_size, 0, newnode);
> -		if (!dev->mem) {
> -			dev->mem = old_mem;
> -			goto out;
> +		lc = rte_realloc_socket(vq->log_cache, sizeof(*lc) * VHOST_LOG_CACHE_NR, 0, node);
> +		if (!lc) {
> +			VHOST_LOG_CONFIG(ERR, "Failed to realloc log cache on node %d\n", node);
> +			return dev;
>  		}
> +		vq->log_cache = lc;
> +	}
> 
> -		memcpy(dev->mem, old_mem, mem_size);
> -		rte_free(old_mem);
> -
> -		gp_size = dev->max_guest_pages * sizeof(*dev->guest_pages);
> -		old_gp = dev->guest_pages;
> -		dev->guest_pages = rte_malloc_socket(NULL, gp_size, RTE_CACHE_LINE_SIZE, newnode);
> -		if (!dev->guest_pages) {
> -			dev->guest_pages = old_gp;
> -			goto out;
> -		}
> +	dev = rte_realloc_socket(old_dev, sizeof(*dev), 0, node);
> +	if (!dev) {
> +		VHOST_LOG_CONFIG(ERR, "Failed to realloc dev on node %d\n", node);
> +		return old_dev;
> +	}
> 
> -		memcpy(dev->guest_pages, old_gp, gp_size);
> -		rte_free(old_gp);
> +	if (dev != old_dev) {
> +		VHOST_LOG_CONFIG(INFO, "reallocated device on node %d\n", node);
> +		vhost_devices[dev->vid] = dev;
>  	}
> 
> -out:
> -	dev->virtqueue[index] = vq;
> -	vhost_devices[dev->vid] = dev;
> +	mem_size = sizeof(struct rte_vhost_memory) +
> +		sizeof(struct rte_vhost_mem_region) * dev->mem->nregions;
> +	mem = rte_realloc_socket(dev->mem, mem_size, 0, node);
> +	if (!mem) {
> +		VHOST_LOG_CONFIG(ERR, "Failed to realloc mem table on node %d\n", node);
> +		return dev;
> +	}

"dev->mem = mem;" is missing here. Without it, dev->mem keeps pointing at
the old table, which rte_realloc_socket() may already have freed if the
allocation moved, and the new block is leaked.

> 
> -	if (old_vq != vq)
> -		vhost_user_iotlb_init(dev, index);
> +	gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp),
> +			RTE_CACHE_LINE_SIZE, node);
> +	if (!gp) {
> +		VHOST_LOG_CONFIG(ERR, "Failed to realloc guest pages on node %d\n", node);
> +		return dev;
> +	}
> +	dev->guest_pages = gp;
> 
>  	return dev;
>  }
> 
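
To be explicit about the fix noted above, here is how I would expect the
final mem-table hunk to read (a sketch only, untested, reusing the patch's
own variables and error handling):

	mem = rte_realloc_socket(dev->mem, mem_size, 0, node);
	if (!mem) {
		VHOST_LOG_CONFIG(ERR, "Failed to realloc mem table on node %d\n", node);
		return dev;
	}
	dev->mem = mem;	/* store the (possibly moved) pointer */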