DPDK patches and discussions
From: "Burakov, Anatoly" <anatoly.burakov@intel.com>
To: Tiwei Bie <tiwei.bie@intel.com>,
	maxime.coquelin@redhat.com, zhihong.wang@intel.com, dev@dpdk.org
Cc: seanbh@gmail.com, stable@dpdk.org
Subject: Re: [dpdk-dev] [PATCH] net/virtio-user: fix memory hotplug support
Date: Thu, 23 Aug 2018 10:05:35 +0100	[thread overview]
Message-ID: <992313c9-cd03-03db-888b-f5dd8c072cf0@intel.com> (raw)
In-Reply-To: <20180823025721.18300-1-tiwei.bie@intel.com>

On 23-Aug-18 3:57 AM, Tiwei Bie wrote:
> Deadlock can occur when allocating memory if a vhost-kernel
> based virtio-user device is in use. Besides, it's possible
> to have many more than 64 non-contiguous hugepage-backed
> memory regions due to memory hotplug, which may cause
> problems when handling the VHOST_SET_MEM_TABLE request. A
> better solution is to have virtio-user pass all the VA
> ranges reserved by DPDK to vhost-kernel.
> 
> Bugzilla ID: 81
> Fixes: 12ecb2f63b12 ("net/virtio-user: support memory hotplug")
> Cc: stable@dpdk.org
> 
> Reported-by: Seán Harte <seanbh@gmail.com>
> Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
> ---
>   drivers/net/virtio/virtio_user/vhost_kernel.c | 64 ++++++++-----------
>   1 file changed, 27 insertions(+), 37 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c
> index b2444096c..49bd1b821 100644
> --- a/drivers/net/virtio/virtio_user/vhost_kernel.c
> +++ b/drivers/net/virtio/virtio_user/vhost_kernel.c
> @@ -70,41 +70,12 @@ static uint64_t vhost_req_user_to_kernel[] = {
>   	[VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
>   };
>   
> -struct walk_arg {
> -	struct vhost_memory_kernel *vm;
> -	uint32_t region_nr;
> -};
> -static int
> -add_memory_region(const struct rte_memseg_list *msl __rte_unused,
> -		const struct rte_memseg *ms, size_t len, void *arg)
> -{
> -	struct walk_arg *wa = arg;
> -	struct vhost_memory_region *mr;
> -	void *start_addr;
> -
> -	if (wa->region_nr >= max_regions)
> -		return -1;
> -
> -	mr = &wa->vm->regions[wa->region_nr++];
> -	start_addr = ms->addr;
> -
> -	mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
> -	mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
> -	mr->memory_size = len;
> -	mr->mmap_offset = 0;
> -
> -	return 0;
> -}
> -
> -/* By default, vhost kernel module allows 64 regions, but DPDK allows
> - * 256 segments. As a relief, below function merges those virtually
> - * adjacent memsegs into one region.
> - */
>   static struct vhost_memory_kernel *
>   prepare_vhost_memory_kernel(void)
>   {
> +	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
>   	struct vhost_memory_kernel *vm;
> -	struct walk_arg wa;
> +	uint32_t region_nr = 0, i;
>   
>   	vm = malloc(sizeof(struct vhost_memory_kernel) +
>   			max_regions *
> @@ -112,15 +83,34 @@ prepare_vhost_memory_kernel(void)
>   	if (!vm)
>   		return NULL;
>   
> -	wa.region_nr = 0;
> -	wa.vm = vm;
> +	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
> +		struct rte_memseg_list *msl = &mcfg->memsegs[i];
> +		struct vhost_memory_region *mr;
> +		void *start_addr;
> +		uint64_t len;

There is rte_memseg_list_walk() for exactly this - please do not iterate
over the memseg lists manually.
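
Roughly what I have in mind - untested sketch only, the helper name is
arbitrary, and it reuses the max_regions constant and struct
vhost_memory_kernel already present in this file:

struct walk_arg {
	struct vhost_memory_kernel *vm;
	uint32_t region_nr;
};

/* called once per memseg list by rte_memseg_list_walk() */
static int
add_memseg_list(const struct rte_memseg_list *msl, void *arg)
{
	struct walk_arg *wa = arg;
	struct vhost_memory_region *mr;
	void *start_addr = msl->base_va;
	uint64_t len = msl->page_sz * msl->memseg_arr.len;

	/* skip lists that have no VA reserved */
	if (start_addr == NULL || len == 0)
		return 0;

	if (wa->region_nr >= max_regions)
		return -1;

	mr = &wa->vm->regions[wa->region_nr++];
	mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
	mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
	mr->memory_size = len;
	mr->mmap_offset = 0; /* flags_padding */

	return 0;
}

and then prepare_vhost_memory_kernel() only needs:

	wa.region_nr = 0;
	wa.vm = vm;

	if (rte_memseg_list_walk(add_memseg_list, &wa) < 0) {
		free(vm);
		return NULL;
	}

	vm->nregions = wa.region_nr;

That way the driver does not have to poke at mem_config internals at all.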

>   
> -	if (rte_memseg_contig_walk(add_memory_region, &wa) < 0) {
> -		free(vm);
> -		return NULL;
> +		start_addr = msl->base_va;
> +		len = msl->page_sz * msl->memseg_arr.len;
> +
> +		if (start_addr == NULL || len == 0)
> +			continue;
> +
> +		if (region_nr >= max_regions) {
> +			free(vm);
> +			return NULL;
> +		}
> +
> +		mr = &vm->regions[region_nr++];
> +		mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
> +		mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
> +		mr->memory_size = len;
> +		mr->mmap_offset = 0; /* flags_padding */
> +
> +		PMD_DRV_LOG(DEBUG, "index=%u, addr=%p len=%" PRIu64,
> +				i, start_addr, len);
>   	}
>   
> -	vm->nregions = wa.region_nr;
> +	vm->nregions = region_nr;
>   	vm->padding = 0;
>   	return vm;
>   }
> 
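
For context (not part of this patch, just how I read the call site): the
table built above is what ultimately gets handed to the kernel via the
VHOST_SET_MEM_TABLE ioctl, which is why exceeding the kernel's region
limit fails the whole request - roughly:

	struct vhost_memory_kernel *vm;

	vm = prepare_vhost_memory_kernel();
	if (vm == NULL)
		return -1;

	/* vhost-kernel rejects tables with more regions than it allows
	 * (64 by default), hence the max_regions check above */
	ret = ioctl(vhostfd, VHOST_SET_MEM_TABLE, vm);
	free(vm);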


-- 
Thanks,
Anatoly

Thread overview: 14+ messages
2018-08-23  2:57 Tiwei Bie
2018-08-23  9:05 ` Burakov, Anatoly [this message]
2018-08-23 11:19   ` Sean Harte
2018-08-23 14:01     ` Burakov, Anatoly
2018-08-23 15:33       ` Sean Harte
2018-08-24  4:49       ` Tiwei Bie
2018-08-24  8:59         ` Burakov, Anatoly
2018-08-24  9:35           ` Tiwei Bie
2018-08-24 10:41             ` Burakov, Anatoly
2018-08-24 15:19               ` Burakov, Anatoly
2018-08-24 15:51                 ` Sean Harte
2018-08-27  9:15                   ` Burakov, Anatoly
2018-08-28 13:27                     ` Sean Harte
2018-08-28 14:12                       ` Burakov, Anatoly
