From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id BB63AA0613
	for <public@inbox.dpdk.org>; Thu, 26 Sep 2019 10:07:32 +0200 (CEST)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id 82C641BEC2;
	Thu, 26 Sep 2019 10:07:32 +0200 (CEST)
Received: from mga12.intel.com (mga12.intel.com [192.55.52.136])
 by dpdk.org (Postfix) with ESMTP id 917141BEB2
 for <dev@dpdk.org>; Thu, 26 Sep 2019 10:07:30 +0200 (CEST)
X-Amp-Result: UNKNOWN
X-Amp-Original-Verdict: FILE UNKNOWN
X-Amp-File-Uploaded: False
Received: from fmsmga005.fm.intel.com ([10.253.24.32])
 by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
 26 Sep 2019 01:07:29 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.64,551,1559545200"; d="scan'208";a="389528303"
Received: from dpdk-virtio-tbie-2.sh.intel.com (HELO ___) ([10.67.104.73])
 by fmsmga005.fm.intel.com with ESMTP; 26 Sep 2019 01:07:28 -0700
Date: Thu, 26 Sep 2019 16:04:40 +0800
From: Tiwei Bie <tiwei.bie@intel.com>
To: Jin Yu <jin.yu@intel.com>
Cc: dev@dpdk.org, changpeng.liu@intel.com, maxime.coquelin@redhat.com,
 zhihong.wang@intel.com, Lin Li <lilin24@baidu.com>,
 Xun Ni <nixun@baidu.com>, Yu Zhang <zhangyu31@baidu.com>
Message-ID: <20190926080440.GA17894@___>
References: <20190917145234.16951> <20190920120102.29828-1-jin.yu@intel.com>
 <20190920120102.29828-6-jin.yu@intel.com>
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Disposition: inline
In-Reply-To: <20190920120102.29828-6-jin.yu@intel.com>
User-Agent: Mutt/1.9.4 (2018-02-28)
Subject: Re: [dpdk-dev] [PATCH v7 05/10] vhost: checkout the resubmit
 inflight information
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

On Fri, Sep 20, 2019 at 08:00:57PM +0800, Jin Yu wrote:
> @@ -1458,6 +1472,188 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
>  	return RTE_VHOST_MSG_RESULT_OK;
>  }
>  
> +static int
> +resubmit_desc_compare(const void *a, const void *b)
> +{
> +	const struct rte_vhost_resubmit_desc *desc0 =
> +		(const struct rte_vhost_resubmit_desc *)a;
> +	const struct rte_vhost_resubmit_desc *desc1 =
> +		(const struct rte_vhost_resubmit_desc *)b;
> +
> +	if (desc1->counter > desc0->counter &&
> +		(desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2)

Why is this window hardcoded to VIRTQUEUE_MAX_SIZE * 2 (i.e. 1024 * 2) here?

> +		return 1;
> +
> +	return -1;
> +}
> +
> +static int
> +vhost_check_queue_inflights_split(struct virtio_net *dev,
> +	struct vhost_virtqueue *vq)
> +{
> +	uint16_t i = 0;
> +	uint16_t resubmit_num = 0, last_io, num;
> +	struct vring_used *used = vq->used;
> +	struct rte_vhost_resubmit_info *resubmit = NULL;
> +	struct rte_vhost_inflight_info_split *inflight_split;
> +
> +	if (!(dev->protocol_features &
> +		(1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
> +		return RTE_VHOST_MSG_RESULT_OK;
> +
> +	if ((!vq->inflight_split))
> +		return RTE_VHOST_MSG_RESULT_ERR;
> +
> +	if (!vq->inflight_split->version) {
> +		vq->inflight_split->version = INFLIGHT_VERSION;
> +		return RTE_VHOST_MSG_RESULT_OK;
> +	}
> +
> +	inflight_split = vq->inflight_split;
> +	vq->resubmit_inflight = NULL;

This is invoked in the vhost message handler, which can run more than
once for the same vring, so you need to check whether vq->resubmit_inflight
has already been allocated first (and free it if so) before overwriting
it here — otherwise the previous allocation is leaked.


> +	vq->global_counter = 0;
> +	last_io = inflight_split->last_inflight_io;
> +
> +	if (inflight_split->used_idx != used->idx) {
> +		inflight_split->desc[last_io].inflight = 0;
> +		rte_compiler_barrier();
> +		inflight_split->used_idx = used->idx;
> +	}
> +
> +	for (i = 0; i < inflight_split->desc_num; i++) {
> +		if (inflight_split->desc[i].inflight == 1)
> +			resubmit_num++;
> +	}
> +
> +	vq->last_avail_idx += resubmit_num;
> +
> +	if (resubmit_num) {
> +		resubmit  = calloc(1, sizeof(struct rte_vhost_resubmit_info));
> +		if (!resubmit) {
> +			RTE_LOG(ERR, VHOST_CONFIG,
> +			 "Failed to allocate memory for resubmit info.\n");
> +			return RTE_VHOST_MSG_RESULT_ERR;
> +		}
> +
> +		resubmit->resubmit_list = calloc(resubmit_num,
> +			sizeof(struct rte_vhost_resubmit_desc));
> +		if (!resubmit->resubmit_list) {
> +			RTE_LOG(ERR, VHOST_CONFIG,
> +			 "Failed to allocate memory for inflight desc.\n");
> +			return RTE_VHOST_MSG_RESULT_ERR;
> +		}
> +
> +		num = 0;
> +		for (i = 0; i < vq->inflight_split->desc_num; i++) {
> +			if (vq->inflight_split->desc[i].inflight == 1) {
> +				resubmit->resubmit_list[num].index = i;
> +				resubmit->resubmit_list[num].counter =
> +					inflight_split->desc[i].counter;
> +				num++;
> +			}
> +		}
> +		resubmit->resubmit_num = num;
> +
> +		if (resubmit->resubmit_num > 1)
> +			qsort(resubmit->resubmit_list, resubmit->resubmit_num,
> +				sizeof(struct rte_vhost_resubmit_desc),
> +				resubmit_desc_compare);
> +
> +		vq->global_counter = resubmit->resubmit_list[0].counter + 1;
> +		vq->resubmit_inflight = resubmit;
> +	}
> +
> +	return RTE_VHOST_MSG_RESULT_OK;
> +}
> +
> +static int
> +vhost_check_queue_inflights_packed(struct virtio_net *dev,
> +						 struct vhost_virtqueue *vq)
> +{
> +	uint16_t i = 0;
> +	uint16_t resubmit_num = 0, old_used_idx, num;
> +	struct rte_vhost_resubmit_info *resubmit = NULL;
> +	struct rte_vhost_inflight_info_packed *inflight_packed;
> +
> +	if (!(dev->protocol_features &
> +		(1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
> +		return RTE_VHOST_MSG_RESULT_OK;
> +
> +	if ((!vq->inflight_packed))
> +		return RTE_VHOST_MSG_RESULT_ERR;
> +
> +	if (!vq->inflight_packed->version) {
> +		vq->inflight_packed->version = INFLIGHT_VERSION;
> +		return RTE_VHOST_MSG_RESULT_OK;
> +	}
> +
> +	inflight_packed = vq->inflight_packed;
> +	vq->resubmit_inflight = NULL;

Ditto.


> +	vq->global_counter = 0;
> +	old_used_idx = inflight_packed->old_used_idx;
> +
> +	if (inflight_packed->used_idx != old_used_idx) {
> +		if (inflight_packed->desc[old_used_idx].inflight == 0) {
> +			inflight_packed->old_used_idx =
> +				inflight_packed->used_idx;
> +			inflight_packed->old_used_wrap_counter =
> +				inflight_packed->used_wrap_counter;
> +			inflight_packed->old_free_head =
> +				inflight_packed->free_head;
> +		} else {
> +			inflight_packed->used_idx =
> +				inflight_packed->old_used_idx;
> +			inflight_packed->used_wrap_counter =
> +				inflight_packed->old_used_wrap_counter;
> +			inflight_packed->free_head =
> +				inflight_packed->old_free_head;
> +		}
> +	}
> +
> +	for (i = 0; i < inflight_packed->desc_num; i++) {
> +		if (inflight_packed->desc[i].inflight == 1)
> +			resubmit_num++;
> +	}
> +
> +	if (resubmit_num) {
> +		resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
> +		if (resubmit == NULL) {
> +			RTE_LOG(ERR, VHOST_CONFIG,
> +			 "Failed to allocate memory for resubmit info.\n");
> +			return RTE_VHOST_MSG_RESULT_ERR;
> +		}
> +
> +		resubmit->resubmit_list = calloc(resubmit_num,
> +			sizeof(struct rte_vhost_resubmit_desc));
> +		if (resubmit->resubmit_list == NULL) {
> +			RTE_LOG(ERR, VHOST_CONFIG,
> +			 "Failed to allocate memory for resubmit desc.\n");
> +			return RTE_VHOST_MSG_RESULT_ERR;
> +		}
> +
> +		num = 0;
> +		for (i = 0; i < inflight_packed->desc_num; i++) {
> +			if (vq->inflight_packed->desc[i].inflight == 1) {
> +				resubmit->resubmit_list[num].index = i;
> +				resubmit->resubmit_list[num].counter =
> +					inflight_packed->desc[i].counter;
> +				num++;
> +			}
> +		}
> +		resubmit->resubmit_num = num;
> +
> +		if (resubmit->resubmit_num > 1)
> +			qsort(resubmit->resubmit_list, resubmit->resubmit_num,
> +				sizeof(struct rte_vhost_resubmit_desc),
> +				resubmit_desc_compare);
> +
> +		vq->global_counter = resubmit->resubmit_list[0].counter + 1;
> +		vq->resubmit_inflight = resubmit;
> +	}
> +
> +	return RTE_VHOST_MSG_RESULT_OK;
> +}
> +
>  static int
>  vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
>  			int main_fd __rte_unused)
> @@ -1499,6 +1695,20 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
>  		close(vq->kickfd);
>  	vq->kickfd = file.fd;
>  
> +	if (vq_is_packed(dev)) {
> +		if (vhost_check_queue_inflights_packed(dev, vq)) {
> +			RTE_LOG(ERR, VHOST_CONFIG,
> +				"Failed to inflights for vq: %d\n", file.index);
> +			return RTE_VHOST_MSG_RESULT_ERR;
> +		}
> +	} else {
> +		if (vhost_check_queue_inflights_split(dev, vq)) {
> +			RTE_LOG(ERR, VHOST_CONFIG,
> +				"Failed to inflights for vq: %d\n", file.index);
> +			return RTE_VHOST_MSG_RESULT_ERR;
> +		}
> +	}
> +
>  	return RTE_VHOST_MSG_RESULT_OK;
>  }
>  
> -- 
> 2.17.2
>