From: Tiwei Bie <tiwei.bie@intel.com>
To: Jin Yu <jin.yu@intel.com>
Cc: dev@dpdk.org, changpeng.liu@intel.com,
maxime.coquelin@redhat.com, zhihong.wang@intel.com,
Lin Li <lilin24@baidu.com>, Xun Ni <nixun@baidu.com>,
Yu Zhang <zhangyu31@baidu.com>
Subject: Re: [dpdk-dev] [PATCH v7 05/10] vhost: checkout the resubmit inflight information
Date: Thu, 26 Sep 2019 16:04:40 +0800 [thread overview]
Message-ID: <20190926080440.GA17894@___> (raw)
In-Reply-To: <20190920120102.29828-6-jin.yu@intel.com>
On Fri, Sep 20, 2019 at 08:00:57PM +0800, Jin Yu wrote:
> @@ -1458,6 +1472,188 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
> return RTE_VHOST_MSG_RESULT_OK;
> }
>
> +static int
> +resubmit_desc_compare(const void *a, const void *b)
> +{
> + const struct rte_vhost_resubmit_desc *desc0 =
> + (const struct rte_vhost_resubmit_desc *)a;
> + const struct rte_vhost_resubmit_desc *desc1 =
> + (const struct rte_vhost_resubmit_desc *)b;
> +
> + if (desc1->counter > desc0->counter &&
> + (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2)
Why is this hardcoded to VIRTQUEUE_MAX_SIZE * 2 (i.e. 1024 * 2) here?
> + return 1;
> +
> + return -1;
> +}
> +
> +static int
> +vhost_check_queue_inflights_split(struct virtio_net *dev,
> + struct vhost_virtqueue *vq)
> +{
> + uint16_t i = 0;
> + uint16_t resubmit_num = 0, last_io, num;
> + struct vring_used *used = vq->used;
> + struct rte_vhost_resubmit_info *resubmit = NULL;
> + struct rte_vhost_inflight_info_split *inflight_split;
> +
> + if (!(dev->protocol_features &
> + (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
> + return RTE_VHOST_MSG_RESULT_OK;
> +
> + if ((!vq->inflight_split))
> + return RTE_VHOST_MSG_RESULT_ERR;
> +
> + if (!vq->inflight_split->version) {
> + vq->inflight_split->version = INFLIGHT_VERSION;
> + return RTE_VHOST_MSG_RESULT_OK;
> + }
> +
> + inflight_split = vq->inflight_split;
> + vq->resubmit_inflight = NULL;
This is invoked from the vhost message handler, which can run more than
once for the same vring, so you need to check whether vq->resubmit_inflight
has already been allocated (and free it) before overwriting it here;
otherwise the previous allocation is leaked.
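E.g. something along these lines (just an untested sketch) before
rebuilding the resubmit info:

	/*
	 * SET_VRING_KICK can be handled more than once for the same
	 * vring, so release any resubmit info left over from a
	 * previous round instead of just dropping the pointer.
	 */
	if (vq->resubmit_inflight) {
		free(vq->resubmit_inflight->resubmit_list);
		free(vq->resubmit_inflight);
		vq->resubmit_inflight = NULL;
	}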
> + vq->global_counter = 0;
> + last_io = inflight_split->last_inflight_io;
> +
> + if (inflight_split->used_idx != used->idx) {
> + inflight_split->desc[last_io].inflight = 0;
> + rte_compiler_barrier();
> + inflight_split->used_idx = used->idx;
> + }
> +
> + for (i = 0; i < inflight_split->desc_num; i++) {
> + if (inflight_split->desc[i].inflight == 1)
> + resubmit_num++;
> + }
> +
> + vq->last_avail_idx += resubmit_num;
> +
> + if (resubmit_num) {
> + resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
> + if (!resubmit) {
> + RTE_LOG(ERR, VHOST_CONFIG,
> + "Failed to allocate memory for resubmit info.\n");
> + return RTE_VHOST_MSG_RESULT_ERR;
> + }
> +
> + resubmit->resubmit_list = calloc(resubmit_num,
> + sizeof(struct rte_vhost_resubmit_desc));
> + if (!resubmit->resubmit_list) {
> + RTE_LOG(ERR, VHOST_CONFIG,
> + "Failed to allocate memory for inflight desc.\n");
> + return RTE_VHOST_MSG_RESULT_ERR;
> + }
> +
> + num = 0;
> + for (i = 0; i < vq->inflight_split->desc_num; i++) {
> + if (vq->inflight_split->desc[i].inflight == 1) {
> + resubmit->resubmit_list[num].index = i;
> + resubmit->resubmit_list[num].counter =
> + inflight_split->desc[i].counter;
> + num++;
> + }
> + }
> + resubmit->resubmit_num = num;
> +
> + if (resubmit->resubmit_num > 1)
> + qsort(resubmit->resubmit_list, resubmit->resubmit_num,
> + sizeof(struct rte_vhost_resubmit_desc),
> + resubmit_desc_compare);
> +
> + vq->global_counter = resubmit->resubmit_list[0].counter + 1;
> + vq->resubmit_inflight = resubmit;
> + }
> +
> + return RTE_VHOST_MSG_RESULT_OK;
> +}
> +
> +static int
> +vhost_check_queue_inflights_packed(struct virtio_net *dev,
> + struct vhost_virtqueue *vq)
> +{
> + uint16_t i = 0;
> + uint16_t resubmit_num = 0, old_used_idx, num;
> + struct rte_vhost_resubmit_info *resubmit = NULL;
> + struct rte_vhost_inflight_info_packed *inflight_packed;
> +
> + if (!(dev->protocol_features &
> + (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
> + return RTE_VHOST_MSG_RESULT_OK;
> +
> + if ((!vq->inflight_packed))
> + return RTE_VHOST_MSG_RESULT_ERR;
> +
> + if (!vq->inflight_packed->version) {
> + vq->inflight_packed->version = INFLIGHT_VERSION;
> + return RTE_VHOST_MSG_RESULT_OK;
> + }
> +
> + inflight_packed = vq->inflight_packed;
> + vq->resubmit_inflight = NULL;
Ditto.
> + vq->global_counter = 0;
> + old_used_idx = inflight_packed->old_used_idx;
> +
> + if (inflight_packed->used_idx != old_used_idx) {
> + if (inflight_packed->desc[old_used_idx].inflight == 0) {
> + inflight_packed->old_used_idx =
> + inflight_packed->used_idx;
> + inflight_packed->old_used_wrap_counter =
> + inflight_packed->used_wrap_counter;
> + inflight_packed->old_free_head =
> + inflight_packed->free_head;
> + } else {
> + inflight_packed->used_idx =
> + inflight_packed->old_used_idx;
> + inflight_packed->used_wrap_counter =
> + inflight_packed->old_used_wrap_counter;
> + inflight_packed->free_head =
> + inflight_packed->old_free_head;
> + }
> + }
> +
> + for (i = 0; i < inflight_packed->desc_num; i++) {
> + if (inflight_packed->desc[i].inflight == 1)
> + resubmit_num++;
> + }
> +
> + if (resubmit_num) {
> + resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
> + if (resubmit == NULL) {
> + RTE_LOG(ERR, VHOST_CONFIG,
> + "Failed to allocate memory for resubmit info.\n");
> + return RTE_VHOST_MSG_RESULT_ERR;
> + }
> +
> + resubmit->resubmit_list = calloc(resubmit_num,
> + sizeof(struct rte_vhost_resubmit_desc));
> + if (resubmit->resubmit_list == NULL) {
> + RTE_LOG(ERR, VHOST_CONFIG,
> + "Failed to allocate memory for resubmit desc.\n");
> + return RTE_VHOST_MSG_RESULT_ERR;
> + }
> +
> + num = 0;
> + for (i = 0; i < inflight_packed->desc_num; i++) {
> + if (vq->inflight_packed->desc[i].inflight == 1) {
> + resubmit->resubmit_list[num].index = i;
> + resubmit->resubmit_list[num].counter =
> + inflight_packed->desc[i].counter;
> + num++;
> + }
> + }
> + resubmit->resubmit_num = num;
> +
> + if (resubmit->resubmit_num > 1)
> + qsort(resubmit->resubmit_list, resubmit->resubmit_num,
> + sizeof(struct rte_vhost_resubmit_desc),
> + resubmit_desc_compare);
> +
> + vq->global_counter = resubmit->resubmit_list[0].counter + 1;
> + vq->resubmit_inflight = resubmit;
> + }
> +
> + return RTE_VHOST_MSG_RESULT_OK;
> +}
> +
> static int
> vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
> int main_fd __rte_unused)
> @@ -1499,6 +1695,20 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
> close(vq->kickfd);
> vq->kickfd = file.fd;
>
> + if (vq_is_packed(dev)) {
> + if (vhost_check_queue_inflights_packed(dev, vq)) {
> + RTE_LOG(ERR, VHOST_CONFIG,
> + "Failed to inflights for vq: %d\n", file.index);
> + return RTE_VHOST_MSG_RESULT_ERR;
> + }
> + } else {
> + if (vhost_check_queue_inflights_split(dev, vq)) {
> + RTE_LOG(ERR, VHOST_CONFIG,
> + "Failed to inflights for vq: %d\n", file.index);
> + return RTE_VHOST_MSG_RESULT_ERR;
> + }
> + }
> +
> return RTE_VHOST_MSG_RESULT_OK;
> }
>
> --
> 2.17.2
>
Thread overview: 21+ messages
[not found] <20190917145234.16951>
2019-09-20 12:00 ` [dpdk-dev] [PATCH v7 00/10] vhost: support inflight share memory protocol feature Jin Yu
2019-09-20 12:00 ` [dpdk-dev] [PATCH v7 01/10] vhost: add the inflight description Jin Yu
2019-09-20 12:00 ` [dpdk-dev] [PATCH v7 02/10] vhost: add packed ring Jin Yu
2019-09-20 12:00 ` [dpdk-dev] [PATCH v7 03/10] vhost: add the inflight structure Jin Yu
2019-09-20 12:00 ` [dpdk-dev] [PATCH v7 04/10] vhost: add two new messages to support a shared buffer Jin Yu
2019-09-26 7:39 ` Tiwei Bie
2019-09-26 15:06 ` Yu, Jin
2019-09-27 2:12 ` Tiwei Bie
2019-09-20 12:00 ` [dpdk-dev] [PATCH v7 05/10] vhost: checkout the resubmit inflight information Jin Yu
2019-09-26 8:04 ` Tiwei Bie [this message]
2019-09-26 15:52 ` Yu, Jin
2019-09-20 12:00 ` [dpdk-dev] [PATCH v7 06/10] vhost: add the APIs to operate inflight ring Jin Yu
2019-09-20 12:00 ` [dpdk-dev] [PATCH v7 07/10] vhost: add APIs for user getting " Jin Yu
2019-09-20 12:01 ` [dpdk-dev] [PATCH v7 08/10] vhost: add vring functions packed ring support Jin Yu
2019-09-20 12:01 ` [dpdk-dev] [PATCH v7 09/10] vhost: add an API for judging vq format Jin Yu
2019-09-20 12:01 ` [dpdk-dev] [PATCH v7 10/10] vhost: add vhost-user-blk example which support inflight Jin Yu
2019-09-25 14:45 ` Tiwei Bie
2019-09-26 14:29 ` Yu, Jin
2019-09-26 14:40 ` Tiwei Bie
2019-09-25 14:25 ` [dpdk-dev] [PATCH v7 00/10] vhost: support inflight share memory protocol feature Tiwei Bie
2019-09-26 14:00 ` Yu, Jin