From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, jiayu.hu@intel.com, yuanx.wang@intel.com, wenwux.ma@intel.com, bruce.richardson@intel.com, john.mcnamara@intel.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Date: Fri, 8 Oct 2021 00:00:09 +0200
Message-Id: <20211007220013.355530-11-maxime.coquelin@redhat.com>
In-Reply-To: <20211007220013.355530-1-maxime.coquelin@redhat.com>
References: <20211007220013.355530-1-maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [RFC 10/14] vhost: simplify async enqueue completion

vhost_poll_enqueue_completed() assumes some inflight packets could have
been completed in a previous call but not yet returned to the application.
This is not the case, since the check_completed_copies callback is never
called with more than the current count as argument.

In other words, async->last_pkts_n is always 0. Removing it greatly
simplifies the function.
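To make the reasoning easier to follow, here is a minimal standalone sketch
of the old bookkeeping (hypothetical names, not the vhost code itself). The
completion callback is only ever asked for, and can only report, at most the
outstanding count, so the carry-over term always collapses to zero:

  #include <stdint.h>
  #include <stdio.h>

  #define MIN(a, b) ((a) < (b) ? (a) : (b))

  static uint16_t last_pkts_n; /* mirrors async->last_pkts_n, starts at 0 */

  /* Stand-in for async->ops.check_completed_copies(): reports at most 'max'. */
  static uint16_t fake_check_completed(uint16_t max)
  {
          return max; /* any value <= max leads to the same conclusion */
  }

  static uint16_t poll_completed_sketch(uint16_t count)
  {
          uint16_t n_cpl = fake_check_completed(count - last_pkts_n);
          uint16_t n_pkts_cpl = n_cpl + last_pkts_n;    /* <= count */
          uint16_t n_pkts_put = MIN(n_pkts_cpl, count); /* == n_pkts_cpl */

          last_pkts_n = n_pkts_cpl - n_pkts_put;        /* always 0 */
          return n_pkts_put;
  }

  int main(void)
  {
          for (int i = 0; i < 4; i++) {
                  unsigned int put = poll_completed_sketch(32);
                  printf("put=%u last_pkts_n=%u\n", put, (unsigned int)last_pkts_n);
          }
          return 0;
  }

Running the sketch shows last_pkts_n never leaves zero, which is why the
field and the RTE_MIN() bookkeeping can be dropped entirely.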
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/vhost.h      |  1 -
 lib/vhost/virtio_net.c | 76 ++++++++++++++++--------------
 2 files changed, 28 insertions(+), 49 deletions(-)

diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 812d4c55a5..1a2cd21a1d 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -141,7 +141,6 @@ struct vhost_async {
 	struct async_inflight_info *pkts_info;
 	uint16_t pkts_idx;
 	uint16_t pkts_inflight_n;
-	uint16_t last_pkts_n;
 	union {
 		struct vring_used_elem *descs_split;
 		struct vring_used_elem_packed *buffers_packed;
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index b295dc1d39..c5651f1e0f 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1618,7 +1618,11 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 			vq->shadow_used_idx);
 
 		async->desc_idx_split += vq->shadow_used_idx;
+		async->pkts_idx += pkt_idx;
+		if (async->pkts_idx >= vq->size)
+			async->pkts_idx -= vq->size;
 
+		async->pkts_inflight_n += pkt_idx;
 		vq->shadow_used_idx = 0;
 	}
 
@@ -1920,68 +1924,44 @@ static __rte_always_inline uint16_t
 vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
 		struct rte_mbuf **pkts, uint16_t count)
 {
-	struct vhost_virtqueue *vq;
-	struct vhost_async *async;
-	struct async_inflight_info *pkts_info;
+	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
+	struct vhost_async *async = vq->async;
+	struct async_inflight_info *pkts_info = async->pkts_info;
 	int32_t n_cpl;
-	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
-	uint16_t start_idx, pkts_idx, vq_size;
-	uint16_t from, i;
+	uint16_t n_descs = 0, n_buffers = 0;
+	uint16_t start_idx, from, i;
 
-	vq = dev->virtqueue[queue_id];
-	async = vq->async;
-	pkts_idx = async->pkts_idx % vq->size;
-	pkts_info = async->pkts_info;
-	vq_size = vq->size;
-	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
-			vq_size, async->pkts_inflight_n);
-
-	if (count > async->last_pkts_n) {
-		n_cpl = async->ops.check_completed_copies(dev->vid,
-			queue_id, 0, count - async->last_pkts_n);
-		if (likely(n_cpl >= 0)) {
-			n_pkts_cpl = n_cpl;
-		} else {
-			VHOST_LOG_DATA(ERR,
-				"(%d) %s: failed to check completed copies for queue id %d.\n",
+	start_idx = virtio_dev_rx_async_get_info_idx(async->pkts_idx,
+			vq->size, async->pkts_inflight_n);
+
+	n_cpl = async->ops.check_completed_copies(dev->vid, queue_id, 0, count);
+	if (unlikely(n_cpl < 0)) {
+		VHOST_LOG_DATA(ERR, "(%d) %s: failed to check completed copies for queue id %d.\n",
 				dev->vid, __func__, queue_id);
-			n_pkts_cpl = 0;
-		}
+		return 0;
 	}
 
-	n_pkts_cpl += async->last_pkts_n;
-	n_pkts_put = RTE_MIN(n_pkts_cpl, count);
-	if (unlikely(n_pkts_put == 0)) {
-		async->last_pkts_n = n_pkts_cpl;
+	if (n_cpl == 0)
 		return 0;
-	}
 
-	if (vq_is_packed(dev)) {
-		for (i = 0; i < n_pkts_put; i++) {
-			from = (start_idx + i) % vq_size;
-			n_buffers += pkts_info[from].nr_buffers;
-			pkts[i] = pkts_info[from].mbuf;
-		}
-	} else {
-		for (i = 0; i < n_pkts_put; i++) {
-			from = (start_idx + i) & (vq_size - 1);
-			n_descs += pkts_info[from].descs;
-			pkts[i] = pkts_info[from].mbuf;
-		}
+	for (i = 0; i < n_cpl; i++) {
+		from = (start_idx + i) % vq->size;
+		/* Only used with packed ring */
+		n_buffers += pkts_info[from].nr_buffers;
+		/* Only used with split ring */
+		n_descs += pkts_info[from].descs;
+		pkts[i] = pkts_info[from].mbuf;
 	}
-	async->last_pkts_n = n_pkts_cpl - n_pkts_put;
-	async->pkts_inflight_n -= n_pkts_put;
+
+	async->pkts_inflight_n -= n_cpl;
 
 	if (likely(vq->enabled && vq->access_ok)) {
 		if (vq_is_packed(dev)) {
 			write_back_completed_descs_packed(vq, n_buffers);
-
 			vhost_vring_call_packed(dev, vq);
 		} else {
 			write_back_completed_descs_split(vq, n_descs);
-
-			__atomic_add_fetch(&vq->used->idx, n_descs,
-					__ATOMIC_RELEASE);
+			__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
 			vhost_vring_call_split(dev, vq);
 		}
 	} else {
@@ -1994,7 +1974,7 @@ vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
 		}
 	}
 
-	return n_pkts_put;
+	return n_cpl;
 }
 
 uint16_t
-- 
2.31.1