From: Cheng Jiang <cheng1.jiang@intel.com>
To: maxime.coquelin@redhat.com, Chenbo.Xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, yvonnex.yang@intel.com,
	Cheng Jiang <cheng1.jiang@intel.com>
Date: Wed, 14 Jul 2021 09:01:05 +0000
Message-Id: <20210714090109.18523-2-cheng1.jiang@intel.com>
X-Mailer: git-send-email 2.29.2
In-Reply-To: <20210714090109.18523-1-cheng1.jiang@intel.com>
References: <20210602042802.31943-1-cheng1.jiang@intel.com>
	<20210714090109.18523-1-cheng1.jiang@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [PATCH v3 1/5] vhost: fix async vhost ops return type

The async vhost ops callbacks should return -1 when something goes
wrong in the callback, so the return type should be changed to
int32_t. The resulting issue in the vhost example is also fixed in
this patch.
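For illustration, below is a minimal sketch of a backend callback
written against the new contract. It is not part of this patch and the
DMA hand-off is only stubbed out; a real backend would submit the
descriptors to its copy engine, as ioat_transfer_data_cb() in
examples/vhost/ioat.c does:

    static int32_t
    stub_transfer_data(int vid, uint16_t queue_id,
    		struct rte_vhost_async_desc *descs,
    		struct rte_vhost_async_status *opaque_data, uint16_t count)
    {
    	/* This sample channel does not support opaque data; with the
    	 * int32_t return type the error can now be reported instead of
    	 * being truncated into a large unsigned count. */
    	if (opaque_data != NULL)
    		return -1;

    	/* ... hand descs[0 .. count - 1] to the copy engine of
    	 * (vid, queue_id) here ... */

    	return count;	/* success: number of descriptors accepted */
    }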
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
 examples/vhost/ioat.c       |  4 ++--
 examples/vhost/ioat.h       |  4 ++--
 lib/vhost/rte_vhost_async.h |  4 ++--
 lib/vhost/virtio_net.c      | 58 ++++++++++++++++++++++++++++++++-----
 4 files changed, 56 insertions(+), 14 deletions(-)

diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 2a2c2d7202..457f8171f0 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -122,7 +122,7 @@ open_ioat(const char *value)
 	return ret;
 }
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data, uint16_t count)
@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
 	return i_desc;
 }
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets)
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 1aa28ed6a3..b57b5645b0 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -27,12 +27,12 @@ struct dma_for_vhost {
 #ifdef RTE_RAW_IOAT
 int open_ioat(const char *value);
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data, uint16_t count);
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets);
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 6faa31f5ad..bc81cd0caa 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -61,7 +61,7 @@ struct rte_vhost_async_channel_ops {
 	 * @return
 	 *  number of descs processed
 	 */
-	uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+	int32_t (*transfer_data)(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t count);
@@ -78,7 +78,7 @@ struct rte_vhost_async_channel_ops {
 	 * @return
 	 *  number of async descs completed
 	 */
-	uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+	int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets);
 };
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index b93482587c..8156796a46 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1528,6 +1528,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct async_inflight_info *pkts_info = vq->async_pkts_info;
 	uint32_t n_pkts = 0, pkt_err = 0;
 	uint32_t num_async_pkts = 0, num_done_pkts = 0;
+	int32_t n_enq;
 	struct {
 		uint16_t pkt_idx;
 		uint16_t last_avail_idx;
@@ -1608,8 +1609,16 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
 			BUF_VECTOR_MAX))) {
-			n_pkts = vq->async_ops.transfer_data(dev->vid,
+			n_enq = vq->async_ops.transfer_data(dev->vid,
 					queue_id, tdes, 0, pkt_burst_idx);
+			if (n_enq >= 0) {
+				n_pkts = n_enq;
+			} else {
+				VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+					dev->vid, __func__, queue_id);
+				n_pkts = 0;
+			}
+
 			iovec_idx = 0;
 			it_idx = 0;
 
@@ -1632,8 +1641,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	}
 
 	if (pkt_burst_idx) {
-		n_pkts = vq->async_ops.transfer_data(dev->vid,
-			queue_id, tdes, 0, pkt_burst_idx);
+		n_enq = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		if (n_enq >= 0) {
+			n_pkts = n_enq;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts = 0;
+		}
+
 		vq->async_pkts_inflight_n += n_pkts;
 
 		if (unlikely(n_pkts < pkt_burst_idx))
@@ -1903,6 +1919,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	uint16_t async_descs_idx = 0;
 	uint16_t num_buffers;
 	uint16_t num_descs;
+	int32_t n_enq;
 
 	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
 	struct iovec *vec_pool = vq->vec_pool;
@@ -1983,8 +2000,16 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 		 */
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
-			n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
-				tdes, 0, pkt_burst_idx);
+			n_enq = vq->async_ops.transfer_data(dev->vid,
+					queue_id, tdes, 0, pkt_burst_idx);
+			if (n_enq >= 0) {
+				n_pkts = n_enq;
+			} else {
+				VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+					dev->vid, __func__, queue_id);
+				n_pkts = 0;
+			}
+
 			iovec_idx = 0;
 			it_idx = 0;
 			segs_await = 0;
@@ -2006,7 +2031,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	} while (pkt_idx < count);
 
 	if (pkt_burst_idx) {
-		n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		n_enq = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		if (n_enq >= 0) {
+			n_pkts = n_enq;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts = 0;
+		}
+
 		vq->async_pkts_inflight_n += n_pkts;
 
 		if (unlikely(n_pkts < pkt_burst_idx))
@@ -2091,6 +2124,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	uint16_t start_idx, pkts_idx, vq_size;
 	struct async_inflight_info *pkts_info;
 	uint16_t from, i;
+	int32_t n_poll;
 
 	if (!dev)
 		return 0;
@@ -2118,9 +2152,17 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
 		vq_size, vq->async_pkts_inflight_n);
 
-	if (count > vq->async_last_pkts_n)
-		n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+	if (count > vq->async_last_pkts_n) {
+		n_poll = vq->async_ops.check_completed_copies(vid,
 			queue_id, 0, count - vq->async_last_pkts_n);
+		if (n_poll >= 0) {
+			n_pkts_cpl = n_poll;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts_cpl = 0;
+		}
+	}
 
 	n_pkts_cpl += vq->async_last_pkts_n;
 	n_pkts_put = RTE_MIN(count, n_pkts_cpl);
-- 
2.29.2
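
Usage sketch (illustration only, not part of the patch): wiring such
callbacks into the ops table whose fields this patch retypes.
stub_transfer_data is the hypothetical callback sketched above, and
stub_check_completed_copies is an equally hypothetical polling stub:

    static int32_t
    stub_check_completed_copies(int vid, uint16_t queue_id,
    		struct rte_vhost_async_status *opaque_data,
    		uint16_t max_packets)
    {
    	/* Opaque data is unsupported in this sketch; -1 is now
    	 * distinguishable from "0 packets completed". */
    	if (opaque_data != NULL)
    		return -1;

    	/* ... poll the copy engine of (vid, queue_id) for at most
    	 * max_packets completions here ... */

    	return 0;	/* number of completed packets */
    }

    static struct rte_vhost_async_channel_ops stub_ops = {
    	.transfer_data		= stub_transfer_data,
    	.check_completed_copies	= stub_check_completed_copies,
    };

With such a table registered, rte_vhost_poll_enqueue_completed() treats
any negative return as zero completions and logs the failure, as the
virtio_net.c hunks above show.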