From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 85265A0C48;
	Tue,  6 Jul 2021 22:29:34 +0200 (CEST)
Received: from [217.70.189.124] (localhost [127.0.0.1])
	by mails.dpdk.org (Postfix) with ESMTP id 4C7BE413E3;
	Tue,  6 Jul 2021 22:29:17 +0200 (CEST)
Received: from mga18.intel.com (mga18.intel.com [134.134.136.126])
 by mails.dpdk.org (Postfix) with ESMTP id C994E413C6
 for <dev@dpdk.org>; Tue,  6 Jul 2021 22:29:13 +0200 (CEST)
X-IronPort-AV: E=McAfee;i="6200,9189,10037"; a="196470335"
X-IronPort-AV: E=Sophos;i="5.83,329,1616482800"; d="scan'208";a="196470335"
Received: from fmsmga003.fm.intel.com ([10.253.24.29])
 by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;
 06 Jul 2021 13:29:12 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.83,329,1616482800"; d="scan'208";a="486522077"
Received: from silpixa00399126.ir.intel.com ([10.237.223.29])
 by FMSMGA003.fm.intel.com with ESMTP; 06 Jul 2021 13:29:11 -0700
From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Chengwen Feng <fengchengwen@huawei.com>,
 Jerin Jacob <jerinjacobk@gmail.com>, Jerin Jacob <jerinj@marvell.com>,
 Morten Brørup <mb@smartsharesystems.com>,
 Bruce Richardson <bruce.richardson@intel.com>
Date: Tue,  6 Jul 2021 21:28:37 +0100
Message-Id: <20210706202841.661302-6-bruce.richardson@intel.com>
X-Mailer: git-send-email 2.30.2
In-Reply-To: <20210706202841.661302-1-bruce.richardson@intel.com>
References: <1625231891-2963-1-git-send-email-fengchengwen@huawei.com>
 <20210706202841.661302-1-bruce.richardson@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [RFC UPDATE PATCH 5/9] dmadev: drop cookie typedef
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

Rather than having a special type for the index values used in dmadev,
just use regular int types: negative return values indicate errors, while
non-negative values give the index of the enqueued job.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
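Note for reviewers (not part of the commit message): the snippet below is a
minimal usage sketch of the plain-int job index returned by the enqueue calls.
The dev_id, vq_id, src, dst and len arguments are placeholders; the functions
used are those declared in rte_dmadev.h as modified by this patch.

#include <stdint.h>
#include <stdbool.h>
#include <rte_dmadev.h>

/* Enqueue one copy, submit it, then busy-poll for its completion.
 * Illustration only; error handling is kept to a minimum. */
static int
copy_one(uint16_t dev_id, uint16_t vq_id, rte_iova_t src, rte_iova_t dst,
		uint32_t len)
{
	uint16_t last_idx = 0;
	bool has_error = false;
	int idx;

	idx = rte_dmadev_copy(dev_id, vq_id, src, dst, len, 0);
	if (idx < 0)
		return idx; /* negative value is an error code */

	if (rte_dmadev_perform(dev_id, vq_id) < 0)
		return -1; /* failed to trigger the hardware */

	/* Poll until at least one operation has completed; last_idx reports
	 * the index of the most recently completed job. */
	while (rte_dmadev_completed(dev_id, vq_id, 1, &last_idx, &has_error) == 0)
		;

	return has_error ? -1 : 0;
}
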
 lib/dmadev/rte_dmadev.h      | 59 ++++++++++++------------------------
 lib/dmadev/rte_dmadev_core.h | 12 ++++----
 2 files changed, 26 insertions(+), 45 deletions(-)

diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 2bfc0b619..8cfe14dd2 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -106,29 +106,6 @@ extern "C" {
 #include <rte_errno.h>
 #include <rte_compat.h>
 
-/**
- * dma_cookie_t - an opaque DMA cookie
- *
- * If dma_cookie_t is >=0 it's a DMA operation request cookie, <0 it's a error
- * code.
- * When using cookies, comply with the following rules:
- * a) Cookies for each virtual queue are independent.
- * b) For a virt queue, the cookie are monotonically incremented, when it reach
- *    the INT_MAX, it wraps back to zero.
- * c) The initial cookie of a virt queue is zero, after the device is stopped or
- *    reset, the virt queue's cookie needs to be reset to zero.
- * Example:
- *    step-1: start one dmadev
- *    step-2: enqueue a copy operation, the cookie return is 0
- *    step-3: enqueue a copy operation again, the cookie return is 1
- *    ...
- *    step-101: stop the dmadev
- *    step-102: start the dmadev
- *    step-103: enqueue a copy operation, the cookie return is 0
- *    ...
- */
-typedef int32_t dma_cookie_t;
-
 /**
  * dma_scatterlist - can hold scatter DMA operation request
  */
@@ -517,13 +494,14 @@ rte_dmadev_queue_info_get(uint16_t dev_id, uint16_t vq_id,
  *   An opaque flags for this operation.
  *
  * @return
- *   dma_cookie_t: please refer to the corresponding definition.
+ *   <0 on error,
+ *   on success, the index of the enqueued copy job, monotonically increasing in the range 0..UINT16_MAX
  *
  * NOTE: The caller must ensure that the input parameter is valid and the
  *       corresponding device supports the operation.
  */
 __rte_experimental
-static inline dma_cookie_t
+static inline int
 rte_dmadev_copy(uint16_t dev_id, uint16_t vq_id, rte_iova_t src, rte_iova_t dst,
 		uint32_t length, uint64_t flags)
 {
@@ -552,13 +530,14 @@ rte_dmadev_copy(uint16_t dev_id, uint16_t vq_id, rte_iova_t src, rte_iova_t dst,
  *   An opaque flags for this operation.
  *
  * @return
- *   dma_cookie_t: please refer to the corresponding definition.
+ *   <0 on error,
+ *   on success, the index of the enqueued scatter-gather copy job, monotonically increasing in the range 0..UINT16_MAX
  *
  * NOTE: The caller must ensure that the input parameter is valid and the
  *       corresponding device supports the operation.
  */
 __rte_experimental
-static inline dma_cookie_t
+static inline int
 rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
 		   const struct dma_scatterlist *sg,
 		   uint32_t sg_len, uint64_t flags)
@@ -590,13 +569,14 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
  *   An opaque flags for this operation.
  *
  * @return
- *   dma_cookie_t: please refer to the corresponding definition.
+ *   <0 on error,
+ *   on success, the index of the enqueued fill job, monotonically increasing in the range 0..UINT16_MAX
  *
  * NOTE: The caller must ensure that the input parameter is valid and the
  *       corresponding device supports the operation.
  */
 __rte_experimental
-static inline dma_cookie_t
+static inline int
 rte_dmadev_fill(uint16_t dev_id, uint16_t vq_id, uint64_t pattern,
 		rte_iova_t dst, uint32_t length, uint64_t flags)
 {
@@ -627,13 +607,14 @@ rte_dmadev_fill(uint16_t dev_id, uint16_t vq_id, uint64_t pattern,
  *   An opaque flags for this operation.
  *
  * @return
- *   dma_cookie_t: please refer to the corresponding definition.
+ *   <0 on error,
+ *   on success, the index of the enqueued scatter-gather fill job, monotonically increasing in the range 0..UINT16_MAX
  *
  * NOTE: The caller must ensure that the input parameter is valid and the
  *       corresponding device supports the operation.
  */
 __rte_experimental
-static inline dma_cookie_t
+static inline int
 rte_dmadev_fill_sg(uint16_t dev_id, uint16_t vq_id, uint64_t pattern,
 		   const struct dma_scatterlist *sg, uint32_t sg_len,
 		   uint64_t flags)
@@ -716,8 +697,8 @@ rte_dmadev_perform(uint16_t dev_id, uint16_t vq_id)
  *   The identifier of virt queue.
  * @param nb_cpls
  *   The maximum number of completed operations that can be processed.
- * @param[out] cookie
- *   The last completed operation's cookie.
+ * @param[out] last_idx
+ *   The last completed operation's index, as returned when the entry was enqueued.
  * @param[out] has_error
  *   Indicates if there are transfer error.
  *
@@ -730,11 +711,11 @@ rte_dmadev_perform(uint16_t dev_id, uint16_t vq_id)
 __rte_experimental
 static inline uint16_t
 rte_dmadev_completed(uint16_t dev_id, uint16_t vq_id, const uint16_t nb_cpls,
-		     dma_cookie_t *cookie, bool *has_error)
+		     uint16_t *last_idx, bool *has_error)
 {
 	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
 	has_error = false;
-	return (*dev->completed)(dev, vq_id, nb_cpls, cookie, has_error);
+	return (*dev->completed)(dev, vq_id, nb_cpls, last_idx, has_error);
 }
 
 /**
@@ -752,8 +733,8 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vq_id, const uint16_t nb_cpls,
  *   Indicates the size of status array.
  * @param[out] status
  *   The error code of operations that failed to complete.
- * @param[out] cookie
- *   The last failed completed operation's cookie.
+ * @param[out] last_idx
+ *   The index of the last operation that completed with an error.
  *
  * @return
  *   The number of operations that failed to complete.
@@ -765,10 +746,10 @@ __rte_experimental
 static inline uint16_t
 rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
 			   const uint16_t nb_status, uint32_t *status,
-			   dma_cookie_t *cookie)
+			   uint16_t *last_idx)
 {
 	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
-	return (*dev->completed_fails)(dev, vq_id, nb_status, status, cookie);
+	return (*dev->completed_fails)(dev, vq_id, nb_status, status, last_idx);
 }
 
 struct rte_dmadev_stats {
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index 80b56ed83..7fbefe8f9 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -16,22 +16,22 @@
 
 struct rte_dmadev;
 
-typedef dma_cookie_t (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vq_id,
+typedef int (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vq_id,
 				      rte_iova_t src, rte_iova_t dst,
 				      uint32_t length, uint64_t flags);
 /**< @internal Function used to enqueue a copy operation. */
 
-typedef dma_cookie_t (*dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vq_id,
+typedef int (*dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vq_id,
 					 const struct dma_scatterlist *sg,
 					 uint32_t sg_len, uint64_t flags);
 /**< @internal Function used to enqueue a scatter list copy operation. */
 
-typedef dma_cookie_t (*dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vq_id,
+typedef int (*dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vq_id,
 				      uint64_t pattern, rte_iova_t dst,
 				      uint32_t length, uint64_t flags);
 /**< @internal Function used to enqueue a fill operation. */
 
-typedef dma_cookie_t (*dmadev_fill_sg_t)(struct rte_dmadev *dev, uint16_t vq_id,
+typedef int (*dmadev_fill_sg_t)(struct rte_dmadev *dev, uint16_t vq_id,
 			uint64_t pattern, const struct dma_scatterlist *sg,
 			uint32_t sg_len, uint64_t flags);
 /**< @internal Function used to enqueue a scatter list fill operation. */
@@ -44,12 +44,12 @@ typedef int (*dmadev_perform_t)(struct rte_dmadev *dev, uint16_t vq_id);
 
 typedef uint16_t (*dmadev_completed_t)(struct rte_dmadev *dev, uint16_t vq_id,
 				       const uint16_t nb_cpls,
-				       dma_cookie_t *cookie, bool *has_error);
+				       uint16_t *last_idx, bool *has_error);
 /**< @internal Function used to return number of successful completed operations */
 
 typedef uint16_t (*dmadev_completed_fails_t)(struct rte_dmadev *dev,
 			uint16_t vq_id, const uint16_t nb_status,
-			uint32_t *status, dma_cookie_t *cookie);
+			uint32_t *status, uint16_t *last_idx);
 /**< @internal Function used to return number of failed completed operations */
 
 #define RTE_DMADEV_NAME_MAX_LEN	64 /**< Max length of name of DMA PMD */
-- 
2.30.2