From mboxrd@z Thu Jan 1 00:00:00 1970
From: Bruce Richardson
To: dev@dpdk.org
Cc: cheng1.jiang@intel.com, patrick.fu@intel.com, ping.yu@intel.com,
 kevin.laatz@intel.com, Bruce Richardson
Date: Fri, 21 Aug 2020 17:29:28 +0100
Message-Id: <20200821162944.29840-3-bruce.richardson@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20200821162944.29840-1-bruce.richardson@intel.com>
References: <20200721095140.719297-1-bruce.richardson@intel.com>
 <20200821162944.29840-1-bruce.richardson@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [PATCH v2 02/18] raw/ioat: split header for readability

Rather than having a single long, complicated header file for general use,
we can split things so that there is one header with all the publicly
needed information - data structs and function prototypes - while the rest
of the internal details are put in a separate file. This makes the API
easier to read, understand and use.

Signed-off-by: Bruce Richardson
---
There are a couple of checkpatch errors about spacing in this patch;
however, these appear to be false positives.
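
As a quick reference for reviewers (not part of the patch): a minimal
sketch of how an application drives a copy using only the public header.
The helper name is hypothetical, and dev_id, the IOVAs and the handle
values are assumed to come from the application's own rawdev setup code.

#include <rte_ioat_rawdev.h>

/* Hypothetical helper, for illustration only: enqueue one copy, kick the
 * device, then poll until that copy is reported complete.
 */
static int
do_one_copy(int dev_id, phys_addr_t src, phys_addr_t dst, unsigned int len,
		uintptr_t src_hdl, uintptr_t dst_hdl)
{
	uintptr_t src_hdls[1], dst_hdls[1];
	int ret;

	/* returns 1 on success, 0 if the descriptor ring is full */
	if (rte_ioat_enqueue_copy(dev_id, src, dst, len,
			src_hdl, dst_hdl, 0) != 1)
		return -1;

	/* tell the hardware to start on the enqueued descriptors */
	rte_ioat_do_copies(dev_id);

	/* poll for the completion; a negative return means a device error */
	do {
		ret = rte_ioat_completed_copies(dev_id, 1, src_hdls, dst_hdls);
	} while (ret == 0);

	return ret < 0 ? -1 : 0;
}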
---
 drivers/raw/ioat/meson.build           |   1 +
 drivers/raw/ioat/rte_ioat_rawdev.h     | 144 +---------------------
 drivers/raw/ioat/rte_ioat_rawdev_fns.h | 164 +++++++++++++++++++++++++
 3 files changed, 171 insertions(+), 138 deletions(-)
 create mode 100644 drivers/raw/ioat/rte_ioat_rawdev_fns.h

diff --git a/drivers/raw/ioat/meson.build b/drivers/raw/ioat/meson.build
index 0878418aee..f66e9b605e 100644
--- a/drivers/raw/ioat/meson.build
+++ b/drivers/raw/ioat/meson.build
@@ -8,4 +8,5 @@ sources = files('ioat_rawdev.c',
 deps += ['rawdev', 'bus_pci', 'mbuf']
 
 install_headers('rte_ioat_rawdev.h',
+		'rte_ioat_rawdev_fns.h',
 		'rte_ioat_spec.h')
diff --git a/drivers/raw/ioat/rte_ioat_rawdev.h b/drivers/raw/ioat/rte_ioat_rawdev.h
index 4bc6491d91..7ace5c085a 100644
--- a/drivers/raw/ioat/rte_ioat_rawdev.h
+++ b/drivers/raw/ioat/rte_ioat_rawdev.h
@@ -14,12 +14,7 @@
  * @b EXPERIMENTAL: these structures and APIs may change without prior notice
  */
 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include "rte_ioat_spec.h"
+#include 
 
 /** Name of the device driver */
 #define IOAT_PMD_RAWDEV_NAME rawdev_ioat
@@ -38,38 +33,6 @@ struct rte_ioat_rawdev_config {
 	bool hdls_disable;    /**< if set, ignore user-supplied handle params */
 };
 
-/**
- * @internal
- * Structure representing a device instance
- */
-struct rte_ioat_rawdev {
-	struct rte_rawdev *rawdev;
-	const struct rte_memzone *mz;
-	const struct rte_memzone *desc_mz;
-
-	volatile struct rte_ioat_registers *regs;
-	phys_addr_t status_addr;
-	phys_addr_t ring_addr;
-
-	unsigned short ring_size;
-	struct rte_ioat_generic_hw_desc *desc_ring;
-	bool hdls_disable;
-	__m128i *hdls; /* completion handles for returning to user */
-
-
-	unsigned short next_read;
-	unsigned short next_write;
-
-	/* some statistics for tracking, if added/changed update xstats fns*/
-	uint64_t enqueue_failed __rte_cache_aligned;
-	uint64_t enqueued;
-	uint64_t started;
-	uint64_t completed;
-
-	/* to report completions, the device will write status back here */
-	volatile uint64_t status __rte_cache_aligned;
-};
-
 /**
  * Enqueue a copy operation onto the ioat device
  *
@@ -104,38 +67,7 @@ struct rte_ioat_rawdev {
 static inline int
 rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
 		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl,
-		int fence)
-{
-	struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
-	unsigned short read = ioat->next_read;
-	unsigned short write = ioat->next_write;
-	unsigned short mask = ioat->ring_size - 1;
-	unsigned short space = mask + read - write;
-	struct rte_ioat_generic_hw_desc *desc;
-
-	if (space == 0) {
-		ioat->enqueue_failed++;
-		return 0;
-	}
-
-	ioat->next_write = write + 1;
-	write &= mask;
-
-	desc = &ioat->desc_ring[write];
-	desc->size = length;
-	/* set descriptor write-back every 16th descriptor */
-	desc->u.control_raw = (uint32_t)((!!fence << 4) | (!(write & 0xF)) << 3);
-	desc->src_addr = src;
-	desc->dest_addr = dst;
-	if (!ioat->hdls_disable)
-		ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
-				(int64_t)src_hdl);
-
-	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);
-
-	ioat->enqueued++;
-	return 1;
-}
+		int fence);
 
 /**
  * Trigger hardware to begin performing enqueued copy operations
@@ -147,31 +79,7 @@ rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
  *   The rawdev device id of the ioat instance
  */
 static inline void
-rte_ioat_do_copies(int dev_id)
-{
-	struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
-	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
-			.control.completion_update = 1;
-	rte_compiler_barrier();
-	ioat->regs->dmacount = ioat->next_write;
-	ioat->started = ioat->enqueued;
-}
-
-/**
- * @internal
- * Returns the index of the last completed operation.
- */
-static inline int
-rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
-{
-	uint64_t status = ioat->status;
-
-	/* lower 3 bits indicate "transfer status" : active, idle, halted.
-	 * We can ignore bit 0.
-	 */
-	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
-	return (status - ioat->ring_addr) >> 6;
-}
+rte_ioat_do_copies(int dev_id);
 
 /**
  * Returns details of copy operations that have been completed
@@ -206,49 +114,9 @@ rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
  */
 static inline int
 rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
-		uintptr_t *src_hdls, uintptr_t *dst_hdls)
-{
-	struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
-	unsigned short mask = (ioat->ring_size - 1);
-	unsigned short read = ioat->next_read;
-	unsigned short end_read, count;
-	int error;
-	int i = 0;
-
-	end_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;
-	count = (end_read - (read & mask)) & mask;
-
-	if (error) {
-		rte_errno = EIO;
-		return -1;
-	}
-
-	if (ioat->hdls_disable) {
-		read += count;
-		goto end;
-	}
-
-	if (count > max_copies)
-		count = max_copies;
-
-	for (; i < count - 1; i += 2, read += 2) {
-		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
-		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);
+		uintptr_t *src_hdls, uintptr_t *dst_hdls);
 
-		_mm_storeu_si128((void *)&src_hdls[i],
-				_mm_unpacklo_epi64(hdls0, hdls1));
-		_mm_storeu_si128((void *)&dst_hdls[i],
-				_mm_unpackhi_epi64(hdls0, hdls1));
-	}
-	for (; i < count; i++, read++) {
-		uintptr_t *hdls = (void *)&ioat->hdls[read & mask];
-		src_hdls[i] = hdls[0];
-		dst_hdls[i] = hdls[1];
-	}
-end:
-	ioat->next_read = read;
-	ioat->completed += count;
-	return count;
-}
+/* include the implementation details from a separate file */
+#include "rte_ioat_rawdev_fns.h"
 
 #endif /* _RTE_IOAT_RAWDEV_H_ */
diff --git a/drivers/raw/ioat/rte_ioat_rawdev_fns.h b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
new file mode 100644
index 0000000000..06b4edcbb0
--- /dev/null
+++ b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Intel Corporation
+ */
+#ifndef _RTE_IOAT_RAWDEV_FNS_H_
+#define _RTE_IOAT_RAWDEV_FNS_H_
+
+#include 
+#include 
+#include 
+#include "rte_ioat_spec.h"
+
+/**
+ * @internal
+ * Structure representing a device instance
+ */
+struct rte_ioat_rawdev {
+	struct rte_rawdev *rawdev;
+	const struct rte_memzone *mz;
+	const struct rte_memzone *desc_mz;
+
+	volatile struct rte_ioat_registers *regs;
+	phys_addr_t status_addr;
+	phys_addr_t ring_addr;
+
+	unsigned short ring_size;
+	bool hdls_disable;
+	struct rte_ioat_generic_hw_desc *desc_ring;
+	__m128i *hdls; /* completion handles for returning to user */
+
+
+	unsigned short next_read;
+	unsigned short next_write;
+
+	/* some statistics for tracking, if added/changed update xstats fns*/
+	uint64_t enqueue_failed __rte_cache_aligned;
+	uint64_t enqueued;
+	uint64_t started;
+	uint64_t completed;
+
+	/* to report completions, the device will write status back here */
+	volatile uint64_t status __rte_cache_aligned;
+};
+
+/**
+ * Enqueue a copy operation onto the ioat device
+ */
+static inline int
+rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
+		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl,
+		int fence)
+{
+	struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
+	unsigned short read = ioat->next_read;
+	unsigned short write = ioat->next_write;
+	unsigned short mask = ioat->ring_size - 1;
+	unsigned short space = mask + read - write;
+	struct rte_ioat_generic_hw_desc *desc;
+
+	if (space == 0) {
+		ioat->enqueue_failed++;
+		return 0;
+	}
+
+	ioat->next_write = write + 1;
+	write &= mask;
+
+	desc = &ioat->desc_ring[write];
+	desc->size = length;
+	/* set descriptor write-back every 16th descriptor */
+	desc->u.control_raw = (uint32_t)((!!fence << 4) | (!(write & 0xF)) << 3);
+	desc->src_addr = src;
+	desc->dest_addr = dst;
+
+	if (!ioat->hdls_disable)
+		ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
+				(int64_t)src_hdl);
+	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);
+
+	ioat->enqueued++;
+	return 1;
+}
+
+/**
+ * Trigger hardware to begin performing enqueued copy operations
+ */
+static inline void
+rte_ioat_do_copies(int dev_id)
+{
+	struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
+	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
+			.control.completion_update = 1;
+	rte_compiler_barrier();
+	ioat->regs->dmacount = ioat->next_write;
+	ioat->started = ioat->enqueued;
+}
+
+/**
+ * @internal
+ * Returns the index of the last completed operation.
+ */
+static inline int
+rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
+{
+	uint64_t status = ioat->status;
+
+	/* lower 3 bits indicate "transfer status" : active, idle, halted.
+	 * We can ignore bit 0.
+	 */
+	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
+	return (status - ioat->ring_addr) >> 6;
+}
+
+/**
+ * Returns details of copy operations that have been completed
+ */
+static inline int
+rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
+		uintptr_t *src_hdls, uintptr_t *dst_hdls)
+{
+	struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
+	unsigned short mask = (ioat->ring_size - 1);
+	unsigned short read = ioat->next_read;
+	unsigned short end_read, count;
+	int error;
+	int i = 0;
+
+	end_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;
+	count = (end_read - (read & mask)) & mask;
+
+	if (error) {
+		rte_errno = EIO;
+		return -1;
+	}
+
+	if (ioat->hdls_disable) {
+		read += count;
+		goto end;
+	}
+
+	if (count > max_copies)
+		count = max_copies;
+
+	for (; i < count - 1; i += 2, read += 2) {
+		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
+		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);
+
+		_mm_storeu_si128((void *)&src_hdls[i],
+				_mm_unpacklo_epi64(hdls0, hdls1));
+		_mm_storeu_si128((void *)&dst_hdls[i],
+				_mm_unpackhi_epi64(hdls0, hdls1));
+	}
+	for (; i < count; i++, read++) {
+		uintptr_t *hdls = (void *)&ioat->hdls[read & mask];
+		src_hdls[i] = hdls[0];
+		dst_hdls[i] = hdls[1];
+	}
+
+end:
+	ioat->next_read = read;
+	ioat->completed += count;
+	return count;
+}
+
+#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */
-- 
2.25.1