From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: cheng1.jiang@intel.com, patrick.fu@intel.com, kevin.laatz@intel.com,
	Bruce Richardson <bruce.richardson@intel.com>
Date: Tue, 21 Jul 2020 10:51:35 +0100
Message-Id: <20200721095140.719297-16-bruce.richardson@intel.com>
In-Reply-To: <20200721095140.719297-1-bruce.richardson@intel.com>
References: <20200721095140.719297-1-bruce.richardson@intel.com>
Subject: [dpdk-dev] [PATCH 20.11 15/20] raw/ioat: add data path support for
	idxd devices
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

Add support for doing copies using DSA hardware. This is implemented by
switching on the device type field at the start of the inline functions.
Since there is no hardware which will have both device types present,
this branch will always be predictable after the first call, meaning it
has little to no performance penalty.
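As an illustration of the unchanged public data path (not part of this
patch), a minimal polling copy might look as follows. The helper name and
setup are hypothetical: it assumes dev_id refers to an ioat/idxd rawdev
that has already been configured and started, and that src/dst were
allocated with rte_malloc() so rte_malloc_virt2iova() is valid. The same
code runs on both device types, since the dispatch happens inside the
inline functions.

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_ioat_rawdev.h>

/* Hypothetical helper: copy one rte_malloc()'d buffer via rawdev "dev_id"
 * and poll until the hardware reports it complete.
 */
static int
copy_one_buffer(int dev_id, void *src, void *dst, unsigned int len)
{
	uintptr_t src_hdls[1], dst_hdls[1];
	int ret;

	/* enqueue returns 1 on success, 0 with rte_errno = ENOSPC when full */
	if (rte_ioat_enqueue_copy(dev_id,
			rte_malloc_virt2iova(src), rte_malloc_virt2iova(dst),
			len, (uintptr_t)src, (uintptr_t)dst, 0 /* no fence */) != 1)
		return -rte_errno;

	/* ring the doorbell (ioat) or submit the pending batch (idxd) */
	rte_ioat_do_copies(dev_id);

	/* poll for completion and retrieve the handles passed at enqueue */
	do {
		ret = rte_ioat_completed_copies(dev_id, 1, src_hdls, dst_hdls);
	} while (ret == 0);

	return ret < 0 ? -1 : 0;
}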
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/raw/ioat/ioat_common.c         |   1 +
 drivers/raw/ioat/ioat_rawdev.c         |   1 +
 drivers/raw/ioat/rte_ioat_rawdev_fns.h | 165 +++++++++++++++++++++++--
 3 files changed, 158 insertions(+), 9 deletions(-)

diff --git a/drivers/raw/ioat/ioat_common.c b/drivers/raw/ioat/ioat_common.c
index 699661e27..33e5bb4a6 100644
--- a/drivers/raw/ioat/ioat_common.c
+++ b/drivers/raw/ioat/ioat_common.c
@@ -143,6 +143,7 @@ idxd_rawdev_create(const char *name, struct rte_device *dev,
 
 	idxd = rawdev->dev_private;
 	*idxd = *base_idxd; /* copy over the main fields already passed in */
+	idxd->public.type = RTE_IDXD_DEV;
 	idxd->rawdev = rawdev;
 	idxd->mz = mz;
 
diff --git a/drivers/raw/ioat/ioat_rawdev.c b/drivers/raw/ioat/ioat_rawdev.c
index 8f9c8b56f..48fe32d0a 100644
--- a/drivers/raw/ioat/ioat_rawdev.c
+++ b/drivers/raw/ioat/ioat_rawdev.c
@@ -252,6 +252,7 @@ ioat_rawdev_create(const char *name, struct rte_pci_device *dev)
 	rawdev->driver_name = dev->device.driver->name;
 
 	ioat = rawdev->dev_private;
+	ioat->type = RTE_IOAT_DEV;
 	ioat->rawdev = rawdev;
 	ioat->mz = mz;
 	ioat->regs = dev->mem_resource[0].addr;
diff --git a/drivers/raw/ioat/rte_ioat_rawdev_fns.h b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
index 7090ac0a1..98af40894 100644
--- a/drivers/raw/ioat/rte_ioat_rawdev_fns.h
+++ b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
@@ -194,8 +194,8 @@ struct rte_idxd_rawdev {
 /**
  * Enqueue a copy operation onto the ioat device
  */
-static inline int
-rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
+static __rte_always_inline int
+__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
 		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl,
 		int fence)
 {
@@ -233,8 +233,8 @@ rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
 /**
  * Trigger hardware to begin performing enqueued copy operations
  */
-static inline void
-rte_ioat_do_copies(int dev_id)
+static __rte_always_inline void
+__ioat_perform_ops(int dev_id)
 {
 	struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
 	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
@@ -248,8 +248,8 @@ rte_ioat_do_copies(int dev_id)
  * @internal
  * Returns the index of the last completed operation.
  */
-static inline int
-rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
+static __rte_always_inline int
+__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
 {
 	uint64_t status = ioat->status;
 
@@ -263,8 +263,8 @@ rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
 /**
  * Returns details of copy operations that have been completed
  */
-static inline int
-rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
+static __rte_always_inline int
+__ioat_completed_ops(int dev_id, uint8_t max_copies,
 		uintptr_t *src_hdls, uintptr_t *dst_hdls)
 {
 	struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
@@ -274,7 +274,7 @@ rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
 	int error;
 	int i = 0;
 
-	end_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;
+	end_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;
 	count = (end_read - (read & mask)) & mask;
 
 	if (error) {
@@ -311,4 +311,151 @@ rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
 	return count;
 }
 
+static __rte_always_inline int
+__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
+		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl,
+		int fence __rte_unused)
+{
+	struct rte_idxd_rawdev *idxd = rte_rawdevs[dev_id].dev_private;
+	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];
+	uint32_t op_flags = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |
+			IDXD_FLAG_CACHE_CONTROL;
+
+	/* check for room in the handle ring */
+	if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl) {
+		rte_errno = ENOSPC;
+		return 0;
+	}
+	if (b->op_count >= BATCH_SIZE) {
+		/* TODO change to submit batch and move on */
+		rte_errno = ENOSPC;
+		return 0;
+	}
+	/* check that we can actually use the current batch */
+	if (b->submitted) {
+		rte_errno = ENOSPC;
+		return 0;
+	}
+
+	/* write the descriptor */
+	b->ops[b->op_count++] = (struct rte_idxd_hw_desc){
+		.op_flags = op_flags,
+		.src = src,
+		.dst = dst,
+		.size = length
+	};
+
+	/* store the completion details */
+	if (!idxd->hdls_disable)
+		idxd->hdl_ring[idxd->next_free_hdl] = (struct rte_idxd_user_hdl) {
+			.src = src_hdl,
+			.dst = dst_hdl
+		};
+	if (++idxd->next_free_hdl == idxd->hdl_ring_sz)
+		idxd->next_free_hdl = 0;
+
+	return 1;
+}
+
+static __rte_always_inline void
+__idxd_movdir64b(volatile void *dst, const void *src)
+{
+	asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
+			:
+			: "a" (dst), "d" (src));
+}
+
+static __rte_always_inline void
+__idxd_perform_ops(int dev_id)
+{
+	struct rte_idxd_rawdev *idxd = rte_rawdevs[dev_id].dev_private;
+	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];
+
+	if (b->submitted || b->op_count == 0)
+		return;
+	b->hdl_end = idxd->next_free_hdl;
+	b->comp.status = 0;
+	b->submitted = 1;
+	b->batch_desc.size = b->op_count + 1;
+	__idxd_movdir64b(idxd->portal, &b->batch_desc);
+
+	if (++idxd->next_batch == idxd->batch_ring_sz)
+		idxd->next_batch = 0;
+}
+
+static __rte_always_inline int
+__idxd_completed_ops(int dev_id, uint8_t max_ops,
+		uintptr_t *src_hdls, uintptr_t *dst_hdls)
+{
+	struct rte_idxd_rawdev *idxd = rte_rawdevs[dev_id].dev_private;
+	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_completed];
+	uint16_t h_idx = idxd->next_ret_hdl;
+	int n = 0;
+
+	while (b->submitted && b->comp.status != 0) {
+		idxd->last_completed_hdl = b->hdl_end;
+		b->submitted = 0;
+		b->op_count = 0;
+		if (++idxd->next_completed == idxd->batch_ring_sz)
+			idxd->next_completed = 0;
+		b = &idxd->batch_ring[idxd->next_completed];
+	}
+
+	if (!idxd->hdls_disable)
+		for (n = 0; n < max_ops && h_idx != idxd->last_completed_hdl; n++) {
+			src_hdls[n] = idxd->hdl_ring[h_idx].src;
+			dst_hdls[n] = idxd->hdl_ring[h_idx].dst;
+			if (++h_idx == idxd->hdl_ring_sz)
+				h_idx = 0;
+		}
+	else
+		while (h_idx != idxd->last_completed_hdl) {
+			n++;
+			if (++h_idx == idxd->hdl_ring_sz)
+				h_idx = 0;
+		}
+
+	idxd->next_ret_hdl = h_idx;
+
+	return n;
+}
+
+static inline int
+rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
+		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl,
+		int fence)
+{
+	enum rte_ioat_dev_type *type = rte_rawdevs[dev_id].dev_private;
+	if (*type == RTE_IDXD_DEV)
+		return __idxd_enqueue_copy(dev_id, src, dst, length,
+				src_hdl, dst_hdl, fence);
+	else
+		return __ioat_enqueue_copy(dev_id, src, dst, length,
+				src_hdl, dst_hdl, fence);
+}
+
+static inline void
+rte_ioat_do_copies(int dev_id)
+{
+	enum rte_ioat_dev_type *type = rte_rawdevs[dev_id].dev_private;
+	if (*type == RTE_IDXD_DEV)
+		return __idxd_perform_ops(dev_id);
+	else
+		return __ioat_perform_ops(dev_id);
+}
+
+static inline int
+rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
+		uintptr_t *src_hdls, uintptr_t *dst_hdls)
+{
+	enum rte_ioat_dev_type *type = rte_rawdevs[dev_id].dev_private;
+	if (*type == RTE_IDXD_DEV)
+		return __idxd_completed_ops(dev_id, max_copies,
+				src_hdls, dst_hdls);
+	else
+		return __ioat_completed_ops(dev_id, max_copies,
+				src_hdls, dst_hdls);
+}
+
+
 #endif /* _RTE_IOAT_RAWDEV_FNS_H_ */
-- 
2.25.1