From: Chengwen Feng <fengchengwen@huawei.com>
To: dev@dpdk.org
Subject: [PATCH 2/2] dma/skeleton: support fill ops
Date: Fri, 26 Jan 2024 08:57:26 +0000
Message-ID: <20240126085726.54581-3-fengchengwen@huawei.com>
In-Reply-To: <20240126085726.54581-1-fengchengwen@huawei.com>
References: <20240126085726.54581-1-fengchengwen@huawei.com>

Add support for fill operation.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
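Note for context (illustrative only, not part of this patch): once RTE_DMA_CAPA_OPS_FILL is advertised, an application can drive the new fill op through the generic dmadev API roughly as in the sketch below. It assumes a dma_skeleton vdev that has already been configured and started with a single vchan; demo_fill(), the pattern value and the buffer size are made up for illustration.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#include <rte_cycles.h>
#include <rte_dmadev.h>
#include <rte_malloc.h>

static int
demo_fill(int16_t dev_id, uint16_t vchan)
{
	uint64_t pattern = 0x0123456789abcdefULL; /* replicated byte-wise into dst */
	uint32_t len = 4096;
	uint16_t last_idx;
	bool error = false;
	uint8_t *dst;
	int ret;

	dst = rte_zmalloc(NULL, len, 0);
	if (dst == NULL)
		return -ENOMEM;

	/* Enqueue the fill and ring the doorbell in one call (SVA: a VA is valid). */
	ret = rte_dma_fill(dev_id, vchan, pattern, (rte_iova_t)(uintptr_t)dst,
			   len, RTE_DMA_OP_FLAG_SUBMIT);
	if (ret < 0) {
		rte_free(dst);
		return ret;
	}

	/* Poll until the driver's worker thread reports the op completed. */
	while (rte_dma_completed(dev_id, vchan, 1, &last_idx, &error) == 0)
		rte_delay_us_sleep(10);

	rte_free(dst);
	return error ? -EIO : 0;
}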
 drivers/dma/skeleton/skeleton_dmadev.c | 53 +++++++++++++++++++++++---
 drivers/dma/skeleton/skeleton_dmadev.h | 16 +++++---
 2 files changed, 59 insertions(+), 10 deletions(-)

diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
index d1d257a064..48f88f9fc1 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.c
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -38,7 +38,8 @@ skeldma_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
 			     RTE_DMA_CAPA_SVA |
 			     RTE_DMA_CAPA_OPS_COPY |
-			     RTE_DMA_CAPA_OPS_COPY_SG;
+			     RTE_DMA_CAPA_OPS_COPY_SG |
+			     RTE_DMA_CAPA_OPS_FILL;
 	dev_info->max_vchans = 1;
 	dev_info->max_desc = SKELDMA_MAX_DESC;
 	dev_info->min_desc = SKELDMA_MIN_DESC;
@@ -100,8 +101,19 @@ do_copy_sg(struct skeldma_desc *desc)
 	}
 }
 
+static inline void
+do_fill(struct skeldma_desc *desc)
+{
+	uint8_t *fills = (uint8_t *)&desc->fill.pattern;
+	uint8_t *dst = (uint8_t *)desc->fill.dst;
+	uint32_t i;
+
+	for (i = 0; i < desc->fill.len; i++)
+		dst[i] = fills[i % 8];
+}
+
 static uint32_t
-cpucopy_thread(void *param)
+cpuwork_thread(void *param)
 {
 #define SLEEP_THRESHOLD 10000
 #define SLEEP_US_VAL 10
@@ -127,6 +139,8 @@ cpucopy_thread(void *param)
 			rte_memcpy(desc->copy.dst, desc->copy.src, desc->copy.len);
 		else if (desc->op == SKELDMA_OP_COPY_SG)
 			do_copy_sg(desc);
+		else if (desc->op == SKELDMA_OP_FILL)
+			do_fill(desc);
 
 		__atomic_fetch_add(&hw->completed_count, 1, __ATOMIC_RELEASE);
 		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
@@ -162,7 +176,7 @@ skeldma_start(struct rte_dma_dev *dev)
 	 * 1) fflush pending/running/completed ring to empty ring.
 	 * 2) init ring idx to zero.
 	 * 3) init running statistics.
-	 * 4) mark cpucopy task exit_flag to false.
+	 * 4) mark cpuwork task exit_flag to false.
 	 */
 	fflush_ring(hw, hw->desc_pending);
 	fflush_ring(hw, hw->desc_running);
@@ -178,9 +192,9 @@ skeldma_start(struct rte_dma_dev *dev)
 
 	snprintf(name, sizeof(name), "dma-skel%d", dev->data->dev_id);
 	ret = rte_thread_create_internal_control(&hw->thread, name,
-						 cpucopy_thread, dev);
+						 cpuwork_thread, dev);
 	if (ret) {
-		SKELDMA_LOG(ERR, "Start cpucopy thread fail!");
+		SKELDMA_LOG(ERR, "Start cpuwork thread fail!");
 		return -EINVAL;
 	}
 
@@ -462,6 +476,34 @@ skeldma_copy_sg(void *dev_private, uint16_t vchan,
 	return hw->ridx++;
 }
 
+static int
+skeldma_fill(void *dev_private, uint16_t vchan,
+	     uint64_t pattern, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct skeldma_hw *hw = dev_private;
+	struct skeldma_desc *desc;
+	int ret;
+
+	RTE_SET_USED(vchan);
+
+	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
+	if (ret)
+		return -ENOSPC;
+	desc->op = SKELDMA_OP_FILL;
+	desc->ridx = hw->ridx;
+	desc->fill.dst = (void *)(uintptr_t)dst;
+	desc->fill.len = length;
+	desc->fill.pattern = pattern;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+		submit(hw, desc);
+	else
+		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
+	hw->submitted_count++;
+
+	return hw->ridx++;
+}
+
 static int
 skeldma_submit(void *dev_private, uint16_t vchan)
 {
@@ -573,6 +615,7 @@ skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
 	dev->fp_obj->dev_private = dev->data->dev_private;
 	dev->fp_obj->copy = skeldma_copy;
 	dev->fp_obj->copy_sg = skeldma_copy_sg;
+	dev->fp_obj->fill = skeldma_fill;
 	dev->fp_obj->submit = skeldma_submit;
 	dev->fp_obj->completed = skeldma_completed;
 	dev->fp_obj->completed_status = skeldma_completed_status;
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
index 7d32dd5095..c9bf3153ba 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.h
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -16,6 +16,7 @@
 enum skeldma_op {
 	SKELDMA_OP_COPY,
 	SKELDMA_OP_COPY_SG,
+	SKELDMA_OP_FILL,
 };
 
 struct skeldma_desc {
@@ -34,14 +35,19 @@ struct skeldma_desc {
 		uint16_t nb_src;
 		uint16_t nb_dst;
 	} copy_sg;
+	struct {
+		void *dst;
+		uint32_t len;
+		uint64_t pattern;
+	} fill;
 	};
 };
 
 struct skeldma_hw {
-	int lcore_id; /* cpucopy task affinity core */
+	int lcore_id; /* cpuwork task affinity core */
 	int socket_id;
-	rte_thread_t thread; /* cpucopy task thread */
-	volatile int exit_flag; /* cpucopy task exit flag */
+	rte_thread_t thread; /* cpuwork task thread */
+	volatile int exit_flag; /* cpuwork task exit flag */
 
 	struct skeldma_desc *desc_mem;
 
@@ -57,7 +63,7 @@ struct skeldma_hw {
 	 * |get completed |------------------|               |
 	 * |              |                  |               |
 	 * |              v                  v
-	 * -----------   cpucopy thread working   -----------
+	 * -----------   cpuwork thread working   -----------
 	 * |completed|<-------------------------------| running |
 	 * -----------                                -----------
 	 */
@@ -72,7 +78,7 @@ struct skeldma_hw {
 	uint16_t last_ridx;
 	uint64_t submitted_count;
 
-	/* Cache delimiter for cpucopy thread's operation data */
+	/* Cache delimiter for cpuwork thread's operation data */
 	char cache2 __rte_cache_aligned;
 	volatile uint32_t zero_req_count;
 	uint64_t completed_count;
-- 
2.17.1
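Behavior note on do_fill() above (illustrative, not part of the patch): the 64-bit pattern is replicated into the destination one byte at a time in host byte order, and a length that is not a multiple of 8 simply truncates the last repetition. A standalone sketch of the same loop, using a hypothetical fill_bytes() helper:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the driver loop: dst[i] = ((uint8_t *)&pattern)[i % 8]. */
static void
fill_bytes(uint8_t *dst, uint32_t len, uint64_t pattern)
{
	uint8_t fills[8];
	uint32_t i;

	memcpy(fills, &pattern, sizeof(fills));
	for (i = 0; i < len; i++)
		dst[i] = fills[i % 8];
}

int
main(void)
{
	uint8_t buf[11];
	uint32_t i;

	fill_bytes(buf, sizeof(buf), 0x0123456789abcdefULL);
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]); /* ef cd ab 89 67 45 23 01 ef cd ab on little-endian */
	printf("\n");
	return 0;
}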