From mboxrd@z Thu Jan 1 00:00:00 1970
From: Chengwen Feng
To: 
CC: 
Date: Tue, 2 Nov 2021 20:37:41 +0800
Message-ID: <20211102123743.13497-5-fengchengwen@huawei.com>
X-Mailer: git-send-email 2.33.0
In-Reply-To: <20211102123743.13497-1-fengchengwen@huawei.com>
References: <20211030103619.29924-1-fengchengwen@huawei.com>
 <20211102123743.13497-1-fengchengwen@huawei.com>
MIME-Version: 1.0
Content-Type: text/plain
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [PATCH v2 4/6] dma/hisilicon: add data path functions
List-Id: DPDK patches and discussions
Sender: "dev" <dev-bounces@dpdk.org>

This patch adds data path functions for Kunpeng DMA devices.

Signed-off-by: Chengwen Feng
---
 drivers/dma/hisilicon/hisi_dmadev.c | 206 ++++++++++++++++++++++++++++
 drivers/dma/hisilicon/hisi_dmadev.h |  16 +++
 2 files changed, 222 insertions(+)

diff --git a/drivers/dma/hisilicon/hisi_dmadev.c b/drivers/dma/hisilicon/hisi_dmadev.c
index bcdcf4de4b..d03967cae3 100644
--- a/drivers/dma/hisilicon/hisi_dmadev.c
+++ b/drivers/dma/hisilicon/hisi_dmadev.c
@@ -529,6 +529,206 @@ hisi_dma_dump(const struct rte_dma_dev *dev, FILE *f)
 	return 0;
 }
 
+static int
+hisi_dma_copy(void *dev_private, uint16_t vchan,
+	      rte_iova_t src, rte_iova_t dst,
+	      uint32_t length, uint64_t flags)
+{
+	struct hisi_dma_dev *hw = dev_private;
+	struct hisi_dma_sqe *sqe = &hw->sqe[hw->sq_tail];
+
+	RTE_SET_USED(vchan);
+
+	if (((hw->sq_tail + 1) & hw->sq_depth_mask) == hw->sq_head)
+		return -ENOSPC;
+
+	sqe->dw0 = rte_cpu_to_le_32(SQE_OPCODE_M2M);
+	sqe->dw1 = 0;
+	sqe->dw2 = 0;
+	sqe->length = rte_cpu_to_le_32(length);
+	sqe->src_addr = rte_cpu_to_le_64(src);
+	sqe->dst_addr = rte_cpu_to_le_64(dst);
+	hw->sq_tail = (hw->sq_tail + 1) & hw->sq_depth_mask;
+	hw->submitted++;
+
+	if (flags & RTE_DMA_OP_FLAG_FENCE)
+		sqe->dw0 |= rte_cpu_to_le_32(SQE_FENCE_FLAG);
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+		rte_write32(rte_cpu_to_le_32(hw->sq_tail), hw->sq_tail_reg);
+
+	return hw->ridx++;
+}
+
+static int
+hisi_dma_submit(void *dev_private, uint16_t vchan)
+{
+	struct hisi_dma_dev *hw = dev_private;
+
+	RTE_SET_USED(vchan);
+	rte_write32(rte_cpu_to_le_32(hw->sq_tail), hw->sq_tail_reg);
+
+	return 0;
+}
+
+static inline void
+hisi_dma_scan_cq(struct hisi_dma_dev *hw)
+{
+	volatile struct hisi_dma_cqe *cqe;
+	uint16_t csq_head = hw->cq_sq_head;
+	uint16_t cq_head = hw->cq_head;
+	uint16_t count = 0;
+	uint64_t misc;
+
+	while (true) {
+		cqe = &hw->cqe[cq_head];
+		misc = cqe->misc;
+		misc = rte_le_to_cpu_64(misc);
+		if (FIELD_GET(CQE_VALID_B, misc) != hw->cqe_vld)
+			break;
+
+		csq_head = FIELD_GET(CQE_SQ_HEAD_MASK, misc);
+		if (unlikely(misc & CQE_STATUS_MASK))
+			hw->status[csq_head] = FIELD_GET(CQE_STATUS_MASK,
+							 misc);
+
+		count++;
+		cq_head++;
+		if (cq_head == hw->cq_depth) {
+			hw->cqe_vld = !hw->cqe_vld;
+			cq_head = 0;
+		}
+	}
+
+	if (count == 0)
+		return;
+
+	hw->cq_head = cq_head;
+	hw->cq_sq_head = (csq_head + 1) & hw->sq_depth_mask;
+	hw->cqs_completed += count;
+	if (hw->cqs_completed >= HISI_DMA_CQ_RESERVED) {
+		rte_write32(rte_cpu_to_le_32(cq_head), hw->cq_head_reg);
+		hw->cqs_completed = 0;
+	}
+}
+
+static inline uint16_t
+hisi_dma_calc_cpls(struct hisi_dma_dev *hw, const uint16_t nb_cpls)
+{
+	uint16_t cpl_num;
+
+	if (hw->cq_sq_head >= hw->sq_head)
+		cpl_num = hw->cq_sq_head - hw->sq_head;
+	else
+		cpl_num = hw->sq_depth_mask + 1 - hw->sq_head + hw->cq_sq_head;
+
+	if (cpl_num > nb_cpls)
+		cpl_num = nb_cpls;
+
+	return cpl_num;
+}
+
+static uint16_t
+hisi_dma_completed(void *dev_private,
+		   uint16_t vchan, const uint16_t nb_cpls,
+		   uint16_t *last_idx, bool *has_error)
+{
+	struct hisi_dma_dev *hw = dev_private;
+	uint16_t sq_head = hw->sq_head;
+	uint16_t cpl_num, i;
+
+	RTE_SET_USED(vchan);
+	hisi_dma_scan_cq(hw);
+
+	cpl_num = hisi_dma_calc_cpls(hw, nb_cpls);
+	for (i = 0; i < cpl_num; i++) {
+		if (hw->status[sq_head]) {
+			*has_error = true;
+			break;
+		}
+		sq_head = (sq_head + 1) & hw->sq_depth_mask;
+	}
+	if (i > 0) {
+		hw->cridx += i;
+		*last_idx = hw->cridx - 1;
+		hw->sq_head = sq_head;
+	}
+	hw->completed += i;
+
+	return i;
+}
+
+static enum rte_dma_status_code
+hisi_dma_convert_status(uint16_t status)
+{
+	switch (status) {
+	case HISI_DMA_STATUS_SUCCESS:
+		return RTE_DMA_STATUS_SUCCESSFUL;
+	case HISI_DMA_STATUS_INVALID_OPCODE:
+		return RTE_DMA_STATUS_INVALID_OPCODE;
+	case HISI_DMA_STATUS_INVALID_LENGTH:
+		return RTE_DMA_STATUS_INVALID_LENGTH;
+	case HISI_DMA_STATUS_USER_ABORT:
+		return RTE_DMA_STATUS_USER_ABORT;
+	case HISI_DMA_STATUS_REMOTE_READ_ERROR:
+	case HISI_DMA_STATUS_AXI_READ_ERROR:
+		return RTE_DMA_STATUS_BUS_READ_ERROR;
+	case HISI_DMA_STATUS_AXI_WRITE_ERROR:
+		return RTE_DMA_STATUS_BUS_WRITE_ERROR;
+	case HISI_DMA_STATUS_DATA_POISON:
+	case HISI_DMA_STATUS_REMOTE_DATA_POISION:
+		return RTE_DMA_STATUS_DATA_POISION;
+	case HISI_DMA_STATUS_SQE_READ_ERROR:
+	case HISI_DMA_STATUS_SQE_READ_POISION:
+		return RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR;
+	case HISI_DMA_STATUS_LINK_DOWN_ERROR:
+		return RTE_DMA_STATUS_DEV_LINK_ERROR;
+	default:
+		return RTE_DMA_STATUS_ERROR_UNKNOWN;
+	}
+}
+
+static uint16_t
+hisi_dma_completed_status(void *dev_private,
+			  uint16_t vchan, const uint16_t nb_cpls,
+			  uint16_t *last_idx, enum rte_dma_status_code *status)
+{
+	struct hisi_dma_dev *hw = dev_private;
+	uint16_t sq_head = hw->sq_head;
+	uint16_t cpl_num, i;
+
+	RTE_SET_USED(vchan);
+	hisi_dma_scan_cq(hw);
+
+	cpl_num = hisi_dma_calc_cpls(hw, nb_cpls);
+	for (i = 0; i < cpl_num; i++) {
+		status[i] = hisi_dma_convert_status(hw->status[sq_head]);
+		hw->errors += !!status[i];
+		hw->status[sq_head] = HISI_DMA_STATUS_SUCCESS;
+		sq_head = (sq_head + 1) & hw->sq_depth_mask;
+	}
+	if (likely(cpl_num > 0)) {
+		hw->cridx += cpl_num;
+		*last_idx = hw->cridx - 1;
+		hw->sq_head = sq_head;
+	}
+	hw->completed += cpl_num;
+
+	return cpl_num;
+}
+
+static uint16_t
+hisi_dma_burst_capacity(const void *dev_private, uint16_t vchan)
+{
+	const struct hisi_dma_dev *hw = dev_private;
+	uint16_t sq_head = hw->sq_head;
+	uint16_t sq_tail = hw->sq_tail;
+
+	RTE_SET_USED(vchan);
+
+	return (sq_tail >= sq_head) ? hw->sq_depth_mask - sq_tail + sq_head :
+				      sq_head - 1 - sq_tail;
+}
+
 static void
 hisi_dma_gen_pci_device_name(const struct rte_pci_device *pci_dev,
 			     char *name, size_t size)
@@ -597,6 +797,12 @@ hisi_dma_create(struct rte_pci_device *pci_dev, uint8_t queue_id,
 	dev->device = &pci_dev->device;
 	dev->dev_ops = &hisi_dmadev_ops;
+	dev->fp_obj->dev_private = dev->data->dev_private;
+	dev->fp_obj->copy = hisi_dma_copy;
+	dev->fp_obj->submit = hisi_dma_submit;
+	dev->fp_obj->completed = hisi_dma_completed;
+	dev->fp_obj->completed_status = hisi_dma_completed_status;
+	dev->fp_obj->burst_capacity = hisi_dma_burst_capacity;
 
 	hw = dev->data->dev_private;
 	hw->data = dev->data;
diff --git a/drivers/dma/hisilicon/hisi_dmadev.h b/drivers/dma/hisilicon/hisi_dmadev.h
index dd0315cd31..12e209c86e 100644
--- a/drivers/dma/hisilicon/hisi_dmadev.h
+++ b/drivers/dma/hisilicon/hisi_dmadev.h
@@ -115,6 +115,22 @@ enum {
 	HISI_DMA_STATE_RUN,
 };
 
+/**
+ * Hardware complete status define:
+ */
+#define HISI_DMA_STATUS_SUCCESS			0x0
+#define HISI_DMA_STATUS_INVALID_OPCODE		0x1
+#define HISI_DMA_STATUS_INVALID_LENGTH		0x2
+#define HISI_DMA_STATUS_USER_ABORT		0x4
+#define HISI_DMA_STATUS_REMOTE_READ_ERROR	0x10
+#define HISI_DMA_STATUS_AXI_READ_ERROR		0x20
+#define HISI_DMA_STATUS_AXI_WRITE_ERROR		0x40
+#define HISI_DMA_STATUS_DATA_POISON		0x80
+#define HISI_DMA_STATUS_SQE_READ_ERROR		0x100
+#define HISI_DMA_STATUS_SQE_READ_POISION	0x200
+#define HISI_DMA_STATUS_REMOTE_DATA_POISION	0x400
+#define HISI_DMA_STATUS_LINK_DOWN_ERROR		0x800
+
 /**
  * After scanning the CQ array, the CQ head register needs to be updated.
  * Updating the register involves write memory barrier operations.
-- 
2.33.0
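
For context (not part of the patch): below is a minimal sketch of how an application would reach the data path callbacks added above through the generic dmadev API of DPDK 21.11 (rte_dma_copy(), rte_dma_completed(), and friends). It assumes a DMA device identified by dev_id and a virtual channel vchan that were already configured and started elsewhere (rte_dma_configure(), rte_dma_vchan_setup(), rte_dma_start()); the helper name do_one_copy and the use of rte_malloc'd buffers are illustrative assumptions only.

```c
/* Illustrative usage sketch -- not part of this patch. Assumes dev_id/vchan
 * were configured and started elsewhere, and that src/dst point to memory
 * allocated with rte_malloc() so its IOVA can be queried.
 */
#include <errno.h>
#include <stdbool.h>
#include <rte_dmadev.h>
#include <rte_malloc.h>

static int
do_one_copy(int16_t dev_id, uint16_t vchan, void *src, void *dst, uint32_t len)
{
	uint16_t last_idx = 0;
	bool has_error = false;
	int ret;

	/* Enqueue one copy. In this driver it lands in hisi_dma_copy(), which
	 * fills a SQE and, because RTE_DMA_OP_FLAG_SUBMIT is set, also rings
	 * the SQ tail doorbell (otherwise rte_dma_submit() would do it).
	 */
	ret = rte_dma_copy(dev_id, vchan,
			   rte_malloc_virt2iova(src), rte_malloc_virt2iova(dst),
			   len, RTE_DMA_OP_FLAG_SUBMIT);
	if (ret < 0)
		return ret; /* e.g. -ENOSPC when the SQ ring is full */

	/* Busy-poll for the completion. This ends up in hisi_dma_completed(),
	 * which scans the CQ ring and advances the SQ head.
	 */
	while (rte_dma_completed(dev_id, vchan, 1, &last_idx, &has_error) == 0)
		;

	return has_error ? -EIO : 0;
}
```

A real application would normally batch several rte_dma_copy() calls before one rte_dma_submit(), and use rte_dma_burst_capacity() to check ring space, rather than submitting and polling one operation at a time as this sketch does for brevity.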