From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 87EC0A0C4E; Tue, 2 Nov 2021 10:21:25 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 4A3154068F; Tue, 2 Nov 2021 10:21:25 +0100 (CET) Received: from szxga01-in.huawei.com (szxga01-in.huawei.com [45.249.212.187]) by mails.dpdk.org (Postfix) with ESMTP id F021840689 for ; Tue, 2 Nov 2021 10:21:23 +0100 (CET) Received: from dggemv704-chm.china.huawei.com (unknown [172.30.72.55]) by szxga01-in.huawei.com (SkyGuard) with ESMTP id 4Hk45x2rD4zcZyP; Tue, 2 Nov 2021 17:16:37 +0800 (CST) Received: from dggpeml500024.china.huawei.com (7.185.36.10) by dggemv704-chm.china.huawei.com (10.3.19.47) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2308.15; Tue, 2 Nov 2021 17:21:20 +0800 Received: from [127.0.0.1] (10.67.100.224) by dggpeml500024.china.huawei.com (7.185.36.10) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2308.15; Tue, 2 Nov 2021 17:21:20 +0800 To: Gagandeep Singh , , CC: References: <20210909111500.3901706-1-g.singh@nxp.com> <20211101085143.2472241-1-g.singh@nxp.com> <20211101085143.2472241-5-g.singh@nxp.com> From: fengchengwen Message-ID: <66f583fa-c9b0-ee04-2ec2-224517b7751f@huawei.com> Date: Tue, 2 Nov 2021 17:21:20 +0800 User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:68.0) Gecko/20100101 Thunderbird/68.11.0 MIME-Version: 1.0 In-Reply-To: <20211101085143.2472241-5-g.singh@nxp.com> Content-Type: text/plain; charset="utf-8" Content-Language: en-US Content-Transfer-Encoding: 7bit X-Originating-IP: [10.67.100.224] X-ClientProxiedBy: dggems703-chm.china.huawei.com (10.3.19.180) To dggpeml500024.china.huawei.com (7.185.36.10) X-CFilter-Loop: Reflected Subject: Re: [dpdk-dev] [PATCH v2 4/6] dma/dpaa: support basic operations X-BeenThere: dev@dpdk.org 
X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" On 2021/11/1 16:51, Gagandeep Singh wrote: > This patch support basic DMA operations which includes > device capability and channel setup. > > Signed-off-by: Gagandeep Singh > --- > drivers/dma/dpaa/dpaa_qdma.c | 185 +++++++++++++++++++++++++++++++++++ > drivers/dma/dpaa/dpaa_qdma.h | 6 ++ > 2 files changed, 191 insertions(+) > > diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c > index 7808b3de7f..0240f40907 100644 > --- a/drivers/dma/dpaa/dpaa_qdma.c > +++ b/drivers/dma/dpaa/dpaa_qdma.c > @@ -8,6 +8,18 @@ > #include "dpaa_qdma.h" > #include "dpaa_qdma_logs.h" > > +static inline void > +qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr) > +{ > + ccdf->addr_hi = upper_32_bits(addr); > + ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr)); > +} > + > +static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len) "static inline void" should stay on a separate line, like the function above. > +{ > + csgf->cfg = rte_cpu_to_le_32(len & QDMA_SG_LEN_MASK); > +} > + > static inline int ilog2(int x) > { > int log = 0; > @@ -84,6 +96,64 @@ static void fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan) > finally: > fsl_qdma->desc_allocated--; > } > + > +/* > + * Pre-request command descriptor and compound S/G for enqueue. 
> + */ > +static int fsl_qdma_pre_request_enqueue_comp_sd_desc( > + struct fsl_qdma_queue *queue, > + int size, int aligned) > +{ > + struct fsl_qdma_comp *comp_temp; > + struct fsl_qdma_sdf *sdf; > + struct fsl_qdma_ddf *ddf; > + struct fsl_qdma_format *csgf_desc; > + int i; > + > + for (i = 0; i < (int)(queue->n_cq + COMMAND_QUEUE_OVERFLLOW); i++) { > + comp_temp = rte_zmalloc("qdma: comp temp", > + sizeof(*comp_temp), 0); > + if (!comp_temp) > + return -ENOMEM; > + > + comp_temp->virt_addr = > + dma_pool_alloc(size, aligned, &comp_temp->bus_addr); > + if (!comp_temp->virt_addr) { > + rte_free(comp_temp); > + return -ENOMEM; > + } > + > + comp_temp->desc_virt_addr = > + dma_pool_alloc(size, aligned, &comp_temp->desc_bus_addr); > + if (!comp_temp->desc_virt_addr) Please free comp_temp->virt_addr and comp_temp here, and also free the resources allocated in previous loop iterations for this queue, when allocation fails. > + return -ENOMEM; > + > + memset(comp_temp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE); > + memset(comp_temp->desc_virt_addr, 0, > + FSL_QDMA_DESCRIPTOR_BUFFER_SIZE); > + > + csgf_desc = (struct fsl_qdma_format *)comp_temp->virt_addr + 1; > + sdf = (struct fsl_qdma_sdf *)comp_temp->desc_virt_addr; > + ddf = (struct fsl_qdma_ddf *)comp_temp->desc_virt_addr + 1; > + /* Compound Command Descriptor(Frame List Table) */ > + qdma_desc_addr_set64(csgf_desc, comp_temp->desc_bus_addr); > + /* It must be 32 as Compound S/G Descriptor */ > + qdma_csgf_set_len(csgf_desc, 32); > + /* Descriptor Buffer */ > + sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE << > + FSL_QDMA_CMD_RWTTYPE_OFFSET); > + ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE << > + FSL_QDMA_CMD_RWTTYPE_OFFSET); > + ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC << > + FSL_QDMA_CMD_LWC_OFFSET); > + > + list_add_tail(&comp_temp->list, &queue->comp_free); > + } > + > + return 0;> +} > + > + > static struct fsl_qdma_queue > *fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma) > { > @@ -311,6 +381,79 @@ static int fsl_qdma_reg_init(struct fsl_qdma_engine 
*fsl_qdma) > return 0; > } > > +static int fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan) > +{ > + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; > + struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma; > + int ret; > + > + if (fsl_queue->count++) > + goto finally; > + > + INIT_LIST_HEAD(&fsl_queue->comp_free); > + INIT_LIST_HEAD(&fsl_queue->comp_used); > + > + ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue, > + FSL_QDMA_COMMAND_BUFFER_SIZE, 64); > + if (ret) { > + DPAA_QDMA_ERR( > + "failed to alloc dma buffer for comp descriptor\n"); > + goto exit; > + } > + > +finally: > + return fsl_qdma->desc_allocated++; > + > +exit: > + return -ENOMEM; > +} > + > +static int > +dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info, > + uint32_t info_sz) > +{ > +#define DPAADMA_MAX_DESC 64 > +#define DPAADMA_MIN_DESC 64 > + > + RTE_SET_USED(dev); > + RTE_SET_USED(info_sz); > + > + dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | > + RTE_DMA_CAPA_MEM_TO_DEV | > + RTE_DMA_CAPA_DEV_TO_DEV | > + RTE_DMA_CAPA_DEV_TO_MEM | > + RTE_DMA_CAPA_SILENT | > + RTE_DMA_CAPA_OPS_COPY; > + dev_info->max_vchans = 1; > + dev_info->max_desc = DPAADMA_MAX_DESC; > + dev_info->min_desc = DPAADMA_MIN_DESC; > + > + return 0; > +} > + > +static int > +dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma, uint16_t vchan) > +{ > + u32 i, start, end; > + > + start = fsl_qdma->free_block_id * QDMA_QUEUES; > + fsl_qdma->free_block_id++; > + > + end = start + 1; > + for (i = start; i < end; i++) { > + struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i]; > + > + if (fsl_chan->free) { > + fsl_chan->free = false; > + fsl_qdma_alloc_chan_resources(fsl_chan); Why not check the return code of fsl_qdma_alloc_chan_resources()? 
> + fsl_qdma->vchan_map[vchan] = i; > + return 0; > + } > + } > + > + return -1; > +} > + > static void > dma_release(void *fsl_chan) > { > @@ -318,6 +461,45 @@ dma_release(void *fsl_chan) > fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan); > } > > +static int > +dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev, > + __rte_unused const struct rte_dma_conf *dev_conf, > + __rte_unused uint32_t conf_sz) > +{ > + return 0; > +} > + > +static int > +dpaa_qdma_start(__rte_unused struct rte_dma_dev *dev) > +{ > + return 0; > +} > + > +static int > +dpaa_qdma_close(__rte_unused struct rte_dma_dev *dev) > +{ > + return 0; > +} > + > +static int > +dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev, > + uint16_t vchan, > + __rte_unused const struct rte_dma_vchan_conf *conf, > + __rte_unused uint32_t conf_sz) > +{ > + struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private; > + > + return dpaa_get_channel(fsl_qdma, vchan); > +} > + > +static struct rte_dma_dev_ops dpaa_qdma_ops = { > + .dev_info_get = dpaa_info_get, > + .dev_configure = dpaa_qdma_configure, > + .dev_start = dpaa_qdma_start, > + .dev_close = dpaa_qdma_close, > + .vchan_setup = dpaa_qdma_queue_setup, > +}; > + > static int > dpaa_qdma_init(struct rte_dma_dev *dmadev) > { > @@ -424,6 +606,9 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv, > } > > dpaa_dev->dmadev = dmadev; > + dmadev->dev_ops = &dpaa_qdma_ops; > + dmadev->device = &dpaa_dev->device; > + dmadev->fp_obj->dev_private = dmadev->data->dev_private; > > /* Invoke PMD device initialization function */ > ret = dpaa_qdma_init(dmadev); > diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h > index cc0d1f114e..f482b16334 100644 > --- a/drivers/dma/dpaa/dpaa_qdma.h > +++ b/drivers/dma/dpaa/dpaa_qdma.h > @@ -8,6 +8,12 @@ > #define CORE_NUMBER 4 > #define RETRIES 5 > > +#ifndef GENMASK > +#define BITS_PER_LONG (__SIZEOF_LONG__ * 8) > +#define GENMASK(h, l) \ > + (((~0UL) << (l)) & (~0UL >> 
(BITS_PER_LONG - 1 - (h)))) > +#endif > + > #define FSL_QDMA_DMR 0x0 > #define FSL_QDMA_DSR 0x4 > #define FSL_QDMA_DEIER 0xe00 >