Subject: Re: [dpdk-dev] [PATCH v2 3/4] dma/cnxk: add dma channel operations
From: fengchengwen
To: Radha Mohan Chintakuntla
Date: Tue, 2 Nov 2021 19:59:57 +0800
Message-ID: <43d57695-78be-96a9-b63a-ff27105f18e1@huawei.com>
In-Reply-To: <20211102034019.28900-3-radhac@marvell.com>

On 2021/11/2 11:40, Radha Mohan Chintakuntla wrote:
> Add functions for the dmadev vchan setup and DMA operations.
>
> Signed-off-by: Radha Mohan Chintakuntla

...
>
> +static int
> +cnxk_dmadev_info_get(const struct rte_dma_dev *dev,
> +		     struct rte_dma_info *dev_info, uint32_t size)
> +{
> +	RTE_SET_USED(dev);
> +	RTE_SET_USED(size);
> +
> +	dev_info->max_vchans = 1;
> +	dev_info->nb_vchans = 1;
> +	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
> +		RTE_DMA_CAPA_MEM_TO_DEV | RTE_DMA_CAPA_DEV_TO_MEM |
> +		RTE_DMA_CAPA_OPS_COPY;
> +	dev_info->max_desc = DPI_MAX_DESC;
> +	dev_info->min_desc = 1;
> +	dev_info->max_sges = DPI_MAX_POINTER;
> +
> +	return 0;
> +}
> +
> +static int
> +cnxk_dmadev_configure(struct rte_dma_dev *dev,
> +		      const struct rte_dma_conf *conf, uint32_t conf_sz)
> +{
> +	struct cnxk_dpi_vf_s *dpivf = NULL;
> +	int rc = 0;
> +
> +	RTE_SET_USED(conf);
> +	RTE_SET_USED(conf);
> +	RTE_SET_USED(conf_sz);
> +	RTE_SET_USED(conf_sz);
> +	dpivf = dev->fp_obj->dev_private;
> +	rc = roc_dpi_configure(&dpivf->rdpi);
> +	if (rc < 0)
> +		plt_err("DMA configure failed err = %d", rc);
> +
> +	return rc;
> +}
> +
> +static int
> +cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
> +			const struct rte_dma_vchan_conf *conf,
> +			uint32_t conf_sz)
> +{
> +	struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;
> +	struct cnxk_dpi_compl_s *comp_data;
> +	union dpi_instr_hdr_s *header = &dpivf->conf.hdr;
> +	int i;
> +
> +	RTE_SET_USED(vchan);
> +	RTE_SET_USED(conf_sz);
> +
> +	header->s.pt = DPI_HDR_PT_ZBW_CA;
> +
> +	switch (conf->direction) {
> +	case RTE_DMA_DIR_DEV_TO_MEM:
> +		header->s.xtype = DPI_XTYPE_INBOUND;
> +		header->s.lport = conf->src_port.pcie.coreid;
> +		header->s.fport = 0;
> +		header->s.pvfe = 1;
> +		break;
> +	case RTE_DMA_DIR_MEM_TO_DEV:
> +		header->s.xtype = DPI_XTYPE_OUTBOUND;
> +		header->s.lport = 0;
> +		header->s.fport = conf->dst_port.pcie.coreid;
> +		header->s.pvfe = 1;
> +		break;
> +	case RTE_DMA_DIR_MEM_TO_MEM:
> +		header->s.xtype = DPI_XTYPE_INTERNAL_ONLY;
> +		header->s.lport = 0;
> +		header->s.fport = 0;
> +		header->s.pvfe = 0;
> +		break;
> +	case RTE_DMA_DIR_DEV_TO_DEV:
> +		header->s.xtype = DPI_XTYPE_EXTERNAL_ONLY;
> +		header->s.lport = conf->src_port.pcie.coreid;
> +		header->s.fport = conf->dst_port.pcie.coreid;

The capabilities reported by info_get don't declare DEV_TO_DEV support, and the framework ensures a direction that isn't declared is never passed in, so this DEV_TO_DEV case could be removed...

> +	};
> +
> +	for (i = 0; i < conf->nb_desc; i++) {
> +		comp_data = rte_zmalloc(NULL, sizeof(*comp_data), 0);

Why not check that comp_data is valid here? rte_zmalloc can return NULL.
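Something along these lines, as a rough sketch (the error message is only a placeholder):

	comp_data = rte_zmalloc(NULL, sizeof(*comp_data), 0);
	if (comp_data == NULL) {
		plt_err("Failed to allocate completion data");
		return -ENOMEM;
	}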
> +		dpivf->conf.c_desc.compl_ptr[i] = comp_data;
> +	};
> +	dpivf->conf.c_desc.max_cnt = DPI_MAX_DESC;
> +	dpivf->conf.c_desc.head = 0;
> +	dpivf->conf.c_desc.tail = 0;
> +
> +	return 0;
> +}
> +
> +static int
> +cnxk_dmadev_start(struct rte_dma_dev *dev)
> +{
> +	struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;
> +
> +	roc_dpi_enable(&dpivf->rdpi);
> +
> +	return 0;
> +}
> +
> +static int
> +cnxk_dmadev_stop(struct rte_dma_dev *dev)
> +{
> +	struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;
> +
> +	roc_dpi_disable(&dpivf->rdpi);
> +
> +	return 0;
> +}
> +
> +static int
> +cnxk_dmadev_close(struct rte_dma_dev *dev)
> +{
> +	struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;
> +
> +	roc_dpi_disable(&dpivf->rdpi);
> +	roc_dpi_dev_fini(&dpivf->rdpi);
> +
> +	return 0;
> +}
> +
> +static inline int
> +__dpi_queue_write(struct roc_dpi *dpi, uint64_t *cmds, int cmd_count)
> +{
> +	uint64_t *ptr = dpi->chunk_base;
> +
> +	if ((cmd_count < DPI_MIN_CMD_SIZE) || (cmd_count > DPI_MAX_CMD_SIZE) ||
> +	    cmds == NULL)
> +		return -EINVAL;
> +
> +	/*
> +	 * Normally there is plenty of room in the current buffer for the
> +	 * command
> +	 */
> +	if (dpi->chunk_head + cmd_count < dpi->pool_size_m1) {
> +		ptr += dpi->chunk_head;
> +		dpi->chunk_head += cmd_count;
> +		while (cmd_count--)
> +			*ptr++ = *cmds++;
> +	} else {
> +		int count;
> +		uint64_t *new_buff = dpi->chunk_next;
> +
> +		dpi->chunk_next =
> +			(void *)roc_npa_aura_op_alloc(dpi->aura_handle, 0);
> +		if (!dpi->chunk_next) {
> +			plt_err("Failed to alloc next buffer from NPA");
> +			return -ENOMEM;
> +		}
> +
> +		/*
> +		 * Figure out how many cmd words will fit in this buffer.
> +		 * One location will be needed for the next buffer pointer.
> +		 */
> +		count = dpi->pool_size_m1 - dpi->chunk_head;
> +		ptr += dpi->chunk_head;
> +		cmd_count -= count;
> +		while (count--)
> +			*ptr++ = *cmds++;
> +
> +		/*
> +		 * chunk next ptr is 2 DWORDS
> +		 * second DWORD is reserved.
> +		 */
> +		*ptr++ = (uint64_t)new_buff;
> +		*ptr = 0;
> +
> +		/*
> +		 * The current buffer is full and has a link to the next
> +		 * buffers. Time to write the rest of the commands into the new
> +		 * buffer.
> +		 */
> +		dpi->chunk_base = new_buff;
> +		dpi->chunk_head = cmd_count;
> +		ptr = new_buff;
> +		while (cmd_count--)
> +			*ptr++ = *cmds++;
> +
> +		/* queue index may be greater than pool size */
> +		if (dpi->chunk_head >= dpi->pool_size_m1) {
> +			new_buff = dpi->chunk_next;
> +			dpi->chunk_next =
> +				(void *)roc_npa_aura_op_alloc(dpi->aura_handle,
> +							      0);
> +			if (!dpi->chunk_next) {
> +				plt_err("Failed to alloc next buffer from NPA");
> +				return -ENOMEM;
> +			}
> +			/* Write next buffer address */
> +			*ptr = (uint64_t)new_buff;
> +			dpi->chunk_base = new_buff;
> +			dpi->chunk_head = 0;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cnxk_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src,
> +		 rte_iova_t dst, uint32_t length, uint64_t flags)
> +{
> +	struct cnxk_dpi_vf_s *dpivf = dev_private;
> +	union dpi_instr_hdr_s *header = &dpivf->conf.hdr;
> +	struct cnxk_dpi_compl_s *comp_ptr;
> +	rte_iova_t fptr, lptr;
> +	int num_words = 0;
> +	int rc;
> +
> +	RTE_SET_USED(vchan);
> +
> +	comp_ptr = dpivf->conf.c_desc.compl_ptr[dpivf->conf.c_desc.tail];
> +	comp_ptr->cdata = DPI_REQ_CDATA;
> +	header->s.ptr = (uint64_t)comp_ptr;
> +	STRM_INC(dpivf->conf.c_desc);
> +
> +	header->s.nfst = 1;
> +	header->s.nlst = 1;
> +
> +	/*
> +	 * For inbound case, src pointers are last pointers.
> +	 * For all other cases, src pointers are first pointers.
> +	 */
> +	if (header->s.xtype == DPI_XTYPE_INBOUND) {
> +		fptr = dst;
> +		lptr = src;
> +	} else {
> +		fptr = src;
> +		lptr = dst;
> +	}
> +
> +	dpivf->cmd[0] = header->u[0];
> +	dpivf->cmd[1] = header->u[1];
> +	dpivf->cmd[2] = header->u[2];
> +	/* word3 is always 0 */
> +	num_words += 4;
> +	dpivf->cmd[num_words++] = length;
> +	dpivf->cmd[num_words++] = fptr;
> +	dpivf->cmd[num_words++] = length;
> +	dpivf->cmd[num_words++] = lptr;
> +
> +	rc = __dpi_queue_write(&dpivf->rdpi, dpivf->cmd, num_words);
> +	if (!rc) {
> +		if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
> +			rte_wmb();
> +			plt_write64(num_words,
> +				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
> +		}
> +		dpivf->num_words = num_words;
> +	}
> +
> +	return rc;

I notice __dpi_queue_write returns 0 on success, but on success this copy op should return the ring index of the enqueued request, in the range [0, 0xffff].

> +}
> +
> +static uint16_t
> +cnxk_dmadev_completed(void *dev_private, uint16_t vchan, const uint16_t nb_cpls,
> +		      uint16_t *last_idx, bool *has_error)
> +{
> +	struct cnxk_dpi_vf_s *dpivf = dev_private;
> +	int cnt;
> +
> +	RTE_SET_USED(vchan);
> +	RTE_SET_USED(last_idx);
> +	RTE_SET_USED(has_error);
> +	for (cnt = 0; cnt < nb_cpls; cnt++) {
> +		struct cnxk_dpi_compl_s *comp_ptr =
> +			dpivf->conf.c_desc.compl_ptr[cnt];
> +
> +		if (comp_ptr->cdata)

A non-zero cdata here means the request failed, so *has_error should be set to true.

> +			break;
> +	}
> +
> +	dpivf->conf.c_desc.tail = cnt;

And this should also return last_idx, which the framework demands.

> +
> +	return cnt;
> +}
> +
> +static uint16_t
> +cnxk_dmadev_completed_status(void *dev_private, uint16_t vchan,
> +			     const uint16_t nb_cpls, uint16_t *last_idx,
> +			     enum rte_dma_status_code *status)
> +{
> +	struct cnxk_dpi_vf_s *dpivf = dev_private;
> +	int cnt;
> +
> +	RTE_SET_USED(vchan);
> +	RTE_SET_USED(last_idx);
> +	for (cnt = 0; cnt < nb_cpls; cnt++) {
> +		struct cnxk_dpi_compl_s *comp_ptr =
> +			dpivf->conf.c_desc.compl_ptr[cnt];
> +		status[cnt] = comp_ptr->cdata;
> +	}
> +
> +	dpivf->conf.c_desc.tail = 0;

Same here: return last_idx, which the framework demands.
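For both completed callbacks, the bookkeeping could look roughly like this, assuming the driver grows a free-running 16-bit completion counter (called desc_idx here purely for illustration; this patch has no such field yet):

	/* The framework expects last_idx to be the ring index of the last
	 * completed request, from a counter that wraps at 0xffff. */
	if (cnt > 0)
		*last_idx = dpivf->desc_idx + cnt - 1;
	dpivf->desc_idx += cnt;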
> +	return cnt;
> +}
> +
> +static int
> +cnxk_dmadev_submit(void *dev_private, uint16_t vchan __rte_unused)
> +{
> +	struct cnxk_dpi_vf_s *dpivf = dev_private;
> +
> +	rte_wmb();
> +	plt_write64(dpivf->num_words, dpivf->rdpi.rbase + DPI_VDMA_DBELL);
> +
> +	return 0;
> +}
> +
> +static const struct rte_dma_dev_ops cnxk_dmadev_ops = {
> +	.dev_info_get = cnxk_dmadev_info_get,
> +	.dev_configure = cnxk_dmadev_configure,
> +	.dev_start = cnxk_dmadev_start,
> +	.dev_stop = cnxk_dmadev_stop,
> +	.vchan_setup = cnxk_dmadev_vchan_setup,
> +	.dev_close = cnxk_dmadev_close,
> +};
> +
>  static int
>  cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused,
>  		  struct rte_pci_device *pci_dev)
> @@ -50,6 +366,12 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused,
>  
>  	dmadev->device = &pci_dev->device;
>  	dmadev->fp_obj->dev_private = dpivf;
> +	dmadev->dev_ops = &cnxk_dmadev_ops;
> +
> +	dmadev->fp_obj->copy = cnxk_dmadev_copy;
> +	dmadev->fp_obj->submit = cnxk_dmadev_submit;
> +	dmadev->fp_obj->completed = cnxk_dmadev_completed;
> +	dmadev->fp_obj->completed_status = cnxk_dmadev_completed_status;
>  
>  	rdpi = &dpivf->rdpi;
>  
> diff --git a/drivers/dma/cnxk/cnxk_dmadev.h b/drivers/dma/cnxk/cnxk_dmadev.h
> index 9e0bb7b2ce..efb09af03e 100644
> --- a/drivers/dma/cnxk/cnxk_dmadev.h
> +++ b/drivers/dma/cnxk/cnxk_dmadev.h
> @@ -4,8 +4,39 @@
>  #ifndef _CNXK_DMADEV_H_
>  #define _CNXK_DMADEV_H_
>  
> +#define DPI_MAX_POINTER		15
> +#define DPI_QUEUE_STOP		0x0
> +#define DPI_QUEUE_START		0x1
> +#define STRM_INC(s)		((s).tail = ((s).tail + 1) % (s).max_cnt)
> +#define DPI_MAX_DESC		DPI_MAX_POINTER
> +
> +/* Set Completion data to 0xFF when request submitted,
> + * upon successful request completion engine reset to completion status
> + */
> +#define DPI_REQ_CDATA		0xFF
> +
> +struct cnxk_dpi_compl_s {
> +	uint64_t cdata;
> +	void *cb_data;
> +};
> +
> +struct cnxk_dpi_cdesc_data_s {
> +	struct cnxk_dpi_compl_s *compl_ptr[DPI_MAX_DESC];
> +	uint16_t max_cnt;
> +	uint16_t head;
> +	uint16_t tail;
> +};
> +
> +struct cnxk_dpi_conf {
> +	union dpi_instr_hdr_s hdr;
> +	struct cnxk_dpi_cdesc_data_s c_desc;
> +};
> +
>  struct cnxk_dpi_vf_s {
>  	struct roc_dpi rdpi;
> +	struct cnxk_dpi_conf conf;
> +	uint64_t cmd[DPI_MAX_CMD_SIZE];
> +	uint32_t num_words;
>  };
>  
>  #endif
> diff --git a/drivers/dma/cnxk/version.map b/drivers/dma/cnxk/version.map
> new file mode 100644
> index 0000000000..4a76d1d52d
> --- /dev/null
> +++ b/drivers/dma/cnxk/version.map
> @@ -0,0 +1,3 @@
> +DPDK_21 {

This should be DPDK_22.

> +	local: *;
> +};
>
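With that fixed, the whole map file would presumably read:

	DPDK_22 {
		local: *;
	};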