From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 8B830A0C4E;
	Tue,  2 Nov 2021 10:07:23 +0100 (CET)
Received: from [217.70.189.124] (localhost [127.0.0.1])
	by mails.dpdk.org (Postfix) with ESMTP id 130D04069F;
	Tue,  2 Nov 2021 10:07:23 +0100 (CET)
Received: from szxga08-in.huawei.com (szxga08-in.huawei.com [45.249.212.255])
 by mails.dpdk.org (Postfix) with ESMTP id AD08B4068F
 for <dev@dpdk.org>; Tue,  2 Nov 2021 10:07:21 +0100 (CET)
Received: from dggemv703-chm.china.huawei.com (unknown [172.30.72.57])
 by szxga08-in.huawei.com (SkyGuard) with ESMTP id 4Hk3rq6rSwz1DHwq;
 Tue,  2 Nov 2021 17:05:15 +0800 (CST)
Received: from dggpeml500024.china.huawei.com (7.185.36.10) by
 dggemv703-chm.china.huawei.com (10.3.19.46) with Microsoft SMTP Server
 (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id
 15.1.2308.15; Tue, 2 Nov 2021 17:07:17 +0800
Received: from [127.0.0.1] (10.67.100.224) by dggpeml500024.china.huawei.com
 (7.185.36.10) with Microsoft SMTP Server (version=TLS1_2,
 cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2308.15; Tue, 2 Nov
 2021 17:07:17 +0800
To: Gagandeep Singh <g.singh@nxp.com>, <thomas@monjalon.net>, <dev@dpdk.org>
CC: <nipun.gupta@nxp.com>
References: <20210909111500.3901706-1-g.singh@nxp.com>
 <20211101085143.2472241-1-g.singh@nxp.com>
 <20211101085143.2472241-3-g.singh@nxp.com>
From: fengchengwen <fengchengwen@huawei.com>
Message-ID: <2243c59d-fbcf-eb3c-f24e-d6b943a82d73@huawei.com>
Date: Tue, 2 Nov 2021 17:07:17 +0800
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:68.0) Gecko/20100101
 Thunderbird/68.11.0
MIME-Version: 1.0
In-Reply-To: <20211101085143.2472241-3-g.singh@nxp.com>
Content-Type: text/plain; charset="utf-8"
Content-Language: en-US
Content-Transfer-Encoding: 7bit
X-Originating-IP: [10.67.100.224]
X-ClientProxiedBy: dggems702-chm.china.huawei.com (10.3.19.179) To
 dggpeml500024.china.huawei.com (7.185.36.10)
X-CFilter-Loop: Reflected
Subject: Re: [dpdk-dev] [PATCH v2 2/6] dma/dpaa: add device probe and remove
 functionality
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

On 2021/11/1 16:51, Gagandeep Singh wrote:
> This patch add device initialisation functionality.
> 
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>

[snip]

> +
> +static void fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan)
> +{
> +	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
> +	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
> +	struct fsl_qdma_comp *comp_temp, *_comp_temp;
> +	int id;
> +
> +	if (--fsl_queue->count)
> +		goto finally;
> +
> +	id = (fsl_qdma->block_base - fsl_queue->block_base) /
> +	      fsl_qdma->block_offset;
> +
> +	while (rte_atomic32_read(&wait_task[id]) == 1)
> +		rte_delay_us(QDMA_DELAY);
> +
> +	list_for_each_entry_safe(comp_temp, _comp_temp,
> +				 &fsl_queue->comp_used,	list) {
> +		list_del(&comp_temp->list);
> +		dma_pool_free(comp_temp->virt_addr);
> +		dma_pool_free(comp_temp->desc_virt_addr);
> +		rte_free(comp_temp);
> +	}
> +
> +	list_for_each_entry_safe(comp_temp, _comp_temp,
> +				 &fsl_queue->comp_free, list) {
> +		list_del(&comp_temp->list);
> +		dma_pool_free(comp_temp->virt_addr);
> +		dma_pool_free(comp_temp->desc_virt_addr);
> +		rte_free(comp_temp);
> +	}
> +
> +finally:
> +	fsl_qdma->desc_allocated--;
> +}

Please add a blank line between the two functions.

> +static struct fsl_qdma_queue
> +*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
> +{
> +	struct fsl_qdma_queue *queue_head, *queue_temp;
> +	int len, i, j;
> +	int queue_num;
> +	int blocks;
> +	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
> +
> +	queue_num = fsl_qdma->n_queues;
> +	blocks = fsl_qdma->num_blocks;
> +
> +	len = sizeof(*queue_head) * queue_num * blocks;
> +	queue_head = rte_zmalloc("qdma: queue head", len, 0);
> +	if (!queue_head)
> +		return NULL;
> +
> +	for (i = 0; i < FSL_QDMA_QUEUE_MAX; i++)
> +		queue_size[i] = QDMA_QUEUE_SIZE;
> +
> +	for (j = 0; j < blocks; j++) {
> +		for (i = 0; i < queue_num; i++) {
> +			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
> +			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
> +				return NULL;
> +			}
> +			queue_temp = queue_head + i + (j * queue_num);
> +
> +			queue_temp->cq =
> +			dma_pool_alloc(sizeof(struct fsl_qdma_format) *
> +				       queue_size[i],
> +				       sizeof(struct fsl_qdma_format) *
> +				       queue_size[i], &queue_temp->bus_addr);
> +
> +			memset(queue_temp->cq, 0x0, queue_size[i] *
> +			       sizeof(struct fsl_qdma_format));
> +

Please move the memset after the queue_temp->cq validity check; if
dma_pool_alloc() fails, the memset will dereference a NULL pointer.

> +			if (!queue_temp->cq)

queue_head and the cq buffers allocated in previous iterations also need
to be freed on this error path, otherwise they leak.
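Something like the following (rough sketch only, reusing the
dma_pool_free() helper already used in this file; since queue_head is
zmalloc'ed, the not-yet-allocated cq pointers are still NULL):

			if (!queue_temp->cq)
				goto fail;

			memset(queue_temp->cq, 0x0, queue_size[i] *
			       sizeof(struct fsl_qdma_format));
	...
fail:
	/* free the cq buffers allocated in earlier iterations */
	for (j = 0; j < blocks; j++) {
		for (i = 0; i < queue_num; i++) {
			queue_temp = queue_head + i + (j * queue_num);
			if (queue_temp->cq)
				dma_pool_free(queue_temp->cq);
		}
	}
	rte_free(queue_head);
	return NULL;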

> +				return NULL;
> +
> +			queue_temp->block_base = fsl_qdma->block_base +
> +				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
> +			queue_temp->n_cq = queue_size[i];
> +			queue_temp->id = i;
> +			queue_temp->count = 0;
> +			queue_temp->virt_head = queue_temp->cq;
> +
> +		}
> +	}
> +	return queue_head;
> +}
> +
> +static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
> +{
> +	struct fsl_qdma_queue *status_head;
> +	unsigned int status_size;
> +
> +	status_size = QDMA_STATUS_SIZE;
> +	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
> +	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
> +		return NULL;
> +	}
> +
> +	status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
> +	if (!status_head)
> +		return NULL;
> +
> +	/*
> +	 * Buffer for queue command
> +	 */
> +	status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) *
> +					 status_size,
> +					 sizeof(struct fsl_qdma_format) *
> +					 status_size,
> +					 &status_head->bus_addr);
> +
> +	memset(status_head->cq, 0x0, status_size *
> +	       sizeof(struct fsl_qdma_format));

Same here: please move the memset after the status_head->cq validity check.

> +	if (!status_head->cq)

status_head also needs to be freed on this error path.
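A rough sketch of what I mean:

	if (!status_head->cq) {
		rte_free(status_head);
		return NULL;
	}

	memset(status_head->cq, 0x0, status_size *
	       sizeof(struct fsl_qdma_format));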

> +		return NULL;
> +
> +	status_head->n_cq = status_size;
> +	status_head->virt_head = status_head->cq;
> +
> +	return status_head;
> +}
> +
> +static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
> +{
> +	void *ctrl = fsl_qdma->ctrl_base;
> +	void *block;
> +	int i, count = RETRIES;
> +	unsigned int j;
> +	u32 reg;
> +
> +	/* Disable the command queue and wait for idle state. */
> +	reg = qdma_readl(ctrl + FSL_QDMA_DMR);
> +	reg |= FSL_QDMA_DMR_DQD;
> +	qdma_writel(reg, ctrl + FSL_QDMA_DMR);
> +	for (j = 0; j < fsl_qdma->num_blocks; j++) {
> +		block = fsl_qdma->block_base +
> +			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
> +		for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
> +			qdma_writel(0, block + FSL_QDMA_BCQMR(i));
> +	}
> +	while (true) {
> +		reg = qdma_readl(ctrl + FSL_QDMA_DSR);
> +		if (!(reg & FSL_QDMA_DSR_DB))
> +			break;
> +		if (count-- < 0)
> +			return -EBUSY;
> +		rte_delay_us(100);
> +	}
> +
> +	for (j = 0; j < fsl_qdma->num_blocks; j++) {
> +		block = fsl_qdma->block_base +
> +			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
> +
> +		/* Disable status queue. */
> +		qdma_writel(0, block + FSL_QDMA_BSQMR);
> +
> +		/*
> +		 * clear the command queue interrupt detect register for
> +		 * all queues.
> +		 */
> +		qdma_writel(0xffffffff, block + FSL_QDMA_BCQIDR(0));
> +	}
> +
> +	return 0;
> +}
> +
> +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
> +{
> +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
> +	struct fsl_qdma_queue *temp;
> +	void *ctrl = fsl_qdma->ctrl_base;
> +	void *block;
> +	u32 i, j;
> +	u32 reg;
> +	int ret, val;
> +
> +	/* Try to halt the qDMA engine first. */
> +	ret = fsl_qdma_halt(fsl_qdma);
> +	if (ret) {
> +		return ret;
> +	}
> +
> +	for (j = 0; j < fsl_qdma->num_blocks; j++) {
> +		block = fsl_qdma->block_base +
> +			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
> +		for (i = 0; i < fsl_qdma->n_queues; i++) {
> +			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
> +			/*
> +			 * Initialize Command Queue registers to
> +			 * point to the first
> +			 * command descriptor in memory.
> +			 * Dequeue Pointer Address Registers
> +			 * Enqueue Pointer Address Registers
> +			 */
> +
> +			qdma_writel(lower_32_bits(temp->bus_addr),
> +				    block + FSL_QDMA_BCQDPA_SADDR(i));
> +			qdma_writel(upper_32_bits(temp->bus_addr),
> +				    block + FSL_QDMA_BCQEDPA_SADDR(i));
> +			qdma_writel(lower_32_bits(temp->bus_addr),
> +				    block + FSL_QDMA_BCQEPA_SADDR(i));
> +			qdma_writel(upper_32_bits(temp->bus_addr),
> +				    block + FSL_QDMA_BCQEEPA_SADDR(i));
> +
> +			/* Initialize the queue mode. */
> +			reg = FSL_QDMA_BCQMR_EN;
> +			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
> +			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
> +			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
> +		}
> +
> +		/*
> +		 * Workaround for erratum: ERR010812.
> +		 * We must enable XOFF to avoid the enqueue rejection occurs.
> +		 * Setting SQCCMR ENTER_WM to 0x20.
> +		 */
> +
> +		qdma_writel(FSL_QDMA_SQCCMR_ENTER_WM,
> +			    block + FSL_QDMA_SQCCMR);
> +
> +		/*
> +		 * Initialize status queue registers to point to the first
> +		 * command descriptor in memory.
> +		 * Dequeue Pointer Address Registers
> +		 * Enqueue Pointer Address Registers
> +		 */
> +
> +		qdma_writel(
> +			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
> +			    block + FSL_QDMA_SQEEPAR);
> +		qdma_writel(
> +			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
> +			    block + FSL_QDMA_SQEPAR);
> +		qdma_writel(
> +			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
> +			    block + FSL_QDMA_SQEDPAR);
> +		qdma_writel(
> +			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
> +			    block + FSL_QDMA_SQDPAR);
> +		/* Desiable status queue interrupt. */
> +
> +		qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
> +		qdma_writel(0x0, block + FSL_QDMA_BSQICR);
> +		qdma_writel(0x0, block + FSL_QDMA_CQIER);
> +
> +		/* Initialize the status queue mode. */
> +		reg = FSL_QDMA_BSQMR_EN;
> +		val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
> +		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
> +		qdma_writel(reg, block + FSL_QDMA_BSQMR);
> +	}
> +
> +	reg = qdma_readl(ctrl + FSL_QDMA_DMR);
> +	reg &= ~FSL_QDMA_DMR_DQD;
> +	qdma_writel(reg, ctrl + FSL_QDMA_DMR);
> +
> +	return 0;
> +}
> +
> +static void
> +dma_release(void *fsl_chan)
> +{
> +	((struct fsl_qdma_chan *)fsl_chan)->free = true;
> +	fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan);
> +}
> +
> +static int
> +dpaa_qdma_init(struct rte_dma_dev *dmadev)
> +{
> +	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
> +	struct fsl_qdma_chan *fsl_chan;
> +	uint64_t phys_addr;
> +	unsigned int len;
> +	int ccsr_qdma_fd;
> +	int regs_size;
> +	int ret;
> +	u32 i;
> +
> +	fsl_qdma->desc_allocated = 0;
> +	fsl_qdma->n_chans = VIRT_CHANNELS;
> +	fsl_qdma->n_queues = QDMA_QUEUES;
> +	fsl_qdma->num_blocks = QDMA_BLOCKS;
> +	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
> +
> +	len = sizeof(*fsl_chan) * fsl_qdma->n_chans;
> +	fsl_qdma->chans = rte_zmalloc("qdma: fsl chans", len, 0);
> +	if (!fsl_qdma->chans)
> +		return -1;
> +
> +	len = sizeof(struct fsl_qdma_queue *) * fsl_qdma->num_blocks;
> +	fsl_qdma->status = rte_zmalloc("qdma: fsl status", len, 0);
> +	if (!fsl_qdma->status) {
> +		rte_free(fsl_qdma->chans);
> +		return -1;
> +	}
> +
> +	for (i = 0; i < fsl_qdma->num_blocks; i++) {
> +		rte_atomic32_init(&wait_task[i]);
> +		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
> +		if (!fsl_qdma->status[i])
> +			goto err;
> +	}
> +
> +	ccsr_qdma_fd = open("/dev/mem", O_RDWR);
> +	if (unlikely(ccsr_qdma_fd < 0)) {
> +		goto err;
> +	}
> +
> +	regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
> +	phys_addr = QDMA_CCSR_BASE;
> +	fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ |
> +					 PROT_WRITE, MAP_SHARED,
> +					 ccsr_qdma_fd, phys_addr);
> +
> +	close(ccsr_qdma_fd);
> +	if (fsl_qdma->ctrl_base == MAP_FAILED) {
> +		goto err;
> +	}
> +
> +	fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
> +	fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
> +
> +	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(fsl_qdma);
> +	if (!fsl_qdma->queue) {
> +		munmap(fsl_qdma->ctrl_base, regs_size);
> +		goto err;
> +	}
> +
> +	for (i = 0; i < fsl_qdma->n_chans; i++) {
> +		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
> +
> +		fsl_chan->qdma = fsl_qdma;
> +		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
> +							fsl_qdma->num_blocks);
> +		fsl_chan->free = true;
> +	}
> +
> +	ret = fsl_qdma_reg_init(fsl_qdma);
> +	if (ret) {
> +		munmap(fsl_qdma->ctrl_base, regs_size);
> +		goto err;
> +	}
> +
> +	return 0;
> +
> +err:
> +	rte_free(fsl_qdma->chans);
> +	rte_free(fsl_qdma->status);
> +
> +	return -1;
> +}
>  
>  static int
>  dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
> -		__rte_unused struct rte_dpaa_device *dpaa_dev)
> +		struct rte_dpaa_device *dpaa_dev)
>  {
> +	struct rte_dma_dev *dmadev;
> +	int ret;
> +
> +	dmadev = rte_dma_pmd_allocate(dpaa_dev->device.name,
> +				      rte_socket_id(),
> +				      sizeof(struct fsl_qdma_engine));
> +	if (!dmadev) {
> +		return -EINVAL;
> +	}

No need for braces around a single-statement if.

> +
> +	dpaa_dev->dmadev = dmadev;
> +
> +	/* Invoke PMD device initialization function */
> +	ret = dpaa_qdma_init(dmadev);
> +	if (ret) {
> +		(void)rte_dma_pmd_release(dpaa_dev->device.name);
> +		return ret;
> +	}
> +
> +	dmadev->state = RTE_DMA_DEV_READY;
>  	return 0;
>  }
>  
>  static int
> -dpaa_qdma_remove(__rte_unused struct rte_dpaa_device *dpaa_dev)
> +dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
>  {
> +	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
> +	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
> +	int i = 0, max = QDMA_QUEUES * QDMA_BLOCKS;
> +
> +	for (i = 0; i < max; i++) {
> +		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
> +
> +		if (fsl_chan->free == false)
> +			dma_release(fsl_chan);

Where is the rte_dma_dev released? The probe path allocates it with
rte_dma_pmd_allocate(), but I don't see a matching release in the remove
path.
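I think the end of dpaa_qdma_remove() should call rte_dma_pmd_release(),
e.g. (rough sketch):

	rte_free(fsl_qdma->status);
	rte_free(fsl_qdma->chans);

	(void)rte_dma_pmd_release(dpaa_dev->device.name);

	return 0;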

> +	}
> +
> +	rte_free(fsl_qdma->status);
> +	rte_free(fsl_qdma->chans);
> +
>  	return 0;
>  }
>  

[snip]

>