DPDK patches and discussions
From: "Sachin Saxena (OSS)" <sachin.saxena@oss.nxp.com>
To: Apeksha Gupta <apeksha.gupta@nxp.com>, ferruh.yigit@intel.com
Cc: dev@dpdk.org, hemant.agrawal@nxp.com, sachin.saxena@nxp.com
Subject: Re: [dpdk-dev] [PATCH 3/4] drivers/net/enetfec: queue configuration
Date: Sun, 4 Jul 2021 12:16:55 +0530	[thread overview]
Message-ID: <d0d67eb8-0b5e-6457-524e-06f0a285ae66@oss.nxp.com> (raw)
In-Reply-To: <20210430043424.19752-4-apeksha.gupta@nxp.com>



On 30-Apr-21 10:04 AM, Apeksha Gupta wrote:
> This patch added RX/TX queue configuration setup operations.
added -> adds (please use present tense)
> On packet Rx the respective BD Ring status bit is set which is then
Suggestion: Rx -> reception
> used for packet processing.
>
> Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
> Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
> ---
>   drivers/net/enetfec/enet_ethdev.c | 223 ++++++++++++++++++++++++++++++
>   1 file changed, 223 insertions(+)
>
> diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
> index 5f4f2cf9e..b4816179a 100644
> --- a/drivers/net/enetfec/enet_ethdev.c
> +++ b/drivers/net/enetfec/enet_ethdev.c
> @@ -48,6 +48,19 @@
>   
>   int enetfec_logtype_pmd;
>   
> +/* Supported Rx offloads */
> +static uint64_t dev_rx_offloads_sup =
> +		DEV_RX_OFFLOAD_IPV4_CKSUM |
> +		DEV_RX_OFFLOAD_UDP_CKSUM |
> +		DEV_RX_OFFLOAD_TCP_CKSUM |
> +		DEV_RX_OFFLOAD_VLAN_STRIP |
> +		DEV_RX_OFFLOAD_CHECKSUM;
> +
> +static uint64_t dev_tx_offloads_sup =
> +		DEV_TX_OFFLOAD_IPV4_CKSUM |
> +		DEV_TX_OFFLOAD_UDP_CKSUM |
> +		DEV_TX_OFFLOAD_TCP_CKSUM;
> +
>   /*
>    * This function is called to start or restart the FEC during a link
>    * change, transmit timeout or to reconfigure the FEC. The network
> @@ -176,8 +189,218 @@ enetfec_eth_open(struct rte_eth_dev *dev)
>   	return 0;
>   }
>   
> +
> +static int
> +enetfec_eth_configure(__rte_unused struct rte_eth_dev *dev)
> +{
> +	ENET_PMD_INFO("%s: returning zero ", __func__);
> +	return 0;
> +}
> +
Please remove this stub if it is not required.
> +static int
> +enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
> +	     struct rte_eth_dev_info *dev_info)
> +{
> +	dev_info->max_rx_queues = ENET_MAX_Q;
> +	dev_info->max_tx_queues = ENET_MAX_Q;
> +	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
> +	dev_info->rx_offload_capa = dev_rx_offloads_sup;
> +	dev_info->tx_offload_capa = dev_tx_offloads_sup;
> +
> +	return 0;
> +}
> +
> +static const unsigned short offset_des_active_rxq[] = {
> +	ENET_RDAR_0, ENET_RDAR_1, ENET_RDAR_2
> +};
> +
> +static const unsigned short offset_des_active_txq[] = {
> +	ENET_TDAR_0, ENET_TDAR_1, ENET_TDAR_2
> +};
> +
> +static int
> +enetfec_tx_queue_setup(struct rte_eth_dev *dev,
> +			uint16_t queue_idx,
> +			uint16_t nb_desc,
> +			__rte_unused unsigned int socket_id,
> +			__rte_unused const struct rte_eth_txconf *tx_conf)
> +{
> +	struct enetfec_private *fep = dev->data->dev_private;
> +	unsigned int i;
> +	struct bufdesc *bdp, *bd_base;
> +	struct enetfec_priv_tx_q *txq;
> +	unsigned int size;
> +	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
> +			sizeof(struct bufdesc);
> +	unsigned int dsize_log2 = fls64(dsize);
> +
> +	/* allocate transmit queue */
> +	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
> +	if (!txq) {
> +		ENET_PMD_ERR("transmit queue allocation failed");
> +		return -ENOMEM;
> +	}
> +
> +	if (nb_desc > MAX_TX_BD_RING_SIZE) {
> +		nb_desc = MAX_TX_BD_RING_SIZE;
> +		ENET_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE\n");
> +	}
> +	txq->bd.ring_size = nb_desc;
> +	fep->total_tx_ring_size += txq->bd.ring_size;
> +	fep->tx_queues[queue_idx] = txq;
> +
> +	rte_write32(fep->bd_addr_p_t[queue_idx],
> +		fep->hw_baseaddr_v + ENET_TD_START(queue_idx));
Do we need rte_cpu_to_le_* here?
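For example, if the MAC expects little-endian register values and the CPU
may be big-endian, the write would become something like (untested sketch):

        rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),
                    fep->hw_baseaddr_v + ENET_TD_START(queue_idx));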
> +
> +	/* Set transmit descriptor base. */
> +	txq = fep->tx_queues[queue_idx];
> +	txq->fep = fep;
> +	size = dsize * txq->bd.ring_size;
> +	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
> +	txq->bd.que_id = queue_idx;
> +	txq->bd.base = bd_base;
> +	txq->bd.cur = bd_base;
> +	txq->bd.d_size = dsize;
> +	txq->bd.d_size_log2 = dsize_log2;
> +	txq->bd.active_reg_desc =
> +			fep->hw_baseaddr_v + offset_des_active_txq[queue_idx];
> +	bd_base = (struct bufdesc *)(((void *)bd_base) + size);
> +	txq->bd.last = (struct bufdesc *)(((void *)bd_base) - dsize);
> +	bdp = txq->bd.base;
> +	bdp = txq->bd.cur;
> +
> +	for (i = 0; i < txq->bd.ring_size; i++) {
> +		/* Initialize the BD for every fragment in the page. */
> +		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);

rte_cpu_to_le_16(0) has no effect on '0'.
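A plain write should be enough:

        rte_write16(0, &bdp->bd_sc);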

> +		if (txq->tx_mbuf[i]) {
Please compare against NULL explicitly.
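Something like:

        if (txq->tx_mbuf[i] != NULL) {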
> +			rte_pktmbuf_free(txq->tx_mbuf[i]);
> +			txq->tx_mbuf[i] = NULL;
> +		}
> +		rte_write32(rte_cpu_to_le_32(0), &bdp->bd_bufaddr);

rte_cpu_to_le_32(0) has no effect on '0' either.
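A plain write is enough here as well:

        rte_write32(0, &bdp->bd_bufaddr);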

> +		bdp = enet_get_nextdesc(bdp, &txq->bd);
> +	}
> +
> +	/* Set the last buffer to wrap */
> +	bdp = enet_get_prevdesc(bdp, &txq->bd);
> +	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
> +		     rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
> +	txq->dirty_tx = bdp;
> +	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
> +	return 0;
> +}
> +
> +static int
> +enetfec_rx_queue_setup(struct rte_eth_dev *dev,
> +			uint16_t queue_idx,
> +			uint16_t nb_rx_desc,
> +			 __rte_unused unsigned int socket_id,
> +			 __rte_unused const struct rte_eth_rxconf *rx_conf,
> +			struct rte_mempool *mb_pool)
> +{
> +	struct enetfec_private *fep = dev->data->dev_private;
> +	unsigned int i;
> +	struct bufdesc *bd_base;
> +	struct bufdesc  *bdp;
> +	struct enetfec_priv_rx_q *rxq;
> +	unsigned int size;
> +	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
> +			sizeof(struct bufdesc);
> +	unsigned int dsize_log2 = fls64(dsize);
> +
> +	/* allocate receive queue */
> +	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
> +	if (!rxq) {
> +		ENET_PMD_ERR("receive queue allocation failed");
> +		return -ENOMEM;
> +	}
> +
> +	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
> +		nb_rx_desc = MAX_RX_BD_RING_SIZE;
> +		ENET_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE\n");
> +	}
> +
> +	rxq->bd.ring_size = nb_rx_desc;
> +	fep->total_rx_ring_size += rxq->bd.ring_size;
> +	fep->rx_queues[queue_idx] = rxq;
> +
> +	rte_write32(fep->bd_addr_p_r[queue_idx],
> +			fep->hw_baseaddr_v + ENET_RD_START(queue_idx));
> +	rte_write32(PKT_MAX_BUF_SIZE,
> +			fep->hw_baseaddr_v + ENET_MRB_SIZE(queue_idx));
Please check if we need rte_cpu_to_le_* here as well.
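If a conversion turns out to be needed, it would look something like this
(untested sketch, assuming the registers are little-endian):

        rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),
                    fep->hw_baseaddr_v + ENET_RD_START(queue_idx));
        rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
                    fep->hw_baseaddr_v + ENET_MRB_SIZE(queue_idx));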
> +
> +	/* Set receive descriptor base. */
> +	rxq = fep->rx_queues[queue_idx];
> +	rxq->pool = mb_pool;
> +	size = dsize * rxq->bd.ring_size;
> +	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
> +	rxq->bd.que_id = queue_idx;
> +	rxq->bd.base = bd_base;
> +	rxq->bd.cur = bd_base;
> +	rxq->bd.d_size = dsize;
> +	rxq->bd.d_size_log2 = dsize_log2;
> +	rxq->bd.active_reg_desc =
> +			fep->hw_baseaddr_v + offset_des_active_rxq[queue_idx];
> +	bd_base = (struct bufdesc *)(((void *)bd_base) + size);
> +	rxq->bd.last = (struct bufdesc *)(((void *)bd_base) - dsize);
> +
> +	rxq->fep = fep;
> +	bdp = rxq->bd.base;
> +	rxq->bd.cur = bdp;
> +
> +	for (i = 0; i < nb_rx_desc; i++) {
> +		/* Initialize Rx buffers from pktmbuf pool */
> +		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
> +		if (mbuf == NULL) {
> +			ENET_PMD_ERR("mbuf failed\n");
> +			goto err_alloc;
> +		}
> +
> +		/* Get the virtual address & physical address */
> +		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
> +				&bdp->bd_bufaddr);
> +
> +		rxq->rx_mbuf[i] = mbuf;
> +		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);
> +
> +		bdp = enet_get_nextdesc(bdp, &rxq->bd);
> +	}
> +
> +	/* Initialize the receive buffer descriptors. */
> +	bdp = rxq->bd.cur;
> +	for (i = 0; i < rxq->bd.ring_size; i++) {
> +		/* Initialize the BD for every fragment in the page. */
> +		if (rte_read32(&bdp->bd_bufaddr))
Please compare against 0 explicitly.
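Something like:

        if (rte_read32(&bdp->bd_bufaddr) != 0)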
> +			rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
> +				&bdp->bd_sc);
> +		else
> +			rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
rte_cpu_to_le_16(0) has no effect on '0' here either.
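A plain write is enough:

        rte_write16(0, &bdp->bd_sc);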
> +
> +		bdp = enet_get_nextdesc(bdp, &rxq->bd);
> +	}
> +
> +	/* Set the last buffer to wrap */
> +	bdp = enet_get_prevdesc(bdp, &rxq->bd);
> +	rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
> +		     rte_read16(&bdp->bd_sc)),	&bdp->bd_sc);
> +	dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
> +	rte_write32(0x0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
> +	return 0;
> +
> +err_alloc:
> +	for (i = 0; i < nb_rx_desc; i++) {
> +		rte_pktmbuf_free(rxq->rx_mbuf[i]);
Only free the mbuf if it was allocated earlier (rxq->rx_mbuf[i] != NULL).
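Something like:

        if (rxq->rx_mbuf[i] != NULL) {
                rte_pktmbuf_free(rxq->rx_mbuf[i]);
                rxq->rx_mbuf[i] = NULL;
        }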
> +		rxq->rx_mbuf[i] = NULL;
> +	}
> +	rte_free(rxq);
> +	return -1;
> +}
> +
>   static const struct eth_dev_ops ops = {
>   	.dev_start = enetfec_eth_open,
> +	.dev_configure = enetfec_eth_configure,
Better to introduce function pointers only when they are actually implemented.

> +	.dev_infos_get = enetfec_eth_info,
> +	.rx_queue_setup = enetfec_rx_queue_setup,
> +	.tx_queue_setup = enetfec_tx_queue_setup,
>   };
>   
>   static int



Thread overview: 17+ messages
2021-04-30  4:34 [dpdk-dev] [PATCH 0/4] drivers/net: add NXP ENETFEC driver Apeksha Gupta
2021-04-30  4:34 ` [dpdk-dev] [PATCH 1/4] drivers/net/enetfec: Introduce " Apeksha Gupta
2021-06-08 13:10   ` Andrew Rybchenko
2021-07-02 13:55     ` David Marchand
2021-07-04  2:57   ` Sachin Saxena (OSS)
2021-04-30  4:34 ` [dpdk-dev] [PATCH 2/4] drivers/net/enetfec: UIO support added Apeksha Gupta
2021-06-08 13:21   ` Andrew Rybchenko
2021-07-04  4:27   ` Sachin Saxena (OSS)
2021-04-30  4:34 ` [dpdk-dev] [PATCH 3/4] drivers/net/enetfec: queue configuration Apeksha Gupta
2021-06-08 13:38   ` Andrew Rybchenko
2021-07-04  6:46   ` Sachin Saxena (OSS) [this message]
2021-04-30  4:34 ` [dpdk-dev] [PATCH 4/4] drivers/net/enetfec: add enqueue and dequeue support Apeksha Gupta
2021-06-08 13:42   ` Andrew Rybchenko
2021-06-21  9:14     ` [dpdk-dev] [EXT] " Apeksha Gupta
2021-07-05  8:48   ` [dpdk-dev] " Sachin Saxena (OSS)
2021-05-04 15:40 ` [dpdk-dev] [PATCH 0/4] drivers/net: add NXP ENETFEC driver Ferruh Yigit
2021-07-04  2:55 ` Sachin Saxena (OSS)
