From: Andrew Rybchenko
Organization: OKTET Labs
Date: Tue, 8 Jun 2021 16:38:36 +0300
To: Apeksha Gupta, ferruh.yigit@intel.com
Cc: dev@dpdk.org, hemant.agrawal@nxp.com, sachin.saxena@nxp.com
Subject: Re: [dpdk-dev] [PATCH 3/4] drivers/net/enetfec: queue configuration
In-Reply-To: <20210430043424.19752-4-apeksha.gupta@nxp.com>
References: <20210430043424.19752-1-apeksha.gupta@nxp.com>
 <20210430043424.19752-4-apeksha.gupta@nxp.com>

Summary is incorrect. "net/enetfec: support queue configuration"?

On 4/30/21 7:34 AM, Apeksha Gupta wrote:
> This patch added RX/TX queue configuration setup operations.

RX -> Rx, TX -> Tx

> On packet Rx the respective BD Ring status bit is set which is then
> used for packet processing.
>
> Signed-off-by: Sachin Saxena
> Signed-off-by: Apeksha Gupta
> ---
>  drivers/net/enetfec/enet_ethdev.c | 223 ++++++++++++++++++++++++++++++
>  1 file changed, 223 insertions(+)
>
> diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
> index 5f4f2cf9e..b4816179a 100644
> --- a/drivers/net/enetfec/enet_ethdev.c
> +++ b/drivers/net/enetfec/enet_ethdev.c
> @@ -48,6 +48,19 @@
>
>  int enetfec_logtype_pmd;
>
> +/* Supported Rx offloads */
> +static uint64_t dev_rx_offloads_sup =
> +		DEV_RX_OFFLOAD_IPV4_CKSUM |
> +		DEV_RX_OFFLOAD_UDP_CKSUM |
> +		DEV_RX_OFFLOAD_TCP_CKSUM |
> +		DEV_RX_OFFLOAD_VLAN_STRIP |
> +		DEV_RX_OFFLOAD_CHECKSUM;
> +
> +static uint64_t dev_tx_offloads_sup =
> +		DEV_TX_OFFLOAD_IPV4_CKSUM |
> +		DEV_TX_OFFLOAD_UDP_CKSUM |
> +		DEV_TX_OFFLOAD_TCP_CKSUM;
> +
>  /*
>   * This function is called to start or restart the FEC during a link
>   * change, transmit timeout or to reconfigure the FEC. The network
> @@ -176,8 +189,218 @@ enetfec_eth_open(struct rte_eth_dev *dev)
>  	return 0;
>  }
>
> +
> +static int
> +enetfec_eth_configure(__rte_unused struct rte_eth_dev *dev)
> +{
> +	ENET_PMD_INFO("%s: returning zero ", __func__);
> +	return 0;
> +}
> +
> +static int
> +enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
> +	struct rte_eth_dev_info *dev_info)
> +{
> +	dev_info->max_rx_queues = ENET_MAX_Q;
> +	dev_info->max_tx_queues = ENET_MAX_Q;
> +	dev_info->min_mtu = RTE_ETHER_MIN_MTU;

max_mtu?

> +	dev_info->rx_offload_capa = dev_rx_offloads_sup;
> +	dev_info->tx_offload_capa = dev_tx_offloads_sup;
> +
> +	return 0;
> +}
> +
> +static const unsigned short offset_des_active_rxq[] = {
> +	ENET_RDAR_0, ENET_RDAR_1, ENET_RDAR_2
> +};
> +
> +static const unsigned short offset_des_active_txq[] = {
> +	ENET_TDAR_0, ENET_TDAR_1, ENET_TDAR_2
> +};
> +
> +static int
> +enetfec_tx_queue_setup(struct rte_eth_dev *dev,
> +	uint16_t queue_idx,
> +	uint16_t nb_desc,
> +	__rte_unused unsigned int socket_id,
> +	__rte_unused const struct rte_eth_txconf *tx_conf)

It is incorrect to silently ignore tx_conf. Not everything in it has
corresponding capabilities to be advertised by the driver and checked
by ethdev. So, you need to check that unsupported configuration is not
requested, e.g. deferred start.
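Something along these lines (untested sketch; tx_deferred_start is the
field name in struct rte_eth_txconf):

	if (tx_conf->tx_deferred_start) {
		ENET_PMD_ERR("%s: Tx deferred start is not supported",
			__func__);
		return -EINVAL;
	}

The same applies to rx_conf in Rx queue setup below.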
> +{
> +	struct enetfec_private *fep = dev->data->dev_private;
> +	unsigned int i;
> +	struct bufdesc *bdp, *bd_base;
> +	struct enetfec_priv_tx_q *txq;
> +	unsigned int size;
> +	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
> +		sizeof(struct bufdesc);
> +	unsigned int dsize_log2 = fls64(dsize);
> +
> +	/* allocate transmit queue */
> +	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
> +	if (!txq) {

Compare vs NULL
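I.e. per the DPDK coding style:

	if (txq == NULL) {

The same applies to the other pointer and value checks noted below.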
> +		ENET_PMD_ERR("transmit queue allocation failed");
> +		return -ENOMEM;
> +	}
> +
> +	if (nb_desc > MAX_TX_BD_RING_SIZE) {
> +		nb_desc = MAX_TX_BD_RING_SIZE;
> +		ENET_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE\n");
> +	}
> +	txq->bd.ring_size = nb_desc;
> +	fep->total_tx_ring_size += txq->bd.ring_size;
> +	fep->tx_queues[queue_idx] = txq;
> +
> +	rte_write32(fep->bd_addr_p_t[queue_idx],
> +		fep->hw_baseaddr_v + ENET_TD_START(queue_idx));
> +
> +	/* Set transmit descriptor base. */
> +	txq = fep->tx_queues[queue_idx];
> +	txq->fep = fep;
> +	size = dsize * txq->bd.ring_size;
> +	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
> +	txq->bd.que_id = queue_idx;
> +	txq->bd.base = bd_base;
> +	txq->bd.cur = bd_base;
> +	txq->bd.d_size = dsize;
> +	txq->bd.d_size_log2 = dsize_log2;
> +	txq->bd.active_reg_desc =
> +		fep->hw_baseaddr_v + offset_des_active_txq[queue_idx];
> +	bd_base = (struct bufdesc *)(((void *)bd_base) + size);
> +	txq->bd.last = (struct bufdesc *)(((void *)bd_base) - dsize);
> +	bdp = txq->bd.base;
> +	bdp = txq->bd.cur;
> +
> +	for (i = 0; i < txq->bd.ring_size; i++) {
> +		/* Initialize the BD for every fragment in the page. */
> +		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
> +		if (txq->tx_mbuf[i]) {

Compare vs NULL

> +			rte_pktmbuf_free(txq->tx_mbuf[i]);
> +			txq->tx_mbuf[i] = NULL;
> +		}
> +		rte_write32(rte_cpu_to_le_32(0), &bdp->bd_bufaddr);
> +		bdp = enet_get_nextdesc(bdp, &txq->bd);
> +	}
> +
> +	/* Set the last buffer to wrap */
> +	bdp = enet_get_prevdesc(bdp, &txq->bd);
> +	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
> +		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
> +	txq->dirty_tx = bdp;
> +	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
> +	return 0;
> +}
> +
> +static int
> +enetfec_rx_queue_setup(struct rte_eth_dev *dev,
> +	uint16_t queue_idx,
> +	uint16_t nb_rx_desc,
> +	__rte_unused unsigned int socket_id,
> +	__rte_unused const struct rte_eth_rxconf *rx_conf,
> +	struct rte_mempool *mb_pool)
> +{
> +	struct enetfec_private *fep = dev->data->dev_private;
> +	unsigned int i;
> +	struct bufdesc *bd_base;
> +	struct bufdesc *bdp;
> +	struct enetfec_priv_rx_q *rxq;
> +	unsigned int size;
> +	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
> +		sizeof(struct bufdesc);
> +	unsigned int dsize_log2 = fls64(dsize);
> +
> +	/* allocate receive queue */
> +	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
> +	if (!rxq) {

Compare vs NULL

> +		ENET_PMD_ERR("receive queue allocation failed");
> +		return -ENOMEM;
> +	}
> +
> +	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
> +		nb_rx_desc = MAX_RX_BD_RING_SIZE;
> +		ENET_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE\n");
> +	}
> +
> +	rxq->bd.ring_size = nb_rx_desc;
> +	fep->total_rx_ring_size += rxq->bd.ring_size;
> +	fep->rx_queues[queue_idx] = rxq;
> +
> +	rte_write32(fep->bd_addr_p_r[queue_idx],
> +		fep->hw_baseaddr_v + ENET_RD_START(queue_idx));
> +	rte_write32(PKT_MAX_BUF_SIZE,
> +		fep->hw_baseaddr_v + ENET_MRB_SIZE(queue_idx));
> +
> +	/* Set receive descriptor base. */
> +	rxq = fep->rx_queues[queue_idx];
> +	rxq->pool = mb_pool;
> +	size = dsize * rxq->bd.ring_size;
> +	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
> +	rxq->bd.que_id = queue_idx;
> +	rxq->bd.base = bd_base;
> +	rxq->bd.cur = bd_base;
> +	rxq->bd.d_size = dsize;
> +	rxq->bd.d_size_log2 = dsize_log2;
> +	rxq->bd.active_reg_desc =
> +		fep->hw_baseaddr_v + offset_des_active_rxq[queue_idx];
> +	bd_base = (struct bufdesc *)(((void *)bd_base) + size);
> +	rxq->bd.last = (struct bufdesc *)(((void *)bd_base) - dsize);
> +
> +	rxq->fep = fep;
> +	bdp = rxq->bd.base;
> +	rxq->bd.cur = bdp;
> +
> +	for (i = 0; i < nb_rx_desc; i++) {
> +		/* Initialize Rx buffers from pktmbuf pool */
> +		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
> +		if (mbuf == NULL) {
> +			ENET_PMD_ERR("mbuf failed\n");
> +			goto err_alloc;
> +		}
> +
> +		/* Get the virtual address & physical address */
> +		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
> +			&bdp->bd_bufaddr);
> +
> +		rxq->rx_mbuf[i] = mbuf;
> +		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);
> +
> +		bdp = enet_get_nextdesc(bdp, &rxq->bd);
> +	}
> +
> +	/* Initialize the receive buffer descriptors. */
> +	bdp = rxq->bd.cur;
> +	for (i = 0; i < rxq->bd.ring_size; i++) {
> +		/* Initialize the BD for every fragment in the page. */
> +		if (rte_read32(&bdp->bd_bufaddr))

Compare vs 0

> +			rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
> +				&bdp->bd_sc);
> +		else
> +			rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
> +
> +		bdp = enet_get_nextdesc(bdp, &rxq->bd);
> +	}
> +
> +	/* Set the last buffer to wrap */
> +	bdp = enet_get_prevdesc(bdp, &rxq->bd);
> +	rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
> +		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
> +	dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
> +	rte_write32(0x0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
> +	return 0;
> +
> +err_alloc:
> +	for (i = 0; i < nb_rx_desc; i++) {
> +		rte_pktmbuf_free(rxq->rx_mbuf[i]);
> +		rxq->rx_mbuf[i] = NULL;
> +	}
> +	rte_free(rxq);
> +	return -1;

Callback returns negative errno, not -1
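I.e. the failure path above should end with something like (sketch; the
failure here is an mbuf allocation failure):

	rte_free(rxq);
	return -ENOMEM;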
> +}
> +
>  static const struct eth_dev_ops ops = {
>  	.dev_start = enetfec_eth_open,
> +	.dev_configure = enetfec_eth_configure,
> +	.dev_infos_get = enetfec_eth_info,
> +	.rx_queue_setup = enetfec_rx_queue_setup,
> +	.tx_queue_setup = enetfec_tx_queue_setup,

Order in the structure should match the order of the corresponding
callbacks in struct eth_dev_ops for consistency.

>  };
>
>  static int
>
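Re the ordering comment above, i.e. something like the following
(dev_configure comes before dev_start, and dev_infos_get before the
queue setup callbacks; please double-check against the struct
eth_dev_ops definition in rte_ethdev_core.h):

	static const struct eth_dev_ops ops = {
		.dev_configure = enetfec_eth_configure,
		.dev_start = enetfec_eth_open,
		.dev_infos_get = enetfec_eth_info,
		.rx_queue_setup = enetfec_rx_queue_setup,
		.tx_queue_setup = enetfec_tx_queue_setup,
	};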