From mboxrd@z Thu Jan 1 00:00:00 1970
From: Mingxia Liu <mingxia.liu@intel.com>
To: dev@dpdk.org, qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com
Cc: Mingxia Liu <mingxia.liu@intel.com>
Subject: [PATCH v5 03/21] net/cpfl: add Rx queue setup
Date: Thu, 9 Feb 2023 08:45:23 +0000
Message-Id: <20230209084541.2712723-4-mingxia.liu@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230209084541.2712723-1-mingxia.liu@intel.com>
References: <20230118075738.904616-1-mingxia.liu@intel.com> <20230209084541.2712723-1-mingxia.liu@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

Add support for rx_queue_setup ops.
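
For reviewers who want to exercise the new ops, it is reached through the
normal ethdev API. Below is a minimal, illustrative sketch (not part of the
patch); the pool name, mbuf count and descriptor count are assumptions chosen
for the example:

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative sizes only; not taken from this patch. */
#define EXAMPLE_NB_RX_DESC 1024
#define EXAMPLE_NB_MBUFS   4096

static int
example_setup_rx_queue(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	struct rte_mempool *mp;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Mbuf pool backing the Rx descriptor ring. */
	mp = rte_pktmbuf_pool_create("example_rx_pool", EXAMPLE_NB_MBUFS,
				     256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
				     rte_eth_dev_socket_id(port_id));
	if (mp == NULL)
		return -ENOMEM;

	/* Start from the defaults the driver now reports via dev_infos_get;
	 * an rx_free_thresh of 0 falls back to CPFL_DEFAULT_RX_FREE_THRESH.
	 */
	rxconf = dev_info.default_rxconf;

	/* Queue 0; this call is dispatched to cpfl_rx_queue_setup(). */
	return rte_eth_rx_queue_setup(port_id, 0, EXAMPLE_NB_RX_DESC,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, mp);
}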
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  11 ++
 drivers/net/cpfl/cpfl_rxtx.c   | 232 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.h   |   6 +
 3 files changed, 249 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index abb9f8d617..fb530c7690 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -102,12 +102,22 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
 	};
 
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH,
+	};
+
 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = CPFL_MAX_RING_DESC,
 		.nb_min = CPFL_MIN_RING_DESC,
 		.nb_align = CPFL_ALIGN_RING_DESC,
 	};
 
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
 	return 0;
 }
 
@@ -525,6 +535,7 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.dev_configure = cpfl_dev_configure,
 	.dev_close = cpfl_dev_close,
+	.rx_queue_setup = cpfl_rx_queue_setup,
 	.tx_queue_setup = cpfl_tx_queue_setup,
 	.dev_infos_get = cpfl_dev_info_get,
 	.link_update = cpfl_dev_link_update,
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index e0f8484b19..4083e8c3b6 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -9,6 +9,25 @@
 #include "cpfl_ethdev.h"
 #include "cpfl_rxtx.h"
 
+static uint64_t
+cpfl_rx_offload_convert(uint64_t offload)
+{
+	uint64_t ol = 0;
+
+	if ((offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0)
+		ol |= IDPF_RX_OFFLOAD_IPV4_CKSUM;
+	if ((offload & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) != 0)
+		ol |= IDPF_RX_OFFLOAD_UDP_CKSUM;
+	if ((offload & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) != 0)
+		ol |= IDPF_RX_OFFLOAD_TCP_CKSUM;
+	if ((offload & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
+		ol |= IDPF_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	if ((offload & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0)
+		ol |= IDPF_RX_OFFLOAD_TIMESTAMP;
+
+	return ol;
+}
+
 static uint64_t
 cpfl_tx_offload_convert(uint64_t offload)
 {
@@ -94,6 +113,219 @@ cpfl_dma_zone_release(const struct rte_memzone *mz)
 	rte_memzone_free(mz);
 }
 
+static int
+cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
+			 uint16_t queue_idx, uint16_t rx_free_thresh,
+			 uint16_t nb_desc, unsigned int socket_id,
+			 struct rte_mempool *mp, uint8_t bufq_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_hw *hw = &adapter->hw;
+	const struct rte_memzone *mz;
+	struct idpf_rx_queue *bufq;
+	uint16_t len;
+	int ret;
+
+	bufq = rte_zmalloc_socket("cpfl bufq",
+				  sizeof(struct idpf_rx_queue),
+				  RTE_CACHE_LINE_SIZE,
+				  socket_id);
+	if (bufq == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer queue.");
+		ret = -ENOMEM;
+		goto err_bufq1_alloc;
+	}
+
+	bufq->mp = mp;
+	bufq->nb_rx_desc = nb_desc;
+	bufq->rx_free_thresh = rx_free_thresh;
+	bufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;
+	bufq->port_id = dev->data->port_id;
+	bufq->rx_hdr_len = 0;
+	bufq->adapter = adapter;
+
+	len = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;
+	bufq->rx_buf_len = len;
+
+	/* Allocate a little more to support bulk allocate. */
+	len = nb_desc + IDPF_RX_MAX_BURST;
+
+	mz = cpfl_dma_zone_reserve(dev, queue_idx, len,
+				   VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
+				   socket_id, true);
+	if (mz == NULL) {
+		ret = -ENOMEM;
+		goto err_mz_reserve;
+	}
+
+	bufq->rx_ring_phys_addr = mz->iova;
+	bufq->rx_ring = mz->addr;
+	bufq->mz = mz;
+
+	bufq->sw_ring =
+		rte_zmalloc_socket("cpfl rx bufq sw ring",
+				   sizeof(struct rte_mbuf *) * len,
+				   RTE_CACHE_LINE_SIZE,
+				   socket_id);
+	if (bufq->sw_ring == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+		ret = -ENOMEM;
+		goto err_sw_ring_alloc;
+	}
+
+	idpf_qc_split_rx_bufq_reset(bufq);
+	bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
+			 queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+	bufq->q_set = true;
+
+	if (bufq_id == 1) {
+		rxq->bufq1 = bufq;
+	} else if (bufq_id == 2) {
+		rxq->bufq2 = bufq;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid buffer queue index.");
+		ret = -EINVAL;
+		goto err_bufq_id;
+	}
+
+	return 0;
+
+err_bufq_id:
+	rte_free(bufq->sw_ring);
+err_sw_ring_alloc:
+	cpfl_dma_zone_release(mz);
+err_mz_reserve:
+	rte_free(bufq);
+err_bufq1_alloc:
+	return ret;
+}
+
+static void
+cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
+{
+	rte_free(bufq->sw_ring);
+	cpfl_dma_zone_release(bufq->mz);
+	rte_free(bufq);
+}
+
+int
+cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+		    uint16_t nb_desc, unsigned int socket_id,
+		    const struct rte_eth_rxconf *rx_conf,
+		    struct rte_mempool *mp)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_hw *hw = &adapter->hw;
+	const struct rte_memzone *mz;
+	struct idpf_rx_queue *rxq;
+	uint16_t rx_free_thresh;
+	uint64_t offloads;
+	bool is_splitq;
+	uint16_t len;
+	int ret;
+
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+	/* Check free threshold */
+	rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+		CPFL_DEFAULT_RX_FREE_THRESH :
+		rx_conf->rx_free_thresh;
+	if (idpf_qc_rx_thresh_check(nb_desc, rx_free_thresh) != 0)
+		return -EINVAL;
+
+	/* Setup Rx queue */
+	rxq = rte_zmalloc_socket("cpfl rxq",
+				 sizeof(struct idpf_rx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (rxq == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+		ret = -ENOMEM;
+		goto err_rxq_alloc;
+	}
+
+	is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
+
+	rxq->mp = mp;
+	rxq->nb_rx_desc = nb_desc;
+	rxq->rx_free_thresh = rx_free_thresh;
+	rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
+	rxq->port_id = dev->data->port_id;
+	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+	rxq->rx_hdr_len = 0;
+	rxq->adapter = adapter;
+	rxq->offloads = cpfl_rx_offload_convert(offloads);
+
+	len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+	rxq->rx_buf_len = len;
+
+	/* Allocate a little more to support bulk allocate. */
+	len = nb_desc + IDPF_RX_MAX_BURST;
+	mz = cpfl_dma_zone_reserve(dev, queue_idx, len, VIRTCHNL2_QUEUE_TYPE_RX,
+				   socket_id, is_splitq);
+	if (mz == NULL) {
+		ret = -ENOMEM;
+		goto err_mz_reserve;
+	}
+	rxq->rx_ring_phys_addr = mz->iova;
+	rxq->rx_ring = mz->addr;
+	rxq->mz = mz;
+
+	if (!is_splitq) {
+		rxq->sw_ring = rte_zmalloc_socket("cpfl rxq sw ring",
+						  sizeof(struct rte_mbuf *) * len,
+						  RTE_CACHE_LINE_SIZE,
+						  socket_id);
+		if (rxq->sw_ring == NULL) {
+			PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+			ret = -ENOMEM;
+			goto err_sw_ring_alloc;
+		}
+
+		idpf_qc_single_rx_queue_reset(rxq);
+		rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
+				queue_idx * vport->chunks_info.rx_qtail_spacing);
+	} else {
+		idpf_qc_split_rx_descq_reset(rxq);
+
+		/* Setup Rx buffer queues */
+		ret = cpfl_rx_split_bufq_setup(dev, rxq, 2 * queue_idx,
+					       rx_free_thresh, nb_desc,
+					       socket_id, mp, 1);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to setup buffer queue 1");
+			ret = -EINVAL;
+			goto err_bufq1_setup;
+		}
+
+		ret = cpfl_rx_split_bufq_setup(dev, rxq, 2 * queue_idx + 1,
					       rx_free_thresh, nb_desc,
+					       socket_id, mp, 2);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to setup buffer queue 2");
+			ret = -EINVAL;
+			goto err_bufq2_setup;
+		}
+	}
+
+	rxq->q_set = true;
+	dev->data->rx_queues[queue_idx] = rxq;
+
+	return 0;
+
+err_bufq2_setup:
+	cpfl_rx_split_bufq_release(rxq->bufq1);
+err_bufq1_setup:
+err_sw_ring_alloc:
+	cpfl_dma_zone_release(mz);
+err_mz_reserve:
+	rte_free(rxq);
+err_rxq_alloc:
+	return ret;
+}
+
 static int
 cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
 		     uint16_t queue_idx, uint16_t nb_desc,
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 232630c5e9..e0221abfa3 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -16,10 +16,16 @@
 /* Base address of the HW descriptor ring should be 128B aligned. */
 #define CPFL_RING_BASE_ALIGN	128
 
+#define CPFL_DEFAULT_RX_FREE_THRESH	32
+
 #define CPFL_DEFAULT_TX_RS_THRESH	32
 #define CPFL_DEFAULT_TX_FREE_THRESH	32
 
 int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			uint16_t nb_desc, unsigned int socket_id,
 			const struct rte_eth_txconf *tx_conf);
+int cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			uint16_t nb_desc, unsigned int socket_id,
+			const struct rte_eth_rxconf *rx_conf,
+			struct rte_mempool *mp);
 #endif /* _CPFL_RXTX_H_ */
-- 
2.25.1