From: Mingxia Liu <mingxia.liu@intel.com>
To: dev@dpdk.org, beilei.xing@intel.com, yuying.zhang@intel.com
Cc: Mingxia Liu <mingxia.liu@intel.com>
Subject: [PATCH v6 03/21] net/cpfl: add Rx queue setup
Date: Mon, 13 Feb 2023 02:19:38 +0000
Message-Id: <20230213021956.2953088-4-mingxia.liu@intel.com>
In-Reply-To: <20230213021956.2953088-1-mingxia.liu@intel.com>
References: <20230209084541.2712723-1-mingxia.liu@intel.com>
 <20230213021956.2953088-1-mingxia.liu@intel.com>

Add support for the rx_queue_setup ops.
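In the single queue model, the Rx queue owns a SW ring and is reset via
idpf_qc_single_rx_queue_reset(). In the split queue model
(VIRTCHNL2_QUEUE_MODEL_SPLIT), each Rx descriptor queue is additionally
backed by two buffer queues, which are allocated and configured as part
of the same setup call.

For context, the new ops is reached through the generic ethdev API. The
sketch below is not part of the patch; the port id, descriptor count,
and mbuf_pool are illustrative placeholders only:

	/* Illustrative usage, assuming port 0 is a cpfl device and that
	 * mbuf_pool was created with rte_pktmbuf_pool_create() beforehand.
	 */
	struct rte_eth_rxconf rxconf = {
		.rx_free_thresh = 32, /* 0 selects CPFL_DEFAULT_RX_FREE_THRESH */
	};
	int ret = rte_eth_rx_queue_setup(0 /* port */, 0 /* queue */,
					 1024 /* nb_desc */, rte_socket_id(),
					 &rxconf, mbuf_pool);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Rx queue setup failed: %d\n", ret);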
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  11 ++
 drivers/net/cpfl/cpfl_rxtx.c   | 232 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.h   |   6 +
 3 files changed, 249 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 5ca21c9772..3029f03d02 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -102,12 +102,22 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
 	};
 
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH,
+	};
+
 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = CPFL_MAX_RING_DESC,
 		.nb_min = CPFL_MIN_RING_DESC,
 		.nb_align = CPFL_ALIGN_RING_DESC,
 	};
 
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
 	return 0;
 }
 
@@ -525,6 +535,7 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.dev_configure			= cpfl_dev_configure,
 	.dev_close			= cpfl_dev_close,
+	.rx_queue_setup			= cpfl_rx_queue_setup,
 	.tx_queue_setup			= cpfl_tx_queue_setup,
 	.dev_infos_get			= cpfl_dev_info_get,
 	.link_update			= cpfl_dev_link_update,
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 5b69ac0009..042b848ce2 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -9,6 +9,25 @@
 #include "cpfl_ethdev.h"
 #include "cpfl_rxtx.h"
 
+static uint64_t
+cpfl_rx_offload_convert(uint64_t offload)
+{
+	uint64_t ol = 0;
+
+	if ((offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0)
+		ol |= IDPF_RX_OFFLOAD_IPV4_CKSUM;
+	if ((offload & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) != 0)
+		ol |= IDPF_RX_OFFLOAD_UDP_CKSUM;
+	if ((offload & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) != 0)
+		ol |= IDPF_RX_OFFLOAD_TCP_CKSUM;
+	if ((offload & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
+		ol |= IDPF_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	if ((offload & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0)
+		ol |= IDPF_RX_OFFLOAD_TIMESTAMP;
+
+	return ol;
+}
+
 static uint64_t
 cpfl_tx_offload_convert(uint64_t offload)
 {
@@ -94,6 +113,219 @@ cpfl_dma_zone_release(const struct rte_memzone *mz)
 	rte_memzone_free(mz);
 }
 
+static int
+cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
+			 uint16_t queue_idx, uint16_t rx_free_thresh,
+			 uint16_t nb_desc, unsigned int socket_id,
+			 struct rte_mempool *mp, uint8_t bufq_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_hw *hw = &adapter->hw;
+	const struct rte_memzone *mz;
+	struct idpf_rx_queue *bufq;
+	uint16_t len;
+	int ret;
+
+	bufq = rte_zmalloc_socket("cpfl bufq",
+				  sizeof(struct idpf_rx_queue),
+				  RTE_CACHE_LINE_SIZE,
+				  socket_id);
+	if (bufq == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer queue.");
+		ret = -ENOMEM;
+		goto err_bufq1_alloc;
+	}
+
+	bufq->mp = mp;
+	bufq->nb_rx_desc = nb_desc;
+	bufq->rx_free_thresh = rx_free_thresh;
+	bufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;
+	bufq->port_id = dev->data->port_id;
+	bufq->rx_hdr_len = 0;
+	bufq->adapter = adapter;
+
+	len = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;
+	bufq->rx_buf_len = len;
+
+	/* Allocate a little more to support bulk allocate.
+	 */
+	len = nb_desc + IDPF_RX_MAX_BURST;
+
+	mz = cpfl_dma_zone_reserve(dev, queue_idx, len,
+				   VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
+				   socket_id, true);
+	if (mz == NULL) {
+		ret = -ENOMEM;
+		goto err_mz_reserve;
+	}
+
+	bufq->rx_ring_phys_addr = mz->iova;
+	bufq->rx_ring = mz->addr;
+	bufq->mz = mz;
+
+	bufq->sw_ring =
+		rte_zmalloc_socket("cpfl rx bufq sw ring",
+				   sizeof(struct rte_mbuf *) * len,
+				   RTE_CACHE_LINE_SIZE,
+				   socket_id);
+	if (bufq->sw_ring == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+		ret = -ENOMEM;
+		goto err_sw_ring_alloc;
+	}
+
+	idpf_qc_split_rx_bufq_reset(bufq);
+	bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
+			 queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+	bufq->q_set = true;
+
+	if (bufq_id == 1) {
+		rxq->bufq1 = bufq;
+	} else if (bufq_id == 2) {
+		rxq->bufq2 = bufq;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid buffer queue index.");
+		ret = -EINVAL;
+		goto err_bufq_id;
+	}
+
+	return 0;
+
+err_bufq_id:
+	rte_free(bufq->sw_ring);
+err_sw_ring_alloc:
+	cpfl_dma_zone_release(mz);
+err_mz_reserve:
+	rte_free(bufq);
+err_bufq1_alloc:
+	return ret;
+}
+
+static void
+cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
+{
+	rte_free(bufq->sw_ring);
+	cpfl_dma_zone_release(bufq->mz);
+	rte_free(bufq);
+}
+
+int
+cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+		    uint16_t nb_desc, unsigned int socket_id,
+		    const struct rte_eth_rxconf *rx_conf,
+		    struct rte_mempool *mp)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_hw *hw = &adapter->hw;
+	const struct rte_memzone *mz;
+	struct idpf_rx_queue *rxq;
+	uint16_t rx_free_thresh;
+	uint64_t offloads;
+	bool is_splitq;
+	uint16_t len;
+	int ret;
+
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+	/* Check free threshold */
+	rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+		CPFL_DEFAULT_RX_FREE_THRESH :
+		rx_conf->rx_free_thresh;
+	if (idpf_qc_rx_thresh_check(nb_desc, rx_free_thresh) != 0)
+		return -EINVAL;
+
+	/* Setup Rx queue */
+	rxq = rte_zmalloc_socket("cpfl rxq",
+				 sizeof(struct idpf_rx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (rxq == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+		ret = -ENOMEM;
+		goto err_rxq_alloc;
+	}
+
+	is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
+
+	rxq->mp = mp;
+	rxq->nb_rx_desc = nb_desc;
+	rxq->rx_free_thresh = rx_free_thresh;
+	rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
+	rxq->port_id = dev->data->port_id;
+	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+	rxq->rx_hdr_len = 0;
+	rxq->adapter = adapter;
+	rxq->offloads = cpfl_rx_offload_convert(offloads);
+
+	len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+	rxq->rx_buf_len = len;
+
+	/* Allocate a little more to support bulk allocate.
+	 */
+	len = nb_desc + IDPF_RX_MAX_BURST;
+	mz = cpfl_dma_zone_reserve(dev, queue_idx, len, VIRTCHNL2_QUEUE_TYPE_RX,
+				   socket_id, is_splitq);
+	if (mz == NULL) {
+		ret = -ENOMEM;
+		goto err_mz_reserve;
+	}
+	rxq->rx_ring_phys_addr = mz->iova;
+	rxq->rx_ring = mz->addr;
+	rxq->mz = mz;
+
+	if (!is_splitq) {
+		rxq->sw_ring = rte_zmalloc_socket("cpfl rxq sw ring",
+						  sizeof(struct rte_mbuf *) * len,
+						  RTE_CACHE_LINE_SIZE,
+						  socket_id);
+		if (rxq->sw_ring == NULL) {
+			PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+			ret = -ENOMEM;
+			goto err_sw_ring_alloc;
+		}
+
+		idpf_qc_single_rx_queue_reset(rxq);
+		rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
+				queue_idx * vport->chunks_info.rx_qtail_spacing);
+	} else {
+		idpf_qc_split_rx_descq_reset(rxq);
+
+		/* Setup Rx buffer queues */
+		ret = cpfl_rx_split_bufq_setup(dev, rxq, 2 * queue_idx,
+					       rx_free_thresh, nb_desc,
+					       socket_id, mp, 1);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to setup buffer queue 1");
+			ret = -EINVAL;
+			goto err_bufq1_setup;
+		}
+
+		ret = cpfl_rx_split_bufq_setup(dev, rxq, 2 * queue_idx + 1,
+					       rx_free_thresh, nb_desc,
+					       socket_id, mp, 2);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to setup buffer queue 2");
+			ret = -EINVAL;
+			goto err_bufq2_setup;
+		}
+	}
+
+	rxq->q_set = true;
+	dev->data->rx_queues[queue_idx] = rxq;
+
+	return 0;
+
+err_bufq2_setup:
+	cpfl_rx_split_bufq_release(rxq->bufq1);
+err_bufq1_setup:
+err_sw_ring_alloc:
+	cpfl_dma_zone_release(mz);
+err_mz_reserve:
+	rte_free(rxq);
+err_rxq_alloc:
+	return ret;
+}
+
 static int
 cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
 		     uint16_t queue_idx, uint16_t nb_desc,
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 232630c5e9..e0221abfa3 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -16,10 +16,16 @@
 /* Base address of the HW descriptor ring should be 128B aligned. */
 #define CPFL_RING_BASE_ALIGN	128
 
+#define CPFL_DEFAULT_RX_FREE_THRESH	32
+
 #define CPFL_DEFAULT_TX_RS_THRESH	32
 #define CPFL_DEFAULT_TX_FREE_THRESH	32
 
 int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			uint16_t nb_desc, unsigned int socket_id,
 			const struct rte_eth_txconf *tx_conf);
+int cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			uint16_t nb_desc, unsigned int socket_id,
+			const struct rte_eth_rxconf *rx_conf,
+			struct rte_mempool *mp);
 #endif /* _CPFL_RXTX_H_ */
-- 
2.25.1