From: Junfeng Guo
To: andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com, jingjing.wu@intel.com,
 beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo, Xiaoyun Li
Subject: [PATCH v13 04/18] net/idpf: add Rx queue setup
Date: Thu, 27 Oct 2022 13:44:51 +0800
Message-Id: <20221027054505.1369248-5-junfeng.guo@intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20221027054505.1369248-1-junfeng.guo@intel.com>
References: <20221026101027.240583-2-junfeng.guo@intel.com>
 <20221027054505.1369248-1-junfeng.guo@intel.com>

Add support for rx_queue_setup ops.
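
Not part of the patch itself: below is a minimal sketch of how an application
is expected to reach the new callback through the generic ethdev API. The
helper name, port id, queue id, descriptor count and mempool are illustrative
assumptions, and the port is assumed to have been configured already with
rte_eth_dev_configure().

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Illustrative only: set up Rx queue 0 of a port, starting from the default
 * Rx configuration the driver advertises (for idpf this is
 * rx_free_thresh = 32, see idpf_dev_info_get()).
 */
static int
example_rx_queue_setup(uint16_t port_id, struct rte_mempool *mb_pool)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf rxconf;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        /* Reuse the driver's default Rx queue configuration. */
        rxconf = dev_info.default_rxconf;

        /* 1024 descriptors: an arbitrary count, assumed to fall within the
         * rx_desc_lim range reported by the driver.
         */
        return rte_eth_rx_queue_setup(port_id, 0, 1024,
                                      rte_eth_dev_socket_id(port_id),
                                      &rxconf, mb_pool);
}

In split queue model the driver additionally creates two buffer queues per Rx
queue behind this single call, as implemented below.
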
Signed-off-by: Beilei Xing
Signed-off-by: Xiaoyun Li
Signed-off-by: Junfeng Guo
---
 drivers/net/idpf/idpf_ethdev.c |  11 +
 drivers/net/idpf/idpf_rxtx.c   | 400 +++++++++++++++++++++++++++++++++
 drivers/net/idpf/idpf_rxtx.h   |  46 ++++
 3 files changed, 457 insertions(+)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 11f8b4ba1c..0585153f69 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -38,6 +38,7 @@ static void idpf_adapter_rel(struct idpf_adapter *adapter);
 static const struct eth_dev_ops idpf_eth_dev_ops = {
         .dev_configure                  = idpf_dev_configure,
         .dev_close                      = idpf_dev_close,
+        .rx_queue_setup                 = idpf_rx_queue_setup,
         .tx_queue_setup                 = idpf_tx_queue_setup,
         .dev_infos_get                  = idpf_dev_info_get,
 };
@@ -63,12 +64,22 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                 .tx_rs_thresh = IDPF_DEFAULT_TX_RS_THRESH,
         };
 
+        dev_info->default_rxconf = (struct rte_eth_rxconf) {
+                .rx_free_thresh = IDPF_DEFAULT_RX_FREE_THRESH,
+        };
+
         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                 .nb_max = IDPF_MAX_RING_DESC,
                 .nb_min = IDPF_MIN_RING_DESC,
                 .nb_align = IDPF_ALIGN_RING_DESC,
         };
 
+        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+                .nb_max = IDPF_MAX_RING_DESC,
+                .nb_min = IDPF_MIN_RING_DESC,
+                .nb_align = IDPF_ALIGN_RING_DESC,
+        };
+
         return 0;
 }
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 4afa0a2560..25dd5d85d5 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -8,6 +8,21 @@
 #include "idpf_ethdev.h"
 #include "idpf_rxtx.h"
 
+static int
+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
+{
+        /* The following constraints must be satisfied:
+         *   thresh < rxq->nb_rx_desc
+         */
+        if (thresh >= nb_desc) {
+                PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
+                             thresh, nb_desc);
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
 static int
 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
                 uint16_t tx_free_thresh)
@@ -56,6 +71,87 @@ check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
         return 0;
 }
 
+static void
+reset_split_rx_descq(struct idpf_rx_queue *rxq)
+{
+        uint16_t len;
+        uint32_t i;
+
+        if (rxq == NULL)
+                return;
+
+        len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+        for (i = 0; i < len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3);
+             i++)
+                ((volatile char *)rxq->rx_ring)[i] = 0;
+
+        rxq->rx_tail = 0;
+        rxq->expected_gen_id = 1;
+}
+
+static void
+reset_split_rx_bufq(struct idpf_rx_queue *rxq)
+{
+        uint16_t len;
+        uint32_t i;
+
+        if (rxq == NULL)
+                return;
+
+        len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+        for (i = 0; i < len * sizeof(struct virtchnl2_splitq_rx_buf_desc);
+             i++)
+                ((volatile char *)rxq->rx_ring)[i] = 0;
+
+        memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+        for (i = 0; i < IDPF_RX_MAX_BURST; i++)
+                rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+        /* The next descriptor id which can be received. */
+        rxq->rx_next_avail = 0;
+
+        /* The next descriptor id which can be refilled. */
+        rxq->rx_tail = 0;
+        /* The number of descriptors which can be refilled. */
+        rxq->nb_rx_hold = rxq->nb_rx_desc - 1;
+
+        rxq->bufq1 = NULL;
+        rxq->bufq2 = NULL;
+}
+
+static void
+reset_single_rx_queue(struct idpf_rx_queue *rxq)
+{
+        uint16_t len;
+        uint32_t i;
+
+        if (rxq == NULL)
+                return;
+
+        len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+        for (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);
+             i++)
+                ((volatile char *)rxq->rx_ring)[i] = 0;
+
+        memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+        for (i = 0; i < IDPF_RX_MAX_BURST; i++)
+                rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+        rxq->rx_tail = 0;
+        rxq->nb_rx_hold = 0;
+
+        if (rxq->pkt_first_seg != NULL)
+                rte_pktmbuf_free(rxq->pkt_first_seg);
+
+        rxq->pkt_first_seg = NULL;
+        rxq->pkt_last_seg = NULL;
+}
+
 static void
 reset_split_tx_descq(struct idpf_tx_queue *txq)
 {
@@ -145,6 +241,310 @@ reset_single_tx_queue(struct idpf_tx_queue *txq)
         txq->next_rs = txq->rs_thresh - 1;
 }
 
+static int
+idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+                         uint16_t queue_idx, uint16_t rx_free_thresh,
+                         uint16_t nb_desc, unsigned int socket_id,
+                         struct rte_mempool *mp)
+{
+        struct idpf_vport *vport = dev->data->dev_private;
+        struct idpf_adapter *adapter = vport->adapter;
+        struct idpf_hw *hw = &adapter->hw;
+        const struct rte_memzone *mz;
+        uint32_t ring_size;
+        uint16_t len;
+
+        bufq->mp = mp;
+        bufq->nb_rx_desc = nb_desc;
+        bufq->rx_free_thresh = rx_free_thresh;
+        bufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;
+        bufq->port_id = dev->data->port_id;
+        bufq->rx_hdr_len = 0;
+        bufq->adapter = adapter;
+
+        len = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;
+        bufq->rx_buf_len = len;
+
+        /* Allocate the software ring. */
+        len = nb_desc + IDPF_RX_MAX_BURST;
+        bufq->sw_ring =
+                rte_zmalloc_socket("idpf rx bufq sw ring",
+                                   sizeof(struct rte_mbuf *) * len,
+                                   RTE_CACHE_LINE_SIZE,
+                                   socket_id);
+        if (bufq->sw_ring == NULL) {
+                PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+                return -ENOMEM;
+        }
+
+        /* Allocate a little more to support bulk allocation. */
+        len = nb_desc + IDPF_RX_MAX_BURST;
+        ring_size = RTE_ALIGN(len *
+                              sizeof(struct virtchnl2_splitq_rx_buf_desc),
+                              IDPF_DMA_MEM_ALIGN);
+        mz = rte_eth_dma_zone_reserve(dev, "rx_buf_ring", queue_idx,
+                                      ring_size, IDPF_RING_BASE_ALIGN,
+                                      socket_id);
+        if (mz == NULL) {
+                PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX buffer queue.");
+                rte_free(bufq->sw_ring);
+                return -ENOMEM;
+        }
+
+        /* Zero all the descriptors in the ring. */
+        memset(mz->addr, 0, ring_size);
+        bufq->rx_ring_phys_addr = mz->iova;
+        bufq->rx_ring = mz->addr;
+
+        bufq->mz = mz;
+        reset_split_rx_bufq(bufq);
+        bufq->q_set = true;
+        bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
+                         queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+
+        /* TODO: allow bulk or vec */
+
+        return 0;
+}
+
+static int
+idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                          uint16_t nb_desc, unsigned int socket_id,
+                          const struct rte_eth_rxconf *rx_conf,
+                          struct rte_mempool *mp)
+{
+        struct idpf_vport *vport = dev->data->dev_private;
+        struct idpf_adapter *adapter = vport->adapter;
+        struct idpf_rx_queue *bufq1, *bufq2;
+        const struct rte_memzone *mz;
+        struct idpf_rx_queue *rxq;
+        uint16_t rx_free_thresh;
+        uint32_t ring_size;
+        uint64_t offloads;
+        uint16_t qid;
+        uint16_t len;
+        int ret;
+
+        offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+        /* Check free threshold */
+        rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+                IDPF_DEFAULT_RX_FREE_THRESH :
+                rx_conf->rx_free_thresh;
+        if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
+                return -EINVAL;
+
+        /* Setup Rx descriptor queue */
+        rxq = rte_zmalloc_socket("idpf rxq",
+                                 sizeof(struct idpf_rx_queue),
+                                 RTE_CACHE_LINE_SIZE,
+                                 socket_id);
+        if (rxq == NULL) {
+                PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+                return -ENOMEM;
+        }
+
+        rxq->mp = mp;
+        rxq->nb_rx_desc = nb_desc;
+        rxq->rx_free_thresh = rx_free_thresh;
+        rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
+        rxq->port_id = dev->data->port_id;
+        rxq->rx_hdr_len = 0;
+        rxq->adapter = adapter;
+        rxq->offloads = offloads;
+
+        len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+        rxq->rx_buf_len = len;
+
+        len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+        ring_size = RTE_ALIGN(len *
+                              sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3),
+                              IDPF_DMA_MEM_ALIGN);
+        mz = rte_eth_dma_zone_reserve(dev, "rx_cpmpl_ring", queue_idx,
+                                      ring_size, IDPF_RING_BASE_ALIGN,
+                                      socket_id);
+
+        if (mz == NULL) {
+                PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
+                ret = -ENOMEM;
+                goto free_rxq;
+        }
+
+        /* Zero all the descriptors in the ring. */
+        memset(mz->addr, 0, ring_size);
+        rxq->rx_ring_phys_addr = mz->iova;
+        rxq->rx_ring = mz->addr;
+
+        rxq->mz = mz;
+        reset_split_rx_descq(rxq);
+
+        /* TODO: allow bulk or vec */
+
+        /* setup Rx buffer queue */
+        bufq1 = rte_zmalloc_socket("idpf bufq1",
+                                   sizeof(struct idpf_rx_queue),
+                                   RTE_CACHE_LINE_SIZE,
+                                   socket_id);
+        if (bufq1 == NULL) {
+                PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer queue 1.");
+                ret = -ENOMEM;
+                goto free_mz;
+        }
+        qid = 2 * queue_idx;
+        ret = idpf_rx_split_bufq_setup(dev, bufq1, qid, rx_free_thresh,
+                                       nb_desc, socket_id, mp);
+        if (ret != 0) {
+                PMD_INIT_LOG(ERR, "Failed to setup buffer queue 1");
+                ret = -EINVAL;
+                goto free_bufq1;
+        }
+        rxq->bufq1 = bufq1;
+
+        bufq2 = rte_zmalloc_socket("idpf bufq2",
+                                   sizeof(struct idpf_rx_queue),
+                                   RTE_CACHE_LINE_SIZE,
+                                   socket_id);
+        if (bufq2 == NULL) {
+                PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer queue 2.");
+                rte_free(bufq1->sw_ring);
+                rte_memzone_free(bufq1->mz);
+                ret = -ENOMEM;
+                goto free_bufq1;
+        }
+        qid = 2 * queue_idx + 1;
+        ret = idpf_rx_split_bufq_setup(dev, bufq2, qid, rx_free_thresh,
+                                       nb_desc, socket_id, mp);
+        if (ret != 0) {
+                PMD_INIT_LOG(ERR, "Failed to setup buffer queue 2");
+                rte_free(bufq1->sw_ring);
+                rte_memzone_free(bufq1->mz);
+                ret = -EINVAL;
+                goto free_bufq2;
+        }
+        rxq->bufq2 = bufq2;
+
+        rxq->q_set = true;
+        dev->data->rx_queues[queue_idx] = rxq;
+
+        return 0;
+
+free_bufq2:
+        rte_free(bufq2);
+free_bufq1:
+        rte_free(bufq1);
+free_mz:
+        rte_memzone_free(mz);
+free_rxq:
+        rte_free(rxq);
+
+        return ret;
+}
+
+static int
+idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                           uint16_t nb_desc, unsigned int socket_id,
+                           const struct rte_eth_rxconf *rx_conf,
+                           struct rte_mempool *mp)
+{
+        struct idpf_vport *vport = dev->data->dev_private;
+        struct idpf_adapter *adapter = vport->adapter;
+        struct idpf_hw *hw = &adapter->hw;
+        const struct rte_memzone *mz;
+        struct idpf_rx_queue *rxq;
+        uint16_t rx_free_thresh;
+        uint32_t ring_size;
+        uint64_t offloads;
+        uint16_t len;
+
+        offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+        /* Check free threshold */
+        rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+                IDPF_DEFAULT_RX_FREE_THRESH :
+                rx_conf->rx_free_thresh;
+        if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
+                return -EINVAL;
+
+        /* Setup Rx descriptor queue */
+        rxq = rte_zmalloc_socket("idpf rxq",
+                                 sizeof(struct idpf_rx_queue),
+                                 RTE_CACHE_LINE_SIZE,
+                                 socket_id);
+        if (rxq == NULL) {
+                PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+                return -ENOMEM;
+        }
+
+        rxq->mp = mp;
+        rxq->nb_rx_desc = nb_desc;
+        rxq->rx_free_thresh = rx_free_thresh;
+        rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
+        rxq->port_id = dev->data->port_id;
+        rxq->rx_hdr_len = 0;
+        rxq->adapter = adapter;
+        rxq->offloads = offloads;
+
+        len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+        rxq->rx_buf_len = len;
+
+        len = nb_desc + IDPF_RX_MAX_BURST;
+        rxq->sw_ring =
+                rte_zmalloc_socket("idpf rxq sw ring",
+                                   sizeof(struct rte_mbuf *) * len,
+                                   RTE_CACHE_LINE_SIZE,
+                                   socket_id);
+        if (rxq->sw_ring == NULL) {
+                PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+                rte_free(rxq);
+                return -ENOMEM;
+        }
+
+        /* Allocate a little more to support bulk allocation. */
+        len = nb_desc + IDPF_RX_MAX_BURST;
+        ring_size = RTE_ALIGN(len *
+                              sizeof(struct virtchnl2_singleq_rx_buf_desc),
+                              IDPF_DMA_MEM_ALIGN);
+        mz = rte_eth_dma_zone_reserve(dev, "rx ring", queue_idx,
+                                      ring_size, IDPF_RING_BASE_ALIGN,
+                                      socket_id);
+        if (mz == NULL) {
+                PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX buffer queue.");
+                rte_free(rxq->sw_ring);
+                rte_free(rxq);
+                return -ENOMEM;
+        }
+
+        /* Zero all the descriptors in the ring. */
+        memset(mz->addr, 0, ring_size);
+        rxq->rx_ring_phys_addr = mz->iova;
+        rxq->rx_ring = mz->addr;
+
+        rxq->mz = mz;
+        reset_single_rx_queue(rxq);
+        rxq->q_set = true;
+        dev->data->rx_queues[queue_idx] = rxq;
+        rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
+                        queue_idx * vport->chunks_info.rx_qtail_spacing);
+
+        return 0;
+}
+
+int
+idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                    uint16_t nb_desc, unsigned int socket_id,
+                    const struct rte_eth_rxconf *rx_conf,
+                    struct rte_mempool *mp)
+{
+        struct idpf_vport *vport = dev->data->dev_private;
+
+        if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+                return idpf_rx_single_queue_setup(dev, queue_idx, nb_desc,
+                                                  socket_id, rx_conf, mp);
+        else
+                return idpf_rx_split_queue_setup(dev, queue_idx, nb_desc,
+                                                 socket_id, rx_conf, mp);
+}
+
 static int
 idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                           uint16_t nb_desc, unsigned int socket_id,
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index c7ba15b058..3f3932c3eb 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -15,9 +15,51 @@
 /* Base address of the HW descriptor ring should be 128B aligned. */
 #define IDPF_RING_BASE_ALIGN    128
 
+#define IDPF_RX_MAX_BURST               32
+#define IDPF_DEFAULT_RX_FREE_THRESH     32
+
 #define IDPF_DEFAULT_TX_RS_THRESH       32
 #define IDPF_DEFAULT_TX_FREE_THRESH     32
 
+struct idpf_rx_queue {
+        struct idpf_adapter *adapter;   /* the adapter this queue belongs to */
+        struct rte_mempool *mp;         /* mbuf pool to populate Rx ring */
+        const struct rte_memzone *mz;   /* memzone for Rx ring */
+        volatile void *rx_ring;
+        struct rte_mbuf **sw_ring;      /* address of SW ring */
+        uint64_t rx_ring_phys_addr;     /* Rx ring DMA address */
+
+        uint16_t nb_rx_desc;            /* ring length */
+        uint16_t rx_tail;               /* current value of tail */
+        volatile uint8_t *qrx_tail;     /* register address of tail */
+        uint16_t rx_free_thresh;        /* max free RX desc to hold */
+        uint16_t nb_rx_hold;            /* number of held free RX desc */
+        struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
+        struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
+        struct rte_mbuf fake_mbuf;      /* dummy mbuf */
+
+        uint16_t rx_nb_avail;
+        uint16_t rx_next_avail;
+
+        uint16_t port_id;       /* device port ID */
+        uint16_t queue_id;      /* Rx queue index */
+        uint16_t rx_buf_len;    /* The packet buffer size */
+        uint16_t rx_hdr_len;    /* The header buffer size */
+        uint16_t max_pkt_len;   /* Maximum packet length */
+        uint8_t rxdid;
+
+        bool q_set;             /* if rx queue has been configured */
+        bool q_started;         /* if rx queue has been started */
+
+        /* only valid for split queue mode */
+        uint8_t expected_gen_id;
+        struct idpf_rx_queue *bufq1;
+        struct idpf_rx_queue *bufq2;
+
+        uint64_t offloads;
+        uint32_t hw_register_set;
+};
+
 struct idpf_tx_entry {
         struct rte_mbuf *mbuf;
         uint16_t next_id;
@@ -63,6 +105,10 @@ struct idpf_tx_queue {
         struct idpf_tx_queue *complq;
 };
 
+int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                        uint16_t nb_desc, unsigned int socket_id,
+                        const struct rte_eth_rxconf *rx_conf,
+                        struct rte_mempool *mp);
 int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                         uint16_t nb_desc, unsigned int socket_id,
                         const struct rte_eth_txconf *tx_conf);
-- 
2.34.1