From mboxrd@z Thu Jan 1 00:00:00 1970
From: Gagandeep Singh
To: dev@dpdk.org, ferruh.yigit@intel.com
Cc: thomas@monjalon.net, Gagandeep Singh
Date: Tue, 1 Oct 2019 16:32:03 +0530
Message-Id: <20191001110209.6047-9-g.singh@nxp.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20191001110209.6047-1-g.singh@nxp.com>
References: <20190826130246.30485-1-g.singh@nxp.com>
 <20191001110209.6047-1-g.singh@nxp.com>
Subject: [dpdk-dev] [PATCH v3 08/14] net/ppfe: add queue setup and release
 operations

This patch adds the Rx/Tx queue setup and release operations and
reports the supported checksum offloads.

Signed-off-by: Gagandeep Singh
Acked-by: Nipun Gupta
Acked-by: Akhil Goyal
---
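Usage sketch (not part of the commit message): how an application would
exercise the new queue operations. This is a minimal sketch under stated
assumptions — the port id, descriptor counts and the "mbuf_pool" mempool
are illustrative, and this PMD ignores the descriptor count, socket id
and queue config arguments anyway:

	#include <rte_ethdev.h>
	#include <rte_lcore.h>
	#include <rte_mbuf.h>

	static int setup_queues(uint16_t port, struct rte_mempool *mbuf_pool)
	{
		int ret;

		/* Only the mempool passed on the first Rx queue setup is
		 * used by the whole system (see pfe_rx_queue_setup below).
		 */
		ret = rte_eth_rx_queue_setup(port, 0, 128, rte_socket_id(),
					     NULL, mbuf_pool);
		if (ret < 0)
			return ret;

		/* nb_tx_desc, socket_id and tx_conf are likewise unused */
		return rte_eth_tx_queue_setup(port, 0, 512, rte_socket_id(),
					      NULL);
	}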
 doc/guides/nics/features/ppfe.ini |   2 +
 doc/guides/nics/ppfe.rst          |   1 +
 drivers/net/ppfe/pfe_hif.c        | 115 ++++++++++++++++++++++++++++++
 drivers/net/ppfe/pfe_hif.h        |   1 +
 drivers/net/ppfe/pfe_hif_lib.c    |  50 +++++++++++++
 drivers/net/ppfe/ppfe_ethdev.c    |  93 ++++++++++++++++++++++++
 6 files changed, 262 insertions(+)

diff --git a/doc/guides/nics/features/ppfe.ini b/doc/guides/nics/features/ppfe.ini
index cd5f836a3..4e38ffd24 100644
--- a/doc/guides/nics/features/ppfe.ini
+++ b/doc/guides/nics/features/ppfe.ini
@@ -4,6 +4,8 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+L3 checksum offload  = Y
+L4 checksum offload  = Y
 Linux VFIO           = Y
 ARMv8                = Y
 Usage doc            = Y
diff --git a/doc/guides/nics/ppfe.rst b/doc/guides/nics/ppfe.rst
index 29b02957f..c95525e5b 100644
--- a/doc/guides/nics/ppfe.rst
+++ b/doc/guides/nics/ppfe.rst
@@ -93,6 +93,7 @@ the kernel layer for link status.
 PPFE Features
 ~~~~~~~~~~~~~~
 
+- L3/L4 checksum offload
 - ARMv8
 
 Supported PPFE SoCs
diff --git a/drivers/net/ppfe/pfe_hif.c b/drivers/net/ppfe/pfe_hif.c
index 940f7419f..024ca3d77 100644
--- a/drivers/net/ppfe/pfe_hif.c
+++ b/drivers/net/ppfe/pfe_hif.c
@@ -43,6 +43,121 @@ pfe_hif_free_descr(struct pfe_hif *hif)
 	rte_free(hif->descr_baseaddr_v);
 }
 
+/*
+ * pfe_hif_init_buffers
+ * This function initializes the HIF Rx/Tx ring descriptors and
+ * initializes the Rx queue with buffers.
+ */
+int
+pfe_hif_init_buffers(struct pfe_hif *hif)
+{
+	struct hif_desc *desc, *first_desc_p;
+	uint32_t i = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Check that enough Rx buffers are available in the shared memory */
+	if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
+		return -ENOMEM;
+
+	hif->rx_base = hif->descr_baseaddr_v;
+	memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
+
+	/* Initialize Rx descriptors */
+	desc = hif->rx_base;
+	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
+
+	for (i = 0; i < hif->rx_ring_size; i++) {
+		/* Initialize Rx buffers from the shared memory */
+		struct rte_mbuf *mbuf =
+			(struct rte_mbuf *)hif->shm->rx_buf_pool[i];
+
+		/* The PPFE mbuf structure is as follows:
+		 * ----------------------------------------------------------+
+		 * | mbuf | priv | headroom (annotation + PPFE data) | data   |
+		 * ----------------------------------------------------------+
+		 *
+		 * The PPFE block returns additional information such as the
+		 * parse results, eth id and queue id along with the data, so
+		 * additional memory must be provided with each packet handed
+		 * to the HIF Rx rings for the PPFE block to write its
+		 * headers. The data pointer given to the HIF rings is
+		 * therefore calculated as:
+		 *	mbuf data pointer - required header size
+		 *
+		 * The HEADROOM area is used to receive the PPFE block
+		 * headers. On packet reception, the HIF driver uses the PPFE
+		 * header information to decide the client and to fill in the
+		 * parse results; after that, the application can use or
+		 * overwrite the HEADROOM area.
+		 */
+		hif->rx_buf_vaddr[i] =
+			(void *)((size_t)mbuf->buf_addr + mbuf->data_off -
+					PFE_PKT_HEADER_SZ);
+		hif->rx_buf_addr[i] =
+			(void *)(size_t)(rte_pktmbuf_iova(mbuf) -
+					PFE_PKT_HEADER_SZ);
+		hif->rx_buf_len[i] = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
+
+		hif->shm->rx_buf_pool[i] = NULL;
+
+		writel(DDR_PHYS_TO_PFE(hif->rx_buf_addr[i]),
+					&desc->data);
+		writel(0, &desc->status);
+
+		/*
+		 * Ensure everything else is written to DDR before
+		 * writing bd->ctrl
+		 */
+		rte_wmb();
+
+		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
+			| BD_CTRL_DIR | BD_CTRL_DESC_EN
+			| BD_BUF_LEN(hif->rx_buf_len[i])), &desc->ctrl);
+
+		/* Chain descriptors */
+		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
+		desc++;
+	}
+
+	/* Overwrite last descriptor to chain it to first one */
+	desc--;
+	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
+
+	hif->rxtoclean_index = 0;
+
+	/* Initialize Rx buffer descriptor ring base address */
+	writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
+
+	hif->tx_base = hif->rx_base + hif->rx_ring_size;
+	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
+				hif->rx_ring_size;
+	memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
+
+	/* Initialize Tx descriptors */
+	desc = hif->tx_base;
+
+	for (i = 0; i < hif->tx_ring_size; i++) {
+		/* Chain descriptors */
+		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
+		writel(0, &desc->ctrl);
+		desc++;
+	}
+
+	/* Overwrite last descriptor to chain it to first one */
+	desc--;
+	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
+	hif->txavail = hif->tx_ring_size;
+	hif->txtosend = 0;
+	hif->txtoclean = 0;
+	hif->txtoflush = 0;
+
+	/* Initialize Tx buffer descriptor ring base address */
+	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
+
+	return 0;
+}
+
 /*
  * pfe_hif_client_register
  *
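Note: the Rx buffer carve-out above is the subtle part of this hunk. A
self-contained sketch of the same pointer arithmetic, with illustrative
stand-in values for PFE_PKT_HEADER_SZ and RTE_PKTMBUF_HEADROOM (the real
values come from the driver headers):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define HDR_SZ    16    /* stand-in for PFE_PKT_HEADER_SZ */
	#define HEADROOM  128   /* stand-in for RTE_PKTMBUF_HEADROOM */

	int main(void)
	{
		uint8_t buf[2048];              /* the mbuf data buffer */
		uint8_t *data = buf + HEADROOM; /* buf_addr + data_off */

		/* The HIF ring is handed a pointer HDR_SZ bytes before
		 * the packet data, so the PPFE block writes its headers
		 * into the tail of the headroom and the payload starts
		 * exactly at the mbuf data pointer.
		 */
		uint8_t *hif_ptr = data - HDR_SZ;
		size_t hif_len = sizeof(buf) - HEADROOM; /* rx_buf_len[i] */

		printf("HIF buffer at offset %td, length %zu\n",
		       hif_ptr - buf, hif_len);
		return 0;
	}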
diff --git a/drivers/net/ppfe/pfe_hif.h b/drivers/net/ppfe/pfe_hif.h
index 483db75da..80f78551c 100644
--- a/drivers/net/ppfe/pfe_hif.h
+++ b/drivers/net/ppfe/pfe_hif.h
@@ -143,5 +143,6 @@ void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
 int pfe_hif_init(struct pfe *pfe);
 void pfe_hif_exit(struct pfe *pfe);
 void pfe_hif_rx_idle(struct pfe_hif *hif);
+int pfe_hif_init_buffers(struct pfe_hif *hif);
 
 #endif /* _PFE_HIF_H_ */
diff --git a/drivers/net/ppfe/pfe_hif_lib.c b/drivers/net/ppfe/pfe_hif_lib.c
index 2012d896a..f5e290f27 100644
--- a/drivers/net/ppfe/pfe_hif_lib.c
+++ b/drivers/net/ppfe/pfe_hif_lib.c
@@ -15,6 +15,56 @@ unsigned int emac_txq_cnt;
 /*HIF shared memory Global variable */
 struct hif_shm ghif_shm;
 
+/* Cleanup the HIF shared memory, release the HIF rx_buffer_pool.
+ * This function should be called after pfe_hif_exit().
+ *
+ * @param[in] hif_shm	Shared memory address location in DDR
+ */
+void
+pfe_hif_shm_clean(struct hif_shm *hif_shm)
+{
+	unsigned int i;
+	void *pkt;
+
+	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+		pkt = hif_shm->rx_buf_pool[i];
+		if (pkt)
+			rte_pktmbuf_free((struct rte_mbuf *)pkt);
+	}
+}
+
+/* Initialize the shared memory used between the HIF driver and clients,
+ * and allocate the rx_buffer_pool required for the HIF Rx descriptors.
+ * This function should be called before initializing the HIF driver.
+ *
+ * @param[in] hif_shm	Shared memory address location in DDR
+ * @return	0 on success, <0 on failure to initialize
+ */
+int
+pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+
+	memset(hif_shm, 0, sizeof(struct hif_shm));
+	hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
+
+	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+		mbuf = rte_pktmbuf_alloc(mb_pool);
+		if (mbuf)
+			hif_shm->rx_buf_pool[i] = mbuf;
+		else
+			goto err0;
+	}
+
+	return 0;
+
+err0:
+	PFE_PMD_ERR("Low memory");
+	pfe_hif_shm_clean(hif_shm);
+	return -ENOMEM;
+}
+
 /*This function sends indication to HIF driver
  *
  * @param[in] hif	hif context
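Note: the intended pairing of the new helpers, sketched from the
pfe_rx_queue_setup() call site below and from the comment on
pfe_hif_shm_clean() above (error handling trimmed; "pfe" is the driver
context used throughout the patch):

	/* setup: shared memory first, then the HIF rings */
	if (pfe_hif_shm_init(pfe->hif.shm, mb_pool) == 0) {
		pfe->hif.shm->pool = mb_pool;
		pfe_hif_init_buffers(&pfe->hif);
		hif_init();
		hif_rx_enable();
		hif_tx_enable();
	}

	/* teardown: pfe_hif_shm_clean() frees the mbufs still held in
	 * rx_buf_pool, so it must run only after pfe_hif_exit()
	 */
	pfe_hif_exit(pfe);
	pfe_hif_shm_clean(pfe->hif.shm);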
diff --git a/drivers/net/ppfe/ppfe_ethdev.c b/drivers/net/ppfe/ppfe_ethdev.c
index 19467b90d..4619a9d28 100644
--- a/drivers/net/ppfe/ppfe_ethdev.c
+++ b/drivers/net/ppfe/ppfe_ethdev.c
@@ -17,6 +17,17 @@ struct pfe_vdev_init_params {
 	int8_t gem_id;
 };
 static struct pfe *g_pfe;
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM;
 
 /* TODO: make pfe_svr a runtime option.
  * Driver should be able to get the SVR
@@ -285,16 +296,98 @@ pfe_eth_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = dev->data->nb_rx_queues;
 	dev_info->max_tx_queues = dev->data->nb_tx_queues;
 	dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
+	dev_info->rx_offload_capa = dev_rx_offloads_sup;
+	dev_info->tx_offload_capa = dev_tx_offloads_sup;
 
 	return 0;
 }
 
+/* Only the first mb_pool given on the first call of this API is used
+ * in the whole system; nb_rx_desc and rx_conf are unused parameters.
+ */
+static int
+pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+		   __rte_unused uint16_t nb_rx_desc,
+		   __rte_unused unsigned int socket_id,
+		   __rte_unused const struct rte_eth_rxconf *rx_conf,
+		   struct rte_mempool *mb_pool)
+{
+	int rc = 0;
+	struct pfe *pfe;
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+	pfe = priv->pfe;
+
+	if (queue_idx >= EMAC_RXQ_CNT) {
+		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+			    queue_idx, EMAC_RXQ_CNT);
+		return -1;
+	}
+
+	if (!pfe->hif.setuped) {
+		rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
+		if (rc) {
+			PFE_PMD_ERR("Could not allocate buffer descriptors");
+			return -1;
+		}
+
+		pfe->hif.shm->pool = mb_pool;
+		if (pfe_hif_init_buffers(&pfe->hif)) {
+			PFE_PMD_ERR("Could not initialize buffer descriptors");
+			return -1;
+		}
+		hif_init();
+		hif_rx_enable();
+		hif_tx_enable();
+		pfe->hif.setuped = 1;
+	}
+	dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
+	priv->client.rx_q[queue_idx].queue_id = queue_idx;
+
+	return 0;
+}
+
+static void
+pfe_rx_queue_release(void *q __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+}
+
+static void
+pfe_tx_queue_release(void *q __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+}
+
+static int
+pfe_tx_queue_setup(struct rte_eth_dev *dev,
+		   uint16_t queue_idx,
+		   __rte_unused uint16_t nb_desc,
+		   __rte_unused unsigned int socket_id,
+		   __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+	if (queue_idx >= emac_txq_cnt) {
+		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+			    queue_idx, emac_txq_cnt);
+		return -1;
+	}
+	dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
+	priv->client.tx_q[queue_idx].queue_id = queue_idx;
+	return 0;
+}
+
 static const struct eth_dev_ops ops = {
 	.dev_start = pfe_eth_open,
 	.dev_stop = pfe_eth_stop,
 	.dev_close = pfe_eth_close,
 	.dev_configure = pfe_eth_configure,
 	.dev_infos_get = pfe_eth_info,
+	.rx_queue_setup = pfe_rx_queue_setup,
+	.rx_queue_release = pfe_rx_queue_release,
+	.tx_queue_setup = pfe_tx_queue_setup,
+	.tx_queue_release = pfe_tx_queue_release,
 };
 
 static int
-- 
2.17.1