From mboxrd@z Thu Jan  1 00:00:00 1970
From: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
To: stable@dpdk.org
Date: Wed, 2 Dec 2020 22:14:55 +0530
Message-Id: <1606927495-27287-1-git-send-email-rahul.lakkireddy@chelsio.com>
X-Mailer: git-send-email 2.5.3
Subject: [dpdk-stable] [PATCH 19.11] net/cxgbe: fix queue DMA ring leaks during port close

[ upstream commit 6b78a629954c3857d4bc651a673fe102958a12db ]

Free the DMA memzones for all of a port's queues during port close. To
do this, rework the DMA ring allocation/free logic to use the
rte_eth_dma_zone_reserve()/rte_eth_dma_zone_free() helper functions for
allocating/freeing the memzones.

The firmware event queue doesn't have an associated freelist queue, so
remove the check that tries to generate a memzone name for a
non-existent freelist queue.

Also, add a missing free for the control queue mempools.
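For context (illustration only, not part of the change):
rte_eth_dma_zone_reserve() derives the memzone name from the port id,
queue id, and ring name, which is why cxgbe_dma_mzone_name() below
rebuilds the same "eth_p%d_q%d_%s" string before looking the zone up to
free it; rte_eth_dma_zone_free() is not available on 19.11, hence the
local helper. A minimal sketch of the intended reserve/free pairing
follows; the example_*() wrappers, the "rx_ring" name, and the
arguments are hypothetical:

    #include <rte_ethdev_driver.h>  /* rte_eth_dma_zone_reserve() */
    #include <rte_memzone.h>

    /* Illustrative wrapper: reserve the HW descriptor ring backing
     * store. Creates (or re-finds) a memzone named
     * "eth_p<port>_q<qid>_rx_ring".
     */
    static void *example_ring_alloc(struct rte_eth_dev *dev, uint16_t qid,
                                    size_t len, int socket_id)
    {
        const struct rte_memzone *mz;

        mz = rte_eth_dma_zone_reserve(dev, "rx_ring", qid, len, 4096,
                                      socket_id);
        return mz ? mz->addr : NULL;  /* mz->iova backs the HW ring */
    }

    /* Illustrative wrapper: at port close, rebuild the same name and
     * free the zone through this patch's cxgbe_dma_zone_free() helper.
     */
    static void example_ring_free(struct rte_eth_dev *dev, uint16_t qid)
    {
        cxgbe_dma_zone_free(dev, "rx_ring", qid);
    }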
Fixes: 0462d115441d ("cxgbe: add device related operations")

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
---
 drivers/net/cxgbe/base/adapter.h |   1 +
 drivers/net/cxgbe/cxgbe_ethdev.c |   7 +-
 drivers/net/cxgbe/sge.c          | 175 +++++++++++++++++--------------
 3 files changed, 101 insertions(+), 82 deletions(-)

diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index db654ad9cd..eabd70a213 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -816,6 +816,7 @@ int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq);
 int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq);
 void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq);
 void t4_sge_eth_clear_queues(struct port_info *pi);
+void t4_sge_eth_release_queues(struct port_info *pi);
 int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
 			       unsigned int cnt);
 int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index abd455c6ea..502954c37c 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -333,12 +333,7 @@ void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
 		return;
 
 	cxgbe_down(pi);
-
-	/*
-	 * We clear queues only if both tx and rx path of the port
-	 * have been disabled
-	 */
-	t4_sge_eth_clear_queues(pi);
+	t4_sge_eth_release_queues(pi);
 }
 
 /* Start the device.
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index aba85a2090..7587c46aab 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -1421,16 +1421,49 @@ int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
 	return ctrl_xmit(q, mbuf);
 }
 
+static int cxgbe_dma_mzone_name(char *name, size_t len, uint16_t port_id,
+				uint16_t queue_id, const char *ring_name)
+{
+	return snprintf(name, len, "eth_p%d_q%d_%s",
+			port_id, queue_id, ring_name);
+}
+
+static int cxgbe_dma_zone_free(const struct rte_eth_dev *dev,
+			       const char *ring_name,
+			       uint16_t queue_id)
+{
+	char z_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *mz;
+	int rc = 0;
+
+	rc = cxgbe_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
+				  queue_id, ring_name);
+	if (rc >= RTE_MEMZONE_NAMESIZE) {
+		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
+		return -ENAMETOOLONG;
+	}
+
+	mz = rte_memzone_lookup(z_name);
+	if (mz)
+		rc = rte_memzone_free(mz);
+	else
+		rc = -ENOENT;
+
+	return rc;
+}
+
 /**
  * alloc_ring - allocate resources for an SGE descriptor ring
- * @dev: the PCI device's core device
+ * @dev: the port associated with the queue
+ * @z_name: memzone's name
+ * @queue_id: queue index
+ * @socket_id: preferred socket id for memory allocations
  * @nelem: the number of descriptors
  * @elem_size: the size of each descriptor
+ * @stat_size: extra space in HW ring for status information
  * @sw_size: the size of the SW state associated with each ring element
  * @phys: the physical address of the allocated ring
  * @metadata: address of the array holding the SW state for the ring
- * @stat_size: extra space in HW ring for status information
- * @node: preferred node for memory allocations
  *
  * Allocates resources for an SGE descriptor ring, such as Tx queues,
  * free buffer lists, or response queues. Each SGE ring requires
@@ -1440,39 +1473,34 @@ int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
  * of the function), the bus address of the HW ring, and the address
  * of the SW ring.
  */
-static void *alloc_ring(size_t nelem, size_t elem_size,
-			size_t sw_size, dma_addr_t *phys, void *metadata,
-			size_t stat_size, __rte_unused uint16_t queue_id,
-			int socket_id, const char *z_name,
-			const char *z_name_sw)
+static void *alloc_ring(struct rte_eth_dev *dev, const char *z_name,
+			uint16_t queue_id, int socket_id, size_t nelem,
+			size_t elem_size, size_t stat_size, size_t sw_size,
+			dma_addr_t *phys, void *metadata)
 {
 	size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size;
+	char z_name_sw[RTE_MEMZONE_NAMESIZE];
 	const struct rte_memzone *tz;
 	void *s = NULL;
 
+	snprintf(z_name_sw, sizeof(z_name_sw), "eth_p%d_q%d_%s_sw_ring",
+		 dev->data->port_id, queue_id, z_name);
+
 	dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; "
 		  "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;"
 		  " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size,
 		  stat_size, queue_id, socket_id, z_name, z_name_sw);
 
-	tz = rte_memzone_lookup(z_name);
-	if (tz) {
-		dev_debug(adapter, "%s: tz exists...returning existing..\n",
-			  __func__);
-		goto alloc_sw_ring;
-	}
-
 	/*
 	 * Allocate TX/RX ring hardware descriptors. A memzone large enough to
 	 * handle the maximum ring size is allocated in order to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	tz = rte_memzone_reserve_aligned(z_name, len, socket_id,
-					 RTE_MEMZONE_IOVA_CONTIG, 4096);
+	tz = rte_eth_dma_zone_reserve(dev, z_name, queue_id, len, 4096,
+				      socket_id);
 	if (!tz)
 		return NULL;
 
-alloc_sw_ring:
 	memset(tz->addr, 0, len);
 	if (sw_size) {
 		s = rte_zmalloc_socket(z_name_sw, nelem * sw_size,
@@ -1788,21 +1816,15 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 	struct fw_iq_cmd c;
 	struct sge *s = &adap->sge;
 	struct port_info *pi = eth_dev->data->dev_private;
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	char z_name_sw[RTE_MEMZONE_NAMESIZE];
 	unsigned int nb_refill;
 	u8 pciechan;
 
 	/* Size needs to be multiple of 16, including status entry. */
 	iq->size = cxgbe_roundup(iq->size, 16);
 
-	snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
-		 eth_dev->data->port_id, queue_id,
-		 fwevtq ? "fwq_ring" : "rx_ring");
-	snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
-
-	iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0,
-			      queue_id, socket_id, z_name, z_name_sw);
+	iq->desc = alloc_ring(eth_dev, fwevtq ? "fwq_ring" : "rx_ring",
+			      queue_id, socket_id, iq->size, iq->iqe_len,
+			      0, 0, &iq->phys_addr, NULL);
 	if (!iq->desc)
 		return -ENOMEM;
 
@@ -1860,18 +1882,14 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 		fl->size = s->fl_starve_thres - 1 + 2 * 8;
 		fl->size = cxgbe_roundup(fl->size, 8);
 
-		snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
-			 eth_dev->data->port_id, queue_id,
-			 fwevtq ? "fwq_ring" : "fl_ring");
-		snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
-
-		fl->desc = alloc_ring(fl->size, sizeof(__be64),
+		fl->desc = alloc_ring(eth_dev, "fl_ring", queue_id, socket_id,
+				      fl->size, sizeof(__be64),
 				      s->stat_len, sizeof(struct rx_sw_desc),
-				      &fl->addr, &fl->sdesc, s->stat_len,
-				      queue_id, socket_id, z_name, z_name_sw);
-
-		if (!fl->desc)
-			goto fl_nomem;
+				      &fl->addr, &fl->sdesc);
+		if (!fl->desc) {
+			ret = -ENOMEM;
+			goto err;
+		}
 
 		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
 		c.iqns_to_fl0congen |=
@@ -1991,8 +2009,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 refill_fl_err:
 	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
 		   iq->cntxt_id, fl->cntxt_id, 0xffff);
-fl_nomem:
-	ret = -ENOMEM;
 err:
 	iq->cntxt_id = 0;
 	iq->abs_id = 0;
@@ -2058,21 +2074,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 	struct fw_eq_eth_cmd c;
 	struct sge *s = &adap->sge;
 	struct port_info *pi = eth_dev->data->dev_private;
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	char z_name_sw[RTE_MEMZONE_NAMESIZE];
 	u8 pciechan;
 
 	/* Add status entries */
 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
-	snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
-		 eth_dev->data->port_id, queue_id, "tx_ring");
-	snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
-
-	txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
-				 sizeof(struct tx_sw_desc), &txq->q.phys_addr,
-				 &txq->q.sdesc, s->stat_len, queue_id,
-				 socket_id, z_name, z_name_sw);
+	txq->q.desc = alloc_ring(eth_dev, "tx_ring", queue_id, socket_id,
+				 txq->q.size, sizeof(struct tx_desc),
+				 s->stat_len, sizeof(struct tx_sw_desc),
+				 &txq->q.phys_addr, &txq->q.sdesc);
 	if (!txq->q.desc)
 		return -ENOMEM;
 
@@ -2137,20 +2147,13 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 	struct fw_eq_ctrl_cmd c;
 	struct sge *s = &adap->sge;
 	struct port_info *pi = eth_dev->data->dev_private;
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	char z_name_sw[RTE_MEMZONE_NAMESIZE];
 
 	/* Add status entries */
 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
-	snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
-		 eth_dev->data->port_id, queue_id, "ctrl_tx_ring");
-	snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
-
-	txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
-				 0, &txq->q.phys_addr,
-				 NULL, 0, queue_id,
-				 socket_id, z_name, z_name_sw);
+	txq->q.desc = alloc_ring(eth_dev, "ctrl_tx_ring", queue_id,
+				 socket_id, txq->q.size, sizeof(struct tx_desc),
+				 0, 0, &txq->q.phys_addr, NULL);
 	if (!txq->q.desc)
 		return -ENOMEM;
 
@@ -2262,6 +2265,36 @@ void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq)
 	}
 }
 
+void t4_sge_eth_release_queues(struct port_info *pi)
+{
+	struct adapter *adap = pi->adapter;
+	struct sge_eth_rxq *rxq;
+	struct sge_eth_txq *txq;
+	unsigned int i;
+
+	rxq = &adap->sge.ethrxq[pi->first_qset];
+	/* clean up Ethernet Tx/Rx queues */
+	for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
+		/* Free only the queues allocated */
+		if (rxq->rspq.desc) {
+			t4_sge_eth_rxq_release(adap, rxq);
+			cxgbe_dma_zone_free(rxq->rspq.eth_dev, "fl_ring", i);
+			cxgbe_dma_zone_free(rxq->rspq.eth_dev, "rx_ring", i);
+			rxq->rspq.eth_dev = NULL;
+		}
+	}
+
+	txq = &adap->sge.ethtxq[pi->first_qset];
+	for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
+		/* Free only the queues allocated */
+		if (txq->q.desc) {
+			t4_sge_eth_txq_release(adap, txq);
+			cxgbe_dma_zone_free(txq->eth_dev, "tx_ring", i);
+			txq->eth_dev = NULL;
+		}
+	}
+}
+
 void t4_sge_tx_monitor_start(struct adapter *adap)
 {
 	rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
@@ -2281,21 +2314,6 @@ void t4_sge_tx_monitor_stop(struct adapter *adap)
 void t4_free_sge_resources(struct adapter *adap)
 {
 	unsigned int i;
-	struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
-	struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
-
-	/* clean up Ethernet Tx/Rx queues */
-	for (i = 0; i < adap->sge.max_ethqsets; i++, rxq++, txq++) {
-		/* Free only the queues allocated */
-		if (rxq->rspq.desc) {
-			t4_sge_eth_rxq_release(adap, rxq);
-			rxq->rspq.eth_dev = NULL;
-		}
-		if (txq->q.desc) {
-			t4_sge_eth_txq_release(adap, txq);
-			txq->eth_dev = NULL;
-		}
-	}
 
 	/* clean up control Tx queues */
 	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
@@ -2305,12 +2323,17 @@ void t4_free_sge_resources(struct adapter *adap)
 			reclaim_completed_tx_imm(&cq->q);
 			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
 					cq->q.cntxt_id);
+			cxgbe_dma_zone_free(adap->eth_dev, "ctrl_tx_ring", i);
+			rte_mempool_free(cq->mb_pool);
 			free_txq(&cq->q);
 		}
 	}
 
-	if (adap->sge.fw_evtq.desc)
+	/* clean up firmware event queue */
+	if (adap->sge.fw_evtq.desc) {
 		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+		cxgbe_dma_zone_free(adap->eth_dev, "fwq_ring", 0);
+	}
 }
 
 /**
-- 
2.24.0