Use requested socket ID when allocating memory for transmit rings,
receive rings, and completion queues. Use device NUMA ID when
allocating context memory, notification queue rings, async
completion queue rings, and VNIC attributes.

Fixes: 6eb3cc2294fd ("net/bnxt: add initial Tx code")
Fixes: 9738793f28ec ("net/bnxt: add VNIC functions and structs")
Fixes: f8168ca0e690 ("net/bnxt: support thor controller")
Fixes: bd0a14c99f65 ("net/bnxt: use dedicated CPR for async events")
Fixes: 683e5cf79249 ("net/bnxt: use common NQ ring")
Cc: stable@dpdk.org

Signed-off-by: Lance Richardson
Reviewed-by: Somnath Kotur
Reviewed-by: Ajit Khaparde
---
 drivers/net/bnxt/bnxt_ethdev.c | 17 +++++++++--------
 drivers/net/bnxt/bnxt_ring.c   | 30 ++++++++++++++----------------
 drivers/net/bnxt/bnxt_ring.h   |  2 +-
 drivers/net/bnxt/bnxt_rxq.c    |  4 ++--
 drivers/net/bnxt/bnxt_txq.c    |  4 ++--
 drivers/net/bnxt/bnxt_vnic.c   |  3 ++-
 6 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index d859ef503..d4b8762d5 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -580,13 +580,14 @@ static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
 	return rc;
 }
 
-static int bnxt_alloc_ctx_mem_buf(char *type, size_t size,
+static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size,
 				  struct bnxt_ctx_mem_buf_info *ctx)
 {
 	if (!ctx)
 		return -EINVAL;
 
-	ctx->va = rte_zmalloc(type, size, 0);
+	ctx->va = rte_zmalloc_socket(type, size, 0,
+				     bp->eth_dev->device->numa_node);
 	if (ctx->va == NULL)
 		return -ENOMEM;
 	rte_mem_lock_page(ctx->va);
@@ -610,7 +611,7 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
 	sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
 	/* 4 bytes for each counter-id */
-	rc = bnxt_alloc_ctx_mem_buf(type,
+	rc = bnxt_alloc_ctx_mem_buf(bp, type,
 				    max_fc * 4,
 				    &bp->flow_stat->rx_fc_in_tbl);
 	if (rc)
@@ -619,7 +620,7 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
 	sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
 	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
-	rc = bnxt_alloc_ctx_mem_buf(type,
+	rc = bnxt_alloc_ctx_mem_buf(bp, type,
 				    max_fc * 16,
 				    &bp->flow_stat->rx_fc_out_tbl);
 	if (rc)
@@ -628,7 +629,7 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
 	sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
 	/* 4 bytes for each counter-id */
-	rc = bnxt_alloc_ctx_mem_buf(type,
+	rc = bnxt_alloc_ctx_mem_buf(bp, type,
 				    max_fc * 4,
 				    &bp->flow_stat->tx_fc_in_tbl);
 	if (rc)
@@ -637,7 +638,7 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
 	sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
 	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
-	rc = bnxt_alloc_ctx_mem_buf(type,
+	rc = bnxt_alloc_ctx_mem_buf(bp, type,
 				    max_fc * 16,
 				    &bp->flow_stat->tx_fc_out_tbl);
 	if (rc)
@@ -4518,7 +4519,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
 	if (!mz) {
 		mz = rte_memzone_reserve_aligned(mz_name,
 				rmem->nr_pages * 8,
-				SOCKET_ID_ANY,
+				bp->eth_dev->device->numa_node,
 				RTE_MEMZONE_2MB |
 				RTE_MEMZONE_SIZE_HINT_ONLY |
 				RTE_MEMZONE_IOVA_CONTIG,
@@ -4541,7 +4542,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
 	if (!mz) {
 		mz = rte_memzone_reserve_aligned(mz_name,
 				mem_size,
-				SOCKET_ID_ANY,
+				bp->eth_dev->device->numa_node,
 				RTE_MEMZONE_1GB |
 				RTE_MEMZONE_SIZE_HINT_ONLY |
 				RTE_MEMZONE_IOVA_CONTIG,
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index e4055fa49..4a90ac264 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -94,7 +94,7 @@ int bnxt_alloc_ring_grps(struct bnxt *bp)
  * tx bd ring - Only non-zero length if tx_ring_info is not NULL
  * rx bd ring - Only non-zero length if rx_ring_info is not NULL
  */
-int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
+int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 		     struct bnxt_tx_queue *txq,
 		     struct bnxt_rx_queue *rxq,
 		     struct bnxt_cp_ring_info *cp_ring_info,
@@ -203,7 +203,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
 	mz = rte_memzone_lookup(mz_name);
 	if (!mz) {
 		mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
-				SOCKET_ID_ANY,
+				socket_id,
 				RTE_MEMZONE_2MB |
 				RTE_MEMZONE_SIZE_HINT_ONLY |
 				RTE_MEMZONE_IOVA_CONTIG,
@@ -435,24 +435,23 @@ int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
 	struct bnxt_cp_ring_info *nqr;
 	struct bnxt_ring *ring;
 	int ring_index = BNXT_NUM_ASYNC_CPR(bp);
-	unsigned int socket_id;
 	uint8_t ring_type;
 	int rc = 0;
 
 	if (!BNXT_HAS_NQ(bp) || bp->rxtx_nq_ring)
 		return 0;
 
-	socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
-
 	nqr = rte_zmalloc_socket("nqr",
 				 sizeof(struct bnxt_cp_ring_info),
-				 RTE_CACHE_LINE_SIZE, socket_id);
+				 RTE_CACHE_LINE_SIZE,
+				 bp->eth_dev->device->numa_node);
 	if (nqr == NULL)
 		return -ENOMEM;
 
 	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
 				  sizeof(struct bnxt_ring),
-				  RTE_CACHE_LINE_SIZE, socket_id);
+				  RTE_CACHE_LINE_SIZE,
+				  bp->eth_dev->device->numa_node);
 	if (ring == NULL) {
 		rte_free(nqr);
 		return -ENOMEM;
@@ -467,7 +466,8 @@ int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
 	ring->fw_ring_id = INVALID_HW_RING_ID;
 
 	nqr->cp_ring_struct = ring;
-	rc = bnxt_alloc_rings(bp, 0, NULL, NULL, nqr, NULL, "l2_nqr");
+	rc = bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
+			      NULL, nqr, NULL, "l2_nqr");
 	if (rc) {
 		rte_free(ring);
 		rte_free(nqr);
@@ -805,22 +805,21 @@ int bnxt_alloc_async_ring_struct(struct bnxt *bp)
 {
 	struct bnxt_cp_ring_info *cpr = NULL;
 	struct bnxt_ring *ring = NULL;
-	unsigned int socket_id;
 
 	if (BNXT_NUM_ASYNC_CPR(bp) == 0)
 		return 0;
 
-	socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
-
 	cpr = rte_zmalloc_socket("cpr",
 				 sizeof(struct bnxt_cp_ring_info),
-				 RTE_CACHE_LINE_SIZE, socket_id);
+				 RTE_CACHE_LINE_SIZE,
+				 bp->eth_dev->device->numa_node);
 	if (cpr == NULL)
 		return -ENOMEM;
 
 	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
 				  sizeof(struct bnxt_ring),
-				  RTE_CACHE_LINE_SIZE, socket_id);
+				  RTE_CACHE_LINE_SIZE,
+				  bp->eth_dev->device->numa_node);
 	if (ring == NULL) {
 		rte_free(cpr);
 		return -ENOMEM;
@@ -836,7 +835,6 @@ int bnxt_alloc_async_ring_struct(struct bnxt *bp)
 	bp->async_cp_ring = cpr;
 	cpr->cp_ring_struct = ring;
 
-	return bnxt_alloc_rings(bp, 0, NULL, NULL,
-				bp->async_cp_ring, NULL,
-				"def_cp");
+	return bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
+				NULL, bp->async_cp_ring, NULL, "def_cp");
 }
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index b8b666bb9..ef9586e64 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -67,7 +67,7 @@ struct bnxt_rx_ring_info;
 struct bnxt_cp_ring_info;
 void bnxt_free_ring(struct bnxt_ring *ring);
 int bnxt_alloc_ring_grps(struct bnxt *bp);
-int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
+int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 		     struct bnxt_tx_queue *txq,
 		     struct bnxt_rx_queue *rxq,
 		     struct bnxt_cp_ring_info *cp_ring_info,
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 45e0c3d01..44b09e9c6 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -345,8 +345,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	eth_dev->data->rx_queues[queue_idx] = rxq;
 
 	/* Allocate RX ring hardware descriptors */
-	rc = bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL,
-			      "rxr");
+	rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
+			      NULL, "rxr");
 	if (rc) {
 		PMD_DRV_LOG(ERR,
 			    "ring_dma_zone_reserve for rx_ring failed!\n");
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index d95e1f752..bc789224d 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -149,8 +149,8 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	txq->port_id = eth_dev->data->port_id;
 
 	/* Allocate TX ring hardware descriptors */
-	if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring, NULL,
-			     "txr")) {
+	if (bnxt_alloc_rings(bp, socket_id, queue_idx, txq, NULL, txq->cp_ring,
+			     NULL, "txr")) {
 		PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
 		rc = -ENOMEM;
 		goto err;
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index de5c14566..26253a7e1 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -145,7 +145,8 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
 	mz = rte_memzone_lookup(mz_name);
 	if (!mz) {
 		mz = rte_memzone_reserve(mz_name,
-				entry_length * max_vnics, SOCKET_ID_ANY,
+				entry_length * max_vnics,
+				bp->eth_dev->device->numa_node,
 				RTE_MEMZONE_2MB |
 				RTE_MEMZONE_SIZE_HINT_ONLY |
 				RTE_MEMZONE_IOVA_CONTIG);
--
2.25.1
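
[Illustrative note, not part of the patch.] A minimal, hypothetical application-side sketch of where the two NUMA hints above come from: the socket_id passed to rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() is the "requested socket ID" that bnxt_alloc_rings() now receives for ring and completion-queue memory, while device-scoped allocations follow eth_dev->device->numa_node. The helper name setup_queues_on_port_numa() and its fallback policy are assumptions made for illustration, not existing bnxt or DPDK code.

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_mempool.h>

/* Hypothetical helper: request all queues of a port on the port's own
 * NUMA node, falling back to the caller's socket when the device does
 * not report one.
 */
static int
setup_queues_on_port_numa(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq,
			  uint16_t nb_desc, struct rte_mempool *mb_pool)
{
	int socket = rte_eth_dev_socket_id(port_id);
	uint16_t q;
	int rc;

	/* rte_eth_dev_socket_id() returns SOCKET_ID_ANY (-1) when the
	 * NUMA node is unknown; fall back to the calling lcore's socket.
	 */
	if (socket == SOCKET_ID_ANY)
		socket = (int)rte_socket_id();

	for (q = 0; q < nb_rxq; q++) {
		/* This socket_id is the "requested socket ID" that the PMD
		 * uses for Rx rings and their completion queues.
		 */
		rc = rte_eth_rx_queue_setup(port_id, q, nb_desc,
					    (unsigned int)socket, NULL,
					    mb_pool);
		if (rc != 0)
			return rc;
	}

	for (q = 0; q < nb_txq; q++) {
		rc = rte_eth_tx_queue_setup(port_id, q, nb_desc,
					    (unsigned int)socket, NULL);
		if (rc != 0)
			return rc;
	}

	return 0;
}

When queues are requested on the port's NUMA node as in this sketch, the requested socket and the device NUMA node coincide, so descriptor rings, completion queues, and the device-scoped context memory all end up local to the NIC.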