From mboxrd@z Thu Jan 1 00:00:00 1970
Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])
 by dpdk.org (Postfix) with ESMTP id 9036E1B44D
 for <dev@dpdk.org>; Fri, 5 Apr 2019 03:34:10 +0200 (CEST)
Received: from Internal Mail-Server by MTLPINE1 (envelope-from yskoh@mellanox.com)
 with ESMTPS (AES256-SHA encrypted); 5 Apr 2019 04:34:08 +0300
Received: from scfae-sc-2.mti.labs.mlnx (scfae-sc-2.mti.labs.mlnx [10.101.0.96])
 by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id x351Y2kU018492;
 Fri, 5 Apr 2019 04:34:06 +0300
From: Yongseok Koh <yskoh@mellanox.com>
To: shahafs@mellanox.com
Cc: dev@dpdk.org
Date: Thu, 4 Apr 2019 18:33:55 -0700
Message-Id: <20190405013357.14503-3-yskoh@mellanox.com>
X-Mailer: git-send-email 2.11.0
In-Reply-To: <20190405013357.14503-1-yskoh@mellanox.com>
References: <20190325193627.19726-1-yskoh@mellanox.com>
 <20190405013357.14503-1-yskoh@mellanox.com>
Subject: [dpdk-dev] [PATCH v3 2/4] net/mlx5: remove redundant queue index
List-Id: DPDK patches and discussions

The queue index is stored redundantly in both the Rx and Tx structures,
e.g. txq_ctrl->idx and txq->stats.idx. Consolidate each into a single
field - rxq->idx and txq->idx.

Also, rxq and txq are moved to the beginning of their control structures
(rxq_ctrl and txq_ctrl) for cacheline alignment.

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
---
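[Note for reviewers, not part of the commit message: the layout idea can
be pictured with the minimal standalone C sketch below. The structs and
the CONTAINER_OF macro are simplified stand-ins, not the real mlx5
definitions from mlx5_rxtx.h. With the data path structure as the first
member, it lands on the first cacheline of its control structure, and
recovering the control structure from a data path pointer should compile
down to a zero-offset cast.]

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel-style container_of(). */
#define CONTAINER_OF(ptr, type, member) \
        ((type *)((uintptr_t)(ptr) - offsetof(type, member)))

struct rxq_data {               /* data path structure (hot fields) */
        uint16_t idx;           /* queue index, now stored only here */
};

struct rxq_ctrl {               /* control structure */
        struct rxq_data rxq;    /* first member => offset 0 */
        unsigned int socket;    /* colder control-path fields follow */
};

int
main(void)
{
        struct rxq_ctrl ctrl = { .rxq = { .idx = 3 }, .socket = 0 };
        struct rxq_data *rxq = &ctrl.rxq;

        /* rxq at offset 0 makes CONTAINER_OF() a zero-offset cast. */
        assert(offsetof(struct rxq_ctrl, rxq) == 0);
        assert((void *)CONTAINER_OF(rxq, struct rxq_ctrl, rxq) ==
               (void *)&ctrl);
        printf("queue %u, ctrl at %p\n", (unsigned int)rxq->idx,
               (void *)&ctrl);
        return 0;
}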
 drivers/net/mlx5/mlx5_rxq.c     | 29 ++++++++++++++---------------
 drivers/net/mlx5/mlx5_rxtx.h    | 10 ++++------
 drivers/net/mlx5/mlx5_stats.c   | 15 ++++++---------
 drivers/net/mlx5/mlx5_trigger.c |  2 +-
 drivers/net/mlx5/mlx5_txq.c     | 21 ++++++++++-----------
 5 files changed, 35 insertions(+), 42 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index dcb97c2100..8a84b0a1b5 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -156,7 +156,7 @@ rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	}
 	DRV_LOG(DEBUG,
 		"port %u Rx queue %u allocated and configured %u segments",
-		rxq->port_id, rxq_ctrl->idx, wqe_n);
+		rxq->port_id, rxq->idx, wqe_n);
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
@@ -168,7 +168,7 @@ rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 		(*rxq->mprq_bufs)[i] = NULL;
 	}
 	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
-		rxq->port_id, rxq_ctrl->idx);
+		rxq->port_id, rxq->idx);
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -241,7 +241,7 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	DRV_LOG(DEBUG,
 		"port %u Rx queue %u allocated and configured %u segments"
 		" (max %u packets)",
-		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
 		elts_n / (1 << rxq_ctrl->rxq.sges_n));
 	return 0;
 error:
@@ -253,7 +253,7 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 		(*rxq_ctrl->rxq.elts)[i] = NULL;
 	}
 	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
-		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -287,7 +287,7 @@ rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	uint16_t i;
 
 	DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
-		rxq->port_id, rxq_ctrl->idx);
+		rxq->port_id, rxq->idx);
 	if (rxq->mprq_bufs == NULL)
 		return;
 	assert(mlx5_rxq_check_vec_support(rxq) < 0);
@@ -318,7 +318,7 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	uint16_t i;
 
 	DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
-		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq->idx);
 	if (rxq->elts == NULL)
 		return;
 	/**
@@ -364,7 +364,7 @@ void
 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
 	DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
-		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
 	if (rxq_ctrl->ibv)
 		mlx5_rxq_ibv_release(rxq_ctrl->ibv);
 	memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
@@ -495,11 +495,11 @@ mlx5_rx_queue_release(void *dpdk_rxq)
 		return;
 	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	priv = rxq_ctrl->priv;
-	if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
+	if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
 		rte_panic("port %u Rx queue %u is still used by a flow and"
 			  " cannot be removed\n",
-			  PORT_ID(priv), rxq_ctrl->idx);
-	mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
+			  PORT_ID(priv), rxq->idx);
+	mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
 }
 
 /**
@@ -793,7 +793,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 	if (!tmpl) {
 		DRV_LOG(ERR,
 			"port %u Rx queue %u cannot allocate verbs resources",
-			dev->data->port_id, rxq_ctrl->idx);
+			dev->data->port_id, rxq_data->idx);
 		rte_errno = ENOMEM;
 		goto error;
 	}
@@ -1104,7 +1104,7 @@ mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
 	LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
 		DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
-			dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
+			dev->data->port_id, rxq_ibv->rxq_ctrl->rxq.idx);
 		++ret;
 	}
 	return ret;
 }
@@ -1470,7 +1470,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->rxq.port_id = dev->data->port_id;
 	tmpl->priv = priv;
 	tmpl->rxq.mp = mp;
-	tmpl->rxq.stats.idx = idx;
 	tmpl->rxq.elts_n = log2above(desc);
 	tmpl->rxq.rq_repl_thresh =
 		MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
@@ -1479,7 +1478,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 #ifndef RTE_ARCH_64
 	tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
 #endif
-	tmpl->idx = idx;
+	tmpl->rxq.idx = idx;
 	rte_atomic32_inc(&tmpl->refcnt);
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 	return tmpl;
@@ -1592,7 +1591,7 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
 	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
 		DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
-			dev->data->port_id, rxq_ctrl->idx);
+			dev->data->port_id, rxq_ctrl->rxq.idx);
 		++ret;
 	}
 	return ret;
 }
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index ced9945888..7b58063ceb 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -41,7 +41,6 @@
 #define MLX5_FLOW_TUNNEL 5
 
 struct mlx5_rxq_stats {
-	unsigned int idx; /**< Mapping index. */
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	uint64_t ipackets; /**< Total of successfully received packets. */
 	uint64_t ibytes; /**< Total of successfully received bytes. */
@@ -51,7 +50,6 @@ struct mlx5_rxq_stats {
 };
 
 struct mlx5_txq_stats {
-	unsigned int idx; /**< Mapping index. */
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	uint64_t opackets; /**< Total of successfully sent packets. */
 	uint64_t obytes; /**< Total of successfully sent bytes. */
@@ -116,6 +114,7 @@ struct mlx5_rxq_data {
 	struct rte_mempool *mp;
 	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
 	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
+	uint16_t idx; /* Queue index. */
 	struct mlx5_rxq_stats stats;
 	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
 	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
@@ -141,14 +140,13 @@ struct mlx5_rxq_ibv {
 
 /* RX queue control descriptor. */
 struct mlx5_rxq_ctrl {
+	struct mlx5_rxq_data rxq; /* Data path structure. */
 	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
-	struct mlx5_rxq_data rxq; /* Data path structure. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	unsigned int irq:1; /* Whether IRQ is enabled. */
-	uint16_t idx; /* Queue index. */
 	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
 	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
 };
@@ -205,6 +203,7 @@ struct mlx5_txq_data {
 	volatile uint32_t *cq_db; /* Completion queue doorbell. */
 	volatile void *bf_reg; /* Blueflame register remapped. */
 	struct rte_mbuf *(*elts)[]; /* TX elements. */
+	uint16_t idx; /* Queue index. */
 	struct mlx5_txq_stats stats; /* TX queue counters. */
 #ifndef RTE_ARCH_64
 	rte_spinlock_t *uar_lock;
@@ -223,6 +222,7 @@ struct mlx5_txq_ibv {
 
 /* TX queue control descriptor. */
 struct mlx5_txq_ctrl {
+	struct mlx5_txq_data txq; /* Data path structure. */
 	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	unsigned int socket; /* CPU socket ID for allocations. */
@@ -230,10 +230,8 @@ struct mlx5_txq_ctrl {
 	unsigned int max_tso_header; /* Max TSO header size. */
 	struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
-	struct mlx5_txq_data txq; /* Data path structure. */
 	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
 	volatile void *bf_reg_orig; /* Blueflame register from verbs. */
-	uint16_t idx; /* Queue index. */
 };
 
 /* mlx5_rxq.c */
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 5af199d0d5..ed50667f45 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -386,7 +386,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 		if (rxq == NULL)
 			continue;
-		idx = rxq->stats.idx;
+		idx = rxq->idx;
 		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
 			tmp.q_ipackets[idx] += rxq->stats.ipackets;
@@ -407,7 +407,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 		if (txq == NULL)
 			continue;
-		idx = txq->stats.idx;
+		idx = txq->idx;
 		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
 			tmp.q_opackets[idx] += txq->stats.opackets;
@@ -442,21 +442,18 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl;
 	unsigned int i;
-	unsigned int idx;
 
 	for (i = 0; (i != priv->rxqs_n); ++i) {
 		if ((*priv->rxqs)[i] == NULL)
 			continue;
-		idx = (*priv->rxqs)[i]->stats.idx;
-		(*priv->rxqs)[i]->stats =
-			(struct mlx5_rxq_stats){ .idx = idx };
+		memset(&(*priv->rxqs)[i]->stats, 0,
+		       sizeof(struct mlx5_rxq_stats));
 	}
 	for (i = 0; (i != priv->txqs_n); ++i) {
 		if ((*priv->txqs)[i] == NULL)
 			continue;
-		idx = (*priv->txqs)[i]->stats.idx;
-		(*priv->txqs)[i]->stats =
-			(struct mlx5_txq_stats){ .idx = idx };
+		memset(&(*priv->txqs)[i]->stats, 0,
+		       sizeof(struct mlx5_txq_stats));
 	}
 	mlx5_read_ib_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base);
 #ifndef MLX5_PMD_SOFT_COUNTERS
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 5b73f0ff03..7c1e5594d6 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -123,7 +123,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
 		DRV_LOG(DEBUG,
 			"port %u Rx queue %u registering"
 			" mp %s having %u chunks",
-			dev->data->port_id, rxq_ctrl->idx,
+			dev->data->port_id, rxq_ctrl->rxq.idx,
 			mp->name, mp->nb_mem_chunks);
 		mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
 		ret = rxq_alloc_elts(rxq_ctrl);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 1b3d89f2f6..4bd08cb035 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -48,7 +48,7 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	for (i = 0; (i != elts_n); ++i)
 		(*txq_ctrl->txq.elts)[i] = NULL;
 	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
-		PORT_ID(txq_ctrl->priv), txq_ctrl->idx, elts_n);
+		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
 	txq_ctrl->txq.elts_comp = 0;
@@ -70,7 +70,7 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
 
 	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
-		PORT_ID(txq_ctrl->priv), txq_ctrl->idx);
+		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
 	txq_ctrl->txq.elts_comp = 0;
@@ -224,7 +224,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
 		if ((*priv->txqs)[i] == txq) {
 			mlx5_txq_release(ETH_DEV(priv), i);
 			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
-				PORT_ID(priv), txq_ctrl->idx);
+				PORT_ID(priv), txq->idx);
 			break;
 		}
 	}
@@ -273,7 +273,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 			continue;
 		txq = (*priv->txqs)[i];
 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
-		assert(txq_ctrl->idx == (uint16_t)i);
+		assert(txq->idx == (uint16_t)i);
 		/* UAR addr form verbs used to find dup and offset in page. */
 		uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
 		off = uar_va & (page_size - 1); /* offset in page. */
@@ -301,7 +301,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 				DRV_LOG(ERR,
 					"port %u call to mmap failed on UAR"
 					" for txq %u",
-					dev->data->port_id, txq_ctrl->idx);
+					dev->data->port_id, txq->idx);
 				rte_errno = ENXIO;
 				return -rte_errno;
 			}
@@ -629,7 +629,7 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
 	LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
 		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
-			dev->data->port_id, txq_ibv->txq_ctrl->idx);
+			dev->data->port_id, txq_ibv->txq_ctrl->txq.idx);
 		++ret;
 	}
 	return ret;
 }
@@ -778,7 +778,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->priv = priv;
 	tmpl->socket = socket;
 	tmpl->txq.elts_n = log2above(desc);
-	tmpl->idx = idx;
+	tmpl->txq.idx = idx;
 	txq_set_params(tmpl);
 	DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
 		dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
@@ -786,7 +786,6 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
 	tmpl->txq.elts =
 		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
-	tmpl->txq.stats.idx = idx;
 	rte_atomic32_inc(&tmpl->refcnt);
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
@@ -893,12 +892,12 @@ int
 mlx5_txq_verify(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_txq_ctrl *txq;
+	struct mlx5_txq_ctrl *txq_ctrl;
 	int ret = 0;
 
-	LIST_FOREACH(txq, &priv->txqsctrl, next) {
+	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
 		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
-			dev->data->port_id, txq->idx);
+			dev->data->port_id, txq_ctrl->txq.idx);
 		++ret;
 	}
 	return ret;
-- 
2.11.0