From: Nelio Laranjeiro
To: dev@dpdk.org
Cc: Ferruh Yigit, Adrien Mazarguil
Date: Wed, 22 Jun 2016 11:05:37 +0200
Message-Id: <1466586355-30777-8-git-send-email-nelio.laranjeiro@6wind.com>
X-Mailer: git-send-email 2.1.4
In-Reply-To: <1466586355-30777-1-git-send-email-nelio.laranjeiro@6wind.com>
References: <1466493818-1877-1-git-send-email-nelio.laranjeiro@6wind.com>
 <1466586355-30777-1-git-send-email-nelio.laranjeiro@6wind.com>
Subject: [dpdk-dev] [PATCH v4 07/25] mlx5: split Tx queue structure

To keep the data path as efficient as possible, move the fields only
useful to the control path into the new structure txq_ctrl.
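
The resulting layout can be pictured with a minimal, self-contained
sketch (simplified field names, illustration only -- not the driver
code itself):

    #include <assert.h>
    #include <stddef.h>
    #include <stdlib.h>

    /* Stand-in for the kernel-style container_of() helper. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Hot fields touched by every Tx burst; kept small and separate. */
    struct txq {
            unsigned int elts_head; /* Current index in (*elts)[]. */
            unsigned int elts_tail; /* First element awaiting completion. */
    };

    /* Cold, control-path-only fields wrap the data path structure. */
    struct txq_ctrl {
            int socket;     /* CPU socket ID for allocations. */
            struct txq txq; /* Embedded data path structure. */
    };

    int
    main(void)
    {
            struct txq_ctrl *ctrl = calloc(1, sizeof(*ctrl));
            struct txq *txq;

            if (ctrl == NULL)
                    return 1;
            /* The data path only ever sees the embedded structure... */
            txq = &ctrl->txq;
            /* ...and control code recovers its owner at zero cost. */
            assert(container_of(txq, struct txq_ctrl, txq) == ctrl);
            free(ctrl);
            return 0;
    }

The data path is handed &txq_ctrl->txq only, so its cache footprint
stays small, while control code recovers the enclosing txq_ctrl through
container_of(), a compile-time offset with no runtime lookup.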
Signed-off-by: Nelio Laranjeiro
Signed-off-by: Adrien Mazarguil
---
 drivers/net/mlx5/mlx5.c        |  21 ++--
 drivers/net/mlx5/mlx5_ethdev.c |  28 +++---
 drivers/net/mlx5/mlx5_mr.c     |  39 ++++----
 drivers/net/mlx5/mlx5_rxtx.h   |   9 +-
 drivers/net/mlx5/mlx5_txq.c    | 198 +++++++++++++++++++++--------------------
 5 files changed, 159 insertions(+), 136 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 350028b..3d30e00 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -98,7 +98,6 @@ static void
 mlx5_dev_close(struct rte_eth_dev *dev)
 {
         struct priv *priv = mlx5_get_priv(dev);
-        void *tmp;
         unsigned int i;

         priv_lock(priv);
@@ -122,12 +121,13 @@ mlx5_dev_close(struct rte_eth_dev *dev)
         /* XXX race condition if mlx5_rx_burst() is still running. */
         usleep(1000);
         for (i = 0; (i != priv->rxqs_n); ++i) {
-                tmp = (*priv->rxqs)[i];
-                if (tmp == NULL)
+                struct rxq *rxq = (*priv->rxqs)[i];
+
+                if (rxq == NULL)
                         continue;
                 (*priv->rxqs)[i] = NULL;
-                rxq_cleanup(tmp);
-                rte_free(tmp);
+                rxq_cleanup(rxq);
+                rte_free(rxq);
         }
         priv->rxqs_n = 0;
         priv->rxqs = NULL;
@@ -136,12 +136,15 @@ mlx5_dev_close(struct rte_eth_dev *dev)
         /* XXX race condition if mlx5_tx_burst() is still running. */
         usleep(1000);
         for (i = 0; (i != priv->txqs_n); ++i) {
-                tmp = (*priv->txqs)[i];
-                if (tmp == NULL)
+                struct txq *txq = (*priv->txqs)[i];
+                struct txq_ctrl *txq_ctrl;
+
+                if (txq == NULL)
                         continue;
+                txq_ctrl = container_of(txq, struct txq_ctrl, txq);
                 (*priv->txqs)[i] = NULL;
-                txq_cleanup(tmp);
-                rte_free(tmp);
+                txq_cleanup(txq_ctrl);
+                rte_free(txq_ctrl);
         }
         priv->txqs_n = 0;
         priv->txqs = NULL;
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index ca57021..4095a06 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -1232,28 +1232,32 @@ mlx5_secondary_data_setup(struct priv *priv)
         /* TX queues. */
         for (i = 0; i != nb_tx_queues; ++i) {
                 struct txq *primary_txq = (*sd->primary_priv->txqs)[i];
-                struct txq *txq;
+                struct txq_ctrl *primary_txq_ctrl;
+                struct txq_ctrl *txq_ctrl;

                 if (primary_txq == NULL)
                         continue;
-                txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0,
-                                        primary_txq->socket);
-                if (txq != NULL) {
+                primary_txq_ctrl = container_of(primary_txq,
+                                                struct txq_ctrl, txq);
+                txq_ctrl = rte_calloc_socket("TXQ", 1, sizeof(*txq_ctrl), 0,
+                                             primary_txq_ctrl->socket);
+                if (txq_ctrl != NULL) {
                         if (txq_setup(priv->dev,
-                                      txq,
+                                      primary_txq_ctrl,
                                       primary_txq->elts_n,
-                                      primary_txq->socket,
+                                      primary_txq_ctrl->socket,
                                       NULL) == 0) {
-                                txq->stats.idx = primary_txq->stats.idx;
-                                tx_queues[i] = txq;
+                                txq_ctrl->txq.stats.idx =
+                                        primary_txq->stats.idx;
+                                tx_queues[i] = &txq_ctrl->txq;
                                 continue;
                         }
-                        rte_free(txq);
+                        rte_free(txq_ctrl);
                 }
                 while (i) {
-                        txq = tx_queues[--i];
-                        txq_cleanup(txq);
-                        rte_free(txq);
+                        txq_ctrl = tx_queues[--i];
+                        txq_cleanup(txq_ctrl);
+                        rte_free(txq_ctrl);
                 }
                 goto error;
         }
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 7c3e87f..79d5568 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -183,33 +183,36 @@ mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
 uint32_t
 txq_mp2mr_reg(struct txq *txq, struct rte_mempool *mp, unsigned int idx)
 {
+        struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
         struct ibv_mr *mr;

         /* Add a new entry, register MR first. */
         DEBUG("%p: discovered new memory pool \"%s\" (%p)",
-              (void *)txq, mp->name, (void *)mp);
-        mr = mlx5_mp2mr(txq->priv->pd, mp);
+              (void *)txq_ctrl, mp->name, (void *)mp);
+        mr = mlx5_mp2mr(txq_ctrl->txq.priv->pd, mp);
         if (unlikely(mr == NULL)) {
                 DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
-                      (void *)txq);
+                      (void *)txq_ctrl);
                 return (uint32_t)-1;
         }
-        if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
+        if (unlikely(idx == RTE_DIM(txq_ctrl->txq.mp2mr))) {
                 /* Table is full, remove oldest entry. */
                 DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
-                      (void *)txq);
+                      (void *)txq_ctrl);
                 --idx;
-                claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr));
-                memmove(&txq->mp2mr[0], &txq->mp2mr[1],
-                        (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
+                claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[0].mr));
+                memmove(&txq_ctrl->txq.mp2mr[0], &txq_ctrl->txq.mp2mr[1],
+                        (sizeof(txq_ctrl->txq.mp2mr) -
+                         sizeof(txq_ctrl->txq.mp2mr[0])));
         }
         /* Store the new entry. */
-        txq->mp2mr[idx].mp = mp;
-        txq->mp2mr[idx].mr = mr;
-        txq->mp2mr[idx].lkey = mr->lkey;
+        txq_ctrl->txq.mp2mr[idx].mp = mp;
+        txq_ctrl->txq.mp2mr[idx].mr = mr;
+        txq_ctrl->txq.mp2mr[idx].lkey = mr->lkey;
         DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
-              (void *)txq, mp->name, (void *)mp, txq->mp2mr[idx].lkey);
-        return txq->mp2mr[idx].lkey;
+              (void *)txq_ctrl, mp->name, (void *)mp,
+              txq_ctrl->txq.mp2mr[idx].lkey);
+        return txq_ctrl->txq.mp2mr[idx].lkey;
 }

 struct txq_mp2mr_mbuf_check_data {
@@ -255,7 +258,7 @@ txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
 void
 txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
 {
-        struct txq *txq = arg;
+        struct txq_ctrl *txq_ctrl = arg;
         struct txq_mp2mr_mbuf_check_data data = {
                 .ret = 0,
         };
@@ -265,13 +268,13 @@ txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
         if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
             data.ret == -1)
                 return;
-        for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
-                if (unlikely(txq->mp2mr[i].mp == NULL)) {
+        for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
+                if (unlikely(txq_ctrl->txq.mp2mr[i].mp == NULL)) {
                         /* Unknown MP, add a new MR for it. */
                         break;
                 }
-                if (txq->mp2mr[i].mp == mp)
+                if (txq_ctrl->txq.mp2mr[i].mp == mp)
                         return;
         }
-        txq_mp2mr_reg(txq, mp, i);
+        txq_mp2mr_reg(&txq_ctrl->txq, mp, i);
 }
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 3a353b0..5baefcb 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -256,6 +256,10 @@ struct txq {
                 uint32_t lkey; /* mr->lkey */
         } mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
         struct mlx5_txq_stats stats; /* TX queue counters. */
+} __rte_cache_aligned;
+
+/* TX queue control descriptor. */
+struct txq_ctrl {
 #ifdef HAVE_VERBS_VLAN_INSERTION
         struct ibv_exp_qp_burst_family_v1 *if_qp; /* QP burst interface. */
 #else
@@ -264,6 +268,7 @@ struct txq {
         struct ibv_exp_cq_family *if_cq; /* CQ interface. */
         struct ibv_exp_res_domain *rd; /* Resource Domain. */
         unsigned int socket; /* CPU socket ID for allocations. */
+        struct txq txq; /* Data path structure. */
 };

 /* mlx5_rxq.c */
@@ -291,8 +296,8 @@ uint16_t mlx5_rx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);

 /* mlx5_txq.c */

-void txq_cleanup(struct txq *);
-int txq_setup(struct rte_eth_dev *, struct txq *, uint16_t, unsigned int,
+void txq_cleanup(struct txq_ctrl *);
+int txq_setup(struct rte_eth_dev *, struct txq_ctrl *, uint16_t, unsigned int,
               const struct rte_eth_txconf *);
 int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
                         const struct rte_eth_txconf *);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 75da65b..4683775 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -68,7 +68,7 @@
 /**
  * Allocate TX queue elements.
  *
- * @param txq
+ * @param txq_ctrl
  *   Pointer to TX queue structure.
  * @param elts_n
  *   Number of elements to allocate.
@@ -77,15 +77,15 @@
  *   0 on success, errno value on failure.
  */
 static int
-txq_alloc_elts(struct txq *txq, unsigned int elts_n)
+txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
 {
         unsigned int i;
         struct txq_elt (*elts)[elts_n] =
-                rte_calloc_socket("TXQ", 1, sizeof(*elts), 0, txq->socket);
+                rte_calloc_socket("TXQ", 1, sizeof(*elts), 0, txq_ctrl->socket);
         int ret = 0;

         if (elts == NULL) {
-                ERROR("%p: can't allocate packets array", (void *)txq);
+                ERROR("%p: can't allocate packets array", (void *)txq_ctrl);
                 ret = ENOMEM;
                 goto error;
         }
@@ -94,24 +94,24 @@ txq_alloc_elts(struct txq *txq, unsigned int elts_n)
                 elt->buf = NULL;
         }
-        DEBUG("%p: allocated and configured %u WRs", (void *)txq, elts_n);
-        txq->elts_n = elts_n;
-        txq->elts = elts;
-        txq->elts_head = 0;
-        txq->elts_tail = 0;
-        txq->elts_comp = 0;
+        DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
+        txq_ctrl->txq.elts_n = elts_n;
+        txq_ctrl->txq.elts = elts;
+        txq_ctrl->txq.elts_head = 0;
+        txq_ctrl->txq.elts_tail = 0;
+        txq_ctrl->txq.elts_comp = 0;
         /* Request send completion every MLX5_PMD_TX_PER_COMP_REQ packets or
          * at least 4 times per ring. */
-        txq->elts_comp_cd_init =
+        txq_ctrl->txq.elts_comp_cd_init =
                 ((MLX5_PMD_TX_PER_COMP_REQ < (elts_n / 4)) ?
                  MLX5_PMD_TX_PER_COMP_REQ : (elts_n / 4));
-        txq->elts_comp_cd = txq->elts_comp_cd_init;
+        txq_ctrl->txq.elts_comp_cd = txq_ctrl->txq.elts_comp_cd_init;
         assert(ret == 0);
         return 0;
 error:
         rte_free(elts);
-        DEBUG("%p: failed, freed everything", (void *)txq);
+        DEBUG("%p: failed, freed everything", (void *)txq_ctrl);
         assert(ret > 0);
         return ret;
 }
@@ -119,25 +119,25 @@ error:
 /**
  * Free TX queue elements.
  *
- * @param txq
+ * @param txq_ctrl
  *   Pointer to TX queue structure.
  */
 static void
-txq_free_elts(struct txq *txq)
+txq_free_elts(struct txq_ctrl *txq_ctrl)
 {
-        unsigned int elts_n = txq->elts_n;
-        unsigned int elts_head = txq->elts_head;
-        unsigned int elts_tail = txq->elts_tail;
-        struct txq_elt (*elts)[elts_n] = txq->elts;
+        unsigned int elts_n = txq_ctrl->txq.elts_n;
+        unsigned int elts_head = txq_ctrl->txq.elts_head;
+        unsigned int elts_tail = txq_ctrl->txq.elts_tail;
+        struct txq_elt (*elts)[elts_n] = txq_ctrl->txq.elts;

-        DEBUG("%p: freeing WRs", (void *)txq);
-        txq->elts_n = 0;
-        txq->elts_head = 0;
-        txq->elts_tail = 0;
-        txq->elts_comp = 0;
-        txq->elts_comp_cd = 0;
-        txq->elts_comp_cd_init = 0;
-        txq->elts = NULL;
+        DEBUG("%p: freeing WRs", (void *)txq_ctrl);
+        txq_ctrl->txq.elts_n = 0;
+        txq_ctrl->txq.elts_head = 0;
+        txq_ctrl->txq.elts_tail = 0;
+        txq_ctrl->txq.elts_comp = 0;
+        txq_ctrl->txq.elts_comp_cd = 0;
+        txq_ctrl->txq.elts_comp_cd_init = 0;
+        txq_ctrl->txq.elts = NULL;
         if (elts == NULL)
                 return;
@@ -161,63 +161,63 @@ txq_free_elts(struct txq *txq)
  *
  * Destroy objects, free allocated memory and reset the structure for reuse.
  *
- * @param txq
+ * @param txq_ctrl
  *   Pointer to TX queue structure.
  */
 void
-txq_cleanup(struct txq *txq)
+txq_cleanup(struct txq_ctrl *txq_ctrl)
 {
         struct ibv_exp_release_intf_params params;
         size_t i;

-        DEBUG("cleaning up %p", (void *)txq);
-        txq_free_elts(txq);
-        txq->poll_cnt = NULL;
-        txq->send_flush = NULL;
-        if (txq->if_qp != NULL) {
-                assert(txq->priv != NULL);
-                assert(txq->priv->ctx != NULL);
-                assert(txq->qp != NULL);
+        DEBUG("cleaning up %p", (void *)txq_ctrl);
+        txq_free_elts(txq_ctrl);
+        txq_ctrl->txq.poll_cnt = NULL;
+        txq_ctrl->txq.send_flush = NULL;
+        if (txq_ctrl->if_qp != NULL) {
+                assert(txq_ctrl->txq.priv != NULL);
+                assert(txq_ctrl->txq.priv->ctx != NULL);
+                assert(txq_ctrl->txq.qp != NULL);
                 params = (struct ibv_exp_release_intf_params){
                         .comp_mask = 0,
                 };
-                claim_zero(ibv_exp_release_intf(txq->priv->ctx,
-                                                txq->if_qp,
+                claim_zero(ibv_exp_release_intf(txq_ctrl->txq.priv->ctx,
+                                                txq_ctrl->if_qp,
                                                 &params));
         }
-        if (txq->if_cq != NULL) {
-                assert(txq->priv != NULL);
-                assert(txq->priv->ctx != NULL);
-                assert(txq->cq != NULL);
+        if (txq_ctrl->if_cq != NULL) {
+                assert(txq_ctrl->txq.priv != NULL);
+                assert(txq_ctrl->txq.priv->ctx != NULL);
+                assert(txq_ctrl->txq.cq != NULL);
                 params = (struct ibv_exp_release_intf_params){
                         .comp_mask = 0,
                 };
-                claim_zero(ibv_exp_release_intf(txq->priv->ctx,
-                                                txq->if_cq,
+                claim_zero(ibv_exp_release_intf(txq_ctrl->txq.priv->ctx,
+                                                txq_ctrl->if_cq,
                                                 &params));
         }
-        if (txq->qp != NULL)
-                claim_zero(ibv_destroy_qp(txq->qp));
-        if (txq->cq != NULL)
-                claim_zero(ibv_destroy_cq(txq->cq));
-        if (txq->rd != NULL) {
+        if (txq_ctrl->txq.qp != NULL)
+                claim_zero(ibv_destroy_qp(txq_ctrl->txq.qp));
+        if (txq_ctrl->txq.cq != NULL)
+                claim_zero(ibv_destroy_cq(txq_ctrl->txq.cq));
+        if (txq_ctrl->rd != NULL) {
                 struct ibv_exp_destroy_res_domain_attr attr = {
                         .comp_mask = 0,
                 };

-                assert(txq->priv != NULL);
-                assert(txq->priv->ctx != NULL);
-                claim_zero(ibv_exp_destroy_res_domain(txq->priv->ctx,
-                                                      txq->rd,
+                assert(txq_ctrl->txq.priv != NULL);
+                assert(txq_ctrl->txq.priv->ctx != NULL);
+                claim_zero(ibv_exp_destroy_res_domain(txq_ctrl->txq.priv->ctx,
+                                                      txq_ctrl->rd,
                                                       &attr));
         }
-        for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
-                if (txq->mp2mr[i].mp == NULL)
+        for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
+                if (txq_ctrl->txq.mp2mr[i].mp == NULL)
                         break;
-                assert(txq->mp2mr[i].mr != NULL);
-                claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
+                assert(txq_ctrl->txq.mp2mr[i].mr != NULL);
+                claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[i].mr));
         }
-        memset(txq, 0, sizeof(*txq));
+        memset(txq_ctrl, 0, sizeof(*txq_ctrl));
 }

 /**
@@ -225,7 +225,7 @@ txq_cleanup(struct txq *txq)
  *
  * @param dev
  *   Pointer to Ethernet device structure.
- * @param txq
+ * @param txq_ctrl
  *   Pointer to TX queue structure.
  * @param desc
  *   Number of descriptors to configure in queue.
@@ -238,13 +238,15 @@ txq_cleanup(struct txq *txq)
  *   0 on success, errno value on failure.
  */
 int
-txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
+txq_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl, uint16_t desc,
           unsigned int socket, const struct rte_eth_txconf *conf)
 {
         struct priv *priv = mlx5_get_priv(dev);
-        struct txq tmpl = {
-                .priv = priv,
-                .socket = socket
+        struct txq_ctrl tmpl = {
+                .socket = socket,
+                .txq = {
+                        .priv = priv,
+                },
         };
         union {
                 struct ibv_exp_query_intf_params params;
@@ -279,8 +281,8 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
                 .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
                 .res_domain = tmpl.rd,
         };
-        tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
-        if (tmpl.cq == NULL) {
+        tmpl.txq.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
+        if (tmpl.txq.cq == NULL) {
                 ret = ENOMEM;
                 ERROR("%p: CQ creation failure: %s",
                       (void *)dev, strerror(ret));
@@ -292,9 +294,9 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
               priv->device_attr.max_sge);
         attr.init = (struct ibv_exp_qp_init_attr){
                 /* CQ to be associated with the send queue. */
-                .send_cq = tmpl.cq,
+                .send_cq = tmpl.txq.cq,
                 /* CQ to be associated with the receive queue. */
-                .recv_cq = tmpl.cq,
+                .recv_cq = tmpl.txq.cq,
                 .cap = {
                         /* Max number of outstanding WRs. */
                         .max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
@@ -312,8 +314,8 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
                 .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
                               IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
         };
-        tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
-        if (tmpl.qp == NULL) {
+        tmpl.txq.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
+        if (tmpl.txq.qp == NULL) {
                 ret = (errno ? errno : EINVAL);
                 ERROR("%p: QP creation failure: %s",
                       (void *)dev, strerror(ret));
@@ -325,7 +327,7 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
                 /* Primary port number. */
                 .port_num = priv->port
         };
-        ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod,
+        ret = ibv_exp_modify_qp(tmpl.txq.qp, &attr.mod,
                                 (IBV_EXP_QP_STATE | IBV_EXP_QP_PORT));
         if (ret) {
                 ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
@@ -341,14 +343,14 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
         attr.mod = (struct ibv_exp_qp_attr){
                 .qp_state = IBV_QPS_RTR
         };
-        ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
+        ret = ibv_exp_modify_qp(tmpl.txq.qp, &attr.mod, IBV_EXP_QP_STATE);
         if (ret) {
                 ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
                       (void *)dev, strerror(ret));
                 goto error;
         }
         attr.mod.qp_state = IBV_QPS_RTS;
-        ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
+        ret = ibv_exp_modify_qp(tmpl.txq.qp, &attr.mod, IBV_EXP_QP_STATE);
         if (ret) {
                 ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
                       (void *)dev, strerror(ret));
@@ -357,7 +359,7 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
         attr.params = (struct ibv_exp_query_intf_params){
                 .intf_scope = IBV_EXP_INTF_GLOBAL,
                 .intf = IBV_EXP_INTF_CQ,
-                .obj = tmpl.cq,
+                .obj = tmpl.txq.cq,
         };
         tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
         if (tmpl.if_cq == NULL) {
@@ -369,7 +371,7 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
         attr.params = (struct ibv_exp_query_intf_params){
                 .intf_scope = IBV_EXP_INTF_GLOBAL,
                 .intf = IBV_EXP_INTF_QP_BURST,
-                .obj = tmpl.qp,
+                .obj = tmpl.txq.qp,
 #ifdef HAVE_VERBS_VLAN_INSERTION
                 .intf_version = 1,
 #endif
@@ -389,18 +391,18 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
                 goto error;
         }
         /* Clean up txq in case we're reinitializing it. */
-        DEBUG("%p: cleaning-up old txq just in case", (void *)txq);
-        txq_cleanup(txq);
-        *txq = tmpl;
-        txq->poll_cnt = txq->if_cq->poll_cnt;
-        txq->send_pending = txq->if_qp->send_pending;
+        DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
+        txq_cleanup(txq_ctrl);
+        *txq_ctrl = tmpl;
+        txq_ctrl->txq.poll_cnt = txq_ctrl->if_cq->poll_cnt;
+        txq_ctrl->txq.send_pending = txq_ctrl->if_qp->send_pending;
 #ifdef HAVE_VERBS_VLAN_INSERTION
-        txq->send_pending_vlan = txq->if_qp->send_pending_vlan;
+        txq_ctrl->txq.send_pending_vlan = txq_ctrl->if_qp->send_pending_vlan;
 #endif
-        txq->send_flush = txq->if_qp->send_flush;
-        DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl);
+        txq_ctrl->txq.send_flush = txq_ctrl->if_qp->send_flush;
+        DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl);
         /* Pre-register known mempools. */
-        rte_mempool_walk(txq_mp2mr_iter, txq);
+        rte_mempool_walk(txq_mp2mr_iter, txq_ctrl);
         assert(ret == 0);
         return 0;
 error:
@@ -432,12 +434,15 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 {
         struct priv *priv = dev->data->dev_private;
         struct txq *txq = (*priv->txqs)[idx];
+        struct txq_ctrl *txq_ctrl;
         int ret;

         if (mlx5_is_secondary())
                 return -E_RTE_SECONDARY;

         priv_lock(priv);
+        if (txq)
+                txq_ctrl = container_of(txq, struct txq_ctrl, txq);
         DEBUG("%p: configuring queue %u for %u descriptors",
               (void *)dev, idx, desc);
         if (idx >= priv->txqs_n) {
@@ -454,24 +459,25 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                         return -EEXIST;
                 }
                 (*priv->txqs)[idx] = NULL;
-                txq_cleanup(txq);
+                txq_cleanup(txq_ctrl);
         } else {
-                txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket);
-                if (txq == NULL) {
+                txq_ctrl = rte_calloc_socket("TXQ", 1, sizeof(*txq_ctrl),
+                                             0, socket);
+                if (txq_ctrl == NULL) {
                         ERROR("%p: unable to allocate queue index %u",
                               (void *)dev, idx);
                         priv_unlock(priv);
                         return -ENOMEM;
                 }
         }
-        ret = txq_setup(dev, txq, desc, socket, conf);
+        ret = txq_setup(dev, txq_ctrl, desc, socket, conf);
         if (ret)
-                rte_free(txq);
+                rte_free(txq_ctrl);
         else {
-                txq->stats.idx = idx;
+                txq_ctrl->txq.stats.idx = idx;
                 DEBUG("%p: adding TX queue %p to list",
-                      (void *)dev, (void *)txq);
-                (*priv->txqs)[idx] = txq;
+                      (void *)dev, (void *)txq_ctrl);
+                (*priv->txqs)[idx] = &txq_ctrl->txq;
                 /* Update send callback. */
                 dev->tx_pkt_burst = mlx5_tx_burst;
         }
@@ -489,6 +495,7 @@ void
 mlx5_tx_queue_release(void *dpdk_txq)
 {
         struct txq *txq = (struct txq *)dpdk_txq;
+        struct txq_ctrl *txq_ctrl;
         struct priv *priv;
         unsigned int i;
@@ -497,17 +504,18 @@ mlx5_tx_queue_release(void *dpdk_txq)

         if (txq == NULL)
                 return;
+        txq_ctrl = container_of(txq, struct txq_ctrl, txq);
         priv = txq->priv;
         priv_lock(priv);
         for (i = 0; (i != priv->txqs_n); ++i)
                 if ((*priv->txqs)[i] == txq) {
                         DEBUG("%p: removing TX queue %p from list",
-                              (void *)priv->dev, (void *)txq);
+                              (void *)priv->dev, (void *)txq_ctrl);
                         (*priv->txqs)[i] = NULL;
                         break;
                 }
-        txq_cleanup(txq);
-        rte_free(txq);
+        txq_cleanup(txq_ctrl);
+        rte_free(txq_ctrl);
         priv_unlock(priv);
 }
--
2.1.4