DPDK patches and discussions
From: Adrien Mazarguil <adrien.mazarguil@6wind.com>
To: Ferruh Yigit <ferruh.yigit@intel.com>
Cc: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v1 23/29] net/mlx4: drop live queue reconfiguration support
Date: Wed, 11 Oct 2017 16:35:25 +0200	[thread overview]
Message-ID: <572359b6395f8ff9b38424b265ec6b1e93f46c83.1507730496.git.adrien.mazarguil@6wind.com> (raw)
In-Reply-To: <cover.1507730496.git.adrien.mazarguil@6wind.com>

DPDK ensures that setup functions are never called on configured
queues unless those queues have previously been released.

PMDs therefore do not need to deal with the unexpected reconfiguration
of live queues, an operation that may fail with no easy way to recover.
Dropping support for this scenario greatly simplifies the code, as the
allocation and setup steps and their checks can be merged.
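
For illustration only (not part of this patch), below is a minimal C
sketch of the setup pattern this change adopts, using hypothetical
stand-in types rather than the real DPDK/mlx4 structures: a queue index
that is already configured is now rejected with EEXIST instead of being
torn down and rebuilt in place, so allocation and setup form one path.

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for per-device queue bookkeeping. */
    struct queue {
            uint16_t idx;
    };

    struct dev_data {
            struct queue **queues;  /* one slot per queue index */
            uint16_t nb_queues;
    };

    static int
    queue_setup(struct dev_data *data, uint16_t idx)
    {
            struct queue *q;

            if (idx >= data->nb_queues)
                    return -EOVERFLOW;  /* index out of range */
            if (data->queues[idx] != NULL)
                    return -EEXIST;     /* release it first; no live rework */
            q = calloc(1, sizeof(*q));
            if (q == NULL)
                    return -ENOMEM;
            q->idx = idx;
            data->queues[idx] = q;      /* single allocation + setup path */
            return 0;
    }

    static void
    queue_release(struct dev_data *data, uint16_t idx)
    {
            free(data->queues[idx]);
            data->queues[idx] = NULL;   /* slot may now be set up again */
    }

With reconfiguration ruled out, the release callback becomes the only
teardown path, which is why mlx4_rxq_cleanup() and mlx4_txq_cleanup()
are folded into mlx4_rx_queue_release() and mlx4_tx_queue_release()
below.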

Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 drivers/net/mlx4/mlx4_rxq.c  | 281 ++++++++++++++------------------------
 drivers/net/mlx4/mlx4_rxtx.h |   2 -
 drivers/net/mlx4/mlx4_txq.c  | 239 +++++++++++---------------------
 3 files changed, 184 insertions(+), 338 deletions(-)

diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 693db4f..30b0654 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -162,36 +162,12 @@ mlx4_rxq_free_elts(struct rxq *rxq)
 }
 
 /**
- * Clean up a Rx queue.
- *
- * Destroy objects, free allocated memory and reset the structure for reuse.
- *
- * @param rxq
- *   Pointer to Rx queue structure.
- */
-void
-mlx4_rxq_cleanup(struct rxq *rxq)
-{
-	DEBUG("cleaning up %p", (void *)rxq);
-	mlx4_rxq_free_elts(rxq);
-	if (rxq->qp != NULL)
-		claim_zero(ibv_destroy_qp(rxq->qp));
-	if (rxq->cq != NULL)
-		claim_zero(ibv_destroy_cq(rxq->cq));
-	if (rxq->channel != NULL)
-		claim_zero(ibv_destroy_comp_channel(rxq->channel));
-	if (rxq->mr != NULL)
-		claim_zero(ibv_dereg_mr(rxq->mr));
-	memset(rxq, 0, sizeof(*rxq));
-}
-
-/**
- * Configure a Rx queue.
+ * DPDK callback to configure a Rx queue.
  *
  * @param dev
  *   Pointer to Ethernet device structure.
- * @param rxq
- *   Pointer to Rx queue structure.
+ * @param idx
+ *   Rx queue index.
  * @param desc
  *   Number of descriptors to configure in queue.
  * @param socket
@@ -204,30 +180,53 @@ mlx4_rxq_cleanup(struct rxq *rxq)
  * @return
  *   0 on success, negative errno value otherwise and rte_errno is set.
  */
-static int
-mlx4_rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
-	       unsigned int socket, const struct rte_eth_rxconf *conf,
-	       struct rte_mempool *mp)
+int
+mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+		    unsigned int socket, const struct rte_eth_rxconf *conf,
+		    struct rte_mempool *mp)
 {
 	struct priv *priv = dev->data->dev_private;
-	struct rxq tmpl = {
-		.priv = priv,
-		.mp = mp,
-		.socket = socket
-	};
-	struct ibv_qp_attr mod;
-	struct ibv_qp_init_attr qp_init;
-	struct ibv_recv_wr *bad_wr;
-	unsigned int mb_len;
+	uint32_t mb_len = rte_pktmbuf_data_room_size(mp);
+	struct rte_flow_error error;
+	struct rxq *rxq;
 	int ret;
 
 	(void)conf; /* Thresholds configuration (ignored). */
-	mb_len = rte_pktmbuf_data_room_size(mp);
-	if (desc == 0) {
+	DEBUG("%p: configuring queue %u for %u descriptors",
+	      (void *)dev, idx, desc);
+	if (idx >= dev->data->nb_rx_queues) {
+		rte_errno = EOVERFLOW;
+		ERROR("%p: queue index out of range (%u >= %u)",
+		      (void *)dev, idx, dev->data->nb_rx_queues);
+		return -rte_errno;
+	}
+	rxq = dev->data->rx_queues[idx];
+	if (rxq) {
+		rte_errno = EEXIST;
+		ERROR("%p: Rx queue %u already configured, release it first",
+		      (void *)dev, idx);
+		return -rte_errno;
+	}
+	if (!desc) {
 		rte_errno = EINVAL;
 		ERROR("%p: invalid number of Rx descriptors", (void *)dev);
-		goto error;
+		return -rte_errno;
+	}
+	/* Allocate and initialize Rx queue. */
+	rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
+	if (!rxq) {
+		rte_errno = ENOMEM;
+		ERROR("%p: unable to allocate queue index %u",
+		      (void *)dev, idx);
+		return -rte_errno;
 	}
+	*rxq = (struct rxq){
+		.priv = priv,
+		.mp = mp,
+		.port_id = dev->data->port_id,
+		.stats.idx = idx,
+		.socket = socket,
+	};
 	/* Enable scattered packets support for this queue if necessary. */
 	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
@@ -246,201 +245,115 @@ mlx4_rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
 		     mb_len - RTE_PKTMBUF_HEADROOM);
 	}
 	/* Use the entire Rx mempool as the memory region. */
-	tmpl.mr = mlx4_mp2mr(priv->pd, mp);
-	if (tmpl.mr == NULL) {
+	rxq->mr = mlx4_mp2mr(priv->pd, mp);
+	if (!rxq->mr) {
 		rte_errno = EINVAL;
 		ERROR("%p: MR creation failure: %s",
 		      (void *)dev, strerror(rte_errno));
 		goto error;
 	}
 	if (dev->data->dev_conf.intr_conf.rxq) {
-		tmpl.channel = ibv_create_comp_channel(priv->ctx);
-		if (tmpl.channel == NULL) {
+		rxq->channel = ibv_create_comp_channel(priv->ctx);
+		if (rxq->channel == NULL) {
 			rte_errno = ENOMEM;
 			ERROR("%p: Rx interrupt completion channel creation"
 			      " failure: %s",
 			      (void *)dev, strerror(rte_errno));
 			goto error;
 		}
-		if (mlx4_fd_set_non_blocking(tmpl.channel->fd) < 0) {
+		if (mlx4_fd_set_non_blocking(rxq->channel->fd) < 0) {
 			ERROR("%p: unable to make Rx interrupt completion"
 			      " channel non-blocking: %s",
 			      (void *)dev, strerror(rte_errno));
 			goto error;
 		}
 	}
-	tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, tmpl.channel, 0);
-	if (tmpl.cq == NULL) {
+	rxq->cq = ibv_create_cq(priv->ctx, desc, NULL, rxq->channel, 0);
+	if (!rxq->cq) {
 		rte_errno = ENOMEM;
 		ERROR("%p: CQ creation failure: %s",
 		      (void *)dev, strerror(rte_errno));
 		goto error;
 	}
-	DEBUG("priv->device_attr.max_qp_wr is %d",
-	      priv->device_attr.max_qp_wr);
-	DEBUG("priv->device_attr.max_sge is %d",
-	      priv->device_attr.max_sge);
-	qp_init = (struct ibv_qp_init_attr){
-		/* CQ to be associated with the send queue. */
-		.send_cq = tmpl.cq,
-		/* CQ to be associated with the receive queue. */
-		.recv_cq = tmpl.cq,
-		.cap = {
-			/* Max number of outstanding WRs. */
-			.max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
-					priv->device_attr.max_qp_wr :
-					desc),
-			/* Max number of scatter/gather elements in a WR. */
-			.max_recv_sge = 1,
-		},
-		.qp_type = IBV_QPT_RAW_PACKET,
-	};
-	tmpl.qp = ibv_create_qp(priv->pd, &qp_init);
-	if (tmpl.qp == NULL) {
+	rxq->qp = ibv_create_qp
+		(priv->pd,
+		 &(struct ibv_qp_init_attr){
+			.send_cq = rxq->cq,
+			.recv_cq = rxq->cq,
+			.cap = {
+				.max_recv_wr =
+					RTE_MIN(priv->device_attr.max_qp_wr,
+						desc),
+				.max_recv_sge = 1,
+			},
+			.qp_type = IBV_QPT_RAW_PACKET,
+		 });
+	if (!rxq->qp) {
 		rte_errno = errno ? errno : EINVAL;
 		ERROR("%p: QP creation failure: %s",
 		      (void *)dev, strerror(rte_errno));
 		goto error;
 	}
-	mod = (struct ibv_qp_attr){
-		/* Move the QP to this state. */
-		.qp_state = IBV_QPS_INIT,
-		/* Primary port number. */
-		.port_num = priv->port
-	};
-	ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
+	ret = ibv_modify_qp
+		(rxq->qp,
+		 &(struct ibv_qp_attr){
+			.qp_state = IBV_QPS_INIT,
+			.port_num = priv->port,
+		 },
+		 IBV_QP_STATE | IBV_QP_PORT);
 	if (ret) {
 		rte_errno = ret;
 		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
 		      (void *)dev, strerror(rte_errno));
 		goto error;
 	}
-	ret = mlx4_rxq_alloc_elts(&tmpl, desc);
+	ret = mlx4_rxq_alloc_elts(rxq, desc);
 	if (ret) {
 		ERROR("%p: RXQ allocation failed: %s",
 		      (void *)dev, strerror(rte_errno));
 		goto error;
 	}
-	ret = ibv_post_recv(tmpl.qp, &(*tmpl.elts)[0].wr, &bad_wr);
+	ret = ibv_post_recv(rxq->qp, &(*rxq->elts)[0].wr,
+			    &(struct ibv_recv_wr *){ NULL });
 	if (ret) {
 		rte_errno = ret;
-		ERROR("%p: ibv_post_recv() failed for WR %p: %s",
+		ERROR("%p: ibv_post_recv() failed: %s",
 		      (void *)dev,
-		      (void *)bad_wr,
 		      strerror(rte_errno));
 		goto error;
 	}
-	mod = (struct ibv_qp_attr){
-		.qp_state = IBV_QPS_RTR
-	};
-	ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE);
+	ret = ibv_modify_qp
+		(rxq->qp,
+		 &(struct ibv_qp_attr){
+			.qp_state = IBV_QPS_RTR,
+		 },
+		 IBV_QP_STATE);
 	if (ret) {
 		rte_errno = ret;
 		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
 		      (void *)dev, strerror(rte_errno));
 		goto error;
 	}
-	/* Save port ID. */
-	tmpl.port_id = dev->data->port_id;
-	DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
-	/* Clean up rxq in case we're reinitializing it. */
-	DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
-	mlx4_rxq_cleanup(rxq);
-	*rxq = tmpl;
-	DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl);
-	return 0;
+	DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq);
+	dev->data->rx_queues[idx] = rxq;
+	/* Enable associated flows. */
+	ret = mlx4_flow_sync(priv, &error);
+	if (!ret)
+		return 0;
+	ERROR("cannot re-attach flow rules to queue %u"
+	      " (code %d, \"%s\"), flow error type %d, cause %p, message: %s",
+	      idx, -ret, strerror(-ret), error.type, error.cause,
+	      error.message ? error.message : "(unspecified)");
 error:
+	dev->data->rx_queues[idx] = NULL;
 	ret = rte_errno;
-	mlx4_rxq_cleanup(&tmpl);
+	mlx4_rx_queue_release(rxq);
 	rte_errno = ret;
 	assert(rte_errno > 0);
 	return -rte_errno;
 }
 
 /**
- * DPDK callback to configure a Rx queue.
- *
- * @param dev
- *   Pointer to Ethernet device structure.
- * @param idx
- *   Rx queue index.
- * @param desc
- *   Number of descriptors to configure in queue.
- * @param socket
- *   NUMA socket on which memory must be allocated.
- * @param[in] conf
- *   Thresholds parameters.
- * @param mp
- *   Memory pool for buffer allocations.
- *
- * @return
- *   0 on success, negative errno value otherwise and rte_errno is set.
- */
-int
-mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
-		    unsigned int socket, const struct rte_eth_rxconf *conf,
-		    struct rte_mempool *mp)
-{
-	struct priv *priv = dev->data->dev_private;
-	struct rxq *rxq = dev->data->rx_queues[idx];
-	int ret;
-
-	DEBUG("%p: configuring queue %u for %u descriptors",
-	      (void *)dev, idx, desc);
-	if (idx >= dev->data->nb_rx_queues) {
-		rte_errno = EOVERFLOW;
-		ERROR("%p: queue index out of range (%u >= %u)",
-		      (void *)dev, idx, dev->data->nb_rx_queues);
-		return -rte_errno;
-	}
-	if (rxq != NULL) {
-		DEBUG("%p: reusing already allocated queue index %u (%p)",
-		      (void *)dev, idx, (void *)rxq);
-		if (priv->started) {
-			rte_errno = EEXIST;
-			return -rte_errno;
-		}
-		dev->data->rx_queues[idx] = NULL;
-		/* Disable associated flows. */
-		mlx4_flow_sync(priv, NULL);
-		mlx4_rxq_cleanup(rxq);
-	} else {
-		rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
-		if (rxq == NULL) {
-			rte_errno = ENOMEM;
-			ERROR("%p: unable to allocate queue index %u",
-			      (void *)dev, idx);
-			return -rte_errno;
-		}
-	}
-	ret = mlx4_rxq_setup(dev, rxq, desc, socket, conf, mp);
-	if (ret) {
-		rte_free(rxq);
-	} else {
-		struct rte_flow_error error;
-
-		rxq->stats.idx = idx;
-		DEBUG("%p: adding Rx queue %p to list",
-		      (void *)dev, (void *)rxq);
-		dev->data->rx_queues[idx] = rxq;
-		/* Re-enable associated flows. */
-		ret = mlx4_flow_sync(priv, &error);
-		if (ret) {
-			ERROR("cannot re-attach flow rules to queue %u"
-			      " (code %d, \"%s\"), flow error type %d,"
-			      " cause %p, message: %s", idx,
-			      -ret, strerror(-ret), error.type, error.cause,
-			      error.message ? error.message : "(unspecified)");
-			dev->data->rx_queues[idx] = NULL;
-			mlx4_rxq_cleanup(rxq);
-			rte_free(rxq);
-			return ret;
-		}
-	}
-	return ret;
-}
-
-/**
  * DPDK callback to release a Rx queue.
  *
  * @param dpdk_rxq
@@ -464,6 +377,14 @@ mlx4_rx_queue_release(void *dpdk_rxq)
 			break;
 		}
 	mlx4_flow_sync(priv, NULL);
-	mlx4_rxq_cleanup(rxq);
+	mlx4_rxq_free_elts(rxq);
+	if (rxq->qp)
+		claim_zero(ibv_destroy_qp(rxq->qp));
+	if (rxq->cq)
+		claim_zero(ibv_destroy_cq(rxq->cq));
+	if (rxq->channel)
+		claim_zero(ibv_destroy_comp_channel(rxq->channel));
+	if (rxq->mr)
+		claim_zero(ibv_dereg_mr(rxq->mr));
 	rte_free(rxq);
 }
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index 7a2c982..d62120e 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -122,7 +122,6 @@ struct txq {
 
 /* mlx4_rxq.c */
 
-void mlx4_rxq_cleanup(struct rxq *rxq);
 int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
 			uint16_t desc, unsigned int socket,
 			const struct rte_eth_rxconf *conf,
@@ -143,7 +142,6 @@ uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
 
 /* mlx4_txq.c */
 
-void mlx4_txq_cleanup(struct txq *txq);
 int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
 			uint16_t desc, unsigned int socket,
 			const struct rte_eth_txconf *conf);
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 3cece3e..f102c68 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -155,34 +155,6 @@ mlx4_txq_free_elts(struct txq *txq)
 	rte_free(elts);
 }
 
-/**
- * Clean up a Tx queue.
- *
- * Destroy objects, free allocated memory and reset the structure for reuse.
- *
- * @param txq
- *   Pointer to Tx queue structure.
- */
-void
-mlx4_txq_cleanup(struct txq *txq)
-{
-	size_t i;
-
-	DEBUG("cleaning up %p", (void *)txq);
-	mlx4_txq_free_elts(txq);
-	if (txq->qp != NULL)
-		claim_zero(ibv_destroy_qp(txq->qp));
-	if (txq->cq != NULL)
-		claim_zero(ibv_destroy_cq(txq->cq));
-	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
-		if (txq->mp2mr[i].mp == NULL)
-			break;
-		assert(txq->mp2mr[i].mr != NULL);
-		claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
-	}
-	memset(txq, 0, sizeof(*txq));
-}
-
 struct txq_mp2mr_mbuf_check_data {
 	int ret;
 };
@@ -242,12 +214,12 @@ mlx4_txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
 }
 
 /**
- * Configure a Tx queue.
+ * DPDK callback to configure a Tx queue.
  *
  * @param dev
  *   Pointer to Ethernet device structure.
- * @param txq
- *   Pointer to Tx queue structure.
+ * @param idx
+ *   Tx queue index.
  * @param desc
  *   Number of descriptors to configure in queue.
  * @param socket
@@ -258,190 +230,135 @@ mlx4_txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
  * @return
  *   0 on success, negative errno value otherwise and rte_errno is set.
  */
-static int
-mlx4_txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
-	       unsigned int socket, const struct rte_eth_txconf *conf)
+int
+mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+		    unsigned int socket, const struct rte_eth_txconf *conf)
 {
 	struct priv *priv = dev->data->dev_private;
-	struct txq tmpl = {
-		.priv = priv,
-		.socket = socket
-	};
-	union {
-		struct ibv_qp_init_attr init;
-		struct ibv_qp_attr mod;
-	} attr;
+	struct ibv_qp_init_attr qp_init_attr;
+	struct txq *txq;
 	int ret;
 
 	(void)conf; /* Thresholds configuration (ignored). */
-	if (priv == NULL) {
-		rte_errno = EINVAL;
-		goto error;
+	DEBUG("%p: configuring queue %u for %u descriptors",
+	      (void *)dev, idx, desc);
+	if (idx >= dev->data->nb_tx_queues) {
+		rte_errno = EOVERFLOW;
+		ERROR("%p: queue index out of range (%u >= %u)",
+		      (void *)dev, idx, dev->data->nb_tx_queues);
+		return -rte_errno;
+	}
+	txq = dev->data->tx_queues[idx];
+	if (txq) {
+		rte_errno = EEXIST;
+		DEBUG("%p: Tx queue %u already configured, release it first",
+		      (void *)dev, idx);
+		return -rte_errno;
 	}
-	if (desc == 0) {
+	if (!desc) {
 		rte_errno = EINVAL;
 		ERROR("%p: invalid number of Tx descriptors", (void *)dev);
-		goto error;
+		return -rte_errno;
 	}
-	/* MRs will be registered in mp2mr[] later. */
-	tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
-	if (tmpl.cq == NULL) {
+	/* Allocate and initialize Tx queue. */
+	txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket);
+	if (!txq) {
+		rte_errno = ENOMEM;
+		ERROR("%p: unable to allocate queue index %u",
+		      (void *)dev, idx);
+		return -rte_errno;
+	}
+	*txq = (struct txq){
+		.priv = priv,
+		.stats.idx = idx,
+		.socket = socket,
+	};
+	txq->cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
+	if (!txq->cq) {
 		rte_errno = ENOMEM;
 		ERROR("%p: CQ creation failure: %s",
 		      (void *)dev, strerror(rte_errno));
 		goto error;
 	}
-	DEBUG("priv->device_attr.max_qp_wr is %d",
-	      priv->device_attr.max_qp_wr);
-	DEBUG("priv->device_attr.max_sge is %d",
-	      priv->device_attr.max_sge);
-	attr.init = (struct ibv_qp_init_attr){
-		/* CQ to be associated with the send queue. */
-		.send_cq = tmpl.cq,
-		/* CQ to be associated with the receive queue. */
-		.recv_cq = tmpl.cq,
+	qp_init_attr = (struct ibv_qp_init_attr){
+		.send_cq = txq->cq,
+		.recv_cq = txq->cq,
 		.cap = {
-			/* Max number of outstanding WRs. */
-			.max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
-					priv->device_attr.max_qp_wr :
-					desc),
-			/* Max number of scatter/gather elements in a WR. */
+			.max_send_wr =
+				RTE_MIN(priv->device_attr.max_qp_wr, desc),
 			.max_send_sge = 1,
 			.max_inline_data = MLX4_PMD_MAX_INLINE,
 		},
 		.qp_type = IBV_QPT_RAW_PACKET,
-		/*
-		 * Do *NOT* enable this, completions events are managed per
-		 * Tx burst.
-		 */
+		/* No completion events must occur by default. */
 		.sq_sig_all = 0,
 	};
-	tmpl.qp = ibv_create_qp(priv->pd, &attr.init);
-	if (tmpl.qp == NULL) {
+	txq->qp = ibv_create_qp(priv->pd, &qp_init_attr);
+	if (!txq->qp) {
 		rte_errno = errno ? errno : EINVAL;
 		ERROR("%p: QP creation failure: %s",
 		      (void *)dev, strerror(rte_errno));
 		goto error;
 	}
-	/* ibv_create_qp() updates this value. */
-	tmpl.max_inline = attr.init.cap.max_inline_data;
-	attr.mod = (struct ibv_qp_attr){
-		/* Move the QP to this state. */
-		.qp_state = IBV_QPS_INIT,
-		/* Primary port number. */
-		.port_num = priv->port
-	};
-	ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE | IBV_QP_PORT);
+	txq->max_inline = qp_init_attr.cap.max_inline_data;
+	ret = ibv_modify_qp
+		(txq->qp,
+		 &(struct ibv_qp_attr){
+			.qp_state = IBV_QPS_INIT,
+			.port_num = priv->port,
+		 },
+		 IBV_QP_STATE | IBV_QP_PORT);
 	if (ret) {
 		rte_errno = ret;
 		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
 		      (void *)dev, strerror(rte_errno));
 		goto error;
 	}
-	ret = mlx4_txq_alloc_elts(&tmpl, desc);
+	ret = mlx4_txq_alloc_elts(txq, desc);
 	if (ret) {
 		ERROR("%p: TXQ allocation failed: %s",
 		      (void *)dev, strerror(rte_errno));
 		goto error;
 	}
-	attr.mod = (struct ibv_qp_attr){
-		.qp_state = IBV_QPS_RTR
-	};
-	ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
+	ret = ibv_modify_qp
+		(txq->qp,
+		 &(struct ibv_qp_attr){
+			.qp_state = IBV_QPS_RTR,
+		 },
+		 IBV_QP_STATE);
 	if (ret) {
 		rte_errno = ret;
 		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
 		      (void *)dev, strerror(rte_errno));
 		goto error;
 	}
-	attr.mod.qp_state = IBV_QPS_RTS;
-	ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
+	ret = ibv_modify_qp
+		(txq->qp,
+		 &(struct ibv_qp_attr){
+			.qp_state = IBV_QPS_RTS,
+		 },
+		 IBV_QP_STATE);
 	if (ret) {
 		rte_errno = ret;
 		ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
 		      (void *)dev, strerror(rte_errno));
 		goto error;
 	}
-	/* Clean up txq in case we're reinitializing it. */
-	DEBUG("%p: cleaning-up old txq just in case", (void *)txq);
-	mlx4_txq_cleanup(txq);
-	*txq = tmpl;
-	DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl);
 	/* Pre-register known mempools. */
 	rte_mempool_walk(mlx4_txq_mp2mr_iter, txq);
+	DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq);
+	dev->data->tx_queues[idx] = txq;
 	return 0;
 error:
+	dev->data->tx_queues[idx] = NULL;
 	ret = rte_errno;
-	mlx4_txq_cleanup(&tmpl);
+	mlx4_tx_queue_release(txq);
 	rte_errno = ret;
 	assert(rte_errno > 0);
 	return -rte_errno;
 }
 
 /**
- * DPDK callback to configure a Tx queue.
- *
- * @param dev
- *   Pointer to Ethernet device structure.
- * @param idx
- *   Tx queue index.
- * @param desc
- *   Number of descriptors to configure in queue.
- * @param socket
- *   NUMA socket on which memory must be allocated.
- * @param[in] conf
- *   Thresholds parameters.
- *
- * @return
- *   0 on success, negative errno value otherwise and rte_errno is set.
- */
-int
-mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
-		    unsigned int socket, const struct rte_eth_txconf *conf)
-{
-	struct priv *priv = dev->data->dev_private;
-	struct txq *txq = dev->data->tx_queues[idx];
-	int ret;
-
-	DEBUG("%p: configuring queue %u for %u descriptors",
-	      (void *)dev, idx, desc);
-	if (idx >= dev->data->nb_tx_queues) {
-		rte_errno = EOVERFLOW;
-		ERROR("%p: queue index out of range (%u >= %u)",
-		      (void *)dev, idx, dev->data->nb_tx_queues);
-		return -rte_errno;
-	}
-	if (txq != NULL) {
-		DEBUG("%p: reusing already allocated queue index %u (%p)",
-		      (void *)dev, idx, (void *)txq);
-		if (priv->started) {
-			rte_errno = EEXIST;
-			return -rte_errno;
-		}
-		dev->data->tx_queues[idx] = NULL;
-		mlx4_txq_cleanup(txq);
-	} else {
-		txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket);
-		if (txq == NULL) {
-			rte_errno = ENOMEM;
-			ERROR("%p: unable to allocate queue index %u",
-			      (void *)dev, idx);
-			return -rte_errno;
-		}
-	}
-	ret = mlx4_txq_setup(dev, txq, desc, socket, conf);
-	if (ret) {
-		rte_free(txq);
-	} else {
-		txq->stats.idx = idx;
-		DEBUG("%p: adding Tx queue %p to list",
-		      (void *)dev, (void *)txq);
-		dev->data->tx_queues[idx] = txq;
-	}
-	return ret;
-}
-
-/**
  * DPDK callback to release a Tx queue.
  *
  * @param dpdk_txq
@@ -464,6 +381,16 @@ mlx4_tx_queue_release(void *dpdk_txq)
 			priv->dev->data->tx_queues[i] = NULL;
 			break;
 		}
-	mlx4_txq_cleanup(txq);
+	mlx4_txq_free_elts(txq);
+	if (txq->qp)
+		claim_zero(ibv_destroy_qp(txq->qp));
+	if (txq->cq)
+		claim_zero(ibv_destroy_cq(txq->cq));
+	for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
+		if (!txq->mp2mr[i].mp)
+			break;
+		assert(txq->mp2mr[i].mr);
+		claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
+	}
 	rte_free(txq);
 }
-- 
2.1.4

Thread overview: 64+ messages
2017-10-11 14:35 [dpdk-dev] [PATCH v1 00/29] net/mlx4: restore PMD functionality Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 01/29] ethdev: expose flow API error helper Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 02/29] net/mlx4: replace bit-field type Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 03/29] net/mlx4: remove Rx QP initializer function Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 04/29] net/mlx4: enhance header files comments Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 05/29] net/mlx4: expose support for flow rule priorities Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 06/29] net/mlx4: clarify flow objects naming scheme Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 07/29] net/mlx4: tidy up flow rule handling code Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 08/29] net/mlx4: compact flow rule error reporting Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 09/29] mem: add iovec-like allocation wrappers Adrien Mazarguil
2017-10-11 21:58   ` Ferruh Yigit
2017-10-11 22:00     ` Ferruh Yigit
2017-10-12 11:07     ` Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 10/29] net/mlx4: merge flow creation and validation code Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 11/29] net/mlx4: allocate drop flow resources on demand Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 12/29] net/mlx4: relax check on missing flow rule target Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 13/29] net/mlx4: refactor internal flow rules Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 14/29] net/mlx4: generalize flow rule priority support Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 15/29] net/mlx4: simplify trigger code for flow rules Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 16/29] net/mlx4: refactor flow item validation code Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 17/29] net/mlx4: add MAC addresses configuration support Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 18/29] net/mlx4: add VLAN filter " Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 19/29] net/mlx4: add flow support for multicast traffic Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 20/29] net/mlx4: restore promisc and allmulti support Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 21/29] net/mlx4: update Rx/Tx callbacks consistently Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 22/29] net/mlx4: fix invalid errno value sign Adrien Mazarguil
2017-10-11 14:35 ` Adrien Mazarguil [this message]
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 24/29] net/mlx4: allocate queues and mbuf rings together Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 25/29] net/mlx4: convert Rx path to work queues Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 26/29] net/mlx4: remove unnecessary check Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 27/29] net/mlx4: add RSS flow rule action support Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 28/29] net/mlx4: disable UDP support in RSS flow rules Adrien Mazarguil
2017-10-11 14:35 ` [dpdk-dev] [PATCH v1 29/29] net/mlx4: add RSS support outside flow API Adrien Mazarguil
2017-10-12 12:19 ` [dpdk-dev] [PATCH v2 00/29] net/mlx4: restore PMD functionality Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 01/29] ethdev: expose flow API error helper Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 02/29] net/mlx4: replace bit-field type Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 03/29] net/mlx4: remove Rx QP initializer function Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 04/29] net/mlx4: enhance header files comments Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 05/29] net/mlx4: expose support for flow rule priorities Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 06/29] net/mlx4: clarify flow objects naming scheme Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 07/29] net/mlx4: tidy up flow rule handling code Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 08/29] net/mlx4: compact flow rule error reporting Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 09/29] net/mlx4: add iovec-like allocation wrappers Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 10/29] net/mlx4: merge flow creation and validation code Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 11/29] net/mlx4: allocate drop flow resources on demand Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 12/29] net/mlx4: relax check on missing flow rule target Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 13/29] net/mlx4: refactor internal flow rules Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 14/29] net/mlx4: generalize flow rule priority support Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 15/29] net/mlx4: simplify trigger code for flow rules Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 16/29] net/mlx4: refactor flow item validation code Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 17/29] net/mlx4: add MAC addresses configuration support Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 18/29] net/mlx4: add VLAN filter " Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 19/29] net/mlx4: add flow support for multicast traffic Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 20/29] net/mlx4: restore promisc and allmulti support Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 21/29] net/mlx4: update Rx/Tx callbacks consistently Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 22/29] net/mlx4: fix invalid errno value sign Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 23/29] net/mlx4: drop live queue reconfiguration support Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 24/29] net/mlx4: allocate queues and mbuf rings together Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 25/29] net/mlx4: convert Rx path to work queues Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 26/29] net/mlx4: remove unnecessary check Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 27/29] net/mlx4: add RSS flow rule action support Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 28/29] net/mlx4: disable UDP support in RSS flow rules Adrien Mazarguil
2017-10-12 12:19   ` [dpdk-dev] [PATCH v2 29/29] net/mlx4: add RSS support outside flow API Adrien Mazarguil
2017-10-12 19:12   ` [dpdk-dev] [PATCH v2 00/29] net/mlx4: restore PMD functionality Ferruh Yigit
