DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH] net/mlx5: fix MTU update
@ 2017-08-01  7:55 Nelio Laranjeiro
  2017-08-01 10:11 ` Adrien Mazarguil
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Nelio Laranjeiro @ 2017-08-01  7:55 UTC (permalink / raw)
  To: dev; +Cc: Yongseok Koh, stable

Changing the MTU is not related to changing the number of segments;
activating or not the multi-segment support should be handled by the
application.

Fixes: 9964b965ad69 ("net/mlx5: re-add Rx scatter support")
Cc: stable@dpdk.org

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
---
 drivers/net/mlx5/mlx5_ethdev.c | 127 ++++++-----------------------------------
 drivers/net/mlx5/mlx5_rxq.c    |  67 ----------------------
 drivers/net/mlx5/mlx5_rxtx.h   |   1 -
 3 files changed, 16 insertions(+), 179 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 08cc814..b0eb3cd 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -924,12 +924,6 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 /**
  * DPDK callback to change the MTU.
  *
- * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
- * received). Use this as a hint to enable/disable scattered packets support
- * and improve performance when not needed.
- * Since failure is not an option, reconfiguring queues on the fly is not
- * recommended.
- *
  * @param dev
  *   Pointer to Ethernet device structure.
  * @param in_mtu
@@ -942,122 +936,33 @@ int
 mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	struct priv *priv = dev->data->dev_private;
+	uint16_t kern_mtu;
 	int ret = 0;
-	unsigned int i;
-	unsigned int max_frame_len;
-	int rehash;
-	int restart = priv->started;
 
 	if (mlx5_is_secondary())
 		return -E_RTE_SECONDARY;
 
 	priv_lock(priv);
+	ret = priv_get_mtu(priv, &kern_mtu);
+	if (ret)
+		goto out;
 	/* Set kernel interface MTU first. */
-	if (priv_set_mtu(priv, mtu)) {
-		ret = errno;
-		WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
-		     strerror(ret));
+	ret = priv_set_mtu(priv, mtu);
+	if (ret)
 		goto out;
-	} else
+	ret = priv_get_mtu(priv, &kern_mtu);
+	if (ret)
+		goto out;
+	if (kern_mtu == mtu) {
+		priv->mtu = mtu;
 		DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
-	/* Temporarily replace RX handler with a fake one, assuming it has not
-	 * been copied elsewhere. */
-	dev->rx_pkt_burst = removed_rx_burst;
-	/* Make sure everyone has left dev->rx_pkt_burst() and uses
-	 * removed_rx_burst() instead. */
-	rte_wmb();
-	usleep(1000);
-	/* MTU does not include header and CRC. */
-	max_frame_len = ETHER_HDR_LEN + mtu + ETHER_CRC_LEN;
-	/* Check if at least one queue is going to need a SGE update. */
-	for (i = 0; i != priv->rxqs_n; ++i) {
-		struct rxq *rxq = (*priv->rxqs)[i];
-		unsigned int mb_len;
-		unsigned int size = RTE_PKTMBUF_HEADROOM + max_frame_len;
-		unsigned int sges_n;
-
-		if (rxq == NULL)
-			continue;
-		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
-		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
-		/*
-		 * Determine the number of SGEs needed for a full packet
-		 * and round it to the next power of two.
-		 */
-		sges_n = log2above((size / mb_len) + !!(size % mb_len));
-		if (sges_n != rxq->sges_n)
-			break;
 	}
-	/*
-	 * If all queues have the right number of SGEs, a simple rehash
-	 * of their buffers is enough, otherwise SGE information can only
-	 * be updated in a queue by recreating it. All resources that depend
-	 * on queues (flows, indirection tables) must be recreated as well in
-	 * that case.
-	 */
-	rehash = (i == priv->rxqs_n);
-	if (!rehash) {
-		/* Clean up everything as with mlx5_dev_stop(). */
-		priv_special_flow_disable_all(priv);
-		priv_mac_addrs_disable(priv);
-		priv_destroy_hash_rxqs(priv);
-		priv_fdir_disable(priv);
-		priv_dev_interrupt_handler_uninstall(priv, dev);
-	}
-recover:
-	/* Reconfigure each RX queue. */
-	for (i = 0; (i != priv->rxqs_n); ++i) {
-		struct rxq *rxq = (*priv->rxqs)[i];
-		struct rxq_ctrl *rxq_ctrl =
-			container_of(rxq, struct rxq_ctrl, rxq);
-		unsigned int mb_len;
-		unsigned int tmp;
-
-		if (rxq == NULL)
-			continue;
-		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
-		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
-		/* Provide new values to rxq_setup(). */
-		dev->data->dev_conf.rxmode.jumbo_frame =
-			(max_frame_len > ETHER_MAX_LEN);
-		dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
-		if (rehash)
-			ret = rxq_rehash(dev, rxq_ctrl);
-		else
-			ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
-					     rxq_ctrl->socket, NULL, rxq->mp);
-		if (!ret)
-			continue;
-		/* Attempt to roll back in case of error. */
-		tmp = (mb_len << rxq->sges_n) - RTE_PKTMBUF_HEADROOM;
-		if (max_frame_len != tmp) {
-			max_frame_len = tmp;
-			goto recover;
-		}
-		/* Double fault, disable RX. */
-		break;
-	}
-	/* Mimic mlx5_dev_start(). */
-	if (ret) {
-		ERROR("unable to reconfigure RX queues, RX disabled");
-	} else if (restart &&
-		   !rehash &&
-		   !priv_create_hash_rxqs(priv) &&
-		   !priv_rehash_flows(priv)) {
-		if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
-			priv_fdir_enable(priv);
-		priv_dev_interrupt_handler_install(priv, dev);
-	}
-	priv->mtu = mtu;
-	/* Burst functions can now be called again. */
-	rte_wmb();
-	/*
-	 * Use a safe RX burst function in case of error, otherwise select RX
-	 * burst function again.
-	 */
-	if (!ret)
-		priv_select_rx_function(priv);
+	priv_unlock(priv);
+	return 0;
 out:
+	ret = errno;
+	WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
+	     strerror(ret));
 	priv_unlock(priv);
 	assert(ret >= 0);
 	return -ret;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 34ec95b..118a2d9 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -797,73 +797,6 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
 }
 
 /**
- * Reconfigure RX queue buffers.
- *
- * rxq_rehash() does not allocate mbufs, which, if not done from the right
- * thread (such as a control thread), may corrupt the pool.
- * In case of failure, the queue is left untouched.
- *
- * @param dev
- *   Pointer to Ethernet device structure.
- * @param rxq_ctrl
- *   RX queue pointer.
- *
- * @return
- *   0 on success, errno value on failure.
- */
-int
-rxq_rehash(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl)
-{
-	unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
-	unsigned int i;
-	struct ibv_exp_wq_attr mod;
-	int err;
-
-	DEBUG("%p: rehashing queue %p with %u SGE(s) per packet",
-	      (void *)dev, (void *)rxq_ctrl, 1 << rxq_ctrl->rxq.sges_n);
-	assert(!(elts_n % (1 << rxq_ctrl->rxq.sges_n)));
-	/* From now on, any failure will render the queue unusable.
-	 * Reinitialize WQ. */
-	mod = (struct ibv_exp_wq_attr){
-		.attr_mask = IBV_EXP_WQ_ATTR_STATE,
-		.wq_state = IBV_EXP_WQS_RESET,
-	};
-	err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
-	if (err) {
-		ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err));
-		assert(err > 0);
-		return err;
-	}
-	/* Snatch mbufs from original queue. */
-	claim_zero(rxq_trim_elts(&rxq_ctrl->rxq));
-	claim_zero(rxq_alloc_elts(rxq_ctrl, elts_n, rxq_ctrl->rxq.elts));
-	for (i = 0; i != elts_n; ++i) {
-		struct rte_mbuf *buf = (*rxq_ctrl->rxq.elts)[i];
-
-		assert(rte_mbuf_refcnt_read(buf) == 2);
-		rte_pktmbuf_free_seg(buf);
-	}
-	/* Change queue state to ready. */
-	mod = (struct ibv_exp_wq_attr){
-		.attr_mask = IBV_EXP_WQ_ATTR_STATE,
-		.wq_state = IBV_EXP_WQS_RDY,
-	};
-	err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
-	if (err) {
-		ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
-		      (void *)dev, strerror(err));
-		goto error;
-	}
-	/* Update doorbell counter. */
-	rxq_ctrl->rxq.rq_ci = elts_n >> rxq_ctrl->rxq.sges_n;
-	rte_wmb();
-	*rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
-error:
-	assert(err >= 0);
-	return err;
-}
-
-/**
  * Initialize RX queue.
  *
  * @param tmpl
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 02184ae..eda8b00 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -307,7 +307,6 @@ void priv_destroy_hash_rxqs(struct priv *);
 int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
 int priv_rehash_flows(struct priv *);
 void rxq_cleanup(struct rxq_ctrl *);
-int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *);
 int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t,
 		   unsigned int, const struct rte_eth_rxconf *,
 		   struct rte_mempool *);
-- 
2.1.4

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [dpdk-dev] [PATCH] net/mlx5: fix MTU update
  2017-08-01  7:55 [dpdk-dev] [PATCH] net/mlx5: fix MTU update Nelio Laranjeiro
@ 2017-08-01 10:11 ` Adrien Mazarguil
  2017-08-01 11:43   ` Nélio Laranjeiro
  2017-08-02 23:13 ` Yongseok Koh
  2017-08-03  9:31 ` [dpdk-dev] [PATCH v2] " Nelio Laranjeiro
  2 siblings, 1 reply; 6+ messages in thread
From: Adrien Mazarguil @ 2017-08-01 10:11 UTC (permalink / raw)
  To: Nelio Laranjeiro; +Cc: dev, Yongseok Koh, stable

On Tue, Aug 01, 2017 at 09:55:57AM +0200, Nelio Laranjeiro wrote:
> Changing the MTU is not related to changing the number of segments,
> activating or not the multi-segment support should be handled by the
> application.
> 
> Fixes: 9964b965ad69 ("net/mlx5: re-add Rx scatter support")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> Acked-by: Yongseok Koh <yskoh@mellanox.com>

Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>

Shouldn't this fix target an even older commit?

 cf37ca9563d5 ("mlx5: support MTU configuration")

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [dpdk-dev] [PATCH] net/mlx5: fix MTU update
  2017-08-01 10:11 ` Adrien Mazarguil
@ 2017-08-01 11:43   ` Nélio Laranjeiro
  0 siblings, 0 replies; 6+ messages in thread
From: Nélio Laranjeiro @ 2017-08-01 11:43 UTC (permalink / raw)
  To: Adrien Mazarguil; +Cc: dev, Yongseok Koh, stable

On Tue, Aug 01, 2017 at 12:11:16PM +0200, Adrien Mazarguil wrote:
> On Tue, Aug 01, 2017 at 09:55:57AM +0200, Nelio Laranjeiro wrote:
> > Changing the MTU is not related to changing the number of segments,
> > activating or not the multi-segment support should be handled by the
> > application.
> > 
> > Fixes: 9964b965ad69 ("net/mlx5: re-add Rx scatter support")
> > Cc: stable@dpdk.org
> > 
> > Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> > Acked-by: Yongseok Koh <yskoh@mellanox.com>
> 
> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> 
> Shouldn't this fix target an even older commit?
> 
>  cf37ca9563d5 ("mlx5: support MTU configuration")

Right,

-- 
Nélio Laranjeiro
6WIND

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [dpdk-dev] [PATCH] net/mlx5: fix MTU update
  2017-08-01  7:55 [dpdk-dev] [PATCH] net/mlx5: fix MTU update Nelio Laranjeiro
  2017-08-01 10:11 ` Adrien Mazarguil
@ 2017-08-02 23:13 ` Yongseok Koh
  2017-08-03  9:31 ` [dpdk-dev] [PATCH v2] " Nelio Laranjeiro
  2 siblings, 0 replies; 6+ messages in thread
From: Yongseok Koh @ 2017-08-02 23:13 UTC (permalink / raw)
  To: Nélio Laranjeiro; +Cc: dev, stable


> On Aug 1, 2017, at 12:55 AM, Nelio Laranjeiro <nelio.laranjeiro@6wind.com> wrote:
> 
> Changing the MTU is not related to changing the number of segments,
> activating or not the multi-segment support should be handled by the
> application.
> 
> Fixes: 9964b965ad69 ("net/mlx5: re-add Rx scatter support")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> Acked-by: Yongseok Koh <yskoh@mellanox.com>
One small comment although I had already acked.
I think rxq_ctrl_setup() can be static now; it doesn't need to be exposed, does it?

Thanks,
Yongseok

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [dpdk-dev] [PATCH v2] net/mlx5: fix MTU update
  2017-08-01  7:55 [dpdk-dev] [PATCH] net/mlx5: fix MTU update Nelio Laranjeiro
  2017-08-01 10:11 ` Adrien Mazarguil
  2017-08-02 23:13 ` Yongseok Koh
@ 2017-08-03  9:31 ` Nelio Laranjeiro
  2017-08-03 21:14   ` [dpdk-dev] [dpdk-stable] " Thomas Monjalon
  2 siblings, 1 reply; 6+ messages in thread
From: Nelio Laranjeiro @ 2017-08-03  9:31 UTC (permalink / raw)
  To: dev; +Cc: Yongseok Koh, stable

Changing the MTU is not related to changing the number of segments;
activating or not the multi-segment support should be handled by the
application.

Fixes: 9964b965ad69 ("net/mlx5: re-add Rx scatter support")
Cc: stable@dpdk.org

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>

---

Changes in v2:

 change rxq_ctrl_setup() to be static.
---
 drivers/net/mlx5/mlx5_ethdev.c | 127 ++++++-----------------------------------
 drivers/net/mlx5/mlx5_rxq.c    |  69 +---------------------
 drivers/net/mlx5/mlx5_rxtx.h   |   4 --
 3 files changed, 17 insertions(+), 183 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 08cc814..b0eb3cd 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -924,12 +924,6 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 /**
  * DPDK callback to change the MTU.
  *
- * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
- * received). Use this as a hint to enable/disable scattered packets support
- * and improve performance when not needed.
- * Since failure is not an option, reconfiguring queues on the fly is not
- * recommended.
- *
  * @param dev
  *   Pointer to Ethernet device structure.
  * @param in_mtu
@@ -942,122 +936,33 @@ int
 mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	struct priv *priv = dev->data->dev_private;
+	uint16_t kern_mtu;
 	int ret = 0;
-	unsigned int i;
-	unsigned int max_frame_len;
-	int rehash;
-	int restart = priv->started;
 
 	if (mlx5_is_secondary())
 		return -E_RTE_SECONDARY;
 
 	priv_lock(priv);
+	ret = priv_get_mtu(priv, &kern_mtu);
+	if (ret)
+		goto out;
 	/* Set kernel interface MTU first. */
-	if (priv_set_mtu(priv, mtu)) {
-		ret = errno;
-		WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
-		     strerror(ret));
+	ret = priv_set_mtu(priv, mtu);
+	if (ret)
 		goto out;
-	} else
+	ret = priv_get_mtu(priv, &kern_mtu);
+	if (ret)
+		goto out;
+	if (kern_mtu == mtu) {
+		priv->mtu = mtu;
 		DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
-	/* Temporarily replace RX handler with a fake one, assuming it has not
-	 * been copied elsewhere. */
-	dev->rx_pkt_burst = removed_rx_burst;
-	/* Make sure everyone has left dev->rx_pkt_burst() and uses
-	 * removed_rx_burst() instead. */
-	rte_wmb();
-	usleep(1000);
-	/* MTU does not include header and CRC. */
-	max_frame_len = ETHER_HDR_LEN + mtu + ETHER_CRC_LEN;
-	/* Check if at least one queue is going to need a SGE update. */
-	for (i = 0; i != priv->rxqs_n; ++i) {
-		struct rxq *rxq = (*priv->rxqs)[i];
-		unsigned int mb_len;
-		unsigned int size = RTE_PKTMBUF_HEADROOM + max_frame_len;
-		unsigned int sges_n;
-
-		if (rxq == NULL)
-			continue;
-		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
-		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
-		/*
-		 * Determine the number of SGEs needed for a full packet
-		 * and round it to the next power of two.
-		 */
-		sges_n = log2above((size / mb_len) + !!(size % mb_len));
-		if (sges_n != rxq->sges_n)
-			break;
 	}
-	/*
-	 * If all queues have the right number of SGEs, a simple rehash
-	 * of their buffers is enough, otherwise SGE information can only
-	 * be updated in a queue by recreating it. All resources that depend
-	 * on queues (flows, indirection tables) must be recreated as well in
-	 * that case.
-	 */
-	rehash = (i == priv->rxqs_n);
-	if (!rehash) {
-		/* Clean up everything as with mlx5_dev_stop(). */
-		priv_special_flow_disable_all(priv);
-		priv_mac_addrs_disable(priv);
-		priv_destroy_hash_rxqs(priv);
-		priv_fdir_disable(priv);
-		priv_dev_interrupt_handler_uninstall(priv, dev);
-	}
-recover:
-	/* Reconfigure each RX queue. */
-	for (i = 0; (i != priv->rxqs_n); ++i) {
-		struct rxq *rxq = (*priv->rxqs)[i];
-		struct rxq_ctrl *rxq_ctrl =
-			container_of(rxq, struct rxq_ctrl, rxq);
-		unsigned int mb_len;
-		unsigned int tmp;
-
-		if (rxq == NULL)
-			continue;
-		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
-		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
-		/* Provide new values to rxq_setup(). */
-		dev->data->dev_conf.rxmode.jumbo_frame =
-			(max_frame_len > ETHER_MAX_LEN);
-		dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
-		if (rehash)
-			ret = rxq_rehash(dev, rxq_ctrl);
-		else
-			ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
-					     rxq_ctrl->socket, NULL, rxq->mp);
-		if (!ret)
-			continue;
-		/* Attempt to roll back in case of error. */
-		tmp = (mb_len << rxq->sges_n) - RTE_PKTMBUF_HEADROOM;
-		if (max_frame_len != tmp) {
-			max_frame_len = tmp;
-			goto recover;
-		}
-		/* Double fault, disable RX. */
-		break;
-	}
-	/* Mimic mlx5_dev_start(). */
-	if (ret) {
-		ERROR("unable to reconfigure RX queues, RX disabled");
-	} else if (restart &&
-		   !rehash &&
-		   !priv_create_hash_rxqs(priv) &&
-		   !priv_rehash_flows(priv)) {
-		if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
-			priv_fdir_enable(priv);
-		priv_dev_interrupt_handler_install(priv, dev);
-	}
-	priv->mtu = mtu;
-	/* Burst functions can now be called again. */
-	rte_wmb();
-	/*
-	 * Use a safe RX burst function in case of error, otherwise select RX
-	 * burst function again.
-	 */
-	if (!ret)
-		priv_select_rx_function(priv);
+	priv_unlock(priv);
+	return 0;
 out:
+	ret = errno;
+	WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
+	     strerror(ret));
 	priv_unlock(priv);
 	assert(ret >= 0);
 	return -ret;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 34ec95b..a82c415 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -797,73 +797,6 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
 }
 
 /**
- * Reconfigure RX queue buffers.
- *
- * rxq_rehash() does not allocate mbufs, which, if not done from the right
- * thread (such as a control thread), may corrupt the pool.
- * In case of failure, the queue is left untouched.
- *
- * @param dev
- *   Pointer to Ethernet device structure.
- * @param rxq_ctrl
- *   RX queue pointer.
- *
- * @return
- *   0 on success, errno value on failure.
- */
-int
-rxq_rehash(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl)
-{
-	unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
-	unsigned int i;
-	struct ibv_exp_wq_attr mod;
-	int err;
-
-	DEBUG("%p: rehashing queue %p with %u SGE(s) per packet",
-	      (void *)dev, (void *)rxq_ctrl, 1 << rxq_ctrl->rxq.sges_n);
-	assert(!(elts_n % (1 << rxq_ctrl->rxq.sges_n)));
-	/* From now on, any failure will render the queue unusable.
-	 * Reinitialize WQ. */
-	mod = (struct ibv_exp_wq_attr){
-		.attr_mask = IBV_EXP_WQ_ATTR_STATE,
-		.wq_state = IBV_EXP_WQS_RESET,
-	};
-	err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
-	if (err) {
-		ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err));
-		assert(err > 0);
-		return err;
-	}
-	/* Snatch mbufs from original queue. */
-	claim_zero(rxq_trim_elts(&rxq_ctrl->rxq));
-	claim_zero(rxq_alloc_elts(rxq_ctrl, elts_n, rxq_ctrl->rxq.elts));
-	for (i = 0; i != elts_n; ++i) {
-		struct rte_mbuf *buf = (*rxq_ctrl->rxq.elts)[i];
-
-		assert(rte_mbuf_refcnt_read(buf) == 2);
-		rte_pktmbuf_free_seg(buf);
-	}
-	/* Change queue state to ready. */
-	mod = (struct ibv_exp_wq_attr){
-		.attr_mask = IBV_EXP_WQ_ATTR_STATE,
-		.wq_state = IBV_EXP_WQS_RDY,
-	};
-	err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
-	if (err) {
-		ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
-		      (void *)dev, strerror(err));
-		goto error;
-	}
-	/* Update doorbell counter. */
-	rxq_ctrl->rxq.rq_ci = elts_n >> rxq_ctrl->rxq.sges_n;
-	rte_wmb();
-	*rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
-error:
-	assert(err >= 0);
-	return err;
-}
-
-/**
  * Initialize RX queue.
  *
  * @param tmpl
@@ -927,7 +860,7 @@ rxq_setup(struct rxq_ctrl *tmpl)
  * @return
  *   0 on success, errno value on failure.
  */
-int
+static int
 rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
 	       uint16_t desc, unsigned int socket,
 	       const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 02184ae..7de1d10 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -307,10 +307,6 @@ void priv_destroy_hash_rxqs(struct priv *);
 int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
 int priv_rehash_flows(struct priv *);
 void rxq_cleanup(struct rxq_ctrl *);
-int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *);
-int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t,
-		   unsigned int, const struct rte_eth_rxconf *,
-		   struct rte_mempool *);
 int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
 			const struct rte_eth_rxconf *, struct rte_mempool *);
 void mlx5_rx_queue_release(void *);
-- 
2.1.4

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [dpdk-dev] [dpdk-stable] [PATCH v2] net/mlx5: fix MTU update
  2017-08-03  9:31 ` [dpdk-dev] [PATCH v2] " Nelio Laranjeiro
@ 2017-08-03 21:14   ` Thomas Monjalon
  0 siblings, 0 replies; 6+ messages in thread
From: Thomas Monjalon @ 2017-08-03 21:14 UTC (permalink / raw)
  To: Nelio Laranjeiro; +Cc: stable, dev, Yongseok Koh

03/08/2017 11:31, Nelio Laranjeiro:
> Changing the MTU is not related to changing the number of segments,
> activating or not the multi-segment support should be handled by the
> application.
> 
> Fixes: 9964b965ad69 ("net/mlx5: re-add Rx scatter support")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> Acked-by: Yongseok Koh <yskoh@mellanox.com>

Applied, thanks

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2017-08-03 21:14 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-08-01  7:55 [dpdk-dev] [PATCH] net/mlx5: fix MTU update Nelio Laranjeiro
2017-08-01 10:11 ` Adrien Mazarguil
2017-08-01 11:43   ` Nélio Laranjeiro
2017-08-02 23:13 ` Yongseok Koh
2017-08-03  9:31 ` [dpdk-dev] [PATCH v2] " Nelio Laranjeiro
2017-08-03 21:14   ` [dpdk-dev] [dpdk-stable] " Thomas Monjalon

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).