DPDK patches and discussions
* [PATCH] net/cnxk: flush SQ before configuring MTU
@ 2023-06-15  5:04 skoteshwar
  2023-06-15  8:50 ` Jerin Jacob
  0 siblings, 1 reply; 2+ messages in thread
From: skoteshwar @ 2023-06-15  5:04 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Satha Rao <skoteshwar@marvell.com>

Configuring the MTU to a lower value while larger packets are still
enqueued causes run-time failures. To avoid error interrupts, flush
all SQs of the port before applying the new MTU.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.h     |  1 +
 drivers/net/cnxk/cnxk_ethdev_ops.c | 47 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+)

diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index e280d6c..45460ae 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -446,6 +446,7 @@ int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
 		   struct rte_pci_device *pci_dev);
 int cnxk_nix_remove(struct rte_pci_device *pci_dev);
 int cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
+int cnxk_nix_sq_flush(struct rte_eth_dev *eth_dev);
 int cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 				    struct rte_ether_addr *mc_addr_set,
 				    uint32_t nb_mc_addr);
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index bce6d59..da5ee19 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -496,6 +496,44 @@
 }
 
 int
+cnxk_nix_sq_flush(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	int i, rc = 0;
+
+	/* Flush all tx queues */
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		struct roc_nix_sq *sq = &dev->sqs[i];
+
+		if (eth_dev->data->tx_queues[i] == NULL)
+			continue;
+
+		rc = roc_nix_tm_sq_aura_fc(sq, false);
+		if (rc) {
+			plt_err("Failed to disable sqb aura fc, rc=%d", rc);
+			goto exit;
+		}
+
+		/* Wait for sq entries to be flushed */
+		rc = roc_nix_tm_sq_flush_spin(sq);
+		if (rc) {
+			plt_err("Failed to drain sq, rc=%d\n", rc);
+			goto exit;
+		}
+		if (data->tx_queue_state[i] == RTE_ETH_QUEUE_STATE_STARTED) {
+			rc = roc_nix_tm_sq_aura_fc(sq, true);
+			if (rc) {
+				plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", i, rc);
+				goto exit;
+			}
+		}
+	}
+exit:
+	return rc;
+}
+
+int
 cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 {
 	uint32_t old_frame_size, frame_size = mtu + CNXK_NIX_L2_OVERHEAD;
@@ -538,6 +576,15 @@
 		goto exit;
 	}
 
+	/* If the new MTU is smaller than the old one, flush all SQs before changing it */
+	if (old_frame_size > frame_size) {
+		if (data->dev_started) {
+			plt_err("Reducing MTU is not supported when device started");
+			goto exit;
+		}
+		cnxk_nix_sq_flush(eth_dev);
+	}
+
 	frame_size -= RTE_ETHER_CRC_LEN;
 
 	/* Update mtu on Tx */
-- 
1.8.3.1
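
For context, a minimal application-side sketch of the sequence this change assumes
when reducing the MTU: with the patch, the PMD refuses to lower the MTU while the
device is started and flushes the SQs before applying the smaller value, so an
application stops the port, sets the MTU, then restarts it. The helper name and the
exact call sequence below are illustrative assumptions rather than part of the
patch; the ethdev calls themselves are the standard DPDK API.

#include <rte_ethdev.h>

/* Illustrative sketch (not from the patch): lower the MTU of a port.
 * The cnxk PMD refuses to reduce the MTU while the device is started,
 * so the port is stopped first and restarted afterwards.
 */
static int
reduce_mtu(uint16_t port_id, uint16_t new_mtu)
{
	int rc;

	rc = rte_eth_dev_stop(port_id);		/* quiesce Rx/Tx */
	if (rc != 0)
		return rc;

	rc = rte_eth_dev_set_mtu(port_id, new_mtu);	/* ends up in cnxk_nix_mtu_set() */
	if (rc != 0)
		return rc;

	return rte_eth_dev_start(port_id);	/* resume with the new MTU */
}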



* Re: [PATCH] net/cnxk: flush SQ before configuring MTU
  2023-06-15  5:04 [PATCH] net/cnxk: flush SQ before configuring MTU skoteshwar
@ 2023-06-15  8:50 ` Jerin Jacob
  0 siblings, 0 replies; 2+ messages in thread
From: Jerin Jacob @ 2023-06-15  8:50 UTC (permalink / raw)
  To: skoteshwar; +Cc: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, dev

On Thu, Jun 15, 2023 at 10:34 AM <skoteshwar@marvell.com> wrote:
>
> From: Satha Rao <skoteshwar@marvell.com>
>
> Configuring the MTU to a lower value while larger packets are still
> enqueued causes run-time failures. To avoid error interrupts, flush
> all SQs of the port before applying the new MTU.

Added
    Fixes: 8589ec212e80 ("net/cnxk: support MTU set")
    Cc: stable@dpdk.org


Applied to dpdk-next-net-mrvl/for-next-net. Thanks

>
> Signed-off-by: Satha Rao <skoteshwar@marvell.com>
> ---
>  drivers/net/cnxk/cnxk_ethdev.h     |  1 +
>  drivers/net/cnxk/cnxk_ethdev_ops.c | 47 ++++++++++++++++++++++++++++++++++++++
>  2 files changed, 48 insertions(+)
>
> diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
> index e280d6c..45460ae 100644
> --- a/drivers/net/cnxk/cnxk_ethdev.h
> +++ b/drivers/net/cnxk/cnxk_ethdev.h
> @@ -446,6 +446,7 @@ int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
>                    struct rte_pci_device *pci_dev);
>  int cnxk_nix_remove(struct rte_pci_device *pci_dev);
>  int cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
> +int cnxk_nix_sq_flush(struct rte_eth_dev *eth_dev);
>  int cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
>                                     struct rte_ether_addr *mc_addr_set,
>                                     uint32_t nb_mc_addr);
> diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
> index bce6d59..da5ee19 100644
> --- a/drivers/net/cnxk/cnxk_ethdev_ops.c
> +++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
> @@ -496,6 +496,44 @@
>  }
>
>  int
> +cnxk_nix_sq_flush(struct rte_eth_dev *eth_dev)
> +{
> +       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
> +       struct rte_eth_dev_data *data = eth_dev->data;
> +       int i, rc = 0;
> +
> +       /* Flush all tx queues */
> +       for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
> +               struct roc_nix_sq *sq = &dev->sqs[i];
> +
> +               if (eth_dev->data->tx_queues[i] == NULL)
> +                       continue;
> +
> +               rc = roc_nix_tm_sq_aura_fc(sq, false);
> +               if (rc) {
> +                       plt_err("Failed to disable sqb aura fc, rc=%d", rc);
> +                       goto exit;
> +               }
> +
> +               /* Wait for sq entries to be flushed */
> +               rc = roc_nix_tm_sq_flush_spin(sq);
> +               if (rc) {
> +                       plt_err("Failed to drain sq, rc=%d\n", rc);
> +                       goto exit;
> +               }
> +               if (data->tx_queue_state[i] == RTE_ETH_QUEUE_STATE_STARTED) {
> +                       rc = roc_nix_tm_sq_aura_fc(sq, true);
> +                       if (rc) {
> +                               plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", i, rc);
> +                               goto exit;
> +                       }
> +               }
> +       }
> +exit:
> +       return rc;
> +}
> +
> +int
>  cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
>  {
>         uint32_t old_frame_size, frame_size = mtu + CNXK_NIX_L2_OVERHEAD;
> @@ -538,6 +576,15 @@
>                 goto exit;
>         }
>
> +       /* If the new MTU is smaller than the old one, flush all SQs before changing it */
> +       if (old_frame_size > frame_size) {
> +               if (data->dev_started) {
> +                       plt_err("Reducing MTU is not supported when device started");
> +                       goto exit;
> +               }
> +               cnxk_nix_sq_flush(eth_dev);
> +       }
> +
>         frame_size -= RTE_ETHER_CRC_LEN;
>
>         /* Update mtu on Tx */
> --
> 1.8.3.1
>


