DPDK patches and discussions
* [PATCH] net/pcap: support MTU set
@ 2022-03-17 17:43 ido g
  2022-03-17 18:20 ` Stephen Hemminger
                   ` (8 more replies)
  0 siblings, 9 replies; 38+ messages in thread
From: ido g @ 2022-03-17 17:43 UTC
  To: Ferruh Yigit; +Cc: dev, ido g

Support rte_eth_dev_set_mtu() for pcap vdevs.
Enforce the MTU on the Rx and Tx paths.
For more details see https://bugs.dpdk.org/show_bug.cgi?id=961

Signed-off-by: ido g <ido@cgstowernetworks.com>
---
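A minimal usage sketch (not part of the patch itself) of what this change
enables from the application side; set_pcap_mtu() is an illustrative helper,
and the pcap vdev (e.g. --vdev=net_pcap0,iface=...) is assumed to be probed
already:

#include <stdio.h>
#include <rte_ethdev.h>

/* Set and read back the MTU on an already-probed pcap vdev. */
static int
set_pcap_mtu(uint16_t port_id, uint16_t new_mtu)
{
	uint16_t mtu;
	int ret;

	ret = rte_eth_dev_set_mtu(port_id, new_mtu);
	if (ret != 0)
		return ret; /* was -ENOTSUP for pcap before this patch */

	ret = rte_eth_dev_get_mtu(port_id, &mtu);
	if (ret != 0)
		return ret;

	printf("port %u MTU is now %u\n", port_id, mtu);
	return 0;
}
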
 drivers/net/pcap/pcap_ethdev.c | 34 +++++++++++++++++++++++++++++++---
 1 file changed, 31 insertions(+), 3 deletions(-)

diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index ec29fd6bc5..476d03c6b1 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -278,11 +278,12 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	const u_char *packet;
 	struct rte_mbuf *mbuf;
 	struct pcap_rx_queue *pcap_q = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[pcap_q->port_id];
 	uint16_t num_rx = 0;
 	uint32_t rx_bytes = 0;
 	pcap_t *pcap;
 
-	pp = rte_eth_devices[pcap_q->port_id].process_private;
+	pp = dev->process_private;
 	pcap = pp->rx_pcap[pcap_q->queue_id];
 
 	if (unlikely(pcap == NULL || nb_pkts == 0))
@@ -303,6 +304,12 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			break;
 		}
 
+		if (unlikely(header.caplen > dev->data->mtu)) {
+			pcap_q->rx_stat.err_pkts++;
+			rte_pktmbuf_free(mbuf);
+			break;
+		}
+
 		if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) {
 			/* pcap packet will fit in the mbuf, can copy it */
 			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
@@ -378,6 +385,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct rte_mbuf *mbuf;
 	struct pmd_process_private *pp;
 	struct pcap_tx_queue *dumper_q = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[dumper_q->port_id];
 	uint16_t num_tx = 0;
 	uint32_t tx_bytes = 0;
 	struct pcap_pkthdr header;
@@ -385,7 +393,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
 	size_t len, caplen;
 
-	pp = rte_eth_devices[dumper_q->port_id].process_private;
+	pp = dev->process_private;
 	dumper = pp->tx_dumper[dumper_q->queue_id];
 
 	if (dumper == NULL || nb_pkts == 0)
@@ -396,6 +404,12 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
 		len = caplen = rte_pktmbuf_pkt_len(mbuf);
+
+		if (unlikely(len > dev->data->mtu)) {
+			rte_pktmbuf_free(mbuf);
+			continue;
+		}
+
 		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
 				len > sizeof(temp_data))) {
 			caplen = sizeof(temp_data);
@@ -464,13 +478,14 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct rte_mbuf *mbuf;
 	struct pmd_process_private *pp;
 	struct pcap_tx_queue *tx_queue = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[tx_queue->port_id];
 	uint16_t num_tx = 0;
 	uint32_t tx_bytes = 0;
 	pcap_t *pcap;
 	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
 	size_t len;
 
-	pp = rte_eth_devices[tx_queue->port_id].process_private;
+	pp = dev->process_private;
 	pcap = pp->tx_pcap[tx_queue->queue_id];
 
 	if (unlikely(nb_pkts == 0 || pcap == NULL))
@@ -479,6 +494,12 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
 		len = rte_pktmbuf_pkt_len(mbuf);
+
+		if (unlikely(len > dev->data->mtu)) {
+			rte_pktmbuf_free(mbuf);
+			continue;
+		}
+
 		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
 				len > sizeof(temp_data))) {
 			PMD_LOG(ERR,
@@ -807,6 +828,12 @@ eth_stats_reset(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int eth_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	PMD_LOG(INFO, "mtu set %s %u\n", dev->device->name, mtu);
+	return 0;
+}
+
 static inline void
 infinite_rx_ring_free(struct rte_ring *pkts)
 {
@@ -1004,6 +1031,7 @@ static const struct eth_dev_ops ops = {
 	.link_update = eth_link_update,
 	.stats_get = eth_stats_get,
 	.stats_reset = eth_stats_reset,
+	.mtu_set = eth_mtu_set,
 };
 
 static int
-- 
2.17.1


* [PATCH v2] pcap: support MTU set
@ 2022-05-23 14:27 Ido Goshen
  2022-05-23 14:52 ` Ido Goshen
  0 siblings, 1 reply; 38+ messages in thread
From: Ido Goshen @ 2022-05-23 14:27 UTC
  To: Thomas Monjalon, Ferruh Yigit, Andrew Rybchenko; +Cc: dev, Ido Goshen

Support rte_eth_dev_set_mtu() for pcap vdevs.
Enforce the MTU on the Rx and Tx paths.

Bugzilla ID: 961
Signed-off-by: Ido Goshen <ido@cgstowernetworks.com>

---
v2:
preserve the existing pcap behavior of accepting maximum-size packets by default
---
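To illustrate the new default_mtu behavior (again a sketch, not part of the
submission itself): when rxmode.mtu is left at 0, rte_eth_dev_configure() now
falls back to dev_info.default_mtu instead of the fixed RTE_ETHER_MTU, and the
pcap PMD reports default_mtu equal to max_mtu, so maximum-size packets keep
passing unless the application lowers the MTU explicitly. show_mtu_limits()
below is an illustrative helper:

#include <stdio.h>
#include <rte_ethdev.h>

/* Print the MTU limits and the value a port defaults to when
 * rxmode.mtu is left at 0 in rte_eth_dev_configure().
 */
static void
show_mtu_limits(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;

	printf("port %u: min_mtu %u max_mtu %u default_mtu %u\n",
		port_id, dev_info.min_mtu, dev_info.max_mtu,
		dev_info.default_mtu);
}
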
 drivers/net/pcap/pcap_ethdev.c | 35 +++++++++++++++++++++++++++++++---
 lib/ethdev/rte_ethdev.c        |  3 ++-
 lib/ethdev/rte_ethdev.h        |  1 +
 3 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index ec29fd6bc5..7c5fd6dd98 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -278,11 +278,12 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	const u_char *packet;
 	struct rte_mbuf *mbuf;
 	struct pcap_rx_queue *pcap_q = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[pcap_q->port_id];
 	uint16_t num_rx = 0;
 	uint32_t rx_bytes = 0;
 	pcap_t *pcap;
 
-	pp = rte_eth_devices[pcap_q->port_id].process_private;
+	pp = dev->process_private;
 	pcap = pp->rx_pcap[pcap_q->queue_id];
 
 	if (unlikely(pcap == NULL || nb_pkts == 0))
@@ -303,6 +304,12 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			break;
 		}
 
+		if (unlikely(header.caplen > dev->data->mtu)) {
+			pcap_q->rx_stat.err_pkts++;
+			rte_pktmbuf_free(mbuf);
+			break;
+		}
+
 		if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) {
 			/* pcap packet will fit in the mbuf, can copy it */
 			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
@@ -378,6 +385,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct rte_mbuf *mbuf;
 	struct pmd_process_private *pp;
 	struct pcap_tx_queue *dumper_q = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[dumper_q->port_id];
 	uint16_t num_tx = 0;
 	uint32_t tx_bytes = 0;
 	struct pcap_pkthdr header;
@@ -385,7 +393,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
 	size_t len, caplen;
 
-	pp = rte_eth_devices[dumper_q->port_id].process_private;
+	pp = dev->process_private;
 	dumper = pp->tx_dumper[dumper_q->queue_id];
 
 	if (dumper == NULL || nb_pkts == 0)
@@ -396,6 +404,12 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
 		len = caplen = rte_pktmbuf_pkt_len(mbuf);
+
+		if (unlikely(len > dev->data->mtu)) {
+			rte_pktmbuf_free(mbuf);
+			continue;
+		}
+
 		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
 				len > sizeof(temp_data))) {
 			caplen = sizeof(temp_data);
@@ -464,13 +478,14 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct rte_mbuf *mbuf;
 	struct pmd_process_private *pp;
 	struct pcap_tx_queue *tx_queue = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[tx_queue->port_id];
 	uint16_t num_tx = 0;
 	uint32_t tx_bytes = 0;
 	pcap_t *pcap;
 	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
 	size_t len;
 
-	pp = rte_eth_devices[tx_queue->port_id].process_private;
+	pp = dev->process_private;
 	pcap = pp->tx_pcap[tx_queue->queue_id];
 
 	if (unlikely(nb_pkts == 0 || pcap == NULL))
@@ -479,6 +494,12 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
 		len = rte_pktmbuf_pkt_len(mbuf);
+
+		if (unlikely(len > dev->data->mtu)) {
+			rte_pktmbuf_free(mbuf);
+			continue;
+		}
+
 		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
 				len > sizeof(temp_data))) {
 			PMD_LOG(ERR,
@@ -737,6 +758,7 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = dev->data->nb_rx_queues;
 	dev_info->max_tx_queues = dev->data->nb_tx_queues;
 	dev_info->min_rx_bufsize = 0;
+	dev_info->default_mtu = dev_info->max_mtu;
 
 	return 0;
 }
@@ -807,6 +829,12 @@ eth_stats_reset(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int eth_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	PMD_LOG(INFO, "mtu set %s %u\n", dev->device->name, mtu);
+	return 0;
+}
+
 static inline void
 infinite_rx_ring_free(struct rte_ring *pkts)
 {
@@ -1004,6 +1032,7 @@ static const struct eth_dev_ops ops = {
 	.link_update = eth_link_update,
 	.stats_get = eth_stats_get,
 	.stats_reset = eth_stats_reset,
+	.mtu_set = eth_mtu_set,
 };
 
 static int
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 29a3d80466..75cb38a4b2 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -1186,7 +1186,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	}
 
 	if (dev_conf->rxmode.mtu == 0)
-		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
+		dev->data->dev_conf.rxmode.mtu = dev_info.default_mtu;
 
 	ret = eth_dev_validate_mtu(port_id, &dev_info,
 			dev->data->dev_conf.rxmode.mtu);
@@ -3126,6 +3126,7 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
 		RTE_ETHER_CRC_LEN;
 	dev_info->max_mtu = UINT16_MAX;
+	dev_info->default_mtu = RTE_ETHER_MTU;
 
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
 	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 04cff8ee10..8a2fd2f4ff 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -1861,6 +1861,7 @@ struct rte_eth_dev_info {
 		Use if_indextoname() to translate into an interface name. */
 	uint16_t min_mtu;	/**< Minimum MTU allowed */
 	uint16_t max_mtu;	/**< Maximum MTU allowed */
+	uint16_t default_mtu;
 	const uint32_t *dev_flags; /**< Device flags */
 	uint32_t min_rx_bufsize; /**< Minimum size of Rx buffer. */
 	uint32_t max_rx_pktlen; /**< Maximum configurable length of Rx pkt. */
-- 
2.17.1




Thread overview: 38+ messages
2022-03-17 17:43 [PATCH] net/pcap: support MTU set ido g
2022-03-17 18:20 ` Stephen Hemminger
2022-03-17 19:11   ` Ido Goshen
2022-03-22 13:02     ` Ido Goshen
2022-04-26 17:03       ` Ferruh Yigit
2022-04-27 18:21         ` Ido Goshen
2022-04-27 19:14           ` Stephen Hemminger
2022-05-23  7:48             ` Ido Goshen
2022-05-30 10:36 ` [PATCH v3] pcap: " Ido Goshen
2022-05-30 18:05   ` Ferruh Yigit
2022-05-31 13:12     ` Ido Goshen
2022-06-06  9:40     ` Ido Goshen
2022-06-06 16:21 ` [PATCH v4] " Ido Goshen
2022-06-06 17:10   ` Stephen Hemminger
2022-06-06 19:07     ` Ido Goshen
2022-06-07  6:27 ` [PATCH v5] pcap: support MTU set for linux interfaces Ido Goshen
2022-06-08 16:04   ` [PATCH v6] " Ido Goshen
2022-06-08 16:23     ` Stephen Hemminger
2022-06-19  9:30 ` [PATCH v7 0/3] pcap: support MTU set for linux interfaces Ido Goshen
2022-06-19  9:30   ` [PATCH v7 1/3] " Ido Goshen
2022-06-19  9:30   ` [PATCH v7 2/3] pcap: support MTU set for linux interfaces TX enhancement Ido Goshen
2022-06-20 22:52     ` Stephen Hemminger
2022-06-21  9:07       ` Ido Goshen
2022-06-19  9:30   ` [PATCH v7 3/3] pcap: support MTU set for linux interfaces count ierrors Ido Goshen
2022-06-20  8:39 ` [PATCH v8 0/3] pcap: support MTU set for linux interfaces Ido Goshen
2022-06-20  8:39   ` [PATCH v8 1/3] " Ido Goshen
2022-06-20  8:39   ` [PATCH v8 2/3] pcap: support MTU set for linux interfaces TX enhancement Ido Goshen
2022-06-20  8:39   ` [PATCH v8 3/3] pcap: support MTU set for linux interfaces count ierrors Ido Goshen
2023-07-04 17:43 ` [PATCH] pcap: support MTU set Stephen Hemminger
2023-07-04 21:02 ` [PATCH v2] " Stephen Hemminger
2023-07-05 11:37   ` Ferruh Yigit
2023-07-05 15:18     ` Stephen Hemminger
2023-07-06 10:45       ` Ido Goshen
2023-07-10 16:45 ` [PATCH] net/pcap: " Stephen Hemminger
2023-07-10 17:46   ` Ferruh Yigit
2023-07-11  9:41     ` Ido Goshen
2022-05-23 14:27 [PATCH v2] pcap: " Ido Goshen
2022-05-23 14:52 ` Ido Goshen
