From: Ido Goshen <ido@cgstowernetworks.com>
To: Thomas Monjalon <thomas@monjalon.net>,
	Ferruh Yigit <ferruh.yigit@xilinx.com>,
	Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Cc: dev@dpdk.org, Ido Goshen <ido@cgstowernetworks.com>
Subject: [PATCH v2] pcap: support MTU set
Date: Mon, 23 May 2022 17:27:05 +0300
Message-ID: <20220523142705.60326-1-ido@cgstowernetworks.com>

Support rte_eth_dev_set_mtu() for pcap vdevs.
Enforce the configured MTU on the Rx and Tx paths by dropping
packets that exceed it.

Bugzilla ID: 961
Signed-off-by: Ido Goshen <ido@cgstowernetworks.com>

---
v2:
preserve pcap behavior of supporting maximum-size packets by default
---
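
For reviewers, a minimal usage sketch (illustrative only, not part of the
patch): an application lowers the MTU on a pcap vdev and relies on the PMD
to drop oversized packets on Rx/Tx. The shrink_mtu() helper name, a port
created with something like --vdev=net_pcap0,iface=lo and the 1400 value
are assumptions; error handling and queue setup are trimmed.

#include <errno.h>
#include <rte_ethdev.h>

static int
shrink_mtu(uint16_t port_id)
{
	struct rte_eth_dev_info info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;

	/* Stay inside the range the PMD reports. */
	if (info.min_mtu > 1400 || info.max_mtu < 1400)
		return -EINVAL;

	/* With this patch the pcap PMD accepts the new value and its
	 * Rx/Tx burst functions drop packets larger than the MTU. */
	return rte_eth_dev_set_mtu(port_id, 1400);
}

Leaving rxmode.mtu at 0 (the default) keeps today's behavior: the port
comes up with the maximum packet size the PMD reports, see the
default_mtu changes below.
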
 drivers/net/pcap/pcap_ethdev.c | 35 +++++++++++++++++++++++++++++++---
 lib/ethdev/rte_ethdev.c        |  3 ++-
 lib/ethdev/rte_ethdev.h        |  1 +
 3 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index ec29fd6bc5..7c5fd6dd98 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -278,11 +278,12 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	const u_char *packet;
 	struct rte_mbuf *mbuf;
 	struct pcap_rx_queue *pcap_q = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[pcap_q->port_id];
 	uint16_t num_rx = 0;
 	uint32_t rx_bytes = 0;
 	pcap_t *pcap;
 
-	pp = rte_eth_devices[pcap_q->port_id].process_private;
+	pp = dev->process_private;
 	pcap = pp->rx_pcap[pcap_q->queue_id];
 
 	if (unlikely(pcap == NULL || nb_pkts == 0))
@@ -303,6 +304,12 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			break;
 		}
 
+		if (unlikely(header.caplen > dev->data->mtu)) {
+			pcap_q->rx_stat.err_pkts++;
+			rte_pktmbuf_free(mbuf);
+			break;
+		}
+
 		if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) {
 			/* pcap packet will fit in the mbuf, can copy it */
 			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
@@ -378,6 +385,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct rte_mbuf *mbuf;
 	struct pmd_process_private *pp;
 	struct pcap_tx_queue *dumper_q = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[dumper_q->port_id];
 	uint16_t num_tx = 0;
 	uint32_t tx_bytes = 0;
 	struct pcap_pkthdr header;
@@ -385,7 +393,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
 	size_t len, caplen;
 
-	pp = rte_eth_devices[dumper_q->port_id].process_private;
+	pp = dev->process_private;
 	dumper = pp->tx_dumper[dumper_q->queue_id];
 
 	if (dumper == NULL || nb_pkts == 0)
@@ -396,6 +404,12 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
 		len = caplen = rte_pktmbuf_pkt_len(mbuf);
+
+		if (unlikely(len > dev->data->mtu)) {
+			rte_pktmbuf_free(mbuf);
+			continue;
+		}
+
 		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
 				len > sizeof(temp_data))) {
 			caplen = sizeof(temp_data);
@@ -464,13 +478,14 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct rte_mbuf *mbuf;
 	struct pmd_process_private *pp;
 	struct pcap_tx_queue *tx_queue = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[tx_queue->port_id];
 	uint16_t num_tx = 0;
 	uint32_t tx_bytes = 0;
 	pcap_t *pcap;
 	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
 	size_t len;
 
-	pp = rte_eth_devices[tx_queue->port_id].process_private;
+	pp = dev->process_private;
 	pcap = pp->tx_pcap[tx_queue->queue_id];
 
 	if (unlikely(nb_pkts == 0 || pcap == NULL))
@@ -479,6 +494,12 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
 		len = rte_pktmbuf_pkt_len(mbuf);
+
+		if (unlikely(len > dev->data->mtu)) {
+			rte_pktmbuf_free(mbuf);
+			continue;
+		}
+
 		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
 				len > sizeof(temp_data))) {
 			PMD_LOG(ERR,
@@ -737,6 +758,7 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = dev->data->nb_rx_queues;
 	dev_info->max_tx_queues = dev->data->nb_tx_queues;
 	dev_info->min_rx_bufsize = 0;
+	dev_info->default_mtu = dev_info->max_mtu;
 
 	return 0;
 }
@@ -807,6 +829,12 @@ eth_stats_reset(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int eth_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	PMD_LOG(INFO, "mtu set %s %u", dev->device->name, mtu);
+	return 0;
+}
+
 static inline void
 infinite_rx_ring_free(struct rte_ring *pkts)
 {
@@ -1004,6 +1032,7 @@ static const struct eth_dev_ops ops = {
 	.link_update = eth_link_update,
 	.stats_get = eth_stats_get,
 	.stats_reset = eth_stats_reset,
+	.mtu_set = eth_mtu_set,
 };
 
 static int
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 29a3d80466..75cb38a4b2 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -1186,7 +1186,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	}
 
 	if (dev_conf->rxmode.mtu == 0)
-		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
+		dev->data->dev_conf.rxmode.mtu = dev_info.default_mtu;
 
 	ret = eth_dev_validate_mtu(port_id, &dev_info,
 			dev->data->dev_conf.rxmode.mtu);
@@ -3126,6 +3126,7 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
 		RTE_ETHER_CRC_LEN;
 	dev_info->max_mtu = UINT16_MAX;
+	dev_info->default_mtu = RTE_ETHER_MTU;
 
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
 	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 04cff8ee10..8a2fd2f4ff 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -1861,6 +1861,7 @@ struct rte_eth_dev_info {
 		Use if_indextoname() to translate into an interface name. */
 	uint16_t min_mtu;	/**< Minimum MTU allowed */
 	uint16_t max_mtu;	/**< Maximum MTU allowed */
+	uint16_t default_mtu;	/**< Default MTU, used if the application does not set it */
 	const uint32_t *dev_flags; /**< Device flags */
 	uint32_t min_rx_bufsize; /**< Minimum size of Rx buffer. */
 	uint32_t max_rx_pktlen; /**< Maximum configurable length of Rx pkt. */
-- 
2.17.1
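
A short sketch (illustrative only, not part of the patch) of the
configure-time fallback the lib/ethdev hunks introduce: when the
application leaves rxmode.mtu at 0, ethdev now uses dev_info.default_mtu,
which rte_eth_dev_info_get() seeds with RTE_ETHER_MTU and which a PMD such
as pcap can raise to its max_mtu. The check_default_mtu() helper and the
single Rx/Tx queue are assumptions; error handling is trimmed.

#include <rte_ethdev.h>

static int
check_default_mtu(uint16_t port_id)
{
	/* rxmode.mtu left at 0 -> ethdev falls back to default_mtu */
	struct rte_eth_conf conf = { .rxmode = { .mtu = 0 } };
	struct rte_eth_dev_info info;
	uint16_t mtu;
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_get_mtu(port_id, &mtu);
	if (ret != 0)
		return ret;

	/* With this series applied, the effective MTU matches default_mtu:
	 * max_mtu for pcap, RTE_ETHER_MTU for PMDs that do not override it. */
	return mtu == info.default_mtu ? 0 : -1;
}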

