From: David Marchand <david.marchand@redhat.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com, stable@dpdk.org
Subject: [dpdk-dev] [PATCH 3/3] net/pcap: fix concurrent multiseg packet transmits
Date: Wed, 24 Jul 2019 13:54:30 +0200
Message-ID: <1563969270-29669-4-git-send-email-david.marchand@redhat.com>
In-Reply-To: <1563969270-29669-1-git-send-email-david.marchand@redhat.com>

Two cores can send multi-segment packets concurrently on two different
pcap ports. Because of this, we can't keep a single static buffer to
linearize packets: the two cores would race on it.

Use rte_pktmbuf_read() to copy the packet into a buffer on the stack
and remove eth_pcap_gather_data().
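
For illustration, the per-packet logic that both Tx paths gain can be
sketched as a small helper (a sketch only, mirroring the hunks below;
pcap_tx_get_data() is a hypothetical name, not part of the patch):

	#include <rte_mbuf.h>

	/* Per-call linearization: the scratch buffer is owned by the
	 * caller (on its stack), so transmits running on different
	 * cores can no longer clobber each other the way they could
	 * with the old shared tx_pcap_data buffer. */
	static const unsigned char *
	pcap_tx_get_data(struct rte_mbuf *mbuf, unsigned char *buf,
			size_t buflen, size_t *len)
	{
		*len = rte_pktmbuf_pkt_len(mbuf);
		if (rte_pktmbuf_is_contiguous(mbuf))
			/* single segment: point at the data, no copy */
			return rte_pktmbuf_mtod(mbuf, unsigned char *);
		if (*len <= buflen)
			/* gather the whole chain into the caller's buffer */
			return rte_pktmbuf_read(mbuf, 0, *len, buf);
		return NULL; /* larger than the capture buffer: drop */
	}

A NULL return tells the caller to log, free the mbuf and continue,
exactly as the error branches in the diff do.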

Fixes: 6db141c91e1f ("pcap: support jumbo frames")
Cc: stable@dpdk.org

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 drivers/net/pcap/rte_eth_pcap.c | 90 +++++++++++++++--------------------------
 1 file changed, 32 insertions(+), 58 deletions(-)

diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 5e5aab7..7398b1b 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -46,7 +46,6 @@
 #define RTE_PMD_PCAP_MAX_QUEUES 16
 
 static char errbuf[PCAP_ERRBUF_SIZE];
-static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
 static struct timeval start_time;
 static uint64_t start_cycles;
 static uint64_t hz;
@@ -180,21 +179,6 @@ struct pmd_devargs_all {
 	return mbuf->nb_segs;
 }
 
-/* Copy data from mbuf chain to a buffer suitable for writing to a PCAP file. */
-static void
-eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
-{
-	uint16_t data_len = 0;
-
-	while (mbuf) {
-		rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *),
-			mbuf->data_len);
-
-		data_len += mbuf->data_len;
-		mbuf = mbuf->next;
-	}
-}
-
 static uint16_t
 eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
@@ -325,6 +309,9 @@ struct pmd_devargs_all {
 	uint32_t tx_bytes = 0;
 	struct pcap_pkthdr header;
 	pcap_dumper_t *dumper;
+	unsigned char _data[RTE_ETH_PCAP_SNAPLEN];
+	const unsigned char *data;
+	size_t len;
 
 	pp = rte_eth_devices[dumper_q->port_id].process_private;
 	dumper = pp->tx_dumper[dumper_q->queue_id];
@@ -336,31 +323,25 @@ struct pmd_devargs_all {
 	 * dumper */
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
-		calculate_timestamp(&header.ts);
-		header.len = mbuf->pkt_len;
-		header.caplen = header.len;
-
-		if (likely(mbuf->nb_segs == 1)) {
-			pcap_dump((u_char *)dumper, &header,
-				  rte_pktmbuf_mtod(mbuf, void*));
+		len = rte_pktmbuf_pkt_len(mbuf);
+		if (likely(rte_pktmbuf_is_contiguous(mbuf))) {
+			data = rte_pktmbuf_mtod(mbuf, unsigned char *);
+		} else if (len <= sizeof(_data)) {
+			data = rte_pktmbuf_read(mbuf, 0, len, _data);
 		} else {
-			if (mbuf->pkt_len <= RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
-				eth_pcap_gather_data(tx_pcap_data, mbuf);
-				pcap_dump((u_char *)dumper, &header,
-					  tx_pcap_data);
-			} else {
-				PMD_LOG(ERR,
-					"Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
-					mbuf->pkt_len,
-					RTE_ETHER_MAX_JUMBO_FRAME_LEN);
-
-				rte_pktmbuf_free(mbuf);
-				continue;
-			}
+			PMD_LOG(ERR, "Dropping PCAP packet. Size (%zd) > max size (%zd).",
+				len, sizeof(_data));
+			rte_pktmbuf_free(mbuf);
+			continue;
 		}
 
+		calculate_timestamp(&header.ts);
+		header.len = len;
+		header.caplen = header.len;
+		pcap_dump((u_char *)dumper, &header, data);
+
 		num_tx++;
-		tx_bytes += mbuf->pkt_len;
+		tx_bytes += len;
 		rte_pktmbuf_free(mbuf);
 	}
 
@@ -408,13 +389,15 @@ struct pmd_devargs_all {
 eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	unsigned int i;
-	int ret;
 	struct rte_mbuf *mbuf;
 	struct pmd_process_private *pp;
 	struct pcap_tx_queue *tx_queue = queue;
 	uint16_t num_tx = 0;
 	uint32_t tx_bytes = 0;
 	pcap_t *pcap;
+	unsigned char _data[RTE_ETH_PCAP_SNAPLEN];
+	const unsigned char *data;
+	size_t len;
 
 	pp = rte_eth_devices[tx_queue->port_id].process_private;
 	pcap = pp->tx_pcap[tx_queue->queue_id];
@@ -424,30 +407,21 @@ struct pmd_devargs_all {
 
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
-
-		if (likely(mbuf->nb_segs == 1)) {
-			ret = pcap_sendpacket(pcap,
-					rte_pktmbuf_mtod(mbuf, u_char *),
-					mbuf->pkt_len);
+		len = rte_pktmbuf_pkt_len(mbuf);
+		if (likely(rte_pktmbuf_is_contiguous(mbuf))) {
+			data = rte_pktmbuf_mtod(mbuf, unsigned char *);
+		} else if (len <= sizeof(_data)) {
+			data = rte_pktmbuf_read(mbuf, 0, len, _data);
 		} else {
-			if (mbuf->pkt_len <= RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
-				eth_pcap_gather_data(tx_pcap_data, mbuf);
-				ret = pcap_sendpacket(pcap,
-						tx_pcap_data, mbuf->pkt_len);
-			} else {
-				PMD_LOG(ERR,
-					"Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
-					mbuf->pkt_len,
-					RTE_ETHER_MAX_JUMBO_FRAME_LEN);
-
-				rte_pktmbuf_free(mbuf);
-				continue;
-			}
+			PMD_LOG(ERR, "Dropping PCAP packet. Size (%zd) > max size (%zd).",
+				len, sizeof(_data));
+			rte_pktmbuf_free(mbuf);
+			continue;
 		}
 
-		if (ret == 0) {
+		if (pcap_sendpacket(pcap, data, len) == 0) {
 			num_tx++;
-			tx_bytes += mbuf->pkt_len;
+			tx_bytes += len;
 		}
 		rte_pktmbuf_free(mbuf);
 	}
-- 
1.8.3.1


Thread overview: 23+ messages
2019-07-24 11:54 [dpdk-dev] [PATCH 0/3] Multiseg fixes for pcap pmd David Marchand
2019-07-24 11:54 ` [dpdk-dev] [PATCH 1/3] net/pcap: fix Rx with small buffers David Marchand
2019-07-24 18:28   ` Ferruh Yigit
2019-07-24 11:54 ` [dpdk-dev] [PATCH 2/3] net/pcap: fix transmit return count in error conditions David Marchand
2019-07-24 18:36   ` Ferruh Yigit
2019-07-25  7:40     ` David Marchand
2019-07-25 11:01       ` Ferruh Yigit
2019-07-24 11:54 ` David Marchand [this message]
2019-07-24 18:43   ` [dpdk-dev] [PATCH 3/3] net/pcap: fix concurrent multiseg packet transmits Ferruh Yigit
2019-07-25  8:18   ` [dpdk-dev] [dpdk-stable] " David Marchand
2019-07-25 11:07     ` Ferruh Yigit
2019-07-25 12:04 ` [dpdk-dev] [PATCH v2 0/3] Multiseg fixes for pcap pmd David Marchand
2019-07-25 12:04   ` [dpdk-dev] [PATCH v2 1/3] net/pcap: fix Rx with small buffers David Marchand
2019-07-25 12:04   ` [dpdk-dev] [PATCH v2 2/3] net/pcap: fix transmit return count in error conditions David Marchand
2019-07-25 14:43     ` Ferruh Yigit
2019-07-25 12:04   ` [dpdk-dev] [PATCH v2 3/3] net/pcap: fix concurrent multiseg packet transmits David Marchand
2019-07-25 12:05     ` [dpdk-dev] [dpdk-stable] " David Marchand
2019-07-25 14:47     ` [dpdk-dev] " Ferruh Yigit
2019-07-25 19:24 ` [dpdk-dev] [PATCH v3 0/3] Multiseg fixes for pcap pmd David Marchand
2019-07-25 19:24   ` [dpdk-dev] [PATCH v3 1/3] net/pcap: fix Rx with small buffers David Marchand
2019-07-25 19:24   ` [dpdk-dev] [PATCH v3 2/3] net/pcap: fix transmit return count in error conditions David Marchand
2019-07-25 19:24   ` [dpdk-dev] [PATCH v3 3/3] net/pcap: fix concurrent multiseg packet transmits David Marchand
2019-07-25 22:36   ` [dpdk-dev] [PATCH v3 0/3] Multiseg fixes for pcap pmd Ferruh Yigit
