From: David Marchand <david.marchand@redhat.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com, stable@dpdk.org
Subject: [dpdk-dev] [PATCH v3 3/3] net/pcap: fix concurrent multiseg packet transmits
Date: Thu, 25 Jul 2019 21:24:19 +0200
Message-ID: <1564082659-21922-4-git-send-email-david.marchand@redhat.com>
In-Reply-To: <1564082659-21922-1-git-send-email-david.marchand@redhat.com>
Two cores can send multi-segment packets on two different pcap ports.
Because of this, we can't rely on a single shared buffer to linearize packets.

Use rte_pktmbuf_read() to copy the packet into a buffer on the stack only
when necessary, and remove eth_pcap_gather_data() (when the mbuf is
contiguous, rte_pktmbuf_read() simply returns a pointer to the data in the
mbuf without copying).
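
In outline, both Tx paths now reduce the per-packet handling to the pattern
below (a simplified, self-contained sketch; the tx_pcap_linearize() helper is
purely illustrative and does not exist in the driver):

#include <rte_mbuf.h>

/* Illustrative only: shows how rte_pktmbuf_read() replaces the old
 * eth_pcap_gather_data() copy into a shared static buffer.
 */
static const unsigned char *
tx_pcap_linearize(struct rte_mbuf *mbuf, unsigned char *temp_data,
		size_t temp_size, size_t *len)
{
	*len = rte_pktmbuf_pkt_len(mbuf);

	/* A multi-segment packet larger than the on-stack buffer cannot
	 * be linearized; the caller drops it. */
	if (!rte_pktmbuf_is_contiguous(mbuf) && *len > temp_size)
		return NULL;

	/* Contiguous mbuf: returns a pointer to the data in the mbuf.
	 * Multi-segment mbuf: copies the segments into temp_data and
	 * returns temp_data. */
	return rte_pktmbuf_read(mbuf, 0, *len, temp_data);
}

Because temp_data lives on the stack of each Tx function call, two cores
transmitting on different pcap ports no longer share any buffer.
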
Fixes: 6db141c91e1f ("pcap: support jumbo frames")
Cc: stable@dpdk.org
Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
Changelog since v2:
- fixed commit log
- fixed coding style
Changelog since v1:
- rely on rte_pktmbuf_read to handle both mono and multi segments cases
---
drivers/net/pcap/rte_eth_pcap.c | 95 +++++++++++++++++------------------------
1 file changed, 38 insertions(+), 57 deletions(-)
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index bfc0756..da03b97 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -46,7 +46,6 @@
#define RTE_PMD_PCAP_MAX_QUEUES 16
static char errbuf[PCAP_ERRBUF_SIZE];
-static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
static struct timeval start_time;
static uint64_t start_cycles;
static uint64_t hz;
@@ -180,21 +179,6 @@ eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
return mbuf->nb_segs;
}
-/* Copy data from mbuf chain to a buffer suitable for writing to a PCAP file. */
-static void
-eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
-{
- uint16_t data_len = 0;
-
- while (mbuf) {
- rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *),
- mbuf->data_len);
-
- data_len += mbuf->data_len;
- mbuf = mbuf->next;
- }
-}
-
static uint16_t
eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
@@ -325,6 +309,8 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
uint32_t tx_bytes = 0;
struct pcap_pkthdr header;
pcap_dumper_t *dumper;
+ unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
+ size_t len;
pp = rte_eth_devices[dumper_q->port_id].process_private;
dumper = pp->tx_dumper[dumper_q->queue_id];
@@ -336,31 +322,28 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
* dumper */
for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];
+ len = rte_pktmbuf_pkt_len(mbuf);
+ if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
+ len > sizeof(temp_data))) {
+ PMD_LOG(ERR,
+ "Dropping multi segment PCAP packet. Size (%zd) > max size (%zd).",
+ len, sizeof(temp_data));
+ rte_pktmbuf_free(mbuf);
+ continue;
+ }
+
calculate_timestamp(&header.ts);
- header.len = mbuf->pkt_len;
+ header.len = len;
header.caplen = header.len;
-
- if (likely(mbuf->nb_segs == 1)) {
- pcap_dump((u_char *)dumper, &header,
- rte_pktmbuf_mtod(mbuf, void*));
- } else {
- if (mbuf->pkt_len <= RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
- eth_pcap_gather_data(tx_pcap_data, mbuf);
- pcap_dump((u_char *)dumper, &header,
- tx_pcap_data);
- } else {
- PMD_LOG(ERR,
- "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
- mbuf->pkt_len,
- RTE_ETHER_MAX_JUMBO_FRAME_LEN);
-
- rte_pktmbuf_free(mbuf);
- continue;
- }
- }
+ /* rte_pktmbuf_read() returns a pointer to the data directly
+ * in the mbuf (when the mbuf is contiguous) or, otherwise,
+ * a pointer to temp_data after copying into it.
+ */
+ pcap_dump((u_char *)dumper, &header,
+ rte_pktmbuf_read(mbuf, 0, len, temp_data));
num_tx++;
- tx_bytes += mbuf->pkt_len;
+ tx_bytes += len;
rte_pktmbuf_free(mbuf);
}
@@ -415,6 +398,8 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
uint16_t num_tx = 0;
uint32_t tx_bytes = 0;
pcap_t *pcap;
+ unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
+ size_t len;
pp = rte_eth_devices[tx_queue->port_id].process_private;
pcap = pp->tx_pcap[tx_queue->queue_id];
@@ -424,31 +409,27 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];
-
- if (likely(mbuf->nb_segs == 1)) {
- ret = pcap_sendpacket(pcap,
- rte_pktmbuf_mtod(mbuf, u_char *),
- mbuf->pkt_len);
- } else {
- if (mbuf->pkt_len <= RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
- eth_pcap_gather_data(tx_pcap_data, mbuf);
- ret = pcap_sendpacket(pcap,
- tx_pcap_data, mbuf->pkt_len);
- } else {
- PMD_LOG(ERR,
- "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
- mbuf->pkt_len,
- RTE_ETHER_MAX_JUMBO_FRAME_LEN);
-
- rte_pktmbuf_free(mbuf);
- continue;
- }
+ len = rte_pktmbuf_pkt_len(mbuf);
+ if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
+ len > sizeof(temp_data))) {
+ PMD_LOG(ERR,
+ "Dropping multi segment PCAP packet. Size (%zd) > max size (%zd).",
+ len, sizeof(temp_data));
+ rte_pktmbuf_free(mbuf);
+ continue;
}
+ /* rte_pktmbuf_read() returns a pointer to the data directly
+ * in the mbuf (when the mbuf is contiguous) or, otherwise,
+ * a pointer to temp_data after copying into it.
+ */
+ ret = pcap_sendpacket(pcap,
+ rte_pktmbuf_read(mbuf, 0, len, temp_data),
+ len);
if (unlikely(ret != 0))
break;
num_tx++;
- tx_bytes += mbuf->pkt_len;
+ tx_bytes += len;
rte_pktmbuf_free(mbuf);
}
--
1.8.3.1
Thread overview: 23+ messages
2019-07-24 11:54 [dpdk-dev] [PATCH 0/3] Multiseg fixes for pcap pmd David Marchand
2019-07-24 11:54 ` [dpdk-dev] [PATCH 1/3] net/pcap: fix Rx with small buffers David Marchand
2019-07-24 18:28 ` Ferruh Yigit
2019-07-24 11:54 ` [dpdk-dev] [PATCH 2/3] net/pcap: fix transmit return count in error conditions David Marchand
2019-07-24 18:36 ` Ferruh Yigit
2019-07-25 7:40 ` David Marchand
2019-07-25 11:01 ` Ferruh Yigit
2019-07-24 11:54 ` [dpdk-dev] [PATCH 3/3] net/pcap: fix concurrent multiseg packet transmits David Marchand
2019-07-24 18:43 ` Ferruh Yigit
2019-07-25 8:18 ` [dpdk-dev] [dpdk-stable] " David Marchand
2019-07-25 11:07 ` Ferruh Yigit
2019-07-25 12:04 ` [dpdk-dev] [PATCH v2 0/3] Multiseg fixes for pcap pmd David Marchand
2019-07-25 12:04 ` [dpdk-dev] [PATCH v2 1/3] net/pcap: fix Rx with small buffers David Marchand
2019-07-25 12:04 ` [dpdk-dev] [PATCH v2 2/3] net/pcap: fix transmit return count in error conditions David Marchand
2019-07-25 14:43 ` Ferruh Yigit
2019-07-25 12:04 ` [dpdk-dev] [PATCH v2 3/3] net/pcap: fix concurrent multiseg packet transmits David Marchand
2019-07-25 12:05 ` [dpdk-dev] [dpdk-stable] " David Marchand
2019-07-25 14:47 ` [dpdk-dev] " Ferruh Yigit
2019-07-25 19:24 ` [dpdk-dev] [PATCH v3 0/3] Multiseg fixes for pcap pmd David Marchand
2019-07-25 19:24 ` [dpdk-dev] [PATCH v3 1/3] net/pcap: fix Rx with small buffers David Marchand
2019-07-25 19:24 ` [dpdk-dev] [PATCH v3 2/3] net/pcap: fix transmit return count in error conditions David Marchand
2019-07-25 19:24 ` David Marchand [this message]
2019-07-25 22:36 ` [dpdk-dev] [PATCH v3 0/3] Multiseg fixes for pcap pmd Ferruh Yigit