Soft Patch Panel
* [spp] [PATCH 0/3] Fix bugs of spp_pcap
@ 2019-03-04  6:53 ogawa.yasufumi
  2019-03-04  6:53 ` [spp] [PATCH 1/3] spp_pcap: change mbuf size for dequeue packets ogawa.yasufumi
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: ogawa.yasufumi @ 2019-03-04  6:53 UTC
  To: spp, ferruh.yigit, ogawa.yasufumi

From: Yasufumi Ogawa <ogawa.yasufumi@lab.ntt.co.jp>

This series of patches fixes bugs in spp_pcap reported by
Hiroyuki [1].

* Fix a bug in which some packets are discarded while packet capturing
  is being stopped.

* Increase the size of the shared ring buffer to avoid discarding
  packets.

* Refactor variable names and log messages.

[1] https://mails.dpdk.org/archives/spp/2019-February/001373.html


Hideyuki Yamashita (2):
  spp_pcap: change mbuf size for dequeue packets
  spp_pcap: fix bug pkts remained after pcap stopped

Yasufumi Ogawa (1):
  spp_pcap: add buffer size to reduce dropped pkts

 src/pcap/spp_pcap.c | 146 +++++++++++++++++++++++++++++---------------
 1 file changed, 98 insertions(+), 48 deletions(-)

-- 
2.17.1


* [spp] [PATCH 1/3] spp_pcap: change mbuf size for dequeue packets
  2019-03-04  6:53 [spp] [PATCH 0/3] Fix bugs of spp_pcap ogawa.yasufumi
@ 2019-03-04  6:53 ` ogawa.yasufumi
  2019-03-04  6:53 ` [spp] [PATCH 2/3] spp_pcap: fix bug pkts remained after pcap stopped ogawa.yasufumi
  2019-03-04  6:53 ` [spp] [PATCH 3/3] spp_pcap: add buffer size to reduce dropped pkts ogawa.yasufumi
  2 siblings, 0 replies; 4+ messages in thread
From: ogawa.yasufumi @ 2019-03-04  6:53 UTC
  To: spp, ferruh.yigit, ogawa.yasufumi; +Cc: Hideyuki Yamashita

From: Hideyuki Yamashita <yamashita.hideyuki@po.ntt-tx.co.jp>

To increase the number of packets dequeued at once, increase the size
of the mbuf array used for burst receive and dequeue.
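
For illustration, a minimal sketch of the receive path after this
change; spp_eth_rx_burst() and MAX_PCAP_BURST are taken from the diff
below, and the surrounding setup is assumed:

    #define MAX_PCAP_BURST 256  /* burst size introduced by this patch */

    /* Receiving up to 256 packets per call, rather than the smaller
     * generic MAX_PKT_BURST, reduces the number of burst calls needed
     * to drain a busy port.
     */
    struct rte_mbuf *bufs[MAX_PCAP_BURST];
    int nb_rx = spp_eth_rx_burst(rx->dpdk_port, 0, bufs, MAX_PCAP_BURST);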

Signed-off-by: Hideyuki Yamashita <yamashita.hideyuki@po.ntt-tx.co.jp>
Signed-off-by: Yasufumi Ogawa <ogawa.yasufumi@lab.ntt.co.jp>
---
 src/pcap/spp_pcap.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/src/pcap/spp_pcap.c b/src/pcap/spp_pcap.c
index 7f2e564..a6b58d5 100644
--- a/src/pcap/spp_pcap.c
+++ b/src/pcap/spp_pcap.c
@@ -22,29 +22,28 @@
 /* Declare global variables */
 #define RTE_LOGTYPE_SPP_PCAP RTE_LOGTYPE_USER2
 
+/* Pcap file attributes */
 #define PCAP_FPATH_STRLEN 128
 #define PCAP_FNAME_STRLEN 64
 #define PCAP_FDATE_STRLEN 16
-/**
- * The first 4 bytes 0xa1b2c3d4 constitute the magic number which is used to
- * identify pcap files.
- */
+
+/* Used to identify pcap files */
 #define TCPDUMP_MAGIC 0xa1b2c3d4
-/* constant which indicates major verions of libpcap file */
+
+/* Indicates major and minor versions of libpcap file */
 #define PCAP_VERSION_MAJOR 2
 #define PCAP_VERSION_MINOR 4
+
 #define PCAP_SNAPLEN_MAX 65535
-/**
- * pcap header value which indicates physical layer type.
- * 1 means LINKTYPE_ETHERNET
- */
-#define PCAP_LINKTYPE 1
+
+#define PCAP_LINKTYPE 1  /* Link type 1 means LINKTYPE_ETHERNET */
 #define IN_CHUNK_SIZE (16*1024)
 #define DEFAULT_OUTPUT_DIR "/tmp"
-#define DEFAULT_FILE_LIMIT 1073741824 /* 1GiB */
+#define DEFAULT_FILE_LIMIT 1073741824  /* 1GiB */
 #define PORT_STR_SIZE 16
 #define RING_SIZE 8192
-/* macro */
+#define MAX_PCAP_BURST 256  /* Max num of packets received at once */
+
 /* Ensure that snaplen does not exceed the maximum size */
 #define TRANCATE_SNAPLEN(a, b) (((a) < (b))?(a):(b))
 
@@ -756,7 +755,7 @@ static int pcap_proc_receive(int lcore_id)
 	int nb_rx = 0;
 	int nb_tx = 0;
 	struct spp_port_info *rx;
-	struct rte_mbuf *bufs[MAX_PKT_BURST];
+	struct rte_mbuf *bufs[MAX_PCAP_BURST];
 	struct pcap_mng_info *info = &g_pcap_info[lcore_id];
 	struct rte_ring *write_ring = g_pcap_option.cap_ring;
 
@@ -786,7 +785,7 @@ static int pcap_proc_receive(int lcore_id)
 	/* Receive packets */
 	rx = &g_pcap_option.port_cap;
 
-	nb_rx = spp_eth_rx_burst(rx->dpdk_port, 0, bufs, MAX_PKT_BURST);
+	nb_rx = spp_eth_rx_burst(rx->dpdk_port, 0, bufs, MAX_PCAP_BURST);
 	if (unlikely(nb_rx == 0))
 		return SPP_RET_OK;
 
@@ -810,7 +809,7 @@ static int pcap_proc_write(int lcore_id)
 	int ret = SPP_RET_OK;
 	int buf;
 	int nb_rx = 0;
-	struct rte_mbuf *bufs[MAX_PKT_BURST];
+	struct rte_mbuf *bufs[MAX_PCAP_BURST];
 	struct rte_mbuf *mbuf = NULL;
 	struct pcap_mng_info *info = &g_pcap_info[lcore_id];
 	struct rte_ring *read_ring = g_pcap_option.cap_ring;
@@ -837,7 +836,7 @@ static int pcap_proc_write(int lcore_id)
 	}
 
 	/* Read packets */
-	nb_rx =  rte_ring_dequeue_bulk(read_ring, (void *)bufs, MAX_PKT_BURST,
+	nb_rx =  rte_ring_dequeue_bulk(read_ring, (void *)bufs, MAX_PCAP_BURST,
 									NULL);
 	if (unlikely(nb_rx == 0))
 		return SPP_RET_OK;
@@ -860,6 +859,7 @@ static int pcap_proc_write(int lcore_id)
 	/* mbuf free */
 	for (buf = 0; buf < nb_rx; buf++)
 		rte_pktmbuf_free(bufs[buf]);
+
 	return ret;
 }
 
-- 
2.17.1


* [spp] [PATCH 2/3] spp_pcap: fix bug pkts remained after pcap stopped
  2019-03-04  6:53 [spp] [PATCH 0/3] Fix bugs of spp_pcap ogawa.yasufumi
  2019-03-04  6:53 ` [spp] [PATCH 1/3] spp_pcap: change mbuf size for dequeue packets ogawa.yasufumi
@ 2019-03-04  6:53 ` ogawa.yasufumi
  2019-03-04  6:53 ` [spp] [PATCH 3/3] spp_pcap: add buffer size to reduce dropped pkts ogawa.yasufumi
  2 siblings, 0 replies; 4+ messages in thread
From: ogawa.yasufumi @ 2019-03-04  6:53 UTC
  To: spp, ferruh.yigit, ogawa.yasufumi; +Cc: Hideyuki Yamashita

From: Hideyuki Yamashita <yamashita.hideyuki@po.ntt-tx.co.jp>

* Add dequeuing of remaining packets before finalizing pcap, because
  spp_pcap previously discarded packets left on the ring while capturing
  was being stopped (see the sketch after this list).

* Refactor variable names and log messages.
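
A minimal sketch of the writer-side logic after this fix, simplified
from the diff below (error handling omitted; all names are from the
diff):

    /* Keep draining the shared ring even after capture has been
     * requested to stop, and close the output file only once a
     * dequeue returns no packets while the capture status is idle.
     */
    nb_rx = rte_ring_mc_dequeue_burst(read_ring, (void *)bufs,
                                      MAX_PCAP_BURST, NULL);
    if (nb_rx == 0) {
        if (g_capture_status == SPP_CAPTURE_IDLE) {
            info->status = SPP_CAPTURE_IDLE;
            file_compression_operation(info, CLOSE_MODE);
        }
        return SPP_RET_OK;
    }
    /* Otherwise compress and write the nb_rx dequeued packets as
     * before.
     */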

Signed-off-by: Hideyuki Yamashita <yamashita.hideyuki@po.ntt-tx.co.jp>
Signed-off-by: Yasufumi Ogawa <ogawa.yasufumi@lab.ntt.co.jp>
---
 src/pcap/spp_pcap.c | 74 +++++++++++++++++++++++++--------------------
 1 file changed, 42 insertions(+), 32 deletions(-)

diff --git a/src/pcap/spp_pcap.c b/src/pcap/spp_pcap.c
index a6b58d5..a59a89b 100644
--- a/src/pcap/spp_pcap.c
+++ b/src/pcap/spp_pcap.c
@@ -746,10 +746,10 @@ static int compress_file_packet(struct pcap_mng_info *info,
 	return SPP_RET_OK;
 }
 
-/* receive thread */
+/* Receive packets from the port and enqueue them to the shared ring */
 static int pcap_proc_receive(int lcore_id)
 {
-	struct timespec now_time;
+	struct timespec cur_time;  /* Used as timestamp for the file name */
 	struct tm l_time;
 	int buf;
 	int nb_rx = 0;
@@ -761,36 +761,41 @@ static int pcap_proc_receive(int lcore_id)
 
 	if (g_capture_request == SPP_CAPTURE_IDLE) {
 		if (info->status == SPP_CAPTURE_RUNNING) {
-			RTE_LOG(DEBUG, SPP_PCAP, "recive[%d], run->idle\n",
-								lcore_id);
+			RTE_LOG(DEBUG, SPP_PCAP,
+					"Receive on lcore %d, run->idle\n",
+					lcore_id);
+
 			info->status = SPP_CAPTURE_IDLE;
 			g_capture_status = SPP_CAPTURE_IDLE;
 		}
 		return SPP_RET_OK;
 	}
 	if (info->status == SPP_CAPTURE_IDLE) {
-		/* get time */
-		clock_gettime(CLOCK_REALTIME, &now_time);
+		/* Get time for output file name */
+		clock_gettime(CLOCK_REALTIME, &cur_time);
 		memset(g_pcap_option.compress_file_date, 0, PCAP_FDATE_STRLEN);
-		localtime_r(&now_time.tv_sec, &l_time);
+		localtime_r(&cur_time.tv_sec, &l_time);
 		strftime(g_pcap_option.compress_file_date, PCAP_FDATE_STRLEN,
 					"%Y%m%d%H%M%S", &l_time);
 		info->status = SPP_CAPTURE_RUNNING;
 		g_capture_status = SPP_CAPTURE_RUNNING;
-		RTE_LOG(DEBUG, SPP_PCAP, "recive[%d], idle->run\n", lcore_id);
-		RTE_LOG(DEBUG, SPP_PCAP, "recive[%d], start time=%s\n",
-			lcore_id, g_pcap_option.compress_file_date);
+
+		RTE_LOG(DEBUG, SPP_PCAP,
+				"Receive on lcore %d, idle->run\n", lcore_id);
+		RTE_LOG(DEBUG, SPP_PCAP,
+				"Receive on lcore %d, start time=%s\n",
+				lcore_id, g_pcap_option.compress_file_date);
+
 	}
 
 	/* Receive packets */
 	rx = &g_pcap_option.port_cap;
-
 	nb_rx = spp_eth_rx_burst(rx->dpdk_port, 0, bufs, MAX_PCAP_BURST);
 	if (unlikely(nb_rx == 0))
 		return SPP_RET_OK;
 
-	/* Write ring packets */
-	nb_tx = rte_ring_enqueue_bulk(write_ring, (void *)bufs, nb_rx, NULL);
+	/* Forward to ring for writer thread */
+	nb_tx = rte_ring_enqueue_burst(write_ring, (void *)bufs, nb_rx, NULL);
 
 	/* Discard remaining packets to release mbufs */
 	if (unlikely(nb_tx < nb_rx)) {
@@ -803,7 +808,7 @@ static int pcap_proc_receive(int lcore_id)
 	return SPP_RET_OK;
 }
 
-/* write thread */
+/* Output packets to file on writer thread */
 static int pcap_proc_write(int lcore_id)
 {
 	int ret = SPP_RET_OK;
@@ -815,15 +820,8 @@ static int pcap_proc_write(int lcore_id)
 	struct rte_ring *read_ring = g_pcap_option.cap_ring;
 
 	if (g_capture_status == SPP_CAPTURE_IDLE) {
-		if (info->status == SPP_CAPTURE_RUNNING) {
-			RTE_LOG(DEBUG, SPP_PCAP, "write[%d] run->idle\n",
-								lcore_id);
-			info->status = SPP_CAPTURE_IDLE;
-			if (file_compression_operation(info, CLOSE_MODE)
-							!= SPP_RET_OK)
-				return SPP_RET_NG;
-		}
-		return SPP_RET_OK;
+		if (info->status == SPP_CAPTURE_IDLE)
+			return SPP_RET_OK;
 	}
 	if (info->status == SPP_CAPTURE_IDLE) {
 		RTE_LOG(DEBUG, SPP_PCAP, "write[%d] idle->run\n", lcore_id);
@@ -835,28 +833,40 @@ static int pcap_proc_write(int lcore_id)
 		}
 	}
 
-	/* Read packets */
-	nb_rx =  rte_ring_dequeue_bulk(read_ring, (void *)bufs, MAX_PCAP_BURST,
-									NULL);
-	if (unlikely(nb_rx == 0))
+	/* Read packets from shared ring */
+	nb_rx =  rte_ring_mc_dequeue_burst(read_ring, (void *)bufs,
+					   MAX_PCAP_BURST, NULL);
+	if (unlikely(nb_rx == 0)) {
+		if (g_capture_status == SPP_CAPTURE_IDLE) {
+			RTE_LOG(DEBUG, SPP_PCAP,
+					"Write on lcore %d, run->idle\n",
+					lcore_id);
+
+			info->status = SPP_CAPTURE_IDLE;
+			if (file_compression_operation(info, CLOSE_MODE)
+							!= SPP_RET_OK)
+				return SPP_RET_NG;
+		}
 		return SPP_RET_OK;
+	}
 
 	for (buf = 0; buf < nb_rx; buf++) {
 		mbuf = bufs[buf];
 		rte_prefetch0(rte_pktmbuf_mtod(mbuf, void *));
 		if (compress_file_packet(&g_pcap_info[lcore_id], mbuf)
 							!= SPP_RET_OK) {
-			RTE_LOG(ERR, SPP_PCAP, "capture file write error: "
-				"%d (%s)\n", errno, strerror(errno));
-			RTE_LOG(ERR, SPP_PCAP, "drop packets(write) %d\n",
-							(nb_rx - buf));
+			RTE_LOG(ERR, SPP_PCAP,
+					"Failed compress_file_packet(), "
+					"errno=%d (%s)\n",
+					errno, strerror(errno));
 			ret = SPP_RET_NG;
 			info->status = SPP_CAPTURE_IDLE;
 			file_compression_operation(info, CLOSE_MODE);
 			break;
 		}
 	}
-	/* mbuf free */
+
+	/* Free mbuf */
 	for (buf = 0; buf < nb_rx; buf++)
 		rte_pktmbuf_free(bufs[buf]);
 
-- 
2.17.1


* [spp] [PATCH 3/3] spp_pcap: add buffer size to reduce dropped pkts
  2019-03-04  6:53 [spp] [PATCH 0/3] Fix bugs of spp_pcap ogawa.yasufumi
  2019-03-04  6:53 ` [spp] [PATCH 1/3] spp_pcap: change mbuf size for dequeue packets ogawa.yasufumi
  2019-03-04  6:53 ` [spp] [PATCH 2/3] spp_pcap: fix bug pkts remained after pcap stopped ogawa.yasufumi
@ 2019-03-04  6:53 ` ogawa.yasufumi
  2 siblings, 0 replies; 4+ messages in thread
From: ogawa.yasufumi @ 2019-03-04  6:53 UTC
  To: spp, ferruh.yigit, ogawa.yasufumi; +Cc: Hideyuki Yamashita

From: Yasufumi Ogawa <ogawa.yasufumi@lab.ntt.co.jp>

* Increase the size of the ring buffer so that packets are not
  discarded before they can be dequeued.

* Add a check that all worker threads have started before capturing
  (see the sketch after this list).

* Add variables for logging statistics.
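
A minimal sketch of the start-up check added on the receive side,
simplified from the diff below (g_pcap_thread_info is the new status
struct; the counters are updated by the worker threads themselves):

    /* The main thread counts workers at launch; each receive/write
     * thread increments start_up_cnt when it enters the running
     * state.  The receiver holds off enqueuing until every thread is
     * up, so packets cannot accumulate on the ring with no consumer.
     */
    if (g_pcap_thread_info.thread_cnt > g_pcap_thread_info.start_up_cnt)
        return SPP_RET_OK;  /* not all threads are running yet */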

Signed-off-by: Hideyuki Yamashita <yamashita.hideyuki@po.ntt-tx.co.jp>
Signed-off-by: Yasufumi Ogawa <ogawa.yasufumi@lab.ntt.co.jp>
---
 src/pcap/spp_pcap.c | 44 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 42 insertions(+), 2 deletions(-)

diff --git a/src/pcap/spp_pcap.c b/src/pcap/spp_pcap.c
index a59a89b..1fb6c32 100644
--- a/src/pcap/spp_pcap.c
+++ b/src/pcap/spp_pcap.c
@@ -41,7 +41,7 @@
 #define DEFAULT_OUTPUT_DIR "/tmp"
 #define DEFAULT_FILE_LIMIT 1073741824  /* 1GiB */
 #define PORT_STR_SIZE 16
-#define RING_SIZE 8192
+#define RING_SIZE 16384
 #define MAX_PCAP_BURST 256  /* Max num of packets received at once */
 
 /* Ensure that snaplen does not exceed the maximum size */
@@ -136,6 +136,12 @@ struct pcap_mng_info {
 	uint64_t file_size;            /* file write size */
 };
 
+/* Pcap status info. */
+struct pcap_status_info {
+	int thread_cnt;		/* thread count */
+	int start_up_cnt;	/* thread start up count */
+};
+
 /* Logical core ID for main thread */
 static unsigned int g_main_lcore_id = 0xffffffff;
 
@@ -160,6 +166,12 @@ static struct pcap_option g_pcap_option;
 /* pcap managed info */
 static struct pcap_mng_info g_pcap_info[RTE_MAX_LCORE];
 
+/* pcap thread status info */
+static struct pcap_status_info g_pcap_thread_info;
+
+/* pcap total write packet count */
+static unsigned long long g_total_write[RTE_MAX_LCORE];
+
 /* Print help message */
 static void
 usage(const char *progname)
@@ -758,15 +770,23 @@ static int pcap_proc_receive(int lcore_id)
 	struct rte_mbuf *bufs[MAX_PCAP_BURST];
 	struct pcap_mng_info *info = &g_pcap_info[lcore_id];
 	struct rte_ring *write_ring = g_pcap_option.cap_ring;
+	static unsigned long long total_rx;
+	static unsigned long long total_drop;
 
 	if (g_capture_request == SPP_CAPTURE_IDLE) {
 		if (info->status == SPP_CAPTURE_RUNNING) {
 			RTE_LOG(DEBUG, SPP_PCAP,
 					"Receive on lcore %d, run->idle\n",
 					lcore_id);
+			RTE_LOG(INFO, SPP_PCAP,
+					"Receive on lcore %d, total_rx=%llu, "
+					"total_drop=%llu\n", lcore_id,
+					total_rx, total_drop);
 
 			info->status = SPP_CAPTURE_IDLE;
 			g_capture_status = SPP_CAPTURE_IDLE;
+			if (g_pcap_thread_info.start_up_cnt != 0)
+				g_pcap_thread_info.start_up_cnt -= 1;
 		}
 		return SPP_RET_OK;
 	}
@@ -785,9 +805,15 @@ static int pcap_proc_receive(int lcore_id)
 		RTE_LOG(DEBUG, SPP_PCAP,
 				"Receive on lcore %d, start time=%s\n",
 				lcore_id, g_pcap_option.compress_file_date);
-
+		g_pcap_thread_info.start_up_cnt += 1;
+		total_rx = 0;
+		total_drop = 0;
 	}
 
+	/* Wait until all write threads have started up. */
+	if (g_pcap_thread_info.thread_cnt > g_pcap_thread_info.start_up_cnt)
+		return SPP_RET_OK;
+
 	/* Receive packets */
 	rx = &g_pcap_option.port_cap;
 	nb_rx = spp_eth_rx_burst(rx->dpdk_port, 0, bufs, MAX_PCAP_BURST);
@@ -805,6 +831,9 @@ static int pcap_proc_receive(int lcore_id)
 			rte_pktmbuf_free(bufs[buf]);
 	}
 
+	total_rx += nb_rx;
+	total_drop += nb_rx - nb_tx;
+
 	return SPP_RET_OK;
 }
 
@@ -831,6 +860,8 @@ static int pcap_proc_write(int lcore_id)
 			info->status = SPP_CAPTURE_IDLE;
 			return SPP_RET_NG;
 		}
+		g_pcap_thread_info.start_up_cnt += 1;
+		g_total_write[lcore_id] = 0;
 	}
 
 	/* Read packets from shared ring */
@@ -841,8 +872,13 @@ static int pcap_proc_write(int lcore_id)
 			RTE_LOG(DEBUG, SPP_PCAP,
 					"Write on lcore %d, run->idle\n",
 					lcore_id);
+			RTE_LOG(INFO, SPP_PCAP,
+					"Write on lcore %d, total_write=%llu\n",
+					lcore_id, g_total_write[lcore_id]);
 
 			info->status = SPP_CAPTURE_IDLE;
+			if (g_pcap_thread_info.start_up_cnt != 0)
+				g_pcap_thread_info.start_up_cnt -= 1;
 			if (file_compression_operation(info, CLOSE_MODE)
 							!= SPP_RET_OK)
 				return SPP_RET_NG;
@@ -870,6 +906,7 @@ static int pcap_proc_write(int lcore_id)
 	for (buf = 0; buf < nb_rx; buf++)
 		rte_pktmbuf_free(bufs[buf]);
 
+	g_total_write[lcore_id] += nb_rx;
 	return ret;
 }
 
@@ -1039,7 +1076,10 @@ main(int argc, char *argv[])
 		/* Start worker threads of receive or write */
 		unsigned int lcore_id = 0;
 		unsigned int thread_no = 0;
+		g_pcap_thread_info.thread_cnt = 0;
+		g_pcap_thread_info.start_up_cnt = 0;
 		RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+			g_pcap_thread_info.thread_cnt += 1;
 			g_pcap_info[lcore_id].thread_no = thread_no++;
 			rte_eal_remote_launch(slave_main, NULL, lcore_id);
 		}
-- 
2.17.1

