From: ogawa.yasufumi@lab.ntt.co.jp
To: spp@dpdk.org, ferruh.yigit@intel.com, ogawa.yasufumi@lab.ntt.co.jp
Cc: Hideyuki Yamashita <yamashita.hideyuki@po.ntt-tx.co.jp>
Subject: [spp] [PATCH 3/3] spp_pcap: add buffer size to reduce dropped pkts
Date: Mon, 4 Mar 2019 15:53:47 +0900
Message-ID: <1551682427-13835-4-git-send-email-ogawa.yasufumi@lab.ntt.co.jp>
In-Reply-To: <1551682427-13835-1-git-send-email-ogawa.yasufumi@lab.ntt.co.jp>
From: Yasufumi Ogawa <ogawa.yasufumi@lab.ntt.co.jp>
* Increase the size of the ring buffer so that packets are not
  discarded before they can be dequeued.
* Add a check of the number of started threads, so that receiving does
  not begin until all write threads are ready (sketched below).
* Add variables for logging statistics.
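For reviewers, a minimal stand-alone sketch of the start-up handshake this
patch introduces: every worker bumps a shared counter when it starts, and
the receiver does not enqueue packets until the counter reaches the
expected thread count. The sketch uses pthreads and C11 atomics in place
of DPDK lcores and the patch's g_pcap_thread_info globals, so the names,
thread counts and polling interval are illustrative assumptions only, not
spp_pcap code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NB_WRITERS 3
#define NB_THREADS (NB_WRITERS + 1)  /* write threads + one receive thread */

static atomic_int start_up_cnt;      /* threads that have signalled ready */

static void *writer_main(void *arg)
{
    (void)arg;
    /* ... open capture file, init per-thread state ... */
    atomic_fetch_add(&start_up_cnt, 1);
    /* ... dequeue mbufs from the shared ring and write them out ... */
    return NULL;
}

static void *receiver_main(void *arg)
{
    (void)arg;
    atomic_fetch_add(&start_up_cnt, 1);  /* the receiver counts too */

    /*
     * Start-up wait: do not enqueue packets before every writer is
     * ready, otherwise early bursts pile up in the ring and are dropped.
     */
    while (atomic_load(&start_up_cnt) < NB_THREADS)
        usleep(1000);

    printf("all %d threads ready, start receiving\n", NB_THREADS);
    /* ... rx burst and enqueue to the shared ring ... */
    return NULL;
}

int main(void)
{
    pthread_t w[NB_WRITERS], r;
    int i;

    for (i = 0; i < NB_WRITERS; i++)
        pthread_create(&w[i], NULL, writer_main, NULL);
    pthread_create(&r, NULL, receiver_main, NULL);

    pthread_join(r, NULL);
    for (i = 0; i < NB_WRITERS; i++)
        pthread_join(w[i], NULL);
    return 0;
}

In the patch itself the counter is also decremented on each run->idle
transition, so a later capture session repeats the same handshake.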
Signed-off-by: Hideyuki Yamashita <yamashita.hideyuki@po.ntt-tx.co.jp>
Signed-off-by: Yasufumi Ogawa <ogawa.yasufumi@lab.ntt.co.jp>
---
src/pcap/spp_pcap.c | 44 ++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 42 insertions(+), 2 deletions(-)
diff --git a/src/pcap/spp_pcap.c b/src/pcap/spp_pcap.c
index a59a89b..1fb6c32 100644
--- a/src/pcap/spp_pcap.c
+++ b/src/pcap/spp_pcap.c
@@ -41,7 +41,7 @@
#define DEFAULT_OUTPUT_DIR "/tmp"
#define DEFAULT_FILE_LIMIT 1073741824 /* 1GiB */
#define PORT_STR_SIZE 16
-#define RING_SIZE 8192
+#define RING_SIZE 16384
#define MAX_PCAP_BURST 256 /* Num of received packets at once */
/* Ensure snaplen not to be over the maximum size */
@@ -136,6 +136,12 @@ struct pcap_mng_info {
uint64_t file_size; /* file write size */
};
+/* Pcap status info. */
+struct pcap_status_info {
+ int thread_cnt; /* thread count */
+ int start_up_cnt; /* thread start up count */
+};
+
/* Logical core ID for main thread */
static unsigned int g_main_lcore_id = 0xffffffff;
@@ -160,6 +166,12 @@ static struct pcap_option g_pcap_option;
/* pcap managed info */
static struct pcap_mng_info g_pcap_info[RTE_MAX_LCORE];
+/* pcap thread status info */
+static struct pcap_status_info g_pcap_thread_info;
+
+/* pcap total write packet count */
+static long long g_total_write[RTE_MAX_LCORE];
+
/* Print help message */
static void
usage(const char *progname)
@@ -758,15 +770,23 @@ static int pcap_proc_receive(int lcore_id)
struct rte_mbuf *bufs[MAX_PCAP_BURST];
struct pcap_mng_info *info = &g_pcap_info[lcore_id];
struct rte_ring *write_ring = g_pcap_option.cap_ring;
+ static long long total_rx;
+ static long long total_drop;
if (g_capture_request == SPP_CAPTURE_IDLE) {
if (info->status == SPP_CAPTURE_RUNNING) {
RTE_LOG(DEBUG, SPP_PCAP,
"Recive on lcore %d, run->idle\n",
lcore_id);
+ RTE_LOG(INFO, SPP_PCAP,
+ "Receive on lcore %d, total_rx=%lld, "
+ "total_drop=%lld\n", lcore_id,
+ total_rx, total_drop);
info->status = SPP_CAPTURE_IDLE;
g_capture_status = SPP_CAPTURE_IDLE;
+ if (g_pcap_thread_info.start_up_cnt != 0)
+ g_pcap_thread_info.start_up_cnt -= 1;
}
return SPP_RET_OK;
}
@@ -785,9 +805,15 @@ static int pcap_proc_receive(int lcore_id)
RTE_LOG(DEBUG, SPP_PCAP,
"Recive on lcore %d, start time=%s\n",
lcore_id, g_pcap_option.compress_file_date);
-
+ g_pcap_thread_info.start_up_cnt += 1;
+ total_rx = 0;
+ total_drop = 0;
}
+ /* Write thread start up wait. */
+ if (g_pcap_thread_info.thread_cnt > g_pcap_thread_info.start_up_cnt)
+ return SPP_RET_OK;
+
/* Receive packets */
rx = &g_pcap_option.port_cap;
nb_rx = spp_eth_rx_burst(rx->dpdk_port, 0, bufs, MAX_PCAP_BURST);
@@ -805,6 +831,9 @@ static int pcap_proc_receive(int lcore_id)
rte_pktmbuf_free(bufs[buf]);
}
+ total_rx += nb_rx;
+ total_drop += nb_rx - nb_tx;
+
return SPP_RET_OK;
}
@@ -831,6 +860,8 @@ static int pcap_proc_write(int lcore_id)
info->status = SPP_CAPTURE_IDLE;
return SPP_RET_NG;
}
+ g_pcap_thread_info.start_up_cnt += 1;
+ g_total_write[lcore_id] = 0;
}
/* Read packets from shared ring */
@@ -841,8 +872,13 @@ static int pcap_proc_write(int lcore_id)
RTE_LOG(DEBUG, SPP_PCAP,
"Write on lcore %d, run->idle\n",
lcore_id);
+ RTE_LOG(INFO, SPP_PCAP,
+ "Write on lcore %d, total_write=%llu\n",
+ lcore_id, g_total_write[lcore_id]);
info->status = SPP_CAPTURE_IDLE;
+ if (g_pcap_thread_info.start_up_cnt != 0)
+ g_pcap_thread_info.start_up_cnt -= 1;
if (file_compression_operation(info, CLOSE_MODE)
!= SPP_RET_OK)
return SPP_RET_NG;
@@ -870,6 +906,7 @@ static int pcap_proc_write(int lcore_id)
for (buf = 0; buf < nb_rx; buf++)
rte_pktmbuf_free(bufs[buf]);
+ g_total_write[lcore_id] += nb_rx;
return ret;
}
@@ -1039,7 +1076,10 @@ main(int argc, char *argv[])
/* Start worker threads of recive or write */
unsigned int lcore_id = 0;
unsigned int thread_no = 0;
+ g_pcap_thread_info.thread_cnt = 0;
+ g_pcap_thread_info.start_up_cnt = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ g_pcap_thread_info.thread_cnt += 1;
g_pcap_info[lcore_id].thread_no = thread_no++;
rte_eal_remote_launch(slave_main, NULL, lcore_id);
}
--
2.17.1
Thread overview:
2019-03-04 6:53 [spp] [PATCH 0/3] Fix bugs of spp_pcap ogawa.yasufumi
2019-03-04 6:53 ` [spp] [PATCH 1/3] spp_pcap: change mbuf size for dequeue packets ogawa.yasufumi
2019-03-04 6:53 ` [spp] [PATCH 2/3] spp_pcap: fix bug pkts remained after pcap stopped ogawa.yasufumi
2019-03-04 6:53 ` [spp] [PATCH 3/3] spp_pcap: add buffer size to reduce dropped pkts ogawa.yasufumi [this message]