* [dpdk-dev] [PATCH] net/pcap: support snaplen option to truncate packets
From: Zhike Wang @ 2020-07-03 6:19 UTC (permalink / raw)
To: dev; +Cc: wangzhike, ferruh.yigit, reshma.pattan
Introduce the "snaplen=<length>" option, which truncates captured packets
to at most <length> bytes. This is convenient when packets are large but
only the headers need to be captured.
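
For example, to keep at most the first 128 bytes of each captured packet
(the file path, port number and snap length below are only illustrative):

    ./dpdk-pdump -- --pdump 'port=0,queue=*,rx-dev=/tmp/rx.pcap,snaplen=128'

The option can also be passed directly to a pcap vdev:

    --vdev 'net_pcap0,tx_pcap=/tmp/tx.pcap,snaplen=128'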
Signed-off-by: Zhike Wang <wangzhike@jd.com>
---
app/pdump/main.c | 28 +++++++++++++++++++++++++++-
drivers/net/pcap/rte_eth_pcap.c | 34 +++++++++++++++++++++++++++++++++-
2 files changed, 60 insertions(+), 2 deletions(-)
diff --git a/app/pdump/main.c b/app/pdump/main.c
index c38c537..721315e 100644
--- a/app/pdump/main.c
+++ b/app/pdump/main.c
@@ -41,10 +41,12 @@
#define PDUMP_RING_SIZE_ARG "ring-size"
#define PDUMP_MSIZE_ARG "mbuf-size"
#define PDUMP_NUM_MBUFS_ARG "total-num-mbufs"
+#define PDUMP_SNAPLEN_ARG "snaplen"
#define VDEV_NAME_FMT "net_pcap_%s_%d"
#define VDEV_PCAP_ARGS_FMT "tx_pcap=%s"
#define VDEV_IFACE_ARGS_FMT "tx_iface=%s"
+#define VDEV_SNAPLEN_ARGS_FMT "snaplen=%d"
#define TX_STREAM_SIZE 64
#define MP_NAME "pdump_pool_%d"
@@ -97,6 +99,7 @@ enum pdump_by {
PDUMP_RING_SIZE_ARG,
PDUMP_MSIZE_ARG,
PDUMP_NUM_MBUFS_ARG,
+ PDUMP_SNAPLEN_ARG,
NULL
};
@@ -116,6 +119,7 @@ struct pdump_tuples {
uint32_t ring_size;
uint16_t mbuf_data_size;
uint32_t total_num_mbufs;
+ uint16_t snaplen;
/* params for library API call */
uint32_t dir;
@@ -160,7 +164,8 @@ struct parse_val {
" tx-dev=<iface or pcap file>,"
"[ring-size=<ring size>default:16384],"
"[mbuf-size=<mbuf data size>default:2176],"
- "[total-num-mbufs=<number of mbufs>default:65535]'\n",
+ "[total-num-mbufs=<number of mbufs>default:65535],",
+ "[snaplen=<snap length>default:0, meaning no truncation]'\n",
prgname);
}
@@ -370,6 +375,19 @@ struct parse_val {
} else
pt->total_num_mbufs = MBUFS_PER_POOL;
+ /* snaplen parsing and validation */
+ cnt1 = rte_kvargs_count(kvlist, PDUMP_SNAPLEN_ARG);
+ if (cnt1 == 1) {
+ v.min = 1;
+ v.max = UINT16_MAX;
+ ret = rte_kvargs_process(kvlist, PDUMP_SNAPLEN_ARG,
+ &parse_uint_value, &v);
+ if (ret < 0)
+ goto free_kvlist;
+ pt->snaplen = (uint16_t) v.val;
+ } else
+ pt->snaplen = 0;
+
num_tuples++;
free_kvlist:
@@ -692,6 +710,8 @@ struct parse_val {
VDEV_IFACE_ARGS_FMT, pt->rx_dev) :
snprintf(vdev_args, sizeof(vdev_args),
VDEV_PCAP_ARGS_FMT, pt->rx_dev);
+ snprintf(vdev_args + strlen(vdev_args), sizeof(vdev_args) - strlen(vdev_args),
+ ","VDEV_SNAPLEN_ARGS_FMT, pt->snaplen);
if (rte_eal_hotplug_add("vdev", vdev_name,
vdev_args) < 0) {
cleanup_rings();
@@ -722,6 +742,8 @@ struct parse_val {
VDEV_IFACE_ARGS_FMT, pt->tx_dev) :
snprintf(vdev_args, sizeof(vdev_args),
VDEV_PCAP_ARGS_FMT, pt->tx_dev);
+ snprintf(vdev_args + strlen(vdev_args), sizeof(vdev_args) - strlen(vdev_args),
+ ","VDEV_SNAPLEN_ARGS_FMT, pt->snaplen);
if (rte_eal_hotplug_add("vdev", vdev_name,
vdev_args) < 0) {
cleanup_rings();
@@ -762,6 +784,8 @@ struct parse_val {
VDEV_IFACE_ARGS_FMT, pt->rx_dev) :
snprintf(vdev_args, sizeof(vdev_args),
VDEV_PCAP_ARGS_FMT, pt->rx_dev);
+ snprintf(vdev_args + strlen(vdev_args), sizeof(vdev_args) - strlen(vdev_args),
+ ","VDEV_SNAPLEN_ARGS_FMT, pt->snaplen);
if (rte_eal_hotplug_add("vdev", vdev_name,
vdev_args) < 0) {
cleanup_rings();
@@ -799,6 +823,8 @@ struct parse_val {
VDEV_IFACE_ARGS_FMT, pt->tx_dev) :
snprintf(vdev_args, sizeof(vdev_args),
VDEV_PCAP_ARGS_FMT, pt->tx_dev);
+ snprintf(vdev_args + strlen(vdev_args), sizeof(vdev_args) - strlen(vdev_args),
+ ","VDEV_SNAPLEN_ARGS_FMT, pt->snaplen);
if (rte_eal_hotplug_add("vdev", vdev_name,
vdev_args) < 0) {
cleanup_rings();
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 13a3d0a..8a78ceb 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -40,6 +40,7 @@
#define ETH_PCAP_IFACE_ARG "iface"
#define ETH_PCAP_PHY_MAC_ARG "phy_mac"
#define ETH_PCAP_INFINITE_RX_ARG "infinite_rx"
+#define ETH_PCAP_SNAPLEN_ARG "snaplen"
#define ETH_PCAP_ARG_MAXLEN 64
@@ -92,6 +93,7 @@ struct pmd_process_private {
pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES];
+ int snaplen;
};
struct pmd_devargs {
@@ -114,6 +116,7 @@ struct pmd_devargs_all {
unsigned int is_rx_pcap;
unsigned int is_rx_iface;
unsigned int infinite_rx;
+ int snaplen;
};
static const char *valid_arguments[] = {
@@ -125,6 +128,7 @@ struct pmd_devargs_all {
ETH_PCAP_IFACE_ARG,
ETH_PCAP_PHY_MAC_ARG,
ETH_PCAP_INFINITE_RX_ARG,
+ ETH_PCAP_SNAPLEN_ARG,
NULL
};
@@ -339,6 +343,9 @@ struct pmd_devargs_all {
caplen = sizeof(temp_data);
}
+ if ((pp->snaplen > 0) && (caplen > pp->snaplen))
+ caplen = pp->snaplen;
calculate_timestamp(&header.ts);
header.len = len;
header.caplen = caplen;
@@ -1083,6 +1090,19 @@ struct pmd_devargs_all {
}
static int
+get_snaplen_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (extra_args) {
+ const int snaplen = atoi(value);
+ int *snaplen_p = extra_args;
+
+ *snaplen_p = snaplen;
+ }
+ return 0;
+}
+
+static int
pmd_init_internals(struct rte_vdev_device *vdev,
const unsigned int nb_rx_queues,
const unsigned int nb_tx_queues,
@@ -1335,6 +1355,7 @@ struct pmd_devargs_all {
struct rte_eth_dev *eth_dev = NULL;
struct pmd_internals *internal;
int ret = 0;
+ unsigned int snaplen_cnt;
struct pmd_devargs_all devargs_all = {
.single_iface = 0,
@@ -1412,6 +1433,15 @@ struct pmd_devargs_all {
rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG) ? 1 : 0;
dumpers.num_of_queue = 0;
+ snaplen_cnt = rte_kvargs_count(kvlist,
+ ETH_PCAP_SNAPLEN_ARG);
+ if (snaplen_cnt == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_PCAP_SNAPLEN_ARG,
+ &get_snaplen_arg, &devargs_all.snaplen);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+
if (devargs_all.is_rx_pcap) {
/*
* We check whether we want to infinitely rx the pcap file.
@@ -1518,6 +1548,7 @@ struct pmd_devargs_all {
pp->tx_pcap[i] = dumpers.queue[i].pcap;
}
+ pp->snaplen = devargs_all.snaplen;
eth_dev->process_private = pp;
eth_dev->rx_pkt_burst = eth_pcap_rx;
if (devargs_all.is_tx_pcap)
@@ -1587,7 +1618,8 @@ struct pmd_devargs_all {
ETH_PCAP_TX_IFACE_ARG "=<ifc> "
ETH_PCAP_IFACE_ARG "=<ifc> "
ETH_PCAP_PHY_MAC_ARG "=<int>"
- ETH_PCAP_INFINITE_RX_ARG "=<0|1>");
+ ETH_PCAP_INFINITE_RX_ARG "=<0|1> "
+ ETH_PCAP_SNAPLEN_ARG "=<int>");
RTE_INIT(eth_pcap_init_log)
{
--
1.8.3.1